Mirror of https://github.com/open-webui/open-webui.git (synced 2026-03-10 15:54:15 -05:00)
Compare commits
2776 Commits
[Commit table omitted: 2,776 SHA-only rows, from f10c729e3d (first entry) to a842d8d62b (last entry); the Author and Date columns were empty in the extracted page.]
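To inspect the same range locally, something like the following should work. This is a sketch: it assumes f10c729e3d and a842d8d62b, the first and last SHAs recorded in the commit table above, are the endpoints of this comparison (newest and oldest respectively).

```bash
# Clone the repository and count the commits between the two endpoints.
git clone https://github.com/open-webui/open-webui.git
cd open-webui

# Commits reachable from f10c729e3d but not from a842d8d62b;
# if the endpoint assumption holds, the count should be close to 2776.
git log --oneline a842d8d62b..f10c729e3d | wc -l
```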
@@ -10,7 +10,8 @@ node_modules
vite.config.js.timestamp-*
vite.config.ts.timestamp-*
__pycache__
.env
.idea
venv
_old
uploads
.ipynb_checkpoints
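As a quick way to confirm ignore patterns like these behave as intended, `git check-ignore -v` reports which rule matches a given path. A minimal sketch; the file paths are made up for illustration:

```bash
# -v prints the ignore file, line number, and pattern that matched each path;
# the command exits non-zero if none of the paths are ignored.
git check-ignore -v uploads/example.bin __pycache__/module.cpython-311.pyc .env
```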
53 .github/ISSUE_TEMPLATE/bug_report.md (vendored)
@@ -8,36 +8,57 @@ assignees: ''

 # Bug Report

-## Description
+## Important Notes

-**Bug Summary:**
-[Provide a brief but clear summary of the bug]
+- **Before submitting a bug report**: Please check the Issues or Discussions section to see if a similar issue or feature request has already been posted. It's likely we're already tracking it! If you’re unsure, start a discussion post first. This will help us efficiently focus on improving the project.

-**Steps to Reproduce:**
-[Outline the steps to reproduce the bug. Be as detailed as possible.]
+- **Collaborate respectfully**: We value a constructive attitude, so please be mindful of your communication. If negativity is part of your approach, our capacity to engage may be limited. We’re here to help if you’re open to learning and communicating positively. Remember, Open WebUI is a volunteer-driven project managed by a single maintainer and supported by contributors who also have full-time jobs. We appreciate your time and ask that you respect ours.

-**Expected Behavior:**
-[Describe what you expected to happen.]
+- **Contributing**: If you encounter an issue, we highly encourage you to submit a pull request or fork the project. We actively work to prevent contributor burnout to maintain the quality and continuity of Open WebUI.

-**Actual Behavior:**
-[Describe what actually happened.]
+- **Bug reproducibility**: If a bug cannot be reproduced with a `:main` or `:dev` Docker setup, or a pip install with Python 3.11, it may require additional help from the community. In such cases, we will move it to the "issues" Discussions section due to our limited resources. We encourage the community to assist with these issues. Remember, it’s not that the issue doesn’t exist; we need your help!

+Note: Please remove the notes above when submitting your post. Thank you for your understanding and support!

+---

+## Installation Method

+[Describe the method you used to install the project, e.g., git clone, Docker, pip, etc.]

 ## Environment

-- **Open WebUI Version:** [e.g., 0.1.120]
-- **Ollama (if applicable):** [e.g., 0.1.30, 0.1.32-rc1]
+- **Open WebUI Version:** [e.g., v0.3.11]
+- **Ollama (if applicable):** [e.g., v0.2.0, v0.1.32-rc1]

 - **Operating System:** [e.g., Windows 10, macOS Big Sur, Ubuntu 20.04]
 - **Browser (if applicable):** [e.g., Chrome 100.0, Firefox 98.0]

 ## Reproduction Details

 **Confirmation:**

 - [ ] I have read and followed all the instructions provided in the README.md.
 - [ ] I am on the latest version of both Open WebUI and Ollama.
 - [ ] I have included the browser console logs.
 - [ ] I have included the Docker container logs.
+- [ ] I have provided the exact steps to reproduce the bug in the "Steps to Reproduce" section below.

+## Expected Behavior:

+[Describe what you expected to happen.]

+## Actual Behavior:

+[Describe what actually happened.]

+## Description

+**Bug Summary:**
+[Provide a brief but clear summary of the bug]

+## Reproduction Details

+**Steps to Reproduce:**
+[Outline the steps to reproduce the bug. Be as detailed as possible.]

 ## Logs and Screenshots

@@ -47,13 +68,9 @@ assignees: ''

 **Docker Container Logs:**
 [Include relevant Docker container logs, if applicable]

-**Screenshots (if applicable):**
+**Screenshots/Screen Recordings (if applicable):**
 [Attach any relevant screenshots to help illustrate the issue]

-## Installation Method
-
-[Describe the method you used to install the project, e.g., manual installation, Docker, package manager, etc.]
-
 ## Additional Information

 [Include any additional details that may help in understanding and reproducing the issue. This could include specific configurations, error messages, or anything else relevant to the bug.]
16
.github/ISSUE_TEMPLATE/feature_request.md
vendored
16
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -6,6 +6,22 @@ labels: ''
assignees: ''
---

# Feature Request

## Important Notes

- **Before submitting a report**: Please check the Issues or Discussions section to see if a similar issue or feature request has already been posted. It's likely we're already tracking it! If you’re unsure, start a discussion post first. This will help us efficiently focus on improving the project.

- **Collaborate respectfully**: We value a constructive attitude, so please be mindful of your communication. If negativity is part of your approach, our capacity to engage may be limited. We’re here to help if you’re open to learning and communicating positively. Remember, Open WebUI is a volunteer-driven project managed by a single maintainer and supported by contributors who also have full-time jobs. We appreciate your time and ask that you respect ours.

- **Contributing**: If you encounter an issue, we highly encourage you to submit a pull request or fork the project. We actively work to prevent contributor burnout to maintain the quality and continuity of Open WebUI.

- **Bug reproducibility**: If a bug cannot be reproduced with a `:main` or `:dev` Docker setup, or a pip install with Python 3.11, it may require additional help from the community. In such cases, we will move it to the "issues" Discussions section due to our limited resources. We encourage the community to assist with these issues. Remember, it’s not that the issue doesn’t exist; we need your help!

Note: Please remove the notes above when submitting your post. Thank you for your understanding and support!

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
.github/dependabot.yml

@@ -3,9 +3,10 @@ updates:
  - package-ecosystem: pip
    directory: '/backend'
    schedule:
      interval: weekly
      interval: monthly
    target-branch: 'dev'
  - package-ecosystem: 'github-actions'
    directory: '/'
    schedule:
      # Check for updates to GitHub Actions every week
      interval: 'weekly'
      interval: monthly
108  .github/workflows/build-release.yml  (vendored)

@@ -10,61 +10,63 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check for changes in package.json
        run: |
          git diff --cached --diff-filter=d package.json || {
            echo "No changes to package.json"
            exit 1
          }

      - name: Get version number from package.json
        id: get_version
        run: |
          VERSION=$(jq -r '.version' package.json)
          echo "::set-output name=version::$VERSION"

      - name: Extract latest CHANGELOG entry
        id: changelog
        run: |
          CHANGELOG_CONTENT=$(awk 'BEGIN {print_section=0;} /^## \[/ {if (print_section == 0) {print_section=1;} else {exit;}} print_section {print;}' CHANGELOG.md)
          CHANGELOG_ESCAPED=$(echo "$CHANGELOG_CONTENT" | sed ':a;N;$!ba;s/\n/%0A/g')
          echo "Extracted latest release notes from CHANGELOG.md:"
          echo -e "$CHANGELOG_CONTENT"
          echo "::set-output name=content::$CHANGELOG_ESCAPED"

      - name: Create GitHub release
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const changelog = `${{ steps.changelog.outputs.content }}`;
            const release = await github.rest.repos.createRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              tag_name: `v${{ steps.get_version.outputs.version }}`,
              name: `v${{ steps.get_version.outputs.version }}`,
              body: changelog,
            })
            console.log(`Created release ${release.data.html_url}`)

      - name: Upload package to GitHub release
        uses: actions/upload-artifact@v4
        with:
          name: package
          path: .
          path: |
            .
            !.git
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Trigger Docker build workflow
        uses: actions/github-script@v7
        with:
          script: |
            github.rest.actions.createWorkflowDispatch({
              owner: context.repo.owner,
              repo: context.repo.repo,
              workflow_id: 'docker-build.yaml',
              ref: 'v${{ steps.get_version.outputs.version }}',
            })
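The release steps above lean on two shell tricks worth noting: the awk program that slices the newest entry out of CHANGELOG.md, and the `::set-output` workflow command, which GitHub has since deprecated in favour of the `$GITHUB_OUTPUT` file. A minimal local sketch of both, assuming a Keep a Changelog-style CHANGELOG.md in the working directory:

#!/usr/bin/env bash
# Sketch: run the workflow's CHANGELOG extraction locally.
set -euo pipefail

# Same awk as the workflow: print from the first "## [" heading
# up to (but not including) the next one.
CHANGELOG_CONTENT=$(awk 'BEGIN {print_section=0;} /^## \[/ {if (print_section == 0) {print_section=1;} else {exit;}} print_section {print;}' CHANGELOG.md)
echo "$CHANGELOG_CONTENT"

# "::set-output" is deprecated; in a modern step a multiline value is
# appended to the $GITHUB_OUTPUT file with a heredoc-style delimiter instead
# (falls back to stdout here so the sketch also runs outside Actions):
{
  echo "content<<CHANGELOG_EOF"
  echo "$CHANGELOG_CONTENT"
  echo "CHANGELOG_EOF"
} >> "${GITHUB_OUTPUT:-/dev/stdout}"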
63  .github/workflows/docker-build.yaml  (vendored)

@@ -11,8 +11,6 @@ on:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  FULL_IMAGE_NAME: ghcr.io/${{ github.repository }}

jobs:
  build-main-image:

@@ -28,6 +26,15 @@ jobs:
        - linux/arm64

    steps:
      # GitHub Packages requires the entire repository name to be in lowercase
      # although the repository owner has a lowercase username, this prevents some people from running actions after forking
      - name: Set repository and image name to lowercase
        run: |
          echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
          echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
        env:
          IMAGE_NAME: '${{ github.repository }}'

      - name: Prepare
        run: |
          platform=${{ matrix.platform }}

@@ -116,6 +123,15 @@ jobs:
        - linux/arm64

    steps:
      # GitHub Packages requires the entire repository name to be in lowercase
      # although the repository owner has a lowercase username, this prevents some people from running actions after forking
      - name: Set repository and image name to lowercase
        run: |
          echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
          echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
        env:
          IMAGE_NAME: '${{ github.repository }}'

      - name: Prepare
        run: |
          platform=${{ matrix.platform }}

@@ -207,6 +223,15 @@ jobs:
        - linux/arm64

    steps:
      # GitHub Packages requires the entire repository name to be in lowercase
      # although the repository owner has a lowercase username, this prevents some people from running actions after forking
      - name: Set repository and image name to lowercase
        run: |
          echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
          echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
        env:
          IMAGE_NAME: '${{ github.repository }}'

      - name: Prepare
        run: |
          platform=${{ matrix.platform }}

@@ -287,8 +312,17 @@ jobs:

  merge-main-images:
    runs-on: ubuntu-latest
    needs: [ build-main-image ]
    needs: [build-main-image]
    steps:
      # GitHub Packages requires the entire repository name to be in lowercase
      # although the repository owner has a lowercase username, this prevents some people from running actions after forking
      - name: Set repository and image name to lowercase
        run: |
          echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
          echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
        env:
          IMAGE_NAME: '${{ github.repository }}'

      - name: Download digests
        uses: actions/download-artifact@v4
        with:

@@ -330,11 +364,19 @@ jobs:
        run: |
          docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }}

  merge-cuda-images:
    runs-on: ubuntu-latest
    needs: [ build-cuda-image ]
    needs: [build-cuda-image]
    steps:
      # GitHub Packages requires the entire repository name to be in lowercase
      # although the repository owner has a lowercase username, this prevents some people from running actions after forking
      - name: Set repository and image name to lowercase
        run: |
          echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
          echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
        env:
          IMAGE_NAME: '${{ github.repository }}'

      - name: Download digests
        uses: actions/download-artifact@v4
        with:

@@ -380,8 +422,17 @@ jobs:

  merge-ollama-images:
    runs-on: ubuntu-latest
    needs: [ build-ollama-image ]
    needs: [build-ollama-image]
    steps:
      # GitHub Packages requires the entire repository name to be in lowercase
      # although the repository owner has a lowercase username, this prevents some people from running actions after forking
      - name: Set repository and image name to lowercase
        run: |
          echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
          echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
        env:
          IMAGE_NAME: '${{ github.repository }}'

      - name: Download digests
        uses: actions/download-artifact@v4
        with:
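The lowercase step this change adds to every job relies on bash's case-conversion parameter expansion rather than an external tool. A quick sketch (bash 4+; the repository name is an illustrative stand-in for `${{ github.repository }}`):

# Sketch of the ${VAR,,} lowercase expansion used by the new steps (bash 4+).
IMAGE_NAME='Open-WebUI/Open-WebUI'   # illustrative value for ${{ github.repository }}
echo "IMAGE_NAME=${IMAGE_NAME,,}"               # IMAGE_NAME=open-webui/open-webui
echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}"  # FULL_IMAGE_NAME=ghcr.io/open-webui/open-webui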
2  .github/workflows/format-backend.yaml  (vendored)

@@ -23,7 +23,7 @@ jobs:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
4  .github/workflows/format-build-frontend.yaml  (vendored)

@@ -21,7 +21,7 @@ jobs:
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20' # Or specify any other version you want to use
          node-version: '22' # Or specify any other version you want to use

      - name: Install Dependencies
        run: npm install

@@ -48,7 +48,7 @@ jobs:
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          node-version: '22'

      - name: Install Dependencies
        run: npm ci
94  .github/workflows/integration-test.yml  (vendored)

@@ -15,6 +15,13 @@ jobs:
    name: Run Cypress Integration Tests
    runs-on: ubuntu-latest
    steps:
      - name: Maximize build space
        uses: AdityaGarg8/remove-unwanted-software@v4.1
        with:
          remove-android: 'true'
          remove-haskell: 'true'
          remove-codeql: 'true'

      - name: Checkout Repository
        uses: actions/checkout@v4

@@ -25,7 +32,11 @@ jobs:
            --file docker-compose.api.yaml \
            --file docker-compose.a1111-test.yaml \
            up --detach --build

      - name: Delete Docker build cache
        run: |
          docker builder prune --all --force

      - name: Wait for Ollama to be up
        timeout-minutes: 5
        run: |

@@ -67,6 +78,28 @@ jobs:
          path: compose-logs.txt
          if-no-files-found: ignore

  # pytest:
  #   name: Run Backend Tests
  #   runs-on: ubuntu-latest
  #   steps:
  #     - uses: actions/checkout@v4

  #     - name: Set up Python
  #       uses: actions/setup-python@v5
  #       with:
  #         python-version: ${{ matrix.python-version }}

  #     - name: Install dependencies
  #       run: |
  #         python -m pip install --upgrade pip
  #         pip install -r backend/requirements.txt

  #     - name: pytest run
  #       run: |
  #         ls -al
  #         cd backend
  #         PYTHONPATH=. pytest . -o log_cli=true -o log_cli_level=INFO
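Although the pytest job is commented out, it doubles as the local recipe for running the backend tests; as a plain shell session it would read roughly as follows (assuming Python 3.11 and a repository checkout, per the notes in the issue templates above):

# Local equivalent of the disabled pytest job (assumes a repo checkout and Python 3.11).
python -m pip install --upgrade pip
pip install -r backend/requirements.txt
cd backend
PYTHONPATH=. pytest . -o log_cli=true -o log_cli_level=INFO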
  migration_test:
    name: Run Migration Tests
    runs-on: ubuntu-latest

@@ -82,18 +115,18 @@ jobs:
          --health-retries 5
        ports:
          - 5432:5432
      # mysql:
      #   image: mysql
      #   env:
      #     MYSQL_ROOT_PASSWORD: mysql
      #     MYSQL_DATABASE: mysql
      #   options: >-
      #     --health-cmd "mysqladmin ping -h localhost"
      #     --health-interval 10s
      #     --health-timeout 5s
      #     --health-retries 5
      #   ports:
      #     - 3306:3306
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4

@@ -124,13 +157,13 @@ jobs:
          GLOBAL_LOG_LEVEL: debug
        run: |
          cd backend
          uvicorn main:app --port "8080" --forwarded-allow-ips '*' &
          uvicorn open_webui.main:app --port "8080" --forwarded-allow-ips '*' &
          UVICORN_PID=$!
          # Wait up to 20 seconds for the server to start
          for i in {1..20}; do
          # Wait up to 40 seconds for the server to start
          for i in {1..40}; do
            curl -s http://localhost:8080/api/config > /dev/null && break
            sleep 1
            if [ $i -eq 20 ]; then
            if [ $i -eq 40 ]; then
              echo "Server failed to start"
              kill -9 $UVICORN_PID
              exit 1

@@ -142,7 +175,6 @@ jobs:
            echo "Server has stopped"
            exit 1
          fi
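The start-up loop above (doubled from 20 to 40 seconds by this change) is a reusable pattern; factored into a helper it might look like the sketch below (the URL and attempt count are illustrative):

# Sketch: generic "wait until an HTTP endpoint answers" helper, mirroring the
# workflow's loop. Returns non-zero if the endpoint never comes up.
wait_for_url() {
  local url=$1 attempts=${2:-40}
  for ((i = 1; i <= attempts; i++)); do
    curl -s "$url" > /dev/null && return 0
    sleep 1
  done
  echo "timed out after ${attempts}s waiting for $url" >&2
  return 1
}

wait_for_url http://localhost:8080/api/config 40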
      - name: Test backend with Postgres
        if: success() || steps.sqlite.conclusion == 'failure'

@@ -150,9 +182,12 @@ jobs:
          WEBUI_SECRET_KEY: secret-key
          GLOBAL_LOG_LEVEL: debug
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/postgres
          DATABASE_POOL_SIZE: 10
          DATABASE_POOL_MAX_OVERFLOW: 10
          DATABASE_POOL_TIMEOUT: 30
        run: |
          cd backend
          uvicorn main:app --port "8081" --forwarded-allow-ips '*' &
          uvicorn open_webui.main:app --port "8081" --forwarded-allow-ips '*' &
          UVICORN_PID=$!
          # Wait up to 20 seconds for the server to start
          for i in {1..20}; do

@@ -171,6 +206,25 @@ jobs:
            exit 1
          fi

          # Check that the service will reconnect to postgres when the connection is closed
          status_code=$(curl --write-out %{http_code} -s --output /dev/null http://localhost:8081/health/db)
          if [[ "$status_code" -ne 200 ]] ; then
            echo "Server has failed before postgres reconnect check"
            exit 1
          fi

          echo "Terminating all connections to postgres..."
          python -c "import os, psycopg2 as pg2; \
            conn = pg2.connect(dsn=os.environ['DATABASE_URL'].replace('+pool', '')); \
            cur = conn.cursor(); \
            cur.execute('SELECT pg_terminate_backend(psa.pid) FROM pg_stat_activity psa WHERE datname = current_database() AND pid <> pg_backend_pid();')"

          status_code=$(curl --write-out %{http_code} -s --output /dev/null http://localhost:8081/health/db)
          if [[ "$status_code" -ne 200 ]] ; then
            echo "Server has not reconnected to postgres after connection was closed: returned status $status_code"
            exit 1
          fi
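The reconnect check embeds a psycopg2 one-liner to kill every other Postgres connection server-side; the same probe can be written with plain psql, as in this sketch (assumes the psql client and the workflow's DATABASE_URL are available):

# Sketch: kill all other connections to the current database via psql instead
# of the inline psycopg2 one-liner, then verify the app reconnects.
psql "$DATABASE_URL" -c \
  "SELECT pg_terminate_backend(psa.pid) FROM pg_stat_activity psa
   WHERE datname = current_database() AND pid <> pg_backend_pid();"

status_code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8081/health/db)
if [[ "$status_code" -ne 200 ]]; then
  echo "no reconnect after dropped connections (status $status_code)" >&2
  exit 1
fi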
      # - name: Test backend with MySQL
      #   if: success() || steps.sqlite.conclusion == 'failure' || steps.postgres.conclusion == 'failure'
      #   env:

@@ -179,7 +233,7 @@ jobs:
      #     DATABASE_URL: mysql://root:mysql@localhost:3306/mysql
      #   run: |
      #     cd backend
      #     uvicorn main:app --port "8083" --forwarded-allow-ips '*' &
      #     uvicorn open_webui.main:app --port "8083" --forwarded-allow-ips '*' &
      #     UVICORN_PID=$!
      #     # Wait up to 20 seconds for the server to start
      #     for i in {1..20}; do
2  .github/workflows/lint-backend.disabled  (vendored)

@@ -16,7 +16,7 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - name: Use Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
      - name: Use Bun
        uses: oven-sh/setup-bun@v1
      - name: Install dependencies
1  .github/workflows/release-pypi.yml  (vendored)

@@ -4,6 +4,7 @@ on:
  push:
    branches:
      - main # or whatever branch you want to use
      - pypi-release

jobs:
  release:
1  .gitignore  (vendored)

@@ -306,3 +306,4 @@ dist
# cypress artifacts
cypress/videos
cypress/screenshots
.vscode/settings.json
614  CHANGELOG.md

@@ -5,6 +5,620 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.3.34] - 2024-10-26

### Added

- **🔧 Feedback Export Enhancements**: Feedback history data can now be exported to JSON, allowing for seamless integration in RLHF processing and further analysis.
- **🗂️ Embedding Model Lazy Loading**: Search functionality for leaderboard reranking is now more efficient, as embedding models are lazy-loaded only when needed, optimizing performance.
- **🎨 Rich Text Input Toggle**: Users can now switch back to legacy textarea input for chat if they prefer simpler text input, though rich text is still the default until deprecation.
- **🛠️ Improved Tool Calling Mechanism**: Enhanced method for parsing and calling tools, improving the reliability and robustness of tool function calls.
- **🌐 Globalization Enhancements**: Updates to internationalization (i18n) support, further refining multi-language compatibility and accuracy.

### Fixed

- **🖥️ Folder Rename Fix for Firefox**: Addressed a persistent issue where users could not rename folders by pressing enter in Firefox, now ensuring seamless folder management across browsers.
- **🔠 Tiktoken Model Text Splitter Issue**: Resolved an issue where the tiktoken text splitter wasn’t working in Docker installations, restoring full functionality for tokenized text editing.
- **💼 S3 File Upload Issue**: Fixed a problem affecting S3 file uploads, ensuring smooth operations for those who store files on cloud storage.
- **🔒 Strict-Transport-Security Crash**: Resolved a crash when setting the Strict-Transport-Security (HSTS) header, improving stability and security enhancements.
- **📂 Firefox Folder Rename Persistence**: Fixed the bug in Firefox where folder renaming was not saved upon pressing enter, ensuring consistent user experience.
- **🚫 OIDC Boolean Access Fix**: Addressed an issue with boolean values not being accessed correctly during OIDC logins, ensuring login reliability.
- **⚙️ Rich Text Paste Behavior**: Refined paste behavior in rich text input to make it smoother and more intuitive when pasting various content types.
- **🔨 Model Exclusion for Arena Fix**: Corrected the filter function that was not properly excluding models from the arena, improving model management.
- **🏷️ "Tags Generation Prompt" Fix**: Addressed an issue preventing custom "tags generation prompts" from registering properly, ensuring custom prompts work seamlessly.
## [0.3.33] - 2024-10-24

### Added

- **🏆 Evaluation Leaderboard**: Easily track your performance through a new leaderboard system where your ratings contribute to a real-time ranking based on the Elo system. Sibling responses (regenerations, many model chats) are required for your ratings to count in the leaderboard. Additionally, you can opt-in to share your feedback history and be part of the community-wide leaderboard. Expect further improvements as we refine the algorithm—help us build the best community leaderboard!
- **⚔️ Arena Model Evaluation**: Enable blind A/B testing of models directly from Admin Settings > Evaluation for a true side-by-side comparison. Ideal for pinpointing the best model for your needs.
- **🎯 Topic-Based Leaderboard**: Discover more accurate rankings with experimental topic-based reranking, which adjusts leaderboard standings based on tag similarity in feedback. Get more relevant insights based on specific topics!
- **📁 Folders Support for Chats**: Organize your chats better by grouping them into folders. Drag and drop chats between folders and export them seamlessly for easy sharing or analysis.
- **📤 Easy Chat Import via Drag & Drop**: Save time by simply dragging and dropping chat exports (JSON) directly onto the sidebar to import them into your workspace—streamlined, efficient, and intuitive!
- **📚 Enhanced Knowledge Collection**: Now, you can reference individual files from a knowledge collection—ideal for more precise Retrieval-Augmented Generations (RAG) queries and document analysis.
- **🏷️ Enhanced Tagging System**: Tags now take up less space! Utilize the new 'tag:' query system to manage, search, and organize your conversations more effectively without cluttering the interface.
- **🧠 Auto-Tagging for Chats**: Your conversations are now automatically tagged for improved organization, mirroring the efficiency of auto-generated titles.
- **🔍 Backend Chat Query System**: Chat filtering has become more efficient, now handled through the backend instead of your browser, improving search performance and accuracy.
- **🎮 Revamped Playground**: Experience a refreshed and optimized Playground for smoother testing, tweaks, and experimentation of your models and tools.
- **🧩 Token-Based Text Splitter**: Introducing token-based text splitting (tiktoken), giving you more precise control over how text is processed. Previously, only character-based splitting was available.
- **🔢 Ollama Batch Embeddings**: Leverage new batch embedding support for improved efficiency and performance with Ollama embedding models.
- **🔍 Enhanced Add Text Content Modal**: Enjoy a cleaner, more intuitive workflow for adding and curating knowledge content with an upgraded input modal from our Knowledge workspace.
- **🖋️ Rich Text Input for Chats**: Make your chat inputs more dynamic with support for rich text formatting. Your conversations just got a lot more polished and professional.
- **⚡ Faster Whisper Model Configurability**: Customize your local faster whisper model directly from the WebUI.
- **☁️ Experimental S3 Support**: Enable stateless WebUI instances with S3 support, greatly enhancing scalability and balancing heavy workloads.
- **🔕 Disable Update Toast**: Now you can streamline your workspace even further—choose to disable update notifications for a more focused experience.
- **🌟 RAG Citation Relevance Percentage**: Easily assess citation accuracy with the addition of relevance percentages in RAG results.
- **⚙️ Mermaid Copy Button**: Mermaid diagrams now come with a handy copy button, simplifying the extraction and use of diagram contents directly in your workflow.
- **🎨 UI Redesign**: Major interface redesign that will make navigation smoother, keep your focus where it matters, and ensure a modern look.

### Fixed

- **🎙️ Voice Note Mic Stopping Issue**: Fixed the issue where the microphone stayed active after ending a voice note recording, ensuring your audio workflow runs smoothly.

### Removed

- **👋 Goodbye Sidebar Tags**: Sidebar tag clutter is gone. We’ve shifted tag buttons to more effective query-based tag filtering for a sleeker, more agile interface.
## [0.3.32] - 2024-10-06

### Added

- **🔢 Workspace Enhancements**: Added a display count for models, prompts, tools, and functions in the workspace, providing a clear overview and easier management.

### Fixed

- **🖥️ Web and YouTube Attachment Fix**: Resolved an issue where attaching web links and YouTube videos was malfunctioning, ensuring seamless integration and display within chats.
- **📞 Call Mode Activation on Landing Page**: Fixed a bug where call mode was not operational from the landing page.

### Changed

- **🔄 URL Parameter Refinement**: Updated the 'tool_ids' URL parameter to 'tools' or 'tool-ids' for a more intuitive and consistent user experience.
- **🎨 Floating Buttons Styling Update**: Refactored the styling of floating buttons to intelligently adjust to the left side when there isn't enough room on the right, improving interface usability and aesthetics.
- **🔧 Enhanced Accessibility for Floating Buttons**: Implemented the ability to close floating buttons with the 'Esc' key, making workflow smoother and more efficient for users navigating via keyboard.
- **🖇️ Updated Information URL**: Information URLs now direct users to a general release page rather than a version-specific URL, ensuring access to the latest and relevant details all in one place.
- **📦 Library Dependencies Update**: Upgraded dependencies to ensure compatibility and performance optimization for pip installs.
## [0.3.31] - 2024-10-06

### Added

- **📚 Knowledge Feature**: Reimagined documents feature, now more performant with a better UI for enhanced organization; includes streamlined API integration for Retrieval-Augmented Generation (RAG). Detailed documentation forthcoming: https://docs.openwebui.com/
- **🌐 New Landing Page**: Freshly designed landing page; toggle between the new UI and the classic chat UI from Settings > Interface for a personalized experience.
- **📁 Full Document Retrieval Mode**: Toggle between full document retrieval or traditional snippets by clicking on the file item. This mode enhances document capabilities and supports comprehensive tasks like summarization by utilizing the entire content instead of RAG.
- **📄 Extracted File Content Display**: View extracted content directly by clicking on the file item, simplifying file analysis.
- **🎨 Artifacts Feature**: Render web content and SVGs directly in the interface, supporting quick iterations and live changes.
- **🖊️ Editable Code Blocks**: Supercharged code blocks now allow live editing directly in the LLM response, with live reloads supported by artifacts.
- **🔧 Code Block Enhancements**: Introduced a floating copy button in code blocks to facilitate easier code copying without scrolling.
- **🔍 SVG Pan/Zoom**: Enhanced interaction with SVG images, including Mermaid diagrams, via new pan and zoom capabilities.
- **🔍 Text Select Quick Actions**: New floating buttons appear when text is highlighted in LLM responses, offering deeper interactions like "Ask a Question" or "Explain".
- **🗃️ Database Pool Configuration**: Enhanced database handling to support scalable user growth.
- **🔊 Experimental Audio Compression**: Compress audio files to navigate around the 25MB limit for OpenAI's speech-to-text processing.
- **🔍 Query Embedding**: Adjusted embedding behavior to enhance system performance by not repeating query embedding.
- **💾 Lazy Load Optimizations**: Implemented lazy loading of large dependencies to minimize initial memory usage, boosting performance.
- **🍏 Apple Touch Icon Support**: Optimizes the display of icons for web bookmarks on Apple mobile devices.
- **🔽 Expandable Content Markdown Support**: Introducing 'details', 'summary' tag support for creating expandable content sections in markdown, facilitating cleaner, organized documentation and interactive content display.

### Fixed

- **🔘 Action Button Issue**: Resolved a bug where action buttons were not functioning, enhancing UI reliability.
- **🔄 Multi-Model Chat Loop**: Fixed an infinite loop issue in multi-model chat environments, ensuring smoother chat operations.
- **📄 Chat PDF/TXT Export Issue**: Resolved problems with exporting chat logs to PDF and TXT formats.
- **🔊 Call to Text-to-Speech Issues**: Rectified problems with text-to-speech functions to improve audio interactions.

### Changed

- **⚙️ Endpoint Renaming**: Renamed 'rag' endpoints to 'retrieval' for clearer function description.
- **🎨 Styling and Interface Updates**: Multiple refinements across the platform to enhance visual appeal and user interaction.

### Removed

- **🗑️ Deprecated 'DOCS_DIR'**: Removed the outdated 'docs_dir' variable in favor of more direct file management solutions, with direct file directory syncing and API uploads for a more integrated experience.
## [0.3.30] - 2024-09-26

### Fixed

- **🍞 Update Available Toast Dismissal**: Enhanced user experience by ensuring that once the update available notification is dismissed, it won't reappear for 24 hours.
- **📋 Ollama /embed Form Data**: Adjusted the integration inaccuracies in the /embed form data to ensure it perfectly matches with Ollama's specifications.
- **🔧 O1 Max Completion Tokens Issue**: Resolved compatibility issues with OpenAI's o1 models max_completion_tokens param to ensure smooth operation.
- **🔄 Pip Install Database Issue**: Fixed a critical issue where database changes during pip installations were reverting and not saving chat logs, now ensuring data persistence and reliability in chat operations.
- **🏷️ Chat Rename Tab Update**: Fixed the functionality to change the web browser's tab title simultaneously when a chat is renamed, keeping tab titles consistent.
## [0.3.29] - 2024-09-25

### Fixed

- **🔧 KaTeX Rendering Improvement**: Resolved specific corner cases in KaTeX rendering to enhance the display of complex mathematical notation.
- **📞 'Call' URL Parameter Fix**: Corrected functionality for the 'call' URL search parameter, ensuring reliable activation of voice calls through URL triggers.
- **🔄 Configuration Reset Fix**: Fixed the RESET_CONFIG_ON_START to ensure settings revert to default correctly upon each startup, improving reliability in configuration management.
- **🌍 Filter Outlet Hook Fix**: Addressed issues in the filter outlet hook, ensuring all filter functions operate as intended.
## [0.3.28] - 2024-09-24

### Fixed

- **🔍 Web Search Functionality**: Corrected an issue where the web search option was not functioning properly.

## [0.3.27] - 2024-09-24

### Fixed

- **🔄 Periodic Cleanup Error Resolved**: Fixed a critical RuntimeError related to the 'periodic_usage_pool_cleanup' coroutine, ensuring smooth and efficient performance post-pip install, correcting a persisting issue from version 0.3.26.
- **📊 Enhanced LaTeX Rendering**: Improved rendering for LaTeX content, enhancing clarity and visual presentation in documents and mathematical models.

## [0.3.26] - 2024-09-24

### Fixed

- **🔄 Event Loop Error Resolution**: Addressed a critical error where a missing running event loop caused 'periodic_usage_pool_cleanup' to fail with pip installs. This fix ensures smoother and more reliable updates and installations, enhancing overall system stability.

## [0.3.25] - 2024-09-24

### Fixed

- **🖼️ Image Generation Functionality**: Resolved an issue where image generation was not functioning, restoring full capability for visual content creation.
- **⚖️ Rate Response Corrections**: Addressed a problem where rate responses were not working, ensuring reliable feedback mechanisms are operational.
## [0.3.24] - 2024-09-24

### Added

- **🚀 Rendering Optimization**: Significantly improved message rendering performance, enhancing user experience and webui responsiveness.
- **💖 Favorite Response Feature in Chat Overview**: Users can now mark responses as favorite directly from the chat overview, enhancing ease of retrieval and organization of preferred responses.
- **💬 Create Message Pairs with Shortcut**: Implemented creation of new message pairs using Cmd/Ctrl+Shift+Enter, making conversation editing faster and more intuitive.
- **🌍 Expanded User Prompt Variables**: Added weekday, timezone, and language information variables to user prompts to match system prompt variables.
- **🎵 Enhanced Audio Support**: Now includes support for 'audio/x-m4a' files, broadening compatibility with audio content within the platform.
- **🔏 Model URL Search Parameter**: Added an ability to select a model directly via URL parameters, streamlining navigation and model access.
- **📄 Enhanced PDF Citations**: PDF citations now open at the associated page, streamlining reference checks and document handling.
- **🔧 Use of Redis in Sockets**: Enhanced socket implementation to fully support Redis, enabling effective stateless instances suitable for scalable load balancing.
- **🌍 Stream Individual Model Responses**: Allows specific models to have individualized streaming settings, enhancing performance and customization.
- **🕒 Display Model Hash and Last Modified Timestamp for Ollama Models**: Provides critical model details directly in the Models workspace for enhanced tracking.
- **❗ Update Info Notification for Admins**: Ensures administrators receive immediate updates upon login, keeping them informed of the latest changes and system statuses.

### Fixed

- **🗑️ Temporary File Handling On Windows**: Fixed an issue causing errors when accessing a temporary file being used by another process; Tools & Functions should now work as intended.
- **🔓 Authentication Toggle Issue**: Resolved the malfunction where setting 'WEBUI_AUTH=False' did not appropriately disable authentication, ensuring that user experience and system security settings function as configured.
- **🔧 Save As Copy Issue for Many Model Chats**: Resolved an error preventing users from saving messages as copies in many model chats.
- **🔒 Sidebar Closure on Mobile**: Resolved an issue where the mobile sidebar remained open after menu engagement, improving user interface responsiveness and comfort.
- **🛡️ Tooltip XSS Vulnerability**: Resolved a cross-site scripting (XSS) issue within tooltips, ensuring enhanced security and data integrity during user interactions.

### Changed

- **↩️ Deprecated Interface Stream Response Settings**: Moved to advanced parameters to streamline interface settings and enhance user clarity.
- **⚙️ Renamed 'speedRate' to 'playbackRate'**: Standardizes terminology, improving usability and understanding in media settings.
## [0.3.23] - 2024-09-21

### Added

- **🚀 WebSocket Redis Support**: Enhanced load balancing capabilities for multiple instance setups, promoting better performance and reliability in WebUI.
- **🔧 Adjustable Chat Controls**: Introduced width-adjustable chat controls, enabling a personalized and more comfortable user interface.
- **🌎 i18n Updates**: Improved and updated the Chinese translations.

### Fixed

- **🌐 Task Model Unloading Issue**: Modified task handling to use the Ollama /api/chat endpoint instead of OpenAI compatible endpoint, ensuring models stay loaded and ready with custom parameters, thus minimizing delays in task execution.
- **📝 Title Generation Fix for OpenAI Compatible APIs**: Resolved an issue preventing the generation of titles, enhancing consistency and reliability when using multiple API providers.
- **🗃️ RAG Duplicate Collection Issue**: Fixed a bug causing repeated processing of the same uploaded file. Now utilizes indexed files to prevent unnecessary duplications, optimizing resource usage.
- **🖼️ Image Generation Enhancement**: Refactored OpenAI image generation endpoint to be asynchronous, preventing the WebUI from becoming unresponsive during processing, thus enhancing user experience.
- **🔓 Downgrade Authlib**: Reverted Authlib to version 1.3.1 to address and resolve issues concerning OAuth functionality.

### Changed

- **🔍 Improved Message Interaction**: Enhanced the message node interface to allow for easier focus redirection with a simple click, streamlining user interaction.
- **✨ Styling Refactor**: Updated WebUI styling for a cleaner, more modern look, enhancing user experience across the platform.
## [0.3.22] - 2024-09-19

### Added

- **⭐ Chat Overview**: Introducing a node-based interactive messages diagram for improved visualization of conversation flows.
- **🔗 Multiple Vector DB Support**: Now supports multiple vector databases, including the newly added Milvus support. Community contributions for additional database support are highly encouraged!
- **📡 Experimental Non-Stream Chat Completion**: Experimental feature allowing the use of OpenAI o1 models, which do not support streaming, ensuring more versatile model deployment.
- **🔍 Experimental Colbert-AI Reranker Integration**: Added support for "jinaai/jina-colbert-v2" as a reranker, enhancing search relevance and accuracy. Note: it may not function at all on low-spec computers.
- **🕸️ ENABLE_WEBSOCKET_SUPPORT**: Added environment variable for instances to ignore websocket upgrades, stabilizing connections on platforms with websocket issues.
- **🔊 Azure Speech Service Integration**: Added support for Azure Speech services for Text-to-Speech (TTS).
- **🎚️ Customizable Playback Speed**: Playback speed control is now available in Call mode settings, allowing users to adjust audio playback speed to their preferences.
- **🧠 Enhanced Error Messaging**: System now displays helpful error messages directly to users during chat completion issues.
- **📂 Save Model as Transparent PNG**: Model profile images are now saved as PNGs, supporting transparency and improving visual integration.
- **📱 iPhone Compatibility Adjustments**: Added padding to accommodate the iPhone navigation bar, improving UI display on these devices.
- **🔗 Secure Response Headers**: Implemented security response headers, bolstering web application security.
- **🔧 Enhanced AUTOMATIC1111 Settings**: Users can now configure 'CFG Scale', 'Sampler', and 'Scheduler' parameters directly in the admin settings, enhancing workflow flexibility without source code modifications.
- **🌍 i18n Updates**: Enhanced translations for Chinese, Ukrainian, Russian, and French, fostering a better localized experience.

### Fixed

- **🛠️ Chat Message Deletion**: Resolved issues with chat message deletion, ensuring a smoother user interaction and system stability.
- **🔢 Ordered List Numbering**: Fixed the incorrect ordering in lists.

### Changed

- **🎨 Transparent Icon Handling**: Allowed model icons to be displayed on transparent backgrounds, improving UI aesthetics.
- **📝 Improved RAG Template**: Enhanced Retrieval-Augmented Generation template, optimizing context handling and error checking for more precise operation.
## [0.3.21] - 2024-09-08

### Added

- **📊 Document Count Display**: Now displays the total number of documents directly within the dashboard.
- **🚀 Ollama Embed API Endpoint**: Enabled /api/embed endpoint proxy support.

### Fixed

- **🐳 Docker Launch Issue**: Resolved the problem preventing Open-WebUI from launching correctly when using Docker.

### Changed

- **🔍 Enhanced Search Prompts**: Improved the search query generation prompts for better accuracy and user interaction, enhancing the overall search experience.

## [0.3.20] - 2024-09-07

### Added

- **🌐 Translation Update**: Updated Catalan translations to improve user experience for Catalan speakers.

### Fixed

- **📄 PDF Download**: Resolved a configuration issue with fonts directory, ensuring PDFs are now downloaded with the correct formatting.
- **🛠️ Installation of Tools & Functions Requirements**: Fixed a bug where necessary requirements for tools and functions were not properly installing.
- **🔗 Inline Image Link Rendering**: Enabled rendering of images directly from links in chat.
- **📞 Post-Call User Interface Cleanup**: Adjusted UI behavior to automatically close chat controls after a voice call ends, reducing screen clutter.
- **🎙️ Microphone Deactivation Post-Call**: Addressed an issue where the microphone remained active after calls.
- **✍️ Markdown Spacing Correction**: Corrected spacing in Markdown rendering, ensuring text appears neatly and as expected.
- **🔄 Message Re-rendering**: Fixed an issue causing all response messages to re-render with each new message, now improving chat performance.

### Changed

- **🌐 Refined Web Search Integration**: Deprecated the Search Query Generation Prompt threshold; introduced a toggle button for "Enable Web Search Query Generation" allowing users to opt-in to using web search more judiciously.
- **📝 Default Prompt Templates Update**: Emptied environment variable templates for search and title generation now default to the Open WebUI default prompt templates, simplifying configuration efforts.
## [0.3.19] - 2024-09-05

### Added

- **🌐 Translation Update**: Improved Chinese translations.

### Fixed

- **📂 DATA_DIR Overriding**: Fixed an issue to avoid overriding DATA_DIR, preventing errors when directories are set identically, ensuring smoother operation and data management.
- **🛠️ Frontmatter Extraction**: Fixed the extraction process for frontmatter in tools and functions.

### Changed

- **🎨 UI Styling**: Refined the user interface styling for enhanced visual coherence and user experience.

## [0.3.18] - 2024-09-04

### Added

- **🛠️ Direct Database Execution for Tools & Functions**: Enhanced the execution of Python files for tools and functions, now directly loading from the database for a more streamlined backend process.

### Fixed

- **🔄 Automatic Rewrite of Import Statements in Tools & Functions**: Tool and function scripts that import 'utils', 'apps', 'main', 'config' will now automatically have these renamed with an 'open_webui.' prefix, ensuring compatibility and consistency across different modules.
- **🎨 Styling Adjustments**: Minor fixes in the visual styling to improve user experience and interface consistency.
## [0.3.17] - 2024-09-04

### Added

- **🔄 Import/Export Configuration**: Users can now import and export webui configurations from admin settings > Database, simplifying setup replication across systems.
- **🌍 Web Search via URL Parameter**: Added support for activating web search directly through URL by setting 'web-search=true'.
- **🌐 SearchApi Integration**: Added support for SearchApi as an alternative web search provider, enhancing search capabilities within the platform.
- **🔍 Literal Type Support in Tools**: Tools now support the Literal type.
- **🌍 Updated Translations**: Improved translations for Chinese, Ukrainian, and Catalan.

### Fixed

- **🔧 Pip Install Issue**: Resolved the issue where pip install failed due to missing 'alembic.ini', ensuring smoother installation processes.
- **🌃 Automatic Theme Update**: Fixed an issue where the color theme did not update dynamically with system changes.
- **🛠️ User Agent in ComfyUI**: Added default headers in ComfyUI to fix access issues, improving reliability in network communications.
- **🔄 Missing Chat Completion Response Headers**: Ensured proper return of proxied response headers during chat completion, improving API reliability.
- **🔗 Websocket Connection Prioritization**: Modified socket.io configuration to prefer websockets and more reliably fallback to polling, enhancing connection stability.
- **🎭 Accessibility Enhancements**: Added missing ARIA labels for buttons, improving accessibility for visually impaired users.
- **⚖️ Advanced Parameter**: Fixed an issue ensuring that advanced parameters are correctly applied in all scenarios, so that user-defined settings behave consistently.

### Changed

- **🔁 Namespace Reorganization**: Reorganized all Python files under the 'open_webui' namespace to streamline the project structure and improve maintainability. Tools and functions importing from 'utils' should now use 'open_webui.utils'.
- **🚧 Dependency Updates**: Updated several backend dependencies like 'aiohttp', 'authlib', 'duckduckgo-search', 'flask-cors', and 'langchain' to their latest versions, enhancing performance and security.
## [0.3.16] - 2024-08-27

### Added

- **🚀 Config DB Migration**: Migrated configuration handling from config.json to the database, enabling high-availability setups and load balancing across multiple Open WebUI instances.
- **🔗 Call Mode Activation via URL**: Added a 'call=true' URL search parameter enabling direct shortcuts to activate call mode, enhancing user interaction on mobile devices.
- **✨ TTS Content Control**: Added functionality to control how message content is segmented for Text-to-Speech (TTS) generation requests, allowing for more flexible speech output options.
- **😄 Show Knowledge Search Status**: Enhanced model usage transparency by displaying status when working with knowledge-augmented models, helping users understand the system's state during queries.
- **👆 Click-to-Copy for Codespan**: Enhanced interactive experience in the WebUI by allowing users to click to copy content from code spans directly.
- **🚫 API User Blocking via Model Filter**: Introduced the ability to block API users based on customized model filters, enhancing security and control over API access.
- **🎬 Call Overlay Styling**: Adjusted call overlay styling on large screens to not cover the entire interface, but only the chat control area, for a more unobtrusive interaction experience.

### Fixed

- **🔧 LaTeX Rendering Issue**: Addressed an issue that affected the correct rendering of LaTeX.
- **📁 File Leak Prevention**: Resolved the issue of uploaded files mistakenly being accessible across user chats.
- **🔧 Pipe Functions with '__files__' Param**: Fixed issues with the '__files__' parameter not functioning correctly in pipe functions.
- **📝 Markdown Processing for RAG**: Fixed issues with processing Markdown in files.
- **🚫 Duplicate System Prompts**: Fixed bugs causing system prompts to duplicate.

### Changed

- **🔋 Wakelock Permission**: Optimized the activation of wakelock to only engage during call mode, conserving device resources and improving battery performance during idle periods.
- **🔍 Content-Type for Ollama Chats**: Added 'application/x-ndjson' content-type to '/api/chat' endpoint responses to match raw Ollama responses.
- **✋ Disable Signups Conditionally**: Implemented conditional logic to disable sign-ups when 'ENABLE_LOGIN_FORM' is set to false.
## [0.3.15] - 2024-08-21

### Added

- **🔗 Temporary Chat Activation**: Integrated a new URL parameter 'temporary-chat=true' to enable temporary chat sessions directly through the URL.
- **🌄 ComfyUI Seed Node Support**: Introduced seed node support in ComfyUI for image generation, allowing users to specify node IDs for randomized seed assignment.

### Fixed

- **🛠️ Tools and Functions**: Resolved a critical issue where Tools and Functions were not properly functioning, restoring full capability and reliability to these essential features.
- **🔘 Chat Action Button in Many Model Chat**: Fixed the malfunctioning of chat action buttons in many model chat environments, ensuring a smoother and more responsive user interaction.
- **⏪ Many Model Chat Compatibility**: Restored backward compatibility for many model chats.
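Several of the releases in this range expose features through URL search parameters: 'temporary-chat=true' (0.3.15), 'call=true' (0.3.16), and 'web-search=true' (0.3.17). As an illustration of how such a link composes (the host, port, and model parameter name are assumptions, not taken from these notes):

# Illustrative only: compose the URL parameters quoted in the 0.3.15-0.3.17
# notes. BASE_URL and "model=llama3" are placeholder assumptions.
BASE_URL='http://localhost:3000'
echo "${BASE_URL}/?model=llama3&temporary-chat=true&web-search=true&call=true"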
## [0.3.14] - 2024-08-21

### Added

- **🛠️ Custom ComfyUI Workflow**: Deprecating several older environment variables, this enhancement introduces a new, customizable workflow for a more tailored user experience.
- **🔀 Merge Responses in Many Model Chat**: Enhances the dialogue by merging responses from multiple models into a single, coherent reply, improving the interaction quality in many model chats.
- **✅ Multiple Instances of Same Model in Chats**: Enhanced many model chat to support adding multiple instances of the same model.
- **🔧 Quick Actions in Model Workspace**: Enhanced Shift key quick actions for hiding/unhiding and deleting models, facilitating a smoother workflow.
- **🗨️ Markdown Rendering in User Messages**: User messages are now rendered in Markdown, enhancing readability and interaction.
- **💬 Temporary Chat Feature**: Introduced a temporary chat feature, deprecating the old chat history setting to enhance user interaction flexibility.
- **🖋️ User Message Editing**: Enhanced the user chat editing feature to allow saving changes without sending, providing more flexibility in message management.
- **🛡️ Security Enhancements**: Various security improvements implemented across the platform to ensure safer user experiences.
- **🌍 Updated Translations**: Enhanced translations for Chinese, Ukrainian, and Bahasa Malaysia, improving localization and user comprehension.

### Fixed

- **📑 Mermaid Rendering Issue**: Addressed issues with Mermaid chart rendering to ensure clean and clear visual data representation.
- **🎭 PWA Icon Maskability**: Fixed the Progressive Web App icon to be maskable, ensuring proper display on various device home screens.
- **🔀 Cloned Model Chat Freezing Issue**: Fixed a bug where cloning many model chats would cause freezing, enhancing stability and responsiveness.
- **🔍 Generic Error Handling and Refinements**: Various minor fixes and refinements to address previously untracked issues, ensuring smoother operations.

### Changed

- **🖼️ Image Generation Refactor**: Overhauled image generation processes for improved efficiency and quality.
- **🔨 Refactor Tool and Function Calling**: Refactored tool and function calling mechanisms for improved clarity and maintainability.
- **🌐 Backend Library Updates**: Updated critical backend libraries including SQLAlchemy, uvicorn[standard], faster-whisper, bcrypt, and boto3 for enhanced performance and security.

### Removed

- **🚫 Deprecated ComfyUI Environment Variables**: Removed several outdated environment variables related to ComfyUI settings, simplifying configuration management.
## [0.3.13] - 2024-08-14

### Added

- **🎨 Enhanced Markdown Rendering**: Significant improvements in rendering Markdown, ensuring smooth and reliable display of LaTeX and Mermaid charts, enhancing user experience with more robust visual content.
- **🔄 Auto-Install Tools & Functions Python Dependencies**: For 'Tools' and 'Functions', Open WebUI now automatically installs extra Python requirements specified in the frontmatter, streamlining setup processes and customization.
- **🌀 OAuth Email Claim Customization**: Introduced an 'OAUTH_EMAIL_CLAIM' variable to allow customization of the default "email" claim within OAuth configurations, providing greater flexibility in authentication processes; see the example after this list.
- **📶 Websocket Reconnection**: Enhanced reliability with the capability to automatically reconnect when a websocket is closed, ensuring consistent and stable communication.
- **🤳 Haptic Feedback on Supported Devices**: Android devices now support haptic feedback for an immersive tactile experience during certain interactions.

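A minimal sketch of the new setting; the claim value shown is a hypothetical substitute for the default:

```bash
# Use a different OIDC claim than the default "email"
export OAUTH_EMAIL_CLAIM="preferred_username"
```
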
### Fixed

- **🛠️ ComfyUI Performance Improvement**: Addressed an issue causing FastAPI to stall when ComfyUI image generation was active; it now runs in a separate thread to prevent UI unresponsiveness.
- **🔀 Session Handling**: Fixed session handling by mandating 'session_id' on the client side, ensuring smoother session management and transitions.
- **🖋️ Minor Bug Fixes and Format Corrections**: Various minor fixes, including typo corrections, backend formatting improvements, and test amendments, enhancing overall system stability and performance.

### Changed

- **🚀 Migration to SvelteKit 2**: Upgraded the underlying framework to SvelteKit version 2, offering enhanced speed, better code structure, and improved deployment capabilities.
- **🧹 General Cleanup and Refactoring**: Performed broad cleanup and refactoring across the platform, improving code efficiency and maintaining high standards of code health.
- **🚧 Integration Testing Improvements**: Modified how Cypress integration tests detect chat messages and updated sharing tests for better reliability and accuracy.
- **📁 Standardized '.safetensors' File Extension**: Renamed the '.sft' file extension to '.safetensors' for ComfyUI workflows, standardizing file formats across the platform.

### Removed

- **🗑️ Deprecated Frontend Functions**: Removed frontend functions that were migrated to the backend, decluttering the codebase and reducing redundancy.

## [0.3.12] - 2024-08-07

### Added

- **🔄 Sidebar Infinite Scroll**: Added an infinite scroll feature in the sidebar for more efficient chat navigation, reducing load times and enhancing user experience.
- **🚀 Enhanced Markdown Rendering**: Support for rendering all code blocks and making images clickable for preview; codespan styling is also enhanced to improve readability and user interaction.
- **🔒 Admin Shared Chat Visibility**: Admins no longer have default visibility over shared chats when ENABLE_ADMIN_CHAT_ACCESS is set to false, tightening security and privacy settings for users; see the example after this list.
- **🌍 Language Updates**: Added Malay (Bahasa Malaysia) translation and updated Catalan and Traditional Chinese translations to improve accessibility for more users.

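A minimal sketch of the setting, applied as an environment variable:

```bash
# Prevent admins from seeing users' shared chats by default
export ENABLE_ADMIN_CHAT_ACCESS=false
```
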
### Fixed

- **📊 Markdown Rendering Issues**: Resolved issues with Markdown rendering to ensure consistent and correct display across components.
- **🛠️ Styling Issues**: Multiple fixes applied to styling throughout the application, improving the overall visual experience and interface consistency.
- **🗃️ Modal Handling**: Fixed an issue where modals were not closing correctly in various model chat scenarios, enhancing usability and interface reliability.
- **📄 Missing OpenAI Usage Information**: Resolved issues where usage statistics for OpenAI services were not being correctly displayed, ensuring users have access to crucial data for managing and monitoring their API consumption.
- **🔧 Non-Streaming Support for Functions Plugin**: Fixed a functionality issue with the Functions plugin where non-streaming operations were not functioning as intended, restoring full capabilities for async and sync integration within the platform.
- **🔄 Environment Variable Type Correction (COMFYUI_FLUX_FP8_CLIP)**: Corrected the data type of the 'COMFYUI_FLUX_FP8_CLIP' environment variable from string to boolean, ensuring environment settings apply correctly and enhancing configuration management; see the example after this list.

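After the correction, the variable is interpreted as a boolean; a minimal example:

```bash
# Now parsed as a boolean rather than a string
export COMFYUI_FLUX_FP8_CLIP=true
```
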
### Changed

- **🔧 Backend Dependency Updates**: Updated several backend dependencies, such as boto3, pypdf, python-pptx, validators, and black, ensuring up-to-date security and performance optimizations.

## [0.3.11] - 2024-08-02

### Added

- **📊 Model Information Display**: Added visuals for model selection, including images next to model names, for more intuitive navigation.
- **🗣 ElevenLabs Voice Adaptations**: Voice enhancements, including support for selecting an ElevenLabs voice ID by name, for personalized vocal interactions.
- **⌨️ Arrow Keys Model Selection**: Users can now use arrow keys for quicker model selection, enhancing accessibility.
- **🔍 Fuzzy Search in Model Selector**: Enhanced the model selector with fuzzy search to locate models swiftly, including their descriptions.
- **🕹️ ComfyUI Flux Image Generation**: Added support for the new Flux image generation model; introduces environment controls, such as weight precision and CLIP model options, in Settings.
- **💾 Display File Size for Uploads**: The enhanced file interface now displays file size, preparing for upcoming upload restrictions.
- **🎚️ Advanced Params "Min P"**: Added a 'Min P' parameter in the advanced settings for customized model precision control.
- **🔒 Enhanced OAuth**: Introduced custom redirect URI support for OAuth behind reverse proxies, enabling safer authentication processes.
- **🖥 Enhanced LaTeX Rendering**: Adjustments made to LaTeX rendering processes, now accurately detecting and presenting LaTeX inputs from text.
- **🌐 Internationalization**: Enhanced with a new Romanian translation and updated Vietnamese and Ukrainian translations, helping broaden accessibility for international users.

### Fixed

- **🔧 Tags Handling in Document Upload**: Tags are now properly sent to the upload document handler, resolving issues with missing metadata.
- **🖥️ Sensitive Input Fields**: Corrected browser misinterpretation of secure input fields, preventing misclassification as password fields.
- **📂 Static Path Resolution in PDF Generation**: Fixed static paths to adjust dynamically, preventing issues across various environments.

### Changed

- **🎨 UI/UX Styling Enhancements**: Multiple minor styling updates for a cleaner and more intuitive user interface.
- **🚧 Refactoring Various Components**: Numerous refactoring changes across styling, file handling, and function simplifications for clarity and performance.
- **🎛️ User Valves Management**: Moved user valves from settings to direct chat controls for more user-friendly access during interactions.

### Removed

- **⚙️ Health Check Logging**: Removed verbose logging from the health checking processes to declutter logs and improve backend performance.

## [0.3.10] - 2024-07-17

### Fixed

- **🔄 Improved File Upload**: Addressed the issue where file uploads lacked animation.
- **💬 Chat Continuity**: Fixed a problem where existing chats were not functioning properly in some instances.
- **🗂️ Chat File Reset**: Resolved the issue of chat files not resetting for new conversations, now ensuring a clean slate for each chat session.
- **📁 Document Workspace Uploads**: Corrected the handling of document uploads in the workspace using the Files API.

## [0.3.9] - 2024-07-17

### Added

- **📁 Files Chat Controls**: We've reverted to the old file handling behavior where uploaded files are always included. You can now manage files directly within the chat controls section, giving you the ability to remove files as needed.
- **🔧 "Action" Function Support**: Introduced a new "Action" function for writing custom buttons to the message toolbar. This feature enables more interactive messaging, with documentation coming soon.
- **📜 Citations Handling**: For newly uploaded files in the documents workspace, citations will now display the actual filename. Additionally, you can click on these filenames to open the file in a new tab for easier access.
- **🛠️ Event Emitter and Call Updates**: Enhanced 'event_emitter' to allow message replacement and 'event_call' to support text input for Tools and Functions. Detailed documentation will be provided shortly.
- **🎨 Styling Refactor**: Various styling updates for a cleaner and more cohesive user interface.
- **🌐 Enhanced Translations**: Improved translations for Catalan, Ukrainian, and Brazilian Portuguese.

### Fixed

- **🔧 Chat Controls Priority**: Resolved an issue where Chat Controls values were being overridden by model information parameters. The priority is now Chat Controls, followed by Global Settings, then Model Settings.
- **🪲 Debug Logs**: Fixed an issue where debug logs were not being logged properly.
- **🔑 Automatic1111 Auth Key**: The auth key for Automatic1111 is no longer required.
- **📝 Title Generation**: Ensured that title generation runs only once, even when multiple models are in a chat.
- **✅ Boolean Values in Params**: Added support for boolean values in parameters.
- **🖼️ Files Overlay Styling**: Fixed the styling issue with the files overlay.

### Changed

- **⬆️ Dependency Updates**
  - Upgraded 'pydantic' from version 2.7.1 to 2.8.2.
  - Upgraded 'sqlalchemy' from version 2.0.30 to 2.0.31.
  - Upgraded 'unstructured' from version 0.14.9 to 0.14.10.
  - Upgraded 'chromadb' from version 0.5.3 to 0.5.4.

## [0.3.8] - 2024-07-09

### Added

- **💬 Chat Controls**: Easily adjust parameters for each chat session, offering more precise control over your interactions.
- **📌 Pinned Chats**: Support for pinned chats, allowing you to keep important conversations easily accessible.
- **📄 Apache Tika Integration**: Added support for using Apache Tika as a document loader, enhancing document processing capabilities.
- **🛠️ Custom Environment for OpenID Claims**: Allows setting custom claims for OpenID, providing more flexibility in user authentication.
- **🔧 Enhanced Tools & Functions API**: Introduced 'event_emitter' and 'event_call'; you can now also add citations for better documentation and tracking. Detailed documentation will be provided on our documentation website.
- **↔️ Sideways Scrolling in Settings**: The settings tabs container now supports horizontal scrolling for easier navigation.
- **🌑 Darker OLED Theme**: Includes a new, darker OLED theme and improved styling for the light theme, enhancing visual appeal.
- **🌐 Language Updates**: Updated translations for Indonesian, German, French, and Catalan languages, expanding accessibility.

### Fixed

- **⏰ OpenAI Streaming Timeout**: Resolved issues with OpenAI streaming responses by using the 'AIOHTTP_CLIENT_TIMEOUT' setting, ensuring reliable performance.
- **💡 User Valves**: Fixed malfunctioning user valves, ensuring proper functionality.
- **🔄 Collapsible Components**: Addressed issues with collapsible components not working, restoring expected behavior.

### Changed

- **🗃️ Database Backend**: Switched from Peewee to SQLAlchemy for improved concurrency support, enhancing database performance.
- **⬆️ ChromaDB Update**: Upgraded to version 0.5.3. Ensure your remote ChromaDB instance matches this version.
- **🔤 Primary Font Styling**: Updated the primary font to Archivo for better visual consistency.
- **🔄 Font Change for Windows**: Replaced Arimo with the Inter font for Windows users, improving readability.
- **🚀 Lazy Loading**: Implemented lazy loading for 'faster_whisper' and 'sentence_transformers' to reduce startup memory usage.
- **📋 Task Generation Payload**: Task generations now include only the "task" field in the body instead of "title".

## [0.3.7] - 2024-06-29

### Added

- **🌐 Enhanced Internationalization (i18n)**: Newly introduced Indonesian translation, and updated translations for Turkish, Chinese, and Catalan languages to improve user accessibility.

### Fixed

- **🕵️‍♂️ Browser Language Detection**: Corrected the issue where the application was not properly detecting and adapting to the browser's language settings.
- **🔐 OIDC Admin Role Assignment**: Fixed a bug where the admin role was not being assigned to the first user who signed up via OpenID Connect (OIDC).
- **💬 Chat/Completions Endpoint**: Resolved an issue where the chat/completions endpoint was non-functional when the stream option was set to False.
- **🚫 'WEBUI_AUTH' Configuration**: Addressed the problem where setting 'WEBUI_AUTH' to False was not being applied correctly; see the example after this list.

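A minimal sketch of the affected setting:

```bash
# Disable authentication entirely; intended for trusted, single-user setups
export WEBUI_AUTH=false
```
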
### Changed

- **📦 Dependency Update**: Upgraded 'authlib' from version 1.3.0 to 1.3.1 to ensure better security and performance enhancements.

## [0.3.6] - 2024-06-27

### Added

- **✨ "Functions" Feature**: You can now utilize "Functions" like filter (middleware) and pipe (model) functions directly within the WebUI. While largely compatible with Pipelines, these native functions can be executed easily within Open WebUI. Example use cases for filter functions include usage monitoring, real-time translation, moderation, and automemory. For pipe functions, use cases range from Cohere and Anthropic integration directly within Open WebUI to "Valves" for per-user OpenAI API key usage, and much more. If you encounter issues, SAFE_MODE has been introduced.
- **📁 Files API**: Compatible with OpenAI, this feature allows for custom Retrieval-Augmented Generation (RAG) in conjunction with the Filter Function. More examples will be shared on our community platform and official documentation website.
- **🛠️ Tool Enhancements**: Tools now support citations and "Valves". Documentation will be available shortly.
- **🔗 Iframe Support via Files API**: Enables rendering HTML directly into your chat interface using functions and tools. Use cases include playing games like DOOM and Snake, displaying a weather applet, and implementing Anthropic "artifacts"-like features. Stay tuned for updates on our community platform and documentation.
- **🔒 Experimental OAuth Support**: New experimental OAuth support. Check our documentation for more details.
- **🖼️ Custom Background Support**: Set a custom background from Settings > Interface to personalize your experience.
- **🔑 AUTOMATIC1111_API_AUTH Support**: Enhanced security for the AUTOMATIC1111 API; see the example after this list.
- **🎨 Code Highlight Optimization**: Improved code highlighting features.
- **🎙️ Voice Interruption Feature**: Reintroduced and now toggleable from Settings > Interface.
- **💤 Wakelock API**: Now in use to prevent screen dimming during important tasks.
- **🔐 API Key Privacy**: All API keys are now hidden by default for better security.
- **🔍 New Web Search Provider**: Added jina_search as a new option.
- **🌐 Enhanced Internationalization (i18n)**: Improved Korean translation and updated Chinese and Ukrainian translations.

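A sketch of the new setting; the 'user:password' credential format is an assumption, not confirmed by this changelog:

```bash
# Hypothetical credentials for an AUTOMATIC1111 instance protected by API auth
export AUTOMATIC1111_API_AUTH="user:password"
```
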
### Fixed

- **🔧 Conversation Mode Issue**: Fixed the issue where Conversation Mode remained active after being removed from settings.
- **📏 Scroll Button Obstruction**: Resolved the issue where the scrollToBottom button container obstructed clicks on buttons beneath it.

### Changed

- **⏲️ AIOHTTP_CLIENT_TIMEOUT**: Now set to 'None' by default for improved configuration flexibility.
- **📞 Voice Call Enhancements**: Improved by skipping code blocks and expressions during calls.
- **🚫 Error Message Handling**: Disabled the continuation of operations from error messages.
- **🗂️ Playground Relocation**: Moved the Playground from the workspace to the user menu for a better user experience.

## [0.3.5] - 2024-06-16
|
||||
|
||||
### Added
|
||||
|
||||
- **📞 Enhanced Voice Call**: Text-to-speech (TTS) callback now operates in real-time for each sentence, reducing latency by not waiting for full completion.
|
||||
- **👆 Tap to Interrupt**: During a call, you can now stop the assistant from speaking by simply tapping, instead of using voice. This resolves the issue of the speaker's voice being mistakenly registered as input.
|
||||
- **😊 Emoji Call**: Toggle this feature on from the Settings > Interface, allowing LLMs to express emotions using emojis during voice calls for a more dynamic interaction.
|
||||
- **🖱️ Quick Archive/Delete**: Use the Shift key + mouseover on the chat list to swiftly archive or delete items.
|
||||
- **📝 Markdown Support in Model Descriptions**: You can now format model descriptions with markdown, enabling bold text, links, etc.
|
||||
- **🧠 Editable Memories**: Adds the capability to modify memories.
|
||||
- **📋 Admin Panel Sorting**: Introduces the ability to sort users/chats within the admin panel.
|
||||
- **🌑 Dark Mode for Quick Selectors**: Dark mode now available for chat quick selectors (prompts, models, documents).
|
||||
- **🔧 Advanced Parameters**: Adds 'num_keep' and 'num_batch' to advanced parameters for customization.
|
||||
- **📅 Dynamic System Prompts**: New variables '{{CURRENT_DATETIME}}', '{{CURRENT_TIME}}', '{{USER_LOCATION}}' added for system prompts. Ensure '{{USER_LOCATION}}' is toggled on from Settings > Interface.
|
||||
- **🌐 Tavily Web Search**: Includes Tavily as a web search provider option.
|
||||
- **🖊️ Federated Auth Usernames**: Ability to set user names for federated authentication.
|
||||
- **🔗 Auto Clean URLs**: When adding connection URLs, trailing slashes are now automatically removed.
|
||||
- **🌐 Enhanced Translations**: Improved Chinese and Swedish translations.
|
||||
|
||||
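A sketch of a system prompt using the new variables; the surrounding wording is illustrative:

```bash
# The template variables are substituted by Open WebUI at prompt time
SYSTEM_PROMPT='Today is {{CURRENT_DATETIME}}. The user is located near {{USER_LOCATION}}.'
```
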
### Fixed

- **⏳ AIOHTTP_CLIENT_TIMEOUT**: Introduced a new environment variable, 'AIOHTTP_CLIENT_TIMEOUT', for requests to Ollama lasting longer than 5 minutes. The default is 300 seconds; set it to blank ('') for no timeout; see the example after this list.
- **❌ Message Delete Freeze**: Resolved an issue where message deletion would sometimes cause the web UI to freeze.

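A minimal sketch; the 600-second value is illustrative:

```bash
# Allow Ollama up to 10 minutes per request; set to '' to disable the timeout
export AIOHTTP_CLIENT_TIMEOUT=600
```
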
## [0.3.4] - 2024-06-12

### Fixed

- **🔒 Mixed Content with HTTPS Issue**: Resolved a problem where mixed content (HTTP and HTTPS) was causing security warnings and blocking resources on HTTPS sites.
- **🔍 Web Search Issue**: Addressed the problem where web search functionality was not working correctly. The 'ENABLE_RAG_LOCAL_WEB_FETCH' option has been reintroduced to restore proper web searching capabilities; see the example after this list.
- **💾 RAG Template Not Being Saved**: Fixed an issue where the RAG template was not being saved correctly, ensuring your custom templates are now preserved as expected.

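A minimal sketch of the reintroduced option:

```bash
# Re-enable local web fetching for RAG web search
export ENABLE_RAG_LOCAL_WEB_FETCH=true
```
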
## [0.3.3] - 2024-06-12

### Added

Dockerfile
@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# Initialize device type args
# use build args in the docker build commmand with --build-arg="BUILDARG=true"
# use build args in the docker build command with --build-arg="BUILDARG=true"
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
@@ -11,13 +11,17 @@ ARG USE_CUDA_VER=cu121
# IMPORTANT: If you change the embedding model (sentence-transformers/all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
ARG USE_RERANKING_MODEL=""

# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"

ARG BUILD_HASH=dev-build
# Override at your own risk - non-root configurations are untested
ARG UID=0
ARG GID=0

######## WebUI frontend ########
FROM --platform=$BUILDPLATFORM node:21-alpine3.19 as build
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
ARG BUILD_HASH

WORKDIR /app
@@ -30,7 +34,7 @@ ENV APP_BUILD_HASH=${BUILD_HASH}
RUN npm run build

######## WebUI backend ########
FROM python:3.11-slim-bookworm as base
FROM python:3.11-slim-bookworm AS base

# Use args
ARG USE_CUDA
@@ -72,13 +76,21 @@ ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"

## Tiktoken model settings ##
ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"

## Hugging Face download cache ##
ENV HF_HOME="/app/backend/data/cache/embedding/models"

## Torch Extensions ##
# ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"

#### Other models ##########################################################

WORKDIR /app/backend

ENV HOME /root
ENV HOME=/root
# Create user and group if not root
RUN if [ $UID -ne 0 ]; then \
    if [ $GID -ne 0 ]; then \
@@ -96,7 +108,8 @@ RUN chown -R $UID:$GID /app $HOME
RUN if [ "$USE_OLLAMA" = "true" ]; then \
    apt-get update && \
    # Install pandoc and netcat
    apt-get install -y --no-install-recommends pandoc netcat-openbsd curl && \
    apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
    apt-get install -y --no-install-recommends gcc python3-dev && \
    # for RAG OCR
    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
    # install helper tools
@@ -107,8 +120,9 @@ RUN if [ "$USE_OLLAMA" = "true" ]; then \
    rm -rf /var/lib/apt/lists/*; \
    else \
    apt-get update && \
    # Install pandoc and netcat
    apt-get install -y --no-install-recommends pandoc netcat-openbsd curl jq && \
    # Install pandoc, netcat and gcc
    apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
    apt-get install -y --no-install-recommends gcc python3-dev && \
    # for RAG OCR
    apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
    # cleanup
@@ -125,11 +139,13 @@ RUN pip3 install uv && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    else \
    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    fi; \
    chown -R $UID:$GID /app/backend/data/

@@ -149,11 +165,12 @@ COPY --chown=$UID:$GID ./backend .

EXPOSE 8080

HEALTHCHECK CMD curl --silent --fail http://localhost:8080/health | jq -e '.status == true' || exit 1
HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1

USER $UID:$GID

ARG BUILD_HASH
ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
ENV DOCKER=true

CMD [ "bash", "start.sh"]

README.md
@@ -1,4 +1,4 @@
# Open WebUI (Formerly Ollama WebUI) 👋
# Open WebUI 👋



@@ -37,7 +37,7 @@ Open WebUI is an [extensible](https://github.com/open-webui/pipelines), feature-

- 📚 **Local RAG Integration**: Dive into the future of chat interactions with groundbreaking Retrieval Augmented Generation (RAG) support. This feature seamlessly integrates document interactions into your chat experience. You can load documents directly into the chat or add files to your document library, effortlessly accessing them using the `#` command before a query.

- 🔍 **Web Search for RAG**: Perform web searches using providers like `SearXNG`, `Google PSE`, `Brave Search`, `serpstack`, `serper`, and `Serply` and inject the results directly into your chat experience.
- 🔍 **Web Search for RAG**: Perform web searches using providers like `SearXNG`, `Google PSE`, `Brave Search`, `serpstack`, `serper`, `Serply`, `DuckDuckGo`, `TavilySearch` and `SearchApi` and inject the results directly into your chat experience.

- 🌐 **Web Browsing Capability**: Seamlessly integrate websites into your chat experience using the `#` command followed by a URL. This feature allows you to incorporate web content directly into your conversations, enhancing the richness and depth of your interactions.

@@ -59,11 +59,31 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open

## How to Install 🚀

> [!NOTE]
> Please note that for certain Docker environments, additional configurations might be needed. If you encounter any connection issues, our detailed guide on [Open WebUI Documentation](https://docs.openwebui.com/) is ready to assist you.
### Installation via Python pip 🐍

Open WebUI can be installed using pip, the Python package installer. Before proceeding, ensure you're using **Python 3.11** to avoid compatibility issues.

1. **Install Open WebUI**:
   Open your terminal and run the following command to install Open WebUI:

   ```bash
   pip install open-webui
   ```

2. **Running Open WebUI**:
   After installation, you can start Open WebUI by executing:

   ```bash
   open-webui serve
   ```

This will start the Open WebUI server, which you can access at [http://localhost:8080](http://localhost:8080)

### Quick Start with Docker 🐳

> [!NOTE]
> Please note that for certain Docker environments, additional configurations might be needed. If you encounter any connection issues, our detailed guide on [Open WebUI Documentation](https://docs.openwebui.com/) is ready to assist you.

> [!WARNING]
> When using Docker to install Open WebUI, make sure to include the `-v open-webui:/app/backend/data` in your Docker command. This step is crucial as it ensures your database is properly mounted and prevents any loss of data.

@@ -86,7 +106,7 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open

  docker run -d -p 3000:8080 -e OLLAMA_BASE_URL=https://example.com -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
  ```

- **To run Open WebUI with Nvidia GPU support**, use this command:

  ```bash
  docker run -d -p 3000:8080 --gpus all --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:cuda
@@ -150,7 +170,7 @@ docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/wa

In the last part of the command, replace `open-webui` with your container name if it is different.

Check our Migration Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/migration/).
Check our Migration Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/tutorials/migration/).

### Using the Dev Branch 🌙

@@ -160,7 +180,7 @@ Check our Migration Guide available in our [Open WebUI Documentation](https://do

If you want to try out the latest bleeding-edge features and are okay with occasional instability, you can use the `:dev` tag like this:

```bash
docker run -d -p 3000:8080 -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:dev
docker run -d -p 3000:8080 -v open-webui:/app/backend/data --name open-webui --add-host=host.docker.internal:host-gateway --restart always ghcr.io/open-webui/open-webui:dev
```

## What's Next? 🌟

@@ -200,4 +220,4 @@ If you have any questions, suggestions, or need assistance, please open an issue

---

Created by [Timothy J. Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! 💪
Created by [Timothy Jaeryang Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! 💪

TROUBLESHOOTING.md

@@ -18,6 +18,10 @@ If you're experiencing connection issues, it’s often due to the WebUI docker c
docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```

### Error on Slow Responses for Ollama

Open WebUI has a default timeout of 5 minutes for Ollama to finish generating the response. If needed, this can be adjusted via the environment variable AIOHTTP_CLIENT_TIMEOUT, which sets the timeout in seconds; see the example after this section.

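A sketch of passing the variable in a Docker deployment; the image and flags mirror the commands above, and the 600-second value is illustrative:

```bash
docker run -d -p 3000:8080 -e AIOHTTP_CLIENT_TIMEOUT=600 -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```
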
### General Connection Errors

**Ensure Ollama Version is Up-to-Date**: Always start by checking that you have the latest version of Ollama. Visit [Ollama's official site](https://ollama.com/) for the latest updates.

backend/.gitignore
@@ -8,9 +8,5 @@ _test
Pipfile
!/data
/data/*
!/data/litellm
/data/litellm/*
!data/litellm/config.yaml

!data/config.json
/open_webui/data/*
.webui_secret_key
backend/apps/audio/main.py

@@ -1,374 +0,0 @@
import os
import logging
from fastapi import (
    FastAPI,
    Request,
    Depends,
    HTTPException,
    status,
    UploadFile,
    File,
    Form,
)

from fastapi.responses import StreamingResponse, JSONResponse, FileResponse

from fastapi.middleware.cors import CORSMiddleware
from faster_whisper import WhisperModel
from pydantic import BaseModel

import uuid
import requests
import hashlib
from pathlib import Path
import json

from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)
from utils.misc import calculate_sha256

from config import (
    SRC_LOG_LEVELS,
    CACHE_DIR,
    UPLOAD_DIR,
    WHISPER_MODEL,
    WHISPER_MODEL_DIR,
    WHISPER_MODEL_AUTO_UPDATE,
    DEVICE_TYPE,
    AUDIO_STT_OPENAI_API_BASE_URL,
    AUDIO_STT_OPENAI_API_KEY,
    AUDIO_TTS_OPENAI_API_BASE_URL,
    AUDIO_TTS_OPENAI_API_KEY,
    AUDIO_STT_ENGINE,
    AUDIO_STT_MODEL,
    AUDIO_TTS_ENGINE,
    AUDIO_TTS_MODEL,
    AUDIO_TTS_VOICE,
    AppConfig,
)

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["AUDIO"])

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL
app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY
app.state.config.STT_ENGINE = AUDIO_STT_ENGINE
app.state.config.STT_MODEL = AUDIO_STT_MODEL

app.state.config.TTS_OPENAI_API_BASE_URL = AUDIO_TTS_OPENAI_API_BASE_URL
app.state.config.TTS_OPENAI_API_KEY = AUDIO_TTS_OPENAI_API_KEY
app.state.config.TTS_ENGINE = AUDIO_TTS_ENGINE
app.state.config.TTS_MODEL = AUDIO_TTS_MODEL
app.state.config.TTS_VOICE = AUDIO_TTS_VOICE

# setting device type for whisper model
whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
log.info(f"whisper_device_type: {whisper_device_type}")

SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)


class TTSConfigForm(BaseModel):
    OPENAI_API_BASE_URL: str
    OPENAI_API_KEY: str
    ENGINE: str
    MODEL: str
    VOICE: str


class STTConfigForm(BaseModel):
    OPENAI_API_BASE_URL: str
    OPENAI_API_KEY: str
    ENGINE: str
    MODEL: str


class AudioConfigUpdateForm(BaseModel):
    tts: TTSConfigForm
    stt: STTConfigForm


from pydub import AudioSegment
from pydub.utils import mediainfo


def is_mp4_audio(file_path):
    """Check if the given file is an MP4 audio file."""
    if not os.path.isfile(file_path):
        print(f"File not found: {file_path}")
        return False

    info = mediainfo(file_path)
    if (
        info.get("codec_name") == "aac"
        and info.get("codec_type") == "audio"
        and info.get("codec_tag_string") == "mp4a"
    ):
        return True
    return False


def convert_mp4_to_wav(file_path, output_path):
    """Convert MP4 audio file to WAV format."""
    audio = AudioSegment.from_file(file_path, format="mp4")
    audio.export(output_path, format="wav")
    print(f"Converted {file_path} to {output_path}")


@app.get("/config")
async def get_audio_config(user=Depends(get_admin_user)):
    return {
        "tts": {
            "OPENAI_API_BASE_URL": app.state.config.TTS_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.TTS_OPENAI_API_KEY,
            "ENGINE": app.state.config.TTS_ENGINE,
            "MODEL": app.state.config.TTS_MODEL,
            "VOICE": app.state.config.TTS_VOICE,
        },
        "stt": {
            "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.STT_OPENAI_API_KEY,
            "ENGINE": app.state.config.STT_ENGINE,
            "MODEL": app.state.config.STT_MODEL,
        },
    }


@app.post("/config/update")
async def update_audio_config(
    form_data: AudioConfigUpdateForm, user=Depends(get_admin_user)
):
    app.state.config.TTS_OPENAI_API_BASE_URL = form_data.tts.OPENAI_API_BASE_URL
    app.state.config.TTS_OPENAI_API_KEY = form_data.tts.OPENAI_API_KEY
    app.state.config.TTS_ENGINE = form_data.tts.ENGINE
    app.state.config.TTS_MODEL = form_data.tts.MODEL
    app.state.config.TTS_VOICE = form_data.tts.VOICE

    app.state.config.STT_OPENAI_API_BASE_URL = form_data.stt.OPENAI_API_BASE_URL
    app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY
    app.state.config.STT_ENGINE = form_data.stt.ENGINE
    app.state.config.STT_MODEL = form_data.stt.MODEL

    return {
        "tts": {
            "OPENAI_API_BASE_URL": app.state.config.TTS_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.TTS_OPENAI_API_KEY,
            "ENGINE": app.state.config.TTS_ENGINE,
            "MODEL": app.state.config.TTS_MODEL,
            "VOICE": app.state.config.TTS_VOICE,
        },
        "stt": {
            "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.STT_OPENAI_API_KEY,
            "ENGINE": app.state.config.STT_ENGINE,
            "MODEL": app.state.config.STT_MODEL,
        },
    }


@app.post("/speech")
async def speech(request: Request, user=Depends(get_verified_user)):
    body = await request.body()
    name = hashlib.sha256(body).hexdigest()

    file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
    file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")

    # Check if the file already exists in the cache
    if file_path.is_file():
        return FileResponse(file_path)

    headers = {}
    headers["Authorization"] = f"Bearer {app.state.config.TTS_OPENAI_API_KEY}"
    headers["Content-Type"] = "application/json"

    try:
        body = body.decode("utf-8")
        body = json.loads(body)
        body["model"] = app.state.config.TTS_MODEL
        body = json.dumps(body).encode("utf-8")
    except Exception as e:
        pass

    r = None
    try:
        r = requests.post(
            url=f"{app.state.config.TTS_OPENAI_API_BASE_URL}/audio/speech",
            data=body,
            headers=headers,
            stream=True,
        )

        r.raise_for_status()

        # Save the streaming content to a file
        with open(file_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)

        with open(file_body_path, "w") as f:
            json.dump(json.loads(body.decode("utf-8")), f)

        # Return the saved file
        return FileResponse(file_path)

    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']['message']}"
            except:
                error_detail = f"External: {e}"

        raise HTTPException(
            status_code=r.status_code if r != None else 500,
            detail=error_detail,
        )


@app.post("/transcriptions")
def transcribe(
    file: UploadFile = File(...),
    user=Depends(get_current_user),
):
    log.info(f"file.content_type: {file.content_type}")

    if file.content_type not in ["audio/mpeg", "audio/wav"]:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
        )

    try:
        ext = file.filename.split(".")[-1]

        id = uuid.uuid4()
        filename = f"{id}.{ext}"

        file_dir = f"{CACHE_DIR}/audio/transcriptions"
        os.makedirs(file_dir, exist_ok=True)
        file_path = f"{file_dir}/{filename}"

        print(filename)

        contents = file.file.read()
        with open(file_path, "wb") as f:
            f.write(contents)
            f.close()

        if app.state.config.STT_ENGINE == "":
            whisper_kwargs = {
                "model_size_or_path": WHISPER_MODEL,
                "device": whisper_device_type,
                "compute_type": "int8",
                "download_root": WHISPER_MODEL_DIR,
                "local_files_only": not WHISPER_MODEL_AUTO_UPDATE,
            }

            log.debug(f"whisper_kwargs: {whisper_kwargs}")

            try:
                model = WhisperModel(**whisper_kwargs)
            except:
                log.warning(
                    "WhisperModel initialization failed, attempting download with local_files_only=False"
                )
                whisper_kwargs["local_files_only"] = False
                model = WhisperModel(**whisper_kwargs)

            segments, info = model.transcribe(file_path, beam_size=5)
            log.info(
                "Detected language '%s' with probability %f"
                % (info.language, info.language_probability)
            )

            transcript = "".join([segment.text for segment in list(segments)])

            data = {"text": transcript.strip()}

            # save the transcript to a json file
            transcript_file = f"{file_dir}/{id}.json"
            with open(transcript_file, "w") as f:
                json.dump(data, f)

            print(data)

            return data

        elif app.state.config.STT_ENGINE == "openai":
            if is_mp4_audio(file_path):
                print("is_mp4_audio")
                os.rename(file_path, file_path.replace(".wav", ".mp4"))
                # Convert MP4 audio file to WAV format
                convert_mp4_to_wav(file_path.replace(".wav", ".mp4"), file_path)

            headers = {"Authorization": f"Bearer {app.state.config.STT_OPENAI_API_KEY}"}

            files = {"file": (filename, open(file_path, "rb"))}
            data = {"model": "whisper-1"}

            print(files, data)

            r = None
            try:
                r = requests.post(
                    url=f"{app.state.config.STT_OPENAI_API_BASE_URL}/audio/transcriptions",
                    headers=headers,
                    files=files,
                    data=data,
                )

                r.raise_for_status()

                data = r.json()

                # save the transcript to a json file
                transcript_file = f"{file_dir}/{id}.json"
                with open(transcript_file, "w") as f:
                    json.dump(data, f)

                print(data)
                return data
            except Exception as e:
                log.exception(e)
                error_detail = "Open WebUI: Server Connection Error"
                if r is not None:
                    try:
                        res = r.json()
                        if "error" in res:
                            error_detail = f"External: {res['error']['message']}"
                    except:
                        error_detail = f"External: {e}"

                raise HTTPException(
                    status_code=r.status_code if r != None else 500,
                    detail=error_detail,
                )

    except Exception as e:
        log.exception(e)

        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
backend/apps/images/main.py

@@ -1,527 +0,0 @@
import re
import requests
from fastapi import (
    FastAPI,
    Request,
    Depends,
    HTTPException,
    status,
    UploadFile,
    File,
    Form,
)
from fastapi.middleware.cors import CORSMiddleware
from faster_whisper import WhisperModel

from constants import ERROR_MESSAGES
from utils.utils import (
    get_current_user,
    get_admin_user,
)

from apps.images.utils.comfyui import ImageGenerationPayload, comfyui_generate_image
from utils.misc import calculate_sha256
from typing import Optional
from pydantic import BaseModel
from pathlib import Path
import mimetypes
import uuid
import base64
import json
import logging

from config import (
    SRC_LOG_LEVELS,
    CACHE_DIR,
    IMAGE_GENERATION_ENGINE,
    ENABLE_IMAGE_GENERATION,
    AUTOMATIC1111_BASE_URL,
    COMFYUI_BASE_URL,
    IMAGES_OPENAI_API_BASE_URL,
    IMAGES_OPENAI_API_KEY,
    IMAGE_GENERATION_MODEL,
    IMAGE_SIZE,
    IMAGE_STEPS,
    AppConfig,
)


log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["IMAGES"])

IMAGE_CACHE_DIR = Path(CACHE_DIR).joinpath("./image/generations/")
IMAGE_CACHE_DIR.mkdir(parents=True, exist_ok=True)

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.ENGINE = IMAGE_GENERATION_ENGINE
app.state.config.ENABLED = ENABLE_IMAGE_GENERATION

app.state.config.OPENAI_API_BASE_URL = IMAGES_OPENAI_API_BASE_URL
app.state.config.OPENAI_API_KEY = IMAGES_OPENAI_API_KEY

app.state.config.MODEL = IMAGE_GENERATION_MODEL


app.state.config.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL


app.state.config.IMAGE_SIZE = IMAGE_SIZE
app.state.config.IMAGE_STEPS = IMAGE_STEPS


@app.get("/config")
async def get_config(request: Request, user=Depends(get_admin_user)):
    return {
        "engine": app.state.config.ENGINE,
        "enabled": app.state.config.ENABLED,
    }


class ConfigUpdateForm(BaseModel):
    engine: str
    enabled: bool


@app.post("/config/update")
async def update_config(form_data: ConfigUpdateForm, user=Depends(get_admin_user)):
    app.state.config.ENGINE = form_data.engine
    app.state.config.ENABLED = form_data.enabled
    return {
        "engine": app.state.config.ENGINE,
        "enabled": app.state.config.ENABLED,
    }


class EngineUrlUpdateForm(BaseModel):
    AUTOMATIC1111_BASE_URL: Optional[str] = None
    COMFYUI_BASE_URL: Optional[str] = None


@app.get("/url")
async def get_engine_url(user=Depends(get_admin_user)):
    return {
        "AUTOMATIC1111_BASE_URL": app.state.config.AUTOMATIC1111_BASE_URL,
        "COMFYUI_BASE_URL": app.state.config.COMFYUI_BASE_URL,
    }


@app.post("/url/update")
async def update_engine_url(
    form_data: EngineUrlUpdateForm, user=Depends(get_admin_user)
):

    if form_data.AUTOMATIC1111_BASE_URL == None:
        app.state.config.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
    else:
        url = form_data.AUTOMATIC1111_BASE_URL.strip("/")
        try:
            r = requests.head(url)
            app.state.config.AUTOMATIC1111_BASE_URL = url
        except Exception as e:
            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))

    if form_data.COMFYUI_BASE_URL == None:
        app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL
    else:
        url = form_data.COMFYUI_BASE_URL.strip("/")

        try:
            r = requests.head(url)
            app.state.config.COMFYUI_BASE_URL = url
        except Exception as e:
            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))

    return {
        "AUTOMATIC1111_BASE_URL": app.state.config.AUTOMATIC1111_BASE_URL,
        "COMFYUI_BASE_URL": app.state.config.COMFYUI_BASE_URL,
        "status": True,
    }


class OpenAIConfigUpdateForm(BaseModel):
    url: str
    key: str


@app.get("/openai/config")
async def get_openai_config(user=Depends(get_admin_user)):
    return {
        "OPENAI_API_BASE_URL": app.state.config.OPENAI_API_BASE_URL,
        "OPENAI_API_KEY": app.state.config.OPENAI_API_KEY,
    }


@app.post("/openai/config/update")
async def update_openai_config(
    form_data: OpenAIConfigUpdateForm, user=Depends(get_admin_user)
):
    if form_data.key == "":
        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.API_KEY_NOT_FOUND)

    app.state.config.OPENAI_API_BASE_URL = form_data.url
    app.state.config.OPENAI_API_KEY = form_data.key

    return {
        "status": True,
        "OPENAI_API_BASE_URL": app.state.config.OPENAI_API_BASE_URL,
        "OPENAI_API_KEY": app.state.config.OPENAI_API_KEY,
    }


class ImageSizeUpdateForm(BaseModel):
    size: str


@app.get("/size")
async def get_image_size(user=Depends(get_admin_user)):
    return {"IMAGE_SIZE": app.state.config.IMAGE_SIZE}


@app.post("/size/update")
async def update_image_size(
    form_data: ImageSizeUpdateForm, user=Depends(get_admin_user)
):
    pattern = r"^\d+x\d+$"  # Regular expression pattern
    if re.match(pattern, form_data.size):
        app.state.config.IMAGE_SIZE = form_data.size
        return {
            "IMAGE_SIZE": app.state.config.IMAGE_SIZE,
            "status": True,
        }
    else:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.INCORRECT_FORMAT("  (e.g., 512x512)."),
        )


class ImageStepsUpdateForm(BaseModel):
    steps: int


@app.get("/steps")
async def get_image_size(user=Depends(get_admin_user)):
    return {"IMAGE_STEPS": app.state.config.IMAGE_STEPS}


@app.post("/steps/update")
async def update_image_size(
    form_data: ImageStepsUpdateForm, user=Depends(get_admin_user)
):
    if form_data.steps >= 0:
        app.state.config.IMAGE_STEPS = form_data.steps
        return {
            "IMAGE_STEPS": app.state.config.IMAGE_STEPS,
            "status": True,
        }
    else:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.INCORRECT_FORMAT("  (e.g., 50)."),
        )


@app.get("/models")
def get_models(user=Depends(get_current_user)):
    try:
        if app.state.config.ENGINE == "openai":
            return [
                {"id": "dall-e-2", "name": "DALL·E 2"},
                {"id": "dall-e-3", "name": "DALL·E 3"},
            ]
        elif app.state.config.ENGINE == "comfyui":

            r = requests.get(url=f"{app.state.config.COMFYUI_BASE_URL}/object_info")
            info = r.json()

            return list(
                map(
                    lambda model: {"id": model, "name": model},
                    info["CheckpointLoaderSimple"]["input"]["required"]["ckpt_name"][0],
                )
            )

        else:
            r = requests.get(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/sd-models"
            )
            models = r.json()
            return list(
                map(
                    lambda model: {"id": model["title"], "name": model["model_name"]},
                    models,
                )
            )
    except Exception as e:
        app.state.config.ENABLED = False
        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


@app.get("/models/default")
async def get_default_model(user=Depends(get_admin_user)):
    try:
        if app.state.config.ENGINE == "openai":
            return {
                "model": (
                    app.state.config.MODEL if app.state.config.MODEL else "dall-e-2"
                )
            }
        elif app.state.config.ENGINE == "comfyui":
            return {"model": (app.state.config.MODEL if app.state.config.MODEL else "")}
        else:
            r = requests.get(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options"
            )
            options = r.json()
            return {"model": options["sd_model_checkpoint"]}
    except Exception as e:
        app.state.config.ENABLED = False
        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


class UpdateModelForm(BaseModel):
    model: str


def set_model_handler(model: str):
    if app.state.config.ENGINE in ["openai", "comfyui"]:
        app.state.config.MODEL = model
        return app.state.config.MODEL
    else:
        r = requests.get(
            url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options"
        )
        options = r.json()

        if model != options["sd_model_checkpoint"]:
            options["sd_model_checkpoint"] = model
            r = requests.post(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options",
                json=options,
            )

        return options


@app.post("/models/default/update")
def update_default_model(
    form_data: UpdateModelForm,
    user=Depends(get_current_user),
):
    return set_model_handler(form_data.model)


class GenerateImageForm(BaseModel):
    model: Optional[str] = None
    prompt: str
    n: int = 1
    size: Optional[str] = None
    negative_prompt: Optional[str] = None


def save_b64_image(b64_str):
    try:
        image_id = str(uuid.uuid4())

        if "," in b64_str:
            header, encoded = b64_str.split(",", 1)
            mime_type = header.split(";")[0]

            img_data = base64.b64decode(encoded)
            image_format = mimetypes.guess_extension(mime_type)

            image_filename = f"{image_id}{image_format}"
            file_path = IMAGE_CACHE_DIR / f"{image_filename}"
            with open(file_path, "wb") as f:
                f.write(img_data)
            return image_filename
        else:
            image_filename = f"{image_id}.png"
            file_path = IMAGE_CACHE_DIR.joinpath(image_filename)

            img_data = base64.b64decode(b64_str)

            # Write the image data to a file
            with open(file_path, "wb") as f:
                f.write(img_data)
            return image_filename

    except Exception as e:
        log.exception(f"Error saving image: {e}")
        return None


def save_url_image(url):
    image_id = str(uuid.uuid4())
    try:
        r = requests.get(url)
        r.raise_for_status()
        if r.headers["content-type"].split("/")[0] == "image":

            mime_type = r.headers["content-type"]
            image_format = mimetypes.guess_extension(mime_type)

            if not image_format:
                raise ValueError("Could not determine image type from MIME type")

            image_filename = f"{image_id}{image_format}"

            file_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}")
            with open(file_path, "wb") as image_file:
                for chunk in r.iter_content(chunk_size=8192):
                    image_file.write(chunk)
            return image_filename
        else:
            log.error(f"Url does not point to an image.")
            return None

    except Exception as e:
        log.exception(f"Error saving image: {e}")
        return None


@app.post("/generations")
def generate_image(
    form_data: GenerateImageForm,
    user=Depends(get_current_user),
):

    width, height = tuple(map(int, app.state.config.IMAGE_SIZE.split("x")))

    r = None
    try:
        if app.state.config.ENGINE == "openai":

            headers = {}
            headers["Authorization"] = f"Bearer {app.state.config.OPENAI_API_KEY}"
            headers["Content-Type"] = "application/json"

            data = {
                "model": (
                    app.state.config.MODEL
                    if app.state.config.MODEL != ""
                    else "dall-e-2"
                ),
                "prompt": form_data.prompt,
                "n": form_data.n,
                "size": (
                    form_data.size if form_data.size else app.state.config.IMAGE_SIZE
                ),
                "response_format": "b64_json",
            }

            r = requests.post(
                url=f"{app.state.config.OPENAI_API_BASE_URL}/images/generations",
                json=data,
                headers=headers,
            )

            r.raise_for_status()
            res = r.json()

            images = []

            for image in res["data"]:
                image_filename = save_b64_image(image["b64_json"])
                images.append({"url": f"/cache/image/generations/{image_filename}"})
                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json")

                with open(file_body_path, "w") as f:
                    json.dump(data, f)

            return images

        elif app.state.config.ENGINE == "comfyui":

            data = {
                "prompt": form_data.prompt,
                "width": width,
                "height": height,
                "n": form_data.n,
            }

            if app.state.config.IMAGE_STEPS is not None:
                data["steps"] = app.state.config.IMAGE_STEPS

            if form_data.negative_prompt is not None:
                data["negative_prompt"] = form_data.negative_prompt

            data = ImageGenerationPayload(**data)

            res = comfyui_generate_image(
                app.state.config.MODEL,
                data,
                user.id,
                app.state.config.COMFYUI_BASE_URL,
            )
            log.debug(f"res: {res}")

            images = []

            for image in res["data"]:
                image_filename = save_url_image(image["url"])
                images.append({"url": f"/cache/image/generations/{image_filename}"})
                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json")

                with open(file_body_path, "w") as f:
                    json.dump(data.model_dump(exclude_none=True), f)

            log.debug(f"images: {images}")
            return images
        else:
            if form_data.model:
                set_model_handler(form_data.model)

            data = {
                "prompt": form_data.prompt,
                "batch_size": form_data.n,
                "width": width,
                "height": height,
            }

            if app.state.config.IMAGE_STEPS is not None:
                data["steps"] = app.state.config.IMAGE_STEPS

            if form_data.negative_prompt is not None:
                data["negative_prompt"] = form_data.negative_prompt

            r = requests.post(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/txt2img",
                json=data,
            )

            res = r.json()

            log.debug(f"res: {res}")

            images = []

            for image in res["images"]:
                image_filename = save_b64_image(image)
                images.append({"url": f"/cache/image/generations/{image_filename}"})
                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json")

                with open(file_body_path, "w") as f:
                    json.dump({**data, "info": res["info"]}, f)

            return images

    except Exception as e:
        error = e

        if r != None:
            data = r.json()
            if "error" in data:
                error = data["error"]["message"]
        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))
@@ -1,234 +0,0 @@
import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse
import random
import logging

from config import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["COMFYUI"])

from pydantic import BaseModel

from typing import Optional

COMFYUI_DEFAULT_PROMPT = """
{
  "3": {
    "inputs": {
      "seed": 0,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 1,
      "model": ["4", 0],
      "positive": ["6", 0],
      "negative": ["7", 0],
      "latent_image": ["5", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "4": {
    "inputs": {"ckpt_name": "model.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "5": {
    "inputs": {"width": 512, "height": 512, "batch_size": 1},
    "class_type": "EmptyLatentImage",
    "_meta": {"title": "Empty Latent Image"}
  },
  "6": {
    "inputs": {"text": "Prompt", "clip": ["4", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "7": {
    "inputs": {"text": "Negative Prompt", "clip": ["4", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "8": {
    "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "9": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  }
}
"""


def queue_prompt(prompt, client_id, base_url):
    log.info("queue_prompt")
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode("utf-8")
    req = urllib.request.Request(f"{base_url}/prompt", data=data)
    return json.loads(urllib.request.urlopen(req).read())


def get_image(filename, subfolder, folder_type, base_url):
    log.info("get_image")
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    with urllib.request.urlopen(f"{base_url}/view?{url_values}") as response:
        return response.read()


def get_image_url(filename, subfolder, folder_type, base_url):
    log.info("get_image_url")
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    return f"{base_url}/view?{url_values}"


def get_history(prompt_id, base_url):
    log.info("get_history")
    with urllib.request.urlopen(f"{base_url}/history/{prompt_id}") as response:
        return json.loads(response.read())


def get_images(ws, prompt, client_id, base_url):
    prompt_id = queue_prompt(prompt, client_id, base_url)["prompt_id"]
    output_images = []
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message["type"] == "executing":
                data = message["data"]
                if data["node"] is None and data["prompt_id"] == prompt_id:
                    break  # Execution is done
        else:
            continue  # previews are binary data

    history = get_history(prompt_id, base_url)[prompt_id]
    # Iterate over each output node once and collect the URLs of any images it produced
    for node_id in history["outputs"]:
        node_output = history["outputs"][node_id]
        if "images" in node_output:
            for image in node_output["images"]:
                url = get_image_url(
                    image["filename"], image["subfolder"], image["type"], base_url
                )
                output_images.append({"url": url})
    return {"data": output_images}


class ImageGenerationPayload(BaseModel):
    prompt: str
    negative_prompt: Optional[str] = ""
    steps: Optional[int] = None
    seed: Optional[int] = None
    width: int
    height: int
    n: int = 1


def comfyui_generate_image(
    model: str, payload: ImageGenerationPayload, client_id, base_url
):
    ws_url = base_url.replace("http://", "ws://").replace("https://", "wss://")

    comfyui_prompt = json.loads(COMFYUI_DEFAULT_PROMPT)

    comfyui_prompt["4"]["inputs"]["ckpt_name"] = model
    comfyui_prompt["5"]["inputs"]["batch_size"] = payload.n
    comfyui_prompt["5"]["inputs"]["width"] = payload.width
    comfyui_prompt["5"]["inputs"]["height"] = payload.height

    # set the text prompt for our positive CLIPTextEncode
    comfyui_prompt["6"]["inputs"]["text"] = payload.prompt
    comfyui_prompt["7"]["inputs"]["text"] = payload.negative_prompt

    if payload.steps:
        comfyui_prompt["3"]["inputs"]["steps"] = payload.steps

    comfyui_prompt["3"]["inputs"]["seed"] = (
        payload.seed if payload.seed else random.randint(0, 18446744073709551614)
    )

    try:
        ws = websocket.WebSocket()
        ws.connect(f"{ws_url}/ws?clientId={client_id}")
        log.info("WebSocket connection established.")
    except Exception as e:
        log.exception(f"Failed to connect to WebSocket server: {e}")
        return None

    try:
        images = get_images(ws, comfyui_prompt, client_id, base_url)
    except Exception as e:
        log.exception(f"Error while receiving images: {e}")
        images = None

    ws.close()

    return images
@@ -1,9 +0,0 @@
from typing import Optional

from pydantic import BaseModel


class SearchResult(BaseModel):
    link: str
    title: Optional[str]
    snippet: Optional[str]
@@ -1,139 +0,0 @@
import socketio
import asyncio


from apps.webui.models.users import Users
from utils.utils import decode_token

sio = socketio.AsyncServer(cors_allowed_origins=[], async_mode="asgi")
app = socketio.ASGIApp(sio, socketio_path="/ws/socket.io")

# Dictionaries to maintain the user pool

SESSION_POOL = {}
USER_POOL = {}
USAGE_POOL = {}
# Timeout duration in seconds
TIMEOUT_DURATION = 3


@sio.event
async def connect(sid, environ, auth):
    user = None
    if auth and "token" in auth:
        data = decode_token(auth["token"])

        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

        if user:
            SESSION_POOL[sid] = user.id
            if user.id in USER_POOL:
                USER_POOL[user.id].append(sid)
            else:
                USER_POOL[user.id] = [sid]

            print(f"user {user.name}({user.id}) connected with session ID {sid}")

            await sio.emit("user-count", {"count": len(set(USER_POOL))})
            await sio.emit("usage", {"models": get_models_in_use()})


@sio.on("user-join")
async def user_join(sid, data):
    print("user-join", sid, data)

    auth = data["auth"] if "auth" in data else None
    user = None

    if auth and "token" in auth:
        data = decode_token(auth["token"])

        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

        if user:

            SESSION_POOL[sid] = user.id
            if user.id in USER_POOL:
                USER_POOL[user.id].append(sid)
            else:
                USER_POOL[user.id] = [sid]

            print(f"user {user.name}({user.id}) connected with session ID {sid}")

            await sio.emit("user-count", {"count": len(set(USER_POOL))})


@sio.on("user-count")
async def user_count(sid):
    await sio.emit("user-count", {"count": len(set(USER_POOL))})


def get_models_in_use():
    # Aggregate all models in use
    models_in_use = []
    for model_id, data in USAGE_POOL.items():
        models_in_use.append(model_id)

    return models_in_use


@sio.on("usage")
async def usage(sid, data):

    model_id = data["model"]

    # Cancel the previous timeout callback if there is one
    if model_id in USAGE_POOL:
        USAGE_POOL[model_id]["callback"].cancel()

    # Store the new usage data and task

    if model_id in USAGE_POOL:
        USAGE_POOL[model_id]["sids"].append(sid)
        USAGE_POOL[model_id]["sids"] = list(set(USAGE_POOL[model_id]["sids"]))

    else:
        USAGE_POOL[model_id] = {"sids": [sid]}

    # Schedule a task to remove the usage data after TIMEOUT_DURATION
    USAGE_POOL[model_id]["callback"] = asyncio.create_task(
        remove_after_timeout(sid, model_id)
    )

    # Broadcast the usage data to all clients
    await sio.emit("usage", {"models": get_models_in_use()})


async def remove_after_timeout(sid, model_id):
    try:
        await asyncio.sleep(TIMEOUT_DURATION)
        if model_id in USAGE_POOL:
            print(USAGE_POOL[model_id]["sids"])
            USAGE_POOL[model_id]["sids"].remove(sid)
            USAGE_POOL[model_id]["sids"] = list(set(USAGE_POOL[model_id]["sids"]))

            if len(USAGE_POOL[model_id]["sids"]) == 0:
                del USAGE_POOL[model_id]

            # Broadcast the usage data to all clients
            await sio.emit("usage", {"models": get_models_in_use()})
    except asyncio.CancelledError:
        # Task was cancelled due to a new 'usage' event
        pass


@sio.event
async def disconnect(sid):
    if sid in SESSION_POOL:
        user_id = SESSION_POOL[sid]
        del SESSION_POOL[sid]

        USER_POOL[user_id].remove(sid)

        if len(USER_POOL[user_id]) == 0:
            del USER_POOL[user_id]

        await sio.emit("user-count", {"count": len(USER_POOL)})
    else:
        print(f"Unknown session ID {sid} disconnected")
@@ -1,39 +0,0 @@
import json

from peewee import *
from peewee_migrate import Router
from playhouse.db_url import connect
from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL, BACKEND_DIR
import os
import logging

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["DB"])


class JSONField(TextField):
    def db_value(self, value):
        return json.dumps(value)

    def python_value(self, value):
        if value is not None:
            return json.loads(value)


# Check if the file exists
if os.path.exists(f"{DATA_DIR}/ollama.db"):
    # Rename the file
    os.rename(f"{DATA_DIR}/ollama.db", f"{DATA_DIR}/webui.db")
    log.info("Database migrated from Ollama-WebUI successfully.")

DB = connect(DATABASE_URL)
log.info(f"Connected to a {DB.__class__.__name__} database.")
router = Router(
    DB,
    migrate_dir=BACKEND_DIR / "apps" / "webui" / "internal" / "migrations",
    logger=log,
)
router.run()
DB.connect(reuse_if_open=True)
@@ -1,21 +0,0 @@
# Database Migrations

This directory contains all the database migrations for the web app.
Migrations are done using the [`peewee-migrate`](https://github.com/klen/peewee_migrate) library.

Migrations are automatically run at app startup.

## Creating a migration

Have you made a change to the schema of an existing model?
You will need to create a migration file to ensure that existing databases are updated for backwards compatibility.

1. Have a database file (`webui.db`) that has the old schema prior to any of your changes.
2. Make your changes to the models.
3. From the `backend` directory, run the following command:
   ```bash
   pw_migrate create --auto --auto-source apps.webui.models --database sqlite:///${SQLITE_DB} --directory apps/webui/internal/migrations ${MIGRATION_NAME}
   ```
   - `$SQLITE_DB` should be the path to the database file.
   - `$MIGRATION_NAME` should be a descriptive name for the migration.
4. The migration file will be created in the `apps/webui/internal/migrations` directory.
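For example, a hypothetical invocation might look like this; the database path and migration name below are illustrative placeholders, not values taken from this repository:

```bash
cd backend
# Path to a copy of the database that still has the old schema (assumed location)
export SQLITE_DB=../data/webui.db
# Descriptive placeholder name for the migration
export MIGRATION_NAME=add_archived_column
pw_migrate create --auto --auto-source apps.webui.models --database sqlite:///${SQLITE_DB} --directory apps/webui/internal/migrations ${MIGRATION_NAME}
```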
@@ -1,91 +0,0 @@
from fastapi import FastAPI, Depends
from fastapi.routing import APIRoute
from fastapi.middleware.cors import CORSMiddleware
from apps.webui.routers import (
    auths,
    users,
    chats,
    documents,
    tools,
    models,
    prompts,
    configs,
    memories,
    utils,
)
from config import (
    WEBUI_BUILD_HASH,
    SHOW_ADMIN_DETAILS,
    ADMIN_EMAIL,
    WEBUI_AUTH,
    DEFAULT_MODELS,
    DEFAULT_PROMPT_SUGGESTIONS,
    DEFAULT_USER_ROLE,
    ENABLE_SIGNUP,
    USER_PERMISSIONS,
    WEBHOOK_URL,
    WEBUI_AUTH_TRUSTED_EMAIL_HEADER,
    JWT_EXPIRES_IN,
    WEBUI_BANNERS,
    ENABLE_COMMUNITY_SHARING,
    AppConfig,
)

app = FastAPI()

origins = ["*"]

app.state.config = AppConfig()

app.state.config.ENABLE_SIGNUP = ENABLE_SIGNUP
app.state.config.JWT_EXPIRES_IN = JWT_EXPIRES_IN
app.state.AUTH_TRUSTED_EMAIL_HEADER = WEBUI_AUTH_TRUSTED_EMAIL_HEADER


app.state.config.SHOW_ADMIN_DETAILS = SHOW_ADMIN_DETAILS
app.state.config.ADMIN_EMAIL = ADMIN_EMAIL


app.state.config.DEFAULT_MODELS = DEFAULT_MODELS
app.state.config.DEFAULT_PROMPT_SUGGESTIONS = DEFAULT_PROMPT_SUGGESTIONS
app.state.config.DEFAULT_USER_ROLE = DEFAULT_USER_ROLE
app.state.config.USER_PERMISSIONS = USER_PERMISSIONS
app.state.config.WEBHOOK_URL = WEBHOOK_URL
app.state.config.BANNERS = WEBUI_BANNERS

app.state.config.ENABLE_COMMUNITY_SHARING = ENABLE_COMMUNITY_SHARING

app.state.MODELS = {}
app.state.TOOLS = {}


app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(auths.router, prefix="/auths", tags=["auths"])
app.include_router(users.router, prefix="/users", tags=["users"])
app.include_router(chats.router, prefix="/chats", tags=["chats"])

app.include_router(documents.router, prefix="/documents", tags=["documents"])
app.include_router(tools.router, prefix="/tools", tags=["tools"])
app.include_router(models.router, prefix="/models", tags=["models"])
app.include_router(prompts.router, prefix="/prompts", tags=["prompts"])
app.include_router(memories.router, prefix="/memories", tags=["memories"])

app.include_router(configs.router, prefix="/configs", tags=["configs"])
app.include_router(utils.router, prefix="/utils", tags=["utils"])


@app.get("/")
async def get_status():
    return {
        "status": True,
        "auth": WEBUI_AUTH,
        "default_models": app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
    }
@@ -1,355 +0,0 @@
from pydantic import BaseModel
from typing import List, Union, Optional
from peewee import *
from playhouse.shortcuts import model_to_dict

import json
import uuid
import time

from apps.webui.internal.db import DB

####################
# Chat DB Schema
####################


class Chat(Model):
    id = CharField(unique=True)
    user_id = CharField()
    title = TextField()
    chat = TextField()  # Save Chat JSON as Text

    created_at = BigIntegerField()
    updated_at = BigIntegerField()

    share_id = CharField(null=True, unique=True)
    archived = BooleanField(default=False)

    class Meta:
        database = DB


class ChatModel(BaseModel):
    id: str
    user_id: str
    title: str
    chat: str

    created_at: int  # timestamp in epoch
    updated_at: int  # timestamp in epoch

    share_id: Optional[str] = None
    archived: bool = False


####################
# Forms
####################


class ChatForm(BaseModel):
    chat: dict


class ChatTitleForm(BaseModel):
    title: str


class ChatResponse(BaseModel):
    id: str
    user_id: str
    title: str
    chat: dict
    updated_at: int  # timestamp in epoch
    created_at: int  # timestamp in epoch
    share_id: Optional[str] = None  # id of the chat to be shared
    archived: bool


class ChatTitleIdResponse(BaseModel):
    id: str
    title: str
    updated_at: int
    created_at: int


class ChatTable:
    def __init__(self, db):
        self.db = db
        db.create_tables([Chat])

    def insert_new_chat(self, user_id: str, form_data: ChatForm) -> Optional[ChatModel]:
        id = str(uuid.uuid4())
        chat = ChatModel(
            **{
                "id": id,
                "user_id": user_id,
                "title": (
                    form_data.chat["title"] if "title" in form_data.chat else "New Chat"
                ),
                "chat": json.dumps(form_data.chat),
                "created_at": int(time.time()),
                "updated_at": int(time.time()),
            }
        )

        result = Chat.create(**chat.model_dump())
        return chat if result else None

    def update_chat_by_id(self, id: str, chat: dict) -> Optional[ChatModel]:
        try:
            query = Chat.update(
                chat=json.dumps(chat),
                title=chat["title"] if "title" in chat else "New Chat",
                updated_at=int(time.time()),
            ).where(Chat.id == id)
            query.execute()

            chat = Chat.get(Chat.id == id)
            return ChatModel(**model_to_dict(chat))
        except Exception:
            return None

    def insert_shared_chat_by_chat_id(self, chat_id: str) -> Optional[ChatModel]:
        # Get the existing chat to share
        chat = Chat.get(Chat.id == chat_id)
        # Check if the chat is already shared
        if chat.share_id:
            return self.get_chat_by_id_and_user_id(chat.share_id, "shared")
        # Create a new chat with the same data, but with a new ID
        shared_chat = ChatModel(
            **{
                "id": str(uuid.uuid4()),
                "user_id": f"shared-{chat_id}",
                "title": chat.title,
                "chat": chat.chat,
                "created_at": chat.created_at,
                "updated_at": int(time.time()),
            }
        )
        shared_result = Chat.create(**shared_chat.model_dump())
        # Update the original chat with the share_id
        result = (
            Chat.update(share_id=shared_chat.id).where(Chat.id == chat_id).execute()
        )

        return shared_chat if (shared_result and result) else None

    def update_shared_chat_by_chat_id(self, chat_id: str) -> Optional[ChatModel]:
        try:
            print("update_shared_chat_by_chat_id")
            chat = Chat.get(Chat.id == chat_id)
            print(chat)

            query = Chat.update(
                title=chat.title,
                chat=chat.chat,
            ).where(Chat.id == chat.share_id)

            query.execute()

            chat = Chat.get(Chat.id == chat.share_id)
            return ChatModel(**model_to_dict(chat))
        except Exception:
            return None

    def delete_shared_chat_by_chat_id(self, chat_id: str) -> bool:
        try:
            query = Chat.delete().where(Chat.user_id == f"shared-{chat_id}")
            query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception:
            return False

    def update_chat_share_id_by_id(
        self, id: str, share_id: Optional[str]
    ) -> Optional[ChatModel]:
        try:
            query = Chat.update(
                share_id=share_id,
            ).where(Chat.id == id)
            query.execute()

            chat = Chat.get(Chat.id == id)
            return ChatModel(**model_to_dict(chat))
        except Exception:
            return None

    def toggle_chat_archive_by_id(self, id: str) -> Optional[ChatModel]:
        try:
            chat = self.get_chat_by_id(id)
            query = Chat.update(
                archived=(not chat.archived),
            ).where(Chat.id == id)

            query.execute()

            chat = Chat.get(Chat.id == id)
            return ChatModel(**model_to_dict(chat))
        except Exception:
            return None

    def archive_all_chats_by_user_id(self, user_id: str) -> bool:
        try:
            chats = self.get_chats_by_user_id(user_id)
            for chat in chats:
                query = Chat.update(
                    archived=True,
                ).where(Chat.id == chat.id)

                query.execute()

            return True
        except Exception:
            return False

    def get_archived_chat_list_by_user_id(
        self, user_id: str, skip: int = 0, limit: int = 50
    ) -> List[ChatModel]:
        return [
            ChatModel(**model_to_dict(chat))
            for chat in Chat.select()
            .where(Chat.archived == True)
            .where(Chat.user_id == user_id)
            .order_by(Chat.updated_at.desc())
            # .limit(limit)
            # .offset(skip)
        ]

    def get_chat_list_by_user_id(
        self,
        user_id: str,
        include_archived: bool = False,
        skip: int = 0,
        limit: int = 50,
    ) -> List[ChatModel]:
        if include_archived:
            return [
                ChatModel(**model_to_dict(chat))
                for chat in Chat.select()
                .where(Chat.user_id == user_id)
                .order_by(Chat.updated_at.desc())
                # .limit(limit)
                # .offset(skip)
            ]
        else:
            return [
                ChatModel(**model_to_dict(chat))
                for chat in Chat.select()
                .where(Chat.archived == False)
                .where(Chat.user_id == user_id)
                .order_by(Chat.updated_at.desc())
                # .limit(limit)
                # .offset(skip)
            ]

    def get_chat_list_by_chat_ids(
        self, chat_ids: List[str], skip: int = 0, limit: int = 50
    ) -> List[ChatModel]:
        return [
            ChatModel(**model_to_dict(chat))
            for chat in Chat.select()
            .where(Chat.archived == False)
            .where(Chat.id.in_(chat_ids))
            .order_by(Chat.updated_at.desc())
        ]

    def get_chat_by_id(self, id: str) -> Optional[ChatModel]:
        try:
            chat = Chat.get(Chat.id == id)
            return ChatModel(**model_to_dict(chat))
        except Exception:
            return None

    def get_chat_by_share_id(self, id: str) -> Optional[ChatModel]:
        try:
            chat = Chat.get(Chat.share_id == id)

            if chat:
                # The shared copy is stored as a separate chat whose primary id
                # equals the original chat's share_id.
                chat = Chat.get(Chat.id == id)
                return ChatModel(**model_to_dict(chat))
            else:
                return None
        except Exception:
            return None

    def get_chat_by_id_and_user_id(self, id: str, user_id: str) -> Optional[ChatModel]:
        try:
            chat = Chat.get(Chat.id == id, Chat.user_id == user_id)
            return ChatModel(**model_to_dict(chat))
        except Exception:
            return None

    def get_chats(self, skip: int = 0, limit: int = 50) -> List[ChatModel]:
        return [
            ChatModel(**model_to_dict(chat))
            for chat in Chat.select().order_by(Chat.updated_at.desc())
            # .limit(limit).offset(skip)
        ]

    def get_chats_by_user_id(self, user_id: str) -> List[ChatModel]:
        return [
            ChatModel(**model_to_dict(chat))
            for chat in Chat.select()
            .where(Chat.user_id == user_id)
            .order_by(Chat.updated_at.desc())
            # .limit(limit).offset(skip)
        ]

    def get_archived_chats_by_user_id(self, user_id: str) -> List[ChatModel]:
        return [
            ChatModel(**model_to_dict(chat))
            for chat in Chat.select()
            .where(Chat.archived == True)
            .where(Chat.user_id == user_id)
            .order_by(Chat.updated_at.desc())
        ]

    def delete_chat_by_id(self, id: str) -> bool:
        try:
            query = Chat.delete().where(Chat.id == id)
            query.execute()  # Remove the rows, return number of rows removed.

            return self.delete_shared_chat_by_chat_id(id)
        except Exception:
            return False

    def delete_chat_by_id_and_user_id(self, id: str, user_id: str) -> bool:
        try:
            query = Chat.delete().where((Chat.id == id) & (Chat.user_id == user_id))
            query.execute()  # Remove the rows, return number of rows removed.

            return self.delete_shared_chat_by_chat_id(id)
        except Exception:
            return False

    def delete_chats_by_user_id(self, user_id: str) -> bool:
        try:

            self.delete_shared_chats_by_user_id(user_id)

            query = Chat.delete().where(Chat.user_id == user_id)
            query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception:
            return False

    def delete_shared_chats_by_user_id(self, user_id: str) -> bool:
        try:
            shared_chat_ids = [
                f"shared-{chat.id}"
                for chat in Chat.select().where(Chat.user_id == user_id)
            ]

            query = Chat.delete().where(Chat.user_id << shared_chat_ids)
            query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception:
            return False


Chats = ChatTable(DB)
@@ -1,160 +0,0 @@
from pydantic import BaseModel
from peewee import *
from playhouse.shortcuts import model_to_dict
from typing import List, Union, Optional
import time
import logging

from utils.utils import decode_token
from utils.misc import get_gravatar_url

from apps.webui.internal.db import DB

import json

from config import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

####################
# Documents DB Schema
####################


class Document(Model):
    collection_name = CharField(unique=True)
    name = CharField(unique=True)
    title = TextField()
    filename = TextField()
    content = TextField(null=True)
    user_id = CharField()
    timestamp = BigIntegerField()

    class Meta:
        database = DB


class DocumentModel(BaseModel):
    collection_name: str
    name: str
    title: str
    filename: str
    content: Optional[str] = None
    user_id: str
    timestamp: int  # timestamp in epoch


####################
# Forms
####################


class DocumentResponse(BaseModel):
    collection_name: str
    name: str
    title: str
    filename: str
    content: Optional[dict] = None
    user_id: str
    timestamp: int  # timestamp in epoch


class DocumentUpdateForm(BaseModel):
    name: str
    title: str


class DocumentForm(DocumentUpdateForm):
    collection_name: str
    filename: str
    content: Optional[str] = None


class DocumentsTable:
    def __init__(self, db):
        self.db = db
        self.db.create_tables([Document])

    def insert_new_doc(
        self, user_id: str, form_data: DocumentForm
    ) -> Optional[DocumentModel]:
        document = DocumentModel(
            **{
                **form_data.model_dump(),
                "user_id": user_id,
                "timestamp": int(time.time()),
            }
        )

        try:
            result = Document.create(**document.model_dump())
            if result:
                return document
            else:
                return None
        except Exception:
            return None

    def get_doc_by_name(self, name: str) -> Optional[DocumentModel]:
        try:
            document = Document.get(Document.name == name)
            return DocumentModel(**model_to_dict(document))
        except Exception:
            return None

    def get_docs(self) -> List[DocumentModel]:
        return [
            DocumentModel(**model_to_dict(doc))
            for doc in Document.select()
            # .limit(limit).offset(skip)
        ]

    def update_doc_by_name(
        self, name: str, form_data: DocumentUpdateForm
    ) -> Optional[DocumentModel]:
        try:
            query = Document.update(
                title=form_data.title,
                name=form_data.name,
                timestamp=int(time.time()),
            ).where(Document.name == name)
            query.execute()

            doc = Document.get(Document.name == form_data.name)
            return DocumentModel(**model_to_dict(doc))
        except Exception as e:
            log.exception(e)
            return None

    def update_doc_content_by_name(
        self, name: str, updated: dict
    ) -> Optional[DocumentModel]:
        try:
            doc = self.get_doc_by_name(name)
            doc_content = json.loads(doc.content if doc.content else "{}")
            doc_content = {**doc_content, **updated}

            query = Document.update(
                content=json.dumps(doc_content),
                timestamp=int(time.time()),
            ).where(Document.name == name)
            query.execute()

            doc = Document.get(Document.name == name)
            return DocumentModel(**model_to_dict(doc))
        except Exception as e:
            log.exception(e)
            return None

    def delete_doc_by_name(self, name: str) -> bool:
        try:
            query = Document.delete().where(Document.name == name)
            query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception:
            return False


Documents = DocumentsTable(DB)
@@ -1,118 +0,0 @@
from pydantic import BaseModel
from peewee import *
from playhouse.shortcuts import model_to_dict
from typing import List, Union, Optional

from apps.webui.internal.db import DB
from apps.webui.models.chats import Chats

import time
import uuid

####################
# Memory DB Schema
####################


class Memory(Model):
    id = CharField(unique=True)
    user_id = CharField()
    content = TextField()
    updated_at = BigIntegerField()
    created_at = BigIntegerField()

    class Meta:
        database = DB


class MemoryModel(BaseModel):
    id: str
    user_id: str
    content: str
    updated_at: int  # timestamp in epoch
    created_at: int  # timestamp in epoch


####################
# Forms
####################


class MemoriesTable:
    def __init__(self, db):
        self.db = db
        self.db.create_tables([Memory])

    def insert_new_memory(
        self,
        user_id: str,
        content: str,
    ) -> Optional[MemoryModel]:
        id = str(uuid.uuid4())

        memory = MemoryModel(
            **{
                "id": id,
                "user_id": user_id,
                "content": content,
                "created_at": int(time.time()),
                "updated_at": int(time.time()),
            }
        )
        result = Memory.create(**memory.model_dump())
        if result:
            return memory
        else:
            return None

    def get_memories(self) -> Optional[List[MemoryModel]]:
        try:
            memories = Memory.select()
            return [MemoryModel(**model_to_dict(memory)) for memory in memories]
        except Exception:
            return None

    def get_memories_by_user_id(self, user_id: str) -> Optional[List[MemoryModel]]:
        try:
            memories = Memory.select().where(Memory.user_id == user_id)
            return [MemoryModel(**model_to_dict(memory)) for memory in memories]
        except Exception:
            return None

    def get_memory_by_id(self, id: str) -> Optional[MemoryModel]:
        try:
            memory = Memory.get(Memory.id == id)
            return MemoryModel(**model_to_dict(memory))
        except Exception:
            return None

    def delete_memory_by_id(self, id: str) -> bool:
        try:
            query = Memory.delete().where(Memory.id == id)
            query.execute()  # Remove the rows, return number of rows removed.

            return True

        except Exception:
            return False

    def delete_memories_by_user_id(self, user_id: str) -> bool:
        try:
            query = Memory.delete().where(Memory.user_id == user_id)
            query.execute()

            return True
        except Exception:
            return False

    def delete_memory_by_id_and_user_id(self, id: str, user_id: str) -> bool:
        try:
            query = Memory.delete().where(Memory.id == id, Memory.user_id == user_id)
            query.execute()

            return True
        except Exception:
            return False


Memories = MemoriesTable(DB)
@@ -1,118 +0,0 @@
from pydantic import BaseModel
from peewee import *
from playhouse.shortcuts import model_to_dict
from typing import List, Union, Optional
import time

from utils.utils import decode_token
from utils.misc import get_gravatar_url

from apps.webui.internal.db import DB

import json

####################
# Prompts DB Schema
####################


class Prompt(Model):
    command = CharField(unique=True)
    user_id = CharField()
    title = TextField()
    content = TextField()
    timestamp = BigIntegerField()

    class Meta:
        database = DB


class PromptModel(BaseModel):
    command: str
    user_id: str
    title: str
    content: str
    timestamp: int  # timestamp in epoch


####################
# Forms
####################


class PromptForm(BaseModel):
    command: str
    title: str
    content: str


class PromptsTable:

    def __init__(self, db):
        self.db = db
        self.db.create_tables([Prompt])

    def insert_new_prompt(
        self, user_id: str, form_data: PromptForm
    ) -> Optional[PromptModel]:
        prompt = PromptModel(
            **{
                "user_id": user_id,
                "command": form_data.command,
                "title": form_data.title,
                "content": form_data.content,
                "timestamp": int(time.time()),
            }
        )

        try:
            result = Prompt.create(**prompt.model_dump())
            if result:
                return prompt
            else:
                return None
        except Exception:
            return None

    def get_prompt_by_command(self, command: str) -> Optional[PromptModel]:
        try:
            prompt = Prompt.get(Prompt.command == command)
            return PromptModel(**model_to_dict(prompt))
        except Exception:
            return None

    def get_prompts(self) -> List[PromptModel]:
        return [
            PromptModel(**model_to_dict(prompt))
            for prompt in Prompt.select()
            # .limit(limit).offset(skip)
        ]

    def update_prompt_by_command(
        self, command: str, form_data: PromptForm
    ) -> Optional[PromptModel]:
        try:
            query = Prompt.update(
                title=form_data.title,
                content=form_data.content,
                timestamp=int(time.time()),
            ).where(Prompt.command == command)

            query.execute()

            prompt = Prompt.get(Prompt.command == command)
            return PromptModel(**model_to_dict(prompt))
        except Exception:
            return None

    def delete_prompt_by_command(self, command: str) -> bool:
        try:
            query = Prompt.delete().where(Prompt.command == command)
            query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception:
            return False


Prompts = PromptsTable(DB)
@@ -1,237 +0,0 @@
from pydantic import BaseModel
from typing import List, Union, Optional
from peewee import *
from playhouse.shortcuts import model_to_dict

import json
import uuid
import time
import logging

from apps.webui.internal.db import DB

from config import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

####################
# Tag DB Schema
####################


class Tag(Model):
    id = CharField(unique=True)
    name = CharField()
    user_id = CharField()
    data = TextField(null=True)

    class Meta:
        database = DB


class ChatIdTag(Model):
    id = CharField(unique=True)
    tag_name = CharField()
    chat_id = CharField()
    user_id = CharField()
    timestamp = BigIntegerField()

    class Meta:
        database = DB


class TagModel(BaseModel):
    id: str
    name: str
    user_id: str
    data: Optional[str] = None


class ChatIdTagModel(BaseModel):
    id: str
    tag_name: str
    chat_id: str
    user_id: str
    timestamp: int


####################
# Forms
####################


class ChatIdTagForm(BaseModel):
    tag_name: str
    chat_id: str


class TagChatIdsResponse(BaseModel):
    chat_ids: List[str]


class ChatTagsResponse(BaseModel):
    tags: List[str]


class TagTable:
    def __init__(self, db):
        self.db = db
        db.create_tables([Tag, ChatIdTag])

    def insert_new_tag(self, name: str, user_id: str) -> Optional[TagModel]:
        id = str(uuid.uuid4())
        tag = TagModel(**{"id": id, "user_id": user_id, "name": name})
        try:
            result = Tag.create(**tag.model_dump())
            if result:
                return tag
            else:
                return None
        except Exception:
            return None

    def get_tag_by_name_and_user_id(
        self, name: str, user_id: str
    ) -> Optional[TagModel]:
        try:
            tag = Tag.get(Tag.name == name, Tag.user_id == user_id)
            return TagModel(**model_to_dict(tag))
        except Exception:
            return None

    def add_tag_to_chat(
        self, user_id: str, form_data: ChatIdTagForm
    ) -> Optional[ChatIdTagModel]:
        tag = self.get_tag_by_name_and_user_id(form_data.tag_name, user_id)
        if tag is None:
            tag = self.insert_new_tag(form_data.tag_name, user_id)

        id = str(uuid.uuid4())
        chatIdTag = ChatIdTagModel(
            **{
                "id": id,
                "user_id": user_id,
                "chat_id": form_data.chat_id,
                "tag_name": tag.name,
                "timestamp": int(time.time()),
            }
        )
        try:
            result = ChatIdTag.create(**chatIdTag.model_dump())
            if result:
                return chatIdTag
            else:
                return None
        except Exception:
            return None

    def get_tags_by_user_id(self, user_id: str) -> List[TagModel]:
        tag_names = [
            ChatIdTagModel(**model_to_dict(chat_id_tag)).tag_name
            for chat_id_tag in ChatIdTag.select()
            .where(ChatIdTag.user_id == user_id)
            .order_by(ChatIdTag.timestamp.desc())
        ]

        return [
            TagModel(**model_to_dict(tag))
            for tag in Tag.select()
            .where(Tag.user_id == user_id)
            .where(Tag.name.in_(tag_names))
        ]

    def get_tags_by_chat_id_and_user_id(
        self, chat_id: str, user_id: str
    ) -> List[TagModel]:
        tag_names = [
            ChatIdTagModel(**model_to_dict(chat_id_tag)).tag_name
            for chat_id_tag in ChatIdTag.select()
            .where((ChatIdTag.user_id == user_id) & (ChatIdTag.chat_id == chat_id))
            .order_by(ChatIdTag.timestamp.desc())
        ]

        return [
            TagModel(**model_to_dict(tag))
            for tag in Tag.select()
            .where(Tag.user_id == user_id)
            .where(Tag.name.in_(tag_names))
        ]

    def get_chat_ids_by_tag_name_and_user_id(
        self, tag_name: str, user_id: str
    ) -> List[ChatIdTagModel]:
        return [
            ChatIdTagModel(**model_to_dict(chat_id_tag))
            for chat_id_tag in ChatIdTag.select()
            .where((ChatIdTag.user_id == user_id) & (ChatIdTag.tag_name == tag_name))
            .order_by(ChatIdTag.timestamp.desc())
        ]

    def count_chat_ids_by_tag_name_and_user_id(
        self, tag_name: str, user_id: str
    ) -> int:
        return (
            ChatIdTag.select()
            .where((ChatIdTag.tag_name == tag_name) & (ChatIdTag.user_id == user_id))
            .count()
        )

    def delete_tag_by_tag_name_and_user_id(self, tag_name: str, user_id: str) -> bool:
        try:
            query = ChatIdTag.delete().where(
                (ChatIdTag.tag_name == tag_name) & (ChatIdTag.user_id == user_id)
            )
            res = query.execute()  # Remove the rows, return number of rows removed.
            log.debug(f"res: {res}")

            tag_count = self.count_chat_ids_by_tag_name_and_user_id(tag_name, user_id)
            if tag_count == 0:
                # Remove the tag from the Tag table as well
                query = Tag.delete().where(
                    (Tag.name == tag_name) & (Tag.user_id == user_id)
                )
                query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception as e:
            log.error(f"delete_tag: {e}")
            return False

    def delete_tag_by_tag_name_and_chat_id_and_user_id(
        self, tag_name: str, chat_id: str, user_id: str
    ) -> bool:
        try:
            query = ChatIdTag.delete().where(
                (ChatIdTag.tag_name == tag_name)
                & (ChatIdTag.chat_id == chat_id)
                & (ChatIdTag.user_id == user_id)
            )
            res = query.execute()  # Remove the rows, return number of rows removed.
            log.debug(f"res: {res}")

            tag_count = self.count_chat_ids_by_tag_name_and_user_id(tag_name, user_id)
            if tag_count == 0:
                # Remove the tag from the Tag table as well
                query = Tag.delete().where(
                    (Tag.name == tag_name) & (Tag.user_id == user_id)
                )
                query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception as e:
            log.error(f"delete_tag: {e}")
            return False

    def delete_tags_by_chat_id_and_user_id(self, chat_id: str, user_id: str) -> bool:
        tags = self.get_tags_by_chat_id_and_user_id(chat_id, user_id)

        for tag in tags:
            # tags are TagModel instances, whose tag name lives in the `name` field
            self.delete_tag_by_tag_name_and_chat_id_and_user_id(
                tag.name, chat_id, user_id
            )

        return True


Tags = TagTable(DB)
@@ -1,132 +0,0 @@
from pydantic import BaseModel
from peewee import *
from playhouse.shortcuts import model_to_dict
from typing import List, Union, Optional
import time
import logging
from apps.webui.internal.db import DB, JSONField

import json

from config import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

####################
# Tools DB Schema
####################


class Tool(Model):
    id = CharField(unique=True)
    user_id = CharField()
    name = TextField()
    content = TextField()
    specs = JSONField()
    meta = JSONField()
    updated_at = BigIntegerField()
    created_at = BigIntegerField()

    class Meta:
        database = DB


class ToolMeta(BaseModel):
    description: Optional[str] = None


class ToolModel(BaseModel):
    id: str
    user_id: str
    name: str
    content: str
    specs: List[dict]
    meta: ToolMeta
    updated_at: int  # timestamp in epoch
    created_at: int  # timestamp in epoch


####################
# Forms
####################


class ToolResponse(BaseModel):
    id: str
    user_id: str
    name: str
    meta: ToolMeta
    updated_at: int  # timestamp in epoch
    created_at: int  # timestamp in epoch


class ToolForm(BaseModel):
    id: str
    name: str
    content: str
    meta: ToolMeta


class ToolsTable:
    def __init__(self, db):
        self.db = db
        self.db.create_tables([Tool])

    def insert_new_tool(
        self, user_id: str, form_data: ToolForm, specs: List[dict]
    ) -> Optional[ToolModel]:
        tool = ToolModel(
            **{
                **form_data.model_dump(),
                "specs": specs,
                "user_id": user_id,
                "updated_at": int(time.time()),
                "created_at": int(time.time()),
            }
        )

        try:
            result = Tool.create(**tool.model_dump())
            if result:
                return tool
            else:
                return None
        except Exception as e:
            log.exception(f"Error creating tool: {e}")
            return None

    def get_tool_by_id(self, id: str) -> Optional[ToolModel]:
        try:
            tool = Tool.get(Tool.id == id)
            return ToolModel(**model_to_dict(tool))
        except Exception:
            return None

    def get_tools(self) -> List[ToolModel]:
        return [ToolModel(**model_to_dict(tool)) for tool in Tool.select()]

    def update_tool_by_id(self, id: str, updated: dict) -> Optional[ToolModel]:
        try:
            query = Tool.update(
                **updated,
                updated_at=int(time.time()),
            ).where(Tool.id == id)
            query.execute()

            tool = Tool.get(Tool.id == id)
            return ToolModel(**model_to_dict(tool))
        except Exception:
            return None

    def delete_tool_by_id(self, id: str) -> bool:
        try:
            query = Tool.delete().where(Tool.id == id)
            query.execute()  # Remove the rows, return number of rows removed.

            return True
        except Exception:
            return False


Tools = ToolsTable(DB)
@@ -1,218 +0,0 @@
from pydantic import BaseModel, ConfigDict
from peewee import *
from playhouse.shortcuts import model_to_dict
from typing import List, Union, Optional
import time
from utils.misc import get_gravatar_url

from apps.webui.internal.db import DB, JSONField
from apps.webui.models.chats import Chats

####################
# User DB Schema
####################


class User(Model):
    id = CharField(unique=True)
    name = CharField()
    email = CharField()
    role = CharField()
    profile_image_url = TextField()

    last_active_at = BigIntegerField()
    updated_at = BigIntegerField()
    created_at = BigIntegerField()

    api_key = CharField(null=True, unique=True)
    settings = JSONField(null=True)

    class Meta:
        database = DB


class UserSettings(BaseModel):
    ui: Optional[dict] = {}
    model_config = ConfigDict(extra="allow")


class UserModel(BaseModel):
    id: str
    name: str
    email: str
    role: str = "pending"
    profile_image_url: str

    last_active_at: int  # timestamp in epoch
    updated_at: int  # timestamp in epoch
    created_at: int  # timestamp in epoch

    api_key: Optional[str] = None
    settings: Optional[UserSettings] = None


####################
# Forms
####################


class UserRoleUpdateForm(BaseModel):
    id: str
    role: str


class UserUpdateForm(BaseModel):
    name: str
    email: str
    profile_image_url: str
    password: Optional[str] = None


class UsersTable:
    def __init__(self, db):
        self.db = db
        self.db.create_tables([User])

    def insert_new_user(
        self,
        id: str,
        name: str,
        email: str,
        profile_image_url: str = "/user.png",
        role: str = "pending",
    ) -> Optional[UserModel]:
        user = UserModel(
            **{
                "id": id,
                "name": name,
                "email": email,
                "role": role,
                "profile_image_url": profile_image_url,
                "last_active_at": int(time.time()),
                "created_at": int(time.time()),
                "updated_at": int(time.time()),
            }
        )
        result = User.create(**user.model_dump())
        if result:
            return user
        else:
            return None

    def get_user_by_id(self, id: str) -> Optional[UserModel]:
        try:
            user = User.get(User.id == id)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def get_user_by_api_key(self, api_key: str) -> Optional[UserModel]:
        try:
            user = User.get(User.api_key == api_key)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def get_user_by_email(self, email: str) -> Optional[UserModel]:
        try:
            user = User.get(User.email == email)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def get_users(self, skip: int = 0, limit: int = 50) -> List[UserModel]:
        return [
            UserModel(**model_to_dict(user))
            for user in User.select()
            # .limit(limit).offset(skip)
        ]

    def get_num_users(self) -> Optional[int]:
        return User.select().count()

    def get_first_user(self) -> Optional[UserModel]:
        try:
            user = User.select().order_by(User.created_at).first()
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def update_user_role_by_id(self, id: str, role: str) -> Optional[UserModel]:
        try:
            query = User.update(role=role).where(User.id == id)
            query.execute()

            user = User.get(User.id == id)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def update_user_profile_image_url_by_id(
        self, id: str, profile_image_url: str
    ) -> Optional[UserModel]:
        try:
            query = User.update(profile_image_url=profile_image_url).where(
                User.id == id
            )
            query.execute()

            user = User.get(User.id == id)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def update_user_last_active_by_id(self, id: str) -> Optional[UserModel]:
        try:
            query = User.update(last_active_at=int(time.time())).where(User.id == id)
            query.execute()

            user = User.get(User.id == id)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def update_user_by_id(self, id: str, updated: dict) -> Optional[UserModel]:
        try:
            query = User.update(**updated).where(User.id == id)
            query.execute()

            user = User.get(User.id == id)
            return UserModel(**model_to_dict(user))
        except Exception:
            return None

    def delete_user_by_id(self, id: str) -> bool:
        try:
            # Delete User Chats
            result = Chats.delete_chats_by_user_id(id)

            if result:
                # Delete User
                query = User.delete().where(User.id == id)
                query.execute()  # Remove the rows, return number of rows removed.

                return True
            else:
                return False
        except Exception:
            return False

    def update_user_api_key_by_id(self, id: str, api_key: str) -> bool:
        try:
            query = User.update(api_key=api_key).where(User.id == id)
            result = query.execute()

            return result == 1
        except Exception:
            return False

    def get_user_api_key_by_id(self, id: str) -> Optional[str]:
        try:
            user = User.get(User.id == id)
            return user.api_key
        except Exception:
            return None


Users = UsersTable(DB)
@@ -1,481 +0,0 @@
from fastapi import Depends, Request, HTTPException, status
from datetime import datetime, timedelta
from typing import List, Union, Optional
from utils.utils import get_current_user, get_admin_user
from fastapi import APIRouter
from pydantic import BaseModel
import json
import logging

from apps.webui.models.users import Users
from apps.webui.models.chats import (
    ChatModel,
    ChatResponse,
    ChatTitleForm,
    ChatForm,
    ChatTitleIdResponse,
    Chats,
)

from apps.webui.models.tags import (
    TagModel,
    ChatIdTagModel,
    ChatIdTagForm,
    ChatTagsResponse,
    Tags,
)

from constants import ERROR_MESSAGES

from config import SRC_LOG_LEVELS, ENABLE_ADMIN_EXPORT

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

router = APIRouter()

############################
# GetChatList
############################


@router.get("/", response_model=List[ChatTitleIdResponse])
@router.get("/list", response_model=List[ChatTitleIdResponse])
async def get_session_user_chat_list(
    user=Depends(get_current_user), skip: int = 0, limit: int = 50
):
    return Chats.get_chat_list_by_user_id(user.id, skip, limit)


############################
# DeleteAllChats
############################


@router.delete("/", response_model=bool)
async def delete_all_user_chats(request: Request, user=Depends(get_current_user)):
    if (
        user.role == "user"
        and not request.app.state.config.USER_PERMISSIONS["chat"]["deletion"]
    ):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )

    result = Chats.delete_chats_by_user_id(user.id)
    return result


############################
# GetUserChatList
############################


@router.get("/list/user/{user_id}", response_model=List[ChatTitleIdResponse])
async def get_user_chat_list_by_user_id(
    user_id: str, user=Depends(get_admin_user), skip: int = 0, limit: int = 50
):
    return Chats.get_chat_list_by_user_id(
        user_id, include_archived=True, skip=skip, limit=limit
    )


############################
# CreateNewChat
############################


@router.post("/new", response_model=Optional[ChatResponse])
async def create_new_chat(form_data: ChatForm, user=Depends(get_current_user)):
    try:
        chat = Chats.insert_new_chat(user.id, form_data)
        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
        )


############################
# GetChats
############################


@router.get("/all", response_model=List[ChatResponse])
async def get_user_chats(user=Depends(get_current_user)):
    return [
        ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
        for chat in Chats.get_chats_by_user_id(user.id)
    ]


############################
# GetArchivedChats
############################


@router.get("/all/archived", response_model=List[ChatResponse])
async def get_user_archived_chats(user=Depends(get_current_user)):
    return [
        ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
        for chat in Chats.get_archived_chats_by_user_id(user.id)
    ]


############################
# GetAllChatsInDB
############################


@router.get("/all/db", response_model=List[ChatResponse])
async def get_all_user_chats_in_db(user=Depends(get_admin_user)):
    if not ENABLE_ADMIN_EXPORT:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )
    return [
        ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
        for chat in Chats.get_chats()
    ]


############################
# GetArchivedChats
############################


@router.get("/archived", response_model=List[ChatTitleIdResponse])
async def get_archived_session_user_chat_list(
    user=Depends(get_current_user), skip: int = 0, limit: int = 50
):
    return Chats.get_archived_chat_list_by_user_id(user.id, skip, limit)


############################
# ArchiveAllChats
############################


@router.post("/archive/all", response_model=bool)
async def archive_all_chats(user=Depends(get_current_user)):
    return Chats.archive_all_chats_by_user_id(user.id)


############################
# GetSharedChatById
############################


@router.get("/share/{share_id}", response_model=Optional[ChatResponse])
async def get_shared_chat_by_id(share_id: str, user=Depends(get_current_user)):
    if user.role == "pending":
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
        )

    if user.role == "user":
        chat = Chats.get_chat_by_share_id(share_id)
    elif user.role == "admin":
        chat = Chats.get_chat_by_id(share_id)

    if chat:
        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
        )


############################
# GetChatsByTags
############################


class TagNameForm(BaseModel):
    name: str
    skip: Optional[int] = 0
    limit: Optional[int] = 50


@router.post("/tags", response_model=List[ChatTitleIdResponse])
async def get_user_chat_list_by_tag_name(
    form_data: TagNameForm, user=Depends(get_current_user)
):
    log.debug(form_data)
    chat_ids = [
        chat_id_tag.chat_id
        for chat_id_tag in Tags.get_chat_ids_by_tag_name_and_user_id(
            form_data.name, user.id
        )
    ]

    chats = Chats.get_chat_list_by_chat_ids(chat_ids, form_data.skip, form_data.limit)

    if len(chats) == 0:
        # No chats carry this tag anymore, so drop the orphaned tag.
        Tags.delete_tag_by_tag_name_and_user_id(form_data.name, user.id)

    return chats


############################
# GetAllTags
############################


@router.get("/tags/all", response_model=List[TagModel])
async def get_all_tags(user=Depends(get_current_user)):
    try:
        tags = Tags.get_tags_by_user_id(user.id)
        return tags
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
        )


############################
# GetChatById
############################


@router.get("/{id}", response_model=Optional[ChatResponse])
async def get_chat_by_id(id: str, user=Depends(get_current_user)):
    chat = Chats.get_chat_by_id_and_user_id(id, user.id)

    if chat:
        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
        )


############################
# UpdateChatById
############################


@router.post("/{id}", response_model=Optional[ChatResponse])
async def update_chat_by_id(
    id: str, form_data: ChatForm, user=Depends(get_current_user)
):
    chat = Chats.get_chat_by_id_and_user_id(id, user.id)
    if chat:
        updated_chat = {**json.loads(chat.chat), **form_data.chat}

        chat = Chats.update_chat_by_id(id, updated_chat)
        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )


############################
# DeleteChatById
############################


@router.delete("/{id}", response_model=bool)
async def delete_chat_by_id(request: Request, id: str, user=Depends(get_current_user)):
    if user.role == "admin":
        result = Chats.delete_chat_by_id(id)
        return result
    else:
        if not request.app.state.config.USER_PERMISSIONS["chat"]["deletion"]:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
            )

        result = Chats.delete_chat_by_id_and_user_id(id, user.id)
        return result


############################
# CloneChat
############################


@router.get("/{id}/clone", response_model=Optional[ChatResponse])
async def clone_chat_by_id(id: str, user=Depends(get_current_user)):
    chat = Chats.get_chat_by_id_and_user_id(id, user.id)
    if chat:
        chat_body = json.loads(chat.chat)
        updated_chat = {
            **chat_body,
            "originalChatId": chat.id,
            "branchPointMessageId": chat_body["history"]["currentId"],
            "title": f"Clone of {chat.title}",
        }

        chat = Chats.insert_new_chat(user.id, ChatForm(**{"chat": updated_chat}))
        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.DEFAULT()
        )


############################
# ArchiveChat
############################


@router.get("/{id}/archive", response_model=Optional[ChatResponse])
async def archive_chat_by_id(id: str, user=Depends(get_current_user)):
    chat = Chats.get_chat_by_id_and_user_id(id, user.id)
    if chat:
        chat = Chats.toggle_chat_archive_by_id(id)
        return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.DEFAULT()
        )


############################
# ShareChatById
############################


@router.post("/{id}/share", response_model=Optional[ChatResponse])
async def share_chat_by_id(id: str, user=Depends(get_current_user)):
    chat = Chats.get_chat_by_id_and_user_id(id, user.id)
    if chat:
        if chat.share_id:
            shared_chat = Chats.update_shared_chat_by_chat_id(chat.id)
            return ChatResponse(
                **{**shared_chat.model_dump(), "chat": json.loads(shared_chat.chat)}
            )

        shared_chat = Chats.insert_shared_chat_by_chat_id(chat.id)
        if not shared_chat:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=ERROR_MESSAGES.DEFAULT(),
            )

        return ChatResponse(
            **{**shared_chat.model_dump(), "chat": json.loads(shared_chat.chat)}
        )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )


############################
# DeleteSharedChatById
############################


@router.delete("/{id}/share", response_model=Optional[bool])
async def delete_shared_chat_by_id(id: str, user=Depends(get_current_user)):
    chat = Chats.get_chat_by_id_and_user_id(id, user.id)
    if chat:
        if not chat.share_id:
            return False

        result = Chats.delete_shared_chat_by_chat_id(id)
        update_result = Chats.update_chat_share_id_by_id(id, None)

        return result and update_result is not None
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )


############################
# GetChatTagsById
############################


@router.get("/{id}/tags", response_model=List[TagModel])
async def get_chat_tags_by_id(id: str, user=Depends(get_current_user)):
    tags = Tags.get_tags_by_chat_id_and_user_id(id, user.id)

    if tags is not None:
        return tags
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
        )


############################
# AddChatTagById
############################


@router.post("/{id}/tags", response_model=Optional[ChatIdTagModel])
async def add_chat_tag_by_id(
    id: str, form_data: ChatIdTagForm, user=Depends(get_current_user)
):
    tags = Tags.get_tags_by_chat_id_and_user_id(id, user.id)

    if form_data.tag_name not in tags:
        tag = Tags.add_tag_to_chat(user.id, form_data)

        if tag:
            return tag
        else:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail=ERROR_MESSAGES.NOT_FOUND,
            )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.DEFAULT()
        )


############################
# DeleteChatTagById
############################


@router.delete("/{id}/tags", response_model=Optional[bool])
async def delete_chat_tag_by_id(
    id: str, form_data: ChatIdTagForm, user=Depends(get_current_user)
):
    result = Tags.delete_tag_by_tag_name_and_chat_id_and_user_id(
        form_data.tag_name, id, user.id
    )

    if result:
        return result
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
        )


############################
# DeleteAllChatTagsById
############################


@router.delete("/{id}/tags/all", response_model=Optional[bool])
async def delete_all_chat_tags_by_id(id: str, user=Depends(get_current_user)):
    result = Tags.delete_tags_by_chat_id_and_user_id(id, user.id)

    if result:
        return result
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
        )
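A hedged sketch of driving these chat routes in a test; `main.app` and the /chats mount point are assumptions here, and the auth dependency is presumed satisfied by the test session:

from fastapi.testclient import TestClient
from main import app  # hypothetical application entry point

client = TestClient(app)
new = client.post("/chats/new", json={"chat": {"title": "Demo"}}).json()
client.post(f"/chats/{new['id']}/share")  # creates or refreshes the shared copy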
@@ -1,145 +0,0 @@
from fastapi import Response, Request
from fastapi import Depends, FastAPI, HTTPException, status
from datetime import datetime, timedelta
from typing import List, Union, Optional

from fastapi import APIRouter
from pydantic import BaseModel
import logging

from apps.webui.models.memories import Memories, MemoryModel

from utils.utils import get_verified_user
from constants import ERROR_MESSAGES

from config import SRC_LOG_LEVELS, CHROMA_CLIENT

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

router = APIRouter()


@router.get("/ef")
async def get_embeddings(request: Request):
    return {"result": request.app.state.EMBEDDING_FUNCTION("hello world")}


############################
# GetMemories
############################


@router.get("/", response_model=List[MemoryModel])
async def get_memories(user=Depends(get_verified_user)):
    return Memories.get_memories_by_user_id(user.id)


############################
# AddMemory
############################


class AddMemoryForm(BaseModel):
    content: str


@router.post("/add", response_model=Optional[MemoryModel])
async def add_memory(
    request: Request, form_data: AddMemoryForm, user=Depends(get_verified_user)
):
    memory = Memories.insert_new_memory(user.id, form_data.content)
    memory_embedding = request.app.state.EMBEDDING_FUNCTION(memory.content)

    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")
    collection.upsert(
        documents=[memory.content],
        ids=[memory.id],
        embeddings=[memory_embedding],
        metadatas=[{"created_at": memory.created_at}],
    )

    return memory


############################
# QueryMemory
############################


class QueryMemoryForm(BaseModel):
    content: str


@router.post("/query")
async def query_memory(
    request: Request, form_data: QueryMemoryForm, user=Depends(get_verified_user)
):
    query_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")

    results = collection.query(
        query_embeddings=[query_embedding],
        n_results=1,  # how many results to return
    )

    return results


############################
# ResetMemoryFromVectorDB
############################


@router.get("/reset", response_model=bool)
async def reset_memory_from_vector_db(
    request: Request, user=Depends(get_verified_user)
):
    CHROMA_CLIENT.delete_collection(f"user-memory-{user.id}")
    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")

    memories = Memories.get_memories_by_user_id(user.id)
    for memory in memories:
        memory_embedding = request.app.state.EMBEDDING_FUNCTION(memory.content)
        collection.upsert(
            documents=[memory.content],
            ids=[memory.id],
            embeddings=[memory_embedding],
        )
    return True


############################
# DeleteMemoriesByUserId
############################


@router.delete("/user", response_model=bool)
async def delete_memory_by_user_id(user=Depends(get_verified_user)):
    result = Memories.delete_memories_by_user_id(user.id)

    if result:
        try:
            CHROMA_CLIENT.delete_collection(f"user-memory-{user.id}")
        except Exception as e:
            log.error(e)
        return True

    return False


############################
# DeleteMemoryById
############################


@router.delete("/{memory_id}", response_model=bool)
async def delete_memory_by_id(memory_id: str, user=Depends(get_verified_user)):
    result = Memories.delete_memory_by_id_and_user_id(memory_id, user.id)

    if result:
        collection = CHROMA_CLIENT.get_or_create_collection(
            name=f"user-memory-{user.id}"
        )
        collection.delete(ids=[memory_id])
        return True

    return False
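A hedged sketch of the add-then-query memory flow; `main.app` and the /memories mount point are assumptions, as is a configured EMBEDDING_FUNCTION on app.state:

from fastapi.testclient import TestClient
from main import app  # hypothetical entry point

client = TestClient(app)
client.post("/memories/add", json={"content": "My favourite colour is green"})
res = client.post("/memories/query", json={"content": "favourite colour"})
print(res.json())  # nearest match from the per-user Chroma collection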
@@ -1,183 +0,0 @@
from fastapi import Depends, FastAPI, HTTPException, status, Request
from datetime import datetime, timedelta
from typing import List, Union, Optional

from fastapi import APIRouter
from pydantic import BaseModel
import json

from apps.webui.models.tools import Tools, ToolForm, ToolModel, ToolResponse
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import get_current_user, get_admin_user
from utils.tools import get_tools_specs
from constants import ERROR_MESSAGES

from importlib import util
import os

from config import DATA_DIR


TOOLS_DIR = f"{DATA_DIR}/tools"
os.makedirs(TOOLS_DIR, exist_ok=True)


router = APIRouter()

############################
# GetToolkits
############################


@router.get("/", response_model=List[ToolResponse])
async def get_toolkits(user=Depends(get_current_user)):
    toolkits = [toolkit for toolkit in Tools.get_tools()]
    return toolkits


############################
# ExportToolKits
############################


@router.get("/export", response_model=List[ToolModel])
async def export_toolkits(user=Depends(get_admin_user)):
    toolkits = [toolkit for toolkit in Tools.get_tools()]
    return toolkits


############################
# CreateNewToolKit
############################


@router.post("/create", response_model=Optional[ToolResponse])
async def create_new_toolkit(
    request: Request, form_data: ToolForm, user=Depends(get_admin_user)
):
    if not form_data.id.isidentifier():
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only alphanumeric characters and underscores are allowed in the id",
        )

    form_data.id = form_data.id.lower()

    toolkit = Tools.get_tool_by_id(form_data.id)
    if toolkit is None:
        toolkit_path = os.path.join(TOOLS_DIR, f"{form_data.id}.py")
        try:
            with open(toolkit_path, "w") as tool_file:
                tool_file.write(form_data.content)

            toolkit_module = load_toolkit_module_by_id(form_data.id)

            TOOLS = request.app.state.TOOLS
            TOOLS[form_data.id] = toolkit_module

            specs = get_tools_specs(TOOLS[form_data.id])
            toolkit = Tools.insert_new_tool(user.id, form_data, specs)

            if toolkit:
                return toolkit
            else:
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail=ERROR_MESSAGES.DEFAULT("Error creating toolkit"),
                )
        except Exception as e:
            print(e)
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.DEFAULT(e),
            )
    else:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.ID_TAKEN,
        )


############################
# GetToolkitById
############################


@router.get("/id/{id}", response_model=Optional[ToolModel])
async def get_toolkit_by_id(id: str, user=Depends(get_admin_user)):
    toolkit = Tools.get_tool_by_id(id)

    if toolkit:
        return toolkit
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.NOT_FOUND,
        )


############################
# UpdateToolkitById
############################


@router.post("/id/{id}/update", response_model=Optional[ToolModel])
async def update_toolkit_by_id(
    request: Request, id: str, form_data: ToolForm, user=Depends(get_admin_user)
):
    toolkit_path = os.path.join(TOOLS_DIR, f"{id}.py")

    try:
        with open(toolkit_path, "w") as tool_file:
            tool_file.write(form_data.content)

        toolkit_module = load_toolkit_module_by_id(id)

        TOOLS = request.app.state.TOOLS
        TOOLS[id] = toolkit_module

        specs = get_tools_specs(TOOLS[id])

        updated = {
            **form_data.model_dump(exclude={"id"}),
            "specs": specs,
        }

        print(updated)
        toolkit = Tools.update_tool_by_id(id, updated)

        if toolkit:
            return toolkit
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.DEFAULT("Error updating toolkit"),
            )

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )


############################
# DeleteToolkitById
############################


@router.delete("/id/{id}/delete", response_model=bool)
async def delete_toolkit_by_id(request: Request, id: str, user=Depends(get_admin_user)):
    result = Tools.delete_tool_by_id(id)

    if result:
        TOOLS = request.app.state.TOOLS
        if id in TOOLS:
            del TOOLS[id]

        # delete the toolkit file
        toolkit_path = os.path.join(TOOLS_DIR, f"{id}.py")
        os.remove(toolkit_path)

    return result
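A hedged sketch of registering a toolkit through the create endpoint; the /tools mount point, the `main.app` entry point, and any ToolForm fields beyond `id`, `name`, and `content` are assumptions:

from fastapi.testclient import TestClient
from main import app  # hypothetical entry point

client = TestClient(app)
toolkit_source = (
    "class Tools:\n"
    "    def add(self, a: int, b: int) -> int:\n"
    '        """Add two integers."""\n'
    "        return a + b\n"
)
# The id must pass str.isidentifier(), and the content must define a Tools class.
client.post(
    "/tools/create",
    json={"id": "calculator", "name": "Calculator", "content": toolkit_source},
)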
@@ -1,135 +0,0 @@
from fastapi import APIRouter, UploadFile, File, Response
from fastapi import Depends, HTTPException, status
from peewee import SqliteDatabase
from starlette.responses import StreamingResponse, FileResponse
from pydantic import BaseModel


from fpdf import FPDF
import markdown
import black


from apps.webui.internal.db import DB
from utils.utils import get_admin_user
from utils.misc import calculate_sha256, get_gravatar_url

from config import OLLAMA_BASE_URLS, DATA_DIR, UPLOAD_DIR, ENABLE_ADMIN_EXPORT
from constants import ERROR_MESSAGES
from typing import List

router = APIRouter()


@router.get("/gravatar")
async def get_gravatar(
    email: str,
):
    return get_gravatar_url(email)


class CodeFormatRequest(BaseModel):
    code: str


@router.post("/code/format")
async def format_code(request: CodeFormatRequest):
    try:
        formatted_code = black.format_str(request.code, mode=black.Mode())
        return {"code": formatted_code}
    except black.NothingChanged:
        return {"code": request.code}
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))


class MarkdownForm(BaseModel):
    md: str


@router.post("/markdown")
async def get_html_from_markdown(
    form_data: MarkdownForm,
):
    return {"html": markdown.markdown(form_data.md)}


class ChatForm(BaseModel):
    title: str
    messages: List[dict]


@router.post("/pdf")
async def download_chat_as_pdf(
    form_data: ChatForm,
):
    pdf = FPDF()
    pdf.add_page()

    STATIC_DIR = "./static"
    FONTS_DIR = f"{STATIC_DIR}/fonts"

    pdf.add_font("NotoSans", "", f"{FONTS_DIR}/NotoSans-Regular.ttf")
    pdf.add_font("NotoSans", "b", f"{FONTS_DIR}/NotoSans-Bold.ttf")
    pdf.add_font("NotoSans", "i", f"{FONTS_DIR}/NotoSans-Italic.ttf")
    pdf.add_font("NotoSansKR", "", f"{FONTS_DIR}/NotoSansKR-Regular.ttf")
    pdf.add_font("NotoSansJP", "", f"{FONTS_DIR}/NotoSansJP-Regular.ttf")

    pdf.set_font("NotoSans", size=12)
    pdf.set_fallback_fonts(["NotoSansKR", "NotoSansJP"])

    pdf.set_auto_page_break(auto=True, margin=15)

    # Adjust the effective page width for multi_cell
    effective_page_width = (
        pdf.w - 2 * pdf.l_margin - 10
    )  # Subtracted an additional 10 for extra padding

    # Add chat messages
    for message in form_data.messages:
        role = message["role"]
        content = message["content"]
        pdf.set_font("NotoSans", "B", size=14)  # Bold for the role
        pdf.multi_cell(effective_page_width, 10, f"{role.upper()}", 0, "L")
        pdf.ln(1)  # Extra space between messages

        pdf.set_font("NotoSans", size=10)  # Regular for content
        pdf.multi_cell(effective_page_width, 6, content, 0, "L")
        pdf.ln(1.5)  # Extra space between messages

    # Save the PDF under the name chat.pdf
    pdf_bytes = pdf.output()

    return Response(
        content=bytes(pdf_bytes),
        media_type="application/pdf",
        headers={"Content-Disposition": "attachment;filename=chat.pdf"},
    )


@router.get("/db/download")
async def download_db(user=Depends(get_admin_user)):
    if not ENABLE_ADMIN_EXPORT:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )
    if not isinstance(DB, SqliteDatabase):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DB_NOT_SQLITE,
        )
    return FileResponse(
        DB.database,
        media_type="application/octet-stream",
        filename="webui.db",
    )


@router.get("/litellm/config")
async def download_litellm_config_yaml(user=Depends(get_admin_user)):
    return FileResponse(
        f"{DATA_DIR}/litellm/config.yaml",
        media_type="application/octet-stream",
        filename="config.yaml",
    )
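A hedged sketch exercising the markdown and code-format endpoints above; `main.app` and the /utils mount point are assumptions:

from fastapi.testclient import TestClient
from main import app  # hypothetical entry point

client = TestClient(app)
html = client.post("/utils/markdown", json={"md": "# Hello"}).json()["html"]
pretty = client.post("/utils/code/format", json={"code": "x=1"}).json()["code"]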
@@ -1,23 +0,0 @@
from importlib import util
import os

from config import TOOLS_DIR


def load_toolkit_module_by_id(toolkit_id):
    toolkit_path = os.path.join(TOOLS_DIR, f"{toolkit_id}.py")
    spec = util.spec_from_file_location(toolkit_id, toolkit_path)
    module = util.module_from_spec(spec)

    try:
        spec.loader.exec_module(module)
        print(f"Loaded module: {module.__name__}")
        if hasattr(module, "Tools"):
            return module.Tools()
        else:
            raise Exception("No Tools class found")
    except Exception as e:
        print(f"Error loading module: {toolkit_id}")
        # Rename the file so the broken toolkit is not loaded again
        os.rename(toolkit_path, f"{toolkit_path}.error")
        raise e
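A minimal sketch of the loader contract above, assuming a calculator.py toolkit (hypothetical id) was already written to TOOLS_DIR: the module must expose a Tools class, and on any failure the source file is renamed to <id>.py.error before the error is re-raised:

from apps.webui.utils import load_toolkit_module_by_id

tools = load_toolkit_module_by_id("calculator")
print(tools.add(1, 2))  # methods on the Tools instance become callable tool entries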
backend/config.py (1086 lines changed; diff suppressed because it is too large)
@@ -1,36 +0,0 @@
{
    "version": 0,
    "ui": {
        "default_locale": "en-US",
        "prompt_suggestions": [
            {
                "title": ["Help me study", "vocabulary for a college entrance exam"],
                "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."
            },
            {
                "title": ["Give me ideas", "for what to do with my kids' art"],
                "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."
            },
            {
                "title": ["Tell me a fun fact", "about the Roman Empire"],
                "content": "Tell me a random fun fact about the Roman Empire"
            },
            {
                "title": ["Show me a code snippet", "of a website's sticky header"],
                "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript."
            },
            {
                "title": ["Explain options trading", "if I'm familiar with buying and selling stocks"],
                "content": "Explain options trading in simple terms if I'm familiar with buying and selling stocks."
            },
            {
                "title": ["Overcome procrastination", "give me tips"],
                "content": "Could you start by asking me about instances when I procrastinate the most and then give me some suggestions to overcome it?"
            },
            {
                "title": ["Grammar check", "rewrite it for better readability"],
                "content": "Check the following sentence for grammar and clarity: \"[sentence]\". Rewrite it for better readability while maintaining its original meaning."
            }
        ]
    }
}
@@ -1,4 +0,0 @@
general_settings: {}
litellm_settings: {}
model_list: []
router_settings: {}
@@ -1 +1 @@
-dir for backend files (db, documents, etc.)
+docker dir for backend files (db, documents, etc.)
@@ -1,2 +1,2 @@
PORT="${PORT:-8080}"
-uvicorn main:app --port $PORT --host 0.0.0.0 --forwarded-allow-ips '*' --reload
+uvicorn open_webui.main:app --port $PORT --host 0.0.0.0 --forwarded-allow-ips '*' --reload
backend/main.py (1433 lines changed; diff suppressed because it is too large)
@@ -9,8 +9,6 @@ import uvicorn
app = typer.Typer()

KEY_FILE = Path.cwd() / ".webui_secret_key"
if (frontend_build_dir := Path(__file__).parent / "frontend").exists():
    os.environ["FRONTEND_BUILD_DIR"] = str(frontend_build_dir)


@app.command()
@@ -18,6 +16,7 @@ def serve(
    host: str = "0.0.0.0",
    port: int = 8080,
):
+    os.environ["FROM_INIT_PY"] = "true"
    if os.getenv("WEBUI_SECRET_KEY") is None:
        typer.echo(
            "Loading WEBUI_SECRET_KEY from file, not provided as an environment variable."
@@ -40,9 +39,23 @@ def serve(
                "/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
            ]
        )
-    import main  # we need set environment variables before importing main
-
-    uvicorn.run(main.app, host=host, port=port, forwarded_allow_ips="*")
+        try:
+            import torch
+
+            assert torch.cuda.is_available(), "CUDA not available"
+            typer.echo("CUDA seems to be working")
+        except Exception as e:
+            typer.echo(
+                "Error when testing CUDA but USE_CUDA_DOCKER is true. "
+                "Resetting USE_CUDA_DOCKER to false and removing "
+                f"LD_LIBRARY_PATH modifications: {e}"
+            )
+            os.environ["USE_CUDA_DOCKER"] = "false"
+            os.environ["LD_LIBRARY_PATH"] = ":".join(LD_LIBRARY_PATH)
+
+    import open_webui.main  # we need to set environment variables before importing main
+
+    uvicorn.run(open_webui.main.app, host=host, port=port, forwarded_allow_ips="*")


@app.command()
@@ -52,7 +65,11 @@ def dev(
    reload: bool = True,
):
    uvicorn.run(
-        "main:app", host=host, port=port, reload=reload, forwarded_allow_ips="*"
+        "open_webui.main:app",
+        host=host,
+        port=port,
+        reload=reload,
+        forwarded_allow_ips="*",
    )
backend/open_webui/alembic.ini (new file, 114 lines)
@@ -0,0 +1,114 @@
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = migrations

# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s

# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .

# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library.
# Any required deps can installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =

# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

# version location specification; This defaults
# to migrations/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions

# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os  # Use os.pathsep. Default configuration used for new projects.

# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false

# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8

# sqlalchemy.url = REPLACE_WITH_DATABASE_URL


[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples

# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME

# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
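A sketch of applying the migrations this configuration points at, using Alembic's Python API rather than the CLI; the ini path matches the file added above, and the working directory must be backend/open_webui so that script_location = migrations resolves:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # run from backend/open_webui
command.upgrade(cfg, "head")  # applies every revision under migrations/versions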
backend/open_webui/apps/audio/main.py (new file, 640 lines)
@@ -0,0 +1,640 @@
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from pydub import AudioSegment
|
||||
from pydub.silence import split_on_silence
|
||||
|
||||
import requests
|
||||
from open_webui.config import (
|
||||
AUDIO_STT_ENGINE,
|
||||
AUDIO_STT_MODEL,
|
||||
AUDIO_STT_OPENAI_API_BASE_URL,
|
||||
AUDIO_STT_OPENAI_API_KEY,
|
||||
AUDIO_TTS_API_KEY,
|
||||
AUDIO_TTS_ENGINE,
|
||||
AUDIO_TTS_MODEL,
|
||||
AUDIO_TTS_OPENAI_API_BASE_URL,
|
||||
AUDIO_TTS_OPENAI_API_KEY,
|
||||
AUDIO_TTS_SPLIT_ON,
|
||||
AUDIO_TTS_VOICE,
|
||||
AUDIO_TTS_AZURE_SPEECH_REGION,
|
||||
AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT,
|
||||
CACHE_DIR,
|
||||
CORS_ALLOW_ORIGIN,
|
||||
WHISPER_MODEL,
|
||||
WHISPER_MODEL_AUTO_UPDATE,
|
||||
WHISPER_MODEL_DIR,
|
||||
AppConfig,
|
||||
)
|
||||
|
||||
from open_webui.constants import ERROR_MESSAGES
|
||||
from open_webui.env import SRC_LOG_LEVELS, DEVICE_TYPE
|
||||
from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile, status
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import FileResponse
|
||||
from pydantic import BaseModel
|
||||
from open_webui.utils.utils import get_admin_user, get_verified_user
|
||||
|
||||
# Constants
|
||||
MAX_FILE_SIZE_MB = 25
|
||||
MAX_FILE_SIZE = MAX_FILE_SIZE_MB * 1024 * 1024 # Convert MB to bytes
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(SRC_LOG_LEVELS["AUDIO"])
|
||||
|
||||
app = FastAPI()
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=CORS_ALLOW_ORIGIN,
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
app.state.config = AppConfig()
|
||||
|
||||
app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL
|
||||
app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY
|
||||
app.state.config.STT_ENGINE = AUDIO_STT_ENGINE
|
||||
app.state.config.STT_MODEL = AUDIO_STT_MODEL
|
||||
|
||||
app.state.config.WHISPER_MODEL = WHISPER_MODEL
|
||||
app.state.faster_whisper_model = None
|
||||
|
||||
app.state.config.TTS_OPENAI_API_BASE_URL = AUDIO_TTS_OPENAI_API_BASE_URL
|
||||
app.state.config.TTS_OPENAI_API_KEY = AUDIO_TTS_OPENAI_API_KEY
|
||||
app.state.config.TTS_ENGINE = AUDIO_TTS_ENGINE
|
||||
app.state.config.TTS_MODEL = AUDIO_TTS_MODEL
|
||||
app.state.config.TTS_VOICE = AUDIO_TTS_VOICE
|
||||
app.state.config.TTS_API_KEY = AUDIO_TTS_API_KEY
|
||||
app.state.config.TTS_SPLIT_ON = AUDIO_TTS_SPLIT_ON
|
||||
|
||||
app.state.config.TTS_AZURE_SPEECH_REGION = AUDIO_TTS_AZURE_SPEECH_REGION
|
||||
app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT = AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT
|
||||
|
||||
# setting device type for whisper model
|
||||
whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
|
||||
log.info(f"whisper_device_type: {whisper_device_type}")
|
||||
|
||||
SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
|
||||
SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
def set_faster_whisper_model(model: str, auto_update: bool = False):
|
||||
if model and app.state.config.STT_ENGINE == "":
|
||||
from faster_whisper import WhisperModel
|
||||
|
||||
faster_whisper_kwargs = {
|
||||
"model_size_or_path": model,
|
||||
"device": whisper_device_type,
|
||||
"compute_type": "int8",
|
||||
"download_root": WHISPER_MODEL_DIR,
|
||||
"local_files_only": not auto_update,
|
||||
}
|
||||
|
||||
try:
|
||||
app.state.faster_whisper_model = WhisperModel(**faster_whisper_kwargs)
|
||||
except Exception:
|
||||
log.warning(
|
||||
"WhisperModel initialization failed, attempting download with local_files_only=False"
|
||||
)
|
||||
faster_whisper_kwargs["local_files_only"] = False
|
||||
app.state.faster_whisper_model = WhisperModel(**faster_whisper_kwargs)
|
||||
|
||||
else:
|
||||
app.state.faster_whisper_model = None
|
||||
|
||||
|
||||
class TTSConfigForm(BaseModel):
|
||||
OPENAI_API_BASE_URL: str
|
||||
OPENAI_API_KEY: str
|
||||
API_KEY: str
|
||||
ENGINE: str
|
||||
MODEL: str
|
||||
VOICE: str
|
||||
SPLIT_ON: str
|
||||
AZURE_SPEECH_REGION: str
|
||||
AZURE_SPEECH_OUTPUT_FORMAT: str
|
||||
|
||||
|
||||
class STTConfigForm(BaseModel):
|
||||
OPENAI_API_BASE_URL: str
|
||||
OPENAI_API_KEY: str
|
||||
ENGINE: str
|
||||
MODEL: str
|
||||
WHISPER_MODEL: str
|
||||
|
||||
|
||||
class AudioConfigUpdateForm(BaseModel):
|
||||
tts: TTSConfigForm
|
||||
stt: STTConfigForm
|
||||
|
||||
|
||||
from pydub import AudioSegment
|
||||
from pydub.utils import mediainfo
|
||||
|
||||
|
||||
def is_mp4_audio(file_path):
|
||||
"""Check if the given file is an MP4 audio file."""
|
||||
if not os.path.isfile(file_path):
|
||||
print(f"File not found: {file_path}")
|
||||
return False
|
||||
|
||||
info = mediainfo(file_path)
|
||||
if (
|
||||
info.get("codec_name") == "aac"
|
||||
and info.get("codec_type") == "audio"
|
||||
and info.get("codec_tag_string") == "mp4a"
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def convert_mp4_to_wav(file_path, output_path):
|
||||
"""Convert MP4 audio file to WAV format."""
|
||||
audio = AudioSegment.from_file(file_path, format="mp4")
|
||||
audio.export(output_path, format="wav")
|
||||
print(f"Converted {file_path} to {output_path}")
|
||||
|
||||
|
||||
@app.get("/config")
|
||||
async def get_audio_config(user=Depends(get_admin_user)):
|
||||
return {
|
||||
"tts": {
|
||||
"OPENAI_API_BASE_URL": app.state.config.TTS_OPENAI_API_BASE_URL,
|
||||
"OPENAI_API_KEY": app.state.config.TTS_OPENAI_API_KEY,
|
||||
"API_KEY": app.state.config.TTS_API_KEY,
|
||||
"ENGINE": app.state.config.TTS_ENGINE,
|
||||
"MODEL": app.state.config.TTS_MODEL,
|
||||
"VOICE": app.state.config.TTS_VOICE,
|
||||
"SPLIT_ON": app.state.config.TTS_SPLIT_ON,
|
||||
"AZURE_SPEECH_REGION": app.state.config.TTS_AZURE_SPEECH_REGION,
|
||||
"AZURE_SPEECH_OUTPUT_FORMAT": app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT,
|
||||
},
|
||||
"stt": {
|
||||
"OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
|
||||
"OPENAI_API_KEY": app.state.config.STT_OPENAI_API_KEY,
|
||||
"ENGINE": app.state.config.STT_ENGINE,
|
||||
"MODEL": app.state.config.STT_MODEL,
|
||||
"WHISPER_MODEL": app.state.config.WHISPER_MODEL,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@app.post("/config/update")
|
||||
async def update_audio_config(
|
||||
form_data: AudioConfigUpdateForm, user=Depends(get_admin_user)
|
||||
):
|
||||
app.state.config.TTS_OPENAI_API_BASE_URL = form_data.tts.OPENAI_API_BASE_URL
|
||||
app.state.config.TTS_OPENAI_API_KEY = form_data.tts.OPENAI_API_KEY
|
||||
app.state.config.TTS_API_KEY = form_data.tts.API_KEY
|
||||
app.state.config.TTS_ENGINE = form_data.tts.ENGINE
|
||||
app.state.config.TTS_MODEL = form_data.tts.MODEL
|
||||
app.state.config.TTS_VOICE = form_data.tts.VOICE
|
||||
app.state.config.TTS_SPLIT_ON = form_data.tts.SPLIT_ON
|
||||
app.state.config.TTS_AZURE_SPEECH_REGION = form_data.tts.AZURE_SPEECH_REGION
|
||||
app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT = (
|
||||
form_data.tts.AZURE_SPEECH_OUTPUT_FORMAT
|
||||
)
|
||||
|
||||
app.state.config.STT_OPENAI_API_BASE_URL = form_data.stt.OPENAI_API_BASE_URL
|
||||
app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY
|
||||
app.state.config.STT_ENGINE = form_data.stt.ENGINE
|
||||
app.state.config.STT_MODEL = form_data.stt.MODEL
|
||||
app.state.config.WHISPER_MODEL = form_data.stt.WHISPER_MODEL
|
||||
set_faster_whisper_model(form_data.stt.WHISPER_MODEL, WHISPER_MODEL_AUTO_UPDATE)
|
||||
|
||||
return {
|
||||
"tts": {
|
||||
"OPENAI_API_BASE_URL": app.state.config.TTS_OPENAI_API_BASE_URL,
|
||||
"OPENAI_API_KEY": app.state.config.TTS_OPENAI_API_KEY,
|
||||
"API_KEY": app.state.config.TTS_API_KEY,
|
||||
"ENGINE": app.state.config.TTS_ENGINE,
|
||||
"MODEL": app.state.config.TTS_MODEL,
|
||||
"VOICE": app.state.config.TTS_VOICE,
|
||||
"SPLIT_ON": app.state.config.TTS_SPLIT_ON,
|
||||
"AZURE_SPEECH_REGION": app.state.config.TTS_AZURE_SPEECH_REGION,
|
||||
"AZURE_SPEECH_OUTPUT_FORMAT": app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT,
|
||||
},
|
||||
"stt": {
|
||||
"OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
|
||||
"OPENAI_API_KEY": app.state.config.STT_OPENAI_API_KEY,
|
||||
"ENGINE": app.state.config.STT_ENGINE,
|
||||
"MODEL": app.state.config.STT_MODEL,
|
||||
"WHISPER_MODEL": app.state.config.WHISPER_MODEL,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@app.post("/speech")
|
||||
async def speech(request: Request, user=Depends(get_verified_user)):
|
||||
body = await request.body()
|
||||
name = hashlib.sha256(body).hexdigest()
|
||||
|
||||
file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
|
||||
file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")
|
||||
|
||||
# Check if the file already exists in the cache
|
||||
if file_path.is_file():
|
||||
return FileResponse(file_path)
|
||||
|
||||
if app.state.config.TTS_ENGINE == "openai":
|
||||
headers = {}
|
||||
headers["Authorization"] = f"Bearer {app.state.config.TTS_OPENAI_API_KEY}"
|
||||
headers["Content-Type"] = "application/json"
|
||||
|
||||
try:
|
||||
body = body.decode("utf-8")
|
||||
body = json.loads(body)
|
||||
body["model"] = app.state.config.TTS_MODEL
|
||||
body = json.dumps(body).encode("utf-8")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
r = None
|
||||
try:
|
||||
r = requests.post(
|
||||
url=f"{app.state.config.TTS_OPENAI_API_BASE_URL}/audio/speech",
|
||||
data=body,
|
||||
headers=headers,
|
||||
stream=True,
|
||||
)
|
||||
|
||||
r.raise_for_status()
|
||||
|
||||
# Save the streaming content to a file
|
||||
with open(file_path, "wb") as f:
|
||||
for chunk in r.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
|
||||
with open(file_body_path, "w") as f:
|
||||
json.dump(json.loads(body.decode("utf-8")), f)
|
||||
|
||||
# Return the saved file
|
||||
return FileResponse(file_path)
|
||||
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
error_detail = "Open WebUI: Server Connection Error"
|
||||
if r is not None:
|
||||
try:
|
||||
res = r.json()
|
||||
if "error" in res:
|
||||
error_detail = f"External: {res['error']['message']}"
|
||||
except Exception:
|
||||
error_detail = f"External: {e}"
|
||||
|
||||
raise HTTPException(
|
||||
status_code=r.status_code if r != None else 500,
|
||||
detail=error_detail,
|
||||
)
|
||||
|
||||
elif app.state.config.TTS_ENGINE == "elevenlabs":
|
||||
payload = None
|
||||
try:
|
||||
payload = json.loads(body.decode("utf-8"))
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
raise HTTPException(status_code=400, detail="Invalid JSON payload")
|
||||
|
||||
voice_id = payload.get("voice", "")
|
||||
|
||||
if voice_id not in get_available_voices():
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Invalid voice id",
|
||||
)
|
||||
|
||||
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
|
||||
|
||||
headers = {
|
||||
"Accept": "audio/mpeg",
|
||||
"Content-Type": "application/json",
|
||||
"xi-api-key": app.state.config.TTS_API_KEY,
|
||||
}
|
||||
|
||||
data = {
|
||||
"text": payload["input"],
|
||||
"model_id": app.state.config.TTS_MODEL,
|
||||
"voice_settings": {"stability": 0.5, "similarity_boost": 0.5},
|
||||
}
|
||||
|
||||
try:
|
||||
r = requests.post(url, json=data, headers=headers)
|
||||
|
||||
r.raise_for_status()
|
||||
|
||||
# Save the streaming content to a file
|
||||
with open(file_path, "wb") as f:
|
||||
for chunk in r.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
|
||||
with open(file_body_path, "w") as f:
|
||||
json.dump(json.loads(body.decode("utf-8")), f)
|
||||
|
||||
# Return the saved file
|
||||
return FileResponse(file_path)
|
||||
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
error_detail = "Open WebUI: Server Connection Error"
|
||||
if r is not None:
|
||||
try:
|
||||
res = r.json()
|
||||
if "error" in res:
|
||||
error_detail = f"External: {res['error']['message']}"
|
||||
except Exception:
|
||||
error_detail = f"External: {e}"
|
||||
|
||||
raise HTTPException(
|
||||
status_code=r.status_code if r != None else 500,
|
||||
detail=error_detail,
|
||||
)
|
||||
|
||||
elif app.state.config.TTS_ENGINE == "azure":
|
||||
payload = None
|
||||
try:
|
||||
payload = json.loads(body.decode("utf-8"))
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
raise HTTPException(status_code=400, detail="Invalid JSON payload")
|
||||
|
||||
region = app.state.config.TTS_AZURE_SPEECH_REGION
|
||||
language = app.state.config.TTS_VOICE
|
||||
locale = "-".join(app.state.config.TTS_VOICE.split("-")[:1])
|
||||
output_format = app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT
|
||||
url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/v1"
|
||||
|
||||
headers = {
|
||||
"Ocp-Apim-Subscription-Key": app.state.config.TTS_API_KEY,
|
||||
"Content-Type": "application/ssml+xml",
|
||||
"X-Microsoft-OutputFormat": output_format,
|
||||
}
|
||||
|
||||
data = f"""<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="{locale}">
|
||||
<voice name="{language}">{payload["input"]}</voice>
|
||||
</speak>"""
|
||||
|
||||
response = requests.post(url, headers=headers, data=data)
|
||||
|
||||
if response.status_code == 200:
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(response.content)
|
||||
return FileResponse(file_path)
|
||||
else:
|
||||
log.error(f"Error synthesizing speech - {response.reason}")
|
||||
raise HTTPException(
|
||||
status_code=500, detail=f"Error synthesizing speech - {response.reason}"
|
||||
)
|
||||
|
||||
|
||||
def transcribe(file_path):
|
||||
print("transcribe", file_path)
|
||||
filename = os.path.basename(file_path)
|
||||
file_dir = os.path.dirname(file_path)
|
||||
id = filename.split(".")[0]
|
||||
|
||||
if app.state.config.STT_ENGINE == "":
|
||||
if app.state.faster_whisper_model is None:
|
||||
set_faster_whisper_model(app.state.config.WHISPER_MODEL)
|
||||
|
||||
model = app.state.faster_whisper_model
|
||||
segments, info = model.transcribe(file_path, beam_size=5)
|
||||
log.info(
|
||||
"Detected language '%s' with probability %f"
|
||||
% (info.language, info.language_probability)
|
||||
)
|
||||
|
||||
transcript = "".join([segment.text for segment in list(segments)])
|
||||
data = {"text": transcript.strip()}
|
||||
|
||||
# save the transcript to a json file
|
||||
transcript_file = f"{file_dir}/{id}.json"
|
||||
with open(transcript_file, "w") as f:
|
||||
json.dump(data, f)
|
||||
|
||||
log.debug(data)
|
||||
return data
|
||||
elif app.state.config.STT_ENGINE == "openai":
|
||||
if is_mp4_audio(file_path):
|
||||
print("is_mp4_audio")
|
||||
os.rename(file_path, file_path.replace(".wav", ".mp4"))
|
||||
# Convert MP4 audio file to WAV format
|
||||
convert_mp4_to_wav(file_path.replace(".wav", ".mp4"), file_path)
|
||||
|
||||
headers = {"Authorization": f"Bearer {app.state.config.STT_OPENAI_API_KEY}"}
|
||||
|
||||
files = {"file": (filename, open(file_path, "rb"))}
|
||||
data = {"model": app.state.config.STT_MODEL}
|
||||
|
||||
log.debug(files, data)
|
||||
|
||||
r = None
|
||||
try:
|
||||
r = requests.post(
|
||||
url=f"{app.state.config.STT_OPENAI_API_BASE_URL}/audio/transcriptions",
|
||||
headers=headers,
|
||||
files=files,
|
||||
data=data,
|
||||
)
|
||||
|
||||
r.raise_for_status()
|
||||
|
||||
data = r.json()
|
||||
|
||||
# save the transcript to a json file
|
||||
transcript_file = f"{file_dir}/{id}.json"
|
||||
with open(transcript_file, "w") as f:
|
||||
json.dump(data, f)
|
||||
|
||||
print(data)
|
||||
return data
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
error_detail = "Open WebUI: Server Connection Error"
|
||||
if r is not None:
|
||||
try:
|
||||
res = r.json()
|
||||
if "error" in res:
|
||||
error_detail = f"External: {res['error']['message']}"
|
||||
except Exception:
|
||||
error_detail = f"External: {e}"
|
||||
|
||||
raise Exception(error_detail)
|
||||
|
||||
|
||||
@app.post("/transcriptions")
|
||||
def transcription(
|
||||
file: UploadFile = File(...),
|
||||
user=Depends(get_verified_user),
|
||||
):
|
||||
log.info(f"file.content_type: {file.content_type}")
|
||||
|
||||
if file.content_type not in ["audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a"]:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
|
||||
)
|
||||
|
||||
try:
|
||||
ext = file.filename.split(".")[-1]
|
||||
id = uuid.uuid4()
|
||||
|
||||
filename = f"{id}.{ext}"
|
||||
contents = file.file.read()
|
||||
|
||||
file_dir = f"{CACHE_DIR}/audio/transcriptions"
|
||||
os.makedirs(file_dir, exist_ok=True)
|
||||
file_path = f"{file_dir}/{filename}"
|
||||
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(contents)
|
||||
|
||||
try:
|
||||
if os.path.getsize(file_path) > MAX_FILE_SIZE: # file is bigger than 25MB
|
||||
log.debug(f"File size is larger than {MAX_FILE_SIZE_MB}MB")
|
||||
audio = AudioSegment.from_file(file_path)
|
||||
audio = audio.set_frame_rate(16000).set_channels(1) # Compress audio
|
||||
compressed_path = f"{file_dir}/{id}_compressed.opus"
|
||||
audio.export(compressed_path, format="opus", bitrate="32k")
|
||||
log.debug(f"Compressed audio to {compressed_path}")
|
||||
file_path = compressed_path
|
||||
|
||||
if (
|
||||
os.path.getsize(file_path) > MAX_FILE_SIZE
|
||||
): # Still larger than 25MB after compression
|
||||
log.debug(
|
||||
f"Compressed file size is still larger than {MAX_FILE_SIZE_MB}MB: {os.path.getsize(file_path)}"
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=ERROR_MESSAGES.FILE_TOO_LARGE(
|
||||
size=f"{MAX_FILE_SIZE_MB}MB"
|
||||
),
|
||||
)
|
||||
|
||||
data = transcribe(file_path)
|
||||
else:
|
||||
data = transcribe(file_path)
|
||||
|
||||
file_path = file_path.split("/")[-1]
|
||||
return {**data, "filename": file_path}
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=ERROR_MESSAGES.DEFAULT(e),
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=ERROR_MESSAGES.DEFAULT(e),
|
||||
)
|
||||
|
||||
|
||||
def get_available_models() -> list[dict]:
    if app.state.config.TTS_ENGINE == "openai":
        return [{"id": "tts-1"}, {"id": "tts-1-hd"}]
    elif app.state.config.TTS_ENGINE == "elevenlabs":
        headers = {
            "xi-api-key": app.state.config.TTS_API_KEY,
            "Content-Type": "application/json",
        }

        try:
            response = requests.get(
                "https://api.elevenlabs.io/v1/models", headers=headers, timeout=5
            )
            response.raise_for_status()
            models = response.json()
            return [
                {"name": model["name"], "id": model["model_id"]} for model in models
            ]
        except requests.RequestException as e:
            log.error(f"Error fetching voices: {str(e)}")
            return []


@app.get("/models")
async def get_models(user=Depends(get_verified_user)):
    return {"models": get_available_models()}


def get_available_voices() -> dict:
    """Returns {voice_id: voice_name} dict"""
    ret = {}
    if app.state.config.TTS_ENGINE == "openai":
        ret = {
            "alloy": "alloy",
            "echo": "echo",
            "fable": "fable",
            "onyx": "onyx",
            "nova": "nova",
            "shimmer": "shimmer",
        }
    elif app.state.config.TTS_ENGINE == "elevenlabs":
        try:
            ret = get_elevenlabs_voices()
        except Exception:
            # Avoided @lru_cache with exception
            pass
    elif app.state.config.TTS_ENGINE == "azure":
        try:
            region = app.state.config.TTS_AZURE_SPEECH_REGION
            url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/voices/list"
            headers = {"Ocp-Apim-Subscription-Key": app.state.config.TTS_API_KEY}

            response = requests.get(url, headers=headers)
            response.raise_for_status()
            voices = response.json()
            for voice in voices:
                ret[voice["ShortName"]] = (
                    f"{voice['DisplayName']} ({voice['ShortName']})"
                )
        except requests.RequestException as e:
            log.error(f"Error fetching voices: {str(e)}")

    return ret


@lru_cache
def get_elevenlabs_voices() -> dict:
    """
    Note, set the following in your .env file to use Elevenlabs:
    AUDIO_TTS_ENGINE=elevenlabs
    AUDIO_TTS_API_KEY=sk_...  # Your Elevenlabs API key
    AUDIO_TTS_VOICE=EXAVITQu4vr4xnSDxMaL  # From https://api.elevenlabs.io/v1/voices
    AUDIO_TTS_MODEL=eleven_multilingual_v2
    """
    headers = {
        "xi-api-key": app.state.config.TTS_API_KEY,
        "Content-Type": "application/json",
    }
    try:
        # TODO: Add retries
        response = requests.get("https://api.elevenlabs.io/v1/voices", headers=headers)
        response.raise_for_status()
        voices_data = response.json()

        voices = {}
        for voice in voices_data.get("voices", []):
            voices[voice["voice_id"]] = voice["name"]
    except requests.RequestException as e:
        # Avoid @lru_cache with exception
        log.error(f"Error fetching voices: {str(e)}")
        raise RuntimeError(f"Error fetching voices: {str(e)}")

    return voices
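get_elevenlabs_voices raises instead of returning an empty dict so that functools.lru_cache never memoizes a failed fetch: exceptions are not stored in the cache, so the next call retries. A toy illustration of that behavior:

from functools import lru_cache

calls = 0

@lru_cache
def flaky() -> str:
    global calls
    calls += 1
    if calls == 1:
        raise RuntimeError("transient failure")  # not cached; next call runs again
    return "ok"

try:
    flaky()
except RuntimeError:
    pass
print(flaky(), calls)  # "ok 2" -- the failure was retried, the success is cached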
@app.get("/voices")
async def get_voices(user=Depends(get_verified_user)):
    return {"voices": [{"id": k, "name": v} for k, v in get_available_voices().items()]}
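Assuming this router is mounted under an /audio prefix and bearer-token auth (both assumptions, not shown in this file), the endpoints above could be exercised like this:

import requests

BASE = "http://localhost:8080/audio/api/v1"  # mount path is an assumption
HEADERS = {"Authorization": "Bearer <token>"}

# list TTS voices exposed by the /voices route above
print(requests.get(f"{BASE}/voices", headers=HEADERS).json())

# upload a file to the transcription endpoint
with open("speech.wav", "rb") as fh:
    r = requests.post(
        f"{BASE}/transcriptions",
        headers=HEADERS,
        files={"file": ("speech.wav", fh, "audio/wav")},
    )
print(r.json())  # {"text": ..., "filename": ...}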
backend/open_webui/apps/images/main.py (new file, 597 lines)
@@ -0,0 +1,597 @@
import asyncio
import base64
import json
import logging
import mimetypes
import re
import uuid
from pathlib import Path
from typing import Optional

import requests
from open_webui.apps.images.utils.comfyui import (
    ComfyUIGenerateImageForm,
    ComfyUIWorkflow,
    comfyui_generate_image,
)
from open_webui.config import (
    AUTOMATIC1111_API_AUTH,
    AUTOMATIC1111_BASE_URL,
    AUTOMATIC1111_CFG_SCALE,
    AUTOMATIC1111_SAMPLER,
    AUTOMATIC1111_SCHEDULER,
    CACHE_DIR,
    COMFYUI_BASE_URL,
    COMFYUI_WORKFLOW,
    COMFYUI_WORKFLOW_NODES,
    CORS_ALLOW_ORIGIN,
    ENABLE_IMAGE_GENERATION,
    IMAGE_GENERATION_ENGINE,
    IMAGE_GENERATION_MODEL,
    IMAGE_SIZE,
    IMAGE_STEPS,
    IMAGES_OPENAI_API_BASE_URL,
    IMAGES_OPENAI_API_KEY,
    AppConfig,
)
from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
from fastapi import Depends, FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from open_webui.utils.utils import get_admin_user, get_verified_user

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["IMAGES"])

IMAGE_CACHE_DIR = Path(CACHE_DIR).joinpath("./image/generations/")
IMAGE_CACHE_DIR.mkdir(parents=True, exist_ok=True)

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.ENGINE = IMAGE_GENERATION_ENGINE
app.state.config.ENABLED = ENABLE_IMAGE_GENERATION

app.state.config.OPENAI_API_BASE_URL = IMAGES_OPENAI_API_BASE_URL
app.state.config.OPENAI_API_KEY = IMAGES_OPENAI_API_KEY

app.state.config.MODEL = IMAGE_GENERATION_MODEL

app.state.config.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
app.state.config.AUTOMATIC1111_API_AUTH = AUTOMATIC1111_API_AUTH
app.state.config.AUTOMATIC1111_CFG_SCALE = AUTOMATIC1111_CFG_SCALE
app.state.config.AUTOMATIC1111_SAMPLER = AUTOMATIC1111_SAMPLER
app.state.config.AUTOMATIC1111_SCHEDULER = AUTOMATIC1111_SCHEDULER
app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL
app.state.config.COMFYUI_WORKFLOW = COMFYUI_WORKFLOW
app.state.config.COMFYUI_WORKFLOW_NODES = COMFYUI_WORKFLOW_NODES

app.state.config.IMAGE_SIZE = IMAGE_SIZE
app.state.config.IMAGE_STEPS = IMAGE_STEPS


@app.get("/config")
async def get_config(request: Request, user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLED,
        "engine": app.state.config.ENGINE,
        "openai": {
            "OPENAI_API_BASE_URL": app.state.config.OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.OPENAI_API_KEY,
        },
        "automatic1111": {
            "AUTOMATIC1111_BASE_URL": app.state.config.AUTOMATIC1111_BASE_URL,
            "AUTOMATIC1111_API_AUTH": app.state.config.AUTOMATIC1111_API_AUTH,
            "AUTOMATIC1111_CFG_SCALE": app.state.config.AUTOMATIC1111_CFG_SCALE,
            "AUTOMATIC1111_SAMPLER": app.state.config.AUTOMATIC1111_SAMPLER,
            "AUTOMATIC1111_SCHEDULER": app.state.config.AUTOMATIC1111_SCHEDULER,
        },
        "comfyui": {
            "COMFYUI_BASE_URL": app.state.config.COMFYUI_BASE_URL,
            "COMFYUI_WORKFLOW": app.state.config.COMFYUI_WORKFLOW,
            "COMFYUI_WORKFLOW_NODES": app.state.config.COMFYUI_WORKFLOW_NODES,
        },
    }


class OpenAIConfigForm(BaseModel):
    OPENAI_API_BASE_URL: str
    OPENAI_API_KEY: str


class Automatic1111ConfigForm(BaseModel):
    AUTOMATIC1111_BASE_URL: str
    AUTOMATIC1111_API_AUTH: str
    AUTOMATIC1111_CFG_SCALE: Optional[str]
    AUTOMATIC1111_SAMPLER: Optional[str]
    AUTOMATIC1111_SCHEDULER: Optional[str]


class ComfyUIConfigForm(BaseModel):
    COMFYUI_BASE_URL: str
    COMFYUI_WORKFLOW: str
    COMFYUI_WORKFLOW_NODES: list[dict]


class ConfigForm(BaseModel):
    enabled: bool
    engine: str
    openai: OpenAIConfigForm
    automatic1111: Automatic1111ConfigForm
    comfyui: ComfyUIConfigForm


@app.post("/config/update")
async def update_config(form_data: ConfigForm, user=Depends(get_admin_user)):
    app.state.config.ENGINE = form_data.engine
    app.state.config.ENABLED = form_data.enabled

    app.state.config.OPENAI_API_BASE_URL = form_data.openai.OPENAI_API_BASE_URL
    app.state.config.OPENAI_API_KEY = form_data.openai.OPENAI_API_KEY

    app.state.config.AUTOMATIC1111_BASE_URL = (
        form_data.automatic1111.AUTOMATIC1111_BASE_URL
    )
    app.state.config.AUTOMATIC1111_API_AUTH = (
        form_data.automatic1111.AUTOMATIC1111_API_AUTH
    )

    app.state.config.AUTOMATIC1111_CFG_SCALE = (
        float(form_data.automatic1111.AUTOMATIC1111_CFG_SCALE)
        if form_data.automatic1111.AUTOMATIC1111_CFG_SCALE
        else None
    )
    app.state.config.AUTOMATIC1111_SAMPLER = (
        form_data.automatic1111.AUTOMATIC1111_SAMPLER
        if form_data.automatic1111.AUTOMATIC1111_SAMPLER
        else None
    )
    app.state.config.AUTOMATIC1111_SCHEDULER = (
        form_data.automatic1111.AUTOMATIC1111_SCHEDULER
        if form_data.automatic1111.AUTOMATIC1111_SCHEDULER
        else None
    )

    app.state.config.COMFYUI_BASE_URL = form_data.comfyui.COMFYUI_BASE_URL.strip("/")
    app.state.config.COMFYUI_WORKFLOW = form_data.comfyui.COMFYUI_WORKFLOW
    app.state.config.COMFYUI_WORKFLOW_NODES = form_data.comfyui.COMFYUI_WORKFLOW_NODES

    return {
        "enabled": app.state.config.ENABLED,
        "engine": app.state.config.ENGINE,
        "openai": {
            "OPENAI_API_BASE_URL": app.state.config.OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.OPENAI_API_KEY,
        },
        "automatic1111": {
            "AUTOMATIC1111_BASE_URL": app.state.config.AUTOMATIC1111_BASE_URL,
            "AUTOMATIC1111_API_AUTH": app.state.config.AUTOMATIC1111_API_AUTH,
            "AUTOMATIC1111_CFG_SCALE": app.state.config.AUTOMATIC1111_CFG_SCALE,
            "AUTOMATIC1111_SAMPLER": app.state.config.AUTOMATIC1111_SAMPLER,
            "AUTOMATIC1111_SCHEDULER": app.state.config.AUTOMATIC1111_SCHEDULER,
        },
        "comfyui": {
            "COMFYUI_BASE_URL": app.state.config.COMFYUI_BASE_URL,
            "COMFYUI_WORKFLOW": app.state.config.COMFYUI_WORKFLOW,
            "COMFYUI_WORKFLOW_NODES": app.state.config.COMFYUI_WORKFLOW_NODES,
        },
    }


def get_automatic1111_api_auth():
    if app.state.config.AUTOMATIC1111_API_AUTH is None:
        return ""
    else:
        auth1111_byte_string = app.state.config.AUTOMATIC1111_API_AUTH.encode("utf-8")
        auth1111_base64_encoded_bytes = base64.b64encode(auth1111_byte_string)
        auth1111_base64_encoded_string = auth1111_base64_encoded_bytes.decode("utf-8")
        return f"Basic {auth1111_base64_encoded_string}"
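get_automatic1111_api_auth is plain HTTP Basic auth: the configured "user:password" string is base64-encoded and prefixed with "Basic ". A standalone equivalent (the credential value is illustrative):

import base64

def basic_auth_header(credentials: str) -> str:
    # "user:pass" -> "Basic dXNlcjpwYXNz"
    return "Basic " + base64.b64encode(credentials.encode("utf-8")).decode("utf-8")

# requests.get(url, headers={"authorization": basic_auth_header("user:pass")})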
@app.get("/config/url/verify")
async def verify_url(user=Depends(get_admin_user)):
    if app.state.config.ENGINE == "automatic1111":
        try:
            r = requests.get(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options",
                headers={"authorization": get_automatic1111_api_auth()},
            )
            r.raise_for_status()
            return True
        except Exception:
            app.state.config.ENABLED = False
            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
    elif app.state.config.ENGINE == "comfyui":
        try:
            r = requests.get(url=f"{app.state.config.COMFYUI_BASE_URL}/object_info")
            r.raise_for_status()
            return True
        except Exception:
            app.state.config.ENABLED = False
            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
    else:
        return True


def set_image_model(model: str):
    log.info(f"Setting image model to {model}")
    app.state.config.MODEL = model
    if app.state.config.ENGINE in ["", "automatic1111"]:
        api_auth = get_automatic1111_api_auth()
        r = requests.get(
            url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options",
            headers={"authorization": api_auth},
        )
        options = r.json()
        if model != options["sd_model_checkpoint"]:
            options["sd_model_checkpoint"] = model
            r = requests.post(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options",
                json=options,
                headers={"authorization": api_auth},
            )
    return app.state.config.MODEL


def get_image_model():
    if app.state.config.ENGINE == "openai":
        return app.state.config.MODEL if app.state.config.MODEL else "dall-e-2"
    elif app.state.config.ENGINE == "comfyui":
        return app.state.config.MODEL if app.state.config.MODEL else ""
    elif app.state.config.ENGINE == "automatic1111" or app.state.config.ENGINE == "":
        try:
            r = requests.get(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/options",
                headers={"authorization": get_automatic1111_api_auth()},
            )
            options = r.json()
            return options["sd_model_checkpoint"]
        except Exception as e:
            app.state.config.ENABLED = False
            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


class ImageConfigForm(BaseModel):
    MODEL: str
    IMAGE_SIZE: str
    IMAGE_STEPS: int


@app.get("/image/config")
async def get_image_config(user=Depends(get_admin_user)):
    return {
        "MODEL": app.state.config.MODEL,
        "IMAGE_SIZE": app.state.config.IMAGE_SIZE,
        "IMAGE_STEPS": app.state.config.IMAGE_STEPS,
    }


@app.post("/image/config/update")
async def update_image_config(form_data: ImageConfigForm, user=Depends(get_admin_user)):
    set_image_model(form_data.MODEL)

    pattern = r"^\d+x\d+$"
    if re.match(pattern, form_data.IMAGE_SIZE):
        app.state.config.IMAGE_SIZE = form_data.IMAGE_SIZE
    else:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.INCORRECT_FORMAT(" (e.g., 512x512)."),
        )

    if form_data.IMAGE_STEPS >= 0:
        app.state.config.IMAGE_STEPS = form_data.IMAGE_STEPS
    else:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.INCORRECT_FORMAT(" (e.g., 50)."),
        )

    return {
        "MODEL": app.state.config.MODEL,
        "IMAGE_SIZE": app.state.config.IMAGE_SIZE,
        "IMAGE_STEPS": app.state.config.IMAGE_STEPS,
    }


@app.get("/models")
def get_models(user=Depends(get_verified_user)):
    try:
        if app.state.config.ENGINE == "openai":
            return [
                {"id": "dall-e-2", "name": "DALL·E 2"},
                {"id": "dall-e-3", "name": "DALL·E 3"},
            ]
        elif app.state.config.ENGINE == "comfyui":
            # TODO - get models from comfyui
            r = requests.get(url=f"{app.state.config.COMFYUI_BASE_URL}/object_info")
            info = r.json()

            workflow = json.loads(app.state.config.COMFYUI_WORKFLOW)
            model_node_id = None

            for node in app.state.config.COMFYUI_WORKFLOW_NODES:
                if node["type"] == "model":
                    if node["node_ids"]:
                        model_node_id = node["node_ids"][0]
                    break

            if model_node_id:
                model_list_key = None

                print(workflow[model_node_id]["class_type"])
                for key in info[workflow[model_node_id]["class_type"]]["input"][
                    "required"
                ]:
                    if "_name" in key:
                        model_list_key = key
                        break

                if model_list_key:
                    return list(
                        map(
                            lambda model: {"id": model, "name": model},
                            info[workflow[model_node_id]["class_type"]]["input"][
                                "required"
                            ][model_list_key][0],
                        )
                    )
            else:
                return list(
                    map(
                        lambda model: {"id": model, "name": model},
                        info["CheckpointLoaderSimple"]["input"]["required"][
                            "ckpt_name"
                        ][0],
                    )
                )
        elif (
            app.state.config.ENGINE == "automatic1111" or app.state.config.ENGINE == ""
        ):
            r = requests.get(
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/sd-models",
                headers={"authorization": get_automatic1111_api_auth()},
            )
            models = r.json()
            return list(
                map(
                    lambda model: {"id": model["title"], "name": model["model_name"]},
                    models,
                )
            )
    except Exception as e:
        app.state.config.ENABLED = False
        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))


class GenerateImageForm(BaseModel):
    model: Optional[str] = None
    prompt: str
    size: Optional[str] = None
    n: int = 1
    negative_prompt: Optional[str] = None


def save_b64_image(b64_str):
    try:
        image_id = str(uuid.uuid4())

        if "," in b64_str:
            header, encoded = b64_str.split(",", 1)
            mime_type = header.split(";")[0]

            img_data = base64.b64decode(encoded)
            image_format = mimetypes.guess_extension(mime_type)

            image_filename = f"{image_id}{image_format}"
            file_path = IMAGE_CACHE_DIR / f"{image_filename}"
            with open(file_path, "wb") as f:
                f.write(img_data)
            return image_filename
        else:
            image_filename = f"{image_id}.png"
            file_path = IMAGE_CACHE_DIR.joinpath(image_filename)

            img_data = base64.b64decode(b64_str)

            # Write the image data to a file
            with open(file_path, "wb") as f:
                f.write(img_data)
            return image_filename

    except Exception as e:
        log.exception(f"Error saving image: {e}")
        return None


def save_url_image(url):
    image_id = str(uuid.uuid4())
    try:
        r = requests.get(url)
        r.raise_for_status()
        if r.headers["content-type"].split("/")[0] == "image":
            mime_type = r.headers["content-type"]
            image_format = mimetypes.guess_extension(mime_type)

            if not image_format:
                raise ValueError("Could not determine image type from MIME type")

            image_filename = f"{image_id}{image_format}"

            file_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}")
            with open(file_path, "wb") as image_file:
                for chunk in r.iter_content(chunk_size=8192):
                    image_file.write(chunk)
            return image_filename
        else:
            log.error("Url does not point to an image.")
            return None

    except Exception as e:
        log.exception(f"Error saving image: {e}")
        return None
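save_b64_image accepts either a bare base64 string or a full data URL. A sketch of the data-URL case (the payload is illustrative); note that mimetypes.guess_extension expects a bare MIME type such as "image/png", so any "data:" scheme prefix has to be stripped first:

import base64
import mimetypes

data_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg=="  # illustrative payload
header, encoded = data_url.split(",", 1)
mime_type = header.split(";")[0].removeprefix("data:")  # "image/png"
ext = mimetypes.guess_extension(mime_type)              # ".png"
img_data = base64.b64decode(encoded)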
@app.post("/generations")
async def image_generations(
    form_data: GenerateImageForm,
    user=Depends(get_verified_user),
):
    width, height = tuple(map(int, app.state.config.IMAGE_SIZE.split("x")))

    r = None
    try:
        if app.state.config.ENGINE == "openai":
            headers = {}
            headers["Authorization"] = f"Bearer {app.state.config.OPENAI_API_KEY}"
            headers["Content-Type"] = "application/json"

            data = {
                "model": (
                    app.state.config.MODEL
                    if app.state.config.MODEL != ""
                    else "dall-e-2"
                ),
                "prompt": form_data.prompt,
                "n": form_data.n,
                "size": (
                    form_data.size if form_data.size else app.state.config.IMAGE_SIZE
                ),
                "response_format": "b64_json",
            }

            # Use asyncio.to_thread for the requests.post call
            r = await asyncio.to_thread(
                requests.post,
                url=f"{app.state.config.OPENAI_API_BASE_URL}/images/generations",
                json=data,
                headers=headers,
            )

            r.raise_for_status()
            res = r.json()

            images = []

            for image in res["data"]:
                image_filename = save_b64_image(image["b64_json"])
                images.append({"url": f"/cache/image/generations/{image_filename}"})
                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json")

                with open(file_body_path, "w") as f:
                    json.dump(data, f)

            return images

        elif app.state.config.ENGINE == "comfyui":
            data = {
                "prompt": form_data.prompt,
                "width": width,
                "height": height,
                "n": form_data.n,
            }

            if app.state.config.IMAGE_STEPS is not None:
                data["steps"] = app.state.config.IMAGE_STEPS

            if form_data.negative_prompt is not None:
                data["negative_prompt"] = form_data.negative_prompt

            form_data = ComfyUIGenerateImageForm(
                **{
                    "workflow": ComfyUIWorkflow(
                        **{
                            "workflow": app.state.config.COMFYUI_WORKFLOW,
                            "nodes": app.state.config.COMFYUI_WORKFLOW_NODES,
                        }
                    ),
                    **data,
                }
            )
            res = await comfyui_generate_image(
                app.state.config.MODEL,
                form_data,
                user.id,
                app.state.config.COMFYUI_BASE_URL,
            )
            log.debug(f"res: {res}")

            images = []

            for image in res["data"]:
                image_filename = save_url_image(image["url"])
                images.append({"url": f"/cache/image/generations/{image_filename}"})
                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json")

                with open(file_body_path, "w") as f:
                    json.dump(form_data.model_dump(exclude_none=True), f)

            log.debug(f"images: {images}")
            return images
        elif (
            app.state.config.ENGINE == "automatic1111" or app.state.config.ENGINE == ""
        ):
            if form_data.model:
                set_image_model(form_data.model)

            data = {
                "prompt": form_data.prompt,
                "batch_size": form_data.n,
                "width": width,
                "height": height,
            }

            if app.state.config.IMAGE_STEPS is not None:
                data["steps"] = app.state.config.IMAGE_STEPS

            if form_data.negative_prompt is not None:
                data["negative_prompt"] = form_data.negative_prompt

            if app.state.config.AUTOMATIC1111_CFG_SCALE:
                data["cfg_scale"] = app.state.config.AUTOMATIC1111_CFG_SCALE

            if app.state.config.AUTOMATIC1111_SAMPLER:
                data["sampler_name"] = app.state.config.AUTOMATIC1111_SAMPLER

            if app.state.config.AUTOMATIC1111_SCHEDULER:
                data["scheduler"] = app.state.config.AUTOMATIC1111_SCHEDULER

            # Use asyncio.to_thread for the requests.post call
            r = await asyncio.to_thread(
                requests.post,
                url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/txt2img",
                json=data,
                headers={"authorization": get_automatic1111_api_auth()},
            )

            res = r.json()
            log.debug(f"res: {res}")

            images = []

            for image in res["images"]:
                image_filename = save_b64_image(image)
                images.append({"url": f"/cache/image/generations/{image_filename}"})
                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json")

                with open(file_body_path, "w") as f:
                    json.dump({**data, "info": res["info"]}, f)

            return images
    except Exception as e:
        error = e
        if r is not None:
            data = r.json()
            if "error" in data:
                error = data["error"]["message"]
        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))
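Assuming this router is mounted under an /images prefix and bearer-token auth (both assumptions), the generation endpoint above can be exercised with:

import requests

r = requests.post(
    "http://localhost:8080/images/api/v1/generations",  # mount path is an assumption
    headers={"Authorization": "Bearer <token>"},
    json={"prompt": "a watercolor lighthouse at dusk", "n": 1},
)
print(r.json())  # [{"url": "/cache/image/generations/<uuid>.png"}]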
backend/open_webui/apps/images/utils/comfyui.py (new file, 186 lines)
@@ -0,0 +1,186 @@
import asyncio
import json
import logging
import random
import urllib.parse
import urllib.request
from typing import Optional

import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
from open_webui.env import SRC_LOG_LEVELS
from pydantic import BaseModel

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["COMFYUI"])

default_headers = {"User-Agent": "Mozilla/5.0"}


def queue_prompt(prompt, client_id, base_url):
    log.info("queue_prompt")
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode("utf-8")
    log.debug(f"queue_prompt data: {data}")
    try:
        req = urllib.request.Request(
            f"{base_url}/prompt", data=data, headers=default_headers
        )
        response = urllib.request.urlopen(req).read()
        return json.loads(response)
    except Exception as e:
        log.exception(f"Error while queuing prompt: {e}")
        raise e


def get_image(filename, subfolder, folder_type, base_url):
    log.info("get_image")
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    req = urllib.request.Request(
        f"{base_url}/view?{url_values}", headers=default_headers
    )
    with urllib.request.urlopen(req) as response:
        return response.read()


def get_image_url(filename, subfolder, folder_type, base_url):
    log.info("get_image")
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    return f"{base_url}/view?{url_values}"


def get_history(prompt_id, base_url):
    log.info("get_history")

    req = urllib.request.Request(
        f"{base_url}/history/{prompt_id}", headers=default_headers
    )
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())


def get_images(ws, prompt, client_id, base_url):
    prompt_id = queue_prompt(prompt, client_id, base_url)["prompt_id"]
    output_images = []
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message["type"] == "executing":
                data = message["data"]
                if data["node"] is None and data["prompt_id"] == prompt_id:
                    break  # Execution is done
        else:
            continue  # previews are binary data

    history = get_history(prompt_id, base_url)[prompt_id]
    # Collect an image URL from each output node (a single pass over the dict)
    for node_id in history["outputs"]:
        node_output = history["outputs"][node_id]
        if "images" in node_output:
            for image in node_output["images"]:
                url = get_image_url(
                    image["filename"], image["subfolder"], image["type"], base_url
                )
                output_images.append({"url": url})
    return {"data": output_images}


class ComfyUINodeInput(BaseModel):
    type: Optional[str] = None
    node_ids: list[str] = []
    key: Optional[str] = "text"
    value: Optional[str] = None


class ComfyUIWorkflow(BaseModel):
    workflow: str
    nodes: list[ComfyUINodeInput]


class ComfyUIGenerateImageForm(BaseModel):
    workflow: ComfyUIWorkflow

    prompt: str
    negative_prompt: Optional[str] = None
    width: int
    height: int
    n: int = 1

    steps: Optional[int] = None
    seed: Optional[int] = None


async def comfyui_generate_image(
    model: str, payload: ComfyUIGenerateImageForm, client_id, base_url
):
    ws_url = base_url.replace("http://", "ws://").replace("https://", "wss://")
    workflow = json.loads(payload.workflow.workflow)

    for node in payload.workflow.nodes:
        if node.type:
            if node.type == "model":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][node.key] = model
            elif node.type == "prompt":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][
                        node.key if node.key else "text"
                    ] = payload.prompt
            elif node.type == "negative_prompt":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][
                        node.key if node.key else "text"
                    ] = payload.negative_prompt
            elif node.type == "width":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][
                        node.key if node.key else "width"
                    ] = payload.width
            elif node.type == "height":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][
                        node.key if node.key else "height"
                    ] = payload.height
            elif node.type == "n":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][
                        node.key if node.key else "batch_size"
                    ] = payload.n
            elif node.type == "steps":
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][
                        node.key if node.key else "steps"
                    ] = payload.steps
            elif node.type == "seed":
                seed = (
                    payload.seed
                    if payload.seed
                    else random.randint(0, 18446744073709551614)
                )
                for node_id in node.node_ids:
                    workflow[node_id]["inputs"][node.key] = seed
        else:
            for node_id in node.node_ids:
                workflow[node_id]["inputs"][node.key] = node.value

    try:
        ws = websocket.WebSocket()
        ws.connect(f"{ws_url}/ws?clientId={client_id}")
        log.info("WebSocket connection established.")
    except Exception as e:
        log.exception(f"Failed to connect to WebSocket server: {e}")
        return None

    try:
        log.info("Sending workflow to WebSocket server.")
        log.info(f"Workflow: {workflow}")
        images = await asyncio.to_thread(get_images, ws, workflow, client_id, base_url)
    except Exception as e:
        log.exception(f"Error while receiving images: {e}")
        images = None

    ws.close()

    return images
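comfyui_generate_image patches the exported workflow JSON in place, using the nodes list to find which node inputs to overwrite. A hedged example of that mapping (node ids and keys depend entirely on the workflow you export from ComfyUI):

nodes = [
    {"type": "model", "node_ids": ["4"], "key": "ckpt_name"},   # e.g. CheckpointLoaderSimple
    {"type": "prompt", "node_ids": ["6"], "key": "text"},       # positive CLIPTextEncode
    {"type": "negative_prompt", "node_ids": ["7"], "key": "text"},
    {"type": "seed", "node_ids": ["3"], "key": "seed"},         # e.g. KSampler
]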
@@ -1,57 +1,46 @@
from fastapi import (
    FastAPI,
    Request,
    Response,
    HTTPException,
    Depends,
    status,
    UploadFile,
    File,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.concurrency import run_in_threadpool

from pydantic import BaseModel, ConfigDict

import os
import re
import copy
import random
import requests
import json
import uuid
import aiohttp
import asyncio
import json
import logging
import os
import random
import re
import time
from typing import Optional, Union
from urllib.parse import urlparse
from typing import Optional, List, Union

from starlette.background import BackgroundTask

from apps.webui.models.models import Models
from apps.webui.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)


from config import (
    SRC_LOG_LEVELS,
    OLLAMA_BASE_URLS,
    ENABLE_OLLAMA_API,
import aiohttp
import requests
from open_webui.apps.webui.models.models import Models
from open_webui.config import (
    CORS_ALLOW_ORIGIN,
    ENABLE_MODEL_FILTER,
    ENABLE_OLLAMA_API,
    MODEL_FILTER_LIST,
    OLLAMA_BASE_URLS,
    UPLOAD_DIR,
    AppConfig,
)
from utils.misc import calculate_sha256
from open_webui.env import AIOHTTP_CLIENT_TIMEOUT


from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, ConfigDict
from starlette.background import BackgroundTask


from open_webui.utils.misc import (
    calculate_sha256,
)
from open_webui.utils.payload import (
    apply_model_params_to_body_ollama,
    apply_model_params_to_body_openai,
    apply_model_system_prompt_to_body,
)
from open_webui.utils.utils import get_admin_user, get_verified_user

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
@@ -59,7 +48,7 @@ log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
@@ -118,7 +107,7 @@ async def get_ollama_api_urls(user=Depends(get_admin_user)):


class UrlUpdateForm(BaseModel):
    urls: List[str]
    urls: list[str]


@app.post("/urls/update")
@@ -130,7 +119,7 @@ async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin


async def fetch_url(url):
    timeout = aiohttp.ClientTimeout(total=5)
    timeout = aiohttp.ClientTimeout(total=3)
    try:
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.get(url) as response:
@@ -151,19 +140,38 @@ async def cleanup_response(
        await session.close()


async def post_streaming_url(url: str, payload: str):
async def post_streaming_url(
    url: str, payload: Union[str, bytes], stream: bool = True, content_type=None
):
    r = None
    try:
        session = aiohttp.ClientSession(trust_env=True)
        r = await session.post(url, data=payload)
        session = aiohttp.ClientSession(
            trust_env=True, timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
        )
        r = await session.post(
            url,
            data=payload,
            headers={"Content-Type": "application/json"},
        )
        r.raise_for_status()

        return StreamingResponse(
            r.content,
            status_code=r.status,
            headers=dict(r.headers),
            background=BackgroundTask(cleanup_response, response=r, session=session),
        )
        if stream:
            headers = dict(r.headers)
            if content_type:
                headers["Content-Type"] = content_type
            return StreamingResponse(
                r.content,
                status_code=r.status,
                headers=headers,
                background=BackgroundTask(
                    cleanup_response, response=r, session=session
                ),
            )
        else:
            res = await r.json()
            await cleanup_response(r, session)
            return res

    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
@@ -171,7 +179,7 @@ async def post_streaming_url(url: str, payload: str):
                res = await r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

    raise HTTPException(
@@ -196,9 +204,6 @@ def merge_models_lists(model_lists):
    return list(merged_models.values())


# user=Depends(get_current_user)


async def get_all_models():
    log.info("get_all_models()")

@@ -229,7 +234,7 @@ async def get_all_models():
async def get_ollama_tags(
    url_idx: Optional[int] = None, user=Depends(get_verified_user)
):
    if url_idx == None:
    if url_idx is None:
        models = await get_all_models()

        if app.state.config.ENABLE_MODEL_FILTER:
@@ -260,7 +265,7 @@ async def get_ollama_tags(
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

    raise HTTPException(
@@ -273,8 +278,7 @@ async def get_ollama_tags(
@app.get("/api/version/{url_idx}")
async def get_ollama_versions(url_idx: Optional[int] = None):
    if app.state.config.ENABLE_OLLAMA_API:
        if url_idx == None:

        if url_idx is None:
            # returns lowest version
            tasks = [
                fetch_url(f"{url}/api/version")
@@ -314,7 +318,7 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

    raise HTTPException(
@@ -337,8 +341,6 @@ async def pull_model(
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    # Admin should be able to pull models from any source
    payload = {**form_data.model_dump(exclude_none=True), "insecure": True}

@@ -358,7 +360,7 @@ async def push_model(
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx == None:
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
@@ -408,7 +410,7 @@ async def copy_model(
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx == None:
    if url_idx is None:
        if form_data.source in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.source]["urls"][0]
        else:
@@ -419,13 +421,14 @@ async def copy_model(

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")
    r = requests.request(
        method="POST",
        url=f"{url}/api/copy",
        headers={"Content-Type": "application/json"},
        data=form_data.model_dump_json(exclude_none=True).encode(),
    )

    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/copy",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")
@@ -439,7 +442,7 @@ async def copy_model(
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
@@ -455,7 +458,7 @@ async def delete_model(
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx == None:
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
@@ -467,12 +470,13 @@ async def delete_model(
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = requests.request(
        method="DELETE",
        url=f"{url}/api/delete",
        headers={"Content-Type": "application/json"},
        data=form_data.model_dump_json(exclude_none=True).encode(),
    )
    try:
        r = requests.request(
            method="DELETE",
            url=f"{url}/api/delete",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")
@@ -486,7 +490,7 @@ async def delete_model(
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
@@ -507,12 +511,13 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_us
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = requests.request(
        method="POST",
        url=f"{url}/api/show",
        headers={"Content-Type": "application/json"},
        data=form_data.model_dump_json(exclude_none=True).encode(),
    )
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/show",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
@@ -524,7 +529,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_us
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
@@ -540,6 +545,24 @@ class GenerateEmbeddingsForm(BaseModel):
    keep_alive: Optional[Union[int, str]] = None


class GenerateEmbedForm(BaseModel):
    model: str
    input: list[str] | str
    truncate: Optional[bool] = None
    options: Optional[dict] = None
    keep_alive: Optional[Union[int, str]] = None
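The new /api/embed route below proxies Ollama's batch embedding endpoint, which takes a single string or a list of strings. A hedged example request body (the model name is illustrative):

payload = {
    "model": "nomic-embed-text",  # illustrative model name
    "input": ["first passage", "second passage"],
}
# POST this JSON to the route below; the proxied Ollama response carries
# an "embeddings" list, one vector per input item.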
@app.post("/api/embed")
|
||||
@app.post("/api/embed/{url_idx}")
|
||||
async def generate_embeddings(
|
||||
form_data: GenerateEmbedForm,
|
||||
url_idx: Optional[int] = None,
|
||||
user=Depends(get_verified_user),
|
||||
):
|
||||
return generate_ollama_batch_embeddings(form_data, url_idx)
|
||||
|
||||
|
||||
@app.post("/api/embeddings")
|
||||
@app.post("/api/embeddings/{url_idx}")
|
||||
async def generate_embeddings(
|
||||
@@ -547,7 +570,16 @@ async def generate_embeddings(
|
||||
url_idx: Optional[int] = None,
|
||||
user=Depends(get_verified_user),
|
||||
):
|
||||
if url_idx == None:
|
||||
return generate_ollama_embeddings(form_data=form_data, url_idx=url_idx)
|
||||
|
||||
|
||||
def generate_ollama_embeddings(
|
||||
form_data: GenerateEmbeddingsForm,
|
||||
url_idx: Optional[int] = None,
|
||||
):
|
||||
log.info(f"generate_ollama_embeddings {form_data}")
|
||||
|
||||
if url_idx is None:
|
||||
model = form_data.model
|
||||
|
||||
if ":" not in model:
|
||||
@@ -564,15 +596,23 @@ async def generate_embeddings(
|
||||
url = app.state.config.OLLAMA_BASE_URLS[url_idx]
|
||||
log.info(f"url: {url}")
|
||||
|
||||
r = requests.request(
|
||||
method="POST",
|
||||
url=f"{url}/api/embeddings",
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=form_data.model_dump_json(exclude_none=True).encode(),
|
||||
)
|
||||
try:
|
||||
r = requests.request(
|
||||
method="POST",
|
||||
url=f"{url}/api/embeddings",
|
||||
data=form_data.model_dump_json(exclude_none=True).encode(),
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
return r.json()
|
||||
data = r.json()
|
||||
|
||||
log.info(f"generate_ollama_embeddings {data}")
|
||||
|
||||
if "embedding" in data:
|
||||
return data
|
||||
else:
|
||||
raise Exception("Something went wrong :/")
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
error_detail = "Open WebUI: Server Connection Error"
|
||||
@@ -581,7 +621,7 @@ async def generate_embeddings(
|
||||
res = r.json()
|
||||
if "error" in res:
|
||||
error_detail = f"Ollama: {res['error']}"
|
||||
except:
|
||||
except Exception:
|
||||
error_detail = f"Ollama: {e}"
|
||||
|
||||
raise HTTPException(
|
||||
@@ -590,14 +630,13 @@ async def generate_embeddings(
|
||||
)
|
||||
|
||||
|
||||
def generate_ollama_embeddings(
|
||||
form_data: GenerateEmbeddingsForm,
|
||||
def generate_ollama_batch_embeddings(
|
||||
form_data: GenerateEmbedForm,
|
||||
url_idx: Optional[int] = None,
|
||||
):
|
||||
log.info(f"generate_ollama_batch_embeddings {form_data}")
|
||||
|
||||
log.info(f"generate_ollama_embeddings {form_data}")
|
||||
|
||||
if url_idx == None:
|
||||
if url_idx is None:
|
||||
model = form_data.model
|
||||
|
||||
if ":" not in model:
|
||||
@@ -614,22 +653,23 @@ def generate_ollama_embeddings(
|
||||
url = app.state.config.OLLAMA_BASE_URLS[url_idx]
|
||||
log.info(f"url: {url}")
|
||||
|
||||
r = requests.request(
|
||||
method="POST",
|
||||
url=f"{url}/api/embed",
|
||||
headers={"Content-Type": "application/json"},
|
||||
data=form_data.model_dump_json(exclude_none=True).encode(),
|
||||
)
|
||||
try:
|
||||
r = requests.request(
|
||||
method="POST",
|
||||
url=f"{url}/api/embeddings",
|
||||
data=form_data.model_dump_json(exclude_none=True).encode(),
|
||||
)
|
||||
r.raise_for_status()
|
||||
|
||||
data = r.json()
|
||||
|
||||
log.info(f"generate_ollama_embeddings {data}")
|
||||
log.info(f"generate_ollama_batch_embeddings {data}")
|
||||
|
||||
if "embedding" in data:
|
||||
return data["embedding"]
|
||||
if "embeddings" in data:
|
||||
return data
|
||||
else:
|
||||
raise "Something went wrong :/"
|
||||
raise Exception("Something went wrong :/")
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
error_detail = "Open WebUI: Server Connection Error"
|
||||
@@ -638,16 +678,16 @@ def generate_ollama_embeddings(
|
||||
res = r.json()
|
||||
if "error" in res:
|
||||
error_detail = f"Ollama: {res['error']}"
|
||||
except:
|
||||
except Exception:
|
||||
error_detail = f"Ollama: {e}"
|
||||
|
||||
raise error_detail
|
||||
raise Exception(error_detail)
|
||||
|
||||
|
||||
class GenerateCompletionForm(BaseModel):
|
||||
model: str
|
||||
prompt: str
|
||||
images: Optional[List[str]] = None
|
||||
images: Optional[list[str]] = None
|
||||
format: Optional[str] = None
|
||||
options: Optional[dict] = None
|
||||
system: Optional[str] = None
|
||||
@@ -665,8 +705,7 @@ async def generate_completion(
|
||||
url_idx: Optional[int] = None,
|
||||
user=Depends(get_verified_user),
|
||||
):
|
||||
|
||||
if url_idx == None:
|
||||
if url_idx is None:
|
||||
model = form_data.model
|
||||
|
||||
if ":" not in model:
|
||||
@@ -691,158 +730,83 @@ async def generate_completion(
|
||||
class ChatMessage(BaseModel):
|
||||
role: str
|
||||
content: str
|
||||
images: Optional[List[str]] = None
|
||||
images: Optional[list[str]] = None
|
||||
|
||||
|
||||
class GenerateChatCompletionForm(BaseModel):
|
||||
model: str
|
||||
messages: List[ChatMessage]
|
||||
messages: list[ChatMessage]
|
||||
format: Optional[str] = None
|
||||
options: Optional[dict] = None
|
||||
template: Optional[str] = None
|
||||
stream: Optional[bool] = None
|
||||
stream: Optional[bool] = True
|
||||
keep_alive: Optional[Union[int, str]] = None
|
||||
|
||||
|
||||
def get_ollama_url(url_idx: Optional[int], model: str):
|
||||
if url_idx is None:
|
||||
if model not in app.state.MODELS:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=ERROR_MESSAGES.MODEL_NOT_FOUND(model),
|
||||
)
|
||||
url_idx = random.choice(app.state.MODELS[model]["urls"])
|
||||
url = app.state.config.OLLAMA_BASE_URLS[url_idx]
|
||||
return url
|
||||
|
||||
|
||||
@app.post("/api/chat")
|
||||
@app.post("/api/chat/{url_idx}")
|
||||
async def generate_chat_completion(
|
||||
form_data: GenerateChatCompletionForm,
|
||||
url_idx: Optional[int] = None,
|
||||
user=Depends(get_verified_user),
|
||||
bypass_filter: Optional[bool] = False,
|
||||
):
|
||||
|
||||
log.debug(
|
||||
"form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
|
||||
form_data.model_dump_json(exclude_none=True).encode()
|
||||
)
|
||||
)
|
||||
|
||||
payload = {
|
||||
**form_data.model_dump(exclude_none=True),
|
||||
}
|
||||
payload = {**form_data.model_dump(exclude_none=True)}
|
||||
log.debug(f"generate_chat_completion() - 1.payload = {payload}")
|
||||
if "metadata" in payload:
|
||||
del payload["metadata"]
|
||||
|
||||
model_id = form_data.model
|
||||
|
||||
if not bypass_filter and app.state.config.ENABLE_MODEL_FILTER:
|
||||
if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Model not found",
|
||||
)
|
||||
|
||||
model_info = Models.get_model_by_id(model_id)
|
||||
|
||||
if model_info:
|
||||
if model_info.base_model_id:
|
||||
payload["model"] = model_info.base_model_id
|
||||
|
||||
model_info.params = model_info.params.model_dump()
|
||||
params = model_info.params.model_dump()
|
||||
|
||||
if model_info.params:
|
||||
payload["options"] = {}
|
||||
if params:
|
||||
if payload.get("options") is None:
|
||||
payload["options"] = {}
|
||||
|
||||
if model_info.params.get("mirostat", None):
|
||||
payload["options"]["mirostat"] = model_info.params.get("mirostat", None)
|
||||
|
||||
if model_info.params.get("mirostat_eta", None):
|
||||
payload["options"]["mirostat_eta"] = model_info.params.get(
|
||||
"mirostat_eta", None
|
||||
)
|
||||
|
||||
if model_info.params.get("mirostat_tau", None):
|
||||
|
||||
payload["options"]["mirostat_tau"] = model_info.params.get(
|
||||
"mirostat_tau", None
|
||||
)
|
||||
|
||||
if model_info.params.get("num_ctx", None):
|
||||
payload["options"]["num_ctx"] = model_info.params.get("num_ctx", None)
|
||||
|
||||
if model_info.params.get("repeat_last_n", None):
|
||||
payload["options"]["repeat_last_n"] = model_info.params.get(
|
||||
"repeat_last_n", None
|
||||
)
|
||||
|
||||
if model_info.params.get("frequency_penalty", None):
|
||||
payload["options"]["repeat_penalty"] = model_info.params.get(
|
||||
"frequency_penalty", None
|
||||
)
|
||||
|
||||
if model_info.params.get("temperature", None) is not None:
|
||||
payload["options"]["temperature"] = model_info.params.get(
|
||||
"temperature", None
|
||||
)
|
||||
|
||||
if model_info.params.get("seed", None):
|
||||
payload["options"]["seed"] = model_info.params.get("seed", None)
|
||||
|
||||
if model_info.params.get("stop", None):
|
||||
payload["options"]["stop"] = (
|
||||
[
|
||||
bytes(stop, "utf-8").decode("unicode_escape")
|
||||
for stop in model_info.params["stop"]
|
||||
]
|
||||
if model_info.params.get("stop", None)
|
||||
else None
|
||||
)
|
||||
|
||||
if model_info.params.get("tfs_z", None):
|
||||
payload["options"]["tfs_z"] = model_info.params.get("tfs_z", None)
|
||||
|
||||
if model_info.params.get("max_tokens", None):
|
||||
payload["options"]["num_predict"] = model_info.params.get(
|
||||
"max_tokens", None
|
||||
)
|
||||
|
||||
if model_info.params.get("top_k", None):
|
||||
payload["options"]["top_k"] = model_info.params.get("top_k", None)
|
||||
|
||||
if model_info.params.get("top_p", None):
|
||||
payload["options"]["top_p"] = model_info.params.get("top_p", None)
|
||||
|
||||
if model_info.params.get("use_mmap", None):
|
||||
payload["options"]["use_mmap"] = model_info.params.get("use_mmap", None)
|
||||
|
||||
if model_info.params.get("use_mlock", None):
|
||||
payload["options"]["use_mlock"] = model_info.params.get(
|
||||
"use_mlock", None
|
||||
)
|
||||
|
||||
if model_info.params.get("num_thread", None):
|
||||
payload["options"]["num_thread"] = model_info.params.get(
|
||||
"num_thread", None
|
||||
)
|
||||
|
||||
if model_info.params.get("system", None):
|
||||
# Check if the payload already has a system message
|
||||
# If not, add a system message to the payload
|
||||
if payload.get("messages"):
|
||||
for message in payload["messages"]:
|
||||
if message.get("role") == "system":
|
||||
message["content"] = (
|
||||
model_info.params.get("system", None) + message["content"]
|
||||
)
|
||||
break
|
||||
else:
|
||||
payload["messages"].insert(
|
||||
0,
|
||||
{
|
||||
"role": "system",
|
||||
"content": model_info.params.get("system", None),
|
||||
},
|
||||
)
|
||||
|
||||
if url_idx == None:
|
||||
if ":" not in payload["model"]:
|
||||
payload["model"] = f"{payload['model']}:latest"
|
||||
|
||||
if payload["model"] in app.state.MODELS:
|
||||
url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
|
||||
payload["options"] = apply_model_params_to_body_ollama(
|
||||
params, payload["options"]
|
||||
)
|
||||
payload = apply_model_system_prompt_to_body(params, payload, user)
|
||||
|
||||
url = app.state.config.OLLAMA_BASE_URLS[url_idx]
|
||||
if ":" not in payload["model"]:
|
||||
payload["model"] = f"{payload['model']}:latest"
|
||||
|
||||
url = get_ollama_url(url_idx, payload["model"])
|
||||
log.info(f"url: {url}")
|
||||
log.debug(f"generate_chat_completion() - 2.payload = {payload}")
|
||||
|
||||
print(payload)
|
||||
|
||||
return await post_streaming_url(f"{url}/api/chat", json.dumps(payload))
|
||||
return await post_streaming_url(
|
||||
f"{url}/api/chat",
|
||||
json.dumps(payload),
|
||||
stream=form_data.stream,
|
||||
content_type="application/x-ndjson",
|
||||
)
|
||||
|
||||
|
||||
# TODO: we should update this part once Ollama supports other types
|
||||
@@ -860,7 +824,7 @@ class OpenAIChatMessage(BaseModel):
|
||||
|
||||
class OpenAIChatCompletionForm(BaseModel):
|
||||
model: str
|
||||
messages: List[OpenAIChatMessage]
|
||||
messages: list[OpenAIChatMessage]
|
||||
|
||||
model_config = ConfigDict(extra="allow")
|
||||
|
||||
@@ -868,76 +832,47 @@ class OpenAIChatCompletionForm(BaseModel):
|
||||
@app.post("/v1/chat/completions")
|
||||
@app.post("/v1/chat/completions/{url_idx}")
|
||||
async def generate_openai_chat_completion(
|
||||
form_data: OpenAIChatCompletionForm,
|
||||
form_data: dict,
|
||||
url_idx: Optional[int] = None,
|
||||
user=Depends(get_verified_user),
|
||||
):
|
||||
completion_form = OpenAIChatCompletionForm(**form_data)
|
||||
payload = {**completion_form.model_dump(exclude_none=True, exclude=["metadata"])}
|
||||
if "metadata" in payload:
|
||||
del payload["metadata"]
|
||||
|
||||
payload = {
|
||||
**form_data.model_dump(exclude_none=True),
|
||||
}
|
||||
model_id = completion_form.model
|
||||
|
||||
if app.state.config.ENABLE_MODEL_FILTER:
|
||||
if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Model not found",
|
||||
)
|
||||
|
||||
model_id = form_data.model
|
||||
model_info = Models.get_model_by_id(model_id)
|
||||
|
||||
if model_info:
|
||||
if model_info.base_model_id:
|
||||
payload["model"] = model_info.base_model_id
|
||||
|
||||
model_info.params = model_info.params.model_dump()
|
||||
params = model_info.params.model_dump()
|
||||
|
||||
if model_info.params:
|
||||
payload["temperature"] = model_info.params.get("temperature", None)
|
||||
payload["top_p"] = model_info.params.get("top_p", None)
|
||||
payload["max_tokens"] = model_info.params.get("max_tokens", None)
|
||||
payload["frequency_penalty"] = model_info.params.get(
|
||||
"frequency_penalty", None
|
||||
)
|
||||
payload["seed"] = model_info.params.get("seed", None)
|
||||
payload["stop"] = (
|
||||
[
|
||||
bytes(stop, "utf-8").decode("unicode_escape")
|
||||
for stop in model_info.params["stop"]
|
||||
]
|
||||
if model_info.params.get("stop", None)
|
||||
else None
|
||||
)
|
||||
if params:
|
||||
payload = apply_model_params_to_body_openai(params, payload)
|
||||
payload = apply_model_system_prompt_to_body(params, payload, user)
|
||||
|
||||
if model_info.params.get("system", None):
|
||||
# Check if the payload already has a system message
|
||||
# If not, add a system message to the payload
|
||||
if payload.get("messages"):
|
||||
for message in payload["messages"]:
|
||||
if message.get("role") == "system":
|
||||
message["content"] = (
|
||||
model_info.params.get("system", None) + message["content"]
|
||||
)
|
||||
break
|
||||
else:
|
||||
payload["messages"].insert(
|
||||
0,
|
||||
{
|
||||
"role": "system",
|
||||
"content": model_info.params.get("system", None),
|
||||
},
|
||||
)
|
||||
if ":" not in payload["model"]:
|
||||
payload["model"] = f"{payload['model']}:latest"
|
||||
|
||||
if url_idx == None:
|
||||
if ":" not in payload["model"]:
|
||||
payload["model"] = f"{payload['model']}:latest"
|
||||
|
||||
if payload["model"] in app.state.MODELS:
|
||||
url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
|
||||
)
|
||||
|
||||
url = app.state.config.OLLAMA_BASE_URLS[url_idx]
|
||||
url = get_ollama_url(url_idx, payload["model"])
|
||||
log.info(f"url: {url}")
|
||||
|
||||
return await post_streaming_url(f"{url}/v1/chat/completions", json.dumps(payload))
|
||||
return await post_streaming_url(
|
||||
f"{url}/v1/chat/completions",
|
||||
json.dumps(payload),
|
||||
stream=payload.get("stream", False),
|
||||
)
|
||||
|
||||
|
@app.get("/v1/models")
@@ -946,7 +881,7 @@ async def get_openai_models(
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx == None:
    if url_idx is None:
        models = await get_all_models()

        if app.state.config.ENABLE_MODEL_FILTER:
@@ -1001,7 +936,7 @@ async def get_openai_models(
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
@@ -1027,7 +962,6 @@ def parse_huggingface_url(hf_url):
    path_components = parsed_url.path.split("/")

    # Extract the desired output
    user_repo = "/".join(path_components[1:3])
    model_file = path_components[-1]

    return model_file
@@ -1084,19 +1018,14 @@ async def download_file_stream(
        raise "Ollama: Could not create blob, Please try again."


# def number_generator():
#     for i in range(1, 101):
#         yield f"data: {i}\n"


# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
@app.post("/models/download")
@app.post("/models/download/{url_idx}")
async def download_model(
    form_data: UrlForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]

    if not any(form_data.url.startswith(host) for host in allowed_hosts):
@@ -1105,7 +1034,7 @@ async def download_model(
            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
        )

    if url_idx == None:
    if url_idx is None:
        url_idx = 0
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]

@@ -1123,8 +1052,12 @@ async def download_model(

@app.post("/models/upload")
@app.post("/models/upload/{url_idx}")
def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
    if url_idx == None:
def upload_model(
    file: UploadFile = File(...),
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        url_idx = 0
    ollama_url = app.state.config.OLLAMA_BASE_URLS[url_idx]

@@ -1186,137 +1119,3 @@ def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
        yield f"data: {json.dumps(res)}\n\n"

    return StreamingResponse(file_process_stream(), media_type="text/event-stream")


# async def upload_model(file: UploadFile = File(), url_idx: Optional[int] = None):
#     if url_idx == None:
#         url_idx = 0
#     url = app.state.config.OLLAMA_BASE_URLS[url_idx]

#     file_location = os.path.join(UPLOAD_DIR, file.filename)
#     total_size = file.size

#     async def file_upload_generator(file):
#         print(file)
#         try:
#             async with aiofiles.open(file_location, "wb") as f:
#                 completed_size = 0
#                 while True:
#                     chunk = await file.read(1024*1024)
#                     if not chunk:
#                         break
#                     await f.write(chunk)
#                     completed_size += len(chunk)
#                     progress = (completed_size / total_size) * 100

#                     print(progress)
#                     yield f'data: {json.dumps({"status": "uploading", "percentage": progress, "total": total_size, "completed": completed_size, "done": False})}\n'
#         except Exception as e:
#             print(e)
#             yield f"data: {json.dumps({'status': 'error', 'message': str(e)})}\n"
#         finally:
#             await file.close()
#             print("done")
#             yield f'data: {json.dumps({"status": "completed", "percentage": 100, "total": total_size, "completed": completed_size, "done": True})}\n'

#     return StreamingResponse(
#         file_upload_generator(copy.deepcopy(file)), media_type="text/event-stream"
#     )


@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def deprecated_proxy(
    path: str, request: Request, user=Depends(get_verified_user)
):
    url = app.state.config.OLLAMA_BASE_URLS[0]
    target_url = f"{url}/{path}"

    body = await request.body()
    headers = dict(request.headers)

    if user.role in ["user", "admin"]:
        if path in ["pull", "delete", "push", "copy", "create"]:
            if user.role != "admin":
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
                )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )

    headers.pop("host", None)
    headers.pop("authorization", None)
    headers.pop("origin", None)
    headers.pop("referer", None)

    r = None

    def get_request():
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if path == "generate":
                        data = json.loads(body.decode("utf-8"))

                        if data.get("stream", True):
                            yield json.dumps({"id": request_id, "done": False}) + "\n"

                    elif path == "chat":
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                        if request_id in REQUEST_POOL:
                            REQUEST_POOL.remove(request_id)

            r = requests.request(
                method=request.method,
                url=target_url,
                data=body,
                headers=headers,
                stream=True,
            )

            r.raise_for_status()

            # r.close()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
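The deprecated proxy above supports client-side cancellation through the shared REQUEST_POOL list: streaming stops as soon as another request removes the id. A stripped-down sketch of that pattern (standalone, simplified from the code above):

    import uuid

    REQUEST_POOL: list[str] = []  # ids of requests still allowed to stream

    def stream_with_cancellation(r):
        # `r` is a requests.Response opened with stream=True.
        request_id = str(uuid.uuid4())
        REQUEST_POOL.append(request_id)
        try:
            for chunk in r.iter_content(chunk_size=8192):
                if request_id not in REQUEST_POOL:
                    break  # id was removed elsewhere: treat as canceled
                yield chunk
        finally:
            r.close()
            if request_id in REQUEST_POOL:
                REQUEST_POOL.remove(request_id)
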
@@ -1,40 +1,42 @@
from fastapi import FastAPI, Request, Response, HTTPException, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse

import requests
import aiohttp
import asyncio
import hashlib
import json
import logging
from pathlib import Path
from typing import Literal, Optional, overload

import aiohttp
import requests
from open_webui.apps.webui.models.models import Models
from open_webui.config import (
    CACHE_DIR,
    CORS_ALLOW_ORIGIN,
    ENABLE_MODEL_FILTER,
    ENABLE_OPENAI_API,
    MODEL_FILTER_LIST,
    OPENAI_API_BASE_URLS,
    OPENAI_API_KEYS,
    AppConfig,
)
from open_webui.env import (
    AIOHTTP_CLIENT_TIMEOUT,
    AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST,
)

from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
from fastapi import Depends, FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse, StreamingResponse
from pydantic import BaseModel
from starlette.background import BackgroundTask

from apps.webui.models.models import Models
from apps.webui.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
from open_webui.utils.payload import (
    apply_model_params_to_body_openai,
    apply_model_system_prompt_to_body,
)
from config import (
    SRC_LOG_LEVELS,
    ENABLE_OPENAI_API,
    OPENAI_API_BASE_URLS,
    OPENAI_API_KEYS,
    CACHE_DIR,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    AppConfig,
)
from typing import List, Optional


import hashlib
from pathlib import Path
from open_webui.utils.utils import get_admin_user, get_verified_user

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OPENAI"])
@@ -42,13 +44,12 @@ log.setLevel(SRC_LOG_LEVELS["OPENAI"])
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


app.state.config = AppConfig()

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
@@ -65,8 +66,6 @@ app.state.MODELS = {}
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()
    else:
        pass

    response = await call_next(request)
    return response
@@ -88,11 +87,11 @@ async def update_config(form_data: OpenAIConfigForm, user=Depends(get_admin_user


class UrlsUpdateForm(BaseModel):
    urls: List[str]
    urls: list[str]


class KeysUpdateForm(BaseModel):
    keys: List[str]
    keys: list[str]


@app.get("/urls")
@@ -171,7 +170,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']}"
            except:
            except Exception:
                error_detail = f"External: {e}"

        raise HTTPException(
@@ -183,7 +182,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):


async def fetch_url(url, key):
    timeout = aiohttp.ClientTimeout(total=5)
    timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST)
    try:
        headers = {"Authorization": f"Bearer {key}"}
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
@@ -223,79 +222,91 @@ def merge_models_lists(model_lists):
                for model in models
                if "api.openai.com"
                not in app.state.config.OPENAI_API_BASE_URLS[idx]
                or "gpt" in model["id"]
                or not any(
                    name in model["id"]
                    for name in [
                        "babbage",
                        "dall-e",
                        "davinci",
                        "embedding",
                        "tts",
                        "whisper",
                    ]
                )
            ]
        )

    return merged_list


async def get_all_models(raw: bool = False):
def is_openai_api_disabled():
    return not app.state.config.ENABLE_OPENAI_API


async def get_all_models_raw() -> list:
    if is_openai_api_disabled():
        return []

    # Check if API KEYS length is same than API URLS length
    num_urls = len(app.state.config.OPENAI_API_BASE_URLS)
    num_keys = len(app.state.config.OPENAI_API_KEYS)

    if num_keys != num_urls:
        # if there are more keys than urls, remove the extra keys
        if num_keys > num_urls:
            new_keys = app.state.config.OPENAI_API_KEYS[:num_urls]
            app.state.config.OPENAI_API_KEYS = new_keys
        # if there are more urls than keys, add empty keys
        else:
            app.state.config.OPENAI_API_KEYS += [""] * (num_urls - num_keys)

    tasks = [
        fetch_url(f"{url}/models", app.state.config.OPENAI_API_KEYS[idx])
        for idx, url in enumerate(app.state.config.OPENAI_API_BASE_URLS)
    ]

    responses = await asyncio.gather(*tasks)
    log.debug(f"get_all_models:responses() {responses}")

    return responses

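Before fanning out the /models requests, get_all_models_raw forces the key list to line up with the URL list. The reconciliation is equivalent to this self-contained sketch:

    def reconcile_keys(urls: list[str], keys: list[str]) -> list[str]:
        # One key per URL: drop extra keys, pad missing ones with "".
        if len(keys) > len(urls):
            return keys[: len(urls)]
        return keys + [""] * (len(urls) - len(keys))

    assert reconcile_keys(["u1", "u2"], ["k1"]) == ["k1", ""]
    assert reconcile_keys(["u1"], ["k1", "k2"]) == ["k1"]
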
@overload
async def get_all_models(raw: Literal[True]) -> list: ...


@overload
async def get_all_models(raw: Literal[False] = False) -> dict[str, list]: ...


async def get_all_models(raw=False) -> dict[str, list] | list:
    log.info("get_all_models()")
    if is_openai_api_disabled():
        return [] if raw else {"data": []}

    if (
        len(app.state.config.OPENAI_API_KEYS) == 1
        and app.state.config.OPENAI_API_KEYS[0] == ""
    ) or not app.state.config.ENABLE_OPENAI_API:
        models = {"data": []}
    else:
        # Check if API KEYS length is same than API URLS length
        if len(app.state.config.OPENAI_API_KEYS) != len(
            app.state.config.OPENAI_API_BASE_URLS
        ):
            # if there are more keys than urls, remove the extra keys
            if len(app.state.config.OPENAI_API_KEYS) > len(
                app.state.config.OPENAI_API_BASE_URLS
            ):
                app.state.config.OPENAI_API_KEYS = app.state.config.OPENAI_API_KEYS[
                    : len(app.state.config.OPENAI_API_BASE_URLS)
                ]
            # if there are more urls than keys, add empty keys
            else:
                app.state.config.OPENAI_API_KEYS += [
                    ""
                    for _ in range(
                        len(app.state.config.OPENAI_API_BASE_URLS)
                        - len(app.state.config.OPENAI_API_KEYS)
                    )
                ]
    responses = await get_all_models_raw()
    if raw:
        return responses

        tasks = [
            fetch_url(f"{url}/models", app.state.config.OPENAI_API_KEYS[idx])
            for idx, url in enumerate(app.state.config.OPENAI_API_BASE_URLS)
        ]
    def extract_data(response):
        if response and "data" in response:
            return response["data"]
        if isinstance(response, list):
            return response
        return None

        responses = await asyncio.gather(*tasks)
    log.debug(f"get_all_models:responses() {responses}")
    models = {"data": merge_models_lists(map(extract_data, responses))}

        if raw:
            return responses

        models = {
            "data": merge_models_lists(
                list(
                    map(
                        lambda response: (
                            response["data"]
                            if (response and "data" in response)
                            else (response if isinstance(response, list) else None)
                        ),
                        responses,
                    )
                )
            )
        }

    log.debug(f"models: {models}")
    app.state.MODELS = {model["id"]: model for model in models["data"]}
    log.debug(f"models: {models}")
    app.state.MODELS = {model["id"]: model for model in models["data"]}

    return models

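The @overload stubs in the section above give get_all_models a precise return type per value of raw; only the final, untyped definition carries the implementation. The general shape of the pattern (generic example; the | union syntax assumes Python 3.10+):

    from typing import Literal, overload

    @overload
    def load(raw: Literal[True]) -> list: ...
    @overload
    def load(raw: Literal[False] = False) -> dict: ...

    def load(raw: bool = False) -> dict | list:
        # Single runtime implementation; the stubs above are for the
        # type checker only and are never executed.
        return [] if raw else {}
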
@app.get("/models")
@app.get("/models/{url_idx}")
async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_user)):
    if url_idx == None:
async def get_models(url_idx: Optional[int] = None, user=Depends(get_verified_user)):
    if url_idx is None:
        models = await get_all_models()
        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
@@ -322,10 +333,24 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
            r.raise_for_status()

            response_data = r.json()

            if "api.openai.com" in url:
                response_data["data"] = list(
                    filter(lambda model: "gpt" in model["id"], response_data["data"])
                )
                # Filter the response data
                response_data["data"] = [
                    model
                    for model in response_data["data"]
                    if not any(
                        name in model["id"]
                        for name in [
                            "babbage",
                            "dall-e",
                            "davinci",
                            "embedding",
                            "tts",
                            "whisper",
                        ]
                    )
                ]

            return response_data
        except Exception as e:
@@ -336,7 +361,7 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']}"
            except:
            except Exception:
                error_detail = f"External: {e}"

        raise HTTPException(
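Per the hunk above, /models responses from api.openai.com are no longer whitelisted on "gpt" but filtered against a denylist of non-chat model families. Equivalent standalone sketch:

    DENYLIST = ["babbage", "dall-e", "davinci", "embedding", "tts", "whisper"]

    def is_chat_model(model_id: str) -> bool:
        # Keep a model unless its id contains a denylisted substring.
        return not any(name in model_id for name in DENYLIST)

    assert is_chat_model("gpt-4o")
    assert not is_chat_model("tts-1")
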
@@ -355,6 +380,9 @@ async def generate_chat_completion(
    idx = 0
    payload = {**form_data}

    if "metadata" in payload:
        del payload["metadata"]

    model_id = form_data.get("model")
    model_info = Models.get_model_by_id(model_id)

@@ -362,91 +390,63 @@ async def generate_chat_completion(
        if model_info.base_model_id:
            payload["model"] = model_info.base_model_id

        model_info.params = model_info.params.model_dump()

        if model_info.params:
            if model_info.params.get("temperature", None) is not None:
                payload["temperature"] = float(model_info.params.get("temperature"))

            if model_info.params.get("top_p", None):
                payload["top_p"] = int(model_info.params.get("top_p", None))

            if model_info.params.get("max_tokens", None):
                payload["max_tokens"] = int(model_info.params.get("max_tokens", None))

            if model_info.params.get("frequency_penalty", None):
                payload["frequency_penalty"] = int(
                    model_info.params.get("frequency_penalty", None)
                )

            if model_info.params.get("seed", None):
                payload["seed"] = model_info.params.get("seed", None)

            if model_info.params.get("stop", None):
                payload["stop"] = (
                    [
                        bytes(stop, "utf-8").decode("unicode_escape")
                        for stop in model_info.params["stop"]
                    ]
                    if model_info.params.get("stop", None)
                    else None
                )

            if model_info.params.get("system", None):
                # Check if the payload already has a system message
                # If not, add a system message to the payload
                if payload.get("messages"):
                    for message in payload["messages"]:
                        if message.get("role") == "system":
                            message["content"] = (
                                model_info.params.get("system", None) + message["content"]
                            )
                            break
                    else:
                        payload["messages"].insert(
                            0,
                            {
                                "role": "system",
                                "content": model_info.params.get("system", None),
                            },
                        )

    else:
        pass
        params = model_info.params.model_dump()
        payload = apply_model_params_to_body_openai(params, payload)
        payload = apply_model_system_prompt_to_body(params, payload, user)

    model = app.state.MODELS[payload.get("model")]
    idx = model["urlIdx"]

    if "pipeline" in model and model.get("pipeline"):
        payload["user"] = {"name": user.name, "id": user.id}
        payload["user"] = {
            "name": user.name,
            "id": user.id,
            "email": user.email,
            "role": user.role,
        }

    # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
    # This is a workaround until OpenAI fixes the issue with this model
    if payload.get("model") == "gpt-4-vision-preview":
        if "max_tokens" not in payload:
            payload["max_tokens"] = 4000
            log.debug("Modified payload:", payload)
    url = app.state.config.OPENAI_API_BASE_URLS[idx]
    key = app.state.config.OPENAI_API_KEYS[idx]
    is_o1 = payload["model"].lower().startswith("o1-")

    # Change max_completion_tokens to max_tokens (Backward compatible)
    if "api.openai.com" not in url and not is_o1:
        if "max_completion_tokens" in payload:
            # Remove "max_completion_tokens" from the payload
            payload["max_tokens"] = payload["max_completion_tokens"]
            del payload["max_completion_tokens"]
    else:
        if is_o1 and "max_tokens" in payload:
            payload["max_completion_tokens"] = payload["max_tokens"]
            del payload["max_tokens"]
        if "max_tokens" in payload and "max_completion_tokens" in payload:
            del payload["max_tokens"]

    # Fix: O1 does not support the "system" parameter, Modify "system" to "user"
    if is_o1 and payload["messages"][0]["role"] == "system":
        payload["messages"][0]["role"] = "user"

    # Convert the modified body back to JSON
    payload = json.dumps(payload)

    print(payload)

    url = app.state.config.OPENAI_API_BASE_URLS[idx]
    key = app.state.config.OPENAI_API_KEYS[idx]

    print(payload)
    log.debug(payload)

    headers = {}
    headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"
    if "openrouter.ai" in app.state.config.OPENAI_API_BASE_URLS[idx]:
        headers["HTTP-Referer"] = "https://openwebui.com/"
        headers["X-Title"] = "Open WebUI"

    r = None
    session = None
    streaming = False
    response = None

    try:
        session = aiohttp.ClientSession(trust_env=True)
        session = aiohttp.ClientSession(
            trust_env=True, timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
        )
        r = await session.request(
            method="POST",
            url=f"{url}/chat/completions",
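The max_tokens / max_completion_tokens shuffle in the hunk above is compatibility glue: o1-style models accept only max_completion_tokens, while most non-OpenAI backends only understand max_tokens. Condensed into one function (a sketch, not the project's exact code):

    def normalize_token_param(payload: dict, url: str) -> dict:
        is_o1 = payload["model"].lower().startswith("o1-")
        if "api.openai.com" not in url and not is_o1:
            # Non-OpenAI backend: translate the newer name back.
            if "max_completion_tokens" in payload:
                payload["max_tokens"] = payload.pop("max_completion_tokens")
        else:
            if is_o1 and "max_tokens" in payload:
                payload["max_completion_tokens"] = payload.pop("max_tokens")
            if "max_tokens" in payload and "max_completion_tokens" in payload:
                del payload["max_tokens"]
        return payload
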
@@ -454,8 +454,6 @@ async def generate_chat_completion(
            headers=headers,
        )

        r.raise_for_status()

        # Check if response is SSE
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            streaming = True
@@ -468,19 +466,23 @@ async def generate_chat_completion(
                ),
            )
        else:
            response_data = await r.json()
            return response_data
            try:
                response = await r.json()
            except Exception as e:
                log.error(e)
                response = await r.text()

            r.raise_for_status()
            return response
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = await r.json()
                print(res)
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except:
                error_detail = f"External: {e}"
        if isinstance(response, dict):
            if "error" in response:
                error_detail = f"{response['error']['message'] if 'message' in response['error'] else response['error']}"
        elif isinstance(response, str):
            error_detail = response

        raise HTTPException(status_code=r.status if r else 500, detail=error_detail)
    finally:
        if not streaming and session:
@@ -542,7 +544,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
                print(res)
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except:
            except Exception:
                error_detail = f"External: {e}"
        raise HTTPException(status_code=r.status if r else 500, detail=error_detail)
    finally:
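Streaming detection in generate_chat_completion keys off the upstream Content-Type header, and only the non-streaming path closes the aiohttp session in finally: the streaming path defers cleanup until the client has drained the stream. A minimal sketch of that branch, assuming aiohttp response semantics and a hypothetical cleanup_response helper:

    from fastapi.responses import StreamingResponse
    from starlette.background import BackgroundTask

    async def cleanup_response(response, session):
        # Hypothetical helper: release the upstream response and the
        # session once the client has consumed the stream.
        if response:
            response.close()
        if session:
            await session.close()

    def relay(r, session):
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            return StreamingResponse(
                r.content,  # aiohttp StreamReader, iterated chunk by chunk
                status_code=r.status,
                headers=dict(r.headers),
                background=BackgroundTask(cleanup_response, response=r, session=session),
            )
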
backend/open_webui/apps/retrieval/loaders/main.py (new file, 190 lines)
@@ -0,0 +1,190 @@
import requests
import logging
import ftfy

from langchain_community.document_loaders import (
    BSHTMLLoader,
    CSVLoader,
    Docx2txtLoader,
    OutlookMessageLoader,
    PyPDFLoader,
    TextLoader,
    UnstructuredEPubLoader,
    UnstructuredExcelLoader,
    UnstructuredMarkdownLoader,
    UnstructuredPowerPointLoader,
    UnstructuredRSTLoader,
    UnstructuredXMLLoader,
    YoutubeLoader,
)
from langchain_core.documents import Document
from open_webui.env import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])

known_source_ext = [
    "go",
    "py",
    "java",
    "sh",
    "bat",
    "ps1",
    "cmd",
    "js",
    "ts",
    "css",
    "cpp",
    "hpp",
    "h",
    "c",
    "cs",
    "sql",
    "log",
    "ini",
    "pl",
    "pm",
    "r",
    "dart",
    "dockerfile",
    "env",
    "php",
    "hs",
    "hsc",
    "lua",
    "nginxconf",
    "conf",
    "m",
    "mm",
    "plsql",
    "perl",
    "rb",
    "rs",
    "db2",
    "scala",
    "bash",
    "swift",
    "vue",
    "svelte",
    "msg",
    "ex",
    "exs",
    "erl",
    "tsx",
    "jsx",
    "hs",
    "lhs",
]


class TikaLoader:
    def __init__(self, url, file_path, mime_type=None):
        self.url = url
        self.file_path = file_path
        self.mime_type = mime_type

    def load(self) -> list[Document]:
        with open(self.file_path, "rb") as f:
            data = f.read()

        if self.mime_type is not None:
            headers = {"Content-Type": self.mime_type}
        else:
            headers = {}

        endpoint = self.url
        if not endpoint.endswith("/"):
            endpoint += "/"
        endpoint += "tika/text"

        r = requests.put(endpoint, data=data, headers=headers)

        if r.ok:
            raw_metadata = r.json()
            text = raw_metadata.get("X-TIKA:content", "<No text content found>")

            if "Content-Type" in raw_metadata:
                headers["Content-Type"] = raw_metadata["Content-Type"]

            log.info("Tika extracted text: %s", text)

            return [Document(page_content=text, metadata=headers)]
        else:
            raise Exception(f"Error calling Tika: {r.reason}")


class Loader:
    def __init__(self, engine: str = "", **kwargs):
        self.engine = engine
        self.kwargs = kwargs

    def load(
        self, filename: str, file_content_type: str, file_path: str
    ) -> list[Document]:
        loader = self._get_loader(filename, file_content_type, file_path)
        docs = loader.load()

        return [
            Document(
                page_content=ftfy.fix_text(doc.page_content), metadata=doc.metadata
            )
            for doc in docs
        ]

    def _get_loader(self, filename: str, file_content_type: str, file_path: str):
        file_ext = filename.split(".")[-1].lower()

        if self.engine == "tika" and self.kwargs.get("TIKA_SERVER_URL"):
            if file_ext in known_source_ext or (
                file_content_type and file_content_type.find("text/") >= 0
            ):
                loader = TextLoader(file_path, autodetect_encoding=True)
            else:
                loader = TikaLoader(
                    url=self.kwargs.get("TIKA_SERVER_URL"),
                    file_path=file_path,
                    mime_type=file_content_type,
                )
        else:
            if file_ext == "pdf":
                loader = PyPDFLoader(
                    file_path, extract_images=self.kwargs.get("PDF_EXTRACT_IMAGES")
                )
            elif file_ext == "csv":
                loader = CSVLoader(file_path)
            elif file_ext == "rst":
                loader = UnstructuredRSTLoader(file_path, mode="elements")
            elif file_ext == "xml":
                loader = UnstructuredXMLLoader(file_path)
            elif file_ext in ["htm", "html"]:
                loader = BSHTMLLoader(file_path, open_encoding="unicode_escape")
            elif file_ext == "md":
                loader = UnstructuredMarkdownLoader(file_path)
            elif file_content_type == "application/epub+zip":
                loader = UnstructuredEPubLoader(file_path)
            elif (
                file_content_type
                == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
                or file_ext == "docx"
            ):
                loader = Docx2txtLoader(file_path)
            elif file_content_type in [
                "application/vnd.ms-excel",
                "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            ] or file_ext in ["xls", "xlsx"]:
                loader = UnstructuredExcelLoader(file_path)
            elif file_content_type in [
                "application/vnd.ms-powerpoint",
                "application/vnd.openxmlformats-officedocument.presentationml.presentation",
            ] or file_ext in ["ppt", "pptx"]:
                loader = UnstructuredPowerPointLoader(file_path)
            elif file_ext == "msg":
                loader = OutlookMessageLoader(file_path)
            elif file_ext in known_source_ext or (
                file_content_type and file_content_type.find("text/") >= 0
            ):
                loader = TextLoader(file_path, autodetect_encoding=True)
            else:
                loader = TextLoader(file_path, autodetect_encoding=True)

        return loader
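The Loader class above is the single entry point for file ingestion: engine selection (Tika versus the format-specific langchain loaders) and the extension/MIME fallbacks stay internal, and every page_content is passed through ftfy. A hedged usage sketch (file names and the Tika URL are illustrative):

    loader = Loader(engine="tika", TIKA_SERVER_URL="http://localhost:9998")
    docs = loader.load(
        filename="report.pdf",
        file_content_type="application/pdf",
        file_path="/tmp/report.pdf",
    )
    # docs is a list[Document] with mojibake-repaired text.
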
(File diff suppressed because it is too large)

backend/open_webui/apps/retrieval/models/colbert.py (new file, 81 lines)
@@ -0,0 +1,81 @@
import os
import torch
import numpy as np
from colbert.infra import ColBERTConfig
from colbert.modeling.checkpoint import Checkpoint


class ColBERT:
    def __init__(self, name, **kwargs) -> None:
        print("ColBERT: Loading model", name)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        DOCKER = kwargs.get("env") == "docker"
        if DOCKER:
            # This is a workaround for the issue with the docker container
            # where the torch extension is not loaded properly
            # and the following error is thrown:
            # /root/.cache/torch_extensions/py311_cpu/segmented_maxsim_cpp/segmented_maxsim_cpp.so: cannot open shared object file: No such file or directory

            lock_file = (
                "/root/.cache/torch_extensions/py311_cpu/segmented_maxsim_cpp/lock"
            )
            if os.path.exists(lock_file):
                os.remove(lock_file)

        self.ckpt = Checkpoint(
            name,
            colbert_config=ColBERTConfig(model_name=name),
        ).to(self.device)
        pass

    def calculate_similarity_scores(self, query_embeddings, document_embeddings):
        query_embeddings = query_embeddings.to(self.device)
        document_embeddings = document_embeddings.to(self.device)

        # Validate dimensions to ensure compatibility
        if query_embeddings.dim() != 3:
            raise ValueError(
                f"Expected query embeddings to have 3 dimensions, but got {query_embeddings.dim()}."
            )
        if document_embeddings.dim() != 3:
            raise ValueError(
                f"Expected document embeddings to have 3 dimensions, but got {document_embeddings.dim()}."
            )
        if query_embeddings.size(0) not in [1, document_embeddings.size(0)]:
            raise ValueError(
                "There should be either one query or queries equal to the number of documents."
            )

        # Transpose the query embeddings to align for matrix multiplication
        transposed_query_embeddings = query_embeddings.permute(0, 2, 1)
        # Compute similarity scores using batch matrix multiplication
        computed_scores = torch.matmul(document_embeddings, transposed_query_embeddings)
        # Apply max pooling to extract the highest semantic similarity across each document's sequence
        maximum_scores = torch.max(computed_scores, dim=1).values

        # Sum up the maximum scores across features to get the overall document relevance scores
        final_scores = maximum_scores.sum(dim=1)

        normalized_scores = torch.softmax(final_scores, dim=0)

        return normalized_scores.detach().cpu().numpy().astype(np.float32)

    def predict(self, sentences):
        query = sentences[0][0]
        docs = [i[1] for i in sentences]

        # Embedding the documents
        embedded_docs = self.ckpt.docFromText(docs, bsize=32)[0]
        # Embedding the queries
        embedded_queries = self.ckpt.queryFromText([query], bsize=32)
        embedded_query = embedded_queries[0]

        # Calculate retrieval scores for the query against all documents
        scores = self.calculate_similarity_scores(
            embedded_query.unsqueeze(0), embedded_docs
        )

        return scores
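calculate_similarity_scores above implements ColBERT-style MaxSim pooling: every document token embedding is scored against every query token, the per-token maxima are kept, and their sum becomes the document score before a softmax across documents. A usage sketch (model name illustrative; the checkpoint is downloaded on first use):

    reranker = ColBERT("colbert-ir/colbertv2.0")
    scores = reranker.predict(
        [
            ("what is open webui", "Open WebUI is a self-hosted chat UI."),
            ("what is open webui", "Bananas are rich in potassium."),
        ]
    )
    # scores: softmax-normalized numpy float32 array, one entry per document.
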
@@ -1,50 +1,85 @@
import os
import logging
import os
import uuid
from typing import Optional, Union

import requests

from typing import List, Union

from apps.ollama.main import (
    generate_ollama_embeddings,
    GenerateEmbeddingsForm,
)

from huggingface_hub import snapshot_download

from langchain_core.documents import Document
from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
from langchain.retrievers import (
    ContextualCompressionRetriever,
    EnsembleRetriever,
from langchain_core.documents import Document


from open_webui.apps.ollama.main import (
    GenerateEmbedForm,
    generate_ollama_batch_embeddings,
)
from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
from open_webui.utils.misc import get_last_user_message

from typing import Optional
from open_webui.env import SRC_LOG_LEVELS
from open_webui.config import DEFAULT_RAG_TEMPLATE

from utils.misc import get_last_user_message, add_or_update_system_message
from config import SRC_LOG_LEVELS, CHROMA_CLIENT

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


from typing import Any

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.retrievers import BaseRetriever


class VectorSearchRetriever(BaseRetriever):
    collection_name: Any
    embedding_function: Any
    top_k: int

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> list[Document]:
        result = VECTOR_DB_CLIENT.search(
            collection_name=self.collection_name,
            vectors=[self.embedding_function(query)],
            limit=self.top_k,
        )

        ids = result.ids[0]
        metadatas = result.metadatas[0]
        documents = result.documents[0]

        results = []
        for idx in range(len(ids)):
            results.append(
                Document(
                    metadata=metadatas[idx],
                    page_content=documents[idx],
                )
            )
        return results


def query_doc(
    collection_name: str,
    query: str,
    embedding_function,
    query_embedding: list[float],
    k: int,
):
    try:
        collection = CHROMA_CLIENT.get_collection(name=collection_name)
        query_embeddings = embedding_function(query)

        result = collection.query(
            query_embeddings=[query_embeddings],
            n_results=k,
        result = VECTOR_DB_CLIENT.search(
            collection_name=collection_name,
            vectors=[query_embedding],
            limit=k,
        )

        log.info(f"query_doc:result {result}")
        return result
    except Exception as e:
        print(e)
        raise e


@@ -55,27 +90,25 @@ def query_doc_with_hybrid_search(
    k: int,
    reranking_function,
    r: float,
):
) -> dict:
    try:
        collection = CHROMA_CLIENT.get_collection(name=collection_name)
        documents = collection.get()  # get all documents
        result = VECTOR_DB_CLIENT.get(collection_name=collection_name)

        bm25_retriever = BM25Retriever.from_texts(
            texts=documents.get("documents"),
            metadatas=documents.get("metadatas"),
            texts=result.documents[0],
            metadatas=result.metadatas[0],
        )
        bm25_retriever.k = k

        chroma_retriever = ChromaRetriever(
            collection=collection,
        vector_search_retriever = VectorSearchRetriever(
            collection_name=collection_name,
            embedding_function=embedding_function,
            top_n=k,
            top_k=k,
        )

        ensemble_retriever = EnsembleRetriever(
            retrievers=[bm25_retriever, chroma_retriever], weights=[0.5, 0.5]
            retrievers=[bm25_retriever, vector_search_retriever], weights=[0.5, 0.5]
        )

        compressor = RerankCompressor(
            embedding_function=embedding_function,
            top_n=k,
@@ -100,7 +133,9 @@ def query_doc_with_hybrid_search(
        raise e


def merge_and_sort_query_results(query_results, k, reverse=False):
def merge_and_sort_query_results(
    query_results: list[dict], k: int, reverse: bool = False
) -> list[dict]:
    # Initialize lists to store combined data
    combined_distances = []
    combined_documents = []
@@ -142,35 +177,43 @@ def merge_and_sort_query_results(query_results, k, reverse=False):


def query_collection(
    collection_names: List[str],
    collection_names: list[str],
    query: str,
    embedding_function,
    k: int,
):
) -> dict:

    results = []
    query_embedding = embedding_function(query)

    for collection_name in collection_names:
        try:
            result = query_doc(
                collection_name=collection_name,
                query=query,
                k=k,
                embedding_function=embedding_function,
            )
            results.append(result)
        except:
        if collection_name:
            try:
                result = query_doc(
                    collection_name=collection_name,
                    k=k,
                    query_embedding=query_embedding,
                )
                if result is not None:
                    results.append(result.model_dump())
            except Exception as e:
                log.exception(f"Error when querying the collection: {e}")
        else:
            pass

    return merge_and_sort_query_results(results, k=k)


def query_collection_with_hybrid_search(
    collection_names: List[str],
    collection_names: list[str],
    query: str,
    embedding_function,
    k: int,
    reranking_function,
    r: float,
):
) -> dict:
    results = []
    error = False
    for collection_name in collection_names:
        try:
            result = query_doc_with_hybrid_search(
@@ -182,14 +225,55 @@ def query_collection_with_hybrid_search(
                r=r,
            )
            results.append(result)
        except:
            pass
        except Exception as e:
            log.exception(
                "Error when querying the collection with " f"hybrid_search: {e}"
            )
            error = True

    if error:
        raise Exception(
            "Hybrid search failed for all collections. Using Non hybrid search as fallback."
        )

    return merge_and_sort_query_results(results, k=k, reverse=True)


def rag_template(template: str, context: str, query: str):
    if template == "":
        template = DEFAULT_RAG_TEMPLATE

    if "[context]" not in template and "{{CONTEXT}}" not in template:
        log.debug(
            "WARNING: The RAG template does not contain the '[context]' or '{{CONTEXT}}' placeholder."
        )

    if "<context>" in context and "</context>" in context:
        log.debug(
            "WARNING: Potential prompt injection attack: the RAG "
            "context contains '<context>' and '</context>'. This might be "
            "nothing, or the user might be trying to hack something."
        )

    query_placeholders = []
    if "[query]" in context:
        query_placeholder = "{{QUERY" + str(uuid.uuid4()) + "}}"
        template = template.replace("[query]", query_placeholder)
        query_placeholders.append(query_placeholder)

    if "{{QUERY}}" in context:
        query_placeholder = "{{QUERY" + str(uuid.uuid4()) + "}}"
        template = template.replace("{{QUERY}}", query_placeholder)
        query_placeholders.append(query_placeholder)

    template = template.replace("[context]", context)
    template = template.replace("{{CONTEXT}}", context)
    template = template.replace("[query]", query)
    template = template.replace("{{QUERY}}", query)

    for query_placeholder in query_placeholders:
        template = template.replace(query_placeholder, query)

    return template

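rag_template above defends against user-controlled context that itself contains [query] or {{QUERY}}: the template's own query slots are first rewritten to a one-off UUID placeholder, so a marker smuggled in via the context cannot change which slot receives the real query. Compressed to the bracket form only (a sketch):

    import uuid

    def render(template: str, context: str, query: str) -> str:
        placeholders = []
        if "[query]" in context:
            # Protect the template's own [query] slots before splicing
            # in the user-controlled context.
            ph = "{{QUERY" + str(uuid.uuid4()) + "}}"
            template = template.replace("[query]", ph)
            placeholders.append(ph)
        template = template.replace("[context]", context)
        template = template.replace("[query]", query)
        for ph in placeholders:
            template = template.replace(ph, query)
        return template
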
@@ -199,45 +283,33 @@ def get_embedding_function(
    embedding_function,
    openai_key,
    openai_url,
    batch_size,
    embedding_batch_size,
):
    if embedding_engine == "":
        return lambda query: embedding_function.encode(query).tolist()
    elif embedding_engine in ["ollama", "openai"]:
        if embedding_engine == "ollama":
            func = lambda query: generate_ollama_embeddings(
                GenerateEmbeddingsForm(
                    **{
                        "model": embedding_model,
                        "prompt": query,
                    }
                )
            )
        elif embedding_engine == "openai":
            func = lambda query: generate_openai_embeddings(
                model=embedding_model,
                text=query,
                key=openai_key,
                url=openai_url,
            )
        func = lambda query: generate_embeddings(
            engine=embedding_engine,
            model=embedding_model,
            text=query,
            key=openai_key if embedding_engine == "openai" else "",
            url=openai_url if embedding_engine == "openai" else "",
        )

        def generate_multiple(query, f):
        def generate_multiple(query, func):
            if isinstance(query, list):
                if embedding_engine == "openai":
                    embeddings = []
                    for i in range(0, len(query), batch_size):
                        embeddings.extend(f(query[i : i + batch_size]))
                    return embeddings
                else:
                    return [f(q) for q in query]
                embeddings = []
                for i in range(0, len(query), embedding_batch_size):
                    embeddings.extend(func(query[i : i + embedding_batch_size]))
                return embeddings
            else:
                return f(query)
                return func(query)

        return lambda query: generate_multiple(query, func)

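The closure returned by get_embedding_function batches list inputs transparently; single strings pass straight through. The wrapper shape, isolated (a sketch):

    def make_batched(func, batch_size: int):
        # func maps one text, or a list of texts, to embeddings.
        def generate_multiple(query):
            if isinstance(query, list):
                embeddings = []
                for i in range(0, len(query), batch_size):
                    embeddings.extend(func(query[i : i + batch_size]))
                return embeddings
            return func(query)

        return generate_multiple
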
def get_rag_context(
    docs,
    files,
    messages,
    embedding_function,
    k,
@@ -245,79 +317,115 @@ def get_rag_context(
    r,
    hybrid_search,
):
    log.debug(f"docs: {docs} {messages} {embedding_function} {reranking_function}")
    log.debug(f"files: {files} {messages} {embedding_function} {reranking_function}")
    query = get_last_user_message(messages)

    extracted_collections = []
    relevant_contexts = []

    for doc in docs:
        context = None

        collection_names = (
            doc["collection_names"]
            if doc["type"] == "collection"
            else [doc["collection_name"]]
        )

        collection_names = set(collection_names).difference(extracted_collections)
        if not collection_names:
            log.debug(f"skipping {doc} as it has already been extracted")
            continue

        try:
            if doc["type"] == "text":
                context = doc["content"]
            else:
                if hybrid_search:
                    context = query_collection_with_hybrid_search(
                        collection_names=collection_names,
                        query=query,
                        embedding_function=embedding_function,
                        k=k,
                        reranking_function=reranking_function,
                        r=r,
                    )
                else:
                    context = query_collection(
                        collection_names=collection_names,
                        query=query,
                        embedding_function=embedding_function,
                        k=k,
                    )
        except Exception as e:
            log.exception(e)
    for file in files:
        if file.get("context") == "full":
            context = {
                "documents": [[file.get("file").get("data", {}).get("content")]],
                "metadatas": [[{"file_id": file.get("id"), "name": file.get("name")}]],
            }
        else:
            context = None

            collection_names = []
            if file.get("type") == "collection":
                if file.get("legacy"):
                    collection_names = file.get("collection_names", [])
                else:
                    collection_names.append(file["id"])
            elif file.get("collection_name"):
                collection_names.append(file["collection_name"])
            elif file.get("id"):
                if file.get("legacy"):
                    collection_names.append(f"{file['id']}")
                else:
                    collection_names.append(f"file-{file['id']}")

            collection_names = set(collection_names).difference(extracted_collections)
            if not collection_names:
                log.debug(f"skipping {file} as it has already been extracted")
                continue

            try:
                context = None
                if file.get("type") == "text":
                    context = file["content"]
                else:
                    if hybrid_search:
                        try:
                            context = query_collection_with_hybrid_search(
                                collection_names=collection_names,
                                query=query,
                                embedding_function=embedding_function,
                                k=k,
                                reranking_function=reranking_function,
                                r=r,
                            )
                        except Exception as e:
                            log.debug(
                                "Error when using hybrid search, using"
                                " non hybrid search as fallback."
                            )

                    if (not hybrid_search) or (context is None):
                        context = query_collection(
                            collection_names=collection_names,
                            query=query,
                            embedding_function=embedding_function,
                            k=k,
                        )
            except Exception as e:
                log.exception(e)

            extracted_collections.extend(collection_names)

        if context:
            relevant_contexts.append({**context, "source": doc})

    extracted_collections.extend(collection_names)

    context_string = ""
            if "data" in file:
                del file["data"]
            relevant_contexts.append({**context, "file": file})

    contexts = []
    citations = []
    for context in relevant_contexts:
        try:
            if "documents" in context:
                context_string += "\n\n".join(
                    [text for text in context["documents"][0] if text is not None]
                file_names = list(
                    set(
                        [
                            metadata["name"]
                            for metadata in context["metadatas"][0]
                            if metadata is not None and "name" in metadata
                        ]
                    )
                )
                contexts.append(
                    ((", ".join(file_names) + ":\n\n") if file_names else "")
                    + "\n\n".join(
                        [text for text in context["documents"][0] if text is not None]
                    )
                )

            if "metadatas" in context:
                citations.append(
                    {
                        "source": context["source"],
                        "document": context["documents"][0],
                        "metadata": context["metadatas"][0],
                    }
                )
                citation = {
                    "source": context["file"],
                    "document": context["documents"][0],
                    "metadata": context["metadatas"][0],
                }
                if "distances" in context and context["distances"]:
                    citation["distances"] = context["distances"][0]
                citations.append(citation)
        except Exception as e:
            log.exception(e)

    context_string = context_string.strip()
    print("contexts", contexts)
    print("citations", citations)

    return context_string, citations
    return contexts, citations


def get_model_path(model: str, update_model: bool = False):
@@ -358,20 +466,6 @@ def get_model_path(model: str, update_model: bool = False):
    return model


def generate_openai_embeddings(
    model: str,
    text: Union[str, list[str]],
    key: str,
    url: str = "https://api.openai.com/v1",
):
    if isinstance(text, list):
        embeddings = generate_openai_batch_embeddings(model, text, key, url)
    else:
        embeddings = generate_openai_batch_embeddings(model, [text], key, url)

    return embeddings[0] if isinstance(text, str) else embeddings


def generate_openai_batch_embeddings(
    model: str, texts: list[str], key: str, url: str = "https://api.openai.com/v1"
) -> Optional[list[list[float]]]:
@@ -395,54 +489,38 @@ def generate_openai_batch_embeddings(
    return None


from typing import Any

from langchain_core.retrievers import BaseRetriever
from langchain_core.callbacks import CallbackManagerForRetrieverRun


class ChromaRetriever(BaseRetriever):
    collection: Any
    embedding_function: Any
    top_n: int

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        query_embeddings = self.embedding_function(query)

        results = self.collection.query(
            query_embeddings=[query_embeddings],
            n_results=self.top_n,
        )

        ids = results["ids"][0]
        metadatas = results["metadatas"][0]
        documents = results["documents"][0]

        results = []
        for idx in range(len(ids)):
            results.append(
                Document(
                    metadata=metadatas[idx],
                    page_content=documents[idx],
                )
def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], **kwargs):
    if engine == "ollama":
        if isinstance(text, list):
            embeddings = generate_ollama_batch_embeddings(
                GenerateEmbedForm(**{"model": model, "input": text})
            )
        return results
        else:
            embeddings = generate_ollama_batch_embeddings(
                GenerateEmbedForm(**{"model": model, "input": [text]})
            )
        return (
            embeddings["embeddings"][0]
            if isinstance(text, str)
            else embeddings["embeddings"]
        )
    elif engine == "openai":
        key = kwargs.get("key", "")
        url = kwargs.get("url", "https://api.openai.com/v1")

        if isinstance(text, list):
            embeddings = generate_openai_batch_embeddings(model, text, key, url)
        else:
            embeddings = generate_openai_batch_embeddings(model, [text], key, url)

        return embeddings[0] if isinstance(text, str) else embeddings

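generate_embeddings above folds the Ollama and OpenAI paths behind one signature, normalizing a single string to a one-element batch and unwrapping the result. Hedged usage (model names, key and url are illustrative):

    single = generate_embeddings(
        engine="openai",
        model="text-embedding-3-small",
        text="hello world",
        key="sk-...",
        url="https://api.openai.com/v1",
    )  # -> list[float]

    batch = generate_embeddings(
        engine="ollama",
        model="nomic-embed-text",
        text=["hello", "world"],
    )  # -> list[list[float]]
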
import operator

from typing import Optional, Sequence

from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.callbacks import Callbacks
from langchain_core.pydantic_v1 import Extra

from sentence_transformers import util
from langchain_core.documents import BaseDocumentCompressor, Document


class RerankCompressor(BaseDocumentCompressor):
@@ -452,7 +530,7 @@ class RerankCompressor(BaseDocumentCompressor):
    r_score: float

    class Config:
        extra = Extra.forbid
        extra = "forbid"
        arbitrary_types_allowed = True

    def compress_documents(
@@ -468,6 +546,8 @@ class RerankCompressor(BaseDocumentCompressor):
                [(query, doc.page_content) for doc in documents]
            )
        else:
            from sentence_transformers import util

            query_embedding = self.embedding_function(query)
            document_embedding = self.embedding_function(
                [doc.page_content for doc in documents]
backend/open_webui/apps/retrieval/vector/connector.py (new file, 14 lines)
@@ -0,0 +1,14 @@
from open_webui.config import VECTOR_DB

if VECTOR_DB == "milvus":
    from open_webui.apps.retrieval.vector.dbs.milvus import MilvusClient

    VECTOR_DB_CLIENT = MilvusClient()
elif VECTOR_DB == "qdrant":
    from open_webui.apps.retrieval.vector.dbs.qdrant import QdrantClient

    VECTOR_DB_CLIENT = QdrantClient()
else:
    from open_webui.apps.retrieval.vector.dbs.chroma import ChromaClient

    VECTOR_DB_CLIENT = ChromaClient()
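connector.py above pins VECTOR_DB_CLIENT to one backend at import time, so callers never branch on VECTOR_DB; everything downstream relies only on the shared method surface. For example (toy vector values; real ones come from the configured embedding function):

    result = VECTOR_DB_CLIENT.search(
        collection_name="my-collection",
        vectors=[[0.1, 0.2, 0.3]],  # one query embedding
        limit=3,
    )
    if result is not None:
        print(result.ids[0], result.documents[0])
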
backend/open_webui/apps/retrieval/vector/dbs/chroma.py (new file, 161 lines)
@@ -0,0 +1,161 @@
import chromadb
from chromadb import Settings
from chromadb.utils.batch_utils import create_batches

from typing import Optional

from open_webui.apps.retrieval.vector.main import VectorItem, SearchResult, GetResult
from open_webui.config import (
    CHROMA_DATA_PATH,
    CHROMA_HTTP_HOST,
    CHROMA_HTTP_PORT,
    CHROMA_HTTP_HEADERS,
    CHROMA_HTTP_SSL,
    CHROMA_TENANT,
    CHROMA_DATABASE,
)


class ChromaClient:
    def __init__(self):
        if CHROMA_HTTP_HOST != "":
            self.client = chromadb.HttpClient(
                host=CHROMA_HTTP_HOST,
                port=CHROMA_HTTP_PORT,
                headers=CHROMA_HTTP_HEADERS,
                ssl=CHROMA_HTTP_SSL,
                tenant=CHROMA_TENANT,
                database=CHROMA_DATABASE,
                settings=Settings(allow_reset=True, anonymized_telemetry=False),
            )
        else:
            self.client = chromadb.PersistentClient(
                path=CHROMA_DATA_PATH,
                settings=Settings(allow_reset=True, anonymized_telemetry=False),
                tenant=CHROMA_TENANT,
                database=CHROMA_DATABASE,
            )

    def has_collection(self, collection_name: str) -> bool:
        # Check if the collection exists based on the collection name.
        collections = self.client.list_collections()
        return collection_name in [collection.name for collection in collections]

    def delete_collection(self, collection_name: str):
        # Delete the collection based on the collection name.
        return self.client.delete_collection(name=collection_name)

    def search(
        self, collection_name: str, vectors: list[list[float | int]], limit: int
    ) -> Optional[SearchResult]:
        # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
        try:
            collection = self.client.get_collection(name=collection_name)
            if collection:
                result = collection.query(
                    query_embeddings=vectors,
                    n_results=limit,
                )

                return SearchResult(
                    **{
                        "ids": result["ids"],
                        "distances": result["distances"],
                        "documents": result["documents"],
                        "metadatas": result["metadatas"],
                    }
                )
            return None
        except Exception as e:
            return None

    def query(
        self, collection_name: str, filter: dict, limit: Optional[int] = None
    ) -> Optional[GetResult]:
        # Query the items from the collection based on the filter.
        try:
            collection = self.client.get_collection(name=collection_name)
            if collection:
                result = collection.get(
                    where=filter,
                    limit=limit,
                )

                return GetResult(
                    **{
                        "ids": [result["ids"]],
                        "documents": [result["documents"]],
                        "metadatas": [result["metadatas"]],
                    }
                )
            return None
        except Exception as e:
            print(e)
            return None

    def get(self, collection_name: str) -> Optional[GetResult]:
        # Get all the items in the collection.
        collection = self.client.get_collection(name=collection_name)
        if collection:
            result = collection.get()
            return GetResult(
                **{
                    "ids": [result["ids"]],
                    "documents": [result["documents"]],
                    "metadatas": [result["metadatas"]],
                }
            )
        return None

    def insert(self, collection_name: str, items: list[VectorItem]):
        # Insert the items into the collection, if the collection does not exist, it will be created.
        collection = self.client.get_or_create_collection(
            name=collection_name, metadata={"hnsw:space": "cosine"}
        )

        ids = [item["id"] for item in items]
        documents = [item["text"] for item in items]
        embeddings = [item["vector"] for item in items]
        metadatas = [item["metadata"] for item in items]

        for batch in create_batches(
            api=self.client,
            documents=documents,
            embeddings=embeddings,
            ids=ids,
            metadatas=metadatas,
        ):
            collection.add(*batch)

    def upsert(self, collection_name: str, items: list[VectorItem]):
        # Update the items in the collection, if the items are not present, insert them. If the collection does not exist, it will be created.
        collection = self.client.get_or_create_collection(
            name=collection_name, metadata={"hnsw:space": "cosine"}
        )

        ids = [item["id"] for item in items]
        documents = [item["text"] for item in items]
        embeddings = [item["vector"] for item in items]
        metadatas = [item["metadata"] for item in items]

        collection.upsert(
            ids=ids, documents=documents, embeddings=embeddings, metadatas=metadatas
        )

    def delete(
        self,
        collection_name: str,
        ids: Optional[list[str]] = None,
        filter: Optional[dict] = None,
    ):
        # Delete the items from the collection based on the ids.
        collection = self.client.get_collection(name=collection_name)
        if collection:
            if ids:
                collection.delete(ids=ids)
            elif filter:
                collection.delete(where=filter)

    def reset(self):
        # Resets the database. This will delete all collections and item entries.
        return self.client.reset()
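Note the result shape the clients share: SearchResult and GetResult fields are lists of lists, one inner list per query vector, which is why callers elsewhere index result.documents[0]. Sketch (query_vec assumed to be an embedding of the right dimension):

    result = VECTOR_DB_CLIENT.search("docs", vectors=[query_vec], limit=2)
    ids_for_query0 = result.ids[0]          # e.g. ["a", "b"]
    texts_for_query0 = result.documents[0]  # matching text chunks
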
backend/open_webui/apps/retrieval/vector/dbs/milvus.py (new file, 286 lines)
@@ -0,0 +1,286 @@
from pymilvus import MilvusClient as Client
from pymilvus import FieldSchema, DataType
import json

from typing import Optional

from open_webui.apps.retrieval.vector.main import VectorItem, SearchResult, GetResult
from open_webui.config import (
    MILVUS_URI,
)


class MilvusClient:
    def __init__(self):
        self.collection_prefix = "open_webui"
        self.client = Client(uri=MILVUS_URI)

    def _result_to_get_result(self, result) -> GetResult:
        ids = []
        documents = []
        metadatas = []

        for match in result:
            _ids = []
            _documents = []
            _metadatas = []
            for item in match:
                _ids.append(item.get("id"))
                _documents.append(item.get("data", {}).get("text"))
                _metadatas.append(item.get("metadata"))

            ids.append(_ids)
            documents.append(_documents)
            metadatas.append(_metadatas)

        return GetResult(
            **{
                "ids": ids,
                "documents": documents,
                "metadatas": metadatas,
            }
        )

    def _result_to_search_result(self, result) -> SearchResult:
        ids = []
        distances = []
        documents = []
        metadatas = []

        for match in result:
            _ids = []
            _distances = []
            _documents = []
            _metadatas = []

            for item in match:
                _ids.append(item.get("id"))
                _distances.append(item.get("distance"))
                _documents.append(item.get("entity", {}).get("data", {}).get("text"))
                _metadatas.append(item.get("entity", {}).get("metadata"))

            ids.append(_ids)
            distances.append(_distances)
            documents.append(_documents)
            metadatas.append(_metadatas)

        return SearchResult(
            **{
                "ids": ids,
                "distances": distances,
                "documents": documents,
                "metadatas": metadatas,
            }
        )

    def _create_collection(self, collection_name: str, dimension: int):
        schema = self.client.create_schema(
            auto_id=False,
            enable_dynamic_field=True,
        )
        schema.add_field(
            field_name="id",
            datatype=DataType.VARCHAR,
            is_primary=True,
            max_length=65535,
        )
        schema.add_field(
            field_name="vector",
            datatype=DataType.FLOAT_VECTOR,
            dim=dimension,
            description="vector",
        )
        schema.add_field(field_name="data", datatype=DataType.JSON, description="data")
        schema.add_field(
            field_name="metadata", datatype=DataType.JSON, description="metadata"
        )

        index_params = self.client.prepare_index_params()
        index_params.add_index(
            field_name="vector",
            index_type="HNSW",
            metric_type="COSINE",
            params={"M": 16, "efConstruction": 100},
        )

        self.client.create_collection(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            schema=schema,
            index_params=index_params,
        )

    def has_collection(self, collection_name: str) -> bool:
        # Check if the collection exists based on the collection name.
        collection_name = collection_name.replace("-", "_")
        return self.client.has_collection(
            collection_name=f"{self.collection_prefix}_{collection_name}"
        )

    def delete_collection(self, collection_name: str):
        # Delete the collection based on the collection name.
        collection_name = collection_name.replace("-", "_")
        return self.client.drop_collection(
            collection_name=f"{self.collection_prefix}_{collection_name}"
        )

    def search(
        self, collection_name: str, vectors: list[list[float | int]], limit: int
    ) -> Optional[SearchResult]:
        # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
        collection_name = collection_name.replace("-", "_")
        result = self.client.search(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            data=vectors,
            limit=limit,
            output_fields=["data", "metadata"],
        )

        return self._result_to_search_result(result)

    def query(self, collection_name: str, filter: dict, limit: Optional[int] = None):
        # Construct the filter string for querying
        collection_name = collection_name.replace("-", "_")
        if not self.has_collection(collection_name):
            return None

        filter_string = " && ".join(
            [
                f'metadata["{key}"] == {json.dumps(value)}'
                for key, value in filter.items()
            ]
        )

        max_limit = 16383  # The maximum number of records per request
        all_results = []

        if limit is None:
            limit = float("inf")  # Use infinity as a placeholder for no limit

        # Initialize offset and remaining to handle pagination
        offset = 0
        remaining = limit

        try:
            # Loop until there are no more items to fetch or the desired limit is reached
            while remaining > 0:
                print("remaining", remaining)
                current_fetch = min(
                    max_limit, remaining
                )  # Determine how many items to fetch in this iteration

                results = self.client.query(
                    collection_name=f"{self.collection_prefix}_{collection_name}",
                    filter=filter_string,
                    output_fields=["*"],
                    limit=current_fetch,
                    offset=offset,
                )

                if not results:
                    break

                all_results.extend(results)
                results_count = len(results)
                remaining -= (
                    results_count  # Decrease remaining by the number of items fetched
                )
                offset += results_count

                # Break the loop if the results returned are less than the requested fetch count
                if results_count < current_fetch:
                    break

            print(all_results)
            return self._result_to_get_result([all_results])
        except Exception as e:
            print(e)
            return None

    def get(self, collection_name: str) -> Optional[GetResult]:
        # Get all the items in the collection.
        collection_name = collection_name.replace("-", "_")
        result = self.client.query(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            filter='id != ""',
        )
        return self._result_to_get_result([result])

    def insert(self, collection_name: str, items: list[VectorItem]):
        # Insert the items into the collection, if the collection does not exist, it will be created.
        collection_name = collection_name.replace("-", "_")
        if not self.client.has_collection(
            collection_name=f"{self.collection_prefix}_{collection_name}"
        ):
            self._create_collection(
                collection_name=collection_name, dimension=len(items[0]["vector"])
            )

        return self.client.insert(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            data=[
                {
                    "id": item["id"],
                    "vector": item["vector"],
                    "data": {"text": item["text"]},
                    "metadata": item["metadata"],
                }
                for item in items
            ],
        )

    def upsert(self, collection_name: str, items: list[VectorItem]):
        # Update the items in the collection, if the items are not present, insert them. If the collection does not exist, it will be created.
        collection_name = collection_name.replace("-", "_")
        if not self.client.has_collection(
            collection_name=f"{self.collection_prefix}_{collection_name}"
        ):
            self._create_collection(
                collection_name=collection_name, dimension=len(items[0]["vector"])
            )

        return self.client.upsert(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            data=[
                {
                    "id": item["id"],
                    "vector": item["vector"],
                    "data": {"text": item["text"]},
                    "metadata": item["metadata"],
                }
                for item in items
            ],
        )

    def delete(
        self,
        collection_name: str,
        ids: Optional[list[str]] = None,
        filter: Optional[dict] = None,
    ):
        # Delete the items from the collection based on the ids.
        collection_name = collection_name.replace("-", "_")
        if ids:
            return self.client.delete(
                collection_name=f"{self.collection_prefix}_{collection_name}",
                ids=ids,
            )
        elif filter:
            # Convert the filter dictionary to a string using JSON_CONTAINS.
            filter_string = " && ".join(
                [
                    f'metadata["{key}"] == {json.dumps(value)}'
                    for key, value in filter.items()
                ]
            )

            return self.client.delete(
                collection_name=f"{self.collection_prefix}_{collection_name}",
                filter=filter_string,
            )

    def reset(self):
        # Resets the database. This will delete all collections and item entries.
        collection_names = self.client.list_collections()
        for collection_name in collection_names:
            if collection_name.startswith(self.collection_prefix):
                self.client.drop_collection(collection_name=collection_name)
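The `query` and `delete` methods above compile a metadata dict into a Milvus boolean filter expression. A standalone sketch of that compilation step (editor's addition, not part of the commit; pure Python, no Milvus connection needed):

# Editor's sketch (not part of the diff): the filter expression built in
# MilvusClient.query()/delete() above, reproduced as a standalone helper.
import json

def build_milvus_filter(filter: dict) -> str:
    # JSON-encode each value so strings are quoted and numbers are not.
    return " && ".join(
        f'metadata["{key}"] == {json.dumps(value)}' for key, value in filter.items()
    )

print(build_milvus_filter({"file_id": "abc", "page": 3}))
# -> metadata["file_id"] == "abc" && metadata["page"] == 3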
179
backend/open_webui/apps/retrieval/vector/dbs/qdrant.py
Normal file
@@ -0,0 +1,179 @@
from typing import Optional

from qdrant_client import QdrantClient as Qclient
from qdrant_client.http.models import PointStruct
from qdrant_client.models import models

from open_webui.apps.retrieval.vector.main import VectorItem, SearchResult, GetResult
from open_webui.config import QDRANT_URI

NO_LIMIT = 999999999


class QdrantClient:
    def __init__(self):
        self.collection_prefix = "open-webui"
        self.QDRANT_URI = QDRANT_URI
        self.client = Qclient(url=self.QDRANT_URI) if self.QDRANT_URI else None

    def _result_to_get_result(self, points) -> GetResult:
        ids = []
        documents = []
        metadatas = []

        for point in points:
            payload = point.payload
            ids.append(point.id)
            documents.append(payload["text"])
            metadatas.append(payload["metadata"])

        return GetResult(
            **{
                "ids": [ids],
                "documents": [documents],
                "metadatas": [metadatas],
            }
        )

    def _create_collection(self, collection_name: str, dimension: int):
        collection_name_with_prefix = f"{self.collection_prefix}_{collection_name}"
        self.client.create_collection(
            collection_name=collection_name_with_prefix,
            vectors_config=models.VectorParams(
                size=dimension, distance=models.Distance.COSINE
            ),
        )

        print(f"collection {collection_name_with_prefix} successfully created!")

    def _create_collection_if_not_exists(self, collection_name, dimension):
        if not self.has_collection(collection_name=collection_name):
            self._create_collection(
                collection_name=collection_name, dimension=dimension
            )

    def _create_points(self, items: list[VectorItem]):
        return [
            PointStruct(
                id=item["id"],
                vector=item["vector"],
                payload={"text": item["text"], "metadata": item["metadata"]},
            )
            for item in items
        ]

    def has_collection(self, collection_name: str) -> bool:
        return self.client.collection_exists(
            f"{self.collection_prefix}_{collection_name}"
        )

    def delete_collection(self, collection_name: str):
        return self.client.delete_collection(
            collection_name=f"{self.collection_prefix}_{collection_name}"
        )

    def search(
        self, collection_name: str, vectors: list[list[float | int]], limit: int
    ) -> Optional[SearchResult]:
        # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
        if limit is None:
            limit = NO_LIMIT  # otherwise qdrant would set limit to 10!

        query_response = self.client.query_points(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            query=vectors[0],
            limit=limit,
        )
        get_result = self._result_to_get_result(query_response.points)
        return SearchResult(
            ids=get_result.ids,
            documents=get_result.documents,
            metadatas=get_result.metadatas,
            distances=[[point.score for point in query_response.points]],
        )

    def query(self, collection_name: str, filter: dict, limit: Optional[int] = None):
        # Construct the filter string for querying
        if not self.has_collection(collection_name):
            return None
        try:
            if limit is None:
                limit = NO_LIMIT  # otherwise qdrant would set limit to 10!

            field_conditions = []
            for key, value in filter.items():
                field_conditions.append(
                    models.FieldCondition(
                        key=f"metadata.{key}", match=models.MatchValue(value=value)
                    )
                )

            points = self.client.query_points(
                collection_name=f"{self.collection_prefix}_{collection_name}",
                query_filter=models.Filter(should=field_conditions),
                limit=limit,
            )
            return self._result_to_get_result(points.points)
        except Exception as e:
            print(e)
            return None

    def get(self, collection_name: str) -> Optional[GetResult]:
        # Get all the items in the collection.
        points = self.client.query_points(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            limit=NO_LIMIT,  # otherwise qdrant would set limit to 10!
        )
        return self._result_to_get_result(points.points)

    def insert(self, collection_name: str, items: list[VectorItem]):
        # Insert the items into the collection, if the collection does not exist, it will be created.
        self._create_collection_if_not_exists(collection_name, len(items[0]["vector"]))
        points = self._create_points(items)
        self.client.upload_points(f"{self.collection_prefix}_{collection_name}", points)

    def upsert(self, collection_name: str, items: list[VectorItem]):
        # Update the items in the collection, if the items are not present, insert them. If the collection does not exist, it will be created.
        self._create_collection_if_not_exists(collection_name, len(items[0]["vector"]))
        points = self._create_points(items)
        return self.client.upsert(f"{self.collection_prefix}_{collection_name}", points)

    def delete(
        self,
        collection_name: str,
        ids: Optional[list[str]] = None,
        filter: Optional[dict] = None,
    ):
        # Delete the items from the collection based on the ids.
        # (Stray trailing commas after the append(...) calls, which turned each
        # statement into a discarded one-element tuple, have been removed.)
        field_conditions = []

        if ids:
            for id_value in ids:
                field_conditions.append(
                    models.FieldCondition(
                        key="metadata.id",
                        match=models.MatchValue(value=id_value),
                    )
                )
        elif filter:
            for key, value in filter.items():
                field_conditions.append(
                    models.FieldCondition(
                        key=f"metadata.{key}",
                        match=models.MatchValue(value=value),
                    )
                )

        return self.client.delete(
            collection_name=f"{self.collection_prefix}_{collection_name}",
            points_selector=models.FilterSelector(
                filter=models.Filter(must=field_conditions)
            ),
        )

    def reset(self):
        # Resets the database. This will delete all collections and item entries.
        collection_names = self.client.get_collections().collections
        for collection_name in collection_names:
            if collection_name.name.startswith(self.collection_prefix):
                self.client.delete_collection(collection_name=collection_name.name)
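A hedged usage sketch for the Qdrant backend (editor's addition, not part of the commit). Note that the constructor only wraps a real client when `QDRANT_URI` is configured; otherwise `self.client` is `None` and every call would fail.

# Editor's sketch (not part of the diff); assumes QDRANT_URI is configured.
qdrant = QdrantClient()
if qdrant.client is not None:
    qdrant.insert("docs", items)  # reusing the `items` list from the Chroma sketch
    hits = qdrant.search("docs", [items[0]["vector"]], limit=5)
    if hits:
        print(hits.ids, hits.distances)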
19
backend/open_webui/apps/retrieval/vector/main.py
Normal file
@@ -0,0 +1,19 @@
from pydantic import BaseModel
from typing import Optional, List, Any


class VectorItem(BaseModel):
    id: str
    text: str
    vector: List[float | int]
    metadata: Any


class GetResult(BaseModel):
    ids: Optional[List[List[str]]]
    documents: Optional[List[List[str]]]
    metadatas: Optional[List[List[Any]]]


class SearchResult(GetResult):
    distances: Optional[List[List[float | int]]]
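These models pin down the wire format shared by all three backends: result fields are lists of lists, one inner list per query vector. A quick sketch of what validates (editor's addition, not part of the commit):

# Editor's sketch (not part of the diff): the shapes these models enforce.
item = VectorItem(
    id="doc-1",
    text="hello world",
    vector=[0.1, 0.2, 0.3],
    metadata={"source": "example.txt"},
)
result = SearchResult(
    ids=[["doc-1"]],                      # one inner list per query vector
    documents=[["hello world"]],
    metadatas=[[{"source": "example.txt"}]],
    distances=[[0.12]],
)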
backend/open_webui/apps/retrieval/web/brave.py
@@ -1,15 +1,17 @@
 import logging
+from typing import Optional
 
 import requests
 
-from apps.rag.search.main import SearchResult
-from config import SRC_LOG_LEVELS
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
+from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
-def search_brave(api_key: str, query: str, count: int) -> list[SearchResult]:
+def search_brave(
+    api_key: str, query: str, count: int, filter_list: Optional[list[str]] = None
+) -> list[SearchResult]:
     """Search using Brave's Search API and return the results as a list of SearchResult objects.
 
     Args:
@@ -29,6 +31,9 @@ def search_brave(api_key: str, query: str, count: int) -> list[SearchResult]:
 
     json_response = response.json()
     results = json_response.get("web", {}).get("results", [])
+    if filter_list:
+        results = get_filtered_results(results, filter_list)
+
     return [
         SearchResult(
             link=result["url"], title=result.get("title"), snippet=result.get("snippet")
backend/open_webui/apps/retrieval/web/duckduckgo.py
@@ -1,14 +1,17 @@
 import logging
+from typing import Optional
 
-from apps.rag.search.main import SearchResult
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from duckduckgo_search import DDGS
-from config import SRC_LOG_LEVELS
+from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
-def search_duckduckgo(query: str, count: int) -> list[SearchResult]:
+def search_duckduckgo(
+    query: str, count: int, filter_list: Optional[list[str]] = None
+) -> list[SearchResult]:
     """
     Search using DuckDuckGo's Search API and return the results as a list of SearchResult objects.
     Args:
@@ -16,7 +19,7 @@ def search_duckduckgo(query: str, count: int) -> list[SearchResult]:
         count (int): The number of results to return
 
     Returns:
-        List[SearchResult]: A list of search results
+        list[SearchResult]: A list of search results
     """
     # Use the DDGS context manager to create a DDGS object
     with DDGS() as ddgs:
@@ -41,6 +44,7 @@ def search_duckduckgo(query: str, count: int) -> list[SearchResult]:
                     snippet=result.get("body"),
                 )
             )
-    print(results)
+    if filter_list:
+        results = get_filtered_results(results, filter_list)
     # Return the list of search results
     return results
backend/open_webui/apps/retrieval/web/google_pse.py
@@ -1,17 +1,20 @@
 import json
 import logging
+from typing import Optional
 
 import requests
 
-from apps.rag.search.main import SearchResult
-from config import SRC_LOG_LEVELS
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
+from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 def search_google_pse(
-    api_key: str, search_engine_id: str, query: str, count: int
+    api_key: str,
+    search_engine_id: str,
+    query: str,
+    count: int,
+    filter_list: Optional[list[str]] = None,
 ) -> list[SearchResult]:
     """Search using Google's Programmable Search Engine API and return the results as a list of SearchResult objects.
 
@@ -35,6 +38,8 @@ def search_google_pse(
 
     json_response = response.json()
    results = json_response.get("items", [])
+    if filter_list:
+        results = get_filtered_results(results, filter_list)
     return [
         SearchResult(
             link=result["link"],
41
backend/open_webui/apps/retrieval/web/jina_search.py
Normal file
@@ -0,0 +1,41 @@
import logging

import requests
from open_webui.apps.retrieval.web.main import SearchResult
from open_webui.env import SRC_LOG_LEVELS
from yarl import URL

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


def search_jina(query: str, count: int) -> list[SearchResult]:
    """
    Search using Jina's Search API and return the results as a list of SearchResult objects.
    Args:
        query (str): The query to search for
        count (int): The number of results to return

    Returns:
        list[SearchResult]: A list of search results
    """
    jina_search_endpoint = "https://s.jina.ai/"
    headers = {
        "Accept": "application/json",
    }
    url = str(URL(jina_search_endpoint + query))
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    data = response.json()

    results = []
    for result in data["data"][:count]:
        results.append(
            SearchResult(
                link=result["url"],
                title=result.get("title"),
                snippet=result.get("content"),
            )
        )

    return results
22
backend/open_webui/apps/retrieval/web/main.py
Normal file
@@ -0,0 +1,22 @@
from typing import Optional
from urllib.parse import urlparse

from pydantic import BaseModel


def get_filtered_results(results, filter_list):
    if not filter_list:
        return results
    filtered_results = []
    for result in results:
        url = result.get("url") or result.get("link", "")
        domain = urlparse(url).netloc
        if any(domain.endswith(filtered_domain) for filtered_domain in filter_list):
            filtered_results.append(result)
    return filtered_results


class SearchResult(BaseModel):
    link: str
    title: Optional[str]
    snippet: Optional[str]
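`get_filtered_results` does suffix matching on the result URL's host, so a filter entry like "example.com" also admits its subdomains. A quick illustration with made-up results (editor's addition, not part of the commit):

# Editor's sketch (not part of the diff): suffix-matching on the result's host.
results = [
    {"url": "https://docs.example.com/a"},
    {"link": "https://other.org/b"},
]
print(get_filtered_results(results, ["example.com"]))
# -> [{'url': 'https://docs.example.com/a'}]  ('other.org' does not match)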
48
backend/open_webui/apps/retrieval/web/searchapi.py
Normal file
@@ -0,0 +1,48 @@
import logging
from typing import Optional
from urllib.parse import urlencode

import requests
from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
from open_webui.env import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


def search_searchapi(
    api_key: str,
    engine: str,
    query: str,
    count: int,
    filter_list: Optional[list[str]] = None,
) -> list[SearchResult]:
    """Search using searchapi.io's API and return the results as a list of SearchResult objects.

    Args:
        api_key (str): A searchapi.io API key
        query (str): The query to search for
    """
    url = "https://www.searchapi.io/api/v1/search"

    engine = engine or "google"

    payload = {"engine": engine, "q": query, "api_key": api_key}

    url = f"{url}?{urlencode(payload)}"
    response = requests.request("GET", url)

    json_response = response.json()
    log.info(f"results from searchapi search: {json_response}")

    results = sorted(
        json_response.get("organic_results", []), key=lambda x: x.get("position", 0)
    )
    if filter_list:
        results = get_filtered_results(results, filter_list)
    return [
        SearchResult(
            link=result["link"], title=result["title"], snippet=result["snippet"]
        )
        for result in results[:count]
    ]
@@ -1,18 +1,21 @@
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
import requests
|
||||
|
||||
from typing import List
|
||||
|
||||
from apps.rag.search.main import SearchResult
|
||||
from config import SRC_LOG_LEVELS
|
||||
from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
|
||||
from open_webui.env import SRC_LOG_LEVELS
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(SRC_LOG_LEVELS["RAG"])
|
||||
|
||||
|
||||
def search_searxng(
|
||||
query_url: str, query: str, count: int, **kwargs
|
||||
) -> List[SearchResult]:
|
||||
query_url: str,
|
||||
query: str,
|
||||
count: int,
|
||||
filter_list: Optional[list[str]] = None,
|
||||
**kwargs,
|
||||
) -> list[SearchResult]:
|
||||
"""
|
||||
Search a SearXNG instance for a given query and return the results as a list of SearchResult objects.
|
||||
|
||||
@@ -25,11 +28,12 @@ def search_searxng(
|
||||
|
||||
Keyword Args:
|
||||
language (str): Language filter for the search results; e.g., "en-US". Defaults to an empty string.
|
||||
safesearch (int): Safe search filter for safer web results; 0 = off, 1 = moderate, 2 = strict. Defaults to 1 (moderate).
|
||||
time_range (str): Time range for filtering results by date; e.g., "2023-04-05..today" or "all-time". Defaults to ''.
|
||||
categories: (Optional[List[str]]): Specific categories within which the search should be performed, defaulting to an empty string if not provided.
|
||||
categories: (Optional[list[str]]): Specific categories within which the search should be performed, defaulting to an empty string if not provided.
|
||||
|
||||
Returns:
|
||||
List[SearchResult]: A list of SearchResults sorted by relevance score in descending order.
|
||||
list[SearchResult]: A list of SearchResults sorted by relevance score in descending order.
|
||||
|
||||
Raise:
|
||||
requests.exceptions.RequestException: If a request error occurs during the search process.
|
||||
@@ -37,6 +41,7 @@ def search_searxng(
|
||||
|
||||
# Default values for optional parameters are provided as empty strings or None when not specified.
|
||||
language = kwargs.get("language", "en-US")
|
||||
safesearch = kwargs.get("safesearch", "1")
|
||||
time_range = kwargs.get("time_range", "")
|
||||
categories = "".join(kwargs.get("categories", []))
|
||||
|
||||
@@ -44,6 +49,7 @@ def search_searxng(
|
||||
"q": query,
|
||||
"format": "json",
|
||||
"pageno": 1,
|
||||
"safesearch": safesearch,
|
||||
"language": language,
|
||||
"time_range": time_range,
|
||||
"categories": categories,
|
||||
@@ -75,6 +81,8 @@ def search_searxng(
|
||||
json_response = response.json()
|
||||
results = json_response.get("results", [])
|
||||
sorted_results = sorted(results, key=lambda x: x.get("score", 0), reverse=True)
|
||||
if filter_list:
|
||||
sorted_results = get_filtered_results(sorted_results, filter_list)
|
||||
return [
|
||||
SearchResult(
|
||||
link=result["url"], title=result.get("title"), snippet=result.get("content")
|
||||
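A hedged call sketch for the updated signature (editor's addition, not part of the commit). `query_url` points at a SearXNG instance's search endpoint, and the keyword arguments map onto the params dict built above; the instance URL here is illustrative.

# Editor's sketch (not part of the diff): calling the updated search_searxng.
results = search_searxng(
    "http://searxng.local/search",   # hypothetical instance URL
    "open webui",
    count=5,
    filter_list=["github.com"],      # keep only results from these domains
    language="en-US",
    safesearch=1,
    categories=["general"],
)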
backend/open_webui/apps/retrieval/web/serper.py
@@ -1,16 +1,18 @@
 import json
 import logging
+from typing import Optional
 
 import requests
 
-from apps.rag.search.main import SearchResult
-from config import SRC_LOG_LEVELS
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
+from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
-def search_serper(api_key: str, query: str, count: int) -> list[SearchResult]:
+def search_serper(
+    api_key: str, query: str, count: int, filter_list: Optional[list[str]] = None
+) -> list[SearchResult]:
     """Search using serper.dev's API and return the results as a list of SearchResult objects.
 
     Args:
@@ -29,6 +31,8 @@ def search_serper(api_key: str, query: str, count: int) -> list[SearchResult]:
     results = sorted(
         json_response.get("organic", []), key=lambda x: x.get("position", 0)
     )
+    if filter_list:
+        results = get_filtered_results(results, filter_list)
     return [
         SearchResult(
             link=result["link"],
backend/open_webui/apps/retrieval/web/serply.py
@@ -1,11 +1,10 @@
 import json
 import logging
-
-import requests
 from typing import Optional
 from urllib.parse import urlencode
 
-from apps.rag.search.main import SearchResult
-from config import SRC_LOG_LEVELS
+import requests
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
+from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
@@ -19,6 +18,7 @@ def search_serply(
     limit: int = 10,
     device_type: str = "desktop",
     proxy_location: str = "US",
+    filter_list: Optional[list[str]] = None,
 ) -> list[SearchResult]:
     """Search using serply.io's API and return the results as a list of SearchResult objects.
 
@@ -57,7 +57,8 @@ def search_serply(
     results = sorted(
         json_response.get("results", []), key=lambda x: x.get("realPosition", 0)
     )
-
+    if filter_list:
+        results = get_filtered_results(results, filter_list)
     return [
         SearchResult(
             link=result["link"],
backend/open_webui/apps/retrieval/web/serpstack.py
@@ -1,17 +1,20 @@
 import json
 import logging
+from typing import Optional
 
 import requests
 
-from apps.rag.search.main import SearchResult
-from config import SRC_LOG_LEVELS
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
+from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 def search_serpstack(
-    api_key: str, query: str, count: int, https_enabled: bool = True
+    api_key: str,
+    query: str,
+    count: int,
+    filter_list: Optional[list[str]] = None,
+    https_enabled: bool = True,
 ) -> list[SearchResult]:
     """Search using serpstack.com's API and return the results as a list of SearchResult objects.
 
@@ -35,6 +38,8 @@ def search_serpstack(
     results = sorted(
         json_response.get("organic_results", []), key=lambda x: x.get("position", 0)
     )
+    if filter_list:
+        results = get_filtered_results(results, filter_list)
     return [
         SearchResult(
             link=result["url"], title=result.get("title"), snippet=result.get("snippet")
38
backend/open_webui/apps/retrieval/web/tavily.py
Normal file
@@ -0,0 +1,38 @@
import logging

import requests
from open_webui.apps.retrieval.web.main import SearchResult
from open_webui.env import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


def search_tavily(api_key: str, query: str, count: int) -> list[SearchResult]:
    """Search using Tavily's Search API and return the results as a list of SearchResult objects.

    Args:
        api_key (str): A Tavily Search API key
        query (str): The query to search for

    Returns:
        list[SearchResult]: A list of search results
    """
    url = "https://api.tavily.com/search"
    data = {"query": query, "api_key": api_key}

    response = requests.post(url, json=data)
    response.raise_for_status()

    json_response = response.json()

    raw_search_results = json_response.get("results", [])

    return [
        SearchResult(
            link=result["url"],
            title=result.get("title", ""),
            snippet=result.get("content"),
        )
        for result in raw_search_results[:count]
    ]
357
backend/open_webui/apps/retrieval/web/testdata/searchapi.json
vendored
Normal file
File diff suppressed because one or more lines are too long
97
backend/open_webui/apps/retrieval/web/utils.py
Normal file
@@ -0,0 +1,97 @@
import socket
import urllib.parse
import validators
from typing import Union, Sequence, Iterator

from langchain_community.document_loaders import (
    WebBaseLoader,
)
from langchain_core.documents import Document


from open_webui.constants import ERROR_MESSAGES
from open_webui.config import ENABLE_RAG_LOCAL_WEB_FETCH
from open_webui.env import SRC_LOG_LEVELS

import logging

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])


def validate_url(url: Union[str, Sequence[str]]):
    if isinstance(url, str):
        if isinstance(validators.url(url), validators.ValidationError):
            raise ValueError(ERROR_MESSAGES.INVALID_URL)
        if not ENABLE_RAG_LOCAL_WEB_FETCH:
            # Local web fetch is disabled, filter out any URLs that resolve to private IP addresses
            parsed_url = urllib.parse.urlparse(url)
            # Get IPv4 and IPv6 addresses
            ipv4_addresses, ipv6_addresses = resolve_hostname(parsed_url.hostname)
            # Check if any of the resolved addresses are private
            # This is technically still vulnerable to DNS rebinding attacks, as we don't control WebBaseLoader
            for ip in ipv4_addresses:
                if validators.ipv4(ip, private=True):
                    raise ValueError(ERROR_MESSAGES.INVALID_URL)
            for ip in ipv6_addresses:
                if validators.ipv6(ip, private=True):
                    raise ValueError(ERROR_MESSAGES.INVALID_URL)
        return True
    elif isinstance(url, Sequence):
        return all(validate_url(u) for u in url)
    else:
        return False


def resolve_hostname(hostname):
    # Get address information
    addr_info = socket.getaddrinfo(hostname, None)

    # Extract IP addresses from address information
    ipv4_addresses = [info[4][0] for info in addr_info if info[0] == socket.AF_INET]
    ipv6_addresses = [info[4][0] for info in addr_info if info[0] == socket.AF_INET6]

    return ipv4_addresses, ipv6_addresses


class SafeWebBaseLoader(WebBaseLoader):
    """WebBaseLoader with enhanced error handling for URLs."""

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load text from the url(s) in web_path with error handling."""
        for path in self.web_paths:
            try:
                soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
                text = soup.get_text(**self.bs_get_text_kwargs)

                # Build metadata
                metadata = {"source": path}
                if title := soup.find("title"):
                    metadata["title"] = title.get_text()
                if description := soup.find("meta", attrs={"name": "description"}):
                    metadata["description"] = description.get(
                        "content", "No description found."
                    )
                if html := soup.find("html"):
                    metadata["language"] = html.get("lang", "No language found.")

                yield Document(page_content=text, metadata=metadata)
            except Exception as e:
                # Log the error and continue with the next URL
                log.error(f"Error loading {path}: {e}")


def get_web_loader(
    url: Union[str, Sequence[str]],
    verify_ssl: bool = True,
    requests_per_second: int = 2,
):
    # Check if the URL is valid
    if not validate_url(url):
        raise ValueError(ERROR_MESSAGES.INVALID_URL)
    return SafeWebBaseLoader(
        url,
        verify_ssl=verify_ssl,
        requests_per_second=requests_per_second,
        continue_on_failure=True,
    )
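Usage sketch for the loader factory above (editor's addition, not part of the commit): URL validation happens up front, and per-URL scrape failures are logged and skipped rather than aborting the batch.

# Editor's sketch (not part of the diff): fetching two pages with get_web_loader.
loader = get_web_loader(
    ["https://example.com", "https://example.org"],
    verify_ssl=True,
    requests_per_second=2,
)
for doc in loader.lazy_load():  # SafeWebBaseLoader logs and skips URLs that fail
    print(doc.metadata.get("title"), len(doc.page_content))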
219
backend/open_webui/apps/socket/main.py
Normal file
@@ -0,0 +1,219 @@
import asyncio
import socketio
import logging
import sys
import time

from open_webui.apps.webui.models.users import Users
from open_webui.env import (
    ENABLE_WEBSOCKET_SUPPORT,
    WEBSOCKET_MANAGER,
    WEBSOCKET_REDIS_URL,
)
from open_webui.utils.utils import decode_token
from open_webui.apps.socket.utils import RedisDict

from open_webui.env import (
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
)


logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["SOCKET"])


if WEBSOCKET_MANAGER == "redis":
    mgr = socketio.AsyncRedisManager(WEBSOCKET_REDIS_URL)
    sio = socketio.AsyncServer(
        cors_allowed_origins=[],
        async_mode="asgi",
        transports=(
            ["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]
        ),
        allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
        always_connect=True,
        client_manager=mgr,
    )
else:
    sio = socketio.AsyncServer(
        cors_allowed_origins=[],
        async_mode="asgi",
        transports=(
            ["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]
        ),
        allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
        always_connect=True,
    )


# Dictionaries to maintain the session, user, and usage pools

if WEBSOCKET_MANAGER == "redis":
    SESSION_POOL = RedisDict("open-webui:session_pool", redis_url=WEBSOCKET_REDIS_URL)
    USER_POOL = RedisDict("open-webui:user_pool", redis_url=WEBSOCKET_REDIS_URL)
    USAGE_POOL = RedisDict("open-webui:usage_pool", redis_url=WEBSOCKET_REDIS_URL)
else:
    SESSION_POOL = {}
    USER_POOL = {}
    USAGE_POOL = {}


# Timeout duration in seconds
TIMEOUT_DURATION = 3


async def periodic_usage_pool_cleanup():
    while True:
        now = int(time.time())
        for model_id, connections in list(USAGE_POOL.items()):
            # Creating a list of sids to remove if they have timed out
            expired_sids = [
                sid
                for sid, details in connections.items()
                if now - details["updated_at"] > TIMEOUT_DURATION
            ]

            for sid in expired_sids:
                del connections[sid]

            if not connections:
                log.debug(f"Cleaning up model {model_id} from usage pool")
                del USAGE_POOL[model_id]
            else:
                USAGE_POOL[model_id] = connections

        # Emit updated usage information after cleaning
        await sio.emit("usage", {"models": get_models_in_use()})

        await asyncio.sleep(TIMEOUT_DURATION)


app = socketio.ASGIApp(
    sio,
    socketio_path="/ws/socket.io",
)


def get_models_in_use():
    # List models that are currently in use
    models_in_use = list(USAGE_POOL.keys())
    return models_in_use


@sio.on("usage")
async def usage(sid, data):
    model_id = data["model"]
    # Record the timestamp for the last update
    current_time = int(time.time())

    # Store the new usage data and task
    USAGE_POOL[model_id] = {
        **(USAGE_POOL[model_id] if model_id in USAGE_POOL else {}),
        sid: {"updated_at": current_time},
    }

    # Broadcast the usage data to all clients
    await sio.emit("usage", {"models": get_models_in_use()})


@sio.event
async def connect(sid, environ, auth):
    user = None
    if auth and "token" in auth:
        data = decode_token(auth["token"])

        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

        if user:
            SESSION_POOL[sid] = user.id
            if user.id in USER_POOL:
                USER_POOL[user.id].append(sid)
            else:
                USER_POOL[user.id] = [sid]

            # print(f"user {user.name}({user.id}) connected with session ID {sid}")

            await sio.emit("user-count", {"count": len(USER_POOL.items())})
            await sio.emit("usage", {"models": get_models_in_use()})


@sio.on("user-join")
async def user_join(sid, data):
    # print("user-join", sid, data)

    auth = data["auth"] if "auth" in data else None
    if not auth or "token" not in auth:
        return

    data = decode_token(auth["token"])
    if data is None or "id" not in data:
        return

    user = Users.get_user_by_id(data["id"])
    if not user:
        return

    SESSION_POOL[sid] = user.id
    if user.id in USER_POOL:
        USER_POOL[user.id].append(sid)
    else:
        USER_POOL[user.id] = [sid]

    # print(f"user {user.name}({user.id}) connected with session ID {sid}")

    await sio.emit("user-count", {"count": len(USER_POOL.items())})


@sio.on("user-count")
async def user_count(sid):
    await sio.emit("user-count", {"count": len(USER_POOL.items())})


@sio.event
async def disconnect(sid):
    if sid in SESSION_POOL:
        user_id = SESSION_POOL[sid]
        del SESSION_POOL[sid]

        USER_POOL[user_id] = [_sid for _sid in USER_POOL[user_id] if _sid != sid]

        if len(USER_POOL[user_id]) == 0:
            del USER_POOL[user_id]

        await sio.emit("user-count", {"count": len(USER_POOL)})
    else:
        pass
        # print(f"Unknown session ID {sid} disconnected")


def get_event_emitter(request_info):
    async def __event_emitter__(event_data):
        await sio.emit(
            "chat-events",
            {
                "chat_id": request_info["chat_id"],
                "message_id": request_info["message_id"],
                "data": event_data,
            },
            to=request_info["session_id"],
        )

    return __event_emitter__


def get_event_call(request_info):
    async def __event_call__(event_data):
        response = await sio.call(
            "chat-events",
            {
                "chat_id": request_info["chat_id"],
                "message_id": request_info["message_id"],
                "data": event_data,
            },
            to=request_info["session_id"],
        )
        return response

    return __event_call__
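The two factories at the bottom close over a request's routing info so downstream code can push "chat-events" to exactly one session. A hedged sketch of how a caller might wire them up (editor's addition, not part of the commit; the ids are illustrative, the keys are the ones referenced above):

# Editor's sketch (not part of the diff): targeting one session with an emitter.
event_emitter = get_event_emitter(
    {"chat_id": "chat-1", "message_id": "msg-1", "session_id": "sid-1"}
)
# Inside an async handler:
#   await event_emitter({"type": "status", "data": {"description": "thinking..."}})
# get_event_call works the same way, but awaits a response from the client
# via sio.call instead of fire-and-forget sio.emit.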
59
backend/open_webui/apps/socket/utils.py
Normal file
@@ -0,0 +1,59 @@
import json
import redis


class RedisDict:
    def __init__(self, name, redis_url):
        self.name = name
        self.redis = redis.Redis.from_url(redis_url, decode_responses=True)

    def __setitem__(self, key, value):
        serialized_value = json.dumps(value)
        self.redis.hset(self.name, key, serialized_value)

    def __getitem__(self, key):
        value = self.redis.hget(self.name, key)
        if value is None:
            raise KeyError(key)
        return json.loads(value)

    def __delitem__(self, key):
        result = self.redis.hdel(self.name, key)
        if result == 0:
            raise KeyError(key)

    def __contains__(self, key):
        return self.redis.hexists(self.name, key)

    def __len__(self):
        return self.redis.hlen(self.name)

    def keys(self):
        return self.redis.hkeys(self.name)

    def values(self):
        return [json.loads(v) for v in self.redis.hvals(self.name)]

    def items(self):
        return [(k, json.loads(v)) for k, v in self.redis.hgetall(self.name).items()]

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def clear(self):
        self.redis.delete(self.name)

    def update(self, other=None, **kwargs):
        if other is not None:
            for k, v in other.items() if hasattr(other, "items") else other:
                self[k] = v
        for k, v in kwargs.items():
            self[k] = v

    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
        return self[key]
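RedisDict gives the pools in socket/main.py a dict-like facade over a single Redis hash, JSON-(de)serializing values on every access. A short sketch against a local Redis (editor's addition, not part of the commit; the URL is illustrative):

# Editor's sketch (not part of the diff): the dict-like facade over one Redis hash.
pool = RedisDict("open-webui:session_pool", redis_url="redis://localhost:6379/0")
pool["sid-1"] = "user-1"          # HSET under the hood; value stored as JSON
assert "sid-1" in pool            # HEXISTS
print(pool.get("missing"))        # -> None (default instead of KeyError)
del pool["sid-1"]                 # HDEL; raises KeyError if the field is absent

Note that every read and write is a separate Redis round trip, which is why the usage handler above re-assigns the whole entry (USAGE_POOL[model_id] = {...}) instead of mutating a fetched value in place.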
114
backend/open_webui/apps/webui/internal/db.py
Normal file
@@ -0,0 +1,114 @@
import json
import logging
from contextlib import contextmanager
from typing import Any, Optional

from open_webui.apps.webui.internal.wrappers import register_connection
from open_webui.env import (
    OPEN_WEBUI_DIR,
    DATABASE_URL,
    SRC_LOG_LEVELS,
    DATABASE_POOL_MAX_OVERFLOW,
    DATABASE_POOL_RECYCLE,
    DATABASE_POOL_SIZE,
    DATABASE_POOL_TIMEOUT,
)
from peewee_migrate import Router
from sqlalchemy import Dialect, create_engine, types
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import QueuePool, NullPool
from sqlalchemy.sql.type_api import _T
from typing_extensions import Self

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["DB"])


class JSONField(types.TypeDecorator):
    impl = types.Text
    cache_ok = True

    def process_bind_param(self, value: Optional[_T], dialect: Dialect) -> Any:
        return json.dumps(value)

    def process_result_value(self, value: Optional[_T], dialect: Dialect) -> Any:
        if value is not None:
            return json.loads(value)

    def copy(self, **kw: Any) -> Self:
        return JSONField(self.impl.length)

    def db_value(self, value):
        return json.dumps(value)

    def python_value(self, value):
        if value is not None:
            return json.loads(value)


# Workaround to handle the peewee migration
# This is required to ensure the peewee migration is handled before the alembic migration
def handle_peewee_migration(DATABASE_URL):
    db = None  # defined up front so the finally block cannot hit a NameError
    try:
        # Replace the postgresql:// with postgres:// to handle the peewee migration
        db = register_connection(DATABASE_URL.replace("postgresql://", "postgres://"))
        migrate_dir = OPEN_WEBUI_DIR / "apps" / "webui" / "internal" / "migrations"
        router = Router(db, logger=log, migrate_dir=migrate_dir)
        router.run()
        db.close()

    except Exception as e:
        log.error(f"Failed to initialize the database connection: {e}")
        raise
    finally:
        # Properly closing the database connection
        if db and not db.is_closed():
            db.close()

        # Assert if db connection has been closed
        assert db.is_closed(), "Database connection is still open."


handle_peewee_migration(DATABASE_URL)


SQLALCHEMY_DATABASE_URL = DATABASE_URL
if "sqlite" in SQLALCHEMY_DATABASE_URL:
    engine = create_engine(
        SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
    )
else:
    if DATABASE_POOL_SIZE > 0:
        engine = create_engine(
            SQLALCHEMY_DATABASE_URL,
            pool_size=DATABASE_POOL_SIZE,
            max_overflow=DATABASE_POOL_MAX_OVERFLOW,
            pool_timeout=DATABASE_POOL_TIMEOUT,
            pool_recycle=DATABASE_POOL_RECYCLE,
            pool_pre_ping=True,
            poolclass=QueuePool,
        )
    else:
        engine = create_engine(
            SQLALCHEMY_DATABASE_URL, pool_pre_ping=True, poolclass=NullPool
        )


SessionLocal = sessionmaker(
    autocommit=False, autoflush=False, bind=engine, expire_on_commit=False
)
Base = declarative_base()
Session = scoped_session(SessionLocal)


def get_session():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


get_db = contextmanager(get_session)
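`get_session` is a generator dependency in the FastAPI style, and `get_db` wraps it as a context manager for use outside request handlers. A hedged sketch (editor's addition, not part of the commit; the text() import is an assumption for raw SQL under SQLAlchemy 1.4+/2.x):

# Editor's sketch (not part of the diff): consuming the session helpers above.
from sqlalchemy import text  # assumed import for raw SQL

with get_db() as db:
    # Any ORM or Core usage works here; the session is closed on exit.
    row = db.execute(text("SELECT 1")).scalar()
    print(row)  # -> 1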
@@ -30,7 +30,7 @@ import peewee as pw
 from peewee_migrate import Migrator
 import json
 
-from utils.misc import parse_ollama_modelfile
+from open_webui.utils.misc import parse_ollama_modelfile
 
 with suppress(ImportError):
     import playhouse.postgres_ext as pw_pext
@@ -0,0 +1,48 @@
"""Peewee migrations -- 002_add_local_sharing.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)

"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your migrations here."""

    # Adding fields info to the 'user' table
    migrator.add_fields("user", info=pw.TextField(null=True))


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    # Remove the settings field
    migrator.remove_fields("user", "info")
@@ -0,0 +1,55 @@
"""Peewee migrations -- 009_add_models.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)

"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your migrations here."""

    @migrator.create_model
    class File(pw.Model):
        id = pw.TextField(unique=True)
        user_id = pw.TextField()
        filename = pw.TextField()
        meta = pw.TextField()
        created_at = pw.BigIntegerField(null=False)

        class Meta:
            table_name = "file"


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    migrator.remove_model("file")
@@ -0,0 +1,61 @@
"""Peewee migrations -- 009_add_models.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)

"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your migrations here."""

    @migrator.create_model
    class Function(pw.Model):
        id = pw.TextField(unique=True)
        user_id = pw.TextField()

        name = pw.TextField()
        type = pw.TextField()

        content = pw.TextField()
        meta = pw.TextField()

        created_at = pw.BigIntegerField(null=False)
        updated_at = pw.BigIntegerField(null=False)

        class Meta:
            table_name = "function"


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    migrator.remove_model("function")
@@ -0,0 +1,50 @@
"""Peewee migrations -- 009_add_models.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)

"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your migrations here."""

    migrator.add_fields("tool", valves=pw.TextField(null=True))
    migrator.add_fields("function", valves=pw.TextField(null=True))
    migrator.add_fields("function", is_active=pw.BooleanField(default=False))


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    migrator.remove_fields("tool", "valves")
    migrator.remove_fields("function", "valves")
    migrator.remove_fields("function", "is_active")
@@ -0,0 +1,45 @@
"""Peewee migrations -- 017_add_user_oauth_sub.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name
    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)
"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your migrations here."""

    migrator.add_fields(
        "user",
        oauth_sub=pw.TextField(null=True, unique=True),
    )


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    migrator.remove_fields("user", "oauth_sub")
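The `oauth_sub` column pairs `null=True` with `unique=True`: SQL unique indexes treat NULLs as distinct, so users without a linked OAuth identity can coexist while any stored `sub` claim stays one-to-one. A sketch of the lookup this enables, with a hypothetical stand-in `User` model:

```python
# Sketch of resolving a local user row from an OAuth "sub" claim.
# The User model here is a minimal stand-in, not application code.
import peewee as pw

db = pw.SqliteDatabase("webui.db")  # assumed local database


class User(pw.Model):
    id = pw.TextField(unique=True)
    email = pw.TextField()
    # null=True + unique=True: rows without an OAuth identity are allowed,
    # since unique indexes do not compare NULLs against each other.
    oauth_sub = pw.TextField(null=True, unique=True)

    class Meta:
        database = db
        table_name = "user"


db.connect()
db.create_tables([User], safe=True)


def get_user_by_oauth_sub(sub: str):
    # Returns the matching User or None; uniqueness guarantees at most one.
    return User.get_or_none(User.oauth_sub == sub)
```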
@@ -0,0 +1,49 @@
"""Peewee migrations -- 017_add_user_oauth_sub.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)

"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your migrations here."""

    migrator.add_fields(
        "function",
        is_global=pw.BooleanField(default=False),
    )


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    migrator.remove_fields("function", "is_global")
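A flag like `is_global` typically widens visibility queries from per-owner to everyone. A sketch of such a query, with the model shape assumed from the migrations above rather than taken from application code:

```python
# Hedged sketch: selecting functions visible to a given user — globally
# shared ones plus the user's own. Model shape is assumed, not verified.
import peewee as pw

db = pw.SqliteDatabase("webui.db")  # assumed local database


class Function(pw.Model):
    id = pw.TextField(unique=True)
    user_id = pw.TextField()
    is_global = pw.BooleanField(default=False)

    class Meta:
        database = db
        table_name = "function"


def visible_functions(user_id: str):
    # Global functions are shared with everyone; the rest stay per-owner.
    return Function.select().where(
        (Function.is_global == True) | (Function.user_id == user_id)  # noqa: E712
    )
```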
backend/open_webui/apps/webui/internal/wrappers.py (new file, 66 lines)
@@ -0,0 +1,66 @@
import logging
from contextvars import ContextVar

from open_webui.env import SRC_LOG_LEVELS
from peewee import *
from peewee import InterfaceError as PeeWeeInterfaceError
from peewee import PostgresqlDatabase
from playhouse.db_url import connect, parse
from playhouse.shortcuts import ReconnectMixin

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["DB"])

db_state_default = {"closed": None, "conn": None, "ctx": None, "transactions": None}
db_state = ContextVar("db_state", default=db_state_default.copy())


class PeeweeConnectionState(object):
    def __init__(self, **kwargs):
        super().__setattr__("_state", db_state)
        super().__init__(**kwargs)

    def __setattr__(self, name, value):
        self._state.get()[name] = value

    def __getattr__(self, name):
        value = self._state.get()[name]
        return value


class CustomReconnectMixin(ReconnectMixin):
    reconnect_errors = (
        # psycopg2
        (OperationalError, "termin"),
        (InterfaceError, "closed"),
        # peewee
        (PeeWeeInterfaceError, "closed"),
    )


class ReconnectingPostgresqlDatabase(CustomReconnectMixin, PostgresqlDatabase):
    pass


def register_connection(db_url):
    db = connect(db_url, unquote_password=True)
    if isinstance(db, PostgresqlDatabase):
        # Enable autoconnect and connection reuse for the PostgreSQL database
        db.autoconnect = True
        db.reuse_if_open = True
        log.info("Connected to PostgreSQL database")

        # Get the connection details
        connection = parse(db_url, unquote_password=True)

        # Use our custom database class that supports reconnection
        db = ReconnectingPostgresqlDatabase(**connection)
        db.connect(reuse_if_open=True)
    elif isinstance(db, SqliteDatabase):
        # Enable autoconnect for SQLite databases, managed by Peewee
        db.autoconnect = True
        db.reuse_if_open = True
        log.info("Connected to SQLite database")
    else:
        raise ValueError("Unsupported database connection")
    return db
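A hedged sketch of how this module is wired up at startup, following the common FastAPI-plus-peewee pattern: build the database with `register_connection()` and swap in the ContextVar-backed state so each asyncio task tracks its own connection (the URL value below is illustrative):

```python
# Hedged usage sketch for wrappers.py; the DATABASE_URL is an assumption,
# e.g. "postgres://user:pass@host/db" for PostgreSQL deployments.
from open_webui.apps.webui.internal.wrappers import (
    PeeweeConnectionState,
    register_connection,
)

DATABASE_URL = "sqlite:///webui.db"

db = register_connection(DATABASE_URL)
# Replace peewee's thread-local connection state with the ContextVar-based
# one above, isolating connections per asyncio task rather than per thread.
db._state = PeeweeConnectionState()
```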
Some files were not shown because too many files have changed in this diff.