frdel committed on
Commit
6aecd23
·
1 Parent(s): a730828

0.9.1 finalizing

Browse files
models.py CHANGED
@@ -13,6 +13,7 @@ from typing import (
13
  )
14
 
15
  from litellm import completion, acompletion, embedding
 
16
  from python.helpers import dotenv
17
  from python.helpers.dotenv import load_dotenv
18
  from python.helpers.rate_limiter import RateLimiter
@@ -34,6 +35,7 @@ from langchain.embeddings.base import Embeddings
34
 
35
  load_dotenv()
36
  os.environ['LITELLM_LOG'] = "ERROR" # only errors
 
37
 
38
 
39
  class ModelType(Enum):
@@ -428,11 +430,12 @@ def _get_litellm_chat(
428
  base_url = dotenv.get_dotenv_value(f"{provider_name.upper()}_BASE_URL")
429
 
430
  # If a base_url is set, ensure api_key is not passed to litellm
431
- if base_url:
432
- if "api_key" in kwargs:
433
- del kwargs["api_key"]
 
434
  # Only pass API key if no base_url is set and key is not a placeholder
435
- elif api_key and api_key not in ("None", "NA"):
436
  kwargs["api_key"] = api_key
437
 
438
  # for openrouter add app reference
@@ -459,11 +462,12 @@ def get_litellm_embedding(model_name: str, provider: str, **kwargs: Any):
459
  base_url = dotenv.get_dotenv_value(f"{provider.upper()}_BASE_URL")
460
 
461
  # If a base_url is set, ensure api_key is not passed to litellm
462
- if base_url:
463
- if "api_key" in kwargs:
464
- del kwargs["api_key"]
 
465
  # Only pass API key if no base_url is set and key is not a placeholder
466
- elif api_key and api_key not in ("None", "NA"):
467
  kwargs["api_key"] = api_key
468
 
469
  return LiteLLMEmbeddingWrapper(model=model_name, provider=provider, **kwargs)
 
13
  )
14
 
15
  from litellm import completion, acompletion, embedding
16
+ import litellm
17
  from python.helpers import dotenv
18
  from python.helpers.dotenv import load_dotenv
19
  from python.helpers.rate_limiter import RateLimiter
 
35
 
36
  load_dotenv()
37
  os.environ['LITELLM_LOG'] = "ERROR" # only errors
38
+ litellm.suppress_debug_info = True
39
 
40
 
41
  class ModelType(Enum):
 
430
  base_url = dotenv.get_dotenv_value(f"{provider_name.upper()}_BASE_URL")
431
 
432
  # If a base_url is set, ensure api_key is not passed to litellm
433
+ # > remove, this can be handled by api_key=None
434
+ # if base_url:
435
+ # if "api_key" in kwargs:
436
+ # del kwargs["api_key"]
437
  # Only pass API key if no base_url is set and key is not a placeholder
438
+ if api_key and api_key not in ("None", "NA"):
439
  kwargs["api_key"] = api_key
440
 
441
  # for openrouter add app reference
 
462
  base_url = dotenv.get_dotenv_value(f"{provider.upper()}_BASE_URL")
463
 
464
  # If a base_url is set, ensure api_key is not passed to litellm
465
+ # > remove, this can be handled by api_key=None
466
+ # if base_url:
467
+ # if "api_key" in kwargs:
468
+ # del kwargs["api_key"]
469
  # Only pass API key if no base_url is set and key is not a placeholder
470
+ if api_key and api_key not in ("None", "NA"):
471
  kwargs["api_key"] = api_key
472
 
473
  return LiteLLMEmbeddingWrapper(model=model_name, provider=provider, **kwargs)
python/helpers/files.py CHANGED
@@ -59,7 +59,6 @@ def load_plugin_variables(file: str, backup_dirs: list[str] | None = None) -> di
59
  # iterate backwards to skip imported superclasses
60
  for cls in reversed(class_list):
61
  if cls[1] is not VariablesPlugin and issubclass(cls[1], VariablesPlugin):
62
- PrintStyle().debug(f"Loading prompt variables from {plugin_file}")
63
  return cls[1]().get_variables() # type: ignore
64
  return {}
65
 
 
59
  # iterate backwards to skip imported superclasses
60
  for cls in reversed(class_list):
61
  if cls[1] is not VariablesPlugin and issubclass(cls[1], VariablesPlugin):
 
62
  return cls[1]().get_variables() # type: ignore
63
  return {}
64
 
python/helpers/memory.py CHANGED
@@ -30,7 +30,10 @@ from python.helpers.log import Log, LogItem
30
  from enum import Enum
31
  from agent import Agent, ModelConfig
32
  import models
 
33
 
 
 
34
 
35
  class MyFaiss(FAISS):
36
  # override aget_by_ids
 
30
  from enum import Enum
31
  from agent import Agent, ModelConfig
32
  import models
33
+ import logging
34
 
35
+ # Raise the log level so WARNING messages aren't shown
36
+ logging.getLogger("langchain_core.vectorstores.base").setLevel(logging.ERROR)
37
 
38
  class MyFaiss(FAISS):
39
  # override aget_by_ids
python/helpers/settings.py CHANGED
@@ -960,8 +960,8 @@ def get_default_settings() -> Settings:
960
 
961
  return Settings(
962
  version=_get_version(),
963
- chat_model_provider=ModelProvider.OPENAI.name,
964
- chat_model_name="gpt-4.1",
965
  chat_model_kwargs={"temperature": "0"},
966
  chat_model_ctx_length=100000,
967
  chat_model_ctx_history=0.7,
@@ -969,8 +969,8 @@ def get_default_settings() -> Settings:
969
  chat_model_rl_requests=0,
970
  chat_model_rl_input=0,
971
  chat_model_rl_output=0,
972
- util_model_provider=ModelProvider.OPENAI.name,
973
- util_model_name="gpt-4.1-nano",
974
  util_model_ctx_length=100000,
975
  util_model_ctx_input=0.7,
976
  util_model_kwargs={"temperature": "0"},
@@ -982,8 +982,8 @@ def get_default_settings() -> Settings:
982
  embed_model_kwargs={},
983
  embed_model_rl_requests=0,
984
  embed_model_rl_input=0,
985
- browser_model_provider=ModelProvider.OPENAI.name,
986
- browser_model_name="gpt-4.1",
987
  browser_model_vision=True,
988
  browser_model_kwargs={"temperature": "0"},
989
  api_keys={},
 
960
 
961
  return Settings(
962
  version=_get_version(),
963
+ chat_model_provider=ModelProvider.OPENROUTER.name,
964
+ chat_model_name="openai/gpt-4.1",
965
  chat_model_kwargs={"temperature": "0"},
966
  chat_model_ctx_length=100000,
967
  chat_model_ctx_history=0.7,
 
969
  chat_model_rl_requests=0,
970
  chat_model_rl_input=0,
971
  chat_model_rl_output=0,
972
+ util_model_provider=ModelProvider.OPENROUTER.name,
973
+ util_model_name="openai/gpt-4.1-nano",
974
  util_model_ctx_length=100000,
975
  util_model_ctx_input=0.7,
976
  util_model_kwargs={"temperature": "0"},
 
982
  embed_model_kwargs={},
983
  embed_model_rl_requests=0,
984
  embed_model_rl_input=0,
985
+ browser_model_provider=ModelProvider.OPENROUTER.name,
986
+ browser_model_name="openai/gpt-4.1",
987
  browser_model_vision=True,
988
  browser_model_kwargs={"temperature": "0"},
989
  api_keys={},
requirements.txt CHANGED
@@ -11,6 +11,7 @@ flaredantic==0.1.4
11
  GitPython==3.1.43
12
  inputimeout==1.0.4
13
  langchain-core==0.3.49
 
14
  openai-whisper==20240930
15
  lxml_html_clean==0.3.1
16
  markdown==3.7
 
11
  GitPython==3.1.43
12
  inputimeout==1.0.4
13
  langchain-core==0.3.49
14
+ langchain-community==0.3.19
15
  openai-whisper==20240930
16
  lxml_html_clean==0.3.1
17
  markdown==3.7
webui/components/messages/resize/message-resize-store.js CHANGED
@@ -13,6 +13,7 @@ const model = {
13
 
14
  _getDefaultSettings() {
15
  return {
 
16
  "message-agent": { minimized: true, maximized: false },
17
  "message-agent-response": { minimized: false, maximized: true },
18
  };
@@ -116,12 +117,12 @@ const model = {
116
  toggleCssProperty(
117
  `.${className} .message-body`,
118
  "max-height",
119
- setting.maximized ? undefined : "30em"
120
  );
121
  toggleCssProperty(
122
  `.${className} .message-body`,
123
  "overflow-y",
124
- setting.maximized ? undefined : "auto"
125
  );
126
  toggleCssProperty(
127
  `.${className} .message-body`,
 
13
 
14
  _getDefaultSettings() {
15
  return {
16
+ "message": { minimized: false, maximized: false },
17
  "message-agent": { minimized: true, maximized: false },
18
  "message-agent-response": { minimized: false, maximized: true },
19
  };
 
117
  toggleCssProperty(
118
  `.${className} .message-body`,
119
  "max-height",
120
+ setting.maximized ? "unset" : "30em"
121
  );
122
  toggleCssProperty(
123
  `.${className} .message-body`,
124
  "overflow-y",
125
+ setting.maximized ? "unset" : "auto"
126
  );
127
  toggleCssProperty(
128
  `.${className} .message-body`,
webui/css/messages.css CHANGED
@@ -244,7 +244,7 @@
244
  margin: 0;
245
  /* width: calc(100% - 4em); */
246
  margin-right: 4em;
247
- overflow-y: hidden;
248
  text-overflow: ellipsis;
249
  }
250
 
 
244
  margin: 0;
245
  /* width: calc(100% - 4em); */
246
  margin-right: 4em;
247
+ overflow: hidden;
248
  text-overflow: ellipsis;
249
  }
250
 
webui/index.js CHANGED
@@ -28,6 +28,10 @@ setupSidebarToggle();
28
  // Initialize tabs
29
  setupTabs();
30
 
 
 
 
 
31
  function isMobile() {
32
  return window.innerWidth <= 768;
33
  }
 
28
  // Initialize tabs
29
  setupTabs();
30
 
31
+ export function getAutoScroll() {
32
+ return autoScroll;
33
+ }
34
+
35
  function isMobile() {
36
  return window.innerWidth <= 768;
37
  }
webui/js/messages.js CHANGED
@@ -2,6 +2,7 @@
2
  import { openImageModal } from "./image_modal.js";
3
  import { marked } from "../vendor/marked/marked.esm.js";
4
  import { store as messageResizeStore } from "/components/messages/resize/message-resize-store.js";
 
5
 
6
  const chatHistory = document.getElementById("chat-history");
7
 
@@ -237,9 +238,10 @@ export function _drawMessage(
237
  }
238
 
239
  // autoscroll the body if needed
240
- setTimeout(() => {
241
- bodyDiv.scrollTop = bodyDiv.scrollHeight;
242
- }, 0);
 
243
 
244
  return messageDiv;
245
  }
@@ -361,7 +363,8 @@ export function drawMessageUser(
361
 
362
  const headingElement = document.createElement("h4");
363
  headingElement.classList.add("msg-heading");
364
- headingElement.innerHTML = "User message <span class='icon material-symbols-outlined'>person</span>";
 
365
  messageDiv.appendChild(headingElement);
366
 
367
  if (content && content.trim().length > 0) {
@@ -658,6 +661,7 @@ function drawKvps(container, kvps, latex) {
658
  }
659
 
660
  // autoscroll the KVP value if needed
 
661
  setTimeout(() => {
662
  tdiv.scrollTop = tdiv.scrollHeight;
663
  }, 0);
@@ -823,17 +827,17 @@ function convertPathsToLinks(str) {
823
 
824
  function adjustMarkdownRender(element) {
825
  // find all tables in the element
826
- const tables = element.querySelectorAll('table');
827
-
828
  // wrap each table with a div with class message-markdown-table-wrap
829
- tables.forEach(table => {
830
  // create wrapper div
831
- const wrapper = document.createElement('div');
832
- wrapper.className = 'message-markdown-table-wrap';
833
-
834
  // insert wrapper before table in the DOM
835
  table.parentNode.insertBefore(wrapper, table);
836
-
837
  // move table into wrapper
838
  wrapper.appendChild(table);
839
  });
 
2
  import { openImageModal } from "./image_modal.js";
3
  import { marked } from "../vendor/marked/marked.esm.js";
4
  import { store as messageResizeStore } from "/components/messages/resize/message-resize-store.js";
5
+ import { getAutoScroll } from "/index.js";
6
 
7
  const chatHistory = document.getElementById("chat-history");
8
 
 
238
  }
239
 
240
  // autoscroll the body if needed
241
+ // if (getAutoScroll()) #TODO needs a better redraw system
242
+ setTimeout(() => {
243
+ bodyDiv.scrollTop = bodyDiv.scrollHeight;
244
+ }, 0);
245
 
246
  return messageDiv;
247
  }
 
363
 
364
  const headingElement = document.createElement("h4");
365
  headingElement.classList.add("msg-heading");
366
+ headingElement.innerHTML =
367
+ "User message <span class='icon material-symbols-outlined'>person</span>";
368
  messageDiv.appendChild(headingElement);
369
 
370
  if (content && content.trim().length > 0) {
 
661
  }
662
 
663
  // autoscroll the KVP value if needed
664
+ // if (getAutoScroll()) #TODO needs a better redraw system
665
  setTimeout(() => {
666
  tdiv.scrollTop = tdiv.scrollHeight;
667
  }, 0);
 
827
 
828
  function adjustMarkdownRender(element) {
829
  // find all tables in the element
830
+ const tables = element.querySelectorAll("table");
831
+
832
  // wrap each table with a div with class message-markdown-table-wrap
833
+ tables.forEach((table) => {
834
  // create wrapper div
835
+ const wrapper = document.createElement("div");
836
+ wrapper.className = "message-markdown-table-wrap";
837
+
838
  // insert wrapper before table in the DOM
839
  table.parentNode.insertBefore(wrapper, table);
840
+
841
  // move table into wrapper
842
  wrapper.appendChild(table);
843
  });