Update llm_handler.py
llm_handler.py  (+12 -5)
CHANGED
@@ -2,15 +2,13 @@ import requests
 import json
 from openai import OpenAI
 from params import OPENAI_MODEL, OPENAI_API_KEY
-import llamanet
 
 # Add this at the top of the file
 local_model_base_url = "http://localhost:11434/v1"
 anything_llm_workspace = "<input-workspace-name-here>"
 
-llamanet.run()
 # Create an instance of the OpenAI class
-client = OpenAI(api_key="dummy_key", base_url=
+client = OpenAI(api_key="dummy_key", base_url=local_model_base_url)
 
 def set_local_model_base_url(url):
     global local_model_base_url
@@ -22,8 +20,18 @@ def set_anything_llm_workspace(workspace):
 
 def send_to_chatgpt(msg_list):
     try:
+        # Modify this part to use llamanet conditionally
+        if OPENAI_MODEL.startswith("https://"):
+            # This is a llamanet model
+            import llamanet
+            llamanet.run()
+            client = OpenAI()
+        else:
+            # Use the existing client for other cases
+            client = OpenAI(api_key="dummy_key", base_url=local_model_base_url)
+
         completion = client.chat.completions.create(
-            model=
+            model=OPENAI_MODEL,
             messages=msg_list,
            temperature=0.6,
             stream=True
@@ -42,7 +50,6 @@ def send_to_chatgpt(msg_list):
         return f"Error: {str(e)}", None
 
 def send_to_anything_llm(msg_list):
-
     url = f'http://localhost:3001/api/v1/workspace/{anything_llm_workspace}/chat'
     headers = {
         'accept': 'application/json',
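Notes on the change:

The new branch keys off the shape of the model string: an identifier beginning with "https://" is treated as a GGUF checkpoint URL and routed through llamanet, whose llamanet.run() starts a local OpenAI-compatible server; that is presumably why the llamanet branch constructs OpenAI() with no explicit api_key or base_url. A minimal sketch of the params.py this implies; the concrete values below are illustrative assumptions, not part of this commit:

# params.py -- example values (assumptions, not shown in this commit)

# A plain model name is served by the local OpenAI-compatible server
# (for example Ollama at http://localhost:11434/v1):
OPENAI_MODEL = "llama3"

# A GGUF URL instead makes send_to_chatgpt() take the llamanet branch:
# OPENAI_MODEL = "https://huggingface.co/<repo>/resolve/main/<model>.gguf"

# Local backends ignore the key; "dummy_key" stands in for it.
OPENAI_API_KEY = "dummy_key"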
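Both versions of send_to_chatgpt create the completion with stream=True, and the except path returns an (error-message, None) tuple, but the middle of the function is unchanged and therefore not shown in this diff. For reference, a typical way to drain such a stream with the openai v1 Python client; the joining and return shape are assumptions about the hidden code:

def consume_stream(completion):
    # Each chunk carries an incremental delta; content is None on
    # role-only and finish chunks, so filter before joining.
    parts = []
    for chunk in completion:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            parts.append(delta)
    return "".join(parts)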
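The diff also cuts off inside send_to_anything_llm, right after the first header. A sketch of how a request to an AnythingLLM workspace-chat endpoint is commonly shaped; the Authorization scheme, the body fields, and the textResponse key are assumptions about AnythingLLM's API rather than anything this commit shows:

import requests

anything_llm_workspace = "<input-workspace-name-here>"

def send_to_anything_llm_sketch(msg_list, api_key="<anything-llm-api-key>"):
    url = f'http://localhost:3001/api/v1/workspace/{anything_llm_workspace}/chat'
    headers = {
        'accept': 'application/json',
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {api_key}',  # assumed auth scheme
    }
    # AnythingLLM takes a single message string per request; sending the
    # last user turn from an OpenAI-style message list is one option.
    payload = {'message': msg_list[-1]['content'], 'mode': 'chat'}
    resp = requests.post(url, headers=headers, json=payload)
    resp.raise_for_status()
    return resp.json().get('textResponse')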