import os
import time

from camel.configs import ChatGPTConfig, MistralConfig, OllamaConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType


class Chatbot:
    """Thin wrapper around CAMEL's ModelFactory that picks a backend by name."""

    def __init__(self, model_name: str = "gpt", max_token: int = 1024, temperature: float = 0.3):
        self.model_name = model_name.lower()
        self.max_token = max_token  # forwarded to the backend config as max_tokens
        self.temperature = temperature
        self.model = self._get_model()

    def _get_model(self):
        # Select a backend from the model name: "gpt" -> OpenAI, "mistral" -> Mistral,
        # anything else is treated as the tag of a local Ollama model.
        if self.model_name == "gpt":
            return ModelFactory.create(
                model_platform=ModelPlatformType.OPENAI,
                model_type=ModelType.GPT_4O_MINI,
                model_config_dict=ChatGPTConfig(
                    temperature=self.temperature, max_tokens=self.max_token
                ).as_dict(),
            )
        elif self.model_name == "mistral":
            return ModelFactory.create(
                model_platform=ModelPlatformType.MISTRAL,
                model_type=ModelType.MISTRAL_MEDIUM_3,
                model_config_dict=MistralConfig(
                    temperature=self.temperature, max_tokens=self.max_token
                ).as_dict(),
            )
        else:
            return ModelFactory.create(
                model_platform=ModelPlatformType.OLLAMA,
                model_type=self.model_name,
                model_config_dict=OllamaConfig(
                    temperature=self.temperature, max_tokens=self.max_token
                ).as_dict(),
            )

    def chat(self, user_input: str, system: str = "You are a helpful assistant.", suppress_error: bool = False) -> str:
        messages = [
            {"role": "system", "content": system},
            {"role": "user", "content": user_input},
        ]
        # Retry until the backend returns a response; transient failures (rate limits,
        # network hiccups) are reported unless suppress_error is set.
        while True:
            try:
                response = self.model.run(messages)
                return response.choices[0].message.content
            except Exception as e:
                if not suppress_error:
                    print(f"Error: {e}")
                time.sleep(2)


if __name__ == "__main__":
    chatbot_mistral = Chatbot(model_name="mistral")
    print(chatbot_mistral.chat("hi"))
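    # Sketches of the other two backends, assuming the relevant services are
    # reachable: the GPT branch expects an OpenAI API key in the environment, and
    # the Ollama branch expects a local Ollama server with the named model pulled.
    # The model tag "llama3" below is only an illustrative placeholder.
    if os.environ.get("OPENAI_API_KEY"):
        chatbot_gpt = Chatbot(model_name="gpt", max_token=256)
        print(chatbot_gpt.chat("Summarise what the CAMEL ModelFactory does in one sentence."))

    # chatbot_local = Chatbot(model_name="llama3", temperature=0.7)
    # print(chatbot_local.chat("hi", system="You are a terse assistant."))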