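"""Minimal CLI for answering a question with either OpenAI or DeepSeek.

Builds a prompt | chat-model chain (LCEL) for the selected provider and
prints the model's answer to a question read from stdin.
"""
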
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from langchain_openai import ChatOpenAI
from langchain_deepseek import ChatDeepSeek

from dotenv import load_dotenv
import os

# Load API keys (OPENAI_API_KEY, DEEPSEEK_API_KEY) from a local .env file.
load_dotenv()

def get_openai_chain() -> Runnable:
    """Builds a prompt | model chain using OpenAI's chat model."""
    prompt = PromptTemplate.from_template("Answer this: {question}")
    llm = ChatOpenAI(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini")
    # LCEL pipe syntax instead of the deprecated LLMChain, so .invoke()
    # returns an AIMessage and .content works in the __main__ block below.
    return prompt | llm

def get_deepseek_chain() -> Runnable:
    """Builds a prompt | model chain using DeepSeek's chat model."""
    prompt = PromptTemplate.from_template("Answer this: {question}")
    llm = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"), model="deepseek-chat")
    return prompt | llm

def get_chain_by_model(model_name: str) -> Runnable:
    """Chooses the chain based on model selection"""
    if model_name == "openai":
        return get_openai_chain()
    elif model_name == "deepseek":
        return get_deepseek_chain()
    else:
        raise ValueError("Unsupported model. Choose 'openai' or 'deepseek'.")


if __name__ == "__main__":
    question = input("Enter your question: ").strip()

    model = input("Choose model (openai / deepseek): ").strip().lower()

    chain = get_chain_by_model(model)
    # Both chains end in a chat model, so .invoke() returns an AIMessage.
    response = chain.invoke({"question": question})
    print(f"\n[Response]:\n{response.content}")