import os

# Paste your Hugging Face API token here before running.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = ""

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceHub
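
# The prompt template exposes two variables: {chat_history}, filled in by the
# memory object on every turn, and {question}, supplied by the user.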
template = """You are a friendly chatbot engaging in a conversation with a human.

Previous conversation:
{chat_history}

New human question: {question}
Response:"""
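

# Wrap a model hosted on the Hugging Face Hub as a LangChain LLM.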
def get_pipeline(model_name):
    llm = HuggingFaceHub(
        repo_id=model_name,
        task="text-generation",
        model_kwargs={
            "max_new_tokens": 250,       # cap on the length of each reply
            "top_k": 30,                 # sample only from the 30 most likely tokens
            "temperature": 0.1,          # low temperature -> focused, near-deterministic output
            "repetition_penalty": 1.03,  # mildly discourage repeated phrases
        },
    )
    return llm
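

# Wire the pieces together: the model, a buffer memory that stores the whole
# conversation under the "chat_history" key, the prompt, and the chain.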
chatbot = get_pipeline("mistralai/Mistral-7B-Instruct-v0.2")
memory = ConversationBufferMemory(memory_key="chat_history")
prompt_template = PromptTemplate.from_template(template)
conversation = LLMChain(llm=chatbot, prompt=prompt_template, verbose=True, memory=memory)
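
# Simple chat loop: each turn is appended to memory, so the model always sees
# the full history. Press Ctrl+C to stop.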
while True:
    question = input("You: ")
    # Calling the chain returns a dict holding the inputs, the accumulated
    # chat history, and the generated reply under the "text" key.
    response = conversation({"question": question})
    print("-" * 50)
    print(response["text"])
    print("-" * 50)
    print()