# --- Model setup -------------------------------------------------------------
import os

# Hugging Face Hub credentials.
# SECURITY: never commit a real token here. setdefault() keeps a token already
# exported in the shell environment instead of clobbering it with "" (the
# original assignment overwrote any existing HUGGINGFACEHUB_API_TOKEN).
os.environ.setdefault("HUGGINGFACEHUB_API_TOKEN", "")

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub

# Base LLM: Llama-2 7B chat model served through the Hugging Face Inference API.
llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-2-7b-chat-hf",
    task="text-generation",
    model_kwargs={
        "max_new_tokens": 512,  # cap on generated completion length
        "temperature": 0.1,     # near-deterministic sampling
        "seed": 42,             # reproducible outputs across runs
    },
)
# --- Chat REPL ---------------------------------------------------------------
# Conversation history; the system message steers the assistant's persona.
messages = [
    SystemMessage(content="You're a helpful assistant"),
]

# Wrap the raw LLM so the message history is rendered with the model's
# chat template before each generation call.
chat_model = ChatHuggingFace(llm=llm)

# Read-eval-print chat loop. Type "quit"/"exit" (or press Ctrl-D) to leave;
# previously the loop was unbreakable and Ctrl-D raised an unhandled EOFError.
while True:
    try:
        question = input("You: ")
    except EOFError:
        break
    if question.strip().lower() in {"quit", "exit"}:
        break
    messages.append(HumanMessage(content=question))
    response = chat_model.invoke(messages)
    # Keep only the text content; appending the reply to the history
    # preserves conversational context for subsequent turns.
    # (Removed the leftover debug print of the raw AIMessage object,
    # which duplicated the bot's reply on screen.)
    reply = response.content
    messages.append(AIMessage(content=reply))
    print(f"Bot: {reply}")