# HuggingFace Space script.
# NOTE(review): the deployed Space was failing with a "Runtime error" —
# consistent with the empty repo_id and empty API token visible below.
import logging
import os

from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
# HuggingFace Endpoint initialization.
# Fix: repo_id was "" (an invalid model id) and the API token was hard-coded
# as "" in source — both break the endpoint at startup and a committed token
# would be a credential leak. Both now come from the environment, with a
# public instruct model as the repo default.
repo_id = os.getenv("HF_REPO_ID", "mistralai/Mistral-7B-Instruct-v0.2")

llmModel = HuggingFaceEndpoint(
    repo_id=repo_id,
    max_new_tokens=512,  # cap on tokens generated per call
    temperature=0.5,     # moderate sampling randomness
    # Set HUGGINGFACEHUB_API_TOKEN in the Space's secrets — never in code.
    huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN", ""),
    task="text-generation",
)
# Prompt template for log analysis: both {logs} and {query} are filled in
# at chain-invocation time.
prompt = PromptTemplate(
    input_variables=["logs", "query"],  # must match the keys passed to .run()
    template=(
        """ You are an expert log analyzer. Analyze the system logs provided below. Return only precise and concise answers to the questions asked, formatted clearly and without unnecessary elaboration.
Logs: {logs}
User's Query: Analyze the logs and answer the following question:{query}
Be concise and direct in your responses."""
    ),
)
# Conversation memory: records each (query, response) turn under "history".
# NOTE(review): the prompt template above never references {history}, so the
# accumulated history is stored but never injected into the prompt — confirm
# whether memory is actually wanted here.
memory = ConversationBufferMemory(
    input_key="query",      # which chain input is treated as the user's turn
    memory_key="history",   # variable name the memory exposes to the prompt
    return_messages=False,  # expose history as one string, not Message objects
)
# Wire the endpoint, prompt, and memory into a single chain.
# NOTE(review): LLMChain is deprecated in recent LangChain releases in favor
# of `prompt | llm` runnables — consider migrating when upgrading.
conversation_chain = LLMChain(llm=llmModel, prompt=prompt, memory=memory)
def generate_ai_response(user_query, logs):
    """Run the log-analysis chain for one user question.

    Args:
        user_query: The question to answer about the logs.
        logs: The raw log text to analyze.

    Returns:
        The model's answer string, or a human-readable error message if the
        chain fails. Best-effort: this function never raises.
    """
    try:
        return conversation_chain.run({"logs": logs, "query": user_query})
    except Exception as e:
        # Fix: was `print(e)` — log with full traceback instead so failures
        # are visible in server logs, but keep the original contract of
        # returning an error string to the caller.
        logging.getLogger(__name__).exception("AI response generation failed")
        return f"An error occurred: {e}"
# Example: generate_ai_response("who is prime minister of india", "<log text>")