File size: 1,472 Bytes
6648464
 
 
 
 
 
7bb1ee2
6648464
 
 
 
897b227
6648464
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import logging
import os

from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# HuggingFace Endpoint Initialization.
# Credentials and model id come from the environment so no secret is
# hardcoded in source; both default to "" to preserve the original
# placeholder behavior when the variables are unset.
repo_id = os.environ.get("HF_REPO_ID", "")
llmModel = HuggingFaceEndpoint(
    repo_id=repo_id,
    max_new_tokens=512,          # cap on generated tokens per response
    temperature=0.5,             # moderate randomness for analysis answers
    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN", ""),
    task="text-generation",
)

# Prompt template: instructs the model to act as a log analyzer and
# fills in the two runtime values, the raw logs and the user's question.
_LOG_ANALYSIS_TEMPLATE = """   You are an expert log analyzer. Analyze the system logs provided below. Return only precise and concise answers to the questions asked, formatted clearly and without unnecessary elaboration.
            Logs: {logs}
            User's Query: Analyze the logs and answer the following question:{query}
            Be concise and direct in your responses."""

prompt = PromptTemplate(
    template=_LOG_ANALYSIS_TEMPLATE,
    input_variables=["logs", "query"],
)

# Conversation memory: tracks prior turns keyed on the user's query so
# the chain can see the running history as a plain string.
memory = ConversationBufferMemory(
    memory_key="history",
    input_key="query",
    return_messages=False,
)

# Chain wiring: model + prompt + memory form the reusable Q&A chain.
conversation_chain = LLMChain(
    llm=llmModel,
    prompt=prompt,
    memory=memory,
)


def generate_ai_response(user_query, logs):
    """Run the log-analysis chain for one user question.

    Args:
        user_query: The question to answer about the logs.
        logs: The raw system logs to analyze (passed into the prompt).

    Returns:
        The model's response string on success, or an
        ``"An error occurred: ..."`` string if the chain raises.
    """
    try:
        # Run the conversation chain with both prompt variables.
        return conversation_chain.run({"logs": logs, "query": user_query})
    except Exception as e:
        # Boundary handler: record the full traceback instead of a bare
        # print, but keep the original best-effort error-string return
        # so callers never see an exception.
        logging.exception("generate_ai_response failed")
        return f"An error occurred: {e}"


# generate_ai_response ("who is prime minister of india")