MapMisfit committed
Commit 5bc888a · verified · 1 Parent(s): d1eb4c5

Create app.py

Files changed (1):
  1. app.py +204 -0
app.py ADDED
@@ -0,0 +1,204 @@
import os
import gradio as gr
from dotenv import load_dotenv
import traceback  # For detailed error logging
import torch  # Required for Hugging Face transformers

# --- LangChain and Hugging Face Transformers Imports ---
from langchain_neo4j import Neo4jGraph
# from langchain_openai import ChatOpenAI  # Replaced by the Hugging Face LLM below
from langchain_community.llms import HuggingFacePipeline  # For using Hugging Face models
from langchain_community.chains.graph_qa.cypher import GraphCypherQAChain
from langchain_core.prompts import PromptTemplate

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# --- Environment Variable Loading ---
load_dotenv()
print("Environment variables loaded:")
print(f"NEO4J_URI: {'Set' if os.getenv('NEO4J_URI') else 'Not Set'}")
print(f"NEO4J_USER: {'Set' if os.getenv('NEO4J_USER') else 'Not Set'}")
print(f"NEO4J_PASSWORD: {'Set' if os.getenv('NEO4J_PASSWORD') else 'Not Set'}")
# OPENAI_API_KEY is no longer needed when using a local/Hugging Face model
# print(f"OPENAI_API_KEY: {'Set' if os.getenv('OPENAI_API_KEY') else 'Not Set'}")
print(f"HUGGINGFACE_HUB_TOKEN: {'Set' if os.getenv('HUGGINGFACE_HUB_TOKEN') else 'Not Set (may be needed for gated models)'}")

# --- Global LangChain chain variable ---
chain = None
graph_connection_error = None  # Stores any Neo4j connection error
llm_initialization_error = None  # Stores any LLM setup error

# --- Neo4j, Hugging Face LLM, and LangChain Setup ---
try:
    print("Attempting to connect to Neo4j...")
    graph = Neo4jGraph(
        url=os.getenv("NEO4J_URI"),
        username=os.getenv("NEO4J_USER"),
        password=os.getenv("NEO4J_PASSWORD"),
    )
    print("Successfully connected to Neo4j.")

    # --- Hugging Face LLM Setup ---
    print("Initializing Hugging Face LLM...")
    # IMPORTANT: Replace "gpt2" with your desired Hugging Face model.
    # For larger models such as Llama-2, make sure you have enough resources
    # (VRAM/RAM) and handle authentication for gated models, e.g. via
    # `huggingface-cli login` or by passing token=os.getenv("HUGGINGFACE_HUB_TOKEN")
    # to from_pretrained.
    model_id = "gpt2"  # REPLACE THIS with your chosen model, e.g. "NousResearch/Llama-2-7b-chat-hf"
    # model_id = "meta-llama/Llama-2-7b-chat-hf"  # Gated example: requires auth and substantial resources

    try:
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)  # trust_remote_code is needed by some models
        # For large models, device_map="auto" and torch_dtype are crucial.
        # For smaller models like gpt2 they can be simplified or omitted.
        hf_model = AutoModelForCausalLM.from_pretrained(
            model_id,
            trust_remote_code=True,
            device_map="auto",  # Automatically distributes model layers across available devices (CPU/GPU)
            torch_dtype=torch.float16,  # Use float16 for memory efficiency if the GPU supports it
            # token=os.getenv("HUGGINGFACE_HUB_TOKEN"),  # If your model requires a token
        )
        hf_model.eval()  # Set the model to evaluation mode

        # Create a text-generation pipeline.
        # Adjust max_new_tokens, do_sample, top_k, and temperature as needed
        # for your model and task.
        pipe = pipeline(
            "text-generation",
            model=hf_model,
            tokenizer=tokenizer,
            # torch_dtype=torch.bfloat16,  # Alternative dtype
            # device_map="auto",  # Already set during model loading
            max_new_tokens=512,  # Budget for the generated Cypher query + answer synthesis
            do_sample=True,
            temperature=0.1,  # Generation settings belong here, in the pipeline itself
            top_k=30,
            num_return_sequences=1,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,  # Often good to set for open-ended generation
        )
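
        # Optional smoke test (illustrative): a transformers text-generation
        # pipeline returns a list of dicts, each with a "generated_text" key:
        #   print(pipe("MATCH (n)")[0]["generated_text"])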

        # Wrap the pipeline in LangChain's HuggingFacePipeline.
        # Note: when a prebuilt pipeline is passed in, generation settings must
        # live in the pipeline itself (as above); the model_kwargs argument is
        # only consumed by HuggingFacePipeline.from_model_id.
        llm = HuggingFacePipeline(pipeline=pipe)
        print(f"Hugging Face LLM ({model_id}) initialized successfully.")

    except Exception as e_llm:
        llm_initialization_error = (
            f"Error initializing Hugging Face LLM ({model_id}): {e_llm}\n"
            "Full Traceback:\n" + traceback.format_exc()
        )
        print(llm_initialization_error)
        llm = None

    if llm:  # Proceed only if the LLM initialized successfully
        # --- GraphCypherQAChain Setup ---
        print("Initializing GraphCypherQAChain...")
        CYPHER_GENERATION_TEMPLATE = """You are an expert Neo4j Cypher translator.
Task: Convert the natural language question into a Cypher query that retrieves the relevant information from a Neo4j graph.
Instructions:
1. Use only the provided schema details. Do not use any other node labels or relationship types.
2. Understand the question and identify the key entities and relationships.
3. Construct a Cypher query that accurately reflects the question's intent.
4. Output ONLY the Cypher query. No explanations, no introductory text, no markdown. Just the query.

Schema:
{schema}

Question: {question}
Cypher Query:"""
        cypher_prompt = PromptTemplate.from_template(CYPHER_GENERATION_TEMPLATE)
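
        # Illustrative example (hypothetical schema with a Person label) of the
        # mapping this prompt is meant to elicit:
        #   Question: How many people are in the graph?
        #   Cypher Query: MATCH (p:Person) RETURN count(p)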

        # For the QA step, the default prompt is often fine, but it can be
        # customized as well. For example:
        # QA_TEMPLATE = """You are an assistant that answers questions based on query results from a graph database.
        # Use the provided query result to answer the question.
        # If the result is empty or does not contain the answer, say so.
        # Do not make up information.
        # Question: {question}
        # Cypher Query Result: {context}
        # Answer:"""
        # qa_prompt = PromptTemplate.from_template(QA_TEMPLATE)

        chain = GraphCypherQAChain.from_llm(
            llm=llm,
            graph=graph,
            verbose=True,
            return_intermediate_steps=True,
            cypher_prompt=cypher_prompt,
            # qa_prompt=qa_prompt,  # Uncomment to use the custom QA prompt
            allow_dangerous_requests=True,  # Recent LangChain versions require this opt-in because the chain executes model-generated Cypher
        )
        print("LangChain integration with GraphCypherQAChain initialized successfully.")

    # If the LLM failed to initialize, the llm_initialization_error check in
    # process_query reports the problem to the user, so nothing happens here.

except Exception as e_graph:
    graph_connection_error = (
        f"Error setting up Neo4j connection: {e_graph}\n"
        "Full Traceback:\n" + traceback.format_exc()
    )
    print(graph_connection_error)
    chain = None

# --- Gradio Interface Function ---
def process_query(message: str, history: list):
    if graph_connection_error:
        return f"Application Initialization Error (Neo4j): {graph_connection_error}"
    if llm_initialization_error:
        return f"Application Initialization Error (LLM): {llm_initialization_error}"
    if not chain:
        return "Error: the LangChain QA chain is not available. Check the server logs for initialization issues."

    print(f"Processing message: {message}")
    try:
        result = chain.invoke({"query": message})
        print(f"Chain result: {result}")

        answer = result.get("result", "No answer found or an error occurred during processing.")
        intermediate_steps = result.get("intermediate_steps", [])
        generated_cypher = "Could not extract the Cypher query from intermediate steps."

        if isinstance(intermediate_steps, list) and intermediate_steps:
            if isinstance(intermediate_steps[0], dict) and "query" in intermediate_steps[0]:
                generated_cypher = intermediate_steps[0]["query"]
        # Depending on the chain's verbosity and internal structure, the Cypher
        # query may appear in a different shape or a later step; inspect
        # intermediate_steps more closely if the extraction above fails.
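
        # For reference, intermediate_steps typically looks like this
        # (illustrative values; the exact shape varies by version):
        #   [{"query": "MATCH (n) RETURN count(n)"}, {"context": [{"count(n)": 42}]}]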

        return f"📝 Generated Cypher:\n```cypher\n{generated_cypher}\n```\n\n💬 Answer:\n{answer}"

    except Exception as e:
        error_message = f"Error processing query: {e}"
        print(error_message)
        print(traceback.format_exc())
        # Specific check for common Hugging Face model failures (e.g. out of memory)
        if "CUDA out of memory" in str(e):
            return ("LLM Error: CUDA out of memory. The model may be too large for your GPU. "
                    "Try a smaller model or reduce max_new_tokens.")
        return error_message

# --- Gradio Interface Definition ---
print("Setting up Gradio interface...")
demo = gr.ChatInterface(
    fn=process_query,
    chatbot=gr.Chatbot(height=600, type="messages"),
    title="Neo4j Graph Database Assistant (with Hugging Face LLM)",
    description="Ask questions about your Neo4j database. Model responses depend on the chosen Hugging Face LLM.",
    examples=[
        "How many nodes are in the database?",
        "What types of nodes exist?",
        "List all relationship types.",
    ],
    theme=gr.themes.Soft(),
    cache_examples=False,
)

# --- Main Execution ---
if __name__ == "__main__":
    print("Launching Gradio interface...")
    # To make the app reachable on the network (e.g. inside Docker):
    # demo.launch(server_name="0.0.0.0")
    demo.launch()
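
# To run locally (assumed setup): install the dependencies imported above
# (gradio, python-dotenv, torch, transformers, langchain-core,
# langchain-community, langchain-neo4j), create the .env sketched near the
# top of this file, then start the app with:
#   python app.py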