Shafaq25 committed on
Commit
20feecd
·
verified ·
1 Parent(s): fd10fbd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -0
app.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import logging
import os
import sys

import gradio as gr
import requests
from pinecone import Pinecone, ServerlessSpec

from haystack import Pipeline
from haystack.components.builders import PromptBuilder
from haystack.components.converters import TextFileToDocument
# Haystack 2.x names are OpenAIDocumentEmbedder / OpenAITextEmbedder; the
# OpenAIAPI* names do not exist. Aliases keep any legacy references working.
from haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder
from haystack.components.embedders import OpenAIDocumentEmbedder as OpenAIAPIDocumentEmbedder
from haystack.components.embedders import OpenAITextEmbedder as OpenAIAPIQueryEmbedder
from haystack.components.generators import OpenAIGenerator
# DocumentJoiner lives in haystack.components.joiners, not .routers.
from haystack.components.joiners import DocumentJoiner
from haystack.components.preprocessors import DocumentSplitter
from haystack.components.retrievers.in_memory import InMemoryEmbeddingRetriever
from haystack.components.routers import FileTypeRouter
from haystack.components.writers import DocumentWriter
from haystack.document_stores.in_memory import InMemoryDocumentStore
# The Pinecone integration (pinecone-haystack package) is published under the
# haystack_integrations namespace, not haystack.* .
from haystack_integrations.components.retrievers.pinecone import PineconeEmbeddingRetriever
from haystack_integrations.document_stores.pinecone import PineconeDocumentStore
# --- Logging ---
# Log to stdout so messages show up in the Space / container logs.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# --- Environment Variables ---
# Both keys are mandatory; fail fast at startup with a clear message.
api_key = os.getenv("PINECONE_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

if not api_key:
    raise ValueError("Please set the PINECONE_API_KEY as an environment variable.")
if not openai_api_key:
    raise ValueError("Please set the OPENAI_API_KEY as an environment variable.")
# Re-export so Haystack's OpenAI components can pick the key up from the env.
os.environ["OPENAI_API_KEY"] = openai_api_key

# --- Pinecone Setup ---
index_name = "quickstart"
# 1536 matches the OpenAI text-embedding-ada-002 output size — presumably the
# embedder default used below; confirm if the embedding model is changed.
dimension = 1536
pc = Pinecone(api_key=api_key)

# Create index if not exists
# NOTE(review): cosine is the usual metric for OpenAI embeddings; euclidean
# works but ranks differently — confirm this choice is intentional.
if index_name not in [idx['name'] for idx in pc.list_indexes()]:
    pc.create_index(
        name=index_name,
        dimension=dimension,
        metric="euclidean",
        spec=ServerlessSpec(cloud="aws", region="us-east-1")
    )
# --- Document Loading and Processing ---
# Download the Paul Graham essay once and cache it on local disk; subsequent
# startups reuse the cached copy.
os.makedirs("data/paul_graham", exist_ok=True)
file_path = "data/paul_graham/paul_graham_essay.txt"
if not os.path.exists(file_path):
    url = "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt"
    # Bound the request and surface HTTP errors instead of silently caching
    # an error page as the essay.
    r = requests.get(url, timeout=30)
    r.raise_for_status()
    # Write UTF-8 explicitly so the text round-trips on any platform default.
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(r.text)
# --- Haystack Pipeline for Indexing ---
# Vector store backing both indexing and retrieval.
document_store = PineconeDocumentStore(api_key=api_key, index=index_name)

# txt file -> Documents -> ~100-word chunks -> OpenAI embeddings -> Pinecone.
indexing_pipeline = Pipeline()
indexing_pipeline.add_component("converter", TextFileToDocument())
indexing_pipeline.add_component("splitter", DocumentSplitter(split_by="word", split_length=100))
# OpenAIDocumentEmbedder is the real Haystack 2.x class; reads OPENAI_API_KEY
# from the environment (set above).
indexing_pipeline.add_component("embedder", OpenAIDocumentEmbedder())
indexing_pipeline.add_component("writer", DocumentWriter(document_store))

indexing_pipeline.connect("converter.documents", "splitter.documents")
indexing_pipeline.connect("splitter.documents", "embedder.documents")
indexing_pipeline.connect("embedder.documents", "writer.documents")

# Index only on first run: skip if the Pinecone index already holds documents.
if document_store.count_documents() == 0:
    logging.info("Indexing the document...")
    indexing_pipeline.run({"converter": {"sources": [file_path]}})
    logging.info("Indexing complete.")
# --- Haystack Query Pipeline ---
# Prompt template: stuff the retrieved chunks into context, then ask.
template = """
Given the following context, answer the user's question.
If the context isn't sufficient, say that you don't have enough information.

Context:
{% for doc in documents %}
{{ doc.content }}
{% endfor %}

Question: {{ query }}
"""
query_pipeline = Pipeline()
# OpenAITextEmbedder is the query-side embedder in Haystack 2.x.
query_pipeline.add_component("embedder", OpenAITextEmbedder())
query_pipeline.add_component("retriever", PineconeEmbeddingRetriever(document_store=document_store))
query_pipeline.add_component("prompt_builder", PromptBuilder(template=template))
# No api_key kwarg: OpenAIGenerator expects a Secret, not a raw string, and
# resolves OPENAI_API_KEY from the environment by default (exported above).
query_pipeline.add_component("llm", OpenAIGenerator())

# The embedder's output socket is "embedding" and the retriever's input is
# "query_embedding" — the original "query" -> "query" sockets don't exist.
query_pipeline.connect("embedder.embedding", "retriever.query_embedding")
query_pipeline.connect("retriever.documents", "prompt_builder.documents")
query_pipeline.connect("prompt_builder", "llm")
# --- Query Function ---
def ask_question(prompt):
    """Run the RAG query pipeline for *prompt* and return the LLM's answer.

    Any failure (API, network, pipeline wiring) is reported back as an error
    string rather than raised, so the Gradio UI never crashes.
    """
    try:
        pipeline_inputs = {
            "embedder": {"text": prompt},
            "prompt_builder": {"query": prompt},
        }
        outcome = query_pipeline.run(pipeline_inputs)
        answer = outcome["llm"]["replies"][0]
        return str(answer)
    except Exception as e:
        return f"❌ Error: {str(e)}"
# --- Gradio UI ---
# Inline CSS gives the app a parchment look: serif fonts, warm browns,
# rounded cards and buttons.
with gr.Blocks(css="""body { background-color: #f5f5dc; font-family: 'Georgia', 'Merriweather', serif;}h1, h2, h3 { color: #4e342e;}.gr-box, .gr-column, .gr-group { border-radius: 15px; padding: 20px; background-color: #fffaf0; box-shadow: 2px 4px 14px rgba(0, 0, 0, 0.1); margin-top: 10px;}textarea, input[type="text"] { background-color: #fffaf0; border: 1px solid #d2b48c; color: #4e342e; border-radius: 8px;}button { background-color: #a1887f; color: white; font-weight: bold; border-radius: 8px; transition: background-color 0.3s ease;}button:hover { background-color: #8d6e63;}.gr-button { border-radius: 8px !important;}""") as demo:
    with gr.Column():
        # Page header.
        gr.Markdown("""
<div style='text-align: center;'>
<h1>🧠 Paul Graham Essay Q&A</h1>
<div style='font-size: 1.1em; color: #6d4c41; margin-bottom: 1em;'>
Explore insights from Paul Graham's essay using semantic search powered by <strong>Haystack</strong> + <strong>Pinecone</strong>.
</div>
</div>
""")
        # Collapsible explainer for first-time users.
        with gr.Accordion("ℹ️ What is Pinecone Vector Indexing?", open=False):
            gr.Markdown("""**Pinecone** is a vector database that stores document embeddings (numeric representations of meaning). When you ask a question, it's converted into a vector and compared against stored vectors to find the most relevant answers — even if they don't match word-for-word.""")
        gr.Markdown("### 📖 Ask your question below:")
        with gr.Group():
            with gr.Row():
                user_input = gr.Textbox(
                    placeholder="E.g., What does Paul Graham say about startups?",
                    label="Your Question",
                    lines=2
                )
            with gr.Row():
                output = gr.Textbox(label="Answer", lines=6)
            with gr.Row():
                submit_btn = gr.Button("🔍 Search Essay")
                clear_btn = gr.Button("🧹 Clear")
        # Wire events: search runs the RAG pipeline; clear resets both boxes.
        submit_btn.click(fn=ask_question, inputs=user_input, outputs=output)
        clear_btn.click(fn=lambda: ("", ""), inputs=None, outputs=[user_input, output])

demo.launch()