SHAMIL SHAHBAZ AWAN committed on
Commit
6420211
·
verified ·
1 Parent(s): 02a1d6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -19
app.py CHANGED
@@ -4,7 +4,7 @@ import pdfplumber
4
  from sentence_transformers import SentenceTransformer
5
  import faiss
6
  import numpy as np
7
- from transformers import pipeline # Use Hugging Face model instead of Groq
8
 
9
  # Set background image and customize colors
10
  background_image_url = "https://www.shutterstock.com/image-vector/artificial-intelligence-circuit-electric-line-600nw-2465096659.jpg"
@@ -46,29 +46,23 @@ st.markdown(
46
  background-color: green;
47
  color: white;
48
  }}
49
-
50
  /* Set query input block background color to white */
51
  .stTextInput input {{
52
  background-color: white;
53
  color: black;
54
  }}
55
-
56
- /* Display generated response text in white */
57
- .stWrite {{
58
- color: white !important;
59
- }}
60
  </style>
61
  """,
62
  unsafe_allow_html=True
63
  )
64
 
65
- # Load Hugging Face Secrets (if needed for Hugging Face integration)
66
  HUGGINGFACE_KEY = os.getenv("HUGGINGFACE_KEY")
67
  if not HUGGINGFACE_KEY:
68
  st.error("Hugging Face API token not found. Please set it in the Hugging Face Secrets.")
69
 
70
- # Initialize Hugging Face pipeline for text generation (using GPT-2 or other models)
71
- generator = pipeline('text-generation', model='gpt-2', api_key=HUGGINGFACE_KEY)
72
 
73
  # Load the SentenceTransformer model for embedding generation
74
  embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
@@ -145,13 +139,13 @@ def process_and_store_document(file_path):
145
  # User interface for Streamlit
146
  st.title("The Rise of Agentic AI RAG Application")
147
 
148
- # Query input for the user
149
- user_query = st.text_input("Enter your query:")
150
-
151
  # Button to trigger document processing
152
  if st.button("Process PDF"):
153
  process_and_store_document(file_path)
154
 
 
 
 
155
  if user_query:
156
  # Check if there are any chunks in the index
157
  if not chunks:
@@ -181,13 +175,19 @@ if user_query:
181
  for chunk in retrieved_chunks:
182
  st.write(chunk)
183
 
184
- # Combine the retrieved chunks with the query and generate a response using Hugging Face
185
- combined_input = " ".join(retrieved_chunks) + " " + user_query
186
- response = generator(combined_input, max_length=200, num_return_sequences=1)[0]['generated_text']
187
 
188
- # Display the generated response in white text
189
- st.subheader("Generated Response")
190
- st.markdown(f"<p style='color:white'>{response}</p>", unsafe_allow_html=True)
 
 
 
 
 
 
 
191
 
192
  # Footer
193
  st.markdown("<div class='footer'>Created by Shamil Shahbaz</div>", unsafe_allow_html=True)
 
4
  from sentence_transformers import SentenceTransformer
5
  import faiss
6
  import numpy as np
7
+ from groq import Client # Ensure you're importing the correct Groq client
8
 
9
  # Set background image and customize colors
10
  background_image_url = "https://www.shutterstock.com/image-vector/artificial-intelligence-circuit-electric-line-600nw-2465096659.jpg"
 
46
  background-color: green;
47
  color: white;
48
  }}
 
49
  /* Set query input block background color to white */
50
  .stTextInput input {{
51
  background-color: white;
52
  color: black;
53
  }}
 
 
 
 
 
54
  </style>
55
  """,
56
  unsafe_allow_html=True
57
  )
58
 
59
+ # Load Hugging Face Secrets
60
  HUGGINGFACE_KEY = os.getenv("HUGGINGFACE_KEY")
61
  if not HUGGINGFACE_KEY:
62
  st.error("Hugging Face API token not found. Please set it in the Hugging Face Secrets.")
63
 
64
+ # Initialize Groq client
65
+ groq_client = Client(api_key=HUGGINGFACE_KEY)
66
 
67
  # Load the SentenceTransformer model for embedding generation
68
  embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
 
139
  # User interface for Streamlit
140
  st.title("The Rise of Agentic AI RAG Application")
141
 
 
 
 
142
  # Button to trigger document processing
143
  if st.button("Process PDF"):
144
  process_and_store_document(file_path)
145
 
146
+ # Query input for the user
147
+ user_query = st.text_input("Enter your query:")
148
+
149
  if user_query:
150
  # Check if there are any chunks in the index
151
  if not chunks:
 
175
  for chunk in retrieved_chunks:
176
  st.write(chunk)
177
 
178
+ # Combine the retrieved chunks with the query and generate a response using Groq
179
+ combined_input = " ".join(retrieved_chunks) + user_query
 
180
 
181
+ # Generate the response using Groq's correct API method
182
+ try:
183
+ # Assuming the correct Groq method is `generate_response` (adjust as per your API)
184
+ response = groq_client.generate_response(model="llama3-8b-8192", prompt=combined_input, max_tokens=200)
185
+
186
+ # Display the generated response
187
+ st.subheader("Generated Response")
188
+ st.write(response["text"])
189
+ except Exception as e:
190
+ st.error(f"Error generating response: {e}")
191
 
192
  # Footer
193
  st.markdown("<div class='footer'>Created by Shamil Shahbaz</div>", unsafe_allow_html=True)