SHAMIL SHAHBAZ AWAN committed on
Commit
02a1d6f
·
verified ·
1 Parent(s): 67c0653

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -13
app.py CHANGED
@@ -4,7 +4,7 @@ import pdfplumber
4
  from sentence_transformers import SentenceTransformer
5
  import faiss
6
  import numpy as np
7
- from groq import Client
8
 
9
  # Set background image and customize colors
10
  background_image_url = "https://www.shutterstock.com/image-vector/artificial-intelligence-circuit-electric-line-600nw-2465096659.jpg"
@@ -46,25 +46,29 @@ st.markdown(
46
  background-color: green;
47
  color: white;
48
  }}
49
-
50
  /* Set query input block background color to white */
51
  .stTextInput input {{
52
  background-color: white;
53
  color: black;
54
  }}
55
 
 
 
 
 
56
  </style>
57
  """,
58
  unsafe_allow_html=True
59
  )
60
 
61
- # Load Hugging Face Secrets
62
  HUGGINGFACE_KEY = os.getenv("HUGGINGFACE_KEY")
63
  if not HUGGINGFACE_KEY:
64
  st.error("Hugging Face API token not found. Please set it in the Hugging Face Secrets.")
65
 
66
- # Initialize Groq client
67
- groq_client = Client(api_key=HUGGINGFACE_KEY)
68
 
69
  # Load the SentenceTransformer model for embedding generation
70
  embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
@@ -141,13 +145,13 @@ def process_and_store_document(file_path):
141
  # User interface for Streamlit
142
  st.title("The Rise of Agentic AI RAG Application")
143
 
 
 
 
144
  # Button to trigger document processing
145
  if st.button("Process PDF"):
146
  process_and_store_document(file_path)
147
 
148
- # Query input for the user
149
- user_query = st.text_input("Enter your query:")
150
-
151
  if user_query:
152
  # Check if there are any chunks in the index
153
  if not chunks:
@@ -177,13 +181,13 @@ if user_query:
177
  for chunk in retrieved_chunks:
178
  st.write(chunk)
179
 
180
- # Combine the retrieved chunks with the query and generate a response using Groq
181
- combined_input = " ".join(retrieved_chunks) + user_query
182
- response = groq_client.generate(model="llama3-8b-8192", prompt=combined_input, max_tokens=200)
183
 
184
- # Display the generated response
185
  st.subheader("Generated Response")
186
- st.write(response["text"])
187
 
188
  # Footer
189
  st.markdown("<div class='footer'>Created by Shamil Shahbaz</div>", unsafe_allow_html=True)
 
4
  from sentence_transformers import SentenceTransformer
5
  import faiss
6
  import numpy as np
7
+ from transformers import pipeline # Use Hugging Face model instead of Groq
8
 
9
  # Set background image and customize colors
10
  background_image_url = "https://www.shutterstock.com/image-vector/artificial-intelligence-circuit-electric-line-600nw-2465096659.jpg"
 
46
  background-color: green;
47
  color: white;
48
  }}
49
+
50
  /* Set query input block background color to white */
51
  .stTextInput input {{
52
  background-color: white;
53
  color: black;
54
  }}
55
 
56
+ /* Display generated response text in white */
57
+ .stWrite {{
58
+ color: white !important;
59
+ }}
60
  </style>
61
  """,
62
  unsafe_allow_html=True
63
  )
64
 
65
+ # Load Hugging Face Secrets (if needed for Hugging Face integration)
66
  HUGGINGFACE_KEY = os.getenv("HUGGINGFACE_KEY")
67
  if not HUGGINGFACE_KEY:
68
  st.error("Hugging Face API token not found. Please set it in the Hugging Face Secrets.")
69
 
70
+ # Initialize Hugging Face pipeline for text generation (using GPT-2 or other models)
71
+ generator = pipeline('text-generation', model='gpt-2', api_key=HUGGINGFACE_KEY)
72
 
73
  # Load the SentenceTransformer model for embedding generation
74
  embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
 
145
  # User interface for Streamlit
146
  st.title("The Rise of Agentic AI RAG Application")
147
 
148
+ # Query input for the user
149
+ user_query = st.text_input("Enter your query:")
150
+
151
  # Button to trigger document processing
152
  if st.button("Process PDF"):
153
  process_and_store_document(file_path)
154
 
 
 
 
155
  if user_query:
156
  # Check if there are any chunks in the index
157
  if not chunks:
 
181
  for chunk in retrieved_chunks:
182
  st.write(chunk)
183
 
184
+ # Combine the retrieved chunks with the query and generate a response using Hugging Face
185
+ combined_input = " ".join(retrieved_chunks) + " " + user_query
186
+ response = generator(combined_input, max_length=200, num_return_sequences=1)[0]['generated_text']
187
 
188
+ # Display the generated response in white text
189
  st.subheader("Generated Response")
190
+ st.markdown(f"<p style='color:white'>{response}</p>", unsafe_allow_html=True)
191
 
192
  # Footer
193
  st.markdown("<div class='footer'>Created by Shamil Shahbaz</div>", unsafe_allow_html=True)