SHAMIL SHAHBAZ AWAN committed on
Commit
e44a38f
·
verified ·
1 Parent(s): 6420211

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -23,9 +23,9 @@ st.markdown(
23
  color: black !important; /* Force title color to black */
24
  }}
25
 
26
- /* Set footer text color to white */
27
- h2, h3, h4, h5, h6, p {{
28
- color: black; /* Set text color to black */
29
  }}
30
 
31
  /* Set footer styling */
@@ -46,6 +46,7 @@ st.markdown(
46
  background-color: green;
47
  color: white;
48
  }}
 
49
  /* Set query input block background color to white */
50
  .stTextInput input {{
51
  background-color: white;
@@ -170,22 +171,21 @@ if user_query:
170
  # Retrieve the most relevant chunks based on the valid indices
171
  retrieved_chunks = [chunks[idx] for idx in valid_indices]
172
 
173
- # Display the retrieved chunks
174
  st.subheader("Retrieved Chunks")
175
  for chunk in retrieved_chunks:
176
- st.write(chunk)
177
 
178
  # Combine the retrieved chunks with the query and generate a response using Groq
179
  combined_input = " ".join(retrieved_chunks) + user_query
180
 
181
- # Generate the response using Groq's correct API method
182
  try:
183
- # Assuming the correct Groq method is `generate_response` (adjust as per your API)
184
- response = groq_client.generate_response(model="llama3-8b-8192", prompt=combined_input, max_tokens=200)
185
 
186
- # Display the generated response
187
  st.subheader("Generated Response")
188
- st.write(response["text"])
189
  except Exception as e:
190
  st.error(f"Error generating response: {e}")
191
 
 
23
  color: black !important; /* Force title color to black */
24
  }}
25
 
26
+ /* Set all text in the app to white */
27
+ h2, h3, h4, h5, h6, p, div {{
28
+ color: white !important; /* Set all text color to white */
29
  }}
30
 
31
  /* Set footer styling */
 
46
  background-color: green;
47
  color: white;
48
  }}
49
+
50
  /* Set query input block background color to white */
51
  .stTextInput input {{
52
  background-color: white;
 
171
  # Retrieve the most relevant chunks based on the valid indices
172
  retrieved_chunks = [chunks[idx] for idx in valid_indices]
173
 
174
+ # Display the retrieved chunks in white text
175
  st.subheader("Retrieved Chunks")
176
  for chunk in retrieved_chunks:
177
+ st.markdown(f"<p style='color:white;'>{chunk}</p>", unsafe_allow_html=True)
178
 
179
  # Combine the retrieved chunks with the query and generate a response using Groq
180
  combined_input = " ".join(retrieved_chunks) + user_query
181
 
 
182
  try:
183
+ # Assuming the correct Groq method is `predict` or another name; this is a placeholder
184
+ response = groq_client.predict(model="llama3-8b-8192", prompt=combined_input, max_tokens=200)
185
 
186
+ # Display the generated response in white text
187
  st.subheader("Generated Response")
188
+ st.markdown(f"<p style='color:white;'>{response['text']}</p>", unsafe_allow_html=True)
189
  except Exception as e:
190
  st.error(f"Error generating response: {e}")
191