Update app.py
unique key for download
app.py
CHANGED
@@ -4,7 +4,7 @@ import requests
 from bs4 import BeautifulSoup
 import chunk  # Import the chunking function from chunk.py
 
-# Function to perform Google search and return the first
+# Function to perform Google search and return the first two links
 def google_search(query):
     try:
         search_results = search(query, num_results=2)  # Get first two results
@@ -24,7 +24,7 @@ def fetch_webpage_content(url):
         st.error(f"Failed to fetch the webpage content: {e}")
         return None
 
-# Function to scrape text from webpage content using
+# Function to scrape text from webpage content using BeautifulSoup
 def scrape_text(webpage_content):
     try:
         soup = BeautifulSoup(webpage_content, 'html.parser')
@@ -63,19 +63,21 @@ if st.button("Search"):
             chunked_text = chunk.chunk_text(scraped_text)
 
             # Save chunked data to a .txt file for later use
-
+            file_name = f"chunked_data_link_{i}.txt"
+            with open(file_name, "w") as f:
                 f.write("\n---\n".join(chunked_text))  # Separate chunks by a line break and delimiter
 
             st.write(f"Chunked Data for Link {i}:")
             for chunk_part in chunked_text:
                 st.write(chunk_part)
 
-            # Provide
+            # Provide a unique key for each download button
             st.download_button(
-                label="Download Chunked Webpage Content",
+                label=f"Download Chunked Webpage Content for Link {i}",
                 data="\n---\n".join(chunked_text),
-                file_name=
-                mime="text/plain"
+                file_name=file_name,
+                mime="text/plain",
+                key=f"download_button_{i}"  # Unique key for each button
             )
     else:
         st.warning("No results found")
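Note on the fix: Streamlit identifies widgets by hashing their type and arguments, so several download buttons created in a loop with identical arguments collide with a duplicate-widget error; giving each st.download_button its own key makes every button distinct regardless of its label or data. A minimal sketch of the pattern follows (the sample data and names below are illustrative, not taken from app.py):

import streamlit as st

# Illustrative stand-in for the per-link chunk lists built in app.py.
chunks_per_link = {
    1: ["first chunk of link 1", "second chunk of link 1"],
    2: ["only chunk of link 2"],
}

for i, chunks in chunks_per_link.items():
    st.download_button(
        label=f"Download Chunked Webpage Content for Link {i}",
        data="\n---\n".join(chunks),
        file_name=f"chunked_data_link_{i}.txt",
        mime="text/plain",
        key=f"download_button_{i}",  # distinct key per button avoids widget-ID collisions
    )

Keying on the loop index keeps each button's identity stable across reruns as long as the order of the search results is stable.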