Update user_guide_sync.py

user_guide_sync.py  CHANGED  (+42 -42)

@@ -15,59 +15,59 @@ from llama_index import StorageContext, load_index_from_storage
 
 
 #os.environ["OPENAI_API_KEY"]
+import concurrent.futures
 
+# URL of the page to scrape
+base_url = 'https://help.storemate.cloud/docs/reports/'
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            file.write(f"{title}\n{section_text}")
-        file.close()
-    except:
-        pass
-    print("data collected")
+def fetch_web_data(url):
+    try:
+        # Send a GET request to the URL
+        response = requests.get(url)
+
+        # Parse the page content with BeautifulSoup
+        soup = BeautifulSoup(response.content, 'html.parser')
+
+        # Find the title and section content
+        title = soup.find('h1').get_text()
+
+        # Find the section with the title "Renew Package Subscription"
+        section = soup.find('h1').find_next('div').find_next('div')
+
+        # Extract the text content from the section
+        section_text = section.get_text().strip()
+        section_text = section_text + f"\nMore detail link: {url}"
+
+        # Save the data into a text file
+        with open(f"user_guide/{title}.txt", "w") as file:
+            file.write(f"{title}\n{section_text}")
+    except Exception as e:
+        print(f"Failed to fetch data from {url}: {e}")
 
 
 def get_base_links():
-    # Send a GET request to the URL
-    response = requests.get(
-
+    # Send a GET request to the base URL
+    response = requests.get(base_url)
+
     # Parse the page content with BeautifulSoup
     soup = BeautifulSoup(response.content, 'html.parser')
-
+
     # Find all <a> tags with href attributes
     links = soup.find_all('a', href=True)
-
+
+    # Collect all valid links
     valid_links = []
-    # Extract and print all the URLs
     for link in links:
-
-
-
-
-
-    get_web_data(valid_links)
+        href = link['href']
+        if href.startswith("https://help.storemate.cloud/docs/"):
+            valid_links.append(href)
+
+    print("Base links collected")
 
+    # Use ThreadPoolExecutor to fetch web data in parallel
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(fetch_web_data, valid_links)
 
 
 def update_user_guide():
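
Note on the new parallel fetch: executor.map re-raises a worker's exception only when its result iterator is consumed, and the iterator is discarded here; that is fine because fetch_web_data catches and prints its own errors. For reference, a minimal sketch of the same pattern with per-URL error handling surfaced in the caller (the fetch helper and the sample urls list are hypothetical, not part of this commit):

import concurrent.futures
import requests

def fetch(url):
    # Raise on HTTP error codes so the caller sees failures per URL.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return url, len(response.content)

urls = ["https://help.storemate.cloud/docs/reports/"]  # hypothetical sample

with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
    # as_completed yields each future as it finishes, so one slow or
    # failing URL does not hide the outcome of the others.
    futures = {executor.submit(fetch, url): url for url in urls}
    for future in concurrent.futures.as_completed(futures):
        try:
            url, size = future.result()
            print(f"fetched {url} ({size} bytes)")
        except Exception as exc:
            print(f"failed {futures[future]}: {exc}")
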
@@ -77,8 +77,8 @@ def update_user_guide():
     # index = load_index_from_storage(storage_context=storage_context)
     # print("loaded")
     # except:
-    documents = SimpleDirectoryReader("
+    documents = SimpleDirectoryReader("data/user_guid").load_data()
     index = VectorStoreIndex.from_documents(documents)
     index.storage_context.persist("llama_index")
     print("index created")
-    return "done"
+    return "done"
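
Note on update_user_guide(): the commented-out lines above it suggest the intended flow is "try to reload the persisted index, rebuild on failure". A minimal sketch of that flow, assuming the same pre-0.10 llama_index API this file already imports, and keeping the "data/user_guid" path exactly as it appears in the diff (even though fetch_web_data writes to "user_guide/"):

from llama_index import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)

try:
    # Reload the index persisted by a previous run.
    storage_context = StorageContext.from_defaults(persist_dir="llama_index")
    index = load_index_from_storage(storage_context=storage_context)
    print("loaded")
except Exception:
    # First run (or missing store): rebuild from the scraped pages
    # and persist for next time.
    documents = SimpleDirectoryReader("data/user_guid").load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist("llama_index")
    print("index created")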