Commit dbcc405 · 0 Parent(s)
awacke1 committed

Duplicate from awacke1/GetAllContent

Co-authored-by: Aaron C Wacker <awacke1@users.noreply.huggingface.co>
Files changed (5):
1. .gitattributes +35 -0
2. README.md +14 -0
3. app.py +64 -0
4. backup.py +58 -0
5. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
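These are the stock Git LFS patterns seeded into new Spaces: any path matching one of them is stored via LFS rather than in the regular Git object database. A minimal Python sketch of what the matching means in practice (not part of the commit; fnmatch only approximates gitattributes glob rules, and the pattern list is a subset of the one above):

import fnmatch

# Subset of the patterns from the .gitattributes above.
lfs_patterns = ["*.bin", "*.safetensors", "*.zip", "*tfevents*"]

def is_lfs_tracked(filename: str) -> bool:
    # True if the file would be routed through Git LFS by these rules.
    return any(fnmatch.fnmatch(filename, p) for p in lfs_patterns)

print(is_lfs_tracked("model.safetensors"))  # True
print(is_lfs_tracked("app.py"))             # False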
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: GetAllContent
+ emoji: 📚
+ colorFrom: red
+ colorTo: indigo
+ sdk: streamlit
+ sdk_version: 1.21.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: awacke1/GetAllContent
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
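The block between the --- fences is the Spaces configuration front matter that the link above documents. A small sketch of how it parses (not part of the commit; assumes PyYAML is installed and the script runs in the repo root):

import yaml  # PyYAML; not in requirements.txt, install separately

with open("README.md") as f:
    text = f.read()

# The YAML front matter sits between the first pair of --- fences.
front_matter = text.split("---")[1]
config = yaml.safe_load(front_matter)
print(config["sdk"], config["sdk_version"], config["app_file"])  # streamlit 1.21.0 app.py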
app.py ADDED
@@ -0,0 +1,64 @@
+ import streamlit as st
+ import requests
+ from bs4 import BeautifulSoup
+ import os
+ import urllib.parse
+ import base64
+
+ EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
+
+ def download_file(url, local_filename):
+     if url.startswith('http://') or url.startswith('https://'):
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 with open(local_filename, 'wb') as f:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+             return local_filename
+         except requests.exceptions.HTTPError as err:
+             print(f"HTTP error occurred: {err}")
+
+ def download_html_and_files(url):
+     html_content = requests.get(url).text
+     soup = BeautifulSoup(html_content, 'html.parser')
+
+     base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
+
+     for link in soup.find_all('a'):
+         file_url = urllib.parse.urljoin(base_url, link.get('href'))
+         local_filename = urllib.parse.urlparse(file_url).path.split('/')[-1]
+         if local_filename:
+             link['href'] = local_filename
+             download_file(file_url, local_filename)
+
+     with open("index.html", "w") as file:
+         file.write(str(soup))
+
+ def list_files(directory_path='.'):
+     files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
+     return [f for f in files if f not in EXCLUDED_FILES]
+
+ def get_download_link(file):
+     with open(file, "rb") as f:
+         bytes = f.read()
+         b64 = base64.b64encode(bytes).decode()
+         href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{file}\'>Click to download {file}</a>'
+         return href
+
+ def show_download_links():
+     st.sidebar.write('Here are the files you can download:')
+     for file in list_files():
+         st.sidebar.markdown(get_download_link(file), unsafe_allow_html=True)
+
+ def main():
+     st.sidebar.title('Bulk Download Tool')
+     url = st.sidebar.text_input('Please enter a URL to bulk download text and files')
+     if st.sidebar.button('📥 Get All the Content'):
+         download_html_and_files(url)
+         show_download_links()
+     if st.sidebar.button('📂 Show Download Links'):
+         show_download_links()
+
+ if __name__ == "__main__":
+     main()
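The subtle part of download_html_and_files is the URL handling: the input URL is stripped down to scheme and host, each href is resolved against that root, and only the last path segment is kept as the local filename (so relative links resolve against the site root, not the page's directory). A standalone sketch of that logic (not part of the commit; the URLs are hypothetical):

import urllib.parse

page_url = "https://example.com/docs/index.html"  # hypothetical input

# Strip the page URL down to scheme + host, as the app does.
base_url = urllib.parse.urlunparse(
    urllib.parse.urlparse(page_url)._replace(path='', params='', query='', fragment='')
)
file_url = urllib.parse.urljoin(base_url, "assets/data.csv")
local_filename = urllib.parse.urlparse(file_url).path.split('/')[-1]

print(base_url)        # https://example.com
print(file_url)        # https://example.com/assets/data.csv
print(local_filename)  # data.csv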
backup.py ADDED
@@ -0,0 +1,58 @@
+ import streamlit as st
+ import requests
+ from bs4 import BeautifulSoup
+ import os
+ import urllib.parse
+ import base64
+
+ def download_file(url, local_filename):
+     if url.startswith('http://') or url.startswith('https://'):  # add this line
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 with open(local_filename, 'wb') as f:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+             return local_filename
+         except requests.exceptions.HTTPError as err:
+             print(f"HTTP error occurred: {err}")  # or use logging
+
+
+ def download_html_and_files(url):
+     html_content = requests.get(url).text
+     soup = BeautifulSoup(html_content, 'html.parser')
+
+     base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
+
+     for link in soup.find_all('a'):
+         file_url = urllib.parse.urljoin(base_url, link.get('href'))
+         local_filename = urllib.parse.urlparse(file_url).path.split('/')[-1]
+         if local_filename:  # add this line
+             link['href'] = local_filename
+             download_file(file_url, local_filename)
+
+     with open("index.html", "w") as file:
+         file.write(str(soup))
+
+
+ def list_files(directory_path='.'):
+     return [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
+
+ def main():
+     st.sidebar.title('Bulk Download Tool')
+     url = st.sidebar.text_input('Please enter a URL to bulk download text and files')
+     if st.sidebar.button('📥 Get All the Content'):
+         download_html_and_files(url)
+         st.sidebar.write('Download complete. Here are the files you can download:')
+         for file in list_files():
+             st.sidebar.markdown(get_download_link(file), unsafe_allow_html=True)
+
+ def get_download_link(file):
+     with open(file, "rb") as f:
+         bytes = f.read()
+         b64 = base64.b64encode(bytes).decode()
+         href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{file}\'>Click to download {file}</a>'
+         return href
+
+ if __name__ == "__main__":
+     main()
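Both versions serve files through data URIs: get_download_link base64-encodes the file bytes and embeds them in the href itself, so the browser saves the file without another request to the Space. A minimal sketch of that trick (not part of the commit; the payload is hypothetical):

import base64

payload = b"hello"  # hypothetical file contents
b64 = base64.b64encode(payload).decode()

# Same data-URI shape that get_download_link builds.
href = f'<a href="data:file/octet-stream;base64,{b64}" download="hello.txt">download</a>'
print(href)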
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ streamlit
+ requests
+ beautifulsoup4
+ urllib3
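Note that urllib3 is the third-party HTTP library that requests depends on; the urllib.parse used in app.py is standard library and needs no entry here. A quick sanity check that the listed dependencies import cleanly (not part of the commit):

import importlib

# bs4 is the import name for the beautifulsoup4 package.
for module in ("streamlit", "requests", "bs4", "urllib3"):
    importlib.import_module(module)
    print(f"{module} imported OK")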