VarunRavichander committed on
Commit
f3620c1
·
verified ·
1 Parent(s): 6fd1367

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -36
app.py CHANGED
@@ -1,50 +1,41 @@
1
  import streamlit as st
2
- from enhanced_text_humanizer import EnhancedTextHumanizer
3
- import time
4
- import nltk
5
- import os
6
- import nltk
7
- import os
8
-
9
  import nltk
10
  import os
 
 
 
 
11
 
12
-
13
-
14
-
15
- import nltk
16
- import os
17
- from nltk.tokenize.punkt import PunktLanguageVars
18
-
19
- # Create and set NLTK data directory
20
- NLTK_DATA = os.path.join(os.getcwd(), 'nltk_data')
21
- os.makedirs(NLTK_DATA, exist_ok=True)
22
- nltk.data.path.append(NLTK_DATA)
23
-
24
- # Download essential resources
25
- resources = [
26
- ('punkt', 'tokenizers/punkt'),
27
- ('averaged_perceptron_tagger', 'taggers/averaged_perceptron_tagger'),
28
- ('wordnet', 'corpora/wordnet'),
29
- ]
30
-
31
- # Download each resource
32
- for resource, path in resources:
33
  try:
34
- nltk.data.find(path)
35
- except LookupError:
36
- nltk.download(resource, download_dir=NLTK_DATA)
37
-
38
- # Initialize PunktLanguageVars
39
- punkt_vars = PunktLanguageVars()
40
-
41
-
 
 
 
 
 
 
42
 
43
  def initialize_humanizer():
 
 
 
44
  with st.spinner('Loading language models... This may take a moment.'):
45
  humanizer = EnhancedTextHumanizer()
46
  return humanizer
47
 
 
 
 
48
  def main():
49
  st.set_page_config(
50
  page_title="Text Humanizer App",
 
1
  import streamlit as st
 
 
 
 
 
 
 
2
  import nltk
3
  import os
4
+ from enhanced_text_humanizer import EnhancedTextHumanizer
5
+ import time
6
+ # Set NLTK data path
7
+ nltk.data.path.append('./nltk_data')
8
 
9
# Download required NLTK resources at startup
@st.cache_resource
def download_nltk_resources():
    """Download the NLTK resources the humanizer needs into ./nltk_data.

    Cached with st.cache_resource so the downloads run at most once per
    Streamlit process.

    Returns:
        bool: True when every resource downloaded successfully, False
        otherwise (an error is shown in the UI via st.error).
    """
    # Keep the resource list in one place instead of five copy-pasted calls.
    resources = (
        'punkt',
        'punkt_tab',
        'averaged_perceptron_tagger',
        'wordnet',
        'omw-1.4',
    )
    try:
        # Create NLTK data directory if it doesn't exist
        os.makedirs('nltk_data', exist_ok=True)

        # nltk.download returns False on failure WITHOUT raising, so the
        # surrounding try/except alone cannot detect a failed download —
        # collect every result (no short-circuit) and check them all.
        results = [nltk.download(name, download_dir='./nltk_data')
                   for name in resources]
        if not all(results):
            st.error("Error downloading NLTK resources: one or more downloads failed")
            return False
        return True
    except Exception as e:
        # Surface unexpected problems (network, permissions) in the UI
        # instead of crashing the app at import time.
        st.error(f"Error downloading NLTK resources: {e}")
        return False
27
 
28
def initialize_humanizer():
    """Build and return the EnhancedTextHumanizer instance.

    Ensures the required NLTK resources are downloaded first. A failed
    download is surfaced as a non-fatal warning and construction is still
    attempted (best effort), preserving the original flow.

    Returns:
        EnhancedTextHumanizer: the loaded humanizer.
    """
    # First ensure NLTK resources are downloaded (cached across reruns).
    resources_downloaded = download_nltk_resources()
    if not resources_downloaded:
        # Previously this result was computed but never checked; warn the
        # user instead of silently proceeding on missing resources.
        st.warning("Some NLTK resources could not be downloaded; "
                   "text processing may be degraded.")

    with st.spinner('Loading language models... This may take a moment.'):
        humanizer = EnhancedTextHumanizer()
    return humanizer
35
 
36
+
37
+
38
+
39
  def main():
40
  st.set_page_config(
41
  page_title="Text Humanizer App",