larrysim committed on
Commit
e077248
·
verified ·
1 Parent(s): eed5a34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -4
app.py CHANGED
@@ -7,13 +7,19 @@ import pickle
7
  import re
8
  import os
9
 
10
- # Set page config - this must be the first Streamlit command
11
  st.set_page_config(
12
  page_title="Next Word Predictor",
13
  page_icon="🔮",
14
  layout="centered"
15
  )
16
 
 
 
 
 
 
 
17
  # Custom CSS for styling
18
  st.markdown("""
19
  <style>
@@ -99,15 +105,16 @@ def main():
99
  st.title("🔮 Next Word Predictor")
100
  st.markdown("Enter some text and I'll predict the next word using an LSTM model trained on a large corpus.")
101
 
 
 
 
 
102
  # Load model and tokenizer
103
  with st.spinner("Loading model..."):
104
  model, tokenizer = load_models()
105
 
106
  if model is None or tokenizer is None:
107
  st.error("Failed to load the model. Please check if model files are available.")
108
- # Show files in directory for debugging
109
- if st.button("Show files in directory"):
110
- st.write("Files in directory:", os.listdir('.'))
111
  return
112
 
113
  # Calculate max sequence length (you might want to set this based on your training)
 
7
  import re
8
  import os
9
 
10
+ # Set Streamlit configuration (instead of using .streamlit/config.toml)
11
  st.set_page_config(
12
  page_title="Next Word Predictor",
13
  page_icon="🔮",
14
  layout="centered"
15
  )
16
 
17
+ # Set other Streamlit configurations
18
+ st.set_option('server.headless', True)
19
+ st.set_option('server.port', 8501)
20
+ st.set_option('server.enableCORS', False)
21
+ st.set_option('server.enableXsrfProtection', False)
22
+
23
  # Custom CSS for styling
24
  st.markdown("""
25
  <style>
 
105
  st.title("🔮 Next Word Predictor")
106
  st.markdown("Enter some text and I'll predict the next word using an LSTM model trained on a large corpus.")
107
 
108
+ # Debug: Show files in directory
109
+ st.sidebar.write("Debug Info:")
110
+ st.sidebar.write("Files in directory:", os.listdir('.'))
111
+
112
  # Load model and tokenizer
113
  with st.spinner("Loading model..."):
114
  model, tokenizer = load_models()
115
 
116
  if model is None or tokenizer is None:
117
  st.error("Failed to load the model. Please check if model files are available.")
 
 
 
118
  return
119
 
120
  # Calculate max sequence length (you might want to set this based on your training)