WSLINMSAI committed on
Commit
a448117
·
verified ·
1 Parent(s): e2c807a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -7
app.py CHANGED
@@ -3,11 +3,12 @@ import gradio as gr
3
  from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
4
 
5
  # -----------------------------
6
- # 1. Load the Model & Slow Tokenizer
7
- # We explicitly disable the fast tokenizer by setting use_fast=False.
 
8
  # -----------------------------
9
- tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
10
- model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-small-qg-hl")
11
  qg_pipeline = pipeline(
12
  "text2text-generation",
13
  model=model,
@@ -108,16 +109,39 @@ def check_answer(state, user_answer):
108
  return result_text + "\n" + difficulty_update
109
 
110
  # -----------------------------
111
- # 7. Build the Gradio Interface
112
  # -----------------------------
113
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  # Persistent state for the session.
115
  state = gr.State(init_state())
116
 
117
  gr.Markdown("# Adaptive Language Tutor")
118
  gr.Markdown(
119
  "This demo uses a T5-based model to generate questions from a passage. "
120
- "Difficulty will automatically adjust based on your performance."
121
  )
122
 
123
  # Display current difficulty and score.
 
3
  from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
4
 
5
  # -----------------------------
6
+ # 1. Load a Better Model & Slow Tokenizer
7
+ # Using the "valhalla/t5-base-qg-hl" model (a step up from the small version)
8
+ # and disabling the fast tokenizer.
9
  # -----------------------------
10
+ tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
11
+ model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
12
  qg_pipeline = pipeline(
13
  "text2text-generation",
14
  model=model,
 
109
  return result_text + "\n" + difficulty_update
110
 
111
  # -----------------------------
112
+ # 7. Build the Gradio Interface with Custom CSS for a More Colorful UI
113
  # -----------------------------
114
+ custom_css = """
115
+ body {
116
+ background: linear-gradient(135deg, #e0c3fc, #8ec5fc);
117
+ font-family: 'Helvetica', sans-serif;
118
+ }
119
+ .gradio-container {
120
+ background-color: #ffffff;
121
+ border-radius: 15px;
122
+ padding: 20px;
123
+ box-shadow: 0 4px 12px rgba(0,0,0,0.15);
124
+ }
125
+ .gradio-markdown h1 {
126
+ color: #4b0082;
127
+ }
128
+ .gradio-markdown p, .gradio-markdown li {
129
+ font-size: 16px;
130
+ color: #333333;
131
+ }
132
+ .gradio-button, .gradio-textbox {
133
+ font-size: 16px;
134
+ }
135
+ """
136
+
137
+ with gr.Blocks(css=custom_css) as demo:
138
  # Persistent state for the session.
139
  state = gr.State(init_state())
140
 
141
  gr.Markdown("# Adaptive Language Tutor")
142
  gr.Markdown(
143
  "This demo uses a T5-based model to generate questions from a passage. "
144
+ "The difficulty will automatically adjust based on your performance."
145
  )
146
 
147
  # Display current difficulty and score.