Mitchell Kilpatrick SE2022 committed on
Commit
f34cda4
·
1 Parent(s): 25c242e
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -9,28 +9,29 @@ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
10
  model.eval()
11
 
12
def text_to_ipa(text: str) -> str:
    """Transcribe *text* to IPA using the module-level ByT5 tokenizer/model.

    The input is wrapped in a ``Text: ... IPA:`` prompt, tokenized with
    truncation at 512 tokens, and generation is capped at 64 new tokens.
    Returns the decoded string with special tokens removed.
    """
    encoded = tokenizer(
        f"Text: {text}\nIPA:",
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )
    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        generated = model.generate(**encoded, max_new_tokens=64)
    first_sequence = generated[0]
    return tokenizer.decode(first_sequence, skip_special_tokens=True)
18
 
19
-
20
# HTTP API: POST /phonetic with a JSON body like {"text": "..."}.
app = FastAPI()


@app.post("/phonetic")
async def phonetic(payload: dict):
    """Return the IPA transcription of ``payload["text"]`` as JSON."""
    # A missing "text" key falls back to the empty string rather than erroring.
    requested_text = payload.get("text", "")
    return {"ipa": text_to_ipa(requested_text)}
28
# Build the Gradio front-end: one textbox in, one textbox out.
text_in = gr.Textbox()
text_out = gr.Textbox()
iface = gr.Interface(fn=text_to_ipa, inputs=text_in, outputs=text_out)

# Mount Gradio onto the FastAPI app so a single server serves both the
# JSON endpoint and the UI at the root path.
app = gr.mount_gradio_app(app, iface, path="/")
 
9
  model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
10
  model.eval()
11
 
12
+ import gradio as gr
13
+ import torch
14
+ from transformers import AutoTokenizer, T5ForConditionalGeneration
15
+
16
# Hugging Face model id for the byte-level T5 used for IPA transcription.
MODEL_NAME = "google/byt5-small"

# Load tokenizer and model once at import time so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
# Inference only: switch off dropout/batch-norm training behavior.
model.eval()
22
def text_to_ipa(text: str, max_new_tokens: int = 64) -> str:
    """Transcribe *text* into IPA with the module-level ByT5 model.

    Parameters
    ----------
    text : str
        Plain text to transcribe.
    max_new_tokens : int, optional
        Upper bound on the number of generated tokens. Defaults to 64,
        the previously hard-coded limit, so existing callers are unchanged.

    Returns
    -------
    str
        The model's decoded output with special tokens stripped.
    """
    prompt = f"Text: {text}\nIPA:"
    # Truncate long inputs so the encoder never sees more than 512 tokens.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
28
 
 
 
 
 
 
 
 
 
 
 
29
# Gradio UI: a single text input and a single text output, exposed as the
# "predict" API endpoint for programmatic clients.
input_box = gr.Textbox()
output_box = gr.Textbox()
iface = gr.Interface(
    fn=text_to_ipa,
    inputs=input_box,
    outputs=output_box,
    api_name="predict",
)

iface.launch()