nurfarah57 committed on
Commit
26173ac
·
verified ·
1 Parent(s): 84c00e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -8
app.py CHANGED
@@ -1,16 +1,102 @@
 
 
 
 
 
1
  from fastapi import FastAPI
2
  from pydantic import BaseModel
3
- from transformers import pipeline
 
 
 
 
 
4
 
5
  app = FastAPI()
6
 
7
- # Load your model pipeline once on startup
8
- summarizer = pipeline("text2text-generation", model="zakihassan04/tacab_ai_beero")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- class TextRequest(BaseModel):
 
 
 
 
 
 
 
 
 
11
  inputs: str
12
 
13
- @app.post("/generate")
14
- async def generate_text(request: TextRequest):
15
- out = summarizer(request.inputs, max_length=200, do_sample=False)
16
- return {"generated_text": out[0]["generated_text"]}
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ import re
4
+ import numpy as np
5
+ import scipy.io.wavfile
6
  from fastapi import FastAPI
7
  from pydantic import BaseModel
8
+ from fastapi.responses import StreamingResponse
9
+ import torch
10
+ from transformers import VitsModel, AutoTokenizer
11
+
12
# Redirect the Hugging Face cache to /tmp (writable on Spaces containers).
# NOTE(review): this runs *after* `transformers` was imported above, and
# huggingface_hub reads HF_HOME at import time — confirm the cache path
# actually takes effect, or move this assignment before the imports.
os.environ["HF_HOME"] = "/tmp"

app = FastAPI()

# Pick the device first, then load the TTS model and tokenizer once at startup.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = VitsModel.from_pretrained("Somali-tts/somali_tts_model").to(device).eval()
tokenizer = AutoTokenizer.from_pretrained("saleolow/somali-mms-tts")
23
+
24
# Somali names for the numerals the synthesizer has to pronounce.
_ONES = ["eber", "koow", "labo", "seddex", "afar", "shan",
         "lix", "todobo", "sideed", "sagaal"]

number_words = {i: word for i, word in enumerate(_ONES)}
number_words[10] = "toban"
for _i in range(11, 20):
    number_words[_i] = f"toban iyo {_ONES[_i - 10]}"
number_words.update({
    20: "labaatan", 30: "sodon", 40: "afartan", 50: "konton",
    60: "lixdan", 70: "todobaatan", 80: "sideetan", 90: "sagaashan",
    100: "boqol", 1000: "kun",
})


def number_to_words(number):
    """Spell *number* out in Somali words.

    Accepts anything `int()` can convert; values of a billion or more
    are returned unchanged as their decimal string.
    """
    n = int(number)
    if n < 20:
        return number_words[n]
    if n < 100:
        tens, ones = divmod(n, 10)
        base = number_words[tens * 10]
        return f"{base} iyo {number_words[ones]}" if ones else base
    if n < 1000:
        hundreds, rest = divmod(n, 100)
        text = "boqol" if hundreds == 1 else f"{number_words[hundreds]} boqol"
        return f"{text} iyo {number_to_words(rest)}" if rest else text
    if n < 1_000_000:
        thousands, rest = divmod(n, 1000)
        parts = ["kun" if thousands == 1 else f"{number_to_words(thousands)} kun"]
        if rest >= 100:
            # The hundreds word joins without "iyo"; only the final tail gets it.
            hundreds, tail = divmod(rest, 100)
            parts.append("boqol" if hundreds == 1 else f"{number_words[hundreds]} boqol")
            if tail:
                parts.append(f"iyo {number_to_words(tail)}")
        elif rest:
            parts.append(f"iyo {number_to_words(rest)}")
        return " ".join(parts)
    if n < 1_000_000_000:
        millions, rest = divmod(n, 1_000_000)
        parts = ["milyan" if millions == 1 else f"{number_to_words(millions)} milyan"]
        if rest:
            # Remainder after millions is joined without "iyo" (matches table style).
            parts.append(number_to_words(rest))
        return " ".join(parts)
    return str(n)
77
 
78
def normalize_text(text):
    """Prepare raw request text for the TTS tokenizer.

    Spells every run of digits out in Somali words, then rewrites letter
    pairs the model mispronounces.

    Bug fixed: the previous version looped over `re.findall` results and
    called `str.replace` per number, which corrupted the text whenever one
    number was a substring of another (e.g. replacing "1" first mangles a
    later "12"). A single `re.sub` pass replaces each digit run exactly once.
    """
    text = re.sub(r"\d+", lambda m: number_to_words(m.group(0)), text)
    # Phoneme-level rewrites for sounds the model renders poorly.
    text = text.replace("KH", "qa").replace("Z", "S")
    text = text.replace("SH", "SHa'a").replace("DH", "Dha'a")
    # NOTE(review): "Z" was already rewritten to "S" above, so this rule can
    # never match; kept for parity with the original — move it before the
    # "Z" replacement if "ZamZam" handling is actually intended.
    text = text.replace("ZamZam", "SamSam")
    return text
86
+
87
class TextIn(BaseModel):
    """JSON request body for /synthesize: the text to turn into speech."""

    inputs: str
89
 
90
@app.post("/synthesize")
async def synthesize(data: TextIn):
    """Generate Somali speech for `data.inputs` and stream it back as WAV audio."""
    normalized = normalize_text(data.inputs)
    encoded = tokenizer(normalized, return_tensors="pt").to(device)

    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        audio = model(**encoded).waveform.squeeze().cpu().numpy()

    # Scale the float waveform (presumably in [-1, 1] — standard for VITS)
    # to 16-bit PCM and wrap it in an in-memory WAV container.
    pcm = (audio * 32767).astype(np.int16)
    wav_io = io.BytesIO()
    scipy.io.wavfile.write(wav_io, rate=model.config.sampling_rate, data=pcm)
    wav_io.seek(0)

    return StreamingResponse(wav_io, media_type="audio/wav")