lord-reso committed on
Commit
edc5445
·
verified ·
1 Parent(s): a9e01c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -12
app.py CHANGED
@@ -1,13 +1,13 @@
1
- from fastapi import Depends, FastAPI, HTTPException, Request
2
  from fastapi.responses import JSONResponse, StreamingResponse
3
  from fastapi.middleware.cors import CORSMiddleware
4
- from typing import Optional
5
  from logic import synthesize_voice, plot_data, plot_waveforms
6
  import base64
7
- import sys
8
  import numpy as np
9
  from io import BytesIO
10
  from hifigan.inference_e2e import hifi_gan_inference
 
 
11
 
12
  app = FastAPI()
13
 
@@ -20,20 +20,36 @@ app.add_middleware(
20
  allow_headers=["*"],
21
  )
22
 
 
 
 
23
  async def send_progress(progress: int):
24
  data = f"data: {progress}\n\n"
 
25
  return StreamingResponse(content=data.encode(), media_type="text/event-stream")
26
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  # Existing POST endpoint
28
  @app.post("/synthesize")
29
  async def synthesize(request: Request):
30
  print("call successful")
31
-
32
  json = await request.json()
33
  print(json)
34
-
35
  font_type = json['font_select']
36
- input_text = json['input_text']
37
 
38
  # Initial progress update
39
  await send_progress(0)
@@ -55,19 +71,19 @@ async def synthesize(request: Request):
55
  buffer = BytesIO()
56
  np.save(buffer, mel_output_data)
57
  input_mel = buffer.getvalue()
58
-
59
  hifigan_checkpoint = "generator_v1"
60
-
61
- # Generate audio using Hifigan
62
  audio_data = hifi_gan_inference(input_mel, hifigan_checkpoint)
63
 
64
- # Update progress after Hifigan inference
65
  await send_progress(90)
66
 
67
- # Step 4: Plot the waveform
68
  wave_base64 = plot_waveforms(audio_data)
69
 
70
- # Update progress after plot_waveforms
71
  await send_progress(100)
72
 
73
  # Encode audio content as Base64
 
1
+ from fastapi import FastAPI, Request
2
  from fastapi.responses import JSONResponse, StreamingResponse
3
  from fastapi.middleware.cors import CORSMiddleware
 
4
  from logic import synthesize_voice, plot_data, plot_waveforms
5
  import base64
 
6
  import numpy as np
7
  from io import BytesIO
8
  from hifigan.inference_e2e import hifi_gan_inference
9
+ import asyncio
10
+ from queue import SimpleQueue
11
 
12
  app = FastAPI()
13
 
 
20
  allow_headers=["*"],
21
  )
22
 
23
# Single shared queue bridging the POST /synthesize worker and the
# GET /liveprogress SSE stream: /synthesize publishes formatted progress
# frames here and /liveprogress drains and forwards them to the client.
progress_queue = SimpleQueue()

async def send_progress(progress: int):
    """Publish a progress update to the shared SSE queue.

    The message is pre-formatted as a Server-Sent Events frame
    ("data: <progress>" followed by a blank line) so the /liveprogress
    endpoint can forward it to the browser verbatim.

    NOTE(review): every caller visible in this file does
    `await send_progress(n)` and discards the returned StreamingResponse;
    the /liveprogress endpoint is the actual delivery path. Confirm no
    external caller consumes the return value before removing it.
    """
    data = f"data: {progress}\n\n"
    progress_queue.put(data)
    return StreamingResponse(content=data.encode(), media_type="text/event-stream")
30
 
31
# Progress bar: live progress updates delivered over Server-Sent Events.
@app.get("/liveprogress")
async def sse_endpoint():
    """Relay progress frames from `progress_queue` to the client as SSE.

    Frames are published by `send_progress` already formatted as
    "data: <n>\\n\\n", so they are forwarded verbatim. The stream has no
    natural end; it terminates when the client disconnects (FastAPI
    closes the generator with GeneratorExit).
    """
    async def generate():
        while True:
            # Drain EVERY queued frame before sleeping. The previous
            # `if not empty(): get()` shape forwarded at most one frame
            # per 0.1 s tick, so bursts of updates arrived throttled
            # and stale. Single-consumer, so empty()/get() is race-free.
            while not progress_queue.empty():
                yield progress_queue.get()
            # SimpleQueue has no awaitable get; poll at a short interval
            # and yield control to the event loop in between.
            await asyncio.sleep(0.1)

    return StreamingResponse(generate(), media_type="text/event-stream")
42
+
43
  # Existing POST endpoint
44
  @app.post("/synthesize")
45
  async def synthesize(request: Request):
46
  print("call successful")
47
+
48
  json = await request.json()
49
  print(json)
50
+
51
  font_type = json['font_select']
52
+ input_text = json['input_text']
53
 
54
  # Initial progress update
55
  await send_progress(0)
 
71
  buffer = BytesIO()
72
  np.save(buffer, mel_output_data)
73
  input_mel = buffer.getvalue()
74
+
75
  hifigan_checkpoint = "generator_v1"
76
+
77
+ # Step 4: Generate audio using Hifigan
78
  audio_data = hifi_gan_inference(input_mel, hifigan_checkpoint)
79
 
80
+ # Update progress after audio generation using Hifigan
81
  await send_progress(90)
82
 
83
+ # Step 5: Plot the waveform
84
  wave_base64 = plot_waveforms(audio_data)
85
 
86
+ # Wait for all the data to be ready before sending 100% progress
87
  await send_progress(100)
88
 
89
  # Encode audio content as Base64