skroed committed on
Commit
2c7e285
·
1 Parent(s): 2915a3e

Add: handler

Browse files
Files changed (1) hide show
  1. handler.py +43 -0
handler.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any, Dict, List

import torch
from diffusers import AudioLDM2Pipeline, DPMSolverMultistepScheduler
5
+
6
+
7
class EndpointHandler:
    """Inference-endpoint wrapper around the AudioLDM2 text-to-music pipeline."""

    def __init__(self, path=""):
        # NOTE(review): `path` is accepted for endpoint-handler compatibility but
        # is currently ignored — the model id is hard-coded below. TODO: confirm
        # whether `path` should be forwarded to `from_pretrained` instead.
        self.pipeline = AudioLDM2Pipeline.from_pretrained(
            "cvssp/audioldm2-music", torch_dtype=torch.float16
        ).to("cuda")
        # Compile the UNet once up front so repeated inference calls are faster.
        self.pipeline.unet = torch.compile(
            self.pipeline.unet, mode="reduce-overhead", fullgraph=True
        )
        # Replace the default scheduler with the multistep DPM-Solver,
        # configured from the pipeline's existing scheduler config.
        self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
            self.pipeline.scheduler.config
        )
        # Offload idle sub-models to CPU to reduce peak GPU memory.
        self.pipeline.enable_model_cpu_offload()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Generate audio for a text prompt.

        Args:
            data: Request payload. The prompt is read from ``data["inputs"]``;
                optional keys are ``"duration"`` (length of the generated clip
                in seconds, default 30) and ``"negative_prompt"`` (default
                ``"Low quality, average quality."``).

        Returns:
            A one-element list of the form
            ``[{"generated_audio": <nested list of audio samples>}]``.
        """
        # If "inputs" is absent, the whole payload dict is used as the prompt —
        # preserved from the original; presumably never happens in practice.
        song_description = data.pop("inputs", data)
        duration = data.get("duration", 30)
        negative_prompt = data.get("negative_prompt", "Low quality, average quality.")

        # Generate several candidate waveforms and keep the first one returned.
        audio = self.pipeline(
            song_description,
            negative_prompt=negative_prompt,
            num_waveforms_per_prompt=4,
            audio_length_in_s=duration,
            num_inference_steps=20,
        ).audios[0]

        # Convert the waveform to a plain (JSON-serializable) Python list.
        prediction = audio.tolist()

        return [{"generated_audio": prediction}]