# ibs_textToSpeechGeneration / handlerForAudio.py
import requests
from typing import Dict, Any
from dotenv import load_dotenv, find_dotenv
import os
import streamlit as st
import json
from textToStoryGeneration import *
import logging

# Configure logging once. The original called basicConfig three times
# (DEBUG, ERROR, WARNING) — only the FIRST call has any effect, so the
# later two were dead code. DEBUG is kept, matching the effective
# original behavior.
logging.basicConfig(level=logging.DEBUG)

# Load environment variables from the nearest .env file.
load_dotenv(find_dotenv())
# NOTE(review): "HUGNINGFACEHUB_API_TOKEN" looks like a typo for
# "HUGGINGFACEHUB_API_TOKEN". Kept as-is because the deployed .env may
# use the same misspelled key — confirm before renaming.
HUGGINFACE_API = os.getenv("HUGNINGFACEHUB_API_TOKEN")
class CustomHandler:
    """Text-to-speech handler backed by a Hugging Face Inference API model.

    Calling an instance with input text POSTs it to the hosted
    ``espnet/kan-bayashi_ljspeech_vits`` model and writes the returned
    audio bytes to ``StoryAudio.mp3`` in the working directory.
    """

    def __init__(self):
        # Hosted TTS model and its Inference API endpoint.
        self.model_name = "espnet/kan-bayashi_ljspeech_vits"
        self.endpoint = f"https://api-inference.huggingface.co/models/{self.model_name}"

    def __call__(self, data: Dict[str, Any]) -> str:
        """Synthesize speech for *data* and save it locally.

        :param data: text to synthesize (sent as the ``inputs`` payload).
        :returns: the local filename the audio was written to
            (``'StoryAudio.mp3'``). NOTE: the original annotation said
            ``Dict[str, Any]``, but a ``str`` is what is actually returned.
        :raises requests.HTTPError: if the Inference API returns an
            error status.
        """
        logging.warning(f"------input_data-- {str(data)}")
        payload = {"inputs": data}
        logging.debug("payload---- %s", payload)
        # Authenticate with the token loaded at module import time.
        headers = {"Authorization": f"Bearer {HUGGINFACE_API}"}
        response = requests.post(self.endpoint, json=payload, headers=headers)
        # Fail loudly on HTTP errors instead of silently writing an
        # error JSON body into the .mp3 file (the original did the latter).
        response.raise_for_status()
        with open('StoryAudio.mp3', 'wb') as file:
            file.write(response.content)
        return 'StoryAudio.mp3'
# Check if the request was successful
# Example usage
# if __name__ == "__main__":
# handler = CustomHandler()
# input_data = "Today I have tried with many model but I didnt find the any model which gives us better result and can be deployed on the endpoints. I think we need to Create custom Inference Handler and then it can be deployed on the interfernce end poitn.As I have deployed on model on interfernce endpoint i,e. text-to-story generation. I have also compared the result created with this endpoint and my local server as well that is not same. The endpoint is generating the different stroy."
# result = handler(input_data)
#     print(result)