File size: 2,036 Bytes
489654d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import requests
from typing import Dict, Any
from dotenv import load_dotenv, find_dotenv
import os
import streamlit as st
import json
from textToStoryGeneration import *
import logging

# Configure logging once at import time.
# NOTE: logging.basicConfig is a no-op after the first call, so of the three
# original calls (DEBUG, then ERROR, then WARNING) only DEBUG ever took
# effect. The dead duplicates are removed; the effective level is kept.
logging.basicConfig(level=logging.DEBUG)

load_dotenv(find_dotenv())
# NOTE(review): "HUGNINGFACEHUB_API_TOKEN" looks like a typo for
# "HUGGINGFACEHUB_API_TOKEN" — confirm what the .env file actually defines
# before renaming, since the lookup key must match the stored variable.
HUGGINFACE_API = os.getenv("HUGNINGFACEHUB_API_TOKEN")

class CustomHandler:
    """Text-to-speech handler backed by the Hugging Face Inference API.

    Sends input text to the ``espnet/kan-bayashi_ljspeech_vits`` model
    endpoint and writes the returned audio bytes to a local file.
    """

    def __init__(self):
        self.model_name = "espnet/kan-bayashi_ljspeech_vits"
        self.endpoint = f"https://api-inference.huggingface.co/models/{self.model_name}"

    def __call__(self, data: Dict[str, Any], output_path: str = 'StoryAudio.mp3') -> str:
        """Synthesize speech for *data* and save the audio to *output_path*.

        Parameters
        ----------
        data : text (or payload) to synthesize; forwarded as the ``inputs``
            field of the Inference API request body.
        output_path : file to write the raw response bytes to. Defaults to
            ``'StoryAudio.mp3'`` (the original hard-coded name) for
            backward compatibility.

        Returns
        -------
        str : the path the audio file was written to.

        Raises
        ------
        requests.HTTPError : if the API responds with an error status.
            (The original silently wrote the JSON error body into the
            .mp3 file; failing loudly is the fix.)
        """
        # Lazy %-style args instead of an eagerly-formatted f-string.
        logging.warning("------input_data-- %s", data)
        payload = {"inputs": data}
        # Set headers with the API token loaded at module import.
        headers = {"Authorization": f"Bearer {HUGGINFACE_API}"}

        # Send POST request to the Hugging Face model endpoint.
        response = requests.post(self.endpoint, json=payload, headers=headers)
        # Check the request was successful BEFORE persisting the body,
        # so an error response is never saved as "audio".
        response.raise_for_status()
        with open(output_path, 'wb') as file:
            file.write(response.content)
        return output_path

# Example usage
# if __name__ == "__main__":
#     handler = CustomHandler()
#     input_data =  "Today I have tried with many model but I didnt find the any model which gives us better result and can be deployed on the endpoints. I think we need to Create custom Inference Handler and then it can be deployed on the interfernce end poitn.As I have deployed on model on interfernce endpoint i,e. text-to-story generation. I have also compared the result created with this endpoint and my local server as well that is not same. The endpoint is generating the different stroy."
#     result = handler(input_data)
#     print(result)