jfforero committed on
Commit
836ccde
·
verified ·
1 Parent(s): d65a4a2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -26
app.py CHANGED
@@ -53,31 +53,25 @@ def get_predictions(audio_input):
53
 
54
  ###
55
 
56
- # Define the image generation function using the Stable Diffusion API
57
- url = "https://stablediffusionapi.com/api/v3/text2img"
58
- title = "<h2><center>Text to Image Generation with Stable Diffusion API</center></h2>"
59
- description = "Get the API key by signing up here [Stable Diffusion API](https://stablediffusionapi.com)."
60
-
61
- def get_image(key, prompt, inference_steps, filter):
62
- payload = {
63
- "key": key,
64
- "prompt": prompt,
65
- "negative_prompt": "((out of frame)), ((extra fingers)), mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), (((tiling))), ((naked)), ((tile)), ((fleshpile)), ((ugly)), (((abstract))), blurry, ((bad anatomy)), ((bad proportions)), ((extra limbs)), cloned face, (((skinny))), glitchy, ((extra breasts)), ((double torso)), ((extra arms)), ((extra hands)), ((mangled fingers)), ((missing breasts)), (missing lips), ((ugly face)), ((fat)), ((extra legs)), anime",
66
- "width": "512",
67
- "height": "512",
68
- "samples": "1",
69
- "num_inference_steps": inference_steps,
70
- "safety_checker": filter,
71
- "enhance_prompt": "yes",
72
- "guidance_scale": 7.5
73
- }
74
- headers = {}
75
- response = requests.request("POST", url, headers=headers, data=payload)
76
- url1 = str(json.loads(response.text)['output'][0])
77
- r = requests.get(url1)
78
- i = Image.open(BytesIO(r.content))
79
- return i
80
-
81
  ####
82
 
83
  # Create the Gradio interface
@@ -88,7 +82,7 @@ with gr.Blocks() as interface:
88
  with gr.Row():
89
  input_audio = gr.Audio(label="Input Audio", type="filepath")
90
  submit_button = gr.Button("Submit")
91
- output_label = gr.Label("Prediction") # Use a single Label instead of a list
92
 
93
  # Set the function to be called when the button is clicked
94
  submit_button.click(get_predictions, inputs=input_audio, outputs=output_label)
 
53
 
54
  ###
55
 
56
# Define a function to generate an image using DeepAI Text to Image API
def generate_image(api_key, text):
    """Generate an image from a text prompt via the DeepAI text2img API.

    Parameters
    ----------
    api_key : str
        DeepAI API key, sent in the 'api-key' request header.
    text : str
        Prompt describing the image to generate.

    Returns
    -------
    PIL.Image.Image or None
        The generated image, or None when the request fails or the API
        response does not contain an 'output_url' (e.g. bad key, quota,
        or rejected prompt).
    """
    url = "https://api.deepai.org/api/text2img"
    headers = {'api-key': api_key}
    try:
        # timeout prevents the Gradio callback from hanging forever on a
        # stalled API call; a non-JSON error body would raise ValueError
        response = requests.post(
            url,
            data={'text': text},
            headers=headers,
            timeout=60,
        )
        response_data = response.json()
    except (requests.RequestException, ValueError):
        # network failure or malformed response: same outcome as the
        # explicit missing-'output_url' failure path below
        return None
    if 'output_url' not in response_data:
        return None
    image_url = response_data['output_url']
    try:
        image_response = requests.get(image_url, timeout=60)
        image_response.raise_for_status()
        return Image.open(BytesIO(image_response.content))
    except (requests.RequestException, OSError):
        # failed download or undecodable image data
        return None
 
 
 
 
 
 
75
  ####
76
 
77
  # Create the Gradio interface
 
82
  with gr.Row():
83
  input_audio = gr.Audio(label="Input Audio", type="filepath")
84
  submit_button = gr.Button("Submit")
85
+ output_label = [gr.Label("Prediction"), gr.Image(type='pil')]  # Label for the prediction plus an Image for the generated picture
86
 
87
  # Set the function to be called when the button is clicked
88
  submit_button.click(get_predictions, inputs=input_audio, outputs=output_label)