vsrinivas committed on
Commit
2078085
·
1 Parent(s): 1f4ef78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -16
app.py CHANGED
@@ -1,26 +1,30 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
  import torch
4
- from diffusers import StableDiffusionPipeline
5
 
6
- def get_completion(inputs, parameters=None):
7
- data = { "inputs": inputs }
8
- if parameters is not None:
9
- data.update({"parameters": parameters})
10
- return pipeline(prompt, params)['sample'][0]
11
- # response = requests.request("POST",
12
- # ENDPOINT_URL,
13
- # headers=headers,
14
- # data=json.dumps(data))
15
- # return json.loads(response.content.decode("utf-8"))
16
 
17
- # def get_completion(prompt,params):
18
- # return pipeline(prompt=prompt, height=params['height'], width=params['width'], num_inference_steps=params['num_inference_steps'], guidance_scale=params['guidance_scale'], negative_prompt=params['negative_prompt'])['sample'][0]
19
- # # return pipeline(prompt, params)['sample'][0]
 
 
 
 
20
 
21
  def generate(prompt,negative_prompt,steps,guidance,width,height):
22
  params = {
23
- "negative_prompt": negative_prompt,
24
  "num_inference_steps": steps,
25
  "guidance_scale": guidance,
26
  "width": width,
@@ -29,7 +33,7 @@ def generate(prompt,negative_prompt,steps,guidance,width,height):
29
  output = get_completion(prompt,params)
30
  return output
31
 
32
- pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
33
 
34
  with gr.Blocks() as demo:
35
  gr.Markdown("# Image Generation with Stable Diffusion")
 
1
  import gradio as gr
2
  from transformers import pipeline
3
  import torch
4
+ from diffusers import DiffusionPipeline
5
 
6
+ # def get_completion(inputs, parameters=None):
7
+ # data = { "inputs": inputs }
8
+ # if parameters is not None:
9
+ # data.update({"parameters": parameters})
10
+ # return pipeline(prompt, params)['sample'][0]
11
+ # # response = requests.request("POST",
12
+ # # ENDPOINT_URL,
13
+ # # headers=headers,
14
+ # # data=json.dumps(data))
15
+ # # return json.loads(response.content.decode("utf-8"))
16
 
17
+ def get_completion(prompt,params):
18
+ return pipeline(prompt=prompt, height=params['height'], width=params['width'],
19
+ num_inference_steps=params['num_inference_steps'],
20
+ guidance_scale=params['guidance_scale'])['sample'][0]
21
+ # negative_prompt=params['negative_prompt'])['sample'][0]
22
+
23
+ # return pipeline(prompt, params)['sample'][0]
24
 
25
  def generate(prompt,negative_prompt,steps,guidance,width,height):
26
  params = {
27
+ # "negative_prompt": negative_prompt,
28
  "num_inference_steps": steps,
29
  "guidance_scale": guidance,
30
  "width": width,
 
33
  output = get_completion(prompt,params)
34
  return output
35
 
36
+ pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
37
 
38
  with gr.Blocks() as demo:
39
  gr.Markdown("# Image Generation with Stable Diffusion")