AlejandroLanaspa committed on
Commit
02ca452
·
1 Parent(s): 4bf6497

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -27
app.py CHANGED
@@ -1,38 +1,31 @@
1
  import gradio as gr
2
- from PIL import Image
3
- import torch
4
 
5
- model2 = torch.hub.load(
6
- "AK391/animegan2-pytorch:main",
7
- "generator",
8
- pretrained=True,
9
- progress=False
10
- )
11
- model1 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1")
12
- face2paint = torch.hub.load(
13
- 'AK391/animegan2-pytorch:main', 'face2paint',
14
- size=512,side_by_side=False
15
- )
16
 
17
  def inference(img, ver):
18
- if ver == 'version 2 (🔺 robustness,🔻 stylization)':
19
- out = face2paint(model2, img)
20
  else:
21
- out = face2paint(model1, img)
22
  return out
 
 
 
 
23
 
24
- title = "Portrait of your Pet"
25
- description = "Demo for Pet Portrait. To use it, simply upload your image, or click one of the examples to load them."
26
- article = "Github Repo Pytorch "
27
- examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['gongyoo.jpeg','version 2 (🔺 robustness,🔻 stylization)']]
28
 
29
  demo = gr.Interface(
30
- fn=inference,
31
- inputs=[gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')],
32
- outputs=gr.outputs.Image(type="pil"),
33
- title=title,
34
- description=description,
35
- article=article,
36
- examples=examples)
37
 
38
  demo.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
3
 
4
+ model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")
5
+ tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
 
 
 
 
 
 
 
 
 
6
 
7
  def inference(img, ver):
8
+ if ver == "Hidden Identity 1":
9
+ out = "Michael Jackson"
10
  else:
11
+ out = "Brad Pitt"
12
  return out
13
+
14
+ def generate(text,guess):
15
+ inputs = tokenizer(f"Answer with yes or no the following question about {guess}: {text}?", return_tensors="pt")
16
+ return model.generate(**inputs)[0]
17
 
18
+ examples = [
19
+ ["Is it dead?"],
20
+ ["Is it a female?"],
21
+ ]
22
 
23
  demo = gr.Interface(
24
+ fn=generate,
25
+ inputs=[gr.inputs.Textbox(lines=5, label="Input Text"),
26
+ gr.inputs.Radio("Hidden Identity 1", type="value", default='Hidden Identity 1', label='Hidden identity')],
27
+ outputs=gr.outputs.Textbox(label="Generated Text"),
28
+ examples=examples
29
+ )
 
30
 
31
  demo.launch()