vsrinivas committed on
Commit
6710d2a
·
verified ·
1 Parent(s): d0020b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -18
app.py CHANGED
@@ -11,41 +11,50 @@ logging.set_verbosity_error()
11
# BLIP image-text matching (ITM) checkpoint fine-tuned on COCO, plus its paired
# processor. Loaded once at module import so every request reuses the same weights.
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
13
 
14
def process_image(input_type, image_url, image_upload, text=""):
    """Score how well ``text`` describes an image using BLIP image-text matching.

    Args:
        input_type: "URL" to fetch the image from ``image_url``; any other
            value reads the uploaded file in ``image_upload``.
        image_url: HTTP(S) URL of the image (used only when input_type == "URL").
        image_upload: path or file object of the uploaded image.
        text: candidate description to score against the image. Added as a
            parameter (default "") — the original read an undefined global
            ``text`` and would raise NameError.

    Returns:
        An HTML snippet reporting the ITM match score and a feedback comment.
    """
    if input_type == "URL":
        # Bug fix: original referenced undefined name `img_url` (NameError);
        # the parameter is `image_url`.
        raw_image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
    else:
        raw_image = Image.open(image_upload)

    inputs = processor(images=raw_image, text=text, return_tensors="pt")
    itm_scores = model(**inputs)[0]
    # Softmax over the two ITM logits; index 1 is the "text matches image" probability.
    itm_score = torch.nn.functional.softmax(itm_scores, dim=1)
    itm_score = itm_score[0][1]
    print(itm_score)

    if itm_score <= .35:
        cmnt = "Your description is not that great. Try again"
    elif itm_score <= .75:
        cmnt = "Your description is good. But you can improve it. Try again"
    else:
        cmnt = "Your description is excellent.Can you improve on it?"

    formatted_text = (
        # Bug fix: user-facing typo "decription" -> "description".
        f"""<div style='text-align: center; font-size: 15px; color: blue;'>
        Your description is <span style='font-size: 20px; color: orange;'>{itm_score}</span> matching, {cmnt}
        </div>"""
    )
    return formatted_text
38
 
39
# Build and launch the UI.
# Bug fixes versus the original:
#  - process_image returns an HTML string, so the output component must be
#    gr.HTML, not gr.outputs.Image(type="pil"), which cannot render markup.
#  - The URL/upload inputs were created with visible=False and no event to
#    reveal them, so the app was unusable; they are now visible.
#  - Deprecated `gr.inputs`/`gr.outputs` namespaces and the removed
#    `layout` argument are replaced with the current top-level components.
demo = gr.Interface(
    title="Challenge yourself best describing the image",
    description='Upload an image or type in image URL and submit',
    fn=process_image,
    inputs=[
        gr.Radio(choices=["URL", "Upload"], label="Input Type"),
        gr.Textbox(label="Image URL"),
        gr.Image(type="filepath", label="Upload Image"),
    ],
    outputs=gr.HTML(label="Result"),
)

# share=True publishes a temporary public URL; debug=True surfaces tracebacks.
demo.launch(share=True, debug=True)
 
11
# BLIP image-text matching (ITM) checkpoint fine-tuned on COCO, plus its paired
# processor. Loaded once at module import so every request reuses the same weights.
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
13
 
14
def process_image(input_type, image_url, image_upload, text):
    """Score how well ``text`` describes an image using BLIP image-text matching.

    Args:
        input_type: "URL" to fetch the image from ``image_url``; any other
            value uses the already-decoded PIL image in ``image_upload``.
        image_url: HTTP(S) URL of the image (used only when input_type == "URL").
        image_upload: PIL image from the gr.Image(type="pil") component.
        text: candidate description to score against the image.

    Returns:
        An HTML snippet reporting the ITM match score and a feedback comment.
    """
    if input_type == "URL":
        # Bug fix: original referenced undefined name `img_url` (NameError on
        # every URL submission); the parameter is `image_url`.
        raw_image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
    else:
        raw_image = image_upload

    inputs = processor(images=raw_image, text=text, return_tensors="pt")
    itm_scores = model(**inputs)[0]
    # Softmax over the two ITM logits; index 1 is the "text matches image" probability.
    itm_score = torch.nn.functional.softmax(itm_scores, dim=1)
    itm_score = itm_score[0][1]
    print(itm_score)

    if itm_score <= .35:
        cmnt = "and is not that great. Try again"
    elif itm_score <= .75:
        cmnt = "and is good. But you can improve it. Try again"
    else:
        cmnt = "and is excellent. Can you improve on it?"

    formatted_text = (
        # Bug fix: user-facing typo "decription" -> "description".
        f"""<div style='text-align: center; font-size: 40px; color: blue;'>
        Your description score is <span style='font-size: 60px; color: orange;'>{itm_score:.4f}</span>; {cmnt}
        </div>"""
    )
    return formatted_text
39
 
40
def toggle_inputs(input_type):
    """Show the URL textbox or the upload widget to match the chosen source.

    Returns three gr.update objects for (image_url, image_upload, description):
    exactly one of the two image inputs is made visible, and the description
    box is always shown once a source has been picked.
    """
    wants_url = input_type == "URL"
    return (
        gr.update(visible=wants_url),
        gr.update(visible=not wants_url),
        gr.update(visible=True),
    )
45
+
46
# Assemble the UI: choose a source (URL or upload), describe the image,
# and submit to receive an HTML-formatted match score.
with gr.Blocks() as demo:
    input_type = gr.Radio(choices=["URL", "Upload"], label="Input Type")
    image_url = gr.Textbox(label="Image URL", visible=False)
    image_upload = gr.Image(type="pil", label="Upload Image", visible=False)
    description = gr.Textbox(label="Describe the image", visible=False, lines=3)

    # Reveal the relevant input widgets as soon as a source type is selected.
    input_type.change(
        fn=toggle_inputs,
        inputs=input_type,
        outputs=[image_url, image_upload, description],
    )

    submit_btn = gr.Button("Submit")
    processed_image = gr.HTML(label="Your challenge result")

    submit_btn.click(
        fn=process_image,
        inputs=[input_type, image_url, image_upload, description],
        outputs=processed_image,
    )

# share=True publishes a temporary public URL; debug=True surfaces tracebacks.
demo.launch(share=True, debug=True)