AmandaPanda committed on
Commit
1ba53ba
·
verified ·
1 Parent(s): b6fb748

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -4
app.py CHANGED
@@ -1,10 +1,6 @@
1
  import gradio as gr
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
8
 
9
  # Load vision capability to support image display
10
  ##pip install datasets
@@ -26,8 +22,61 @@ print(df.head(20))
26
 
27
  # Use the sample command
28
  selected_image = df.sample(n=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  print (selected_image)
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  # Get image database
32
  ##curl -X GET \
33
  ## "https://datasets-server.huggingface.co/first-rows?dataset=merve%2Fcoco&config=default&split=validation"
 
1
  import gradio as gr
2
 
 
 
3
 
 
 
4
 
5
  # Load vision capability to support image display
6
  ##pip install datasets
 
22
 
23
  # Use the sample command
24
  selected_image = df.sample(n=1)
25
+
26
+
27
# Get url for image
def parse_url(df):
    """Expand query-string parameters of each row's URL into columns of *df*.

    For every row, the row's string form is URL-parsed, its query string is
    split into key/value pairs, and each pair is written back onto *df* as a
    new column (``df.loc[index, key]``) with the value lowercased and
    stripped. The DataFrame is mutated in place and also returned.

    NOTE(review): the visible call site passes ``df['image']`` (a Series),
    which has no ``iterrows`` — confirm the caller intends a DataFrame.
    """
    # Local import: urlparse/parse_qs were used without any visible import
    # at the top of the file, which would raise NameError at call time.
    from urllib.parse import parse_qs, urlparse

    for index, row in df.iterrows():
        # Parse only the query portion of the row's string representation.
        query = urlparse(str(row)).query
        params = parse_qs(query)  # dict: key -> list of values
        for key, values in params.items():
            # First value wins; normalize to lowercase stripped text.
            df.loc[index, key.strip()] = values[0].strip().lower()
    return df
35
+
36
+
37
+ image_url = parse_url(df['image'])
38
+
39
+
40
+
41
+
42
  print (selected_image)
43
 
44
+
45
+
46
def greet(name):
    """Build and return the "Hello <name>!!" greeting string."""
    pieces = ("Hello ", name, "!!")
    return "".join(pieces)
48
+
49
# NOTE(review): `demo.launch()` here starts the Gradio server partway
# through the file; in a plain script launch() typically blocks, so the
# BLIP captioning code further down may never run — confirm the intended
# ordering (launch is usually the last statement in a Space's app.py).
+ demo = gr.Interface(fn=greet, inputs="text", outputs="text")
50
+ demo.launch()
51
+
52
# --- BLIP image-captioning section ---
# Loads the Salesforce BLIP captioning model and prints one conditional and
# one unconditional caption for a single image fetched over HTTP.
+ import requests
53
+ from PIL import Image
54
+ from transformers import BlipProcessor, BlipForConditionalGeneration
55
+
56
# Downloads model weights from the Hugging Face hub on first use.
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
57
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
58
+
59
+
60
+ ##img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
61
+ #Select image
62
+
63
# BUG(review): `img_url` is only defined in the commented-out line above, so
# this line raises NameError at runtime — assign img_url (presumably from
# the sampled dataset row / parse_url result) before this point.
+ raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
64
+
65
+ # conditional image captioning
66
+ text = "a photography of"
67
+ inputs = processor(raw_image, text, return_tensors="pt")
68
+
69
+ out = model.generate(**inputs)
70
+ print(processor.decode(out[0], skip_special_tokens=True))
71
+
72
+ # unconditional image captioning
73
+ inputs = processor(raw_image, return_tensors="pt")
74
+
75
+ out = model.generate(**inputs)
76
+ print(processor.decode(out[0], skip_special_tokens=True))
77
+
78
+
79
+
80
  # Get image database
81
  ##curl -X GET \
82
  ## "https://datasets-server.huggingface.co/first-rows?dataset=merve%2Fcoco&config=default&split=validation"