Shaffique Aljoofri committed on
Commit
7804a41
·
1 Parent(s): 5977d60

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -22
app.py CHANGED
@@ -1,14 +1,12 @@
 
1
  import os
2
- import io
3
- import base64
4
 
5
  hf_api_key = os.environ['HF_API_KEY']
 
6
 
7
- # Helper functions
8
- import requests, js
9
-
10
- #Image-to-text endpoint
11
-
12
 
13
  def image_to_base64_str(pil_image):
14
  byte_arr = io.BytesIO()
@@ -16,43 +14,34 @@ def image_to_base64_str(pil_image):
16
  byte_arr = byte_arr.getvalue()
17
  return str(base64.b64encode(byte_arr).decode('utf-8'))
18
 
19
- #Image-to-text endpoint
20
- def get_completion(inputs, parameters = None, ENDPOINT_URL = os.environ['HF_API_ITT_BASE']):
21
-
22
  headers = {
23
  "Authorization": f"Bearer {hf_api_key}",
24
  "Content-Type": "application/json"
25
- }
26
-
27
  data = { "inputs" : inputs }
28
-
29
  if parameters is not None:
30
  data.update({"parameters" : parameters})
31
-
32
  response = requests.post(ENDPOINT_URL,
33
  headers = headers,
34
  data = json.dumps(data))
35
-
36
  return json.loads(response.content.decode("utf-8"))
37
 
38
-
39
- # Gradio App
40
-
41
- import gradio as gr
42
- from PIL import Image
43
-
44
  def captioner(image):
45
  pil_image = Image.open(image)
46
  base64_image = image_to_base64_str(pil_image)
47
  result = get_completion(base64_image)
48
  return result[0]['generated_text']
 
 
 
49
 
50
  gr.close_all()
51
 
52
  demo = gr.Interface(fn = captioner,
53
  inputs = [gr.Image(label="Upload image", type="pil")],
54
  outputs = [gr.Textbox(label="Caption")],
55
- title = "Image Captioning with BLIP",
56
  description = "Caption any image using the BLIP model",
57
  allow_flagging = "never",
58
  examples = ["tank-aerial-view.jpg"]
 
1
+ ##### Set API variables #####
2
  import os
 
 
3
 
4
  hf_api_key = os.environ['HF_API_KEY']
5
+ hf_api_itt_base = os.environ['HF_API_ITT_BASE']
6
 
7
+ ##### Helper functions #####
8
+ import io, base64, requests, json
9
+ from PIL import Image
 
 
10
 
11
  def image_to_base64_str(pil_image):
12
  byte_arr = io.BytesIO()
 
14
  byte_arr = byte_arr.getvalue()
15
  return str(base64.b64encode(byte_arr).decode('utf-8'))
16
 
17
+ def get_completion(inputs, parameters = None, ENDPOINT_URL = hf_api_itt_base):
 
 
18
  headers = {
19
  "Authorization": f"Bearer {hf_api_key}",
20
  "Content-Type": "application/json"
21
+ }
 
22
  data = { "inputs" : inputs }
 
23
  if parameters is not None:
24
  data.update({"parameters" : parameters})
 
25
  response = requests.post(ENDPOINT_URL,
26
  headers = headers,
27
  data = json.dumps(data))
 
28
  return json.loads(response.content.decode("utf-8"))
29
 
 
 
 
 
 
 
30
  def captioner(image):
31
  pil_image = Image.open(image)
32
  base64_image = image_to_base64_str(pil_image)
33
  result = get_completion(base64_image)
34
  return result[0]['generated_text']
35
+
36
+ ##### Gradio App #####
37
+ import gradio as gr
38
 
39
  gr.close_all()
40
 
41
  demo = gr.Interface(fn = captioner,
42
  inputs = [gr.Image(label="Upload image", type="pil")],
43
  outputs = [gr.Textbox(label="Caption")],
44
+ title = "Image captioning with BLIP",
45
  description = "Caption any image using the BLIP model",
46
  allow_flagging = "never",
47
  examples = ["tank-aerial-view.jpg"]