dlaima commited on
Commit
683e91e
·
verified ·
1 Parent(s): 46ebf29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -19
app.py CHANGED
@@ -1,16 +1,35 @@
1
- import gradio as gr
 
 
 
 
 
 
 
 
2
  import requests
3
- from PIL import Image
4
- import os
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  # Set your Inference Endpoint URL and API key
7
- INFERENCE_ENDPOINT = "https://your-endpoint-url" # Replace with your endpoint URL
8
- API_TOKEN = "your-api-token" # Replace with your Hugging Face API token
9
 
10
- #Image-to-text endpoint
11
- def get_completion(inputs, parameters=None, endpoint_url=INFERENCE_ENDPOINT):
12
  headers = {
13
- "Authorization": f"Bearer {API_TOKEN}",
14
  "Content-Type": "application/json"
15
  }
16
  data = {"inputs": inputs}
@@ -27,16 +46,8 @@ def get_generation(model, processor, image, dtype):
27
  def load_image(img_url):
28
  image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
29
  return image
30
-
31
- #Open the images
32
- #Image1=Image.open('dlaima/Multiple_Image_captioning/main/image1.jpg')
33
- #Image2=Image.open('https://huggingface.co/spaces/dlaima/Multiple_Image_captioning/resolve/main/image2.jpeg')
34
- #Image3=Image.open('https://huggingface.co/spaces/dlaima/Multiple_Image_captioning/resolve/main/image3.jpeg')
35
-
36
- #image_url = 'https://free-images.com/lg/9e46/white_bengal_tiger_tiger_0.jpg'
37
- #image = load_image(image_url)
38
-
39
-
40
  def caption_image(image_url):
41
  # Download the image from the URL
42
  response = requests.get(image_url)
@@ -48,7 +59,6 @@ def caption_image(image_url):
48
  caption = get_completion(image_url)
49
  return caption
50
 
51
-
52
  # Gradio interface
53
 
54
  demo = gr.Interface(
 
1
+ from dotenv import load_dotenv, find_dotenv
2
+ load_dotenv(find_dotenv())
3
+
4
+ import os # Provides a way of using operating system-dependent functionality
5
+ import io # Provides core tools for working with streams of data
6
+ from io import BytesIO
7
+ import IPython.display # Used for displaying rich content (e.g., images, HTML) in Jupyter Notebooks
8
+ from PIL import Image # Python Imaging Library for opening, manipulating, and saving image files
9
+ import base64 # Encodes and decodes data in base64 format
10
  import requests
11
+ import json
12
+ import torch
13
+ import torch.nn as nn
14
+ import warnings
15
+ import gradio as gr
16
+
17
+ # Ignore specific UserWarnings related to max_length in transformers
18
+ warnings.filterwarnings("ignore", message=".*Using the model-agnostic default `max_length`.*")
19
+
20
+ # Read the API token and endpoint URL from environment variables (populated by load_dotenv above)
21
+ hf_api_key = os.getenv('API_TOKEN')
22
+ endpoint_url = os.getenv('INFERENCE_ENDPOINT')
23
+
24
 
25
  # Set your Inference Endpoint URL and API key
26
+ #INFERENCE_ENDPOINT = "https://your-endpoint-url" # Replace with your endpoint URL
27
+ #API_TOKEN = "your-api-token" # Replace with your Hugging Face API token
28
 
29
+ #Image-to-text endpoint - helper function
30
+ def get_completion(inputs, parameters=None, endpoint_url=endpoint_url):
31
  headers = {
32
+ "Authorization": f"Bearer {hf_api_key}",
33
  "Content-Type": "application/json"
34
  }
35
  data = {"inputs": inputs}
 
46
def load_image(img_url):
    """Download the image at *img_url* and return it as an RGB PIL Image.

    Raises
    ------
    requests.HTTPError
        If the server responds with a 4xx/5xx status.
    """
    response = requests.get(img_url, timeout=30)  # explicit timeout: requests has none by default
    response.raise_for_status()  # fail fast instead of feeding an error page to PIL
    # Use response.content via BytesIO rather than the raw socket stream:
    # .content is transparently content-decoded (gzip/deflate), while
    # .raw is not, so PIL always receives valid image bytes.
    return Image.open(BytesIO(response.content)).convert('RGB')
49
+
50
+ #Gradio interface
 
 
 
 
 
 
 
 
51
  def caption_image(image_url):
52
  # Download the image from the URL
53
  response = requests.get(image_url)
 
59
  caption = get_completion(image_url)
60
  return caption
61
 
 
62
  # Gradio interface
63
 
64
  demo = gr.Interface(