Arjs committed on
Commit
dda77cf
·
verified ·
1 Parent(s): c707e18

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -13
app.py CHANGED
@@ -1,23 +1,21 @@
1
  import gradio as gr
2
- #from transformers import AutoProcessor, BlipForConditionalGeneration
3
  from langchain_core.chat_history import InMemoryChatMessageHistory
4
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
5
  from langchain_core.messages import HumanMessage
6
  from langchain_core.runnables.history import RunnableWithMessageHistory
7
  from langchain_groq import ChatGroq
8
  import os
9
- import tempfile
10
- import requests
11
  # Load BLIP model and processor
12
  #processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
13
  #model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
14
 
15
- HF_T2 = os.getenv("HF_T2")
16
- HF_TOK=f"Bearer {HF_T2}"
17
 
 
18
 
19
  API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip2-opt-2.7b"
20
- headers = {"Authorization": HF_TOK}
21
 
22
  def query(filename):
23
  with open(filename, "rb") as f:
@@ -28,7 +26,9 @@ def query(filename):
28
 
29
  # Define the image description function
30
  def image_description(image):
31
- return query(image)
 
 
32
 
33
  # Define the problem categorization function
34
  def problem_categorization(problem_description):
@@ -149,17 +149,15 @@ chain = prompt | chat_model
149
  # Function to handle Gradio inputs
150
  def handle_input(image, text, session_id):
151
  if image:
152
- with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
153
- image.save(temp_file.name)
154
- temp_filename = temp_file.name
155
- image_desc=image_description(temp_filename)
156
  category = problem_categorization(image_desc)
157
  if category=='NA':
158
  response="Please upload a better image"
159
  else:
160
  response=f"Your request has been forwarded to the {category} department"
161
- if text:
162
- response+=f"With the description: {text}"
163
  else:
164
  # Handle text input
165
  if text=='':
 
1
  import gradio as gr
2
+ from transformers import AutoProcessor, BlipForConditionalGeneration
3
  from langchain_core.chat_history import InMemoryChatMessageHistory
4
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
5
  from langchain_core.messages import HumanMessage
6
  from langchain_core.runnables.history import RunnableWithMessageHistory
7
  from langchain_groq import ChatGroq
8
  import os
9
+
 
10
  # Load BLIP model and processor
11
  #processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
12
  #model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
13
 
 
 
14
 
15
+ import requests
16
 
17
  API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip2-opt-2.7b"
18
+ headers = {"Authorization": "Bearer hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
19
 
20
  def query(filename):
21
  with open(filename, "rb") as f:
 
26
 
27
  # Define the image description function
28
  def image_description(image):
29
+ # NOTE(review): `processor` and `model` are referenced here, but in this same
+ # commit the lines that create them are still commented out
+ # (`#processor = AutoProcessor.from_pretrained(...)`,
+ # `#model = BlipForConditionalGeneration.from_pretrained(...)`), so this
+ # added code raises NameError when called — restore those loads or keep the
+ # Inference-API `query()` path. TODO confirm against the deployed Space.
+ inputs = processor(images=image, return_tensors="pt")
30
+ out = model.generate(**inputs)
31
+ return processor.decode(out[0], skip_special_tokens=True)
32
 
33
  # Define the problem categorization function
34
  def problem_categorization(problem_description):
 
149
  # Function to handle Gradio inputs
150
  def handle_input(image, text, session_id):
151
  if image:
152
+ # Handle image input
153
+ image_desc = image_description(image)
 
 
154
  category = problem_categorization(image_desc)
155
  if category=='NA':
156
  response="Please upload a better image"
157
  else:
158
  response=f"Your request has been forwarded to the {category} department"
159
+ if text:
160
+ response+=f"With the description: {text}"
161
  else:
162
  # Handle text input
163
  if text=='':