Dua Rajper committed on
Commit
66544f4
·
verified ·
1 Parent(s): dc60089

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -1,17 +1,17 @@
1
  import streamlit as st
2
  from PIL import Image
3
  import easyocr
4
- from transformers import pipeline, AutoTokenizer, AutoModel
 
5
 
6
- # Load CLIP model
7
  @st.cache_resource
8
  def load_clip_model():
9
- pipe = pipeline("feature-extraction", model="fxmarty/clip-vision-model-tiny")
10
- tokenizer = AutoTokenizer.from_pretrained("fxmarty/clip-vision-model-tiny")
11
- model = AutoModel.from_pretrained("fxmarty/clip-vision-model-tiny")
12
- return pipe, tokenizer, model
13
 
14
- pipe, tokenizer, model = load_clip_model()
15
 
16
  # Initialize OCR
17
  @st.cache_resource
@@ -43,8 +43,8 @@ if uploaded_file is not None:
43
 
44
  if user_question:
45
  with st.spinner("Analyzing image and question..."):
46
- inputs = tokenizer(user_question, return_tensors="pt")
47
- outputs = model(**inputs)
48
 
49
  st.write("### 🏆 AI Response:")
50
- st.write("CLIP Model Processed the Input! (Further improvements coming soon)")
 
1
  import streamlit as st
2
  from PIL import Image
3
  import easyocr
4
+ import torch
5
+ from transformers import CLIPProcessor, CLIPModel, pipeline
6
 
7
+ # Load CLIP Model & Processor
8
  @st.cache_resource
9
  def load_clip_model():
10
+ model = CLIPModel.from_pretrained("fxmarty/clip-vision-model-tiny")
11
+ processor = CLIPProcessor.from_pretrained("fxmarty/clip-vision-model-tiny")
12
+ return model, processor
 
13
 
14
+ model, processor = load_clip_model()
15
 
16
  # Initialize OCR
17
  @st.cache_resource
 
43
 
44
  if user_question:
45
  with st.spinner("Analyzing image and question..."):
46
+ inputs = processor(text=[user_question], images=image, return_tensors="pt")
47
+ outputs = model.get_image_features(**inputs)
48
 
49
  st.write("### 🏆 AI Response:")
50
+ st.write("CLIP Model Processed the Image! (Further improvements coming soon)")