Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
@@ -8,6 +8,7 @@ import open_clip
 
 knnpath = '20241204-ams-no-env-open_clip_ViT-H-14-378-quickgelu.npz'
 clip_model_name = 'ViT-H-14-378-quickgelu'
+pretrained_name = 'dfn5b'
 
 
 # Set page config
@@ -22,16 +23,15 @@ st.set_page_config(
 #model, preprocess = open_clip.create_model_from_pretrained(clip_model_name)
 #tokenizer = open_clip.get_tokenizer(clip_model_name)
 
-st.write("Available models:", open_clip.list_models())
+#st.write("Available models:", open_clip.list_models())
 
 @st.cache_resource
 def load_model():
     """Load the OpenCLIP model and return model and processor"""
     model, _, preprocess = open_clip.create_model_and_transforms(
-
-        pretrained='laion2b_s32b_b79k'
+        clip_model_name, pretrained=pretrained_name
     )
-    tokenizer = open_clip.get_tokenizer(
+    tokenizer = open_clip.get_tokenizer(clip_model_name)
     return model, preprocess, tokenizer
 
 def process_image(image, preprocess):