SaviAnna committed on
Commit
5ae462e
·
1 Parent(s): 12c33be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -9
app.py CHANGED
@@ -1,16 +1,29 @@
1
  from PIL import Image
2
  import requests
3
 
4
- from transformers import CLIPProcessor, CLIPModel
5
 
6
- model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
7
- processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
8
 
9
- url = "http://images.cocodataset.org/val2017/000000039769.jpg"
10
- image = Image.open(requests.get(url, stream=True).raw)
11
 
12
- inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
13
 
14
- outputs = model(**inputs)
15
- logits_per_image = outputs.logits_per_image # this is the image-text similarity score
16
- probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from PIL import Image
2
  import requests
3
 
4
+ # from transformers import CLIPProcessor, CLIPModel
5
 
6
+ # model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
7
+ # processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
8
 
9
+ # url = "http://images.cocodataset.org/val2017/000000039769.jpg"
10
+ # image = Image.open(requests.get(url, stream=True).raw)
11
 
12
+ # inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
13
 
14
+ # outputs = model(**inputs)
15
+ # logits_per_image = outputs.logits_per_image # this is the image-text similarity score
16
+ # probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
17
+ import zipfile
18
+ from huggingface_hub import hf_hub_url, cached_download
19
+ # Specify the Google Drive link to the archive file
20
+ archive_url = 'https://drive.google.com/uc?id=14QhofCbby053kWbVeWEBHCxOROQS-bjN'
21
+ # Specify the destination directory within the Hugging Face space
22
+ destination_dir = 'PicturesText/tree/main' # Replace with your desired destination directory
23
+ # Construct the destination path
24
+ destination_path = hf_hub_url(destination_dir)
25
+ # Download the archive to the destination path
26
+ cached_download(archive_url, destination_path)
27
+ # Extract the archive
28
+ with zipfile.ZipFile(destination_path, 'r') as zip_ref:
29
+ zip_ref.extractall(destination_dir)