SaviAnna committed on
Commit
2073fd8
·
1 Parent(s): 678aebc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -22
app.py CHANGED
@@ -1,36 +1,47 @@
1
- # from PIL import Image
2
- # import requests
3
 
4
- # from transformers import CLIPProcessor, CLIPModel
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- # model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
7
- # processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
8
 
9
- # url = "http://images.cocodataset.org/val2017/000000039769.jpg"
10
- # image = Image.open(requests.get(url, stream=True).raw)
11
-
12
- # inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
13
-
14
- # outputs = model(**inputs)
15
- # logits_per_image = outputs.logits_per_image # this is the image-text similarity score
16
- # probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
17
  import os
18
  import zipfile
19
- from huggingface_hub import hf_hub_url, cached_download
20
  import gdown
 
21
  # Specify the Google Drive link to the archive file
22
  archive_url = 'https://drive.google.com/uc?id=14QhofCbby053kWbVeWEBHCxOROQS-bjN'
 
23
  # Specify the destination directory within the Hugging Face space
 
 
 
 
 
 
24
  # Download the archive
25
- output_path = 'SaviAnna/PicturesText/archive.zip'
26
- #destination_dir = 'SaviAnna/PicturesText' # Replace with your desired destination directory
27
- os.makedirs(output_path,exist_ok=True)
28
- # Construct the destination path
29
- #destination_path = hf_hub_url(destination_dir)
30
- # Download the archive to the destination path
31
- # cached_download(archive_url, output_path, quiet=False)
32
  gdown.download(archive_url, output_path, quiet=False)
33
- extracted_directory = 'PicturesText/find_pic'
 
 
 
34
  # Extract the archive
35
  with zipfile.ZipFile(output_path, 'r') as zip_ref:
36
  zip_ref.extractall(extracted_directory)
 
 
 
 
1
 
2
+ # import os
3
+ # import zipfile
4
+ # from huggingface_hub import hf_hub_url, cached_download
5
+ # import gdown
6
+ # # Specify the Google Drive link to the archive file
7
+ # archive_url = 'https://drive.google.com/uc?id=14QhofCbby053kWbVeWEBHCxOROQS-bjN'
8
+ # # Specify the destination directory within the Hugging Face space
9
+ # # Download the archive
10
+ # output_path = 'SaviAnna/PicturesText'
11
+ # #destination_dir = 'SaviAnna/PicturesText' # Replace with your desired destination directory
12
+ # os.makedirs(output_path,exist_ok=True)
13
+ # # Construct the destination path
14
+ # #destination_path = hf_hub_url(destination_dir)
15
+ # # Download the archive to the destination path
16
+ # # cached_download(archive_url, output_path, quiet=False)
17
+ # gdown.download(archive_url, output_path, quiet=False)
18
+ # extracted_directory = 'PicturesText/find_pic'
19
+ # # Extract the archive
20
+ # with zipfile.ZipFile(output_path, 'r') as zip_ref:
21
+ # zip_ref.extractall(extracted_directory)
22
 
 
 
23
 
 
 
 
 
 
 
 
 
"""Download a zip archive from Google Drive and extract it locally.

Runs at module import time (Hugging Face Space startup): fetches the
archive with gdown into a local directory, then unpacks it.
"""
import os
import zipfile

import gdown

# Google Drive link to the archive file (uc?id= form for direct download).
archive_url = 'https://drive.google.com/uc?id=14QhofCbby053kWbVeWEBHCxOROQS-bjN'

# Destination directory within the Hugging Face space.
destination_dir = 'SaviAnna/PicturesText'  # Replace with your desired destination directory
os.makedirs(destination_dir, exist_ok=True)

# Output file path for the downloaded archive.
output_path = os.path.join(destination_dir, 'archive.zip')

# Download the archive. gdown.download returns the output path on
# success and None on failure, so fail loudly here instead of letting
# zipfile raise a confusing error on a missing or partial file.
downloaded = gdown.download(archive_url, output_path, quiet=False)
if downloaded is None:
    raise RuntimeError(f'Failed to download archive from {archive_url}')

# Directory to extract the archive into (created by extractall if absent).
extracted_directory = os.path.join(destination_dir, 'find_pic')

# Extract the archive.
with zipfile.ZipFile(output_path, 'r') as zip_ref:
    zip_ref.extractall(extracted_directory)