AmandaPanda committed on
Commit
1f447f3
·
verified ·
1 Parent(s): 0482136

Clean up code. Add notes (AA/BB labels)

Browse files
Files changed (1) hide show
  1. app.py +15 -12
app.py CHANGED
@@ -1,12 +1,18 @@
1
  # Import gradio - app framework
2
  import gradio as gr
 
 
 
 
 
 
 
3
  import os
4
  import random
5
 
6
  # Import pandas datasets, transformers, torch
7
  import pandas as pd
8
- #import torch
9
- #import tensorflow as tf
10
  from datasets import load_dataset
11
 
12
  from transformers import (
@@ -22,6 +28,7 @@ from transformers import (
22
  from PIL import Image
23
  import torch
24
 
 
25
  # Get merve/coco dataset
26
  from datasets import load_dataset
27
 
@@ -34,7 +41,7 @@ samples = dataset.select(range(20))
34
  #Convert to dataframe
35
  df = pd.DataFrame(samples)
36
 
37
- # Direct to Photos folder
38
  IMAGE_FOLDER = "Photos"
39
 
40
  image_paths = [
@@ -43,10 +50,6 @@ image_paths = [
43
  if f.lower().endswith((".jpg", ".jpeg", ".png"))
44
  ]
45
 
46
- ## print ("Print to show the 20 images available.")
47
- ## print ("The app will then select an image for further exploration.")
48
- ## print(df.head(20))
49
-
50
  #Load the image captioning model (Salesforce/blip-image-captioning-large)
51
  processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
52
  model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
@@ -57,21 +60,21 @@ trans_tokenizer = MarianTokenizer.from_pretrained(model_name)
57
  trans_model = MarianMTModel.from_pretrained(model_name)
58
 
59
 
60
-
61
  #Configure captioning function
 
62
  def caption_random_image():
63
 
64
- # pick random row - from DF
65
  ##sample = df.sample(1).iloc[0]
66
 
67
- # Pick a random image path
68
  img_path = random.choice(image_paths)
69
 
70
- # Load into PIL
71
  image = Image.open(img_path).convert("RGB")
72
 
73
 
74
- # 'image' field contains an actual PIL image - for DF
75
  ##image = sample["image"]
76
 
77
  # Unconditional image captioning
 
1
  # Import gradio - app framework
2
  import gradio as gr
3
+
4
+ # Two image datasources are available.
5
+ # Minor adjustments (add/remove # to deactivate/activate) to switch between datasources.
6
+ # AA comments refer to images in the DataFrame / from the COCO dataset
7
+ # BB comments refer to images stored in local Gradio app folder
8
+
9
+ # Import os and random to support random selection of image (from folder)
10
  import os
11
  import random
12
 
13
  # Import pandas datasets, transformers, torch
14
  import pandas as pd
15
+
 
16
  from datasets import load_dataset
17
 
18
  from transformers import (
 
28
  from PIL import Image
29
  import torch
30
 
31
+ # AA: Load dataset. Initial image source.
32
  # Get merve/coco dataset
33
  from datasets import load_dataset
34
 
 
41
  #Convert to dataframe
42
  df = pd.DataFrame(samples)
43
 
44
+ # BB: Direct to Photos folder
45
  IMAGE_FOLDER = "Photos"
46
 
47
  image_paths = [
 
50
  if f.lower().endswith((".jpg", ".jpeg", ".png"))
51
  ]
52
 
 
 
 
 
53
  #Load the image captioning model (Salesforce/blip-image-captioning-large)
54
  processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
55
  model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
60
  trans_model = MarianMTModel.from_pretrained(model_name)
61
 
62
 
 
63
  #Configure captioning function
64
+
65
  def caption_random_image():
66
 
67
+ # AA: pick random row - from DF
68
  ##sample = df.sample(1).iloc[0]
69
 
70
+ # BB: Pick a random image path - image from folder
71
  img_path = random.choice(image_paths)
72
 
73
+ # BB: Load into PIL - image from folder
74
  image = Image.open(img_path).convert("RGB")
75
 
76
 
77
+ # AA: Image - for DF
78
  ##image = sample["image"]
79
 
80
  # Unconditional image captioning