GeorgeSherif committed on
Commit
3b0d24d
·
1 Parent(s): c026429
Files changed (1) hide show
  1. app.py +28 -37
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import os
3
  import threading
4
  import random
5
- from datasets import load_dataset, Dataset, DatasetDict, Features, Value, concatenate_datasets
6
  from huggingface_hub import login
7
 
8
  # Authenticate with Hugging Face
@@ -13,48 +13,38 @@ else:
13
  print("HUGGINGFACE_TOKEN environment variable not set.")
14
  dataset_name = "GeorgeIbrahim/EGYCOCO" # Replace with your dataset name
15
 
16
- # Load or create the dataset
17
  try:
18
- dataset = load_dataset(dataset_name)
19
- print("Loaded existing dataset:", dataset)
 
20
  except Exception as e:
21
- # Create empty train and val datasets if they don't exist
22
  features = Features({
23
  'image_id': Value(dtype='string'),
24
  'caption': Value(dtype='string'),
25
  })
26
  train_dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
27
  val_dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
28
- dataset = DatasetDict({"train": train_dataset, "test": val_dataset})
29
- dataset.push_to_hub(dataset_name)
30
 
31
  image_folder = "images"
32
  image_files = [f for f in os.listdir(image_folder) if f.endswith(('.png', '.jpg', '.jpeg'))]
33
  lock = threading.Lock()
34
 
35
-
36
- # Helper function to determine dataset split based on image filename
37
- def determine_split(image_id):
38
- if "train" in image_id:
39
- return "train"
40
- elif "val" in image_id:
41
- return "test"
42
- else:
43
- return None
44
-
45
-
46
  # Function to get a random image that hasn’t been annotated or skipped
47
  def get_next_image(session_data):
48
  with lock:
49
- annotated_images = set(dataset["train"]["image_id"]) | set(dataset["test"]["image_id"])
50
  available_images = [img for img in image_files if img not in annotated_images]
51
-
52
  if session_data["current_image"] is None and available_images:
 
53
  session_data["current_image"] = random.choice(available_images)
54
  return os.path.join(image_folder, session_data["current_image"]) if session_data["current_image"] else None
55
 
56
-
57
- # Function to save the annotation to the correct split in the Hugging Face dataset and fetch the next image
58
  def save_annotation(caption, session_data):
59
  if session_data["current_image"] is None:
60
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
@@ -62,24 +52,29 @@ def save_annotation(caption, session_data):
62
  with lock:
63
  image_id = session_data["current_image"]
64
 
65
- # Determine the correct split for the image
66
- split = determine_split(image_id)
67
- if split is None:
68
- return gr.update(value=None), gr.update(value="Error: Could not determine split.")
69
-
70
  # Save caption or "skipped" based on user input
71
  if caption.strip().lower() == "skip":
72
  caption = "skipped"
73
 
74
- # Add the new annotation to the corresponding split
 
 
 
 
 
 
 
 
 
 
75
  new_data = Dataset.from_dict({"image_id": [image_id], "caption": [caption]})
76
- dataset[split] = concatenate_datasets([dataset[split], new_data])
77
 
78
  # Save updated dataset to Hugging Face
79
- dataset.push_to_hub(dataset_name)
80
- print(f"Pushed updated {split} dataset")
81
 
82
- # Clear user's current image to get a new one next time
83
  session_data["current_image"] = None
84
 
85
  # Fetch the next image
@@ -89,12 +84,10 @@ def save_annotation(caption, session_data):
89
  else:
90
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
91
 
92
-
93
  # Function to skip the current image
94
  def skip_image(session_data):
95
  return save_annotation("skip", session_data)
96
 
97
-
98
  # Function to initialize the interface
99
  def initialize_interface(session_data):
100
  next_image = get_next_image(session_data)
@@ -103,12 +96,10 @@ def initialize_interface(session_data):
103
  else:
104
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
105
 
106
-
107
  # Build the Gradio interface
108
  with gr.Blocks() as demo:
109
  gr.Markdown("# Image Captioning Tool")
110
- gr.Markdown(
111
- "Please provide a caption for each image displayed. Click 'Submit' after writing your caption, or type 'skip' if you don’t want to annotate this image.")
112
 
113
  session_data = gr.State({"current_image": None}) # Session-specific state
114
 
 
2
  import os
3
  import threading
4
  import random
5
+ from datasets import load_dataset, Dataset, Features, Value, concatenate_datasets
6
  from huggingface_hub import login
7
 
8
  # Authenticate with Hugging Face
 
13
  print("HUGGINGFACE_TOKEN environment variable not set.")
14
dataset_name = "GeorgeIbrahim/EGYCOCO"  # Replace with your dataset name

# Load the existing annotation splits from the Hub, or create empty ones on
# first run (when the dataset repo does not exist yet).
try:
    train_dataset = load_dataset(dataset_name, split="train")
    val_dataset = load_dataset(dataset_name, split="validation")
    print("Loaded existing datasets:", train_dataset, val_dataset)
except Exception as e:
    # NOTE(review): any failure lands here — including transient network or
    # auth errors — and pushes EMPTY splits, which would overwrite existing
    # annotations on the Hub. At minimum, log the reason so the overwrite is
    # visible instead of silent.
    print(f"Could not load dataset '{dataset_name}' ({e}); creating empty splits.")
    features = Features({
        'image_id': Value(dtype='string'),
        'caption': Value(dtype='string'),
    })
    train_dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
    val_dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
    train_dataset.push_to_hub(dataset_name, split="train")  # Push the empty train dataset to Hugging Face
    val_dataset.push_to_hub(dataset_name, split="validation")  # Push the empty validation dataset to Hugging Face

image_folder = "images"
# Only common raster-image extensions are offered for annotation.
image_files = [f for f in os.listdir(image_folder) if f.endswith(('.png', '.jpg', '.jpeg'))]
# Serializes access to the shared dataset/image bookkeeping across concurrent
# Gradio sessions.
lock = threading.Lock()
35
 
 
 
 
 
 
 
 
 
 
 
 
36
def get_next_image(session_data):
    """Pick a random not-yet-annotated image for this session.

    Returns the filesystem path of the session's current image, or ``None``
    when every image has already been annotated or skipped.
    """
    with lock:
        # An image counts as done if it appears in either split.
        done = set(train_dataset["image_id"])
        done |= set(val_dataset["image_id"])
        remaining = [name for name in image_files if name not in done]
        # Assign a fresh random image only when this session has none pending.
        if session_data["current_image"] is None and remaining:
            session_data["current_image"] = random.choice(remaining)
        current = session_data["current_image"]
        return os.path.join(image_folder, current) if current else None
 
47
+ # Function to save the annotation to Hugging Face datasets and fetch the next image
 
48
  def save_annotation(caption, session_data):
49
  if session_data["current_image"] is None:
50
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
 
52
  with lock:
53
  image_id = session_data["current_image"]
54
 
 
 
 
 
 
55
  # Save caption or "skipped" based on user input
56
  if caption.strip().lower() == "skip":
57
  caption = "skipped"
58
 
59
+ # Determine the dataset split based on the image name
60
+ if "train" in image_id:
61
+ target_dataset = train_dataset
62
+ split_name = "train"
63
+ elif "dev" in image_id:
64
+ target_dataset = val_dataset
65
+ split_name = "validation"
66
+ else:
67
+ return gr.update(visible=False), gr.update(value="Unknown dataset split for image!")
68
+
69
+ # Add the new annotation as a new row to the appropriate dataset
70
  new_data = Dataset.from_dict({"image_id": [image_id], "caption": [caption]})
71
+ target_dataset = concatenate_datasets([target_dataset, new_data])
72
 
73
  # Save updated dataset to Hugging Face
74
+ target_dataset.push_to_hub(dataset_name, split=split_name)
75
+ print(f"Pushed updated {split_name} dataset")
76
 
77
+ # Clear user's current image so they get a new one next time
78
  session_data["current_image"] = None
79
 
80
  # Fetch the next image
 
84
  else:
85
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
86
 
 
87
def skip_image(session_data):
    """Skip the session's current image and fetch the next one.

    Delegates to ``save_annotation`` with the literal "skip", which records
    the caption as "skipped".
    """
    return save_annotation("skip", session_data)
90
 
 
91
  # Function to initialize the interface
92
  def initialize_interface(session_data):
93
  next_image = get_next_image(session_data)
 
96
  else:
97
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
98
 
 
99
  # Build the Gradio interface
100
  with gr.Blocks() as demo:
101
  gr.Markdown("# Image Captioning Tool")
102
+ gr.Markdown("Please provide a caption for each image displayed. Click 'Submit' after writing your caption, or type 'skip' if you don’t want to annotate this image.")
 
103
 
104
  session_data = gr.State({"current_image": None}) # Session-specific state
105