GeorgeSherif committed on
Commit
bcdb14c
·
1 Parent(s): 077d427
Files changed (1) hide show
  1. app.py +37 -16
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  import os
3
  import threading
4
  import random
5
- from datasets import load_dataset, Dataset, Features, Value, concatenate_datasets
6
  from huggingface_hub import login
7
 
8
  # Authenticate with Hugging Face
@@ -15,33 +15,46 @@ dataset_name = "GeorgeIbrahim/EGYCOCO" # Replace with your dataset name
15
 
16
  # Load or create the dataset
17
  try:
18
- dataset = load_dataset(dataset_name, split="train")
19
  print("Loaded existing dataset:", dataset)
20
  except Exception as e:
21
- # Create an empty dataset if it doesn't exist
22
  features = Features({
23
  'image_id': Value(dtype='string'),
24
  'caption': Value(dtype='string'),
25
  })
26
- dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
27
- dataset.push_to_hub(dataset_name) # Push the empty dataset to Hugging Face
 
 
28
 
29
  image_folder = "images"
30
  image_files = [f for f in os.listdir(image_folder) if f.endswith(('.png', '.jpg', '.jpeg'))]
31
  lock = threading.Lock()
32
 
 
 
 
 
 
 
 
 
 
 
 
33
  # Function to get a random image that hasn’t been annotated or skipped
34
  def get_next_image(session_data):
35
  with lock:
36
- annotated_images = set(dataset["image_id"]) # Set of annotated images
37
  available_images = [img for img in image_files if img not in annotated_images]
38
- # Check if the user already has an image
39
  if session_data["current_image"] is None and available_images:
40
- # Assign a new random image to the user
41
  session_data["current_image"] = random.choice(available_images)
42
  return os.path.join(image_folder, session_data["current_image"]) if session_data["current_image"] else None
43
 
44
- # Function to save the annotation to Hugging Face dataset and fetch the next image
 
45
  def save_annotation(caption, session_data):
46
  if session_data["current_image"] is None:
47
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
@@ -49,20 +62,24 @@ def save_annotation(caption, session_data):
49
  with lock:
50
  image_id = session_data["current_image"]
51
 
 
 
 
 
 
52
  # Save caption or "skipped" based on user input
53
  if caption.strip().lower() == "skip":
54
  caption = "skipped"
55
 
56
- # Add the new annotation as a new row to the dataset
57
  new_data = Dataset.from_dict({"image_id": [image_id], "caption": [caption]})
58
- global dataset
59
- dataset = concatenate_datasets([dataset, new_data])
60
 
61
  # Save updated dataset to Hugging Face
62
  dataset.push_to_hub(dataset_name)
63
- print("Pushed updated dataset")
64
 
65
- # Clear user's current image so they get a new one next time
66
  session_data["current_image"] = None
67
 
68
  # Fetch the next image
@@ -72,10 +89,12 @@ def save_annotation(caption, session_data):
72
  else:
73
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
74
 
 
75
  # Function to skip the current image
76
  def skip_image(session_data):
77
  return save_annotation("skip", session_data)
78
 
 
79
  # Function to initialize the interface
80
  def initialize_interface(session_data):
81
  next_image = get_next_image(session_data)
@@ -84,10 +103,12 @@ def initialize_interface(session_data):
84
  else:
85
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
86
 
 
87
  # Build the Gradio interface
88
  with gr.Blocks() as demo:
89
  gr.Markdown("# Image Captioning Tool")
90
- gr.Markdown("Please provide a caption for each image displayed. Click 'Submit' after writing your caption, or type 'skip' if you don’t want to annotate this image.")
 
91
 
92
  session_data = gr.State({"current_image": None}) # Session-specific state
93
 
@@ -104,4 +125,4 @@ with gr.Blocks() as demo:
104
  # Load initial image
105
  demo.load(fn=initialize_interface, inputs=session_data, outputs=[image, caption])
106
 
107
- demo.launch(share=True)
 
2
  import os
3
  import threading
4
  import random
5
+ from datasets import load_dataset, Dataset, DatasetDict, Features, Value, concatenate_datasets
6
  from huggingface_hub import login
7
 
8
  # Authenticate with Hugging Face
 
15
 
16
  # Load or create the dataset
17
  try:
18
+ dataset = load_dataset(dataset_name)
19
  print("Loaded existing dataset:", dataset)
20
  except Exception as e:
21
+ # Create empty train and val datasets if they don't exist
22
  features = Features({
23
  'image_id': Value(dtype='string'),
24
  'caption': Value(dtype='string'),
25
  })
26
+ train_dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
27
+ val_dataset = Dataset.from_dict({'image_id': [], 'caption': []}, features=features)
28
+ dataset = DatasetDict({"train": train_dataset, "validation": val_dataset})
29
+ dataset.push_to_hub(dataset_name)
30
 
31
  image_folder = "images"
32
  image_files = [f for f in os.listdir(image_folder) if f.endswith(('.png', '.jpg', '.jpeg'))]
33
  lock = threading.Lock()
34
 
35
+
36
+ # Helper function to determine dataset split based on image filename
37
+ def determine_split(image_id):
38
+ if "train" in image_id:
39
+ return "train"
40
+ elif "val" in image_id:
41
+ return "validation"
42
+ else:
43
+ return None
44
+
45
+
46
  # Function to get a random image that hasn’t been annotated or skipped
47
  def get_next_image(session_data):
48
  with lock:
49
+ annotated_images = set(dataset["train"]["image_id"]) | set(dataset["validation"]["image_id"])
50
  available_images = [img for img in image_files if img not in annotated_images]
51
+
52
  if session_data["current_image"] is None and available_images:
 
53
  session_data["current_image"] = random.choice(available_images)
54
  return os.path.join(image_folder, session_data["current_image"]) if session_data["current_image"] else None
55
 
56
+
57
+ # Function to save the annotation to the correct split in the Hugging Face dataset and fetch the next image
58
  def save_annotation(caption, session_data):
59
  if session_data["current_image"] is None:
60
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
 
62
  with lock:
63
  image_id = session_data["current_image"]
64
 
65
+ # Determine the correct split for the image
66
+ split = determine_split(image_id)
67
+ if split is None:
68
+ return gr.update(value=None), gr.update(value="Error: Could not determine split.")
69
+
70
  # Save caption or "skipped" based on user input
71
  if caption.strip().lower() == "skip":
72
  caption = "skipped"
73
 
74
+ # Add the new annotation to the corresponding split
75
  new_data = Dataset.from_dict({"image_id": [image_id], "caption": [caption]})
76
+ dataset[split] = concatenate_datasets([dataset[split], new_data])
 
77
 
78
  # Save updated dataset to Hugging Face
79
  dataset.push_to_hub(dataset_name)
80
+ print(f"Pushed updated {split} dataset")
81
 
82
+ # Clear user's current image to get a new one next time
83
  session_data["current_image"] = None
84
 
85
  # Fetch the next image
 
89
  else:
90
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
91
 
92
+
93
  # Function to skip the current image
94
  def skip_image(session_data):
95
  return save_annotation("skip", session_data)
96
 
97
+
98
  # Function to initialize the interface
99
  def initialize_interface(session_data):
100
  next_image = get_next_image(session_data)
 
103
  else:
104
  return gr.update(visible=False), gr.update(value="All images have been annotated!")
105
 
106
+
107
  # Build the Gradio interface
108
  with gr.Blocks() as demo:
109
  gr.Markdown("# Image Captioning Tool")
110
+ gr.Markdown(
111
+ "Please provide a caption for each image displayed. Click 'Submit' after writing your caption, or type 'skip' if you don’t want to annotate this image.")
112
 
113
  session_data = gr.State({"current_image": None}) # Session-specific state
114
 
 
125
  # Load initial image
126
  demo.load(fn=initialize_interface, inputs=session_data, outputs=[image, caption])
127
 
128
+ demo.launch(share=True)