Datasets:

aibota01 committed on
Commit
abc47e7
·
verified ·
1 Parent(s): abf36ec

Upload 5 files

Browse files
Files changed (5) hide show
  1. coco_to_yolo.py +98 -0
  2. map_labels_yolo.py +103 -0
  3. split_data.py +64 -0
  4. train_rtdetr.py +7 -0
  5. train_yolo.py +8 -0
coco_to_yolo.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from PIL import Image
4
+
5
def yolo_to_coco(yolo_bbox, img_width, img_height):
    """
    Convert a YOLO-format bounding box to COCO format.

    YOLO format: (x_center, y_center, width, height), normalized to [0, 1].
    COCO format: [x_min, y_min, width, height] in absolute pixels.
    """
    xc, yc, w, h = yolo_bbox
    # Scale normalized coordinates up to pixel space.
    xc, yc = xc * img_width, yc * img_height
    w, h = w * img_width, h * img_height
    # COCO boxes are anchored at the top-left corner, not the center.
    return [xc - w / 2, yc - h / 2, w, h]
21
+
22
def convert_to_coco_format(image_dir, label_dir, output_json, categories):
    """
    Convert a dataset from YOLO format to COCO format.

    Parameters:
    - image_dir: Directory containing images
    - label_dir: Directory containing YOLO format label files
    - output_json: Path to save the output COCO format JSON file
    - categories: List of category dictionaries for COCO format

    Images without a matching label file still appear in "images" but get no
    annotations. Category ids are the YOLO class id + 1 (COCO ids are 1-based).
    """
    images = []
    annotations = []
    annotation_id = 1

    for image_file in os.listdir(image_dir):
        if not image_file.endswith(('.jpg', '.png')):
            continue

        image_id = len(images) + 1
        img_path = os.path.join(image_dir, image_file)
        # BUGFIX: open the image in a context manager so the file handle is
        # closed immediately instead of leaking until garbage collection
        # (matters when converting thousands of images in one run).
        with Image.open(img_path) as img:
            width, height = img.size

        images.append({
            "id": image_id,
            "width": width,
            "height": height,
            "file_name": image_file
        })

        label_file = os.path.join(label_dir, os.path.splitext(image_file)[0] + '.txt')
        if not os.path.exists(label_file):
            continue

        with open(label_file) as f:
            for line in f:
                # Each YOLO row: class_id x_center y_center width height (normalized).
                class_id, x_center, y_center, bbox_width, bbox_height = map(float, line.strip().split())
                # Convert YOLO bounding box to COCO bounding box
                bbox = yolo_to_coco([x_center, y_center, bbox_width, bbox_height], width, height)

                annotations.append({
                    "id": annotation_id,
                    "image_id": image_id,
                    "category_id": int(class_id) + 1,
                    "bbox": bbox,
                    "area": bbox[2] * bbox[3],  # area = width * height
                    "iscrowd": 0,
                    "segmentation": []  # No segmentation info available
                })
                annotation_id += 1

    coco_format = {
        "images": images,
        "annotations": annotations,
        "categories": categories
    }

    with open(output_json, 'w') as f:
        json.dump(coco_format, f, indent=4)
82
+
83
# List of class names for the dataset; list position defines the category id
# (1-based, matching the `int(class_id) + 1` offset used in the converter).
class_names = ['airan-katyk', 'almond', 'apple', 'artichoke', 'arugula', 'asparagus', 'avocado', 'bacon', 'banana', 'beans', 'beet', 'bell pepper', 'black olives', 'blackberry', 'blueberry', 'boiled chicken', 'bread', 'broccoli', 'buckwheat', 'cabbage', 'cakes', 'carrot', 'cashew', 'casserole with meat and vegetables', 'cauliflower', 'celery', 'cereal based cooked food', 'cheese', 'chickpeas', 'chips', 'cooked eggplant', 'cooked food based on meat', 'cooked food meat with vegetables', 'cooked zucchini', 'cookies', 'corn', 'crepe', 'cucumber', 'cutlet', 'desserts', 'egg product', 'eggplant', 'fish', 'fried chicken', 'fried eggs', 'fried fish', 'fried meat', 'fruits', 'granola', 'grapes', 'green beans', 'herbs', 'hummus', 'ice-cream', 'irimshik', 'juice', 'kiwi', 'lavash', 'legumes', 'lemon', 'mandarin', 'mango', 'mashed potato', 'meat product', 'melon', 'mixed berries', 'mixed nuts', 'mushrooms', 'onion', 'orange', 'pasta', 'pastry', 'peanut', 'pear', 'peas', 'pecan', 'pickled cabbage', 'pickled squash', 'pie', 'pineapple', 'pizza', 'plov', 'porridge', 'potatoes', 'pumpkin', 'radish', 'raspberry', 'rice', 'salad fresh', 'salad leaves', 'salad with fried meat veggie', 'salad with sauce', 'sandwich', 'sausages', 'seafood', 'smetana', 'snacks', 'snacks bread', 'souces', 'soup-plain', 'soy product', 'spinach', 'strawberry', 'suzbe', 'sweet potatoes', 'tomato', 'tomato souce', 'tushpara-wo-soup', 'vegetable based cooked food', 'waffles', 'walnut', 'watermelon', 'zucchini']
# Sanity check: the converter assumes exactly 113 classes.
assert len(class_names) == 113
categories = [{"id": i + 1, "name": name, "supercategory": "none"} for i, name in enumerate(class_names)]

# Directory paths for the dataset (relative to this script's working directory)
dataset_dir = '../datasets/Nutrition5k'
output_dir = '../datasets/Nutrition5k/annotations'
os.makedirs(output_dir, exist_ok=True)

# Convert labels for train, validation, and test splits; writes one
# instances_<split>.json per split into the annotations directory.
for split in ['train', 'val', 'test']:
    image_dir = os.path.join(dataset_dir, split, 'images')
    label_dir = os.path.join(dataset_dir, split, 'labels')
    output_json = os.path.join(output_dir, f'instances_{split}.json')
    convert_to_coco_format(image_dir, label_dir, output_json, categories)
map_labels_yolo.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import pandas as pd
4
+ from tqdm import tqdm
5
+ import cv2
6
+ import numpy as np
7
+ import seaborn as sns
8
+ import matplotlib.pyplot as plt
9
+ import random
10
+ from pathlib import Path
11
+ import json
12
+ from PIL import Image
13
+
14
+ # CODE FOR MAPPING YOLO CLASS LABELS WHEN MERGING DIFFERENT DATASETS WITH DIFFERENT NUMBERS OF CLASSES AND YOLO LABEL FORMAT
15
+
16
def create_mapping(classes):
    """
    Build a class-name -> index mapping, with indices assigned in sorted order.

    Parameters:
    - classes: List of class names.

    Returns:
    - mapping: Dictionary with class names as keys and indices as values.
    """
    return {cls: idx for idx, cls in enumerate(sorted(classes))}
30
+
31
def get_name_by_index(index, my_dict):
    """
    Reverse-lookup a class name by its index in a mapping dictionary.

    Parameters:
    - index: The index to look up (anything `int()` accepts, e.g. a str token).
    - my_dict: The mapping dictionary (name -> index).

    Returns:
    - The class name for the given index, or None if no entry matches.
    """
    target = int(index)  # hoist the conversion out of the scan
    return next((name for name, idx in my_dict.items() if idx == target), None)
46
+
47
def update_labels(label_file, old_mapping, new_mapping):
    """
    Rewrite, in place, the class ids in one YOLO label file from the old
    class-to-index mapping to the new one.

    Parameters:
    - label_file: The path to the YOLO label file.
    - old_mapping: The old class-to-index mapping.
    - new_mapping: The new class-to-index mapping.

    Rows whose class has no entry in the new mapping are dropped; malformed
    rows (not exactly 5 tokens) are silently discarded.
    """
    with open(label_file, 'r') as src:
        lines = src.readlines()
    print(f'Updating labels in {label_file} ....')
    print('Old labels: ', lines)

    updated_lines = []
    for line in lines:
        parts = line.split()
        if len(parts) != 5:
            continue
        class_id = parts[0]
        class_name = get_name_by_index(class_id, old_mapping)
        if class_name in new_mapping:
            updated_class_id = new_mapping[class_name]
            updated_lines.append(f"{updated_class_id} {' '.join(parts[1:])}")
            print(f'Mapped the class {class_name} from old id of {class_id} to {updated_class_id}')
        else:
            print(f'Deleting the label for class {class_name} as it is not found in new mapping')

    with open(label_file, 'w') as dst:
        for item in updated_lines:
            dst.write(item + '\n')
    print('Updated labels: ', updated_lines)
    print('\n\n')
77
+
78
# Lists of class names from three different datasets; the script below merges
# them into a single class space and remaps existing YOLO label ids.
dataset1 = ['airan-katyk', 'almond', 'apple', 'artichoke', 'arugula', 'asparagus', 'avocado', 'bacon', 'banana', 'beans', 'beet', 'bell pepper', 'black olives', 'blackberry', 'blueberry', 'boiled chicken', 'bread', 'broccoli', 'buckwheat', 'cabbage', 'cakes', 'carrot', 'cashew', 'casserole with meat and vegetables', 'cauliflower', 'celery', 'cereal based cooked food', 'cheese', 'chickpeas', 'chips', 'cooked eggplant', 'cooked food based on meat', 'cooked food meat with vegetables', 'cooked zucchini', 'cookies', 'corn', 'crepe', 'cucumber', 'cutlet', 'desserts', 'egg product', 'fish', 'fried chicken', 'fried eggs', 'fried fish', 'fried meat', 'fruits', 'granola', 'grapes', 'green beans', 'herbs', 'hummus', 'ice-cream', 'juice', 'kiwi', 'lavash', 'legumes', 'lemon', 'mandarin', 'mashed potato', 'meat product', 'melon', 'mixed berries', 'mixed nuts', 'mushrooms', 'onion', 'orange', 'pasta', 'pastry', 'peanut', 'pear', 'peas', 'pecan', 'pickled cabbage', 'pickled squash', 'pie', 'pineapple', 'pizza', 'plov', 'porridge', 'potatoes', 'pumpkin', 'radish', 'raspberry', 'rice', 'salad fresh', 'salad leaves', 'salad with fried meat veggie', 'salad with sauce', 'sandwich', 'sausages', 'seafood', 'snacks bread', 'souces', 'soup-plain', 'soy product', 'spinach', 'strawberry', 'suzbe', 'sweet potatoes', 'tomato', 'tomato souce', 'tushpara-wo-soup', 'vegetable based cooked food', 'waffles', 'walnut', 'watermelon', 'zucchini']
dataset2 = ['airan-katyk', 'almond', 'apple', 'arugula', 'asparagus', 'avocado', 'bacon', 'banana', 'beans', 'beet', 'bell pepper', 'black olives', 'blackberry', 'blueberry', 'boiled chicken', 'bread', 'broccoli', 'cabbage', 'carrot', 'cashew', 'casserole with meat and vegetables', 'cauliflower', 'celery', 'cereal based cooked food', 'cheese', 'chickpeas', 'chips', 'cooked eggplant', 'cooked food based on meat', 'cooked food meat with vegetables', 'cooked zucchini', 'cookies', 'corn', 'cucumber', 'cutlet', 'desserts', 'egg product', 'eggplant', 'fish', 'fried chicken', 'fried fish', 'fried meat', 'fruits', 'granola', 'grapes', 'green beans', 'herbs', 'hummus', 'ice-cream', 'irimshik', 'juice', 'kiwi', 'lavash', 'lemon', 'mandarin', 'mango', 'meat product', 'melon', 'mixed berries', 'mixed nuts', 'mushrooms', 'onion', 'orange', 'pasta', 'pastry', 'pear', 'peas', 'pecan', 'pickled cabbage', 'pickled squash', 'pie', 'pineapple', 'pizza', 'porridge', 'potatoes', 'radish', 'raspberry', 'rice', 'salad fresh', 'salad with fried meat veggie', 'salad with sauce', 'sausages', 'seafood', 'smetana', 'snacks', 'snacks bread', 'souces', 'soup-plain', 'soy product', 'spinach', 'strawberry', 'suzbe', 'sweet potatoes', 'tomato', 'tushpara-wo-soup', 'vegetable based cooked food', 'waffles', 'walnut', 'watermelon']
dataset3 = ['achichuk', 'airan-katyk', 'almond', 'apple', 'apricot', 'artichoke', 'arugula', 'asip', 'asparagus', 'avocado', 'bacon', 'baklava', 'banana', 'basil', 'bauyrsak', 'bean soup', 'beans', 'beef shashlyk', 'beef shashlyk-v', 'beer', 'beet', 'bell pepper', 'beshbarmak', 'beverages', 'black olives', 'blackberry', 'blueberry', 'boiled chicken', 'boiled eggs', 'boiled meat', 'borsch', 'bread', 'brizol', 'broccoli', 'buckwheat', 'butter', 'cabbage', 'cakes', 'carrot', 'cashew', 'casserole with meat and vegetables', 'cauliflower', 'caviar', 'celery', 'cereal based cooked food', 'chak-chak', 'cheburek', 'cheese', 'cheese souce', 'cherry', 'chestnuts', 'chicken shashlyk', 'chicken shashlyk-v', 'chickpeas', 'chili pepper', 'chips', 'chocolate', 'chocolate paste', 'cinnabons', 'coffee', 'condensed milk', 'cooked eggplant', 'cooked food based on meat', 'cooked food meat with vegetables', 'cooked tomatoes', 'cooked zucchini', 'cookies', 'corn', 'corn flakes', 'crepe', 'crepe w filling', 'croissant', 'croissant sandwich', 'cucumber', 'cutlet', 'dates', 'desserts', 'dill', 'doner-lavash', 'doner-nan', 'dragon fruit', 'dried fruits', 'egg product', 'eggplant', 'figs', 'fish', 'french fries', 'fried cheese', 'fried chicken', 'fried eggs', 'fried fish', 'fried meat', 'fruits', 'garlic', 'granola', 'grapefruit', 'grapes', 'green beans', 'green olives', 'hachapuri', 'hamburger', 'hazelnut', 'herbs', 'hinkali', 'honey', 'hot dog', 'hummus', 'hvorost', 'ice-cream', 'irimshik', 'jam', 'juice', 'karta', 'kattama-nan', 'kazy-karta', 'ketchup', 'kiwi', 'kurt', 'kuyrdak', 'kymyz-kymyran', 'lagman-fried', 'lagman-w-soup', 'lavash', 'legumes', 'lemon', 'lime', 'mandarin', 'mango', 'manty', 'mashed potato', 'mayonnaise', 'meat based soup', 'meat product', 'melon', 'milk', 'minced meat shashlyk', 'mint', 'mixed berries', 'mixed nuts', 'muffin', 'mushrooms', 'naryn', 'nauryz-kozhe', 'noodles soup', 'nuggets', 'oil', 'okra', 'okroshka', 'olivie', 'onion', 'onion rings', 'orama', 'orange', 'pancakes', 'parsley', 'pasta', 'pastry', 'peach', 'peanut', 'pear', 'peas', 'pecan', 'persimmon', 'pickled cabbage', 'pickled cucumber', 'pickled ginger', 'pickled squash', 'pie', 'pineapple', 'pistachio', 'pizza', 'plov', 'plum', 'pomegranate', 'porridge', 'potatoes', 'pumpkin', 'pumpkin seeds', 'quince', 'radish', 'raspberry', 'redcurrant', 'ribs', 'rice', 'rosemary', 'salad fresh', 'salad leaves', 'salad with fried meat veggie', 'salad with sauce', 'samsa', 'sandwich', 'sausages', 'scallion', 'seafood', 'seafood soup', 'sheep-head', 'shelpek', 'shorpa', 'shorpa chicken', 'smetana', 'smoked fish', 'snacks', 'snacks bread', 'soda', 'souces', 'soup-plain', 'soy souce', 'spinach', 'spirits', 'strawberry', 'sugar', 'sushi', 'sushi fish', 'sushi nori', 'sushki', 'suzbe', 'sweets', 'syrniki', 'taba-nan', 'talkan-zhent', 'tartar', 'tea', 'tomato', 'tomato souce', 'tomato-cucumber-salad', 'tushpara-fried', 'tushpara-w-soup', 'tushpara-wo-soup', 'vareniki', 'vegetable based cooked food', 'vegetable soup', 'waffles', 'walnut', 'wasabi', 'water', 'watermelon', 'wine', 'wings', 'zucchini']
82
+
83
# Combine and create sets of class names for the different datasets.
# BUGFIX: the original referenced undefined names `folder1`/`folder2` and
# `google_classes`, which raised NameError as soon as the script ran. The
# combined set now merges the two defined dataset vocabularies, and the
# stale `google_classes` assert has been removed.
combined_classes = set(dataset1 + dataset2)
all_classes = set(dataset1 + dataset2 + dataset3)

# Persist the sorted merged class list so it can be referenced elsewhere
# (e.g. when writing a data.yaml for training).
with open('./new_dataset_classes.txt', 'w') as f:
    for cls in sorted(all_classes):
        f.write(cls + '\n')

# Ensure the merged class count is correct
assert len(all_classes) == 241

# Create mappings from class names to indices
old_mapping = create_mapping(dataset1)     # index space used by the existing label files
new_mapping = create_mapping(all_classes)  # index space of the merged vocabulary

# Rewrite every YOLO label file in the directory to the merged mapping
labelfolder = '../datasets/Nutrition5k/train/labels'
for labelfile in tqdm(sorted(os.listdir(labelfolder))):
    labelfilepath = os.path.join(labelfolder, labelfile)
    update_labels(labelfilepath, old_mapping, new_mapping)
split_data.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ from pathlib import Path
4
+ import random
5
+
6
+ # CODE FOR SPLITTING THE DATASET INTO TRAIN/TEST/VAL
7
+
8
def split_data(source_images_folder, source_labels_folder, destination_folder, train_ratio=0.8):
    """
    Split the dataset into training, validation, and testing sets.

    Parameters:
    - source_images_folder: Path to the source folder containing images.
    - source_labels_folder: Path to the source folder containing labels.
    - destination_folder: Path (pathlib.Path) to the destination root; the
      train/val/test subfolders are created under it.
    - train_ratio: Ratio of training data. Default is 0.8; the remainder is
      divided evenly between test and val.
    """
    image_files = os.listdir(source_images_folder)

    # Deterministic shuffle: a fixed seed makes repeated runs reproduce the
    # exact same split.
    random.seed(100)
    random.shuffle(image_files)
    cut_train = int(train_ratio * len(image_files))
    cut_test = int((train_ratio + (1 - train_ratio) / 2) * len(image_files))
    train_images = image_files[:cut_train]
    test_images = image_files[cut_train:cut_test]
    val_images = image_files[cut_test:]

    destination_folder.mkdir(parents=True, exist_ok=True)
    # Copy each image and its matching label file into its split directory.
    for split_name, split_files in zip(('train', 'val', 'test'),
                                       (train_images, val_images, test_images)):
        for image in split_files:
            image_path = os.path.join(source_images_folder, image)
            destination_image_path = destination_folder / split_name / "images" / image
            destination_image_path.parent.mkdir(parents=True, exist_ok=True)
            label_file = image.rsplit(".", 1)[0] + '.txt'
            label_path = os.path.join(source_labels_folder, label_file)
            destination_label_path = destination_folder / split_name / "labels" / label_file
            destination_label_path.parent.mkdir(parents=True, exist_ok=True)
            # Copying files to respective splits (label first, then image).
            shutil.copy2(label_path, destination_label_path)
            shutil.copy2(image_path, destination_image_path)
            print("Image copied to ", destination_image_path)

    # Print the number of images in each split
    print(f"Number of train images: {len(train_images)}\n",
          f"Number of validation images: {len(val_images)}\n",
          f"Number of test images: {len(test_images)}\n")
57
+
58
# Define source and destination folders. The source is the Nutrition5k train
# split; the new train/val/test folders are written under ../datasets/new.
source_images_folder = Path('../datasets/Nutrition5k/train/images')
source_labels_folder = Path('../datasets/Nutrition5k/train/labels')
destination_folder = Path('../datasets/new')

# Split the dataset: 80% train, with the remainder divided evenly
# between val and test.
split_data(source_images_folder, source_labels_folder, destination_folder, train_ratio=0.8)
train_rtdetr.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
# Train an RT-DETR detector with the Ultralytics API.
from ultralytics import RTDETR

# Load the rtdetr-x checkpoint as the starting point.
model = RTDETR('rtdetr-x.pt')

# Train on the dataset described by data.yaml; patience=30 stops early after
# 30 epochs without improvement, freeze=6 keeps the first 6 layers frozen.
# NOTE(review): data.yaml is assumed to sit next to this script — confirm.
results = model.train(data='data.yaml', epochs=150, augment=True, patience=30, freeze=6)

# Export the trained model in the library's default export format.
results = model.export()
train_yolo.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Train a YOLOv8 detector with the Ultralytics API.
from ultralytics import YOLO

# Load the yolov8s checkpoint as the starting point.
model = YOLO('yolov8s.pt')

# Train on the dataset described by data.yaml; patience=30 stops early after
# 30 epochs without improvement.
# NOTE(review): data.yaml is assumed to sit next to this script — confirm.
results = model.train(data='data.yaml', epochs=150, augment=True, patience=30)


# Export the trained model in the library's default export format.
results = model.export()