Spaces:
Running
Running
Upload auto_label.py
Browse files — label 45,000 images using newly trained brain
- auto_label.py +100 -49
auto_label.py
CHANGED
|
@@ -1,71 +1,122 @@
|
|
| 1 |
-
from ultralytics import YOLO
|
| 2 |
import os
|
|
|
|
|
|
|
|
|
|
| 3 |
from tqdm import tqdm
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
|
| 8 |
-
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
# 3. Output Folder for Labels
|
| 11 |
-
LABELS_DIR = r"C:\Users\charu\Desktop\all new\40000\all_labels"
|
| 12 |
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
-
def
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
-
|
| 27 |
|
| 28 |
-
|
|
|
|
| 29 |
|
| 30 |
-
|
| 31 |
-
image_files = [f for f in os.listdir(IMAGES_DIR) if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
|
| 32 |
-
print(f"π Found {len(image_files)} images to label.")
|
| 33 |
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
-
label_lines = []
|
| 49 |
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
with open(
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
|
| 68 |
-
print("\n
|
|
|
|
|
|
|
| 69 |
|
| 70 |
if __name__ == "__main__":
|
| 71 |
-
|
|
|
|
|
|
|
| 1 |
import os
import shutil
import random
from ultralytics import YOLO
from tqdm import tqdm

# Path to the previously trained YOLO segmentation checkpoint that will be
# used to auto-label the new images.
MODEL_PATH = r"C:\Users\charu\Documents\goyam\roboflow\runs\segment\yolo26_real_v1\weights\best.pt"

# Folder containing the raw, unlabeled input images.
INPUT_IMG_DIR = r"C:\Users\charu\Desktop\all new\40000\all_images"

# Destination root for the generated YOLO dataset
# (images/{train,val}, labels/{train,val}, data.yaml).
OUTPUT_DATASET_DIR = r"C:\Users\charu\Desktop\all new\40000\goyam_v2_dataset"

# Minimum detection confidence passed to model.predict(); detections below
# this are dropped before label files are written.
CONF_THRESHOLD = 0.30
# Fraction of images assigned to the train split (the rest go to val).
SPLIT_RATIO = 0.85
# Inference batch size for model.predict().
BATCH_SIZE = 16
|
| 20 |
|
|
|
|
|
|
|
| 21 |
|
| 22 |
+
def setup_directories():
    """Create the YOLO folder tree: images/{train,val} and labels/{train,val}."""
    print("π Creating dataset directories...")
    # Same four directories as before, created in the same order:
    # images/train, images/val, labels/train, labels/val.
    for kind in ("images", "labels"):
        for split in ("train", "val"):
            os.makedirs(os.path.join(OUTPUT_DATASET_DIR, kind, split), exist_ok=True)
|
| 33 |
|
| 34 |
+
def generate_yaml(model):
    """Write the data.yaml consumed by the next training run.

    Args:
        model: A loaded ultralytics YOLO model; its ``names`` mapping
            (class-id -> class-name) supplies ``nc`` and ``names``.

    Side effects:
        Creates/overwrites ``<OUTPUT_DATASET_DIR>/data.yaml``.
    """
    yaml_path = os.path.join(OUTPUT_DATASET_DIR, "data.yaml")

    names_dict = model.names
    # Order class names explicitly by their numeric id rather than relying on
    # the dict's insertion order — guarantees names[i] is the name of class i
    # even if the mapping was built out of order.
    ordered_names = [names_dict[k] for k in sorted(names_dict)]

    with open(yaml_path, "w") as f:
        f.write(f"train: {os.path.join(OUTPUT_DATASET_DIR, 'images', 'train')}\n")
        f.write(f"val: {os.path.join(OUTPUT_DATASET_DIR, 'images', 'val')}\n\n")
        f.write(f"nc: {len(ordered_names)}\n")
        f.write(f"names: {ordered_names}\n")

    print(f"Created data.yaml at {yaml_path}")
|
| 47 |
+
|
| 48 |
+
def auto_label_and_split():
    """Run the trained model over INPUT_IMG_DIR and build a YOLO v2 dataset.

    Pipeline: create the folder tree, load the model, emit data.yaml,
    shuffle + split the image list into train/val, then stream predictions
    and for each image write one label .txt (one normalized-polygon row per
    predicted instance) and copy the image into its split folder.
    """
    setup_directories()

    print(f"Loading : {MODEL_PATH}")
    model = YOLO(MODEL_PATH)

    generate_yaml(model)

    valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.webp')
    all_images = [f for f in os.listdir(INPUT_IMG_DIR) if f.lower().endswith(valid_extensions)]

    total_images = len(all_images)
    print(f"Found {total_images} images. Shuffling and Splitting...")

    if not all_images:
        # Robustness: don't call predict() on an empty source.
        print("No images found - nothing to do.")
        return

    random.shuffle(all_images)

    split_idx = int(total_images * SPLIT_RATIO)
    train_images = set(all_images[:split_idx])

    print(f"Starting Auto-Labeling (Batch Size: {BATCH_SIZE})...")

    # Bug fix: pass the exact file list instead of the bare directory, so
    # predict() sees precisely the files we split on. With a directory source,
    # ultralytics can pick up extensions our filter missed (e.g. .tif); those
    # names are never in train_images, so they would silently land in val and
    # the tqdm total would be wrong.
    results = model.predict(
        source=[os.path.join(INPUT_IMG_DIR, f) for f in all_images],
        stream=True,        # generator mode keeps memory flat for 40k+ images
        batch=BATCH_SIZE,
        conf=CONF_THRESHOLD,
        verbose=False,
        device="cuda:0"     # NOTE(review): hard-coded GPU; fails on CPU-only hosts
    )

    for result in tqdm(results, total=total_images, desc="Labeling"):
        img_path = result.path
        filename = os.path.basename(img_path)

        # Membership in the pre-shuffled train set decides the split.
        folder_type = "train" if filename in train_images else "val"

        dest_img_path = os.path.join(OUTPUT_DATASET_DIR, "images", folder_type, filename)

        txt_filename = os.path.splitext(filename)[0] + ".txt"
        dest_txt_path = os.path.join(OUTPUT_DATASET_DIR, "labels", folder_type, txt_filename)

        # One "<cls> x1 y1 x2 y2 ..." row per predicted instance mask
        # (normalized polygon coords). An empty file marks a background image.
        lines = []
        if result.masks is not None and result.boxes is not None:
            for i, polygon in enumerate(result.masks.xyn):
                cls_id = int(result.boxes.cls[i].item())

                coords = " ".join(f"{x:.6f} {y:.6f}" for x, y in polygon)
                lines.append(f"{cls_id} {coords}")

        with open(dest_txt_path, "w") as f:
            f.write("\n".join(lines))

        # copy2 preserves the source image's timestamps/metadata.
        shutil.copy2(img_path, dest_img_path)

    print("\nπ Auto-Labeling Complete!")
    print(f"Dataset ready at: {OUTPUT_DATASET_DIR}")
    print("You can now train your V2 model using the newly generated data.yaml!")
|
| 120 |
|
| 121 |
# Entry point: run the full auto-labeling pipeline only when executed as a
# script, not when imported.
if __name__ == "__main__":
    auto_label_and_split()
|