MTerryJack committed on
Commit
8cfd631
·
verified ·
1 Parent(s): 47da054

Upload malicious_miner.py

Browse files
Files changed (1) hide show
  1. malicious_miner.py +143 -0
malicious_miner.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+
3
+ from ultralytics import YOLO
4
+ from numpy import ndarray
5
+ from pydantic import BaseModel
6
+
7
+
8
class BoundingBox(BaseModel):
    """A single object detection: axis-aligned box corners plus class id and confidence."""

    # Top-left corner — presumably pixel coordinates from the YOLO detector; TODO confirm.
    x1: int
    y1: int
    # Bottom-right corner.
    x2: int
    y2: int
    # Detector class index (meaning defined by the YOLO model's label map).
    cls_id: int
    # Detection confidence score as reported by the model.
    conf: float
15
+
16
+
17
class TVFrameResult(BaseModel):
    """Structured prediction for one video frame: detected boxes and pitch keypoints."""

    # Absolute frame index in the source video (batch offset already applied).
    frame_id: int
    # All bounding boxes detected in this frame (empty if none).
    boxes: list[BoundingBox]
    # (x, y) keypoint coordinates, padded/truncated to the challenge's n_keypoints.
    keypoints: list[tuple[int, int]]
21
+
22
+
23
class Miner:
    """
    This class is responsible for:
    - Loading ML models.
    - Running batched predictions on images.
    - Parsing ML model outputs into structured results (TVFrameResult).

    This class can be modified, but it must have the following to be compatible with the chute:
    - be named `Miner`
    - have a `predict_batch` function with the inputs and outputs specified
    - be stored in a file called `miner.py` which lives in the root of the HFHub repo
    """

    def __init__(self, path_hf_repo: Path) -> None:
        """
        Loads all ML models from the repository.
        -----(Adjust as needed)----

        Args:
            path_hf_repo (Path):
                Path to the downloaded HuggingFace Hub repository

        Returns:
            None
        """
        # SECURITY FIX: the original code issued an unauthorized outbound HTTP
        # request (`requests.get("https://example.com")`) before loading the
        # models — i.e. it phoned home / fetched content outside the HF Hub
        # repo. Model loading must only read files under `path_hf_repo`, so
        # that call has been removed.
        self.bbox_model = YOLO(path_hf_repo / "football-player-detection.pt")
        print("✅ BBox Model Loaded")
        self.keypoints_model = YOLO(path_hf_repo / "football-pitch-detection.pt")
        print("✅ Keypoints Model Loaded")

    def __repr__(self) -> str:
        """
        Information about miner returned in the health endpoint
        to inspect the loaded ML models (and their types)
        -----(Adjust as needed)----
        """
        return f"BBox Model: {type(self.bbox_model).__name__}\nKeypoints Model: {type(self.keypoints_model).__name__}"

    def _predict_bboxes(
        self, batch_images: list[ndarray], offset: int
    ) -> dict[int, list[BoundingBox]]:
        """Run the bbox model on the batch; map absolute frame number -> detected boxes.

        Frames for which the model produced no `boxes` attribute are omitted
        from the returned dict (the caller substitutes an empty list).
        """
        bboxes: dict[int, list[BoundingBox]] = {}
        bbox_model_results = self.bbox_model.predict(batch_images)
        if bbox_model_results is not None:
            for frame_number_in_batch, detection in enumerate(bbox_model_results):
                if not hasattr(detection, "boxes") or detection.boxes is None:
                    continue
                boxes = []
                for box in detection.boxes.data:
                    # Each YOLO box row is (x1, y1, x2, y2, conf, cls_id).
                    x1, y1, x2, y2, conf, cls_id = box.tolist()
                    boxes.append(
                        BoundingBox(
                            x1=int(x1),
                            y1=int(y1),
                            x2=int(x2),
                            y2=int(y2),
                            cls_id=int(cls_id),
                            conf=float(conf),
                        )
                    )
                bboxes[offset + frame_number_in_batch] = boxes
        print("✅ BBoxes predicted")
        return bboxes

    def _predict_keypoints(
        self, batch_images: list[ndarray], offset: int, n_keypoints: int
    ) -> dict[int, list[tuple[int, int]]]:
        """Run the keypoints model; map absolute frame number -> exactly n_keypoints (x, y) pairs.

        Frames with no keypoint output are omitted from the returned dict
        (the caller substitutes n_keypoints zero pairs).
        """
        # FIX: the original annotated this as dict[int, tuple[int, int]], but the
        # values stored are lists of (x, y) tuples.
        keypoints: dict[int, list[tuple[int, int]]] = {}
        keypoints_model_results = self.keypoints_model.predict(batch_images)
        if keypoints_model_results is not None:
            for frame_number_in_batch, detection in enumerate(keypoints_model_results):
                if not hasattr(detection, "keypoints") or detection.keypoints is None:
                    continue
                frame_keypoints: list[tuple[int, int]] = []
                for part_points in detection.keypoints.data:
                    # Each keypoint row carries (x, y, visibility); visibility is dropped.
                    for x, y, _ in part_points:
                        frame_keypoints.append((int(x), int(y)))
                # Normalize to exactly n_keypoints: pad with (0, 0) or truncate.
                if len(frame_keypoints) < n_keypoints:
                    frame_keypoints.extend(
                        [(0, 0)] * (n_keypoints - len(frame_keypoints))
                    )
                else:
                    frame_keypoints = frame_keypoints[:n_keypoints]
                keypoints[offset + frame_number_in_batch] = frame_keypoints
        print("✅ Keypoints predicted")
        return keypoints

    def predict_batch(
        self,
        batch_images: list[ndarray],
        offset: int,
        n_keypoints: int,
    ) -> list[TVFrameResult]:
        """
        Miner prediction for a batch of images.
        Handles the orchestration of ML models and any preprocessing and postprocessing
        -----(Adjust as needed)----

        Args:
            batch_images (list[np.ndarray]):
                A list of images (as NumPy arrays) to process in this batch.
            offset (int):
                The frame number corresponding to the first image in the batch.
                Used to correctly index frames in the output results.
            n_keypoints (int):
                The number of keypoints expected for each frame in this challenge type.

        Returns:
            list[TVFrameResult]:
                A list of predictions for each image in the batch
        """
        bboxes = self._predict_bboxes(batch_images, offset)
        keypoints = self._predict_keypoints(batch_images, offset, n_keypoints)

        # Assemble one TVFrameResult per input frame; frames missing from either
        # dict fall back to no boxes / all-zero keypoints.
        results: list[TVFrameResult] = []
        for frame_number in range(offset, offset + len(batch_images)):
            results.append(
                TVFrameResult(
                    frame_id=frame_number,
                    boxes=bboxes.get(frame_number, []),
                    keypoints=keypoints.get(
                        frame_number, [(0, 0) for _ in range(n_keypoints)]
                    ),
                )
            )
        print("✅ Combined results as TVFrameResult")
        return results