MvitHYF commited on
Commit
a70bb5f
·
verified ·
1 Parent(s): 16d2bdc

Upload 5 files

Browse files
Files changed (5) hide show
  1. README.md +12 -0
  2. app6.py +132 -0
  3. best.pt +3 -0
  4. requirements.txt +6 -0
  5. yolov8n.pt +3 -0
README.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Yolov8newultlt
3
+ emoji: 🌖
4
+ colorFrom: red
5
+ colorTo: blue
6
+ sdk: gradio
7
+ sdk_version: 4.22.0
8
+ app_file: app6.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app6.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+ from ultralyticsplus import YOLO, render_result
+ # import matplotlib.pyplot as plt
+ import numpy as np
+
+ # Download one sample image per cocoa-seed class (A/B/C) at import time;
+ # these filenames are referenced later as Gradio `examples`.
+ # NOTE(review): runs network I/O on every import — acceptable for a Space,
+ # but will fail offline.
+ torch.hub.download_url_to_file(
+     'https://i.postimg.cc/Y0V4KwMf/NSTA-Test-IMG-3276.jpg', 'A.jpg')
+ torch.hub.download_url_to_file(
+     'https://i.postimg.cc/pTshCFSS/NSTB-Test-IMG-1472.jpg', 'B.jpg')
+ torch.hub.download_url_to_file(
+     'https://i.postimg.cc/mkhc5rfg/NSTC-Test-IMG-0118.jpg', 'C.jpg')
+
16
+ def detect_objects(image_path):
17
+ # Open the image file and resize it
18
+ image = Image.open(image_path)
19
+ resized_image = image.resize((1024, 768))
20
+
21
+ # Load the model
22
+ # model_path = ('code/runs/train45/best.pt')
23
+ model_path = ('MvitHYF/v8mvitcocoaseed2024')
24
+ model = YOLO(model_path)
25
+ # model = YOLO('MvitHYF/v8mvitcocoaseed2024')
26
+
27
+ # Set model parameters
28
+ model.overrides['conf'] = 0.35 # NMS confidence threshold
29
+ model.overrides['iou'] = 0.45 # NMS IoU threshold
30
+ model.overrides['agnostic_nms'] = False # NMS class-agnostic
31
+ model.overrides['max_det'] = 1000 # maximum number of detections per image
32
+
33
+ # Perform inference
34
+ results = model.predict(resized_image)
35
+
36
+ #debug check count
37
+ # print("see")
38
+ # print(results)
39
+ cls = results[0].boxes.cls
40
+ # print(cls)
41
+ strcls = str(cls)
42
+ # print(type(strcls))
43
+ # print(strcls)
44
+ count_classa = strcls.count('0')
45
+ # print('Count of classA:', count_classa)
46
+ count_classb = strcls.count('1')
47
+ # print('Count of classB', count_classb)
48
+ count_classc = strcls.count('2')
49
+ # print('Count of classC:', count_classc)
50
+ intcount_classa = int(count_classa)
51
+ intcount_classb = int(count_classb)
52
+ intcount_classc = int(count_classc)
53
+ total = intcount_classa + intcount_classb + intcount_classc
54
+ # print("end see")
55
+
56
+ # class_counts = {'A': 0, 'B': 0, 'C': 0}
57
+
58
+ # for result in results:
59
+ # # Example to access class_id, adapt based on your results structure
60
+ # class_id = result[-1]
61
+ # if class_id == 0: # if the class_id corresponds to class A
62
+ # class_counts['A'] += 1
63
+ # elif class_id == 1: # if the class_id corresponds to class B
64
+ # class_counts['B'] += 1
65
+ # elif class_id == 2: # if the class_id corresponds to class C
66
+ # class_counts['C'] += 1
67
+
68
+ #plot graph
69
+ # plot = np.array([intcount_classa, intcount_classb, intcount_classc])
70
+ # piegraph = plt.pie(plot)
71
+
72
+ # x = np.array(["A", "B", "C"])
73
+ # y = np.array([intcount_classa, intcount_classb, intcount_classc])
74
+ # plotbar = plt.bar(x,y)
75
+
76
+ gr.Image(label="Pie Graph")
77
+ # Format the output to print the counts
78
+ output_counts = f"Totoal cocoa seeds: {total}\nClass A: {count_classa} seeds\nClass B: {count_classb} seeds\nClass C: {count_classc} seeds"
79
+
80
+ # Render results
81
+ render = render_result(model=model, image=resized_image, result=results[0])
82
+ #return render, output_counts, plotbar
83
+ return render, output_counts
84
+
85
+ #csspath = 'code/yolov8newultlt/gradio.css'
+
+ # Build the Gradio UI: an Interface wrapping detect_objects, followed by a
+ # static HTML legend describing the three seed grades.
+ with gr.Blocks(theme='ParityError/LimeFace') as demo:
+     with gr.Row(): #original Column
+         with gr.Row(): #original Column
+             # Example images downloaded at module import time (A/B/C.jpg).
+             example = [['A.jpg'],
+                        ['B.jpg'],
+                        ['C.jpg']]
+
+     with gr.Row():
+         with gr.Column():
+             gr.Interface(fn=detect_objects,
+                          inputs=gr.Image(type="filepath", label="Upload an Image"),
+                          outputs=[gr.Image(type="filepath", label="Result"), gr.Textbox(label="Detection Counts")],
+                          title="YOLOv8 Cocoas Seed Classification",
+                          description="Upload an image to detect objects using YOLO.",
+                          #html = gr.HTML(value="<p>This is another paragraph123.</p>"),
+                          examples = example,
+                          #css=csspath,
+                          )
+     with gr.Row():
+         with gr.Row():
+             # Static legend explaining the A/B/C grading criteria.
+             # gr.HTML(value="<b>Class A</b> <p>Class A is the best from all 3 classes. It have the best of physical appreance eg. shape, size, texture</p>"),
+             gr.HTML(value="<dl> <dt><b>Class A</b></dt> <dd>Class A is the best from all 3 classes. It have the best of physical appreance eg. shape, size, texture</dd> </dl> <dt><b>Class B</b></dt> <dd>Class B most of the cocoa seed have physical appreance similar to class A. <br> But the size must me smaller and texture is not smmoth as class A</dd> <dt><b>Class C</b></dt> <dd>Class C is the worst from all 3 classes. Its the smallest, rough texter and have a irregular shape </dd> </dl></dl>")
+
+ if __name__ == "__main__":
+     # queue() enables request queuing so concurrent users don't collide.
+     demo.queue().launch()
+
+ # def load_css():
+ #     with open(csspath, 'r') as file:
+ #         css_content = file.read()
+ #         return css_content
+
+
+ # Create the Gradio interface
+ # iface = gr.Interface(fn=detect_objects,
+ #                      inputs=gr.Image(type="filepath", label="Upload an Image"),
+ #                      outputs=gr.Image(type="filepath", label="Result"),
+ #                      description="Upload an image to detect objects using YOLO.",
+ #                      title="YOLOv8 Cocoa Seed Classification",
+ #                      examples=example,
+ #                      theme='ParityError/LimeFace',
+ #                      #css=load_css()
+ #                      )
+
+ # Launch the interface
+ # iface.launch()
+ # demo.queue().launch()
best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d31a1e8be068861d0ec84724103bd0c1c9b404e182d5378c59ff16fa0b5f2ad
3
+ size 6280558
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio==4.22.0
2
+ numpy==1.26.4
3
+ Pillow==10.2.0
4
+ torch==2.0.1
5
+ torchvision==0.15.2
6
+ ultralyticsplus==0.0.29
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
3
+ size 6534387