Image for segmentation (street)
Browse files
- README.md +1 -1
- app.py +5 -4
- labels.txt +19 -0
- street-1.jpg +0 -0
- street-2.jpg +0 -0
- street-3.jpg +0 -0
- street-4.jpg +0 -0
- street-5.jpg +0 -0
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🐨
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 3.44.4
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
app.py
CHANGED
|
@@ -8,10 +8,10 @@ import tensorflow as tf
|
|
| 8 |
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
|
| 9 |
|
| 10 |
feature_extractor = SegformerFeatureExtractor.from_pretrained(
|
| 11 |
-
"
|
| 12 |
)
|
| 13 |
model = TFSegformerForSemanticSegmentation.from_pretrained(
|
| 14 |
-
"
|
| 15 |
)
|
| 16 |
|
| 17 |
def ade_palette():
|
|
@@ -34,7 +34,8 @@ def ade_palette():
|
|
| 34 |
[200, 56, 123],
|
| 35 |
[87, 92, 204],
|
| 36 |
[120, 56, 123],
|
| 37 |
-
[45, 78, 123]
|
|
|
|
| 38 |
]
|
| 39 |
|
| 40 |
labels_list = []
|
|
@@ -103,7 +104,7 @@ def sepia(input_img):
|
|
| 103 |
demo = gr.Interface(fn=sepia,
|
| 104 |
inputs=gr.Image(shape=(400, 600)),
|
| 105 |
outputs=['plot'],
|
| 106 |
-
examples=["
|
| 107 |
allow_flagging='never')
|
| 108 |
|
| 109 |
|
|
|
|
| 8 |
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
|
| 9 |
|
| 10 |
feature_extractor = SegformerFeatureExtractor.from_pretrained(
|
| 11 |
+
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
|
| 12 |
)
|
| 13 |
model = TFSegformerForSemanticSegmentation.from_pretrained(
|
| 14 |
+
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
|
| 15 |
)
|
| 16 |
|
| 17 |
def ade_palette():
|
|
|
|
| 34 |
[200, 56, 123],
|
| 35 |
[87, 92, 204],
|
| 36 |
[120, 56, 123],
|
| 37 |
+
[45, 78, 123],
|
| 38 |
+
[0, 0, 0]
|
| 39 |
]
|
| 40 |
|
| 41 |
labels_list = []
|
|
|
|
| 104 |
demo = gr.Interface(fn=sepia,
|
| 105 |
inputs=gr.Image(shape=(400, 600)),
|
| 106 |
outputs=['plot'],
|
| 107 |
+
examples=["street-1.jpg", "street-2.jpg", "street-3.jpg", "street-4.jpg", "street-5.jpg"],
|
| 108 |
allow_flagging='never')
|
| 109 |
|
| 110 |
|
labels.txt
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
road
|
| 2 |
+
sidewalk
|
| 3 |
+
building
|
| 4 |
+
wall
|
| 5 |
+
fence
|
| 6 |
+
pole
|
| 7 |
+
traffic light
|
| 8 |
+
traffic sign
|
| 9 |
+
vegetation
|
| 10 |
+
terrain
|
| 11 |
+
sky
|
| 12 |
+
person
|
| 13 |
+
rider
|
| 14 |
+
car
|
| 15 |
+
truck
|
| 16 |
+
bus
|
| 17 |
+
train
|
| 18 |
+
motorcycle
|
| 19 |
+
bicycle
|
street-1.jpg
ADDED
|
street-2.jpg
ADDED
|
street-3.jpg
ADDED
|
street-4.jpg
ADDED
|
street-5.jpg
ADDED
|