File size: 5,831 Bytes
0236db7
 
 
 
24f43bf
0236db7
 
 
 
 
 
 
 
 
 
785598b
0236db7
 
24f43bf
 
 
0236db7
 
24f43bf
 
 
 
 
 
 
 
0236db7
 
 
24f43bf
 
 
 
 
 
 
0236db7
 
24f43bf
 
0236db7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24f43bf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0236db7
24f43bf
0236db7
24f43bf
0236db7
24f43bf
 
 
 
 
0236db7
 
 
 
24f43bf
 
 
 
 
0236db7
 
 
 
24f43bf
 
 
 
 
0236db7
 
 
 
 
24f43bf
 
0236db7
 
 
 
24f43bf
 
0236db7
 
 
 
24f43bf
 
0236db7
 
 
 
24f43bf
 
 
 
 
0236db7
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import gradio as gr
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch
from PIL import Image

# --- Load NLP pipelines ---
# Each call downloads its model from the Hugging Face Hub on first run and
# caches it locally; all four run on CPU unless transformers detects a GPU.
clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")  # binary sentiment (SST-2: POSITIVE/NEGATIVE)
ner = pipeline("ner", model="dslim/bert-base-NER", aggregation_strategy="simple")  # "simple" merges word-piece tokens into whole entities
mlm = pipeline("fill-mask", model="bert-base-uncased")  # expects the literal token [MASK] in the input
qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")  # extractive QA: answer is a span of the context

# --- Vision pipelines ---
# All three accept a PIL image (the Gradio Image components below use type="pil").
img_clf = pipeline("image-classification", model="google/vit-base-patch16-224")  # ImageNet-1k labels
det = pipeline("object-detection", model="facebook/detr-resnet-50")  # COCO boxes + labels
seg = pipeline("image-segmentation", model="facebook/mask2former-swin-base-coco-instance")  # instance masks

# --- Diffusion model for text-to-image ---
# Pick the device first so the checkpoint dtype can match it: half precision
# is only usable on CUDA — fp16 weights moved to CPU either fail outright
# ("LayerNormKernelImpl" not implemented for 'Half') or run pathologically
# slowly, so fall back to fp32 when no GPU is available.
_SD_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
sd_pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if _SD_DEVICE == "cuda" else torch.float32,
)
sd_pipe = sd_pipe.to(_SD_DEVICE)

# --- Sample sentences ---
# Canned inputs for the "Load Sample Sentence" button in the classification
# tab; only index 0 is wired up below.
SAMPLE_SENTENCES = [
    "The Amazon rainforest is losing trees at an alarming rate due to deforestation.",
    "Ocean pollution is harming marine life around the world.",
    "Renewable energy like solar and wind can reduce global warming.",
    "Wildlife conservation is essential to protect endangered species.",
    "Sustainable farming practices improve soil health and reduce pollution."
]

# --- Functions ---
def classify_text(text):
    """Run sentiment classification on *text*.

    Returns a dict with a human-readable ``Sentiment`` label (emoji included)
    and the model ``Confidence`` rounded to three decimals.
    """
    prediction = clf(text)[0]
    # SST-2 only emits POSITIVE/NEGATIVE; anything else falls through to Neutral.
    friendly_labels = {"POSITIVE": "Positive 😊", "NEGATIVE": "Negative 😟"}
    return {
        "Sentiment": friendly_labels.get(prediction["label"], "Neutral 😐"),
        "Confidence": round(prediction["score"], 3),
    }

def ner_text(text):
    """Extract named entities from *text* via the NER pipeline."""
    return ner(text)

def fill_blank(text):
    """Predict fillers for the [MASK] token in *text* via the fill-mask pipeline."""
    predictions = mlm(text)
    return predictions

def answer_question(context, question):
    """Answer *question* from *context* using the extractive QA pipeline."""
    result = qa(question=question, context=context)
    return result

def classify_image(image):
    """Classify a PIL *image* and return the label/score list."""
    labels = img_clf(image)
    return labels

def detect_objects(image):
    """Detect objects in a PIL *image*; returns boxes, labels and scores."""
    return det(image)

def segment_image(image):
    """Segment a PIL *image* into instance regions."""
    return seg(image)

def generate_image(prompt):
    """Generate one image from a text *prompt* with Stable Diffusion."""
    return sd_pipe(prompt).images[0]

# --- Gradio Interface ---
# One tab per capability. Fixes in this revision:
#   * four tab labels contained mojibake (UTF-8 emoji decoded through the
#     wrong codec) — restored to 🔍 📝 🖼 🕵️;
#   * demo.launch() is now guarded by __main__ so importing this module
#     (e.g. from tests) no longer starts a web server.
with gr.Blocks(title="🌍 Environmental AI Toolkit") as demo:
    gr.Markdown("""
    # 🌱 Environmental AI Toolkit
    This toolkit provides multiple AI-powered tools for environmental text and image analysis:
    - **Sentence Classification**: Analyze environmental text sentiment (Positive/Negative)
    - **NER**: Identify environmental entities in text
    - **Fill-in-the-Blank**: Complete environmental sentences using AI
    - **Question Answering**: Ask questions based on provided context
    - **Image Classification, Detection & Segmentation**
    - **Image Generation**: Generate environmental scenes from prompts
    """)

    with gr.Tab("🏷 Sentence Classification"):
        gr.Markdown("""
        ### Classify environmental sentences into sentiment
        Enter any environmental sentence, and the tool will tell you if the sentiment is Positive 😊, Negative 😟, or Neutral 😐.
        """)
        txt_in = gr.Textbox(label="Enter text", placeholder="E.g., 'The Amazon rainforest is being deforested'")
        txt_out = gr.JSON(label="Classification Result")
        sample_btn = gr.Button("Load Sample Sentence")
        # Submit (Enter key) runs classification; the button just pre-fills input.
        txt_in.submit(classify_text, txt_in, txt_out)
        sample_btn.click(lambda: SAMPLE_SENTENCES[0], None, txt_in)

    with gr.Tab("🔍 Named Entity Recognition"):
        gr.Markdown("""
        ### Extract named entities
        Enter environmental text, and the model will extract entities like **locations, organizations, species**, etc.
        """)
        ner_in = gr.Textbox(label="Enter text")
        ner_out = gr.JSON(label="Entities")
        ner_in.submit(ner_text, ner_in, ner_out)

    with gr.Tab("📝 Fill-in-the-Blank"):
        gr.Markdown("""
        ### Complete sentences
        Enter a sentence with `[MASK]` token, and AI will predict possible words to fill.
        """)
        mlm_in = gr.Textbox(label="Enter sentence with [MASK]")
        mlm_out = gr.JSON(label="Predictions")
        mlm_in.submit(fill_blank, mlm_in, mlm_out)

    with gr.Tab("❓ Question Answering"):
        gr.Markdown("""
        ### Ask questions based on context
        Provide context and ask a question. AI will try to answer from the given text.
        """)
        context = gr.Textbox(label="Context")
        question = gr.Textbox(label="Question")
        qa_out = gr.JSON(label="Answer")
        gr.Button("Answer").click(answer_question, [context, question], qa_out)

    with gr.Tab("🖼 Image Classification"):
        gr.Markdown("### Upload an environmental image to classify it")
        img_in = gr.Image(type="pil")
        img_out = gr.JSON(label="Labels")
        # .upload fires as soon as a file is dropped — no explicit button needed.
        img_in.upload(classify_image, img_in, img_out)

    with gr.Tab("🕵️ Object Detection"):
        gr.Markdown("### Detect objects in environmental images")
        det_in = gr.Image(type="pil")
        det_out = gr.JSON(label="Objects")
        det_in.upload(detect_objects, det_in, det_out)

    with gr.Tab("🧩 Segmentation"):
        gr.Markdown("### Segment environmental images into regions")
        seg_in = gr.Image(type="pil")
        seg_out = gr.JSON(label="Segments")
        seg_in.upload(segment_image, seg_in, seg_out)

    with gr.Tab("🎨 Image Generation"):
        gr.Markdown("""
        ### Generate environmental scenes from a text prompt
        Describe a scene, e.g., "A lush green forest with tall trees and wildlife".
        """)
        gen_in = gr.Textbox(label="Prompt")
        gen_out = gr.Image(label="Generated Image")
        gr.Button("Generate").click(generate_image, gen_in, gen_out)

if __name__ == "__main__":
    demo.launch()