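"""RootNet AI dashboard.

A Gradio app with two tabs: a MobileNetV2-based plant-disease classifier for
uploaded leaf images, and a chatbot backed by the Hugging Face Inference API
(requires signing in with a Hugging Face account).
"""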
import gradio as gr
import torch
from huggingface_hub import InferenceClient
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Plant-disease image classifier, loaded once at startup and shared across requests.
DISEASE_MODEL_ID = "linkanjarad/mobilenet_v2_1.0_224-plant-disease-identification"
processor = AutoImageProcessor.from_pretrained(DISEASE_MODEL_ID)
model = AutoModelForImageClassification.from_pretrained(DISEASE_MODEL_ID)
model.eval()  # inference only: disables dropout / batch-norm updates

def predict_disease(img):
    """Classify a leaf image and return the predicted disease with its confidence."""
    if img is None:  # the Image component fires `change` with None when the upload is cleared
        return ""
    img = img.convert("RGB")
    inputs = processor(images=img, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    pred_idx = logits.argmax(-1).item()
    label = model.config.id2label[pred_idx]
    confidence = torch.softmax(logits, dim=-1)[0, pred_idx].item()
    return f"Disease: {label}\nConfidence: {confidence:.2f}"


def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
    """Stream a chat reply from the Hugging Face Inference API, token by token."""
    if hf_token is None or not hf_token.token:
        # Defensive check: without an OAuth token the Inference API call below would fail.
        yield "Please sign in with your Hugging Face account to use the chatbot."
        return

    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

    # Build the OpenAI-style message list: system prompt, prior turns, then the new user message.
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    # Accumulate streamed deltas and yield the growing reply so the UI updates live.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = chunk.choices
        token = ""
        if len(choices) and choices[0].delta.content:
            token = choices[0].delta.content
        response += token
        yield response

# Chat UI wired to `respond`; rendered inside the "Voice Assistant / Chatbot" tab below.
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a friendly agricultural assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)


# Dashboard layout: sign-in sidebar plus two tabs (image classifier and chatbot).
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.Markdown("# RootNet AI Dashboard")
        gr.Markdown("Sign in with your Hugging Face account to use the Chatbot API.")
        gr.LoginButton()
    
    with gr.Tab("Plant Disease Detection"):
        gr.Markdown("Upload a leaf image to predict disease:")
        image_input = gr.Image(type="pil")
        disease_output = gr.Textbox(label="Prediction")
        image_input.change(predict_disease, inputs=image_input, outputs=disease_output)

    with gr.Tab("Voice Assistant / Chatbot"):
        chatbot.render()

if __name__ == "__main__":
    demo.launch()