File size: 2,488 Bytes
9574999
 
dc194a9
 
9574999
 
 
 
 
 
dc194a9
 
 
 
 
9574999
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0bc8e71
dc194a9
9574999
 
dc194a9
 
9574999
 
dc194a9
 
 
 
 
9574999
 
 
 
 
 
 
 
 
de84813
9574999
 
 
 
31520cf
9574999
 
 
 
 
dc194a9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import gradio as gr
from huggingface_hub import InferenceClient
import base64
import io

# Initialize the Hugging Face Inference Client.
# NOTE(review): no model/token passed here — auth presumably comes from the
# HF_TOKEN environment variable or cached login; confirm in deployment env.
client = InferenceClient()

# Function to analyze plant images
def analyze_plant_image(image):
    """Stream a vision-model analysis of an uploaded plant image.

    Encodes the image as a base64 data URL, sends it with an analysis
    prompt to the Qwen2-VL chat endpoint, and yields the accumulated
    response text so Gradio can render it incrementally.

    Parameters
    ----------
    image : PIL.Image.Image or None
        Image supplied by the Gradio ``gr.Image(type="pil")`` component.
        ``None`` when the user clicks Analyze without uploading.

    Yields
    ------
    str
        The markdown analysis text, growing as chunks stream in.
    """
    # Guard: Gradio passes None when no image was uploaded; the original
    # code crashed on image.save in that case.
    if image is None:
        yield "Please upload a plant image first."
        return

    # JPEG cannot encode alpha/palette modes; normalize to RGB so .save
    # never raises (the UI requests RGB, but be defensive).
    if image.mode != "RGB":
        image = image.convert("RGB")

    # Serialize the image to an inline base64 data URL for the API.
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
    image_url = f"data:image/jpeg;base64,{image_base64}"

    # Create the message structure
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Analyze the plant in this image in detail, including the following aspects: 1) Identify the plant species with a scientific name if possible. 2) Assess the health status of the plant, indicating any visible signs of disease, nutrient deficiency, or pests. 3) Determine the growth stage of the plant (e.g., seedling, vegetative, flowering, or fruiting stage). 4) Provide any additional information that could help in understanding the overall condition of the plant."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": image_url
                    }
                }
            ]
        }
    ]

    # Create the completion request
    stream = client.chat.completions.create(
        model="Qwen/Qwen2-VL-7B-Instruct",
        messages=messages,
        max_tokens=1024,
        stream=True
    )

    # Stream content as it is generated.
    output_text = ""
    for chunk in stream:
        # The delta of role-only or final chunks carries content=None;
        # concatenating it raised TypeError in the original code.
        delta = chunk.choices[0].delta.content
        if delta:
            output_text += delta
            yield output_text

# Create Gradio interface: two-column layout — image upload + button on the
# left, streamed markdown analysis on the right.
with gr.Blocks() as app:
    gr.Markdown("## Automated Botanical Analyzer")
    gr.Markdown("Upload an image of a plant to identify its species, detect any diseases, and monitor growth stages.")

    with gr.Row():
        # First column for input components.
        with gr.Column():
            # type="pil" hands analyze_plant_image a PIL.Image; image_mode="RGB"
            # normalizes uploads so JPEG re-encoding works downstream.
            image_input = gr.Image(type="pil", label="Upload Plant Image", image_mode="RGB")
            analyze_button = gr.Button("Analyze Plant Image")

        # Second column for output.
        with gr.Column():
            output_markdown = gr.Markdown()  # Rendered incrementally as the handler yields text

    # Link button to function with inputs and outputs. Because the handler is
    # a generator, Gradio streams each yielded string into the Markdown pane.
    analyze_button.click(fn=analyze_plant_image, inputs=image_input, outputs=output_markdown)

# Run the Gradio app (blocking; serves on the default local port).
app.launch()