File size: 2,917 Bytes
9ff0c01
 
 
 
 
 
 
 
 
 
 
 
 
85309cd
 
f1e814e
9ff0c01
85309cd
 
 
 
9ff0c01
85309cd
 
 
9ff0c01
85309cd
 
 
 
 
 
9ff0c01
85309cd
3618cac
85309cd
aec026b
85309cd
 
 
 
 
 
 
 
 
 
 
 
 
 
9ff0c01
 
85309cd
9ff0c01
 
85309cd
 
 
89169e0
 
85309cd
 
 
 
 
 
846c8a4
85309cd
 
 
9ff0c01
 
85309cd
 
 
 
 
 
 
f1e814e
85309cd
9ff0c01
30e081d
85309cd
9ff0c01
85309cd
 
 
9ff0c01
85309cd
 
9ff0c01
 
aec026b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import gradio as gr
from PIL import Image
import os
from together import Together
import base64
import io

# Initialize Together client
# Module-level Together API client. Created lazily by initialize_client()
# on the first call to bot_streaming(), so importing this module does not
# require TOGETHER_API_KEY to be set.
client = None

def initialize_client(api_key=None):
    """Create the module-level Together client.

    Args:
        api_key: Optional explicit API key. When omitted, falls back to the
            TOGETHER_API_KEY environment variable (the Together SDK's default).

    Raises:
        Exception: Propagates any error raised by the Together client
            constructor (e.g. missing/invalid API key).
    """
    global client
    # Fix: the original discarded the caller-supplied api_key by always
    # overwriting it with the environment variable.
    if api_key is None:
        api_key = os.getenv("TOGETHER_API_KEY")
    # Fix: the original printed the API key to stdout (secret leak) and
    # never passed the resolved key to the client constructor.
    client = Together(api_key=api_key)

def encode_image(image_path):
    """Load the image at *image_path* and return it base64-encoded as PNG."""
    with Image.open(image_path) as img:
        png_buffer = io.BytesIO()
        img.save(png_buffer, format="PNG")
        png_bytes = png_buffer.getvalue()
    encoded = base64.b64encode(png_bytes)
    return encoded.decode("utf-8")

def bot_streaming(image_path, history):
    """Stream a vision-model assessment of the uploaded image into the chat.

    Args:
        image_path: Filesystem path to the uploaded image (Gradio ``filepath``).
        history: List of ``(user_message, bot_message)`` tuples maintained by
            the ``gr.Chatbot`` component.

    Yields:
        The updated ``history`` after each streamed token, so Gradio can
        render the response incrementally. Errors are reported as chat
        entries rather than raised.
    """
    max_new_tokens = 250
    temperature = 0.7

    # Lazily create the Together client on first use; report failures
    # (e.g. missing API key) into the chat instead of crashing the UI.
    if client is None:
        try:
            initialize_client()
        except Exception as e:
            history.append(("Error initializing client", f"{str(e)}"))
            yield history
            return

    prompt = """
      Determine if the Right Strut Tower Apron in the image shows signs of being involved in an accident or not
    """

    messages = [{"role": "system", "content": prompt}]

    # Encode the image and add to messages.
    # Fix: the original called encode_image() outside any try-block, so a
    # corrupt or unreadable upload raised an unhandled exception out of the
    # generator instead of surfacing as a chat message like other errors.
    try:
        image_base64 = encode_image(image_path)
    except Exception as e:
        history.append(("Image uploaded", f"Could not read the image: {str(e)}"))
        yield history
        return

    messages.append({
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{image_base64}"}
            }
        ]
    })
    # New list (not in-place append) so Gradio sees a fresh history object.
    history = history + [("Image uploaded", "")]

    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-Vision-Free",
            messages=messages,
            max_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )

        response = ""
        for chunk in stream:
            # Guard every level: some chunks carry no choices/delta/content.
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
                history[-1] = ("Image uploaded", response)
                yield history

        # The stream can complete without producing any text; tell the user.
        if not response:
            history[-1] = ("Image uploaded", "No response generated. Please try again.")
            yield history

    except Exception as e:
        # Oversized payloads get a friendly hint; everything else is shown verbatim.
        error_message = (
            "The image is too large. Please try with a smaller image or compress the existing one."
            if "Request Entity Too Large" in str(e)
            else f"An error occurred: {str(e)}"
        )
        history[-1] = ("Image uploaded", error_message)
        yield history

# Set up Gradio interface
with gr.Blocks() as demo:
    # Page header and usage hint.
    gr.Markdown("# Radiator Accident Detection")
    gr.Markdown("Upload an image of a radiator to determine if it shows signs of an accident")

    # Chat transcript, image uploader, and a reset button.
    chat_display = gr.Chatbot()
    image_input = gr.Image(type="filepath", label="Upload Radiator Image")
    clear_button = gr.Button("Clear")

    # Stream the model's assessment into the chat as soon as an image lands.
    image_input.upload(bot_streaming, inputs=[image_input, chat_display], outputs=chat_display)
    # Clearing simply replaces the chat history with None.
    clear_button.click(lambda: None, None, chat_display, queue=False)

if __name__ == "__main__":
    demo.launch(debug=True)