File size: 5,238 Bytes
d58c2a4
032dd5c
 
 
 
 
330e9d8
 
 
 
 
 
 
 
 
 
 
 
 
032dd5c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330e9d8
032dd5c
 
 
 
 
 
 
 
 
330e9d8
032dd5c
330e9d8
032dd5c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330e9d8
032dd5c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330e9d8
032dd5c
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import gradio as gr
import os
import time
from cerebras.cloud.sdk import Cerebras
import markdown

# Build a Cerebras SDK client from the CEREBRAS_API_KEY environment variable.
def get_cerebras_client():
    """Return a ``(client, error_message)`` pair.

    On success the message is ``None``; on failure the client is ``None`` and
    the message explains what went wrong so the caller can report it.
    NOTE(review): ``os.environ.get`` returns None for a missing key — the SDK
    constructor is presumably what raises in that case; verify against the SDK.
    """
    try:
        return Cerebras(api_key=os.environ.get("CEREBRAS_API_KEY")), None
    except Exception as e:
        return None, f"Error connecting to Cerebras: {e}. Please check your API key and ensure network connectivity."

# Cerebras client setup: create the module-level client once at import time.
client, connection_error = get_cerebras_client()
if connection_error:
    print(connection_error)
    # Exit with a non-zero status so shells/CI see the failure.
    # (The original `exit()` returned status 0 — success — on this error path,
    # and `exit` is the interactive site helper, not intended for scripts.)
    raise SystemExit(1)

def chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max_completion_tokens):
    """Send one user turn to the Cerebras chat API and collect the streamed reply.

    Args:
        user_input: The user's message for this turn.
        system_prompt: System instruction prepended to the conversation.
        model: Cerebras model identifier (e.g. "llama-3.3-70b").
        temperature / top_p / max_completion_tokens: Sampling parameters
            forwarded unchanged to the API.

    Returns:
        A 3-tuple ``(formatted_response, chain_of_thought, info)`` where
        ``formatted_response`` is the full reply (with any chain-of-thought
        section appended in markdown), ``chain_of_thought`` is the extracted
        reasoning text (may be empty), and ``info`` is a human-readable
        compute-time string. On any error the first element is an error
        message, the second is empty, and the third is a generic notice —
        this function never raises.
    """
    # Start compute time measurement.
    start_time = time.time()

    try:
        # Open a streaming chat completion with the module-level client.
        stream = client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input}
            ],
            model=model,
            stream=True,
            max_completion_tokens=max_completion_tokens,
            temperature=temperature,
            top_p=top_p
        )

        # Accumulate the streamed text, splitting out any chain-of-thought
        # section the model emits after a "Chain of Thought:" marker.
        response = ""
        chain_of_thought = ""
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            # BUG FIX: delta can be None (e.g. role-only or final chunks);
            # the original ran `"..." in delta` unguarded and raised TypeError.
            if not delta:
                continue
            response += delta
            if "Chain of Thought:" in delta:
                chain_of_thought += delta.split("Chain of Thought:", 1)[-1]

        # End compute time measurement.
        compute_time = time.time() - start_time

        # Append the chain of thought as a labelled markdown section.
        formatted_response = response
        if chain_of_thought:
            formatted_response += f"\n\n**Chain of Thought:**\n{chain_of_thought}"
        return formatted_response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"

    except Exception as e:
        return f"Error: {str(e)}", "", "An error occurred while processing your request. Please check the Cerebras service and your input."

# Gradio interface
def gradio_ui():
    """Assemble and return the Gradio Blocks app for the IntellijMind chatbot.

    The caller is expected to ``.launch()`` the returned app.
    """
    # Fixed generation settings applied to every request (not user-tunable).
    system_prompt = """You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning. Provide your answers in markdown format. If you do not know the answer, mention that you do not know and don't make things up. Also, remember to be concise and get straight to the point without unnecessary fluff."""
    model_name = "llama-3.3-70b"
    temperature = 0.2
    top_p = 1
    max_tokens = 1024

    with gr.Blocks() as app:
        gr.Markdown("""# 🚀 IntellijMind Release 1st \nExperience the most advanced chatbot for deep insights and unmatched clarity!""")

        # Main layout: chat transcript on the left, metrics panel on the right.
        with gr.Row():
            with gr.Column(scale=6):
                history_box = gr.Chatbot(label="Chat History")
            with gr.Column(scale=2):
                timing_box = gr.Textbox(label="Compute Time", interactive=False)
                reasoning_box = gr.Textbox(label="Chain of Thought", interactive=False, lines=10)

        message_box = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)
        send_btn = gr.Button("Send", variant="primary")
        clear_btn = gr.Button("Clear Chat")

        def on_send(chat_history, user_input):
            # Echo the pending user turn immediately, then fill in the answer.
            chat_history.append((user_input, None))
            yield chat_history, "", "Thinking..."

            answer, reasoning, timing = chat_with_cerebras(
                user_input, system_prompt, model_name, temperature, top_p, max_tokens
            )
            # Render the model's markdown reply to HTML for the Chatbot widget.
            chat_history[-1] = (user_input, markdown.markdown(answer))
            yield chat_history, reasoning, timing

        def on_clear():
            # Reset transcript, chain-of-thought panel, and timing display.
            return [], "", ""

        send_btn.click(
            on_send,
            inputs=[history_box, message_box],
            outputs=[history_box, reasoning_box, timing_box]
        )
        clear_btn.click(on_clear, outputs=[history_box, reasoning_box, timing_box])

        gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Powered by IntellijMind Release 1st**: Setting new standards for AI interaction.\n""")

        gr.Markdown("""\n\n## About\nThis project was created by Aniket Kumar as a showcase of AI capabilities with Cerebras. Feel free to explore and share!""")

    return app


# Run the Gradio app: build the UI and start the server (blocks until stopped).
# NOTE(review): module-level `demo` is the conventional entry point Gradio
# hosting (e.g. Spaces) looks for — keep the name.
demo = gradio_ui()
demo.launch()