File size: 1,409 Bytes
7ff2c4e
 
 
 
 
 
099109a
7ff2c4e
0992e18
 
7ff2c4e
 
7072e53
bbda856
7ff2c4e
 
 
 
 
 
ba02dad
7ff2c4e
0343306
0992e18
 
 
7ff2c4e
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
#import libraries here
import gradio as gr
import random
from huggingface_hub import InferenceClient

#AI API being used
# Hosted inference endpoint; all chat requests in respond() go through this client.
client = InferenceClient("openai/gpt-oss-20b")

# NOTE(review): removed the module-level `response = ""` that used to live here —
# it was never read anywhere; respond() always rebound a local of the same name.
#defining role of AI and user
def respond(message, history):
    """Stream the assistant's reply for a Gradio ChatInterface.

    Parameters:
        message: the latest user message (str).
        history: prior turns as a list of {"role": ..., "content": ...}
            dicts (Gradio type="messages" format); may be None or empty
            on the first turn.

    Yields:
        str: the reply accumulated so far — Gradio re-renders the chat
        bubble on each yield, producing a live "typing" effect.
    """
    system_message="You are acting like a comforting, guiding parent helping their child navigate academia."
    messages = [{"role": "system", "content": system_message}]

    if history:
        messages.extend(history) #keep adding history so the model has conversational context

    messages.append({"role":"user", "content": message})

    # Bug fix: the original code started one streaming call, then iterated a
    # SECOND non-streaming call as if it were a stream, concatenated str tokens
    # onto the stream object (TypeError), and finally indexed the result like a
    # dict. Stream exactly once and accumulate plain text instead.
    reply = ""
    stream = client.chat_completion(messages, temperature=0.8, stream=True, max_tokens=100) #capping the reply at 100 generated tokens
    for chunk in stream:
        token = chunk.choices[0].delta.content
        if token:  # final/keep-alive chunks may carry no content
            reply += token
            yield reply  #yield incrementally so the UI streams the answer
    

#Build the chat UI: ChatInterface wires the respond generator to a message
#history view plus an input box (the function is its only required argument).
chatbot = gr.ChatInterface(
    fn=respond,
    type="messages",
    title="AI Chatbot",
    theme="Taithrah/Minimal",
)

#Start the local web server hosting the chatbot.
chatbot.launch()
#Tip: when experimenting with other models, pass debug=True to launch() so
#error messages from the backend are printed instead of swallowed.