import gradio as gr
from groq import Groq
import os
# Build the Groq client from the API key stored in the "Historia"
# environment variable (the Space's secret name).
client = Groq(api_key=os.getenv("Historia"))
# Function to interact with the Llama 3.1 model
def teach_history(user_input):
    """Stream a historian's answer to *user_input* from the Groq API.

    Parameters
    ----------
    user_input : str
        The question or topic entered by the user.

    Yields
    ------
    str
        The response text accumulated so far. The request is made with
        ``stream=True``; yielding after each chunk lets Gradio update the
        output textbox token-by-token instead of showing nothing until
        the whole completion has arrived (the original accumulated the
        stream and returned once, defeating the point of streaming).
    """
    completion = client.chat.completions.create(
        model="llama-3.1-70b-versatile",
        messages=[
            {
                "role": "system",
                "content": "You are an experienced historian with wide and deep knowledge in world history. You are a professor of history with 30 years of experience."
            },
            {
                "role": "user",
                "content": user_input
            }
        ],
        temperature=0.8,
        max_tokens=4096,
        top_p=1,
        stream=True,
        stop=None,
    )
    response = ""
    for chunk in completion:
        # delta.content can be None on some chunks (e.g. the final one).
        response += chunk.choices[0].delta.content or ""
        yield response
# Gradio Blocks interface: centered headers, one column with the response
# box, the question box, and a submit button wired to teach_history.
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center;'>Historia</h1>")
    gr.Markdown("<h3 style='text-align: center;'>Learn History with a Knowledgeable Historian</h3>")
    with gr.Row():
        with gr.Column():
            output = gr.Textbox(label="Response", lines=10)
            user_input = gr.Textbox(label="Enter your question or topic")
            submit_button = gr.Button("Submit")
    submit_button.click(fn=teach_history, inputs=user_input, outputs=output)
demo.launch()