# MyChatBot — app.py
# Author: wracell
# A simple Streamlit chatbot UI backed by the NVIDIA inference API.
import os

import streamlit as st
from openai import OpenAI
# Set up NVIDIA API client.
# SECURITY: the API key must never be hard-coded in source — the previously
# committed key is exposed in version-control history and should be revoked.
# Read it from the environment instead (set NVIDIA_API_KEY before running).
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY", ""),
)
# Streamlit UI — widget declaration order is the on-page order, and every
# variable bound here is consumed by the Generate handler below.
st.title("Rhazel's ChatBot")
# Input Field for Initial Text/Topic (empty string until the user types)
topic = st.text_area("Enter your topic or initial text:")
# Dropdown for output format (first entry, "Story", is the default)
output_format = st.selectbox(
"Select output format:",
["Story", "Poem", "Article", "Code"]
)
# Dropdown for tone/style (defaults to "Formal")
tone = st.selectbox(
"Select tone/style:",
["Formal", "Informal", "Humorous", "Technical"]
)
# Slider for text length: min 50, max 500, default 150.
# NOTE(review): this value is also passed as max_tokens below, so it caps
# tokens, not words — the prompt's "words" phrasing is approximate.
length = st.slider("Text Length", 50, 500, 150)
# Slider for creativity level (temperature): min 0.1, max 1.0, default 0.7
creativity = st.slider("Creativity Level", 0.1, 1.0, 0.7)
# Number of responses to generate: min 1, max 5, default 1, step 1
num_responses = st.number_input("Number of Responses", 1, 5, 1, 1)
# Checkboxes toggling extra instructions appended to the prompt
creative_mode = st.checkbox("Enable Creative Mode")
fact_checking = st.checkbox("Enable Fact-Checking")
# Generate button
# Generate button: build a prompt from the UI selections, request
# num_responses completions from the NVIDIA API, and display each one.
if st.button('Generate'):
    if topic:
        with st.spinner('Generating text...'):
            try:
                # Construct the prompt based on the inputs.
                # Use :.0% so the float slider value renders as e.g. "70%"
                # instead of "70.00000000000001%" (raw float * 100 leaks
                # binary-float noise into the prompt).
                prompt = (
                    f"Generate a {output_format} on the topic '{topic}' "
                    f"with a {tone} tone, length of {length} words, "
                    f"creativity level of {creativity:.0%}."
                )
                if creative_mode:
                    prompt += " Use creative language."
                if fact_checking:
                    prompt += " Fact-check the information."

                # Generate the requested number of independent responses.
                all_responses = []
                for _ in range(num_responses):
                    # Call the NVIDIA API; stream=True yields chunks whose
                    # delta.content may be None (e.g. the final chunk).
                    completion = client.chat.completions.create(
                        model="meta/llama-3.2-3b-instruct",
                        messages=[{"role": "user", "content": prompt}],
                        temperature=creativity,
                        top_p=0.7,
                        max_tokens=length,
                        stream=True,
                    )
                    # Join the streamed chunks into one string (avoids the
                    # quadratic += accumulation pattern).
                    full_response = "".join(
                        chunk.choices[0].delta.content
                        for chunk in completion
                        if chunk.choices[0].delta.content is not None
                    )
                    all_responses.append(full_response)

                # Display all the responses, numbered from 1.
                for idx, response in enumerate(all_responses, start=1):
                    st.subheader(f"Response {idx}")
                    st.write(response)
            except Exception as e:
                # Top-level UI boundary: surface any API/network failure
                # to the user instead of crashing the app.
                st.error(f"Error generating text: {str(e)}")
    else:
        st.error("Please enter a topic to generate text.")