File size: 3,047 Bytes
a5c3898
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import os

import streamlit as st
from openai import OpenAI

# Set up NVIDIA API client.
# SECURITY: never hard-code API keys in source — a previously committed key is
# compromised and must be revoked/rotated with the provider. The key is now
# supplied out-of-band via the NVIDIA_API_KEY environment variable (Streamlit
# Cloud users can export it from st.secrets before launch).
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY", ""),
)

# ---- Streamlit UI ---------------------------------------------------------
st.title("Rhazel's ChatBot")

# Free-form topic / seed text that drives the generation.
topic = st.text_area("Enter your topic or initial text:")

# Output format choices.
_FORMATS = ["Story", "Poem", "Article", "Code"]
output_format = st.selectbox("Select output format:", _FORMATS)

# Tone / writing-style choices.
_TONES = ["Formal", "Informal", "Humorous", "Technical"]
tone = st.selectbox("Select tone/style:", _TONES)

# Requested length in words: (min, max, default).
length = st.slider("Text Length", 50, 500, 150)

# Sampling temperature passed to the model: (min, max, default).
creativity = st.slider("Creativity Level", 0.1, 1.0, 0.7)

# How many independent completions to produce: (min, max, default, step).
num_responses = st.number_input("Number of Responses", 1, 5, 1, 1)

# Optional prompt modifiers toggled by the user.
creative_mode = st.checkbox("Enable Creative Mode")
fact_checking = st.checkbox("Enable Fact-Checking")

# Generate button: build the prompt, stream one or more completions, display.
if st.button('Generate'):
    if topic:
        with st.spinner('Generating text...'):
            try:
                # Build the instruction prompt from the user's selections.
                # The creativity slider is a float; the naive
                # f"{creativity * 100}%" leaks float noise such as
                # "30.000000000000004%" into the prompt, so format it as a
                # whole percentage instead.
                prompt = (
                    f"Generate a {output_format} on the topic '{topic}' "
                    f"with a {tone} tone, length of {length} words, "
                    f"creativity level of {creativity:.0%}."
                )

                if creative_mode:
                    prompt += " Use creative language."
                if fact_checking:
                    prompt += " Fact-check the information."

                # Generate the requested number of independent completions.
                all_responses = []
                for _ in range(num_responses):
                    # max_tokens is a *token* budget, not a word count —
                    # English averages ~1.3 tokens per word, so give headroom
                    # above `length` words to avoid truncating mid-sentence.
                    completion = client.chat.completions.create(
                        model="meta/llama-3.2-3b-instruct",
                        messages=[{"role": "user", "content": prompt}],
                        temperature=creativity,
                        top_p=0.7,
                        max_tokens=int(length * 1.5) + 50,
                        stream=True,
                    )

                    # Accumulate the streamed chunks into the full response;
                    # delta.content is None on role/metadata chunks.
                    full_response = ""
                    for chunk in completion:
                        delta = chunk.choices[0].delta.content
                        if delta is not None:
                            full_response += delta

                    all_responses.append(full_response)

                # Display every generated response under its own header.
                for idx, response in enumerate(all_responses, start=1):
                    st.subheader(f"Response {idx}")
                    st.write(response)

            except Exception as e:
                # Surface API/network failures to the user instead of crashing
                # the Streamlit session.
                st.error(f"Error generating text: {str(e)}")
    else:
        st.error("Please enter a topic to generate text.")