Update app.py

app.py CHANGED
@@ -7,8 +7,52 @@ client = OpenAI(
     base_url="https://integrate.api.nvidia.com/v1",
     api_key=os.environ.get("NVIDIA_API_KEY")
 )
+# Streamlit UI
+st.title("AI-Powered Text Generation App")
+st.write("Interact with an AI model to generate text based on your inputs.")
+
+# Response specification features
+st.markdown("## 🛠️ Response Specification Features")
+st.markdown("*The expanders below are parameters that you can adjust to customize the AI response.*")
+
+with st.expander("🎨 *Temperature (Creativity Control)*"):
+    st.write("""
+    This parameter controls the *creativity* of the AI's responses:
+    - *0.0*: Always the same response (deterministic).
+    - *0.1 - 0.3*: Mostly factual and repetitive.
+    - *0.4 - 0.7*: Balanced between coherence and creativity.
+    - *0.8 - 1.0*: Highly creative but less predictable.
+    """)
+
+with st.expander("📏 *Max Tokens (Response Length)*"):
+    st.write("Defines the maximum number of words/subwords in the response.")
+
+with st.expander("🎯 *Top-p (Nucleus Sampling)*"):
+    st.write("""
+    Controls word diversity by sampling from top-probability tokens:
+    - **High top_p + Low temperature** → More factual, structured responses.
+    - **High top_p + High temperature** → More diverse, unexpected responses.
+    """)
+
+with st.expander("🔁 *Number of Responses*"):
+    st.write("Specifies how many response variations the AI should generate.")
+
+with st.expander("✅ *Fact-Checking*"):
+    st.write("""
+    - If *enabled*, AI prioritizes factual accuracy.
+    - If *disabled*, AI prioritizes creativity.
+    """)
+
+st.markdown("""
+### 📌 *Summary*
+- temperature → Adjusts *creativity vs accuracy*.
+- max_tokens → Defines *response length*.
+- top_p → Fine-tunes *word diversity*.
+- fact_check → Ensures *factual correctness* (but may reduce fluency).
+- num_responses → Generates *different variations* of the same prompt.
+""")

-# Function to query the AI model
+# Function to query the AI model (based on your friend's code)
 def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7, max_tokens=512, top_p=0.9, fact_check=False, num_responses=1):
     responses = []

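Note on the unchanged client above: it is the standard `openai` Python SDK pointed at NVIDIA's OpenAI-compatible endpoint, so the parameters the new expanders document (temperature, top_p, max_tokens) map directly onto `client.chat.completions.create`. A minimal standalone sketch, assuming `openai` v1.x is installed and `NVIDIA_API_KEY` is set; the prompt string here is only an illustration:

import os
from openai import OpenAI

# Same client setup as app.py: NVIDIA's OpenAI-compatible endpoint
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY"),
)

# One completion using the parameters the new UI explains
completion = client.chat.completions.create(
    model="meta/llama-3.1-405b-instruct",
    messages=[{"role": "user", "content": "Explain nucleus sampling in one sentence."}],
    temperature=0.7,   # creativity control
    top_p=0.9,         # nucleus sampling cutoff
    max_tokens=512,    # response length cap
)
print(completion.choices[0].message.content)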
@@ -16,7 +60,7 @@ def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7
     if fact_check:
         prompt = "Ensure factual accuracy. " + prompt

-    for _ in range(num_responses):
+    for _ in range(num_responses):  # Response loop for multiple responses
         completion = client.chat.completions.create(
             model=model,
             messages=[{"role": "user", "content": prompt}],
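A side note on this hunk: the loop issues one network request per variation. The OpenAI-style chat-completions API also accepts an `n` parameter that returns several choices from a single request; whether NVIDIA's endpoint honors `n` for this model is an assumption worth verifying before swapping it in. A sketch, reusing the `client`, `prompt`, and `num_responses` from the code above:

# Sketch: request num_responses samples in one call via `n`
# (assumes the endpoint supports it; otherwise keep the loop)
completion = client.chat.completions.create(
    model="meta/llama-3.1-405b-instruct",
    messages=[{"role": "user", "content": prompt}],
    temperature=0.7,
    top_p=0.9,
    max_tokens=512,
    n=num_responses,  # several independent samples per request
)
responses = [choice.message.content for choice in completion.choices]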
@@ -30,81 +74,43 @@
     except Exception as e:
         st.error(f"An error occurred: {str(e)}")

-    return responses
+    return responses  # Return a list of responses

-# Streamlit UI
-st.title("AI-Powered Text Generation App")
-st.write("Interact with an AI model to generate text based on your inputs.")
-
-#
-    - If *enabled*, AI prioritizes factual accuracy.
-    - If *disabled*, AI prioritizes creativity.
-    """)
-
-with tab2:
-    st.markdown("## ✍️ Generate Text")
-    user_input = st.text_area("Your Prompt:", placeholder="Type something...")
-    output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
-    tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
-    creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
-    max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
-    num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
-    enable_creativity = st.checkbox("Enable Creative Mode", value=True)
-    fact_checking = st.checkbox("Enable Fact-Checking")
-
-    if st.button("Generate Answer"):
-        if user_input.strip():
-            with st.spinner("Generating response..."):
-                full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_input}"
-                ai_responses = query_ai_model(
-                    full_prompt,
-                    temperature=creativity_level if enable_creativity else 0.2,
-                    max_tokens=max_length,
-                    top_p=0.9 if enable_creativity else 0.7,
-                    fact_check=fact_checking,
-                    num_responses=num_responses
-                )
-
-            st.session_state["ai_responses"] = ai_responses  # Store responses for later viewing
-            st.success("Responses generated! Check the 'Responses' tab.")
-        else:
-            st.warning("Please enter a prompt before clicking the button.")
-
-with tab3:
-    st.markdown("## 📜 AI Responses")
-    if "ai_responses" in st.session_state and st.session_state["ai_responses"]:
-        for i, response in enumerate(st.session_state["ai_responses"], 1):
+# Input Fields for Streamlit UI
+user_input = st.text_area("Your Prompt:", placeholder="Type something...")
+
+# Dropdown Menus
+output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
+tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
+
+# Sliders
+creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
+max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
+
+# Numeric Inputs
+num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
+
+# Checkboxes
+enable_creativity = st.checkbox("Enable Creative Mode", value=True)
+fact_checking = st.checkbox("Enable Fact-Checking")
+
+# Button to generate response
+if st.button("Generate Answer"):
+    if user_input.strip():
+        with st.spinner("Generating response..."):
+            full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_input}"
+            ai_responses = query_ai_model(
+                full_prompt,
+                temperature=creativity_level if enable_creativity else 0.2,
+                max_tokens=max_length,
+                top_p=0.9 if enable_creativity else 0.7,
+                fact_check=fact_checking,
+                num_responses=num_responses
+            )
+
+        st.success("AI Responses:")
+        for i, response in enumerate(ai_responses, 1):
             st.markdown(f"### Response {i}")
             st.write(response)
     else:
-        st.
+        st.warning("Please enter a prompt before clicking the button.")
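One behavioral consequence of this hunk: Streamlit reruns the whole script on every widget interaction, so responses rendered only inside the `if st.button(...)` branch disappear on the next rerun. The removed tab layout kept them alive with `st.session_state`; a minimal sketch of that pattern, where the placeholder list stands in for the real `query_ai_model` call:

import streamlit as st

# Initialize persistent storage once per session
if "ai_responses" not in st.session_state:
    st.session_state["ai_responses"] = []

if st.button("Generate Answer"):
    # In the real app this would be: query_ai_model(full_prompt, ...)
    st.session_state["ai_responses"] = ["example response"]

# Rendering from session state survives later reruns
for i, response in enumerate(st.session_state["ai_responses"], 1):
    st.markdown(f"### Response {i}")
    st.write(response)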