chiichann committed on
Commit
86fbb48
·
verified ·
1 Parent(s): 7c77f57

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -82
app.py CHANGED
@@ -7,52 +7,85 @@ client = OpenAI(
7
  base_url="https://integrate.api.nvidia.com/v1",
8
  api_key=os.environ.get("NVIDIA_API_KEY")
9
  )
 
10
  # Streamlit UI
11
  st.title("AI-Powered Text Generation App")
12
  st.write("Interact with an AI model to generate text based on your inputs.")
13
 
14
- # Response specification features
15
- st.markdown("## 🛠️ Response Specification Features")
16
- st.markdown("*The expanders below are parameters that you can adjust to customize the AI response.*")
17
-
18
- with st.expander("🎨 *Temperature (Creativity Control)*"):
19
- st.write("""
20
- This parameter controls the *creativity* of the AI's responses:
21
- - *0.0*: Always the same response (deterministic).
22
- - *0.1 - 0.3*: Mostly factual and repetitive.
23
- - *0.4 - 0.7*: Balanced between coherence and creativity.
24
- - *0.8 - 1.0*: Highly creative but less predictable.
25
- """)
26
-
27
- with st.expander("📏 *Max Tokens (Response Length)*"):
28
- st.write("Defines the maximum number of words/subwords in the response.")
29
-
30
- with st.expander("🎯 *Top-p (Nucleus Sampling)*"):
31
- st.write("""
32
- Controls word diversity by sampling from top-probability tokens:
33
- - **High top_p + Low temperature** → More factual, structured responses.
34
- - **High top_p + High temperature** → More diverse, unexpected responses.
35
- """)
36
-
37
- with st.expander("🔄 *Number of Responses*"):
38
- st.write("Specifies how many response variations the AI should generate.")
39
-
40
- with st.expander("✅ *Fact-Checking*"):
41
- st.write("""
42
- - If *enabled*, AI prioritizes factual accuracy.
43
- - If *disabled*, AI prioritizes creativity.
44
- """)
45
-
46
- st.markdown("""
47
- ### 🔎 *Summary*
48
- - temperature → Adjusts *creativity vs accuracy*.
49
- - max_tokens → Defines *response length*.
50
- - top_p → Fine-tunes *word diversity*.
51
- - fact_check → Ensures *factual correctness* (but may reduce fluency).
52
- - num_responses → Generates *different variations* of the same prompt.
53
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
- # Function to query the AI model (based on your friend's code)
56
  def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7, max_tokens=512, top_p=0.9, fact_check=False, num_responses=1):
57
  responses = []
58
 
@@ -60,7 +93,7 @@ def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7
60
  if fact_check:
61
  prompt = "Ensure factual accuracy. " + prompt
62
 
63
- for _ in range(num_responses): # Response loop for multiple responses
64
  completion = client.chat.completions.create(
65
  model=model,
66
  messages=[{"role": "user", "content": prompt}],
@@ -74,43 +107,4 @@ def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7
74
  except Exception as e:
75
  st.error(f"An error occurred: {str(e)}")
76
 
77
- return responses # Return a list of responses
78
-
79
- # Input Fields for Streamlit UI
80
- user_input = st.text_area("Your Prompt:", placeholder="Type something...")
81
-
82
- # Dropdown Menus
83
- output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
84
- tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
85
-
86
- # Sliders
87
- creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
88
- max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
89
-
90
- # Numeric Inputs
91
- num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
92
-
93
- # Checkboxes
94
- enable_creativity = st.checkbox("Enable Creative Mode", value=True)
95
- fact_checking = st.checkbox("Enable Fact-Checking")
96
-
97
- # Button to generate response
98
- if st.button("Generate Answer"):
99
- if user_input.strip():
100
- with st.spinner("Generating response..."):
101
- full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_input}"
102
- ai_responses = query_ai_model(
103
- full_prompt,
104
- temperature=creativity_level if enable_creativity else 0.2,
105
- max_tokens=max_length,
106
- top_p=0.9 if enable_creativity else 0.7,
107
- fact_check=fact_checking,
108
- num_responses=num_responses
109
- )
110
-
111
- st.success("AI Responses:")
112
- for i, response in enumerate(ai_responses, 1):
113
- st.markdown(f"### Response {i}")
114
- st.write(response)
115
- else:
116
- st.warning("Please enter a prompt before clicking the button.")
 
7
  base_url="https://integrate.api.nvidia.com/v1",
8
  api_key=os.environ.get("NVIDIA_API_KEY")
9
  )
10
+
11
  # Streamlit UI
12
  st.title("AI-Powered Text Generation App")
13
  st.write("Interact with an AI model to generate text based on your inputs.")
14
 
15
+ # Tabs
16
+ tab1, tab2, tab3 = st.tabs(["🛠️ Settings", "✍️ Generate", "📜 Responses"])
17
+
18
+ with tab1:
19
+ st.markdown("## Response Specification Features")
20
+ st.markdown("*The expanders below are parameters that you can adjust to customize the AI response.*")
21
+
22
+ with st.expander("🎨 Temperature (Creativity Control)"):
23
+ st.write("""
24
+ This parameter controls the *creativity* of the AI's responses:
25
+ - *0.0*: Always the same response (deterministic).
26
+ - *0.1 - 0.3*: Mostly factual and repetitive.
27
+ - *0.4 - 0.7*: Balanced between coherence and creativity.
28
+ - *0.8 - 1.0*: Highly creative but less predictable.
29
+ """)
30
+
31
+ with st.expander("📏 Max Tokens (Response Length)"):
32
+ st.write("Defines the maximum number of words/subwords in the response.")
33
+
34
+ with st.expander("🎯 Top-p (Nucleus Sampling)"):
35
+ st.write("""
36
+ Controls word diversity by sampling from top-probability tokens:
37
+ - **High top_p + Low temperature** → More factual, structured responses.
38
+ - **High top_p + High temperature** → More diverse, unexpected responses.
39
+ """)
40
+
41
+ with st.expander("🔄 Number of Responses"):
42
+ st.write("Specifies how many response variations the AI should generate.")
43
+
44
+ with st.expander("✅ Fact-Checking"):
45
+ st.write("""
46
+ - If *enabled*, AI prioritizes factual accuracy.
47
+ - If *disabled*, AI prioritizes creativity.
48
+ """)
49
+
50
+ with tab2:
51
+ st.markdown("## ✍️ Generate Text")
52
+ user_input = st.text_area("Your Prompt:", placeholder="Type something...")
53
+ output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
54
+ tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
55
+ creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, value=0.7, step=0.1)
56
+ max_length = st.slider("Max Length (tokens):", min_value=100, max_value=1024, value=512, step=50)
57
+ num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, value=1, step=1)
58
+ enable_creativity = st.checkbox("Enable Creative Mode", value=True)
59
+ fact_checking = st.checkbox("Enable Fact-Checking")
60
+
61
+ if st.button("Generate Answer"):
62
+ if user_input.strip():
63
+ with st.spinner("Generating response..."):
64
+ full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_input}"
65
+ ai_responses = query_ai_model(
66
+ full_prompt,
67
+ temperature=creativity_level if enable_creativity else 0.2,
68
+ max_tokens=max_length,
69
+ top_p=0.9 if enable_creativity else 0.7,
70
+ fact_check=fact_checking,
71
+ num_responses=num_responses
72
+ )
73
+
74
+ st.session_state["ai_responses"] = ai_responses # Store responses for later viewing
75
+ st.success("Responses generated! Check the 'Responses' tab.")
76
+ else:
77
+ st.warning("Please enter a prompt before clicking the button.")
78
+
79
+ with tab3:
80
+ st.markdown("## 📜 AI Responses")
81
+ if "ai_responses" in st.session_state and st.session_state["ai_responses"]:
82
+ for i, response in enumerate(st.session_state["ai_responses"], 1):
83
+ st.markdown(f"### Response {i}")
84
+ st.write(response)
85
+ else:
86
+ st.info("No responses yet. Generate one in the 'Generate' tab.")
87
 
88
+ # Function to query the AI model
89
  def query_ai_model(prompt, model="meta/llama-3.1-405b-instruct", temperature=0.7, max_tokens=512, top_p=0.9, fact_check=False, num_responses=1):
90
  responses = []
91
 
 
93
  if fact_check:
94
  prompt = "Ensure factual accuracy. " + prompt
95
 
96
+ for _ in range(num_responses):
97
  completion = client.chat.completions.create(
98
  model=model,
99
  messages=[{"role": "user", "content": prompt}],
 
107
  except Exception as e:
108
  st.error(f"An error occurred: {str(e)}")
109
 
110
+ return responses