sync with remote
app.py CHANGED
@@ -1,6 +1,4 @@
 import os
-
-import openai
 from openai import OpenAI
 import streamlit as st
 
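The import cleanup above leaves only the client-style interface (`from openai import OpenAI`) and drops the unused module-level `import openai`. For orientation, a client pointed at an endpoint that serves the `meta/llama-3.1-405b-instruct` model used further down would typically be constructed roughly as follows; the base URL and the `NVIDIA_API_KEY` environment variable name are illustrative assumptions, not part of this commit.

import os
from openai import OpenAI

# Sketch only: app.py builds its own client elsewhere.
# The base_url and the NVIDIA_API_KEY name are assumptions, not taken from the diff.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ.get("NVIDIA_API_KEY"),
)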
@@ -21,7 +19,7 @@ class ConversationManager:
         # (No need to initialize history here, it will be handled in main())
         pass
 
-    def generate_ai_response(self, prompt
+    def generate_ai_response(self, prompt):
         """Generates a response from an AI model
 
         Args:
@@ -53,36 +51,29 @@
             })
 
             completion = client.chat.completions.create(
-                model="meta/llama-3.
+                model="meta/llama-3.1-405b-instruct",
                 temperature=0.5, # Adjust temperature for creativity
                 top_p=1,
                 max_tokens=1024,
                 messages=messages,
-                stream=
+                stream=False
             )
 
-
+            model_response = completion.choices[0].message.content
+
             st.session_state.conversation_manager.conversation_history.append({
                 "role": "assistant",
-                "content":
+                "content": model_response
             })
 
-
-
-
-
-
-
-                        response_container.markdown(model_response)
-                    elif 'error' in chunk:
-                        st.error(f"Error occurred: {chunk['error']}")
-                        break
-                return model_response
-            else:
-                return completion.choices[0].message.content
-
+            st.session_state.conversation_manager.conversation_history.append({
+                "role": "assistant",
+                "content": completion.choices[0].message.content
+            })
+            return model_response
+
         except Exception as e:
-
+            st.error(f"Error handling AI response: {e}")
             return None
 
 
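The hunk above replaces the streamed-chunk handling with a single non-streaming call: with stream=False, chat.completions.create returns one completion object, and the reply text is read from completion.choices[0].message.content before being appended to the conversation history. Below is a minimal, self-contained sketch of that call pattern; the client setup and the placeholder prompt are assumptions for illustration, not taken from the commit.

from openai import OpenAI

# Assumed setup; the real app configures its client elsewhere.
client = OpenAI(base_url="https://integrate.api.nvidia.com/v1", api_key="YOUR_KEY")

# Placeholder prompt, mirroring the "Using {framework}, {app_details}" pattern in main().
messages = [{"role": "user", "content": "Using Streamlit, build a to-do list app"}]

completion = client.chat.completions.create(
    model="meta/llama-3.1-405b-instruct",
    temperature=0.5,
    top_p=1,
    max_tokens=1024,
    messages=messages,
    stream=False,  # non-streaming: the whole reply arrives in one object
)

# With stream=False there is no chunk loop; the text is available directly.
model_response = completion.choices[0].message.content
print(model_response)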
@@ -109,7 +100,7 @@ def main():
         user_prompt = f"Using {framework}, {app_details}"
         st.write("**Generated Prompt:**", user_prompt)
 
-        with st.spinner("
+        with st.spinner("Thinking..."):
             # Add the user message to the history FIRST
             st.session_state.conversation_manager.conversation_history.append({"role": "user", "content": user_prompt})
 
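The last hunk completes the st.spinner(...) call with a "Thinking..." label; in Streamlit, the spinner stays visible while the body of the with block runs, so the history update and model call happen behind it. A small, self-contained sketch of that behavior is below; the sleep stands in for the model call, and the prompt text is a placeholder.

import time
import streamlit as st

user_prompt = "Using Streamlit, build a to-do list app"  # placeholder prompt

with st.spinner("Thinking..."):
    # Anything inside the `with` block runs while the spinner is shown.
    time.sleep(2)  # stand-in for the generate_ai_response() call made in app.py
    reply = f"(model reply to: {user_prompt})"

st.markdown(reply)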