Spaces:
Sleeping
Sleeping
Commit ·
2e58b9f
1
Parent(s): 8c8ee0b
first commit
Browse files- app.py +71 -0
- requirements.txt +1 -0
app.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
#from decouple import config
|
| 3 |
+
import openai, os
|
| 4 |
+
|
| 5 |
+
# Per-run usage-tracking defaults. Streamlit re-executes this script on every
# interaction, so these act as the "no response yet" state until an API call
# succeeds further down.
response = False
prompt_tokens = completion_tokes = total_tokens_used = cost_of_response = 0
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# ---- Page layout: title, input widgets, and manual rerun trigger ----------
st.header("Streamlit plus OpenAI ChatGPT/GPT3.5 turbo")

st.markdown("""---""")

# Prompt and credentials are collected from the user on every run.
question_input = st.text_input("Enter question/prompt! 輸入問題/提示!")
# type="password" masks the secret so the API key is not echoed on screen.
api_key_input = st.text_input("Enter Your OPENAI api key! 輸入你的OPENAI api key!", type="password")
rerun_button = st.button("Rerun 運行")

st.markdown("""---""")

# The user-supplied key configures the module-level OpenAI client.
# (Alternative left by the author: read it from the environment instead.)
#API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = api_key_input
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def make_request(question_input: str):
    """Send the user's question to the gpt-3.5-turbo chat endpoint.

    Args:
        question_input: Free-form prompt text typed by the user.

    Returns:
        The raw ChatCompletion response (openai==0.27.x dict-style object),
        including ``choices`` and ``usage`` fields consumed by the caller.
    """
    # The user's question belongs in a "user" message; the "system" role is
    # reserved for behavioral instructions per the Chat Completions API.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": question_input},
        ],
    )
    return response
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# One guarded call covers both entry paths: Streamlit re-executes the whole
# script when the Rerun button is pressed, so the original separate
# `if rerun_button:` branch issued a duplicate second API call per
# interaction — and could fire with an empty prompt. Guarding on
# question_input avoids both problems.
if question_input:
    response = make_request(question_input)

if response:
    st.write("Response:")
    st.write(response["choices"][0]["message"]["content"])

    # Token accounting exactly as reported by the API for this request.
    prompt_tokens = response["usage"]["prompt_tokens"]
    completion_tokes = response["usage"]["completion_tokens"]
    total_tokens_used = response["usage"]["total_tokens"]

    # gpt-3.5-turbo pricing assumed here: $0.002 per 1K tokens
    # (i.e. $0.000002 per token) — verify against current OpenAI pricing.
    cost_of_response = total_tokens_used * 0.000002
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Sidebar: per-request token usage and estimated cost.
# Label typos fixed ("Promt" -> "Prompt", "toekn" -> "token"); the module-level
# variable name `completion_tokes` is kept as-is since it is assigned elsewhere
# in this script.
with st.sidebar:
    st.title("Usage Stats:")
    st.markdown("""---""")
    st.write("Prompt tokens used/提示token使用量:", prompt_tokens)
    st.write("Completion tokens used/Completion token使用量:", completion_tokes)
    st.write("Total tokens used/token使用總量:", total_tokens_used)
    st.write("Total cost of request/全部花費: ${:.8f}".format(cost_of_response))
|
requirements.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
openai==0.27.0
streamlit
|