Spaces · Build error
Commit: sync with remote
Files changed:
- __pycache__/aiutils.cpython-313.pyc +0 -0
- aiutils.py +113 -0
- app.py +67 -44
- requirements.txt +4 -0
__pycache__/aiutils.cpython-313.pyc
ADDED
Binary file (3.61 kB)
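The compiled bytecode above is regenerated automatically and does not need to be tracked. If the intent is only to sync source files, a .gitignore entry along the lines of this sketch (a suggestion, not part of the commit) would keep the cache out of future pushes:

# .gitignore (suggested sketch, not in this commit)
__pycache__/
*.pyc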
aiutils.py
ADDED
@@ -0,0 +1,113 @@
import streamlit as st
import google.generativeai as genai
from openai import OpenAI
from huggingface_hub import InferenceClient
import time
import os

GEMINI_MODEL_ID = "gemini-2.0-flash-exp"
gemini_api_key = os.getenv("GEMINI_API_KEY")
model_id = GEMINI_MODEL_ID
genai.configure(api_key=gemini_api_key)

gpt_api_key = os.getenv("OPENAI_API_KEY")
gptclient = OpenAI(api_key=gpt_api_key)

LLAMA_MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"
hf_token = os.getenv("HF_TOKEN")
llamaclient = InferenceClient(api_key=hf_token)


def ask_gemini(prompt):
    """Generates a Gemini response to the prompt; returns (text, elapsed time)."""
    try:
        model = genai.GenerativeModel(GEMINI_MODEL_ID)
        chat = model.start_chat()

        # Record the time before sending the prompt
        start_time = time.time()

        # Send a text prompt to the Gemini API
        response = chat.send_message([prompt])

        # Record the time after receiving the response
        end_time = time.time()

        # Calculate and format the elapsed time
        elapsed_time = end_time - start_time
        elapsed_time_str = f"{elapsed_time:.2f} seconds"

        return response.text, elapsed_time_str

    except Exception as e:
        st.error(f"An error occurred while generating AI response from Gemini: {e}")
        return None, None


def ask_gpt(prompt):
    """Generates a GPT-4o response to the prompt; returns (text, elapsed time)."""
    try:
        # Record the time before sending the prompt
        start_time = time.time()

        completion = gptclient.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2,
            top_p=0.7,
            max_tokens=1024,
        )

        # Record the time after receiving the response
        end_time = time.time()

        # Calculate and format the elapsed time
        elapsed_time = end_time - start_time
        elapsed_time_str = f"{elapsed_time:.2f} seconds"

        return completion.choices[0].message.content, elapsed_time_str

    except Exception as e:
        st.error(f"Error processing GPT-4o response: {e}")
        return None, None


def ask_llama(prompt):
    """Generates a Llama 3.2 response to the prompt; returns (text, elapsed time)."""
    try:
        messages = [{"role": "user", "content": prompt}]

        # Record the time before sending the prompt
        start_time = time.time()

        completion = llamaclient.chat.completions.create(
            model=LLAMA_MODEL_ID,
            messages=messages,
            max_tokens=1024,
        )

        # Record the time after receiving the response
        end_time = time.time()

        # Calculate and format the elapsed time
        elapsed_time = end_time - start_time
        elapsed_time_str = f"{elapsed_time:.2f} seconds"

        return completion.choices[0].message.content, elapsed_time_str

    except Exception as e:
        st.error(f"Error processing Llama response: {e}")
        return None, None
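As a quick sanity check of the three helpers above, a minimal sketch like the following could be run outside the Streamlit app, assuming GEMINI_API_KEY, OPENAI_API_KEY, and HF_TOKEN are already set in the environment (the file name smoke_test.py is hypothetical and not part of this commit):

# smoke_test.py -- hypothetical check of the aiutils helpers, not part of this commit
from aiutils import ask_gemini, ask_gpt, ask_llama

prompt = "In one sentence, what is machine learning?"

for name, ask in [("Gemini", ask_gemini), ("GPT-4o", ask_gpt), ("Llama 3.2", ask_llama)]:
    answer, elapsed = ask(prompt)  # each helper returns (text, elapsed) or (None, None)
    if answer is None:
        print(f"{name}: request failed")
    else:
        print(f"{name} ({elapsed}): {answer}")

Each helper already catches its own exceptions and returns (None, None) on failure, so the loop only needs to check for None.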
app.py
CHANGED
@@ -1,46 +1,69 @@
Apart from the opening `import streamlit as st`, the old 44-line body is removed and replaced with the following:

import streamlit as st
from aiutils import ask_gemini, ask_gpt, ask_llama


def main():
    # App title
    st.title("Three-way Comparison of AI Models")

    # Step 1: Task selection
    st.header("Select Task")

    task = st.selectbox(
        "What do you want to generate?",
        ["Generate a syllabus", "Generate an exam", "Create a program"],
    )

    # Step 2: Build the prompt for the selected task
    prompt = ""
    if task == "Generate a syllabus":
        prompt = """Generate a syllabus for the course Intelligent Systems
        for 3rd year Computer Science students of the College of ICT of
        West Visayas State University. The course will use the book
        'Building Intelligent Systems Using Machine Learning and
        Deep Learning'. CCS 229 is a 3-unit lecture
        course that will run for 18 weeks (54 hours)."""

    elif task == "Generate an exam":
        prompt = """Generate a 10-item multiple choice test with 4 options
        on the topic Machine Learning. The test should require higher order
        thinking skills and should be appropriate for 3rd year
        Computer Science students."""

    elif task == "Create a program":
        prompt = """Create a streamlit app that generates a random password
        that complies with the following requirements: 1) at least 8 characters
        2) at least one uppercase letter 3) at least one lowercase letter
        4) at least one number 5) at least one special character."""

    st.write(f"Prompt: {prompt}")

    if st.button("Generate Responses"):
        st.write("Please wait. This is a compute intensive process...")
        with st.spinner("Asking AI Models..."):
            # Query the three AI models
            gemini_response, gemini_rt = ask_gemini(prompt)
            gpt_response, gpt_rt = ask_gpt(prompt)
            llama_response, llama_rt = ask_llama(prompt)

        # Step 3: Display responses in tabs
        st.header("AI Model Outputs")
        tab1, tab2, tab3 = st.tabs(["Gemini 2.0", "OpenAI GPT-4o", "Meta Llama 3.2"])

        with tab1:
            st.subheader("Gemini 2.0")
            st.write(f"Response Time: {gemini_rt}")
            st.write(gemini_response)

        with tab2:
            st.subheader("OpenAI GPT-4o")
            st.write(f"Response Time: {gpt_rt}")
            st.write(gpt_response)

        with tab3:
            st.subheader("Meta Llama 3.2")
            st.write(f"Response Time: {llama_rt}")
            st.write(llama_response)


if __name__ == "__main__":
    main()
requirements.txt
ADDED
@@ -0,0 +1,4 @@
streamlit
openai
google-generativeai
huggingface_hub
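The unpinned dependencies above resolve to the latest release at every build, which is a common source of Spaces build errors when an upstream package changes its API. A pinned variant along these lines trades freshness for reproducible builds; the exact version numbers below are illustrative assumptions and should be replaced with whatever the Space was last tested against:

# requirements.txt (pinned sketch; versions are assumptions, not part of this commit)
streamlit==1.40.2
openai==1.57.0
google-generativeai==0.8.3
huggingface_hub==0.26.2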