#------------------------------------------------------------------------
# Import Modules
#------------------------------------------------------------------------
import streamlit as st
import openai
from annotated_text import annotated_text
import os
from PIL import Image
from pathlib import Path
from dotenv import load_dotenv

# Load environment variables from a local .env file (if present).
load_dotenv()

# Retrieve OpenAI API key from environment variables; fail fast if missing.
openai_api_key = os.getenv('OPENAI_API_KEY')
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY not set in environment variables")

# Set the OpenAI API key
openai.api_key = openai_api_key

#------------------------------------------------------------------------
# Configurations
#------------------------------------------------------------------------
# Streamlit page setup
icon = Image.open("MTSS.ai_Icon.png")

st.set_page_config(
    page_title="Prxmpting | Prompt Engineering",
    page_icon=icon,
    layout="centered",
    initial_sidebar_state="auto",
    menu_items={
        'About': "### *This application was created by* \n### LeVesseur Ph.D | MTSS.ai"
    }
)

#------------------------------------------------------------------------
# Header
#------------------------------------------------------------------------
st.title('MTSS:grey[.ai]')
st.header('Prxmpting:grey[ | Prompt Engineering]')

# Sidebar contact toggle: reveals author links when switched on.
contact = st.sidebar.toggle('Handmade by \n**LeVesseur** :grey[ PhD] \n| :grey[MTSS.ai]')
if contact:
    st.sidebar.write('Inquiries: [info@mtss.ai](mailto:info@mtss.ai) \nProfile: [levesseur.com](http://levesseur.com) \nCheck out: [InkQA | Dynamic PDFs](http://www.inkqa.com)')

#------------------------------------------------------------------------
# Sidebar
#------------------------------------------------------------------------
with st.sidebar:
    st.subheader(':grey[_Prompting Guide 101_]')
    st.caption(':grey[_A quick-start handbook for effective prompts_]')

    # Thumbnail of the prompting guide, displayed at a fixed pixel width.
    image_width = 150
    image_path = "Prompting_guide_101.png"
    st.image(image_path, caption="Prompting Guide 101", width=image_width)

    # Offer the full guide as a downloadable PDF.
    pdf_path = "gemini-for-google-workspace-prompting-guide-101.pdf"
    # Path.read_bytes() opens and closes the file for us — the original
    # open(...).read() leaked the file handle.
    download_button = st.download_button(
        label="Download PDF",
        data=Path(pdf_path).read_bytes(),
        file_name="gemini-for-google-workspace-prompting-guide-101.pdf",
        mime="application/pdf"
    )
    if download_button:
        st.success("Downloaded successfully!")

    st.divider()

    # Text with Markdown formatting
    Introduction = """
    There are four main areas to consider when writing an effective prompt. You don’t need to use all four, but using a few will help!
    """
    st.markdown(Introduction)

    # Legend: the four prompt components, color-coded.
    annotated_text(
        ("Persona", "", "#8ef"),
        " ",
        ("Task", "", "#faa"),
        " ",
        ("Context", "", "#fea"),
        " ",
        ("Format", "", "#afa"),
    )

    # Worked example with each component highlighted in context.
    annotated_text(
        "You are a MiMTSS TA Center ",
        ("Implementation Specialist", "Persona", "#8ef"),
        ". ",
        ("Draft a summary email", "Task", "#faa"),
        " to a ",
        ("District Coach", "Context", "#fea"),
        " based on the ",
        ("assessment schedule", "Context", "#fea"),
        " for the academic year. Limit to ",
        ("bullet points", "Format", "#afa"),
        ". "
    )

    st.divider()

    # Text with Markdown formatting
    # (fixed typo: "Writeas" -> "Write as")
    Quick_Tips = """
    Here are quick tips to get you started with prompting:

    1. **Use natural language.** Write as if you’re speaking to another person. Express complete thoughts in full sentences.
    2. **Be specific and iterate.** Tell Gemini for Workspace what you need it to do (summarize, write, change the tone, create). Provide as much context as possible.
    3. **Be concise and avoid complexity.** State your request in brief — but specific — language. Avoid jargon.
    4. **Make it a conversation.** Fine-tune your prompts if the results don’t meet your expectations or if you believe there’s room for improvement. Use follow-up prompts and an iterative process of review and refinement to yield better results.

    *Prompting is an art. You will likely need to try a few different approaches for your prompt if you don’t get your desired outcome the first time. Based on what has been learned, the most successful prompts average around 21 words, yet prompts people often try without knowing this are short — usually less than nine words. Before putting an output into action, review it to ensure **clarity**, **relevance**, and **accuracy**. And of course the most important thing to keep in mind: Generative AI is meant to help humans but the final output is yours.*
    """
    st.markdown(Quick_Tips)

#------------------------------------------------------------------------
# Functions
#------------------------------------------------------------------------
def refine_prompt_with_LLM(Persona, Task, Context, Format):
    """Combine the four user-supplied components into one refined prompt.

    Sends the components plus a set of prompt-writing principles to the
    OpenAI chat API and asks the model to rewrite them as a single,
    cohesive, copy-and-paste-ready prompt (the model is instructed NOT to
    answer the prompt, only to produce it).

    Args:
        Persona: Who the AI should be / the role it assumes.
        Task: The action or objective the AI needs to accomplish.
        Context: Background or setting for the task.
        Format: Structure or style the output should follow.

    Returns:
        The refined prompt text, stripped of surrounding whitespace.
    """
    # Define the prompt principles
    prompt_principles = """
    ### Prompt Principles
    - Use natural language. Write as if you’re speaking to another person. Express complete thoughts in full sentences.
    - Be specific and iterate. Tell the AI what you need it to do (summarize, write, change the tone, create). Provide as much context as possible.
    - Be concise and avoid complexity. State your request in brief — but specific — language. Avoid jargon.
    - Make it a conversation. Fine-tune your prompts if the results don’t meet your expectations or if you believe there’s room for improvement. Use follow-up prompts and an iterative process of review and refinement to yield better results.
    """

    # Construct the full prompt with user input and prompt principles
    prompt = f"""
    ### Instruction
    # The prompt must combine the Persona, Task, Context, and Format in complete sentences in one cohesive paragraph using no line breaks. Each sentence must indicate it is a Persona sentence by starting with '[Persona]', a Task sentence by starting with '[Task]', a Context sentence by starting with '[Context]', and a Format sentence by starting with '[Format]'.
    # The prompt must instruct the AI to be the following Persona: {Persona}.
    # The prompt must instruct the AI to complete the following Task: {Task}.
    # The prompt must instruct the AI to use the following Context: {Context}.
    # The prompt must instruct the AI to use the following Format: {Format}.

    ### Prompt principles
    {prompt_principles}
    """

    # Generate the refined prompt using ChatGPT
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",  # Adjust model as needed
        messages=[
            {"role": "system", "content": "You are a helpful assistant that creates prompts for AI. You will write a prompt using the persona, task, context, and format so that the user can copy and paste the prompt into a ChatGPT and get their answer. You will not answer the prompt, just provide a revised prompt."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=200
    )
    return response.choices[0].message.content.strip()


def prompt_response_from_LLM(prompt):
    """Answer the refined prompt in the voice of an experienced MTSS educator.

    Args:
        prompt: The (possibly user-edited) refined prompt text.

    Returns:
        The model's response text, stripped of surrounding whitespace.
    """
    # Generate the response using ChatGPT.
    # (fixed typo in the system message: "assitance" -> "assistance")
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",  # Adjust model as needed
        messages=[
            {"role": "system", "content": "You are an educator with many years of experience working with MTSS, the science of literacy, and SWPBIS. Respond to the prompt thoughtfully and ensure you are providing the best coaching and technical assistance."},
            {"role": "user", "content": prompt}
        ],
        # max_tokens=1000
    )
    return response.choices[0].message.content.strip()


#------------------------------------------------------------------------
# Main UI
#------------------------------------------------------------------------
# Collect user inputs: one color-coded label + free-text field per component.
annotated_text(
    ("Persona", "", "#8ef")
)
Persona = st.text_input("Represents who the AI is or the role it will be assuming. *E.g., Implementation Specialist.*")

annotated_text(
    ("Task", "", "#faa")
)
Task = st.text_input("Specifies the action or objective the AI needs to accomplish. *E.g., Drafting a summary email.*")

annotated_text(
    ("Context", "", "#fea")
)
Context = st.text_input("Provides the background or setting for the task. *E.g., For a District Coach based on the Assessment schedule.*")

annotated_text(
    ("Format", "", "#afa")
)
Format = st.text_input("Outlines the specific structure or style in which the task should be completed. *E.g., Limit to bullet points.*")

# Generate refined prompt; store it in session_state so it survives reruns.
if st.button("Generate Refined Prompt", type="secondary"):
    with st.spinner('Thinking...'):
        refined_prompt = refine_prompt_with_LLM(Persona, Task, Context, Format)
        if refined_prompt:
            st.session_state['refined_prompt'] = refined_prompt

# Display annotated text and refined prompt
if 'refined_prompt' in st.session_state:
    annotated_text(
        (Persona, "Persona", "#8ef"),
        " ",
        (Task, "Task", "#faa"),
        " ",
        (Context, "Context", "#fea"),
        " ",
        (Format, "Format", "#afa"),
    )

# Editable text area pre-filled with the refined prompt (empty before first run).
refined_prompt_text = st.text_area(
    "Refined Prompt | Feel free to edit the prompt before generating a response",
    value=st.session_state.get('refined_prompt', ''),
    height=100
)
if refined_prompt_text:
    st.success('Refined prompt generated successfully.')

st.divider()

# Generate response from refined prompt
if st.button("Generate Response from Refined Prompt", type="secondary"):
    with st.spinner('Thinking...'):
        if 'refined_prompt' in st.session_state and st.session_state['refined_prompt']:
            response_text = prompt_response_from_LLM(st.session_state['refined_prompt'])
            if response_text:
                st.session_state['response_text'] = response_text
        else:
            st.error("Please generate a refined prompt first.")

# Text box to display the response
response_text_box = st.text_area(
    "AI Response",
    value=st.session_state.get('response_text', ''),
    height=300
)
if response_text_box:
    st.info('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')