# Prompt Pilot — Streamlit + Gemini prompt-engineering demo app.
# NOTE(review): a fully commented-out earlier draft of this app (which
# duplicated the live code below and contained an exposed API key) was
# removed; recover it from version-control history if needed.
| import streamlit as st | |
| import google.generativeai as genai | |
| import json | |
| import os | |
# API key is read from the environment so the secret is never committed to
# source control. SECURITY: the key previously hard-coded here was exposed
# publicly and must be revoked/rotated in the Google AI console.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
genai.configure(api_key=GOOGLE_API_KEY)
# Single model object shared by the whole app.
model = genai.GenerativeModel("gemini-pro")
def save_prompts_to_json(prompt_format, raw_prompt, model_prompt):
    """Append one prompt record to ``prompts.json`` in the working directory.

    Args:
        prompt_format: Label of the framework used ("RISE" or "RTF").
        raw_prompt: The user's original prompt text.
        model_prompt: The enhanced prompt produced by the model.

    The file always holds a JSON list of records. If it is missing, empty,
    unreadable, or does not contain a list, the history is reset to a fresh
    list (best-effort persistence; errors are printed, never raised).

    BUGFIX: the previous implementation rewrote the file through an ``'r+'``
    handle without calling ``truncate()``, so whenever the new JSON was
    shorter than the old file contents (e.g. after resetting invalid data),
    stale trailing bytes were left behind and corrupted the file. Writing
    the whole list back in ``'w'`` mode truncates correctly.
    """
    data = {
        "raw_prompt": raw_prompt,
        "model_prompt": model_prompt,
        "prompt_format": prompt_format
    }
    file_path = 'prompts.json'
    existing_data = []
    if os.path.exists(file_path):
        try:
            with open(file_path, 'r') as file:
                loaded = json.load(file)
                # Only keep history that is actually a list of records.
                if isinstance(loaded, list):
                    existing_data = loaded
        except (IOError, json.JSONDecodeError):
            # Unreadable or invalid JSON: start a fresh history.
            existing_data = []
    existing_data.append(data)
    try:
        # 'w' mode truncates, so no stale bytes can survive the rewrite.
        with open(file_path, 'w') as file:
            json.dump(existing_data, file, indent=4)
    except IOError as e:
        print(f"Error reading/writing to file: {e}")
| # Streamlit app layout | |
def main():
    """Render the Prompt Pilot Streamlit UI.

    Flow per user prompt: the raw prompt is rewritten by the model into two
    enhanced formats (RISE and RTF), a final response is generated from each
    enhanced prompt, both are displayed, and the user picks the better one in
    the sidebar. The chosen *enhanced prompt* (not the final response) is
    persisted via save_prompts_to_json.

    NOTE(review): Streamlit reruns this whole function on every interaction;
    all cross-rerun state lives in st.session_state.
    """
    st.title("Prompt Pilot")
    # Gate flag: the sidebar selectbox is only shown after a fresh pair of
    # responses has been generated.
    if "selection_enabled" not in st.session_state:
        st.session_state.selection_enabled = False
    st.sidebar.title("Select the best Response")
    # Chat section
    st.header("Chat Section")
    # Persistent chat history (list of {"role", "content"} dicts).
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # The user's most recent raw prompt, kept for saving alongside the pick.
    if "raw_prompt" not in st.session_state:
        st.session_state.raw_prompt = None
    # Enhanced prompts and the user's selection must survive reruns, since
    # the selectbox interaction triggers a rerun after generation.
    if "model_prompt_1" not in st.session_state:
        st.session_state.model_prompt_1 = None
    if "model_prompt_2" not in st.session_state:
        st.session_state.model_prompt_2 = None
    if "selected_response" not in st.session_state:
        st.session_state.selected_response = None
    # Replay the stored chat history on every rerun.
    for message in st.session_state.messages:
        if message["role"] == "user":
            with st.chat_message("user"):
                st.markdown(message["content"])
        else:
            with st.chat_message("assistant"):
                st.markdown(message["content"])
    # Accept user input (walrus: prompt is None/"" when nothing was entered).
    if prompt := st.chat_input("Enter your prompt:"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.session_state.raw_prompt = prompt
        # Disable selection while the new pair of responses is generated.
        st.session_state.selection_enabled = False
        # RISE query: ask the model to rewrite the raw prompt in RISE form.
        query = (
            f"You are a prompt engineering assistant. Your task is to transform a raw prompt into an enhanced format, "
            f"specifically using RISE (Role Input Steps Expectations), "
            f"The output should follow the RISE structure, and must not include any answers, hints, or suggestions. "
            f"Choose the role efficiently and critically related to that field. The goal is to prepare a prompt that, "
            f"when used by a generative model, will produce the most accurate and relevant response. Here is the user's raw prompt: "
            f"'{prompt}'. Please convert it into the RISE format."
        )
        assistant_prompt_1 = model.generate_content(query)
        # Guard against empty/blocked candidates before dereferencing parts.
        if assistant_prompt_1.candidates and assistant_prompt_1.candidates[0].content.parts:
            st.session_state.model_prompt_1 = assistant_prompt_1.candidates[0].content.parts[0].text
        else:
            st.error("No valid response received from the model.")
            return
        # Second call: answer the RISE-enhanced prompt.
        task_1 = (
            f"You are an expert in prompt response generation. Your role is to evaluate and respond to enhanced prompts "
            f"provided in {st.session_state.model_prompt_1}. Your responses should be accurate, contextually relevant, and follow the intent "
            f"and guidance given by the enhanced prompt. Do not make assumptions outside the provided prompt structure, "
            f"and respond strictly to the task at hand."
        )
        Final_response_1 = model.generate_content(task_1)
        if Final_response_1.candidates and Final_response_1.candidates[0].content.parts:
            model_response_1 = Final_response_1.candidates[0].content.parts[0].text
        else:
            st.error("No valid response received for the enhanced prompt.")
            return
        # RTF query: same pipeline with the RTF (Role Task Format) framework.
        query_1 = (
            f"You are a prompt engineering assistant. Your task is to transform a raw prompt into an enhanced format, "
            f"specifically using RTF (Role Task Format), "
            f"The output should follow the RTF structure, and must not include any answers, hints, or suggestions. "
            f"Choose the role efficiently and critically related to that field. The goal is to prepare a prompt that, "
            f"when used by a generative model, will produce the most accurate and relevant response. Here is the user's raw prompt: "
            f"'{prompt}'. Please convert it into the RTF format."
        )
        assistant_prompt_2 = model.generate_content(query_1)
        if assistant_prompt_2.candidates and assistant_prompt_2.candidates[0].content.parts:
            st.session_state.model_prompt_2 = assistant_prompt_2.candidates[0].content.parts[0].text
        else:
            st.error("No valid response received for the enhanced prompt.")
            return
        # Answer the RTF-enhanced prompt.
        task_2 = (
            f"You are an expert in prompt response generation. Your role is to evaluate and respond to enhanced prompts "
            f"provided in {st.session_state.model_prompt_2}. Your responses should be accurate, contextually relevant, and follow the intent "
            f"and guidance given by the enhanced prompt. Do not make assumptions outside the provided prompt structure, "
            f"and respond strictly to the task at hand."
        )
        Final_response_2 = model.generate_content(task_2)
        if Final_response_2.candidates and Final_response_2.candidates[0].content.parts:
            model_response_2 = Final_response_2.candidates[0].content.parts[0].text
        else:
            st.error("No valid response received for the enhanced prompt.")
            return
        # Echo the user's input for this turn.
        with st.chat_message("user"):
            st.markdown(f"**User Input:** \n\n {st.session_state.raw_prompt}")
        # Show the two *enhanced prompts* side by side for comparison.
        # NOTE(review): the chat display shows the enhanced prompts, while the
        # history below stores the final *responses* — confirm this asymmetry
        # is intentional.
        with st.chat_message("assistant"):
            st.markdown(f"**RISE Framework Prompt:** \n\n {st.session_state.model_prompt_1}")
            st.markdown(f"**RTF Framework Prompt:** \n\n {st.session_state.model_prompt_2}")
        # Persist both final responses in the chat history so they survive the
        # rerun triggered by the selection widget.
        st.session_state.messages.append({"role": "assistant", "content": f"**RISE Framework Response:** \n\n {model_response_1}"})
        st.session_state.messages.append({"role": "assistant", "content": f"**RTF Framework Response:** \n\n {model_response_2}"})
        # Generation finished: allow the user to pick a winner.
        st.session_state.selection_enabled = True
    # Handle response selection once responses are generated.
    if st.session_state.selection_enabled:
        # Placeholder first entry prevents an accidental pre-selection.
        selected_option = st.sidebar.selectbox(
            "Select a Response",
            ["Select a Response", "Response 1 (RISE)", "Response 2 (RTF)"],
            index=0
        )
        # Only act once the user moved off the placeholder.
        if selected_option != "Select a Response":
            if selected_option == "Response 1 (RISE)":
                st.session_state.selected_response = st.session_state.model_prompt_1
                label = "RISE"
                st.session_state.selection_enabled = False
            elif selected_option == "Response 2 (RTF)":
                st.session_state.selected_response = st.session_state.model_prompt_2
                label = "RTF"
                st.session_state.selection_enabled = False
            # Persist the chosen enhanced prompt with its framework label.
            save_prompts_to_json(label, st.session_state.raw_prompt, st.session_state.selected_response)
            # Redundant with the per-branch resets above, but harmless: make
            # sure selection is disabled after saving.
            st.session_state.selection_enabled = False
# Run the app only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()