# NOTE(review): removed non-Python residue left over from the source
# extraction (git-blame file size, commit hashes, and line-number gutter);
# it was not part of the program and would be a syntax error in a .py file.
import streamlit as st
import google.generativeai as genai
import re
import os
import textwrap
# Secret key and Google Gemini API configuration
API_KEY = st.secrets["GOOGLE_API_KEY"]
# BUG FIX: the key was read from st.secrets but never handed to the SDK —
# genai.configure() was missing, so every subsequent Gemini call would fail
# authentication. Register the key with the client here, at import time.
genai.configure(api_key=API_KEY)
# Page configuration (must run before any other Streamlit call in the script)
st.set_page_config(page_title="πβ¨ Gemini2 Script Mig Gen Pro", page_icon="πβ¨", layout="wide")
# --- Helper Functions ---
def send_message_to_model(message, model_name, temperature, top_p, top_k, max_tokens):
    """Send *message* to the chosen Gemini model and return its text reply.

    A fresh chat session (no persisted history) is created per call. Any
    failure — connection, quota, or a blocked/empty response while reading
    ``.text`` — is reported to the Streamlit UI and ``None`` is returned.
    """
    # Sampling knobs are passed straight through from the UI sliders.
    gen_config = {
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "response_mime_type": "text/plain",
        "max_output_tokens": max_tokens,
    }
    try:
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=gen_config,
        )
        # Accessing .text stays inside the try: it can raise when the model
        # returns no candidates (e.g. safety block), and we want that surfaced
        # the same way as a transport error.
        reply_text = model.start_chat(history=[]).send_message(message).text
    except Exception as e:
        st.error(f"β Error communicating with the AI: {e}")
        return None
    return reply_text
def generate_migration_script(prompt_base, model_name, temperature, top_p, top_k, max_tokens, source_type, target_type, custom_requirements, specific_details, transformation_details):
    """Assemble the full LLM prompt from the user's settings and request a script.

    Args:
        prompt_base: Free-text migration description typed by the user.
        model_name, temperature, top_p, top_k, max_tokens: Forwarded verbatim
            to send_message_to_model (and from there into the Gemini
            generation config).
        source_type, target_type: Selected source/target system labels.
        custom_requirements, specific_details, transformation_details:
            Optional extra constraints from the UI; falsy values (empty
            strings) are rendered as the literal "None" in the prompt so the
            model always sees an explicit placeholder rather than a blank.

    Returns:
        The model's Markdown reply (expected to contain a ```python fenced
        code block per the format instructions below), or None if the API
        call failed.
    """
    # The prompt body is flush-left inside the triple-quoted f-string; its
    # exact text is part of the app's behavior — do not reflow it.
    prompt = f"""
You are an expert automation engineer specializing in migration scripts. Your task is to generate a complete, secure, and efficient migration script based on the following description:
**Goal:** Create the most complete, detailed, and efficient migration script possible, considering all variables, edge cases, and potential scenarios.
**Migration Description:** {prompt_base}
**Source Type:** {source_type if source_type else "None"}
**Target Type:** {target_type if target_type else "None"}
**Transformation Details:** {transformation_details if transformation_details else "None"}
**Custom Requirements:** {custom_requirements if custom_requirements else "None"}
**Specific Details:** {specific_details if specific_details else "None"}
**Response Format:**
- Respond in Markdown format, including a python code block with its original formatting, without line breaks.
- The python code block must be delimited by ```python and ```.
- Do not include comments, explanations, or any other text outside the code block, unless the prompt requires.
- The code must maintain its full vertical formatting, respecting indentation and line breaks.
- The code must be realistic, using real-world examples, data, and situations.
- Explore different approaches, techniques, and advanced practices, always prioritizing security and efficiency.
- The code must use best practices for accessing resources, connecting to databases, handling errors and logging data.
- Unless the user specifies otherwise, use the most current and secure versions of libraries and resources, using python and following best practices.
- The code must use a valid logging system with timestamps and log levels.
- The code must be well structured according to PEP8 guidelines, with docstrings and clear naming conventions.
- Use incremental reasoning to add improvements, expansions, and considerations to your code.
- Use the history of the conversations so that the response is incremental.
- If the user requests for a transformation, generate it using python code and libraries, and document every step in the code.
**Important:**
- Generate only one script at a time.
- Create the longest, most complete, and detailed code possible to cover a wide range of possibilities and scenarios.
- Consider all the details of the request, expanding the response and improving the script.
- If the user does not provide details on connection parameters, create placeholders in the code with comments like 'PLEASE FILL THIS' indicating where the user should fill in.
- If the user does not provide the details about source and target, use MySQL as source and SQLite as target, and include comments on what the user should change, use the comments '# SOURCE DATABASE' and '# TARGET DATABASE'.
- Provide in your code all necessary comments for the user to understand the process of migration and where he should add his parameters.
- Use `try/except` blocks for error handling, and document what could cause the errors.
"""
    # Delegate the API call; errors are reported to the UI inside the helper
    # and surface here as a None return.
    response = send_message_to_model(prompt, model_name, temperature, top_p, top_k, max_tokens)
    return response
def parse_and_save_py(ai_code, short_title):
    """Extract the ```python fenced block from the AI's Markdown and save it.

    Args:
        ai_code: Raw Markdown text returned by the model. If no fenced
            ```python block is found, the whole reply is saved as-is.
        short_title: Suffix for the output file name. It is derived from
            arbitrary user prompt text upstream, so it is sanitized here —
            any character outside [A-Za-z0-9_.-] becomes "_" — to prevent
            invalid or path-traversing file names (e.g. "a/b" -> "a_b").

    Returns:
        Tuple of (file name written in the current working directory,
        extracted python code as a string).
    """
    match = re.search(r'```python\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    # Fall back to the full reply when the model ignored the fencing request.
    py_code = match.group(1).strip() if match else ai_code.strip()
    # BUG FIX: short_title was interpolated unsanitized; "/" or other special
    # characters from the prompt would break the write or escape the directory.
    safe_title = re.sub(r'[^\w.-]', '_', short_title) or "script"
    file_name = f"migration_script_{safe_title}.py"
    with open(file_name, "w", encoding="utf-8") as f:
        f.write(py_code)
    return file_name, py_code
def main():
    """Render the Streamlit app: settings column, prompt area, and the
    generate -> display -> download workflow for migration scripts."""
    st.title("πβ¨ Gemini2 Script Mig Gen Pro by [Elias Andrade](https://github.com/chaos4455)")
    st.markdown("Generate advanced migration scripts with ease! π")
    st.markdown("---")
    # Layout in columns (sidebar and main area)
    col1, col2 = st.columns([1, 3])
    with col1:
        st.header("βοΈ Settings")
        with st.expander("β¨ AI Settings"):
            # Model + sampling knobs, forwarded straight into the Gemini
            # generation config by generate_migration_script().
            model_name = st.selectbox("π€ AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
            temperature = st.slider("π‘οΈ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
            top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.8, step=0.1, help="Adjust the AI's sampling.")
            top_k = st.slider("Top K", min_value=1, max_value=100, value=40, step=1, help="Adjust the AI's number of candidate tokens.")
            max_tokens = st.number_input("π Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")
        with st.expander("π§° Migration Settings"):
            source_type = st.selectbox("π¦ Source Type", ["MySQL", "PostgreSQL", "SQLite", "MongoDB", "CSV", "JSON", "Parquet", "Excel", "BigQuery", "Snowflake", "Redis", "Kafka", "Other"], index=0, help="Select the type of the data source.")
            target_type = st.selectbox("π― Target Type", ["SQLite", "MySQL", "PostgreSQL", "MongoDB", "CSV", "JSON", "Parquet", "Excel", "BigQuery", "Snowflake", "Redis", "Kafka", "Other"], index=0, help="Select the type of the data target.")
            transformation_details = st.text_area("π Transformation Details", placeholder="Specify any transformations or filtering needed (e.g., convert date formats, filter rows, etc).",
                                                  help="Provide details of any transformations you need during the migration, using Python or SQL, if it can be done with SQL.",
                                                  )
            custom_requirements = st.text_input("β Custom Requirements:", placeholder="Specific libraries or parameters (e.g., version, parameters)", help="List specific libraries and versions, parameters, etc.")
            specific_details = st.text_input("βΉοΈ Specific Details", placeholder="Special options, edge cases", help="Add specific details for the script generation.")
    with col2:
        # User's base prompt; the placeholder text is a multi-line template.
        prompt_base = st.text_area("π Describe the Migration:",
                                   placeholder=textwrap.dedent("""
Describe your migration process in detail.
Include:
- Source and target system details (e.g., database IPs, ports, logins, passwords).
- Specific tables to migrate.
- Any filtering, transformations, or data cleansing needed.
- Important notes about the systems (e.g., if there is an external system involved)
If connection details or source and target are not included, the script will be generated using defaults, and you must fill in the parameters.
"""), key="prompt_base",
                                   help="Provide a detailed description of your migration, including source and target systems, tables, transformations, and any specific parameters.")
        if st.button("β¨ Generate Migration Script"):
            # Guard clause: a migration description is mandatory.
            if not prompt_base:
                st.error("β οΈ Please enter a migration description.")
                return
            with st.spinner("β³ Generating script..."):
                ai_code = generate_migration_script(
                    prompt_base,
                    model_name,
                    temperature,
                    top_p,
                    top_k,
                    max_tokens,
                    source_type,
                    target_type,
                    custom_requirements,
                    specific_details,
                    transformation_details
                )
            if ai_code:
                # BUG FIX: this header string was split across two physical
                # lines in the original (a SyntaxError); rejoined on one line.
                st.markdown("### β Generated Migration Script:")
                st.code(ai_code, language="python")
                # Short, filesystem-friendly suffix from the first 30 chars
                # of the user's prompt.
                short_title = prompt_base[:30].strip().replace(" ", "_").lower()
                file_name_py, py_code = parse_and_save_py(ai_code, short_title)
                st.download_button(
                    label="β¬οΈ Download Script (.py)",
                    data=py_code,
                    file_name=file_name_py,
                    mime="application/x-python",
                )
            else:
                st.error("β Error generating the script. Check the connection with the AI and try again.")
# Script entry point. BUG FIX: the original final line carried a trailing
# " |" gutter artifact from the source extraction (a SyntaxError); removed.
if __name__ == "__main__":
    main()