|
|
import streamlit as st |
|
|
import google.generativeai as genai |
|
|
import json |
|
|
import re |
|
|
import datetime |
|
|
|
|
|
|
|
|
# Read the Gemini API key from Streamlit's secrets store (.streamlit/secrets.toml).
API_KEY = st.secrets["GOOGLE_API_KEY"]

# FIX: the key was loaded but never handed to the SDK, so every API call
# silently depended on a GOOGLE_API_KEY environment variable being set
# outside the app. Configure the client explicitly with the secret we read.
genai.configure(api_key=API_KEY)

st.set_page_config(page_title="π€ Gemini2 PowerShell Command Gen", page_icon="π€", layout="wide")
|
|
|
|
|
def send_message_to_model(message, model_name, temperature, max_tokens):
    """Send a single message to the Gemini model and return the text reply.

    Args:
        message: Prompt text to send to the model.
        model_name: Gemini model identifier (e.g. "gemini-1.5-flash").
        temperature: Sampling temperature forwarded to the generation config.
        max_tokens: Upper bound on the size of the generated response.

    Returns:
        The model's response text, or None if the API call failed. The
        failure is surfaced to the user via st.error instead of raising,
        so callers only need to check for None.
    """
    try:
        # PEP 8: these are per-call locals, not module-level constants, so
        # they use snake_case (the original named them UPPER_SNAKE_CASE).
        generation_config = {
            "temperature": temperature,
            "top_p": 0.8,
            "top_k": 40,
            "response_mime_type": "text/plain",
            "max_output_tokens": max_tokens,
        }
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config,
        )
        # A fresh chat with empty history: each request is independent;
        # no conversation state is carried across Streamlit reruns.
        response = model.start_chat(history=[]).send_message(message)
        return response.text
    except Exception as e:
        # Boundary handler: report the failure in the UI and signal it to
        # the caller with None rather than crashing the Streamlit script.
        st.error(f"β Error communicating with the AI: {e}")
        return None
|
|
|
|
|
def generate_powershell_command(prompt_base, detail_level, script_type, security_level, model_name, temperature, max_tokens, prompt_detail, encoding, add_header, add_error_handling, log_level):
    """Compose the expert-system prompt and ask the model for one PowerShell command.

    Args:
        prompt_base: User's free-form description of the desired command.
        detail_level: "More detailed" / "Default" / "More concise"; injected verbatim.
        script_type: "More automatic" / "More interactive"; injected verbatim.
        security_level: "High" / "Medium" / "Low"; injected verbatim.
        model_name: Gemini model id, forwarded to send_message_to_model.
        temperature: Sampling temperature, forwarded to send_message_to_model.
        max_tokens: Response size cap, forwarded to send_message_to_model.
        prompt_detail: Verbosity hint for the prompt itself; injected verbatim.
        encoding: NOT used in this function's body — presumably kept so the
            call site can pass one settings bundle; the actual file encoding
            work happens in parse_and_save_ps1. TODO confirm and consider
            dropping from this signature.
        add_header: NOT used here either; consumed by parse_and_save_ps1.
        add_error_handling: Boolean rendered into the prompt as "True"/"False".
        log_level: "Detailed" / "Default" / "Minimum"; injected verbatim.

    Returns:
        The model's raw markdown reply (expected to contain a ```powershell
        fenced block), or None when send_message_to_model failed.
    """
    # The prompt encodes the full output contract: markdown-only reply, a
    # single ```powershell fence, no commentary, realistic examples, and a
    # hard-coded default target of Windows Server 2016 / PowerShell 7.
    prompt = f"""
You are a Powershell expert. Your task is to generate a single Powershell command based on the following description:

**Goal:** Create the most complete, detailed, and efficient Powershell command possible, considering all variables and scenarios.

**Command Description:** {prompt_base}

**Detail Level:** {detail_level}
**Script Type:** {script_type}
**Security Level:** {security_level}
**Prompt Detail Level**:{prompt_detail}

**Response Format:**
- Respond in Markdown format, including a Powershell code block with its original formatting, without line breaks.
- The Powershell code block must be delimited by ```powershell and ```.
- Do not include comments, explanations, or any other text outside the code block.
- The Powershell code must maintain its full vertical formatting, respecting indentation and line breaks.
- The code must be realistic, using real-world examples, data, and situations.
- Explore different approaches, techniques, and advanced practices.
- If generating a command with a chain of commands, do not use semicolons at the end or beginning.
- If necessary, use the pipe "|" to chain commands.
- Do not use any special formatting in the result, only the code.
- If the description asks to create a file, the command should create the file directly in the file system and not use a screen output for this.
- If the description asks to read a file, the command should read the file directly from the file system and not use a screen input for this.
- Use advanced PowerShell resources, such as pipelines, variables, functions, and script blocks, when necessary.
- The default operating system is Windows Server 2016, and the default Powershell version is 7, unless the user specifies otherwise.
- Make sure that the generated command is secure and follows the best Powershell practices.

**Log Level**:{log_level}
**Error Handling**:{add_error_handling}

**Important:**
- Generate only one command at a time.
- Create the longest, most complete, and detailed code possible.
- Consider all the details of the request, expanding the response and improving the command.
- Use contextual information (such as PowerShell version and operating system) to generate the command.
- If possible, use incremental reasoning to add improvements, expansions, and considerations to your code.
- Use the history of the conversations so that the response is incremental.

"""
    response = send_message_to_model(prompt, model_name, temperature, max_tokens)
    return response
|
|
|
|
|
def parse_and_save_ps1(ai_code, short_title, encoding, add_header):
    """Extract the PowerShell code from the AI's markdown reply and save it as .ps1.

    Args:
        ai_code: Raw model output, normally containing a ```powershell fence.
        short_title: User-derived fragment used to build the output file name.
        encoding: Text encoding for the written file (e.g. "utf-8").
        add_header: When True, prepend a generated-by banner to the script.

    Returns:
        Tuple of (file_name, ps1_code): the name of the file written to the
        current working directory and the final script contents.
    """
    match = re.search(r'```powershell\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    # Fall back to the whole reply when the model did not use a code fence.
    ps1_code = match.group(1).strip() if match else ai_code.strip()

    # FIX: short_title comes from free-form user input; replace anything that
    # is not a word character or hyphen so the result is a safe file name
    # (no path separators, quotes, colons, etc., which would make open() fail
    # or write outside the working directory).
    safe_title = re.sub(r'[^\w-]', '_', short_title)
    file_name = f"command_{safe_title}.ps1"

    if add_header:
        current_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        header = f"""
#===============================================================================
# Script Generated by Google Gemini 2 PowerShell Command Gen
# Date: {current_date}
# Author: Elias Andrade AKA Chaos4455
#===============================================================================
"""
        ps1_code = header + ps1_code

    with open(file_name, "w", encoding=encoding) as f:
        f.write(ps1_code)
    return file_name, ps1_code
|
|
|
|
|
def main():
    """Render the Streamlit page: a settings column and a prompt/result column."""
    st.title("π€ Gemini2 PowerShell Command Gen by [Elias Andrade](https://github.com/chaos4455)")
    st.markdown("Create PowerShell commands easily and quickly! π")
    st.markdown("---")

    # Narrow settings column on the left, wide work area on the right.
    col1, col2 = st.columns([1, 3])

    with col1:
        st.header("βοΈ Settings")

        with st.expander("β¨ AI Settings"):
            model_name = st.selectbox("π€ AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
            temperature = st.slider("π‘οΈ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
            max_tokens = st.number_input("π Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")

        with st.expander("π Prompt Settings"):
            prompt_presets = st.selectbox("π― Predefined Prompts", ["None", "List files", "Manage processes", "Manage services"], index=0, help="Choose a predefined prompt.")
            prompt_detail = st.selectbox("π§ Prompt Detail", ["More descriptive", "Default", "Concise"], index=1, help="Defines the level of detail of the prompt")

        with st.expander("π οΈ PowerShell Settings"):
            # NOTE(review): powershell_version and operating_system are
            # collected but never forwarded to generate_powershell_command —
            # the prompt hard-codes Windows Server 2016 / PowerShell 7.
            # Kept as-is to preserve behavior; wire them through the prompt
            # builder if they are meant to take effect.
            powershell_version = st.selectbox("ποΈ PowerShell Version", ["7", "5.1"], index=0, help="Choose the PowerShell version.")
            operating_system = st.selectbox(
                "π» Operating System",
                [
                    "Windows Server 2022", "Windows Server 2019", "Windows Server 2016",
                    "Windows Server 2012 R2", "Windows Server 2012", "Windows Server 2008 R2",
                    "Windows Server 2008", "Windows 11", "Windows 10", "Windows 8.1", "Windows 8", "Windows 7", "Other"
                ],
                index=2, help="Choose the target operating system."
            )
            encoding = st.selectbox("π€ Encoding", ["utf-8", "ansi"], index=0, help="Choose the encoding of the .ps1 file.")
            add_header = st.checkbox("π Add Header", value=True, help="Add a header with information in the .ps1 file.")
            add_error_handling = st.checkbox("π‘οΈ Error Handling", value=True, help="Add standard error handling to the script.")
            log_level = st.selectbox("ποΈ Logging Level", ["Detailed", "Default", "Minimum"], index=1, help="Defines the detail level of logs.")

        detail_level = st.selectbox("Detail Level", ["More detailed", "Default", "More concise"], index=1)
        script_type = st.selectbox("Script Type", ["More automatic", "More interactive"], index=0)
        security_level = st.radio("Security Level", ["High", "Medium", "Low"], index=1)

    with col2:
        prompt_base = st.text_input("Describe the PowerShell Command:", placeholder="Ex: List all running processes", key="prompt_base")

        # Merge a chosen preset into the free-form description; the preset
        # alone is a valid prompt when the text box is empty.
        if prompt_presets != "None":
            if prompt_base:
                prompt_base = f"{prompt_presets} , {prompt_base}"
            else:
                prompt_base = prompt_presets  # FIX: dropped stray C-style semicolon

        if st.button("β¨ Generate PowerShell Command"):
            if not prompt_base:
                st.error("β οΈ Please enter a command description.")
                return

            with st.spinner("β³ Generating command..."):
                ai_code = generate_powershell_command(
                    prompt_base,
                    detail_level,
                    script_type,
                    security_level,
                    model_name,
                    temperature,
                    max_tokens,
                    prompt_detail,
                    encoding,
                    add_header,
                    add_error_handling,
                    log_level,
                )

            # ai_code is None when the API call failed (send_message_to_model
            # already showed the error), or the raw markdown reply on success.
            if ai_code:
                st.markdown("### β Generated Command:")
                st.code(ai_code, language="powershell")

                # Derive a short slug from the prompt for the .ps1 file name.
                short_title = prompt_base[:30].strip().replace(" ", "_").lower()
                file_name_ps1, ps1_code = parse_and_save_ps1(ai_code, short_title, encoding, add_header)

                st.download_button(
                    label="β¬οΈ Download Command (.ps1)",
                    data=ps1_code,
                    file_name=file_name_ps1,
                    mime="application/powershell",
                )
            else:
                st.error("β Error generating the command. Check the connection with the AI and try again.")
|
|
|
|
|
# Standard entry-point guard: run the app when this file is executed
# directly (e.g. `streamlit run <file>`), not when it is imported.
if __name__ == "__main__":
    main()