File size: 12,204 Bytes
0776f89 818fecd abf6c73 494ce6a 74d8a54 8099599 0776f89 74d8a54 abf6c73 c5dee76 abf6c73 74d8a54 7c724c7 74d8a54 abf6c73 494ce6a 818fecd 494ce6a 74d8a54 494ce6a 74d8a54 7c724c7 74d8a54 7c724c7 abf6c73 7c724c7 abf6c73 74d8a54 abf6c73 74d8a54 abf6c73 74d8a54 abf6c73 74d8a54 abf6c73 74d8a54 abf6c73 74d8a54 abf6c73 12dcb07 74d8a54 12dcb07 abf6c73 12dcb07 7c724c7 abf6c73 74d8a54 7c724c7 abf6c73 7c724c7 abf6c73 7c724c7 abf6c73 b03fec6 abf6c73 818fecd 74d8a54 abf6c73 818fecd abf6c73 74d8a54 abf6c73 818fecd 74d8a54 abf6c73 7c724c7 abf6c73 0776f89 b03fec6 74d8a54 6d0cadc 818fecd 6d0cadc 74d8a54 818fecd 74d8a54 dcf0080 74d8a54 abf6c73 74d8a54 818fecd 74d8a54 abf6c73 74d8a54 abf6c73 74d8a54 818fecd abf6c73 818fecd abf6c73 818fecd abf6c73 74d8a54 21b1ee6 74d8a54 abf6c73 6d0cadc 74d8a54 abf6c73 0776f89 74d8a54 818fecd abf6c73 6d0cadc 74d8a54 6d0cadc 0776f89 abf6c73 818fecd 74d8a54 abf6c73 818fecd abf6c73 818fecd 74d8a54 abf6c73 b03fec6 74d8a54 abf6c73 b03fec6 6d0cadc abf6c73 6d0cadc abf6c73 b03fec6 0776f89 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 |
import datetime
import os
import re
import shutil
import subprocess

import google.generativeai as genai
import streamlit as st
# Secret key and Google Gemini API configuration.
# Read the API key from Streamlit secrets and register it with the SDK.
# Nothing else in this module calls genai.configure(), so without this
# call every GenerativeModel request would fail to authenticate.
API_KEY = st.secrets["GOOGLE_API_KEY"]
genai.configure(api_key=API_KEY)

# Page configuration — must run before any other Streamlit output call.
st.set_page_config(page_title="π§ Gemini2 Linux Command Gen Pro", page_icon="π§", layout="wide")
# --- Helper Functions ---
def check_command_availability(command):
    """Return True if *command* is resolvable on the system's PATH.

    Uses shutil.which() instead of spawning the external `which` binary:
    this avoids one subprocess per check and also behaves correctly on
    systems where `which` itself is not installed (the old subprocess
    approach raised an uncaught FileNotFoundError in that case instead
    of returning False).

    Args:
        command: Name of the executable to look up (e.g. "systemctl").

    Returns:
        bool: True when the executable is found on PATH, False otherwise.
    """
    return shutil.which(command) is not None
def send_message_to_model(message, model_name, temperature, top_p, top_k, max_tokens):
    """Send one message to the Gemini model and return its text reply.

    Builds a fresh model with the supplied sampling settings, opens an
    empty chat session, and sends *message*. On any failure an error is
    shown in the Streamlit UI and None is returned instead of raising.

    Args:
        message: Prompt text to send.
        model_name: Gemini model identifier (e.g. "gemini-1.5-flash").
        temperature, top_p, top_k: Sampling parameters forwarded as-is.
        max_tokens: Upper bound on the response size.

    Returns:
        str | None: The model's plain-text reply, or None on error.
    """
    generation_config = {
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "response_mime_type": "text/plain",
        "max_output_tokens": max_tokens,
    }
    try:
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config,
        )
        chat = model.start_chat(history=[])
        reply = chat.send_message(message)
        return reply.text
    except Exception as e:
        st.error(f"β Error communicating with the AI: {e}")
        return None
def generate_linux_command(prompt_base, detail_level, script_type, security_level, model_name, temperature, top_p, top_k, max_tokens, prompt_detail, encoding, add_header, add_error_handling, log_level, operating_system, command_execution_method, custom_command_requirements, command_specific_details):
    """Generates a Linux command based on user settings.

    Assembles a single large instruction prompt from the UI settings and
    forwards it to send_message_to_model(). Returns the raw model reply
    (Markdown text, normally containing a ```bash fenced block) or None
    when send_message_to_model() reported an error.

    NOTE(review): the `encoding` and `add_header` parameters are accepted
    here but never interpolated into the prompt — they only influence file
    output later in parse_and_save_sh(); confirm whether they were meant
    to be part of the prompt as well.
    """
    # The template below is sent verbatim to the model; its exact wording
    # drives the expected response shape (one ```bash fenced code block).
    prompt = f"""
You are an expert Linux system administrator. Your task is to generate a single, secure and efficient Linux command or shell script based on the following description:
**Goal:** Create the most complete, detailed, efficient, and secure Linux command possible, considering all variables, edge cases, and potential scenarios.
**Command Description:** {prompt_base}
**Detail Level:** {detail_level}
**Script Type:** {script_type}
**Security Level:** {security_level}
**Prompt Detail Level**:{prompt_detail}
**Operating System:** {operating_system}
**Command Execution Method:** {command_execution_method}
**Custom Command Requirements:** {custom_command_requirements if custom_command_requirements else "None"}
**Command Specific Details**:{command_specific_details if command_specific_details else "None"}
**Response Format:**
- Respond in Markdown format, including a bash code block with its original formatting, without line breaks.
- The bash code block must be delimited by ```bash and ```.
- Do not include comments, explanations, or any other text outside the code block.
- The bash code must maintain its full vertical formatting, respecting indentation and line breaks.
- The code must be realistic, using real-world examples, data, and situations.
- Explore different approaches, techniques, and advanced practices, always prioritizing security and efficiency.
- If generating a command with a chain of commands, use "&&" to chain commands when appropriate and consider error handling within the chain.
- Do not use any special formatting in the result, only the code.
- If the description asks to create a file, the command should create the file directly in the file system and not use a screen output for this.
- If the description asks to read a file, the command should read the file directly from the file system and not use a screen input for this.
- Use advanced Linux resources, such as pipelines, variables, functions, conditional statements and script blocks, when necessary.
- Unless the user specifies otherwise, use the most current and secure versions of the command, using bash and following best practices.
**Log Level**:{log_level}
**Error Handling**:{add_error_handling}
**Important:**
- Generate only one command or script at a time.
- Create the longest, most complete, and detailed code possible to cover a wide range of possibilities and scenarios.
- Consider all the details of the request, expanding the response and improving the command or script.
- Use contextual information (such as the operating system) to generate the command or script.
- If possible, use incremental reasoning to add improvements, expansions, and considerations to your code.
- Use the history of the conversations so that the response is incremental.
- If the prompt asks to manage a service or process, consider different init systems (systemd, init, etc.).
- If a command is required but is not installed on the system, provide instructions to install such command.
"""
    response = send_message_to_model(prompt, model_name, temperature, top_p, top_k, max_tokens)
    return response
def parse_and_save_sh(ai_code, short_title, encoding, add_header, executable_permission):
    """Parse the model's Markdown reply and save the bash code as a .sh file.

    Extracts the first ```bash fenced block from *ai_code* (falling back to
    the whole text when no fence is present), optionally prepends a header,
    writes the script to the current directory, and optionally marks it
    executable.

    Args:
        ai_code: Raw model reply (Markdown, usually with a ```bash block).
        short_title: User-derived fragment used to build the file name.
        encoding: Text encoding for the written file (e.g. "utf-8").
        add_header: When True, prepend a shebang + metadata header.
        executable_permission: When True, chmod the file to 0o755.

    Returns:
        tuple[str, str]: (file name written, final script text).
    """
    match = re.search(r'```bash\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    if match:
        sh_code = match.group(1).strip()
    else:
        sh_code = ai_code.strip()
    # Sanitize the title before using it in a path: the caller derives it
    # from free-form user text, which may contain path separators (e.g. a
    # prompt mentioning "/var/log") or other characters invalid in file
    # names — previously that produced broken or user-influenced paths.
    safe_title = re.sub(r'[^\w.-]', '_', short_title)
    file_name = f"command_{safe_title}.sh"
    if add_header:
        current_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        header = f"""#!/bin/bash
#===============================================================================
# Script Generated by Google Gemini 2 Linux Command Gen Pro
# Date: {current_date}
# Author: Elias Andrade AKA Chaos4455
#===============================================================================\n"""
        sh_code = header + sh_code
    # NOTE(review): the UI offers "ansi" as an encoding choice; that codec
    # name is not guaranteed to exist on all Python builds — confirm.
    with open(file_name, "w", encoding=encoding) as f:
        f.write(sh_code)
    # Make the script executable if necessary
    if executable_permission:
        try:
            os.chmod(file_name, 0o755)
        except Exception as e:
            st.warning(f"Warning: Could not set executable permission for {file_name}. Reason: {e}")
    return file_name, sh_code
def main():
    """Render the Streamlit UI: settings column, prompt input, generation flow.

    Fixes relative to the original: the results-header st.markdown() call
    was split across two source lines (an invalid statement as written) and
    is rejoined into one; a stray trailing semicolon is removed.
    """
    st.title("π§ Gemini2 Linux Command Gen Pro by [Elias Andrade](https://github.com/chaos4455)")
    st.markdown("Generate advanced Linux commands and scripts with ease! π")
    st.markdown("---")
    # Layout in columns (sidebar and main area)
    col1, col2 = st.columns([1, 3])

    with col1:
        st.header("βοΈ Settings")
        with st.expander("β¨ AI Settings"):
            model_name = st.selectbox("π€ AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
            temperature = st.slider("π‘οΈ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
            top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.8, step=0.1, help="Adjust the AI's sampling.")
            top_k = st.slider("Top K", min_value=1, max_value=100, value=40, step=1, help="Adjust the AI's number of candidate tokens.")
            max_tokens = st.number_input("π Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")
        with st.expander("π Prompt Settings"):
            prompt_presets = st.selectbox("π― Predefined Prompts", ["None", "List files", "Manage processes", "Manage services", "User Management", "System Information", "Network Configuration"], index=0, help="Choose a predefined prompt.")
            prompt_detail = st.selectbox("π§ Prompt Detail", ["More descriptive", "Default", "Concise"], index=1, help="Defines the level of detail of the prompt")
        with st.expander("π§ Linux Settings"):
            operating_system = st.selectbox(
                "π» Operating System",
                [
                    "Ubuntu Server 22.04", "Ubuntu Server 20.04", "Ubuntu Server 18.04", "Ubuntu Server 16.04", "Ubuntu Server 14.04",
                    "CentOS 8", "CentOS 7",
                    "RedHat Enterprise Linux 9", "RedHat Enterprise Linux 8",
                    "Debian 11", "Debian 10", "Debian 9", "Debian 8", "Debian 7", "Other"
                ],
                index=0, help="Choose the target operating system."
            )
            encoding = st.selectbox("π€ Encoding", ["utf-8", "ansi"], index=0, help="Choose the encoding of the .sh file.")
            add_header = st.checkbox("π Add Header", value=True, help="Add a header with information in the .sh file.")
            add_error_handling = st.checkbox("π‘οΈ Error Handling", value=True, help="Add standard error handling to the script.")
            log_level = st.selectbox("ποΈ Logging Level", ["Detailed", "Default", "Minimum"], index=1, help="Defines the detail level of logs.")
            detail_level = st.selectbox("Detail Level", ["More detailed", "Default", "More concise"], index=1)
            script_type = st.selectbox("Script Type", ["More automatic", "More interactive"], index=0)
            security_level = st.radio("Security Level", ["High", "Medium", "Low"], index=1)
            command_execution_method = st.selectbox("Execution Method", ["Single Command", "Shell Script"], index=0, help="Defines if a single command or a shell script will be generated")
            executable_permission = st.checkbox("Enable Execute Permission", value=False, help="Set the generated .sh file as executable.")
            custom_command_requirements = st.text_input("Custom Command Requirements", placeholder="Ex: Use specific tools, parameters", help="Add specific requirements for the generated command.")
            command_specific_details = st.text_input("Command Specific Details", placeholder="Ex: Specific configurations, edge cases", help="Add specific details for command generation.")

    with col2:
        # User's base prompt
        prompt_base = st.text_input("Describe the Linux Command/Script:", placeholder="Ex: List all files in /var/log recursively, including size and permissions", key="prompt_base")
        # Merge a selected preset with (or substitute it for) the free text.
        if prompt_presets != "None":
            if prompt_base:
                prompt_base = f"{prompt_presets} , {prompt_base}"
            else:
                prompt_base = prompt_presets
        if st.button("β¨ Generate Linux Command/Script"):
            if not prompt_base:
                st.error("β οΈ Please enter a command description.")
                return
            with st.spinner("β³ Generating command/script..."):
                ai_code = generate_linux_command(
                    prompt_base,
                    detail_level,
                    script_type,
                    security_level,
                    model_name,
                    temperature,
                    top_p,
                    top_k,
                    max_tokens,
                    prompt_detail,
                    encoding,
                    add_header,
                    add_error_handling,
                    log_level,
                    operating_system,
                    command_execution_method,
                    custom_command_requirements,
                    command_specific_details
                )
            if ai_code:
                st.markdown("### β Generated Command/Script:")
                st.code(ai_code, language="bash")
                # Derive a short, slug-like title from the prompt for the file name.
                short_title = prompt_base[:30].strip().replace(" ", "_").lower()
                file_name_sh, sh_code = parse_and_save_sh(ai_code, short_title, encoding, add_header, executable_permission)
                st.download_button(
                    label="β¬οΈ Download Command/Script (.sh)",
                    data=sh_code,
                    file_name=file_name_sh,
                    mime="application/x-sh",
                )
            else:
                st.error("β Error generating the command/script. Check the connection with the AI and try again.")
# Script entry point: render the app only when executed directly, not on import.
if __name__ == "__main__":
    main()