chaos4455 committed on
Commit
abf6c73
Β·
verified Β·
1 Parent(s): dcf0080

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -63
app.py CHANGED
@@ -1,23 +1,33 @@
1
  import streamlit as st
2
  import google.generativeai as genai
3
- import json
4
  import re
5
  import datetime
 
 
6
 
7
  # Secret key and Google Gemini API configuration
8
  API_KEY = st.secrets["GOOGLE_API_KEY"]
9
 
10
  # Page configuration
11
- st.set_page_config(page_title="πŸ€– Gemini2 PowerShell Command Gen", page_icon="πŸ€–", layout="wide")
12
 
13
- def send_message_to_model(message, model_name, temperature, max_tokens):
 
 
 
 
 
 
 
 
 
14
  """Sends a message to the AI model and returns the response."""
15
  try:
16
  # AI model configurations
17
  GENERATION_CONFIG = {
18
  "temperature": temperature,
19
- "top_p": 0.8,
20
- "top_k": 40,
21
  "response_mime_type": "text/plain",
22
  "max_output_tokens": max_tokens,
23
  }
@@ -32,12 +42,12 @@ def send_message_to_model(message, model_name, temperature, max_tokens):
32
  st.error(f"❌ Error communicating with the AI: {e}")
33
  return None
34
 
35
- def generate_powershell_command(prompt_base, detail_level, script_type, security_level, model_name, temperature, max_tokens, prompt_detail, encoding, add_header, add_error_handling, log_level):
36
- """Generates a PowerShell command based on user settings."""
37
  prompt = f"""
38
- You are a Powershell expert. Your task is to generate a single Powershell command based on the following description:
39
 
40
- **Goal:** Create the most complete, detailed, and efficient Powershell command possible, considering all variables and scenarios.
41
 
42
  **Command Description:** {prompt_base}
43
 
@@ -45,66 +55,78 @@ def generate_powershell_command(prompt_base, detail_level, script_type, security
45
  **Script Type:** {script_type}
46
  **Security Level:** {security_level}
47
  **Prompt Detail Level**:{prompt_detail}
 
 
 
 
48
 
49
  **Response Format:**
50
- - Respond in Markdown format, including a Powershell code block with its original formatting, without line breaks.
51
- - The Powershell code block must be delimited by ```powershell and ```.
52
  - Do not include comments, explanations, or any other text outside the code block.
53
- - The Powershell code must maintain its full vertical formatting, respecting indentation and line breaks.
54
  - The code must be realistic, using real-world examples, data, and situations.
55
- - Explore different approaches, techniques, and advanced practices.
56
- - If generating a command with a chain of commands, do not use semicolons at the end or beginning.
57
- - If necessary, use the pipe "|" to chain commands.
58
  - Do not use any special formatting in the result, only the code.
59
  - If the description asks to create a file, the command should create the file directly in the file system and not use a screen output for this.
60
  - If the description asks to read a file, the command should read the file directly from the file system and not use a screen input for this.
61
- - Use advanced PowerShell resources, such as pipelines, variables, functions, and script blocks, when necessary.
62
- - The default operating system is Windows Server 2016, and the default Powershell version is 7, unless the user specifies otherwise.
63
- - Make sure that the generated command is secure and follows the best Powershell practices.
64
 
65
  **Log Level**:{log_level}
66
  **Error Handling**:{add_error_handling}
67
 
68
- **Important:**
69
- - Generate only one command at a time.
70
- - Create the longest, most complete, and detailed code possible.
71
- - Consider all the details of the request, expanding the response and improving the command.
72
- - Use contextual information (such as PowerShell version and operating system) to generate the command.
73
- - If possible, use incremental reasoning to add improvements, expansions, and considerations to your code.
74
- - Use the history of the conversations so that the response is incremental.
 
 
75
 
76
  """
77
- response = send_message_to_model(prompt, model_name, temperature, max_tokens)
78
  return response
79
 
80
- def parse_and_save_ps1(ai_code, short_title, encoding, add_header):
81
- """Parses the markdown and saves the Powershell code as .ps1."""
82
- match = re.search(r'```powershell\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
83
  if match:
84
- ps1_code = match.group(1).strip()
85
  else:
86
- ps1_code = ai_code.strip()
87
 
88
- file_name = f"command_{short_title}.ps1"
89
 
90
  if add_header:
91
  current_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
92
- header = f"""
93
  #===============================================================================
94
- # Script Generated by Google Gemini 2 PowerShell Command Gen
95
  # Date: {current_date}
96
  # Author: Elias Andrade AKA Chaos4455
97
- #===============================================================================
98
- """
99
- ps1_code = header + ps1_code
100
 
101
  with open(file_name, "w", encoding=encoding) as f:
102
- f.write(ps1_code)
103
- return file_name, ps1_code
 
 
 
 
 
 
 
 
104
 
105
  def main():
106
- st.title("πŸ€– Gemini2 PowerShell Command Gen by [Elias Andrade](https://github.com/chaos4455)")
107
- st.markdown("Create PowerShell commands easily and quickly! πŸš€")
108
  st.markdown("---")
109
 
110
  # Layout in columns (sidebar and main area)
@@ -116,77 +138,90 @@ def main():
116
  with st.expander("✨ AI Settings"):
117
  model_name = st.selectbox("πŸ€– AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
118
  temperature = st.slider("🌑️ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
 
 
119
  max_tokens = st.number_input("πŸ“ Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")
120
 
121
  with st.expander("πŸ“ Prompt Settings"):
122
- prompt_presets = st.selectbox("🎯 Predefined Prompts", ["None", "List files", "Manage processes", "Manage services"], index=0, help="Choose a predefined prompt.")
123
  prompt_detail = st.selectbox("🧐 Prompt Detail", ["More descriptive", "Default", "Concise"], index=1, help="Defines the level of detail of the prompt")
124
 
125
- with st.expander("πŸ› οΈ PowerShell Settings"):
126
- powershell_version = st.selectbox("πŸŽ›οΈ PowerShell Version", ["7", "5.1"], index=0, help="Choose the PowerShell version.")
127
  operating_system = st.selectbox(
128
  "πŸ’» Operating System",
129
  [
130
- "Windows Server 2022", "Windows Server 2019", "Windows Server 2016",
131
- "Windows Server 2012 R2", "Windows Server 2012", "Windows Server 2008 R2",
132
- "Windows Server 2008", "Windows 11", "Windows 10", "Windows 8.1", "Windows 8", "Windows 7", "Other"
 
133
  ],
134
- index=2, help="Choose the target operating system."
135
  )
136
- encoding = st.selectbox("πŸ”€ Encoding", ["utf-8", "ansi"], index=0, help="Choose the encoding of the .ps1 file.")
137
- add_header = st.checkbox("πŸ“œ Add Header", value=True, help="Add a header with information in the .ps1 file.")
138
  add_error_handling = st.checkbox("πŸ›‘οΈ Error Handling", value=True, help="Add standard error handling to the script.")
139
  log_level = st.selectbox("πŸ—‚οΈ Logging Level", ["Detailed", "Default", "Minimum"], index=1, help="Defines the detail level of logs.")
140
 
141
  detail_level = st.selectbox("Detail Level", ["More detailed", "Default", "More concise"], index=1)
142
  script_type = st.selectbox("Script Type", ["More automatic", "More interactive"], index=0)
143
  security_level = st.radio("Security Level", ["High", "Medium", "Low"], index=1)
 
 
 
 
 
144
 
145
  with col2:
146
  # User's base prompt
147
- prompt_base = st.text_input("Describe the PowerShell Command:", placeholder="Ex: List all running processes", key="prompt_base")
148
 
149
  if prompt_presets != "None":
150
  if prompt_base:
151
  prompt_base = f"{prompt_presets} , {prompt_base}"
152
  else:
153
  prompt_base = prompt_presets;
154
- if st.button("✨ Generate PowerShell Command"):
155
  if not prompt_base:
156
  st.error("⚠️ Please enter a command description.")
157
  return
158
 
159
- with st.spinner("⏳ Generating command..."):
160
- ai_code = generate_powershell_command(
161
  prompt_base,
162
  detail_level,
163
  script_type,
164
  security_level,
165
  model_name,
166
  temperature,
 
 
167
  max_tokens,
168
  prompt_detail,
169
  encoding,
170
  add_header,
171
  add_error_handling,
172
- log_level
 
 
 
 
173
  )
174
 
175
  if ai_code:
176
- st.markdown("### βœ… Generated Command:")
177
- st.code(ai_code, language="powershell")
178
 
179
  short_title = prompt_base[:30].strip().replace(" ", "_").lower()
180
- file_name_ps1, ps1_code = parse_and_save_ps1(ai_code, short_title, encoding, add_header)
181
 
182
  st.download_button(
183
- label="⬇️ Download Command (.ps1)",
184
- data=ps1_code,
185
- file_name=file_name_ps1,
186
- mime="application/powershell",
187
  )
188
  else:
189
- st.error("❌ Error generating the command. Check the connection with the AI and try again.")
190
 
191
  if __name__ == "__main__":
192
  main()
 
1
  import streamlit as st
2
  import google.generativeai as genai
 
3
  import re
4
  import datetime
5
+ import os
6
+ import subprocess
7
 
8
  # Secret key and Google Gemini API configuration
9
  API_KEY = st.secrets["GOOGLE_API_KEY"]
10
 
11
  # Page configuration
12
+ st.set_page_config(page_title="🐧 Gemini2 Linux Command Gen Pro", page_icon="🐧", layout="wide")
13
 
14
+ # --- Helper Functions ---
15
def check_command_availability(command):
    """Return True if *command* resolves to an executable on the system PATH.

    The original implementation spawned an external ``which`` process, which
    raised FileNotFoundError (instead of returning False) on systems where the
    ``which`` binary itself is missing — e.g. Windows or minimal containers.
    ``shutil.which`` performs the same PATH lookup portably, without forking.

    Args:
        command: Name of the executable to look up (e.g. "systemctl").

    Returns:
        bool: True when the command is found on PATH, False otherwise.
    """
    import shutil  # local import: keeps the file's top-level import block untouched
    return shutil.which(command) is not None
22
+
23
+ def send_message_to_model(message, model_name, temperature, top_p, top_k, max_tokens):
24
  """Sends a message to the AI model and returns the response."""
25
  try:
26
  # AI model configurations
27
  GENERATION_CONFIG = {
28
  "temperature": temperature,
29
+ "top_p": top_p,
30
+ "top_k": top_k,
31
  "response_mime_type": "text/plain",
32
  "max_output_tokens": max_tokens,
33
  }
 
42
  st.error(f"❌ Error communicating with the AI: {e}")
43
  return None
44
 
45
+ def generate_linux_command(prompt_base, detail_level, script_type, security_level, model_name, temperature, top_p, top_k, max_tokens, prompt_detail, encoding, add_header, add_error_handling, log_level, operating_system, command_execution_method, custom_command_requirements, command_specific_details):
46
+ """Generates a Linux command based on user settings."""
47
  prompt = f"""
48
+ You are an expert Linux system administrator. Your task is to generate a single, secure and efficient Linux command or shell script based on the following description:
49
 
50
+ **Goal:** Create the most complete, detailed, efficient, and secure Linux command possible, considering all variables, edge cases, and potential scenarios.
51
 
52
  **Command Description:** {prompt_base}
53
 
 
55
  **Script Type:** {script_type}
56
  **Security Level:** {security_level}
57
  **Prompt Detail Level**:{prompt_detail}
58
+ **Operating System:** {operating_system}
59
+ **Command Execution Method:** {command_execution_method}
60
+ **Custom Command Requirements:** {custom_command_requirements if custom_command_requirements else "None"}
61
+ **Command Specific Details**:{command_specific_details if command_specific_details else "None"}
62
 
63
  **Response Format:**
64
+ - Respond in Markdown format, including a bash code block with its original formatting, without line breaks.
65
+ - The bash code block must be delimited by ```bash and ```.
66
  - Do not include comments, explanations, or any other text outside the code block.
67
+ - The bash code must maintain its full vertical formatting, respecting indentation and line breaks.
68
  - The code must be realistic, using real-world examples, data, and situations.
69
+ - Explore different approaches, techniques, and advanced practices, always prioritizing security and efficiency.
70
+ - If generating a command with a chain of commands, use "&&" to chain commands when appropriate and consider error handling within the chain.
 
71
  - Do not use any special formatting in the result, only the code.
72
  - If the description asks to create a file, the command should create the file directly in the file system and not use a screen output for this.
73
  - If the description asks to read a file, the command should read the file directly from the file system and not use a screen input for this.
74
+ - Use advanced Linux resources, such as pipelines, variables, functions, conditional statements and script blocks, when necessary.
75
+ - Unless the user specifies otherwise, use the most current and secure versions of the command, using bash and following best practices.
 
76
 
77
  **Log Level**:{log_level}
78
  **Error Handling**:{add_error_handling}
79
 
80
+ **Important:**
81
+ - Generate only one command or script at a time.
82
+ - Create the longest, most complete, and detailed code possible to cover a wide range of possibilities and scenarios.
83
+ - Consider all the details of the request, expanding the response and improving the command or script.
84
+ - Use contextual information (such as the operating system) to generate the command or script.
85
+ - If possible, use incremental reasoning to add improvements, expansions, and considerations to your code.
86
+ - Use the history of the conversations so that the response is incremental.
87
+ - If the prompt asks to manage a service or process, consider different init systems (systemd, init, etc.).
88
+ - If a command is required but is not installed on the system, provide instructions to install such command.
89
 
90
  """
91
+ response = send_message_to_model(prompt, model_name, temperature, top_p, top_k, max_tokens)
92
  return response
93
 
94
def parse_and_save_sh(ai_code, short_title, encoding, add_header, executable_permission):
    """Extract the bash code from the AI's markdown reply and save it as a .sh file.

    Args:
        ai_code: Raw model response, normally containing a fenced code block.
        short_title: Slug used to build the output file name.
        encoding: Text encoding for the written file (e.g. "utf-8").
        add_header: When True, prepend a shebang plus an informational header.
        executable_permission: When True, chmod the saved file to 0o755.

    Returns:
        tuple[str, str]: (file_name, sh_code) where sh_code is the exact text
        written to disk.
    """
    # Accept ```bash, ```sh, ```shell or a bare ``` fence — the original
    # pattern only matched ```bash, so any other fence leaked the backticks
    # into the saved script. Fall back to the raw text when there is no fence.
    match = re.search(r'```(?:bash|sh|shell)?\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    if match:
        sh_code = match.group(1).strip()
    else:
        sh_code = ai_code.strip()

    file_name = f"command_{short_title}.sh"

    if add_header:
        current_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Build the header line-by-line so no stray indentation from a
        # triple-quoted source literal can leak into the generated script.
        header = "\n".join([
            "#!/bin/bash",
            "#===============================================================================",
            "# Script Generated by Google Gemini 2 Linux Command Gen Pro",
            f"# Date: {current_date}",
            "# Author: Elias Andrade AKA Chaos4455",
            "#===============================================================================",
        ]) + "\n"
        sh_code = header + sh_code

    with open(file_name, "w", encoding=encoding) as f:
        f.write(sh_code)

    # Make the script executable if requested; failure (e.g. a read-only
    # filesystem) is non-fatal since the user can still download the file.
    if executable_permission:
        try:
            os.chmod(file_name, 0o755)
        except Exception as e:
            st.warning(f"Warning: Could not set executable permission for {file_name}. Reason: {e}")

    return file_name, sh_code
126
 
127
  def main():
128
+ st.title("🐧 Gemini2 Linux Command Gen Pro by [Elias Andrade](https://github.com/chaos4455)")
129
+ st.markdown("Generate advanced Linux commands and scripts with ease! πŸš€")
130
  st.markdown("---")
131
 
132
  # Layout in columns (sidebar and main area)
 
138
  with st.expander("✨ AI Settings"):
139
  model_name = st.selectbox("πŸ€– AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
140
  temperature = st.slider("🌑️ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
141
+ top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.8, step=0.1, help="Adjust the AI's sampling.")
142
+ top_k = st.slider("Top K", min_value=1, max_value=100, value=40, step=1, help="Adjust the AI's number of candidate tokens.")
143
  max_tokens = st.number_input("πŸ“ Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")
144
 
145
  with st.expander("πŸ“ Prompt Settings"):
146
+ prompt_presets = st.selectbox("🎯 Predefined Prompts", ["None", "List files", "Manage processes", "Manage services", "User Management", "System Information", "Network Configuration"], index=0, help="Choose a predefined prompt.")
147
  prompt_detail = st.selectbox("🧐 Prompt Detail", ["More descriptive", "Default", "Concise"], index=1, help="Defines the level of detail of the prompt")
148
 
149
+ with st.expander("🐧 Linux Settings"):
 
150
  operating_system = st.selectbox(
151
  "πŸ’» Operating System",
152
  [
153
+ "Ubuntu Server 22.04", "Ubuntu Server 20.04", "Ubuntu Server 18.04", "Ubuntu Server 16.04", "Ubuntu Server 14.04",
154
+ "CentOS 8", "CentOS 7",
155
+ "RedHat Enterprise Linux 9", "RedHat Enterprise Linux 8",
156
+ "Debian 11", "Debian 10", "Debian 9", "Debian 8", "Debian 7", "Other"
157
  ],
158
+ index=0, help="Choose the target operating system."
159
  )
160
+ encoding = st.selectbox("πŸ”€ Encoding", ["utf-8", "ansi"], index=0, help="Choose the encoding of the .sh file.")
161
+ add_header = st.checkbox("πŸ“œ Add Header", value=True, help="Add a header with information in the .sh file.")
162
  add_error_handling = st.checkbox("πŸ›‘οΈ Error Handling", value=True, help="Add standard error handling to the script.")
163
  log_level = st.selectbox("πŸ—‚οΈ Logging Level", ["Detailed", "Default", "Minimum"], index=1, help="Defines the detail level of logs.")
164
 
165
  detail_level = st.selectbox("Detail Level", ["More detailed", "Default", "More concise"], index=1)
166
  script_type = st.selectbox("Script Type", ["More automatic", "More interactive"], index=0)
167
  security_level = st.radio("Security Level", ["High", "Medium", "Low"], index=1)
168
+ command_execution_method = st.selectbox("Execution Method", ["Single Command", "Shell Script"], index=0, help="Defines if a single command or a shell script will be generated")
169
+ executable_permission = st.checkbox("Enable Execute Permission", value=False, help="Set the generated .sh file as executable.")
170
+ custom_command_requirements = st.text_input("Custom Command Requirements", placeholder="Ex: Use specific tools, parameters", help="Add specific requirements for the generated command.")
171
+ command_specific_details = st.text_input("Command Specific Details", placeholder="Ex: Specific configurations, edge cases", help="Add specific details for command generation.")
172
+
173
 
174
  with col2:
175
  # User's base prompt
176
+ prompt_base = st.text_input("Describe the Linux Command/Script:", placeholder="Ex: List all files in /var/log recursively, including size and permissions", key="prompt_base")
177
 
178
  if prompt_presets != "None":
179
  if prompt_base:
180
  prompt_base = f"{prompt_presets} , {prompt_base}"
181
  else:
182
  prompt_base = prompt_presets;
183
+ if st.button("✨ Generate Linux Command/Script"):
184
  if not prompt_base:
185
  st.error("⚠️ Please enter a command description.")
186
  return
187
 
188
+ with st.spinner("⏳ Generating command/script..."):
189
+ ai_code = generate_linux_command(
190
  prompt_base,
191
  detail_level,
192
  script_type,
193
  security_level,
194
  model_name,
195
  temperature,
196
+ top_p,
197
+ top_k,
198
  max_tokens,
199
  prompt_detail,
200
  encoding,
201
  add_header,
202
  add_error_handling,
203
+ log_level,
204
+ operating_system,
205
+ command_execution_method,
206
+ custom_command_requirements,
207
+ command_specific_details
208
  )
209
 
210
  if ai_code:
211
+ st.markdown("### βœ… Generated Command/Script:")
212
+ st.code(ai_code, language="bash")
213
 
214
  short_title = prompt_base[:30].strip().replace(" ", "_").lower()
215
+ file_name_sh, sh_code = parse_and_save_sh(ai_code, short_title, encoding, add_header, executable_permission)
216
 
217
  st.download_button(
218
+ label="⬇️ Download Command/Script (.sh)",
219
+ data=sh_code,
220
+ file_name=file_name_sh,
221
+ mime="application/x-sh",
222
  )
223
  else:
224
+ st.error("❌ Error generating the command/script. Check the connection with the AI and try again.")
225
 
226
  if __name__ == "__main__":
227
  main()