# NOTE: removed non-code residue left over from a scraped git-blame view
# (file size, commit hashes, and a line-number gutter) — it was not Python
# and would have broken parsing.
import streamlit as st
import google.generativeai as genai
import re
import os
import platform
# --- Secret key and Google Gemini API configuration ---
# Read the API key from Streamlit's secrets store (KeyError if missing).
API_KEY = st.secrets["GOOGLE_API_KEY"]
# BUG FIX: the key was read but never handed to the SDK; without this call
# every genai.GenerativeModel request fails with an authentication error.
genai.configure(api_key=API_KEY)

# Page configuration — must run before any other Streamlit output call.
st.set_page_config(page_title="๐โจ Gemini2 Pip Gen Pro", page_icon="๐โจ", layout="wide")
# --- Helper Functions ---
def send_message_to_model(message, model_name, temperature, top_p, top_k, max_tokens):
    """Send a single message to the chosen Gemini model and return its text.

    A fresh chat session (empty history) is created per call. On any API or
    transport failure a Streamlit error is shown and None is returned so the
    caller can decide how to proceed.
    """
    try:
        # Sampling/generation knobs forwarded straight from the UI sliders.
        generation_config = {
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "response_mime_type": "text/plain",
            "max_output_tokens": max_tokens,
        }
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config,
        )
        reply = model.start_chat(history=[]).send_message(message)
        return reply.text
    except Exception as e:
        # Surface the failure in the UI instead of crashing the script run.
        st.error(f"โ Error communicating with the AI: {e}")
        return None
def generate_pip_command(prompt_base, model_name, temperature, top_p, top_k, max_tokens, selected_libraries, selected_groups, custom_requirements, specific_details, pip_options, target_os):
    """Assemble the full instruction prompt from the user's settings and ask the AI.

    Every UI input is interpolated into one large prompt describing the desired
    pip command and the required markdown/code-fence answer format. The actual
    model call is delegated to send_message_to_model, so this returns the
    model's markdown reply, or None if the call failed.
    """
    prompt = f"""
You are an expert Python development assistant. Your task is to generate a complete and efficient pip command based on the following description:
**Goal:** Create the most complete, detailed, and efficient pip install command possible, considering all variables, edge cases, and potential scenarios.
**Target Operating System:** {target_os}
**Command Description:** {prompt_base}
**Selected Libraries:** {selected_libraries if selected_libraries else "None"}
**Selected Groups:** {selected_groups if selected_groups else "None"}
**Custom Requirements:** {custom_requirements if custom_requirements else "None"}
**Specific Details:** {specific_details if specific_details else "None"}
**Pip Options:** {pip_options if pip_options else "None"}
**Response Format:**
- Respond in Markdown format, including a pip command code block, a bash code block and powershell code block with its original formatting, without line breaks.
- The pip command code block must be delimited by ```pip and ```.
- The bash code block must be delimited by ```bash and ```.
- The powershell code block must be delimited by ```powershell and ```.
- Do not include comments, explanations, or any other text outside the code block.
- The code must maintain its full vertical formatting, respecting indentation and line breaks.
- Explore different approaches, techniques, and advanced practices, always prioritizing security and efficiency.
- Use advanced pip resources such as version constraints, index options, and requirements files when necessary.
- If a specific version of a library is specified, use it.
- Unless the user specifies otherwise, use the most current versions of the libraries and pip, using and following best practices.
- Use incremental reasoning to add improvements, expansions, and considerations to your code.
- Use the history of the conversations so that the response is incremental.
- If the target OS is Windows, use pip commands compatible with Windows, and if the target OS is Linux, use pip commands compatible with Linux
**Important:**
- Generate only one command at a time.
- Create the longest, most complete, and detailed command possible to cover a wide range of possibilities and scenarios.
- Consider all the details of the request, expanding the response and improving the command.
- If the prompt asks to install a library that is not in a given group, install it in the command, but include in the prompt that library is not in the selected groups.
- If a version of a library is specified, install it using a version constraint.
- Consider if the user needs a requirements.txt file or not and if needed use it in your command.
"""
    # Hand the assembled prompt to the shared model wrapper and pass its
    # reply (markdown text or None) straight through.
    return send_message_to_model(prompt, model_name, temperature, top_p, top_k, max_tokens)
def parse_and_save_code(ai_code, short_title):
    """Extract the pip/bash/powershell fenced blocks from the AI's markdown
    reply and persist each one to a file in the current directory.

    Parameters
    ----------
    ai_code : str
        Markdown reply expected to contain ```pip / ```bash / ```powershell
        fenced code blocks.
    short_title : str
        Slug appended to each file name (assumed filesystem-safe by the caller).

    Returns
    -------
    dict
        Maps "pip" / "bash" / "powershell" to {"name": file_name, "code": code}.
        "pip" is always present; the others only when their fence was found.
    """
    pip_match = re.search(r'```pip\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    bash_match = re.search(r'```bash\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    ps1_match = re.search(r'```powershell\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)

    # Fall back to the raw reply when no dedicated pip fence was found, so the
    # user always gets at least one downloadable file.
    pip_code = pip_match.group(1).strip() if pip_match else ai_code.strip()
    bash_code = bash_match.group(1).strip() if bash_match else None
    ps1_code = ps1_match.group(1).strip() if ps1_match else None

    base_file_name = f"pip_command_{short_title}"
    files = {"pip": {"name": f"{base_file_name}.txt", "code": pip_code}}
    if bash_code:
        files["bash"] = {"name": f"{base_file_name}.sh", "code": bash_code}
    if ps1_code:
        files["powershell"] = {"name": f"{base_file_name}.ps1", "code": ps1_code}

    # BUG FIX: write with an explicit UTF-8 encoding — the platform default
    # (e.g. cp1252 on Windows) raises UnicodeEncodeError on the non-ASCII
    # characters that frequently appear in AI replies.
    for entry in files.values():
        with open(entry["name"], "w", encoding="utf-8") as f:
            f.write(entry["code"])
    return files
# Catalog of suggested libraries, grouped by domain. Hoisted to module level
# so the (large) literal is built once at import time rather than on every
# Streamlit rerun of main().
AVAILABLE_LIBRARIES = {
    "๐ Data Analysis": ["pandas", "numpy", "scipy", "matplotlib", "seaborn", "plotly", "bokeh", "statsmodels", "scikit-image", "geopandas", "altair", "holoviews", "datashader", "missingno", "vaex", "dask", "xarray", "polars", "arrow", "numba", "cudf", "cupy", "streamz", "panel", "hvplot"],
    "๐ง Machine Learning": ["scikit-learn", "tensorflow", "torch", "keras", "xgboost", "lightgbm", "catboost", "pytorch-lightning", "transformers", "optuna", "mlflow", "gradio", "huggingface-hub", "sentence-transformers", "fastai", "librosa", "gensim", "spacy", "nltk", "opencv-python", "imbalanced-learn", "sktime", "umap-learn", "fairlearn", "shap", "eli5", "snorkel", "thinc"],
    "๐ธ๏ธ Web Development": ["flask", "fastapi", "django", "requests", "beautifulsoup4", "aiohttp", "uvicorn", "gunicorn", "jinja2", "starlette", "websockets", "flask-restful", "django-rest-framework", "scrapy", "selenium", "playwright", "httpx", "rich", "pyramid", "bottle", "tornado", "dash", "plotly-dash", "streamlit", "gradio"],
    "๐๏ธ Database": ["sqlalchemy", "psycopg2", "pymongo", "mysql-connector-python", "sqlite3", "redis", "cassandra-driver", "pyodbc", "aiosqlite", "kafka-python", "motor", "neo4j", "influxdb", "elasticsearch", "pyarrow", "clickhouse-driver", "arangodb", "couchbase", "dgraph-python", "tinydb", "dataset"],
    "โ๏ธ Cloud": ["boto3", "google-cloud-storage", "azure-storage-blob", "kubernetes", "docker", "apache-libcloud", "pulumi", "awscli", "google-cloud-sdk", "azure-cli", "terraform", "openstack", "ansible", "salt", "chef", "cdktf", "serverless", "aws-cdk", "google-cloud-build", "azure-pipelines", "docker-compose", "moto", "localstack"],
    "๐ค IA & LLM": ["langchain", "openai", "diffusers", "sentence-transformers", "stable-diffusion", "huggingface-hub", "pyannote-audio", "whisper", "nltk", "spacy", "transformers", "accelerate", "datasets", "einops", "faiss-cpu", "bitsandbytes", "peft", "trl", "haystack", "llama-index", "deepsparse", "nanogpt", "autogpt", "babyagi","ml-agents", "tensorboardx"],
    "๐ ๏ธ Dev Tools": ["pytest", "flake8", "mypy", "black", "isort", "pylint", "tox", "pre-commit", "bandit", "coverage", "virtualenv", "pipenv", "poetry", "invoke", "nox", "twine", "wheel", "setuptools", "build", "debugpy", "pyinstrument", "memory-profiler", "cProfile", "pdbpp", "ipython"],
    "โ๏ธ Data Engineering": ["apache-airflow", "dask", "luigi", "pyspark", "ray", "prefect", "dbt-core", "pandas-gbq", "petl", "sqlalchemy-redshift", "fugue", "ibis-framework", "mindsdb", "koalas", "vaex-core", "feast", "flink", "beam", "kafka-python", "clickhouse-driver", "superset", "metabase", "trino"],
    "๐ Networking": ["requests", "httpx", "socketio", "paramiko", "netmiko", "scapy", "dnspython", "asyncssh", "tqdm", "gevent", "websocket-client", "pyserial", "grpcio", "aiozmq", "uvloop", "aiofiles", "urllib3", "fastsocket", "twisted", "aiohttp-socks", "aioredis", "websockets"],
    "๐ Security": ["cryptography", "pyjwt", "requests-oauthlib", "paramiko", "pyOpenSSL", "passlib", "hashlib", "bcrypt", "python-nmap", "sqlmap", "scapy", "yara-python", "pwn", "mitmproxy", "themis", "pycryptodome", "tpm2-py", "pyspy", "volatility3", "pefile", "dpkt"],
    "๐จ GUI": ["tkinter", "pyqt5", "kivy", "wxpython", "pygame", "dearpygui", "pyglet", "flet", "toga", "eel", "qt-material", "pyside6", "pywebview", "guizero", "remif", "taipy"],
    "๐งช Testing": ["unittest", "pytest-cov", "hypothesis", "behave", "locust", "nose", "selenium", "playwright", "mock", "freezegun", "ddt", "tox", "robotframework", "hypothesis", "vcrpy", "responses", "faker", "factory-boy", "coveragepy", "parameterized"],
    "๐งฐ Utilities": ["click", "typer", "argparse", "rich", "tqdm", "colorama", "schedule", "python-dotenv", "shutil", "pathlib", "arrow", "toml", "json5", "xmltodict", "humanize", "pendulum", "inflect", "unidecode", "boltons", "delegator.py"],
    "๐ Documentation": ["sphinx", "mkdocs", "pdoc", "readthedocs-sphinx-search", "numpydoc", "docutils", "recommonmark", "sphinx-rtd-theme", "furo", "m2r2", "autoapi", "plantuml", "pycco", "doc8", "griffe", "mistletoe", "marko"],
    "๐ฎ Game Dev": ["pygame", "pyglet", "arcade", "panda3d", "ursina", "cocos2d", "renpy", "pyopengl", "cymunk", "pytmx", "pybox2d", "pymunk", "moderngl", "glumpy", "glfw", "sdl2"],
    "๐น Finance": ["numpy-financial", "pandas-datareader", "scikit-portfolio", "yfinance", "finmarketpy", "quantstats", "TA-Lib", "riskfolio-optimization", "alphalens", "pyfolio", "zipline", "backtrader", "quantlib-python", "pyfin", "quandl", "vnpy"],
    "๐ค Robotics": ["rospy", "pybullet", "mujoco-py", "open3d", "numpy-stl", "pyserial", "opencv-python", "pymavlink", "robotframework", "webots", "vpython", "transforms3d", "trimesh", "pyrealsense2", "aiortc", "gymnasium", "stable-baselines3"],
    "๐งฌ Bioinformatics": ["biopython", "scikit-bio", "pysam", "pandas-genomics", "statsmodels", "pybedtools", "matplotlib", "seaborn", "pyteomics", "networkx", "pyfaidx", "lifelines", "ngslib", "bcbio-nextgen", "scanpy", "anndata", "gseapy", "deepchem", "methylpy"],
    "๐ญ Astronomy": ["astropy", "astroquery", "scikit-image", "photutils", "pyephem", "poliastro", "pyvista", "vtk", "healpy", "astroplan", "specutils", "sunpy", "gwpy", "lhorizon", "dustmaps", "reproject", "pymc3", "celerite"],
    "๐บ๏ธ Geospatial": ["geopandas", "rasterio", "shapely", "pyproj", "fiona", "cartopy", "folium", "geopy", "earthpy", "descartes", "osmnx", "geohash", "momepy", "pyvista", "whitebox", "earthengine-api", "gisalchemist", "mapclassify", "spatialpandas"],
    "๐ก IoT": ["pyserial", "paho-mqtt", "RPi.GPIO", "smbus2", "pysnmp", "can-utils", "zeroconf", "asyncio", "bluepy", "adafruit-circuitpython", "iotconnect", "thinger-io", "micropython", "homeassistant", "openhab", "tasmota", "esphome", "nodemcu"],
    "๐ป System": ["psutil", "subprocess32", "watchdog", "inotify", "pywin32", "sh", "delegator.py", "appdirs", "click", "typer", "pathlib", "colorama", "fire", "daemonize", "lsof", "ruamel.yaml", "py-cpuinfo", "platformdirs"]
}


def main():
    """Render the Streamlit UI and drive pip-command generation/download."""
    st.title("๐โจ Gemini2 Pip Gen Pro by [Elias Andrade](https://github.com/chaos4455)")
    st.markdown("Generate advanced pip install commands with ease! ๐")
    st.markdown("---")

    # Layout: narrow settings column + wide prompt/output column.
    col1, col2 = st.columns([1, 3])

    with col1:
        st.header("โ๏ธ Settings")
        with st.expander("โจ AI Settings"):
            model_name = st.selectbox("๐ค AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
            temperature = st.slider("๐ก๏ธ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
            top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.8, step=0.1, help="Adjust the AI's sampling.")
            top_k = st.slider("Top K", min_value=1, max_value=100, value=40, step=1, help="Adjust the AI's number of candidate tokens.")
            max_tokens = st.number_input("๐ Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")

        with st.expander("๐ Libraries & Groups"):
            # One multiselect per group; the dict maps group name -> picked libs.
            selected_libraries = {}
            for group, libraries in AVAILABLE_LIBRARIES.items():
                selected_libraries[group] = st.multiselect(f"{group} ", libraries, default=[],key=f"lib_{group}")
            selected_groups = st.multiselect("๐๏ธ Groups", list(AVAILABLE_LIBRARIES.keys()), default=[], help="Choose groups to include libraries.")

        with st.expander("โ๏ธ Pip Settings"):
            custom_requirements = st.text_input("โ Custom Libraries:", placeholder="Specific libraries or versions (e.g., requests==2.28.1, numpy>=1.23.0)", help="List specific libraries and versions.")
            specific_details = st.text_input("โน๏ธ Specific Details", placeholder="Special pip install options, edge cases", help="Add specific details for the pip command generation.")
            pip_options = st.text_input("โ๏ธ Pip Options", placeholder="Add custom pip options like --index-url or --no-cache", help="Add custom options to your pip command")
            target_os = st.selectbox("๐ฏ Target OS", ["Linux ๐ง", "Windows ๐ช"], index=0, help="Choose the target Operating System.")

    with col2:
        # User's base prompt describing what the pip command should do.
        prompt_base = st.text_input("๐ Describe the Pip Command:", placeholder="Ex: Install all libraries for web development and data analysis", key="prompt_base")

        if st.button("โจ Generate Pip Command"):
            # BUG FIX: flatten every library the user actually ticked. The old
            # code only collected libraries from groups that were *also* chosen
            # in the "Groups" multiselect, silently dropping the rest.
            selected_libs = [lib for libs in selected_libraries.values() for lib in libs]

            # BUG FIX: the previous guard tested `not selected_libraries`, but
            # that dict always holds one key per group and is therefore always
            # truthy — the empty-input error could never fire.
            if not prompt_base and not selected_libs and not selected_groups and not custom_requirements:
                st.error("โ ๏ธ Please enter a command description or select libraries/groups")
                return

            with st.spinner("โณ Generating command..."):
                ai_code = generate_pip_command(
                    prompt_base,
                    model_name,
                    temperature,
                    top_p,
                    top_k,
                    max_tokens,
                    selected_libs,
                    selected_groups,
                    custom_requirements,
                    specific_details,
                    pip_options,
                    target_os
                )

            if ai_code:
                # BUG FIX: this literal was split across two source lines
                # (a syntax error); rejoined into one markdown call.
                st.markdown("### โ Generated Command:")
                st.code(ai_code, language="text")  # show the full raw reply (all code blocks)

                # BUG FIX: sanitize the slug so prompts containing '/', ':' or
                # other path characters cannot break (or redirect) the file
                # writes done by parse_and_save_code; fall back to "command"
                # when the prompt is empty (libraries-only generation).
                short_title = re.sub(r'[^\w-]+', '_', prompt_base[:30].strip().lower()).strip('_') or "command"
                files = parse_and_save_code(ai_code, short_title)
                for key, value in files.items():
                    st.download_button(
                        label=f"โฌ๏ธ Download Command (.{(key)})",
                        data=value["code"],
                        file_name=value["name"],
                        mime="text/plain",
                    )
            else:
                st.error("โ Error generating the command. Check the connection with the AI and try again.")


if __name__ == "__main__":
    main()