chaos4455's picture
Update app.py
3fbd71c verified
raw
history blame
16.2 kB
import streamlit as st
import google.generativeai as genai
import re
import os
import platform
# --- Secret key and Google Gemini API configuration ---
API_KEY = st.secrets["GOOGLE_API_KEY"]
# Bug fix: the key was read from st.secrets but never handed to the SDK, so
# every genai call relied on ambient credentials (e.g. an env var) and would
# fail in a plain Streamlit-secrets deployment. Register it explicitly.
genai.configure(api_key=API_KEY)

# Page configuration (must run before any other Streamlit call renders output).
st.set_page_config(page_title="๐Ÿโœจ Gemini2 Pip Gen Pro", page_icon="๐Ÿโœจ", layout="wide")
# --- Helper Functions ---
def send_message_to_model(message, model_name, temperature, top_p, top_k, max_tokens):
    """Send a single message to the Gemini model and return its text reply.

    Returns the response text on success, or None after surfacing the error
    in the Streamlit UI.
    """
    # Sampling configuration forwarded straight from the UI sliders.
    generation_config = {
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "response_mime_type": "text/plain",
        "max_output_tokens": max_tokens,
    }
    try:
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config,
        )
        # A fresh chat per call — no conversation history is retained.
        chat = model.start_chat(history=[])
        return chat.send_message(message).text
    except Exception as exc:
        # Broad catch is deliberate: this is the UI boundary; show and continue.
        st.error(f"โŒ Error communicating with the AI: {exc}")
        return None
def generate_pip_command(prompt_base, model_name, temperature, top_p, top_k, max_tokens, selected_libraries, selected_groups, custom_requirements, specific_details, pip_options, target_os):
    """Generates a pip command based on user settings.

    Assembles one large Markdown prompt from all UI inputs and delegates the
    model call to send_message_to_model.

    Returns:
        The model's Markdown reply text (expected to contain ```pip, ```bash
        and ```powershell fenced blocks), or None if the model call failed.
    """
    # NOTE(review): the prompt asks the model to "use the history of the
    # conversations", but send_message_to_model starts a fresh chat each call,
    # so no history actually exists — confirm whether incremental behavior
    # was intended.
    prompt = f"""
You are an expert Python development assistant. Your task is to generate a complete and efficient pip command based on the following description:
**Goal:** Create the most complete, detailed, and efficient pip install command possible, considering all variables, edge cases, and potential scenarios.
**Target Operating System:** {target_os}
**Command Description:** {prompt_base}
**Selected Libraries:** {selected_libraries if selected_libraries else "None"}
**Selected Groups:** {selected_groups if selected_groups else "None"}
**Custom Requirements:** {custom_requirements if custom_requirements else "None"}
**Specific Details:** {specific_details if specific_details else "None"}
**Pip Options:** {pip_options if pip_options else "None"}
**Response Format:**
- Respond in Markdown format, including a pip command code block, a bash code block and powershell code block with its original formatting, without line breaks.
- The pip command code block must be delimited by ```pip and ```.
- The bash code block must be delimited by ```bash and ```.
- The powershell code block must be delimited by ```powershell and ```.
- Do not include comments, explanations, or any other text outside the code block.
- The code must maintain its full vertical formatting, respecting indentation and line breaks.
- Explore different approaches, techniques, and advanced practices, always prioritizing security and efficiency.
- Use advanced pip resources such as version constraints, index options, and requirements files when necessary.
- If a specific version of a library is specified, use it.
- Unless the user specifies otherwise, use the most current versions of the libraries and pip, using and following best practices.
- Use incremental reasoning to add improvements, expansions, and considerations to your code.
- Use the history of the conversations so that the response is incremental.
- If the target OS is Windows, use pip commands compatible with Windows, and if the target OS is Linux, use pip commands compatible with Linux
**Important:**
- Generate only one command at a time.
- Create the longest, most complete, and detailed command possible to cover a wide range of possibilities and scenarios.
- Consider all the details of the request, expanding the response and improving the command.
- If the prompt asks to install a library that is not in a given group, install it in the command, but include in the prompt that library is not in the selected groups.
- If a version of a library is specified, install it using a version constraint.
- Consider if the user needs a requirements.txt file or not and if needed use it in your command.
"""
    response = send_message_to_model(prompt, model_name, temperature, top_p, top_k, max_tokens)
    return response
def parse_and_save_code(ai_code, short_title):
    """Parse fenced code blocks out of the AI's Markdown reply and save each to a file.

    Looks for ```pip, ```bash and ```powershell fenced blocks. The pip block
    is mandatory — if the fence is missing, the whole reply is used as the pip
    command. The bash and powershell blocks are optional.

    Args:
        ai_code: Raw Markdown text returned by the model.
        short_title: Slug (derived from the user's prompt) used in file names.

    Returns:
        Dict mapping block kind ("pip"/"bash"/"powershell") to
        {"name": <file name>, "code": <block content>}. Each file is also
        written to the current working directory.
    """
    pip_match = re.search(r'```pip\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    bash_match = re.search(r'```bash\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)
    ps1_match = re.search(r'```powershell\s*(.*?)\s*```', ai_code, re.DOTALL | re.IGNORECASE)

    # Fall back to the full reply when the model did not fence the pip command.
    pip_code = pip_match.group(1).strip() if pip_match else ai_code.strip()
    bash_code = bash_match.group(1).strip() if bash_match else None
    ps1_code = ps1_match.group(1).strip() if ps1_match else None

    # short_title originates from free-form user input: strip anything that is
    # not filename-safe so characters like "/" or ".." cannot escape the cwd.
    safe_title = re.sub(r'[^\w-]', '_', short_title)
    base_file_name = f"pip_command_{safe_title}"

    files = {"pip": {"name": f"{base_file_name}.txt", "code": pip_code}}
    if bash_code:
        files["bash"] = {"name": f"{base_file_name}.sh", "code": bash_code}
    if ps1_code:
        files["powershell"] = {"name": f"{base_file_name}.ps1", "code": ps1_code}

    for value in files.values():
        # Explicit UTF-8: generated commands may contain non-ASCII characters,
        # which would fail under a legacy locale default encoding (e.g. cp1252).
        with open(value["name"], "w", encoding="utf-8") as f:
            f.write(value["code"])
    return files
def main():
    """Render the Streamlit UI and drive the pip-command generation flow."""
    st.title("๐Ÿโœจ Gemini2 Pip Gen Pro by [Elias Andrade](https://github.com/chaos4455)")
    st.markdown("Generate advanced pip install commands with ease! ๐Ÿš€")
    st.markdown("---")

    # Layout in columns (settings column and main area)
    col1, col2 = st.columns([1, 3])

    with col1:
        st.header("โš™๏ธ Settings")
        with st.expander("โœจ AI Settings"):
            model_name = st.selectbox("๐Ÿค– AI Model", ["gemini-2.0-flash-exp", "gemini-1.5-flash"], index=0, help="Choose the AI model.")
            temperature = st.slider("๐ŸŒก๏ธ Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.1, help="Adjust the AI's creativity.")
            top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.8, step=0.1, help="Adjust the AI's sampling.")
            top_k = st.slider("Top K", min_value=1, max_value=100, value=40, step=1, help="Adjust the AI's number of candidate tokens.")
            max_tokens = st.number_input("๐Ÿ“ Max Tokens", min_value=128, max_value=8192, value=8192, step=128, help="Adjust the maximum size of the response.")
        with st.expander("๐Ÿ“ Libraries & Groups"):
            # Curated catalog: group label -> candidate library names.
            available_libraries = {
                "๐Ÿ“Š Data Analysis": ["pandas", "numpy", "scipy", "matplotlib", "seaborn", "plotly", "bokeh", "statsmodels", "scikit-image", "geopandas", "altair", "holoviews", "datashader", "missingno", "vaex", "dask", "xarray", "polars", "arrow", "numba", "cudf", "cupy", "streamz", "panel", "hvplot"],
                "๐Ÿง  Machine Learning": ["scikit-learn", "tensorflow", "torch", "keras", "xgboost", "lightgbm", "catboost", "pytorch-lightning", "transformers", "optuna", "mlflow", "gradio", "huggingface-hub", "sentence-transformers", "fastai", "librosa", "gensim", "spacy", "nltk", "opencv-python", "imbalanced-learn", "sktime", "umap-learn", "fairlearn", "shap", "eli5", "snorkel", "thinc"],
                "๐Ÿ•ธ๏ธ Web Development": ["flask", "fastapi", "django", "requests", "beautifulsoup4", "aiohttp", "uvicorn", "gunicorn", "jinja2", "starlette", "websockets", "flask-restful", "django-rest-framework", "scrapy", "selenium", "playwright", "httpx", "rich", "pyramid", "bottle", "tornado", "dash", "plotly-dash", "streamlit", "gradio"],
                "๐Ÿ—„๏ธ Database": ["sqlalchemy", "psycopg2", "pymongo", "mysql-connector-python", "sqlite3", "redis", "cassandra-driver", "pyodbc", "aiosqlite", "kafka-python", "motor", "neo4j", "influxdb", "elasticsearch", "pyarrow", "clickhouse-driver", "arangodb", "couchbase", "dgraph-python", "tinydb", "dataset"],
                "โ˜๏ธ Cloud": ["boto3", "google-cloud-storage", "azure-storage-blob", "kubernetes", "docker", "apache-libcloud", "pulumi", "awscli", "google-cloud-sdk", "azure-cli", "terraform", "openstack", "ansible", "salt", "chef", "cdktf", "serverless", "aws-cdk", "google-cloud-build", "azure-pipelines", "docker-compose", "moto", "localstack"],
                "๐Ÿค– IA & LLM": ["langchain", "openai", "diffusers", "sentence-transformers", "stable-diffusion", "huggingface-hub", "pyannote-audio", "whisper", "nltk", "spacy", "transformers", "accelerate", "datasets", "einops", "faiss-cpu", "bitsandbytes", "peft", "trl", "haystack", "llama-index", "deepsparse", "nanogpt", "autogpt", "babyagi","ml-agents", "tensorboardx"],
                "๐Ÿ› ๏ธ Dev Tools": ["pytest", "flake8", "mypy", "black", "isort", "pylint", "tox", "pre-commit", "bandit", "coverage", "virtualenv", "pipenv", "poetry", "invoke", "nox", "twine", "wheel", "setuptools", "build", "debugpy", "pyinstrument", "memory-profiler", "cProfile", "pdbpp", "ipython"],
                "โš™๏ธ Data Engineering": ["apache-airflow", "dask", "luigi", "pyspark", "ray", "prefect", "dbt-core", "pandas-gbq", "petl", "sqlalchemy-redshift", "fugue", "ibis-framework", "mindsdb", "koalas", "vaex-core", "feast", "flink", "beam", "kafka-python", "clickhouse-driver", "superset", "metabase", "trino"],
                "๐ŸŒ Networking": ["requests", "httpx", "socketio", "paramiko", "netmiko", "scapy", "dnspython", "asyncssh", "tqdm", "gevent", "websocket-client", "pyserial", "grpcio", "aiozmq", "uvloop", "aiofiles", "urllib3", "fastsocket", "twisted", "aiohttp-socks", "aioredis", "websockets"],
                "๐Ÿ”’ Security": ["cryptography", "pyjwt", "requests-oauthlib", "paramiko", "pyOpenSSL", "passlib", "hashlib", "bcrypt", "python-nmap", "sqlmap", "scapy", "yara-python", "pwn", "mitmproxy", "themis", "pycryptodome", "tpm2-py", "pyspy", "volatility3", "pefile", "dpkt"],
                "๐ŸŽจ GUI": ["tkinter", "pyqt5", "kivy", "wxpython", "pygame", "dearpygui", "pyglet", "flet", "toga", "eel", "qt-material", "pyside6", "pywebview", "guizero", "remif", "taipy"],
                "๐Ÿงช Testing": ["unittest", "pytest-cov", "hypothesis", "behave", "locust", "nose", "selenium", "playwright", "mock", "freezegun", "ddt", "tox", "robotframework", "hypothesis", "vcrpy", "responses", "faker", "factory-boy", "coveragepy", "parameterized"],
                "๐Ÿงฐ Utilities": ["click", "typer", "argparse", "rich", "tqdm", "colorama", "schedule", "python-dotenv", "shutil", "pathlib", "arrow", "toml", "json5", "xmltodict", "humanize", "pendulum", "inflect", "unidecode", "boltons", "delegator.py"],
                "๐Ÿ“š Documentation": ["sphinx", "mkdocs", "pdoc", "readthedocs-sphinx-search", "numpydoc", "docutils", "recommonmark", "sphinx-rtd-theme", "furo", "m2r2", "autoapi", "plantuml", "pycco", "doc8", "griffe", "mistletoe", "marko"],
                "๐ŸŽฎ Game Dev": ["pygame", "pyglet", "arcade", "panda3d", "ursina", "cocos2d", "renpy", "pyopengl", "cymunk", "pytmx", "pybox2d", "pymunk", "moderngl", "glumpy", "glfw", "sdl2"],
                "๐Ÿ’น Finance": ["numpy-financial", "pandas-datareader", "scikit-portfolio", "yfinance", "finmarketpy", "quantstats", "TA-Lib", "riskfolio-optimization", "alphalens", "pyfolio", "zipline", "backtrader", "quantlib-python", "pyfin", "quandl", "vnpy"],
                "๐Ÿค– Robotics": ["rospy", "pybullet", "mujoco-py", "open3d", "numpy-stl", "pyserial", "opencv-python", "pymavlink", "robotframework", "webots", "vpython", "transforms3d", "trimesh", "pyrealsense2", "aiortc", "gymnasium", "stable-baselines3"],
                "๐Ÿงฌ Bioinformatics": ["biopython", "scikit-bio", "pysam", "pandas-genomics", "statsmodels", "pybedtools", "matplotlib", "seaborn", "pyteomics", "networkx", "pyfaidx", "lifelines", "ngslib", "bcbio-nextgen", "scanpy", "anndata", "gseapy", "deepchem", "methylpy"],
                "๐Ÿ”ญ Astronomy": ["astropy", "astroquery", "scikit-image", "photutils", "pyephem", "poliastro", "pyvista", "vtk", "healpy", "astroplan", "specutils", "sunpy", "gwpy", "lhorizon", "dustmaps", "reproject", "pymc3", "celerite"],
                "๐Ÿ—บ๏ธ Geospatial": ["geopandas", "rasterio", "shapely", "pyproj", "fiona", "cartopy", "folium", "geopy", "earthpy", "descartes", "osmnx", "geohash", "momepy", "pyvista", "whitebox", "earthengine-api", "gisalchemist", "mapclassify", "spatialpandas"],
                "๐Ÿ’ก IoT": ["pyserial", "paho-mqtt", "RPi.GPIO", "smbus2", "pysnmp", "can-utils", "zeroconf", "asyncio", "bluepy", "adafruit-circuitpython", "iotconnect", "thinger-io", "micropython", "homeassistant", "openhab", "tasmota", "esphome", "nodemcu"],
                "๐Ÿ’ป System": ["psutil", "subprocess32", "watchdog", "inotify", "pywin32", "sh", "delegator.py", "appdirs", "click", "typer", "pathlib", "colorama", "fire", "daemonize", "lsof", "ruamel.yaml", "py-cpuinfo", "platformdirs"]
            }
            # One multiselect per group; the dict therefore ALWAYS has every
            # group as a key (values may be empty lists).
            selected_libraries = {}
            for group, libraries in available_libraries.items():
                selected_libraries[group] = st.multiselect(f"{group} ", libraries, default=[], key=f"lib_{group}")
            selected_groups = st.multiselect("๐Ÿ—‚๏ธ Groups", list(available_libraries.keys()), default=[], help="Choose groups to include libraries.")
        with st.expander("โš™๏ธ Pip Settings"):
            custom_requirements = st.text_input("โž• Custom Libraries:", placeholder="Specific libraries or versions (e.g., requests==2.28.1, numpy>=1.23.0)", help="List specific libraries and versions.")
            specific_details = st.text_input("โ„น๏ธ Specific Details", placeholder="Special pip install options, edge cases", help="Add specific details for the pip command generation.")
            pip_options = st.text_input("โš™๏ธ Pip Options", placeholder="Add custom pip options like --index-url or --no-cache", help="Add custom options to your pip command")
            target_os = st.selectbox("๐ŸŽฏ Target OS", ["Linux ๐Ÿง", "Windows ๐ŸชŸ"], index=0, help="Choose the target Operating System.")

    with col2:
        # User's base prompt
        prompt_base = st.text_input("๐Ÿ“ Describe the Pip Command:", placeholder="Ex: Install all libraries for web development and data analysis", key="prompt_base")
        if st.button("โœจ Generate Pip Command"):
            # Bug fix: `not selected_libraries` was used here, but the dict
            # always contains every group key, so it was always truthy and the
            # empty-input guard could never fire. Check the VALUES instead.
            any_libs_picked = any(selected_libraries.values())
            if not prompt_base and not any_libs_picked and not selected_groups and not custom_requirements:
                st.error("โš ๏ธ Please enter a command description or select libraries/groups")
                return

            # Bug fix: previously only libraries from groups ALSO chosen in the
            # "Groups" multiselect were forwarded, silently dropping libraries
            # the user had explicitly ticked elsewhere. Collect every
            # individually selected library (deduplicated, order preserved).
            selected_libs = []
            for libs in selected_libraries.values():
                for lib in libs:
                    if lib not in selected_libs:
                        selected_libs.append(lib)

            with st.spinner("โณ Generating command..."):
                ai_code = generate_pip_command(
                    prompt_base,
                    model_name,
                    temperature,
                    top_p,
                    top_k,
                    max_tokens,
                    selected_libs,
                    selected_groups,
                    custom_requirements,
                    specific_details,
                    pip_options,
                    target_os
                )
            if ai_code:
                st.markdown("### โœ… Generated Command:")
                st.code(ai_code, language="text")  # show the full raw reply
                # Robustness: fall back to a fixed slug when the prompt is
                # blank so file names never degenerate to "pip_command_".
                short_title = prompt_base[:30].strip().replace(" ", "_").lower() or "command"
                files = parse_and_save_code(ai_code, short_title)
                for key, value in files.items():
                    st.download_button(
                        label=f"โฌ‡๏ธ Download Command (.{key})",
                        data=value["code"],
                        file_name=value["name"],
                        mime="text/plain",
                    )
            else:
                st.error("โŒ Error generating the command. Check the connection with the AI and try again.")
# Script entry point: launch the Streamlit app only when run directly,
# not when imported as a module.
if __name__ == "__main__":
    main()