Upload 3 files
- src/app (3).py +36 -0
- src/command_generator.py +38 -0
- src/library_summarizer.py +43 -0
src/app (3).py
ADDED
@@ -0,0 +1,36 @@
import streamlit as st
from library_summarizer import llm_lib_summarizer_v1
from command_generator import llm_lib_installer_v1

st.title("📂 Compatible Library Summarizer")

imported_libraries = st.text_area("Enter the libraries you are importing in your ipynb file:", height=200)
api_key = st.text_input("Enter your Groq API Key:", type="password")
task = st.text_input("Enter the task you are working on (e.g., data analysis, machine learning):", value="Basic ML task")
python_version = st.selectbox("Select your Python version:", options=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"], index=3)

# File uploader widget for the list of currently installed libraries
uploaded_file = st.file_uploader("Choose a file", type=["txt"])

if uploaded_file is not None:
    st.success(f"File uploaded: {uploaded_file.name}")

if st.button("Generate bash command"):
    if imported_libraries and uploaded_file:
        import_string = imported_libraries
        # Read the uploaded file (decode, since Streamlit returns bytes)
        installed_libraries = uploaded_file.read().decode("utf-8")
        with st.spinner("Generating ideal pip install command..."):
            try:
                summary = llm_lib_summarizer_v1(import_string=import_string, api_key=api_key)
                st.subheader("Summary of Libraries:")
                st.code(summary)
                st.subheader("Generated pip install command:")
                # Pass the Python version and task from the UI rather than hardcoded values
                command = llm_lib_installer_v1(imported_libs=summary, python_version=python_version, task=task, api_key=api_key, libraries=installed_libraries)
                st.markdown(command)
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.error("Please enter the imported libraries and upload a file.")
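For context, the uploaded `.txt` file appears intended to hold the environment's installed packages, one `name==version` per line. A minimal sketch of producing such a file, assuming a standard `pip` is on the PATH (the filename `installed.txt` is illustrative):

```python
# Hypothetical helper: dump the current environment's packages to a text file
# that can then be uploaded through the file uploader above.
import subprocess

# "pip freeze" lists installed packages as name==version lines
result = subprocess.run(["pip", "freeze"], capture_output=True, text=True, check=True)

with open("installed.txt", "w") as f:  # illustrative filename
    f.write(result.stdout)
```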
src/command_generator.py
ADDED
@@ -0,0 +1,38 @@
def llm_lib_installer_v1(imported_libs: str, python_version: str, task: str, api_key: str, libraries: str, model='openai/gpt-oss-20b'):
    '''
    imported_libs: all the imports made in the current session of the notebook
    python_version: the Python version of the target environment
    task: the type of task being executed in the session notebook
    api_key: generated Groq API key
    libraries: the libraries currently installed in the environment
    model: the Groq model to use. Default is GPT OSS
    '''
    from groq import Groq

    client = Groq(api_key=api_key)
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "user",
                "content": f'''Assume we have imported a few libraries in the Google Colab environment: {imported_libs}.
                Generate a manual !pip install command for the imported libraries for the task: {task}. Take into account the currently installed libraries: {libraries}.
                Make sure there are no conflicts in the library versions, even for libraries that are not imported; for instance, opencv depends on the version of numpy.
                Suggest a version to install for such dependencies as well, even if they are not imported. Check all library dependencies in the same way and prefer the latest versions where possible.
                Suggest downgrades only if there is no other choice, taking into consideration that the Python version in the environment is: {python_version}.
                Otherwise, use the latest versions of the libraries.
                Use the "~=" operator where needed to allow for backward compatibility between the installed libraries.'''
            }
        ],
        temperature=0,
        max_completion_tokens=8192,
        top_p=1,
        reasoning_effort="medium",
        stream=True,
        stop=None
    )

    # Accumulate the streamed response chunks into a single string
    response_content = ""
    for chunk in completion:
        response_content += chunk.choices[0].delta.content or ""

    return response_content
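For reference, a minimal usage sketch of `llm_lib_installer_v1` outside Streamlit. The `GROQ_API_KEY` environment variable and the sample inputs are illustrative assumptions, not part of the app:

```python
import os
from command_generator import llm_lib_installer_v1

# Illustrative inputs; in the app these come from the Streamlit widgets
command = llm_lib_installer_v1(
    imported_libs='{"libraries": ["sklearn", "pandas", "numpy"]}',
    python_version="3.10",
    task="Basic ML task",
    api_key=os.environ["GROQ_API_KEY"],  # assumed environment variable
    libraries="numpy==1.26.4\nopencv-python==4.9.0.80",  # sample pip-freeze lines
)
print(command)  # the model's suggested !pip install command, as markdown
```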
src/library_summarizer.py
ADDED
@@ -0,0 +1,43 @@
def llm_lib_summarizer_v1(import_string: str, api_key: str, model='openai/gpt-oss-20b'):
    '''
    import_string: all the imports made in the notebook
    api_key: generated Groq API key
    model: the Groq model to use. Default is GPT OSS
    '''
    from groq import Groq

    client = Groq(api_key=api_key)
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "user",
                "content": f'''Assume we are in the Google Colab environment and I have imported the following libraries and classes: {import_string}.
                Summarize, at a high level, the main library packages that were imported
                and the other major packages that depend on these libraries, even if they are not imported. Give the output as JSON. For example:
                sklearn.model_selection and sklearn.metrics should return only sklearn.

                The output should always look like this:
                ```json
                {{"libraries": ["sklearn", "pandas", "numpy", ...]}}
                ```
                Make sure to return only the JSON and nothing else.'''
            }
        ],
        temperature=0,
        max_completion_tokens=8192,
        top_p=1,
        reasoning_effort="medium",
        stream=True,
        stop=None
    )

    # Accumulate the streamed response chunks into a single string
    response_content = ""
    for chunk in completion:
        response_content += chunk.choices[0].delta.content or ""

    # Strip the leading ```json fence and the trailing ``` fence from the model output
    lines = response_content.split('\n')
    response_content = ''.join(lines[1:-1])
    return response_content
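Since the fence-stripping step returns a bare JSON string, here is a minimal sketch of consuming the result, assuming the model honored the requested format (the `GROQ_API_KEY` variable is an illustrative assumption):

```python
import json
import os
from library_summarizer import llm_lib_summarizer_v1

summary = llm_lib_summarizer_v1(
    import_string="import pandas as pd\nfrom sklearn.model_selection import train_test_split",
    api_key=os.environ["GROQ_API_KEY"],  # assumed environment variable
)

# The function already strips the ```json fences, so the remainder should parse
# directly; json.loads raises an error if the model deviated from the format.
libraries = json.loads(summary)["libraries"]
print(libraries)  # e.g. ["pandas", "sklearn"]
```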