import streamlit as st
from codeInsight.pipeline.prediction_pipeline import PredictionPipeline
from codeInsight.logger import logging
# Configure the browser tab and layout. Per the Streamlit API,
# st.set_page_config must be the first Streamlit command on the page.
st.set_page_config(
    page_title="CodeInsight Assistant",
    page_icon="🤖",
    layout="wide"
)
@st.cache_resource
def load_pipeline():
    """Load and cache the prediction pipeline.

    Decorated with st.cache_resource so the (expensive) model load runs
    once per server process rather than on every Streamlit rerun.

    Returns:
        PredictionPipeline on success, or None if loading fails; the
        caller is expected to check for None and degrade gracefully.
    """
    try:
        return PredictionPipeline()
    except Exception as e:
        # logging.exception records the full traceback, not just a static
        # message, so the root cause of the failure is visible in the logs.
        logging.exception("Failed to load pipeline in Streamlit app")
        st.error(f"Failed to load model pipeline: {e}")
        return None
# Load the (cached) pipeline once at startup; None signals a load failure
# and is handled further down before the chat UI is shown.
pipeline = load_pipeline()

# --- Page header ------------------------------------------------------
st.title("🤖 CodeInsight Assistant")
st.caption("Fine-tuned Phi-3-mini-128k-instruct model, ready to help with Python.")
st.divider()

st.markdown(
    "Welcome! This assistant is powered by a **fine-tuned Phi-3-mini-128k-instruct model** "
    "to help you with Python programming tasks. Ask it to generate code, "
    "explain concepts, or refactor snippets."
)
# --- Capabilities / limitations panels, rendered side by side ---------
# The triple-quoted markdown is kept unindented on purpose: leading
# spaces inside the literal would change the rendered markdown.
col1, col2 = st.columns(2)
with col1:
    st.subheader("🚀 What it can do")
    st.markdown("""
* **Generate Code:** "Write a function to merge two dictionaries."
* **Refactor/Debug:** "Can you make this 'for' loop more efficient?"
""")
with col2:
    st.subheader("⚠️ Important Limitations")
    st.warning("""
* The model may occasionally produce incorrect or inefficient code.
* Knowledge is limited to the model's training data.
""")
st.divider()

# --- Chat interface ---------------------------------------------------
if pipeline:
    # Seed the conversation once per browser session; st.session_state
    # persists across Streamlit's top-to-bottom script reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = [
            {"role": "assistant", "content": "Hello! How can I help you with Python programming today?"}
        ]

    # Re-render the whole history on every rerun (Streamlit redraws the
    # entire page each time the user interacts).
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    prompt = st.chat_input("Ask me to write python code")
    if prompt:
        # Record and immediately echo the user's message.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = pipeline.predict(prompt)
                # Strip the system prompt if the model echoed it verbatim.
                # NOTE(review): brittle — this literal must stay in sync
                # with the prompt used inside PredictionPipeline; confirm.
                cleaned_response = response.replace(
                    "You are a senior Python developer. Provide clear, correct, well-commented code.", ""
                ).strip()
                # NOTE(review): every reply is wrapped in a ```python fence,
                # including prose explanations — confirm this is intended.
                formatted_response = f"```python\n{cleaned_response}\n```"
                st.markdown(formatted_response)
                st.session_state.messages.append({"role": "assistant", "content": formatted_response})
else:
    st.error("The prediction pipeline could not be loaded. Please check the logs.")