Spaces:
Sleeping
Sleeping
# app.py
# Imports grouped per PEP 8: stdlib / third-party / local package modules.
# NOTE(review): json, Path, autogen, GPTVectorStoreIndex, Document, and the
# sklearn metrics are not referenced in this file's visible code — presumably
# used by code outside this view; kept rather than removed.
import json
from pathlib import Path

import autogen
import pandas as pd
import streamlit as st
from llama_index import GPTVectorStoreIndex, Document
from sklearn.metrics import accuracy_score, precision_score, recall_score

from modules.data_ingestion import DataIngestionModule
from modules.evaluation import EvaluationModule
from modules.prompt_engineering import PromptEngineeringModule
class LlamaDeveloperAid:
    """Streamlit front-end that wires together the three app modules:
    data ingestion, prompt engineering, and evaluation.

    Navigation is a sidebar selectbox; the chosen page's render method is
    invoked on each Streamlit rerun. Indexed data is shared between pages
    via ``st.session_state['indexed_data']``.
    """

    def __init__(self):
        # set_page_config must run before any other Streamlit call,
        # hence it lives here rather than in main().
        st.set_page_config(page_title="Llama Developer Aid", layout="wide")
        self.data_module = DataIngestionModule()
        self.prompt_module = PromptEngineeringModule()
        self.eval_module = EvaluationModule()

    def main(self):
        """Render the app: title, sidebar navigation, and the selected page."""
        st.title("Llama Developer Aid")
        # Sidebar for navigation
        page = st.sidebar.selectbox(
            "Select Module",
            ["Data Ingestion", "Prompt Engineering", "Evaluation"],
        )
        if page == "Data Ingestion":
            self.render_data_ingestion()
        elif page == "Prompt Engineering":
            self.render_prompt_engineering()
        else:
            self.render_evaluation()

    def render_data_ingestion(self):
        """Upload a dataset, preview it, and index it for later evaluation.

        On success the indexed result is stored in
        ``st.session_state['indexed_data']`` so the Evaluation page can use it.
        """
        st.header("Data Ingestion Module")
        uploaded_file = st.file_uploader(
            "Upload your dataset", type=["csv", "xlsx", "json"]
        )
        if uploaded_file:
            df = self.data_module.load_data(uploaded_file)
            st.dataframe(df)
            if st.button("Process and Index Data"):
                with st.spinner("Processing..."):
                    indexed_data = self.data_module.index_data(df)
                st.success("Data processed and indexed successfully!")
                st.session_state['indexed_data'] = indexed_data

    def render_prompt_engineering(self):
        """Let the user pick a prompt template, edit it, and generate a response."""
        st.header("Prompt Engineering Module")
        templates = self.prompt_module.load_templates()
        selected_template = st.selectbox(
            "Select Prompt Template", list(templates.keys())
        )
        if selected_template:
            # Each template is assumed to be a mapping with a 'prompt' key
            # (see load_templates in modules.prompt_engineering — TODO confirm).
            template = templates[selected_template]
            prompt_text = st.text_area("Customize Prompt", template['prompt'])
            if st.button("Generate Response"):
                with st.spinner("Generating..."):
                    response = self.prompt_module.generate_response(prompt_text)
                st.write("Generated Response:", response)

    def render_evaluation(self):
        """Run the selected metrics against the indexed data from ingestion.

        Requires the Data Ingestion page to have populated
        ``st.session_state['indexed_data']`` first.
        """
        st.header("Evaluation Module")
        if 'indexed_data' not in st.session_state:
            st.warning("Please process data in the Data Ingestion module first")
            return
        metrics = st.multiselect(
            "Select Evaluation Metrics",
            ["Accuracy", "Precision", "Recall"],
        )
        if st.button("Run Evaluation"):
            # Guard: previously an empty selection was passed straight to
            # evaluate_model; warn the user instead of evaluating nothing.
            if not metrics:
                st.warning("Select at least one evaluation metric first")
                return
            with st.spinner("Evaluating..."):
                results = self.eval_module.evaluate_model(
                    st.session_state['indexed_data'],
                    metrics,
                )
            st.write("Evaluation Results:", results)
# Script entry point: construct the app (which configures the page) and
# render it. Guarded so importing this module has no side effects.
if __name__ == "__main__":
    app = LlamaDeveloperAid()
    app.main()