File size: 3,315 Bytes
15fd8dd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3515090
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# app.py
import streamlit as st
import pandas as pd
from llama_index import GPTVectorStoreIndex, Document
from sklearn.metrics import accuracy_score, precision_score, recall_score
import json
import autogen
from pathlib import Path
from modules.data_ingestion import DataIngestionModule
from modules.prompt_engineering import PromptEngineeringModule
from modules.evaluation import EvaluationModule

class LlamaDeveloperAid:
    """Streamlit front-end that wires the data-ingestion, prompt-engineering,
    and evaluation modules behind a single sidebar navigation menu.

    One instance is created per Streamlit script run; each render_* method
    draws one page of the app.
    """

    def __init__(self):
        # Page config must be issued before any other Streamlit command.
        st.set_page_config(page_title="Llama Developer Aid", layout="wide")
        self.data_module = DataIngestionModule()
        self.prompt_module = PromptEngineeringModule()
        self.eval_module = EvaluationModule()

    def main(self):
        """Render the app title and dispatch to the selected module page."""
        st.title("Llama Developer Aid")

        choice = st.sidebar.selectbox(
            "Select Module",
            ["Data Ingestion", "Prompt Engineering", "Evaluation"],
        )

        # Dispatch table instead of an if/elif chain; evaluation is the
        # fallback, matching the original else-branch.
        pages = {
            "Data Ingestion": self.render_data_ingestion,
            "Prompt Engineering": self.render_prompt_engineering,
        }
        pages.get(choice, self.render_evaluation)()

    def render_data_ingestion(self):
        """Upload a dataset, preview it, and index it on demand.

        On success the indexed result is stashed in ``st.session_state``
        under 'indexed_data' for the evaluation page to pick up.
        """
        st.header("Data Ingestion Module")

        upload = st.file_uploader("Upload your dataset", type=["csv", "xlsx", "json"])
        if not upload:
            return

        frame = self.data_module.load_data(upload)
        st.dataframe(frame)

        if not st.button("Process and Index Data"):
            return
        with st.spinner("Processing..."):
            st.session_state['indexed_data'] = self.data_module.index_data(frame)
            st.success("Data processed and indexed successfully!")

    def render_prompt_engineering(self):
        """Pick a prompt template, let the user customize it, and run it."""
        st.header("Prompt Engineering Module")

        templates = self.prompt_module.load_templates()
        chosen = st.selectbox("Select Prompt Template", list(templates.keys()))
        if not chosen:
            return

        # Pre-fill the editor with the template's stored prompt text.
        edited_prompt = st.text_area("Customize Prompt", templates[chosen]['prompt'])

        if not st.button("Generate Response"):
            return
        with st.spinner("Generating..."):
            reply = self.prompt_module.generate_response(edited_prompt)
            st.write("Generated Response:", reply)

    def render_evaluation(self):
        """Run the chosen metrics against the previously indexed dataset."""
        st.header("Evaluation Module")

        # Evaluation requires data indexed by the ingestion page first.
        if 'indexed_data' not in st.session_state:
            st.warning("Please process data in the Data Ingestion module first")
            return

        chosen_metrics = st.multiselect(
            "Select Evaluation Metrics",
            ["Accuracy", "Precision", "Recall"],
        )

        if not st.button("Run Evaluation"):
            return
        with st.spinner("Evaluating..."):
            outcome = self.eval_module.evaluate_model(
                st.session_state['indexed_data'],
                chosen_metrics,
            )
            st.write("Evaluation Results:", outcome)

if __name__ == "__main__":
    # Build the app and hand control to Streamlit's render loop.
    LlamaDeveloperAid().main()