Ashar086 commited on
Commit
15fd8dd
·
verified ·
1 Parent(s): ece2261

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -0
app.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import streamlit as st
3
+ import pandas as pd
4
+ from llama_index import GPTVectorStoreIndex, Document
5
+ from sklearn.metrics import accuracy_score, precision_score, recall_score
6
+ import json
7
+ import autogen
8
+ from pathlib import Path
9
+ from modules.data_ingestion import DataIngestionModule
10
+ from modules.prompt_engineering import PromptEngineeringModule
11
+ from modules.evaluation import EvaluationModule
12
+
13
class LlamaDeveloperAid:
    """Streamlit front-end that wires together the project's data-ingestion,
    prompt-engineering, and evaluation modules behind a sidebar selector."""

    def __init__(self):
        # set_page_config must be the first Streamlit call of a script run.
        st.set_page_config(page_title="Llama Developer Aid", layout="wide")
        self.data_module = DataIngestionModule()
        self.prompt_module = PromptEngineeringModule()
        self.eval_module = EvaluationModule()

    def main(self):
        """Render the page title and dispatch to the module picked in the sidebar."""
        st.title("Llama Developer Aid")

        choice = st.sidebar.selectbox(
            "Select Module",
            ["Data Ingestion", "Prompt Engineering", "Evaluation"]
        )

        # Dispatch table; anything that is not one of the first two pages
        # falls through to the evaluation view (same as the original else).
        renderers = {
            "Data Ingestion": self.render_data_ingestion,
            "Prompt Engineering": self.render_prompt_engineering,
        }
        renderers.get(choice, self.render_evaluation)()

    def render_data_ingestion(self):
        """Upload a dataset, preview it, and index it into session state."""
        st.header("Data Ingestion Module")

        uploaded = st.file_uploader("Upload your dataset", type=["csv", "xlsx", "json"])
        if not uploaded:
            return

        frame = self.data_module.load_data(uploaded)
        st.dataframe(frame)

        if st.button("Process and Index Data"):
            with st.spinner("Processing..."):
                index = self.data_module.index_data(frame)
                st.success("Data processed and indexed successfully!")
                # Stash the index so the Evaluation page can pick it up.
                st.session_state['indexed_data'] = index

    def render_prompt_engineering(self):
        """Choose a prompt template, let the user customize it, and generate."""
        st.header("Prompt Engineering Module")

        templates = self.prompt_module.load_templates()
        chosen = st.selectbox("Select Prompt Template", list(templates.keys()))
        if not chosen:
            return

        prompt_text = st.text_area("Customize Prompt", templates[chosen]['prompt'])

        if st.button("Generate Response"):
            with st.spinner("Generating..."):
                reply = self.prompt_module.generate_response(prompt_text)
                st.write("Generated Response:", reply)

    def render_evaluation(self):
        """Run the selected metrics over the previously indexed data."""
        st.header("Evaluation Module")

        # Nothing to evaluate until the ingestion page has stored an index.
        if 'indexed_data' not in st.session_state:
            st.warning("Please process data in the Data Ingestion module first")
            return

        chosen_metrics = st.multiselect(
            "Select Evaluation Metrics",
            ["Accuracy", "Precision", "Recall"]
        )

        if st.button("Run Evaluation"):
            with st.spinner("Evaluating..."):
                outcome = self.eval_module.evaluate_model(
                    st.session_state['indexed_data'],
                    chosen_metrics
                )
                st.write("Evaluation Results:", outcome)
90
if __name__ == "__main__":
    # Build the app and hand control to its page dispatcher.
    LlamaDeveloperAid().main()
93
+ Last edited just now