Upload 13 files
Browse files- .env +3 -0
- app.py +20 -0
- crews/__init__.py +0 -0
- crews/__pycache__/__init__.cpython-312.pyc +0 -0
- crews/data_analysis_crew/__init__.py +0 -0
- crews/data_analysis_crew/__pycache__/__init__.cpython-312.pyc +0 -0
- crews/data_analysis_crew/__pycache__/data_analysis_crew.cpython-312.pyc +0 -0
- crews/data_analysis_crew/config/agents.yaml +27 -0
- crews/data_analysis_crew/config/tasks.yaml +15 -0
- crews/data_analysis_crew/data_analysis_crew.py +38 -0
- custom_ollama.py +12 -0
- fastapi_app.py +22 -0
- requirements.txt +5 -0
.env
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
LITELLM_PROVIDER=ollama
|
| 2 |
+
OLLAMA_MODEL=llama2
|
| 3 |
+
OLLAMA_BASE_URL=http://localhost:11434
|
app.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
import pandas as pd
from custom_ollama import query_ollama

# Page heading.
st.title("AI-Powered Data Analysis with Local Ollama (FastAPI)")

# Let the user provide a CSV dataset.
csv_upload = st.file_uploader("Upload your dataset (CSV)", type=["csv"])

if csv_upload:
    # Parse the upload and show a short preview so the user can sanity-check it.
    frame = pd.read_csv(csv_upload)
    st.write("### Preview of Your Dataset", frame.head())

    # Trigger the analysis on demand; the heavy work happens server-side
    # via query_ollama (a FastAPI endpoint).
    if st.button("Run Data Analysis"):
        with st.spinner("Analyzing your data..."):
            eda_prompt = f"Perform a basic exploratory data analysis on the following dataset:\n{frame.head()}"
            analysis = query_ollama(eda_prompt)
            st.success("Analysis complete!")
            st.write(analysis)
|
crews/__init__.py
ADDED
|
File without changes
|
crews/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (203 Bytes). View file
|
|
|
crews/data_analysis_crew/__init__.py
ADDED
|
File without changes
|
crews/data_analysis_crew/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (222 Bytes). View file
|
|
|
crews/data_analysis_crew/__pycache__/data_analysis_crew.cpython-312.pyc
ADDED
|
Binary file (1.49 kB). View file
|
|
|
crews/data_analysis_crew/config/agents.yaml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Agent definitions for the data-analysis crew.
# Every agent targets a local Ollama server running llama2.
agents:
  # Handles preprocessing before any analysis runs.
  - name: data_cleaning_agent
    role: "Data Cleaning Specialist"
    goal: "Preprocess and clean the dataset by handling missing values and outliers."
    backstory: "An expert in data preprocessing and cleaning, ensuring datasets are free of missing values and inconsistencies."
    llm:
      provider: "ollama"
      model: "llama2"
      base_url: "http://localhost:11434"

  # Produces exploratory statistics and visualizations.
  - name: eda_agent
    role: "EDA Specialist"
    goal: "Perform exploratory data analysis and generate insightful visualizations."
    backstory: "You specialize in finding hidden patterns and correlations within data using various visualization techniques."
    llm:
      provider: "ollama"
      model: "llama2"
      base_url: "http://localhost:11434"

  # Trains models and explains feature importance.
  - name: ml_insight_agent
    role: "Machine Learning Specialist"
    goal: "Train predictive models and provide feature importance analysis."
    backstory: "An AI expert focused on building, training, and interpreting machine learning models for accurate predictions."
    llm:
      provider: "ollama"
      model: "llama2"
      base_url: "http://localhost:11434"
|
crews/data_analysis_crew/config/tasks.yaml
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Task definitions for the data-analysis crew.
# Each task's `agent` field must match a `name` in agents.yaml.
tasks:
  # Step 1: preprocessing.
  - name: clean_data_task
    description: "Clean the dataset by handling missing values, outliers, and ensuring data consistency."
    expected_output: "A cleaned dataset with no missing values or outliers."
    agent: "data_cleaning_agent"

  # Step 2: exploratory analysis.
  - name: eda_task
    description: "Generate summary statistics, feature distributions, and correlation heatmaps to explore the dataset."
    expected_output: "Visualizations including summary statistics, distributions, and correlations."
    agent: "eda_agent"

  # Step 3: modeling.
  - name: ml_task
    description: "Train machine learning models and analyze feature importance based on the cleaned dataset."
    expected_output: "A trained machine learning model with feature importance analysis."
    agent: "ml_insight_agent"
|
crews/data_analysis_crew/data_analysis_crew.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml

from crewai import Crew, Agent, Task


def _load_section(path, key):
    """Read a YAML config file and return the list stored under *key*."""
    with open(path, 'r') as file:
        return yaml.safe_load(file)[key]


# Agent and task definitions live beside this module in config/.
agents_config = _load_section('crews/data_analysis_crew/config/agents.yaml', 'agents')
tasks_config = _load_section('crews/data_analysis_crew/config/tasks.yaml', 'tasks')

# Create agent instances.
# NOTE(review): the per-agent `llm` block in agents.yaml (provider/model/
# base_url) is currently ignored — llm=None leaves model selection to
# CrewAI's default Ollama integration. Confirm this is intentional.
agents = [
    Agent(
        role=agent_data['role'],
        goal=agent_data['goal'],
        backstory=agent_data['backstory'],
        llm=None,
    )
    for agent_data in agents_config
]

# Map each YAML `name` to its constructed Agent so tasks can reference
# agents symbolically.
agent_dict = {agent_data['name']: agent for agent_data, agent in zip(agents_config, agents)}

# Create task instances, wiring each task to its named agent.
tasks = [
    Task(
        description=task_data['description'],
        expected_output=task_data['expected_output'],
        agent=agent_dict[task_data['agent']],
    )
    for task_data in tasks_config
]

# The crew workflow over all agents and tasks.
crew = Crew(
    agents=agents,
    tasks=tasks
)
|
custom_ollama.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests


# Function to call the FastAPI server
def query_ollama(prompt, timeout=60):
    """Send *prompt* to the local FastAPI generation endpoint.

    Parameters
    ----------
    prompt : str
        Text prompt forwarded to the model server.
    timeout : float, optional
        Seconds to wait for the HTTP response (default 60).
        Without a timeout, requests.post can block forever if the
        server hangs — model inference can be slow, so the default
        is generous.

    Returns
    -------
    str
        The generated text from the server's JSON payload.

    Raises
    ------
    Exception
        If the server responds with a non-200 status code; the server's
        response body is included in the message.
    """
    url = "http://localhost:8000/generate/"
    payload = {"prompt": prompt}
    response = requests.post(url, json=payload, timeout=timeout)

    if response.status_code == 200:
        return response.json()["response"]
    else:
        raise Exception(f"Failed to generate response: {response.text}")
|
fastapi_app.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import pipeline

# Initialize FastAPI app
app = FastAPI()


# Pydantic schema for input data
class Query(BaseModel):
    prompt: str


# Load a Hugging Face pipeline once at startup — model loading is
# expensive and must not happen per-request.
model = pipeline("text-generation", model="gpt2")


# Define a POST endpoint for processing queries
@app.post("/generate/")
async def generate_text(query: Query):
    """Generate text for the given prompt.

    Returns {"response": <generated text>}; any failure surfaces as
    HTTP 500 with the error message as detail.
    """
    try:
        # max_new_tokens bounds only the *generated* tokens. The previous
        # max_length=150 also counted the prompt, so long prompts (e.g. the
        # dataframe dumps sent by app.py) could be truncated or yield no
        # new text at all.
        response = model(query.prompt, max_new_tokens=150, num_return_sequences=1)
        return {"response": response[0]["generated_text"]}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi[all]
uvicorn
transformers
pydantic
streamlit
pandas
requests
crewai
pyyaml
|