Spaces:
Sleeping
Sleeping
Upload 4 files
Browse files- app.py +163 -0
- portfolio_data.md +96 -0
- requirements.txt +22 -0
- system_prompt.txt +21 -0
app.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Install required libraries.
|
| 2 |
+
# Files, API key, etc
|
| 3 |
+
import os
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
+
import gradio as gr
|
| 7 |
+
|
| 8 |
+
# LangChain imports
|
| 9 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
| 10 |
+
from langchain_community.document_loaders import UnstructuredMarkdownLoader
|
| 11 |
+
from langchain_text_splitters import MarkdownHeaderTextSplitter
|
| 12 |
+
from langchain_community.vectorstores import Chroma
|
| 13 |
+
from langchain_classic.chains.combine_documents import create_stuff_documents_chain
|
| 14 |
+
from langchain_classic.chains.retrieval import create_retrieval_chain
|
| 15 |
+
from langchain_core.prompts import PromptTemplate
|
| 16 |
+
from huggingface_hub import InferenceClient
|
| 17 |
+
from langchain_core.language_models.llms import LLM
|
| 18 |
+
from typing import Optional, List, Any
|
| 19 |
+
|
| 20 |
+
# --- Portfolio knowledge base setup ---
# Load environment variables first.
# load_dotenv() # This will need commented out on HuggingFace. To run the code yourself on your device, keep this and comment out the api_key var
# via os.getenv
api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")

print("Loading portfolio data...")

# Read the markdown portfolio into LangChain documents.
portfolio_loader = UnstructuredMarkdownLoader("portfolio_data.md")
portfolio_docs = portfolio_loader.load()

# Chunk the document on its markdown headers so each section is
# embedded (and later retrieved) as its own unit.
splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[
        ("#", "Header 1"),
        ("##", "Header 2"),
        ("###", "Header 3"),
    ]
)
sections = splitter.split_text(portfolio_docs[0].page_content)

# Embed every section and index the vectors in a Chroma store.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = Chroma.from_documents(documents=sections, embedding=embeddings)
|
| 45 |
+
|
| 46 |
+
# Load the system prompt from disk, falling back to a generic persona
# when the file is missing so the app still starts.
prompt_path = Path("system_prompt.txt")
try:
    system_prompt_content = prompt_path.read_text(encoding="utf-8")
except FileNotFoundError:
    print("Warning: system_prompt.txt not found, using default.")
    system_prompt_content = "You are a helpful assistant."

# Prompt layout handed to the LLM: persona first, then the retrieved
# context, then the user's question.
template = """
{system_prompt}

Context: {context}

Question: {input}

Answer:"""

# Pre-fill the persona so the chain only has to supply {context} and {input}.
prompt = PromptTemplate(
    template=template,
    input_variables=["context", "input"],
    partial_variables={"system_prompt": system_prompt_content},
)
|
| 68 |
+
|
| 69 |
+
# Read the Hugging Face API token; fail fast with a clear message when absent.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

if not hf_token:
    print("ERROR: Token not found in environment variables!")
    # FIX: bare `exit()` is a site.py convenience that is not guaranteed to
    # exist (e.g. under `python -S`); raising SystemExit is the portable way
    # to stop the process with exit code 1.
    raise SystemExit(1)

print("Initializing model...")

# Initialize InferenceClient with the verified token.
client = InferenceClient(token=hf_token)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# Custom LLM wrapper so LangChain chains can drive huggingface_hub's
# InferenceClient chat-completion API.
class HFInferenceClientLLM(LLM):
    """LangChain-compatible LLM backed by an InferenceClient chat endpoint."""

    client: Any  # a huggingface_hub.InferenceClient instance
    model: str = "meta-llama/Llama-3.2-3B-Instruct"
    max_new_tokens: int = 512

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[Any] = None,
        **kwargs: Any,
    ) -> str:
        """Send `prompt` as a single user message and return the reply text.

        FIX: accept `run_manager` and `**kwargs` — LangChain's `LLM._generate`
        forwards extra keyword arguments to `_call`, and the original
        signature would raise TypeError when any were supplied. The `stop`
        sequences are now forwarded to the API instead of silently ignored.
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.client.chat_completion(
            messages=messages,
            model=self.model,
            max_tokens=self.max_new_tokens,
            temperature=0.7,
            stop=stop,
        )
        return response.choices[0].message.content

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization."""
        return "huggingface_inference_client"
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# Instantiate the wrapper LLM around the shared InferenceClient.
llm = HFInferenceClientLLM(client=client, model="meta-llama/Llama-3.2-3B-Instruct")

# Wire retrieval-augmented QA: stuff the retrieved portfolio sections
# into the prompt, then front it with the Chroma retriever.
qa_chain = create_retrieval_chain(
    vectorstore.as_retriever(),
    create_stuff_documents_chain(llm, prompt),
)

print("RAG system ready!")
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# Gradio chat callback.
def chat_with_portfolio(message, history):
    """Run the user's message through the RAG chain and return the answer text."""
    try:
        result = qa_chain.invoke({"input": message})
    except Exception as exc:  # surface failures in the chat UI instead of crashing
        return f"Error: {str(exc)}"
    return result["answer"]
|
| 121 |
+
|
| 122 |
+
# Aesthetics
# Purple Gradient
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Quicksand:wght@400;600&display=swap');

* {
    font-family: 'Quicksand', sans-serif !important;
}

.gradio-container {
    /* Vibrant purple-to-blue gradient */
    background: linear-gradient(135deg, #a78bfa 0%, #6366f1 50%, #3b82f6 100%);
    background-attachment: fixed;
}

/* Adding a slight white tint to the chat area for better contrast */
.chatbot {
    background-color: rgba(255, 255, 255, 0.1) !important;
    backdrop-filter: blur(8px);
    border-radius: 15px;
}
"""

# Layout Gradio UI.
# FIX: `css` and `theme` are gr.Blocks() constructor arguments, not
# Blocks.launch() parameters — passing them to launch() raises TypeError,
# so the app never started. The styling/theme now go on the container.
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:  # Soft for rounded 'cute' corners
    gr.ChatInterface(
        fn=chat_with_portfolio,
        title="🐈⬛ Chat with Adda-Bot",
        description="Ask me anything about Adda's portfolio, experience, and background!",
        examples=[
            "What is Adda's experience with Python?",
            "Tell me about her education.",
            "What projects has Adda worked on?"
        ],
    )

if __name__ == "__main__":
    demo.launch()
|
portfolio_data.md
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Professional Portfolio: Adda Weathers
|
| 2 |
+
|
| 3 |
+
## Executive Summary
|
| 4 |
+
I am an Artificial Intelligence graduate student and technical professional with a 4.0 GPA. My expertise bridges the gap between complex AI development and human-centric design. I specialize in ethical AI implementation and creating production-ready machine learning solutions that solve real-world problems.
|
| 5 |
+
|
| 6 |
+
---
|
| 7 |
+
|
| 8 |
+
## Education
|
| 9 |
+
### M.S. in Artificial Intelligence | University of the Cumberlands
|
| 10 |
+
*Status: Nearly Complete (GPA: 4.0)*
|
| 11 |
+
* **Relevant Coursework:** AI in Healthcare, Natural Language Processing (NLP), AI for Human-Computer Interaction.
|
| 12 |
+
* **Focus:** Developing adaptable AI strategies to meet evolving business needs and improving user experience through intelligent systems.
|
| 13 |
+
|
| 14 |
+
---
|
| 15 |
+
|
| 16 |
+
## Key Projects & Innovations
|
| 17 |
+
|
| 18 |
+
### EdgeNexus: Menopause Insight Wearable (Entrepreneur Bootcamp)
|
| 19 |
+
* **Role:** Software Engineer (Python, Gradio)
|
| 20 |
+
* **Challenge:** The goal was to build an LLM for menopause treatment, but I identified a critical lack of clean, home-treatment data, making a generative model unreliable.
|
| 21 |
+
* **Solution:** Instead of forcing an unsuitable AI model, I engineered a logic-based notification system using wearable parameters to deliver real-time insights.
|
| 22 |
+
* **Impact:** Successfully delivered a Minimum Viable Product (MVP) that prioritized data integrity and user safety over "AI hype."
|
| 23 |
+
|
| 24 |
+
### AI-Driven Co-parenting Application (In Development)
|
| 25 |
+
* **Focus:** Ethical AI & Social Impact
|
| 26 |
+
* **Description:** Developing a private application that uses NLP to detect ineffective or high-conflict communication patterns in co-parenting.
|
| 27 |
+
* **Goal:** To reduce legal expenses for families and ensure a more stable environment for children by providing objective communication feedback.
|
| 28 |
+
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
## Technical Portfolio (GitHub Highlights)
|
| 32 |
+
|
| 33 |
+
### Production-Ready Spam Classification API
|
| 34 |
+
* **Technologies:** Python, FastAPI, XGBoost, HTML/CSS
|
| 35 |
+
* **Summary:** Deployed a machine learning model as a functional web service. I built a custom GUI to ensure the tool is accessible to non-technical users, demonstrating an end-to-end understanding of the software development life cycle (SDLC).
|
| 36 |
+
|
| 37 |
+
### Spam E-Mail Detector (XGBoost & K-Means)
|
| 38 |
+
* **Technologies:** XGBoost, Scikit-learn, K-Means Clustering
|
| 39 |
+
* **Insight:** I chose XGBoost specifically to handle dataset imbalances. I further enhanced the model's accuracy by implementing K-Means cluster analysis to identify hidden patterns in mail metadata.
|
| 40 |
+
|
| 41 |
+
### Heart Disease Prediction Model
|
| 42 |
+
* **Tool:** RapidMiner
|
| 43 |
+
* **Process:** Performed full Exploratory Data Analysis (EDA), data cleaning, and Decision Tree modeling. This project served as a deep dive into advanced pre-processing and feature selection.
|
| 44 |
+
|
| 45 |
+
### SQL Data Transformation & Logistic Regression
|
| 46 |
+
* **Technologies:** SQL, RapidMiner
|
| 47 |
+
* **Challenge:** Worked with a high-noise inpatient dataset with significant missing values.
|
| 48 |
+
* **Outcome:** While the initial model performance was limited by data quality, this project highlights my skills in complex SQL data cleaning and my ability to perform "post-mortem" analysis on model failures to drive future improvements.
|
| 49 |
+
|
| 50 |
+
### Security Engineering & System Sandboxing
|
| 51 |
+
* **Environment:** Windows 10 Pro Virtual Machine
|
| 52 |
+
* **Focus:** Privacy & Enterprise Security
|
| 53 |
+
* **Insight:** Implemented enterprise-level security protocols to understand the infrastructure "bubble" software lives in. Knowledge of privacy lawsuits (e.g., Flo) informs my ethical approach to data handling.
|
| 54 |
+
|
| 55 |
+
---
|
| 56 |
+
|
| 57 |
+
## Professional Experience
|
| 58 |
+
|
| 59 |
+
### Inbound Contacts Representative 2 | Humana Military
|
| 60 |
+
* **Mentorship & Onboarding:** Trusted by leadership to assist in onboarding new hires. I actively monitor dedicated Microsoft Teams channels alongside my supervisor to provide real-time guidance and support to new team members.
|
| 61 |
+
* **Peer Coaching:** Participate in "Live Listen" sessions where I provide constructive feedback on peer calls and analyze my own performance. This culture of mutual feedback has sharpened my ability to deliver and receive high-level critiques.
|
| 62 |
+
* **ROI-Driven Innovation:** I leverage my frontline experience to ensure that the technical solutions I develop solve actual user pain points, maximizing Return on Investment (ROI).
|
| 63 |
+
|
| 64 |
+
---
|
| 65 |
+
|
| 66 |
+
## Leadership & Collaboration
|
| 67 |
+
|
| 68 |
+
### AI Strategy & Implementation (AI Use Case Challenge) | Humana Military
|
| 69 |
+
* **Focus:** Strategic Planning, Feasibility Analysis, & User Advocacy
|
| 70 |
+
* **Contribution:** Collaborated on a team to evaluate an AI Call Summary application. The solution was designed to automate mandatory documentation requirements for the Defense Health Agency (DHA).
|
| 71 |
+
* **Impact:** Our proposal addressed a critical business need—allowing representatives to focus on beneficiary care rather than manual data entry.
|
| 72 |
+
* **Recognition:** The project was judged on feasibility and Return on Investment (ROI), earning an **Honorable Mention (4th Place)**. This experience sharpened my ability to vet AI tools based on contractual compliance and operational efficiency.
|
| 73 |
+
|
| 74 |
+
### Generative AI Prompt-a-thon Participant | Humana Military
|
| 75 |
+
* **Focus:** Prompt Engineering, Rapid Problem Solving, & Creative AI
|
| 76 |
+
* **The Experience:** Competed in a live, timed event to solve complex challenges using Large Language Models (LLMs).
|
| 77 |
+
* **Cultural Takeaway:** While I didn't take home the top prize, the experience was invaluable for mastering iterative prompting techniques and observing different logic structures used by other engineers. It highlighted my commitment to staying at the forefront of AI trends and my ability to perform under pressure.
|
| 78 |
+
|
| 79 |
+
---
|
| 80 |
+
|
| 81 |
+
## Technical Skills Matrix
|
| 82 |
+
* **Languages & Frameworks:** Python (Pandas, Scikit-learn, TensorFlow), SQL, FastAPI, Gradio, HTML/CSS.
|
| 83 |
+
* **AI/ML Specializations:** Natural Language Processing (NLP), XGBoost, Linear and Logistic Regression, K-Means Clustering, Decision Trees, Prompt Engineering.
|
| 84 |
+
* **Tools & Security:** RapidMiner, GitHub, Virtual Machine Sandboxing, Enterprise Security Protocols.
|
| 85 |
+
|
| 86 |
+
---
|
| 87 |
+
|
| 88 |
+
## The Human Element
|
| 89 |
+
|
| 90 |
+
### My Journey into AI
|
| 91 |
+
While I have explored Python for years, 2024 was my "breakout" year. I dedicated myself to mastering the language and found a deep passion for the creative logic of problem-solving. I chose to pursue a Master’s in Artificial Intelligence because I am driven to stay "future-ready." I believe that adaptability is the most important skill in the modern workforce, and I thrive on the constant expansion of this field.
|
| 92 |
+
|
| 93 |
+
### Life Outside the IDE
|
| 94 |
+
* **Gaming & Family:** When I’m not at my desk, I’m likely playing Nintendo Switch with my husband and stepson. We are a competitive household—especially when it comes to Mario Party. I firmly believe that all is fair in love, war, and family game night!
|
| 95 |
+
* **Collections:** I am an avid Pokémon card collector, always on the hunt for the next addition to my deck.
|
| 96 |
+
* **The "Manager":** I share my home with a darling (if slightly lazy) cat named Nova, affectionately known as "the bum."
|
requirements.txt
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# General Utilities
|
| 2 |
+
python-dotenv
|
| 3 |
+
|
| 4 |
+
# UI Framework
|
| 5 |
+
gradio>=5.0.0
|
| 6 |
+
|
| 7 |
+
# LangChain Core and Community
|
| 8 |
+
langchain
|
| 9 |
+
langchain-core
|
| 10 |
+
langchain-community
|
| 11 |
+
langchain-huggingface
|
| 12 |
+
langchain-text-splitters
|
| 13 |
+
|
| 14 |
+
# Vector Database and Document Processing
|
| 15 |
+
chromadb
|
| 16 |
+
unstructured
|
| 17 |
+
markdown
|
| 18 |
+
|
| 19 |
+
# AI Models and Hub
|
| 20 |
+
huggingface-hub
|
| 21 |
+
sentence-transformers
|
| 22 |
+
|
system_prompt.txt
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Role: You are the "Adda-Bot," a highly professional, intelligent, and empathetic AI career agent representing Adda. Your goal is to answer questions from recruiters, hiring managers, and collaborators based strictly on the provided portfolio data.
|
| 2 |
+
|
| 3 |
+
Persona:
|
| 4 |
+
- Tone: Professional, encouraging, and technically confident, yet humble.
|
| 5 |
+
- Voice: Use "Adda" or "She/Her" when referring to the candidate.
|
| 6 |
+
- Perspective: You are an advocate for Adda’s skills, emphasizing her 4.0 GPA and ethical approach to AI.
|
| 7 |
+
|
| 8 |
+
Operational Guidelines:
|
| 9 |
+
1. Strict Fidelity: Use the provided Context below to answer. If the answer isn't in the context, politely explain you don't have that info but can talk about her AI projects.
|
| 10 |
+
2. Highlight Culture Fit: Mention peer coaching, onboarding, and the Prompt-a-thon for culture questions.
|
| 11 |
+
3. Address "Failures" Positively: Frame setbacks as learning opportunities.
|
| 12 |
+
4. Formatting: Use bullet points for lists of projects or skills.
|
| 13 |
+
5. Personal Touch: Share details about her family, Nova the cat, and Mario Party/Pokémon, then pivot back to her academic discipline.
|
| 14 |
+
|
| 15 |
+
Context Information:
|
| 16 |
+
{context}
|
| 17 |
+
|
| 18 |
+
User Question: {input}
|
| 19 |
+
|
| 20 |
+
Final Answer:
|
| 21 |
+
|