# hugobowne's picture
# Upload folder using huggingface_hub
# 9ca6b58 verified
"""
Gradio MCP Server for querying the ZenML LLMOps Database.
Exposes 4 tools:
1. search - flexible search with optional filters
2. get_case_study_details - get full details of a case study
3. get_statistics - database statistics
4. list_options - available industries, companies, years
"""
import gradio as gr
from datasets import load_dataset
import pandas as pd
# Load the dataset once at import time so every tool call reuses the same
# in-memory DataFrame. NOTE(review): load_dataset needs network access on
# first run (cached afterwards by huggingface_hub).
print("Loading ZenML LLMOps Database...")
ds = load_dataset("zenml/llmops-database", split="train")
df = ds.to_pandas()
print(f"Loaded {len(df)} case studies")
def search(
    query: str = None,
    industry: str = None,
    company: str = None,
    year: int = None,
    tag: str = None,
    limit: int = 20
) -> str:
    """
    Search the LLMOps database with optional filters. All parameters can be combined.
    Args:
        query: Text to search for in titles and summaries (e.g., 'RAG', 'fine-tuning', 'agents')
        industry: Filter by industry (e.g., 'Tech', 'Finance', 'Healthcare')
        company: Filter by company (e.g., 'meta', 'google', 'openai')
        year: Filter by year (e.g., 2023, 2024)
        tag: Filter by tag in any tag field (e.g., 'pytorch', 'monitoring', 'rag')
        limit: Maximum results to return (default 20)
    Returns:
        Matching case studies with title, company, industry, year, and summary
    """
    def _contains(series: pd.Series, needle: str) -> pd.Series:
        # Literal, case-insensitive substring match. regex=False is essential:
        # with the default regex=True, user input such as 'c++' or '(RAG)'
        # would be parsed as a regular expression and raise re.error.
        return series.str.contains(needle, case=False, regex=False, na=False)

    # Start with all rows; build the mask on df.index so boolean indexing
    # stays aligned even if the frame's index is ever non-default.
    mask = pd.Series(True, index=df.index)

    # Free-text search over title and both summary fields.
    if query and query.strip():
        needle = query.strip()
        mask &= (
            _contains(df["title"], needle)
            | _contains(df["short_summary"], needle)
            | _contains(df["full_summary"], needle)
        )

    # Substring filters on the categorical-ish columns.
    if industry and industry.strip():
        mask &= _contains(df["industry"], industry.strip())
    if company and company.strip():
        mask &= _contains(df["company"], company.strip())

    # gr.Number delivers floats; normalize so 2024.0 matches integer years.
    if year:
        mask &= (df["year"] == int(year))

    # A tag can live in any of the four tag columns.
    if tag and tag.strip():
        needle = tag.strip()
        mask &= (
            _contains(df["tools_tags"], needle)
            | _contains(df["techniques_tags"], needle)
            | _contains(df["application_tags"], needle)
            | _contains(df["extra_tags"], needle)
        )

    results = df[mask].head(int(limit))

    if len(results) == 0:
        # Echo the active filters back so the caller (often an LLM) can
        # see exactly which combination produced no hits.
        filters = []
        if query: filters.append(f"query='{query}'")
        if industry: filters.append(f"industry='{industry}'")
        if company: filters.append(f"company='{company}'")
        if year: filters.append(f"year={year}")
        if tag: filters.append(f"tag='{tag}'")
        return f"No case studies found with filters: {', '.join(filters) if filters else 'none'}"

    output = f"Found {len(results)} case studies:\n\n"
    for _, row in results.iterrows():
        output += f"## {row['title']}\n"
        output += f"**Company:** {row['company']} | **Industry:** {row['industry']} | **Year:** {row['year']}\n"
        output += f"**Tags:** {row['application_tags']}\n"
        output += f"**Summary:** {row['short_summary']}\n"
        output += f"**Source:** {row['source_url']}\n\n"
        output += "---\n\n"
    return output
def get_case_study_details(title: str) -> str:
    """
    Get the full details of a specific case study by title.
    Args:
        title: The title (or part of the title) of the case study to retrieve
    Returns:
        Complete case study including full summary, all tags, and source URL
    """
    if not title or not title.strip():
        return "Please provide a title to search for"
    # Literal (regex=False) case-insensitive substring match: titles typed
    # by users may contain regex metacharacters like '(' or '+', which
    # would crash the default regex-based str.contains.
    mask = df["title"].str.contains(title.strip(), case=False, regex=False, na=False)
    results = df[mask]
    if len(results) == 0:
        return f"No case study found with title containing '{title}'"
    # If several titles match, the first row in dataset order wins.
    row = results.iloc[0]
    output = f"# {row['title']}\n\n"
    output += f"**Company:** {row['company']}\n"
    output += f"**Industry:** {row['industry']}\n"
    output += f"**Year:** {row['year']}\n"
    output += f"**Source:** {row['source_url']}\n\n"
    output += f"## Tags\n"
    output += f"- **Application:** {row['application_tags']}\n"
    output += f"- **Tools:** {row['tools_tags']}\n"
    output += f"- **Techniques:** {row['techniques_tags']}\n"
    output += f"- **Extra:** {row['extra_tags']}\n\n"
    output += f"## Full Summary\n\n{row['full_summary']}\n"
    return output
def get_statistics() -> str:
    """
    Get statistics about the LLMOps Database.
    Returns:
        Summary statistics including total count, breakdown by industry, year, and top companies
    """
    # Build the report as a list of markdown lines, then join once.
    lines = [
        "# LLMOps Database Statistics\n",
        f"**Total case studies:** {len(df)}\n",
        "## By Industry",
    ]
    for name, count in df["industry"].value_counts().items():
        lines.append(f"- {name}: {count}")
    lines.append("\n## By Year")
    for yr, count in df["year"].value_counts().sort_index().items():
        lines.append(f"- {int(yr)}: {count}")
    lines.append("\n## Top 15 Companies")
    for name, count in df["company"].value_counts().head(15).items():
        lines.append(f"- {name}: {count}")
    return "\n".join(lines) + "\n"
def list_options() -> str:
    """
    List available filter options (industries, top companies, years).
    Use this to know what values you can filter by in the search function.
    Returns:
        Lists of available industries, companies, and years
    """
    # Each section is a markdown header followed by one bullet per value.
    sections = [
        "# Available Filter Options\n",
        "## Industries",
    ]
    sections.extend(f"- {name}" for name in df["industry"].dropna().unique())
    sections.append("\n## Years")
    sections.extend(f"- {int(yr)}" for yr in sorted(df["year"].dropna().unique()))
    sections.append("\n## Top 30 Companies")
    sections.extend(f"- {name}" for name in df["company"].value_counts().head(30).index)
    return "\n".join(sections) + "\n"
# Assemble the Gradio UI. Each tab fronts one of the functions above;
# their docstrings double as the MCP tool descriptions.
with gr.Blocks(title="LLMOps Database MCP Server") as demo:
    gr.Markdown("""
    # 🔍 ZenML LLMOps Database MCP Server
    Query the [ZenML LLMOps Database](https://huggingface.co/datasets/zenml/llmops-database) -
    a collection of 1,100+ real-world LLMOps case studies.
    **This app is an MCP server** - add it to your AI assistant (like Cursor) to query the database!
    """)

    # --- Search tab: all filter widgets feed the search() tool ---
    with gr.Tab("Search"):
        gr.Markdown("### Search with optional filters (all can be combined)")
        with gr.Row():
            q_box = gr.Textbox(label="Text Search", placeholder="e.g., RAG, fine-tuning, agents")
            industry_box = gr.Textbox(label="Industry", placeholder="e.g., Tech, Finance, Healthcare")
        with gr.Row():
            company_box = gr.Textbox(label="Company", placeholder="e.g., meta, google, openai")
            year_box = gr.Number(label="Year", value=None)
        with gr.Row():
            tag_box = gr.Textbox(label="Tag", placeholder="e.g., pytorch, monitoring, rag")
            limit_slider = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Max Results")
        run_search = gr.Button("Search")
        search_md = gr.Markdown()
        run_search.click(
            search,
            inputs=[q_box, industry_box, company_box, year_box, tag_box, limit_slider],
            outputs=search_md,
        )

    # --- Details tab: fetch one case study by (partial) title ---
    with gr.Tab("Details"):
        title_box = gr.Textbox(label="Case Study Title", placeholder="Enter part of the title")
        run_details = gr.Button("Get Details")
        details_md = gr.Markdown()
        run_details.click(get_case_study_details, inputs=[title_box], outputs=details_md)

    # --- Statistics tab: database-wide counts ---
    with gr.Tab("Statistics"):
        run_stats = gr.Button("Get Statistics")
        stats_md = gr.Markdown()
        run_stats.click(get_statistics, outputs=stats_md)

    # Utility button listing the values accepted by search()'s filters.
    # NOTE(review): SOURCE indentation was stripped, so the original nesting
    # of this section (Blocks level vs. inside the Statistics tab) is a
    # best-effort reconstruction — confirm against the deployed layout.
    gr.Markdown("---")
    run_options = gr.Button("List Filter Options")
    options_md = gr.Markdown()
    run_options.click(list_options, outputs=options_md)
# Launch with MCP server enabled
if __name__ == "__main__":
    # mcp_server=True makes Gradio expose the wired functions as MCP tools
    # in addition to serving the web UI.
    demo.launch(mcp_server=True)