# HuggingFace Spaces page header (scrape artifact): Space status was "Sleeping".
| """AI Agency Pro - Vendor-Driven Multi-Agent System | |
| Using official HuggingFace libraries: transformers, gradio, spaces | |
| Optimized for ZeroGPU with best practices from HuggingFace documentation | |
| """ | |
| import gradio as gr | |
| import spaces | |
| from transformers import pipeline | |
| from typing import Generator | |
| import logging | |
| import torch | |
# Configure logging (vendor best practice)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Model configurations using official HuggingFace model IDs.
# Each key is a transformers pipeline task name; each value is the hub repo id.
MODELS = {
    "summarization": "facebook/bart-large-cnn",
    "text-classification": "facebook/bart-large-mnli",
    "question-answering": "deepset/roberta-base-squad2",
    "text-generation": "mistralai/Mistral-7B-Instruct-v0.3",
}

# Initialize pipelines lazily for ZeroGPU: each stays None until the matching
# tab is first used, so the Space boots without loading any model weights.
summarizer = None    # summarization pipeline (bart-large-cnn)
classifier = None    # zero-shot classification pipeline (bart-large-mnli)
qa_pipeline = None   # question-answering pipeline (roberta-base-squad2)
def summarize_text(text: str, max_length: int = 150, min_length: int = 30) -> str:
    """Summarize text using the HuggingFace transformers summarization pipeline.

    Official vendor pattern from:
    https://huggingface.co/docs/transformers/main_classes/pipelines

    Args:
        text: Text to summarize; truncated to 1024 characters (a cheap proxy
            for BART's input token limit).
        max_length: Maximum summary length in tokens.
        min_length: Minimum summary length in tokens.

    Returns:
        The summary string, or a human-readable error/help message.
    """
    global summarizer
    if not text or not text.strip():
        return "Please provide text to summarize."
    try:
        if summarizer is None:
            # Lazy init. Fall back to CPU when CUDA is absent — the original
            # hard-coded device=0, which crashes on CPU-only hosts.
            summarizer = pipeline(
                "summarization",
                model=MODELS["summarization"],
                device=0 if torch.cuda.is_available() else -1,
            )
        # Gradio sliders deliver floats; the generation API expects ints.
        result = summarizer(
            text[:1024],
            max_length=int(max_length),
            min_length=int(min_length),
            do_sample=False,
        )
        return result[0]["summary_text"]
    except Exception as e:
        logger.error(f"Summarization error: {e}")
        return f"Error: {str(e)}"
def classify_text(text: str, labels: str) -> dict:
    """Zero-shot classification using the HuggingFace transformers pipeline.

    Args:
        text: Text to classify.
        labels: Comma-separated candidate labels.

    Returns:
        Dict with "sequence", "labels" (ranked), and "scores" (rounded to 4
        decimals), or an {"error": ...} dict on bad input/failure.
    """
    global classifier
    if not text.strip() or not labels.strip():
        return {"error": "Please provide both text and labels (comma-separated)"}
    try:
        if classifier is None:
            # Lazy init with CPU fallback (original hard-coded device=0).
            classifier = pipeline(
                "zero-shot-classification",
                model=MODELS["text-classification"],
                device=0 if torch.cuda.is_available() else -1,
            )
        # Drop blank entries so inputs like "a,,b" or a trailing comma don't
        # send empty-string labels to the model.
        candidate_labels = [l.strip() for l in labels.split(",") if l.strip()]
        if not candidate_labels:
            return {"error": "Please provide both text and labels (comma-separated)"}
        result = classifier(text, candidate_labels)
        return {
            "sequence": result["sequence"],
            "labels": result["labels"],
            "scores": [round(s, 4) for s in result["scores"]],
        }
    except Exception as e:
        logger.error(f"Classification error: {e}")
        return {"error": str(e)}
def answer_question(context: str, question: str) -> str:
    """Extractive question answering using the HuggingFace transformers pipeline.

    Args:
        context: Paragraph the answer must be extracted from.
        question: Question about the context.

    Returns:
        A formatted "Answer: ...\\nConfidence: ..." string, or an error/help
        message.
    """
    global qa_pipeline
    if not context.strip() or not question.strip():
        return "Please provide both context and question."
    try:
        if qa_pipeline is None:
            # Lazy init with CPU fallback (original hard-coded device=0).
            qa_pipeline = pipeline(
                "question-answering",
                model=MODELS["question-answering"],
                device=0 if torch.cuda.is_available() else -1,
            )
        result = qa_pipeline(question=question, context=context)
        return f"Answer: {result['answer']}\nConfidence: {result['score']:.2%}"
    except Exception as e:
        logger.error(f"QA error: {e}")
        return f"Error: {str(e)}"
def chat_completion(message: str, history: list) -> str:
    """Chat completion using the HuggingFace text-generation pipeline.

    Args:
        message: User message to respond to.
        history: Conversation history (currently unused; kept so the Gradio
            callback signature stays backward-compatible).

    Returns:
        The generated reply, or an error/help message.
    """
    if not message.strip():
        return "Please enter a message."
    try:
        # Cache the pipeline on the function object: the original rebuilt the
        # 7B-parameter model on EVERY request, which is prohibitively slow.
        generator = getattr(chat_completion, "_generator", None)
        if generator is None:
            use_gpu = torch.cuda.is_available()
            generator = pipeline(
                "text-generation",
                model=MODELS["text-generation"],
                # CPU fallback; fp16 only makes sense on GPU.
                device=0 if use_gpu else -1,
                torch_dtype=torch.float16 if use_gpu else None,
            )
            chat_completion._generator = generator
        # Mistral-Instruct prompt format.
        prompt = f"<s>[INST] {message} [/INST]"
        result = generator(prompt, max_new_tokens=512, do_sample=True, temperature=0.7)
        generated = result[0]["generated_text"]
        # The pipeline echoes the prompt; keep only the completion.
        return generated.split("[/INST]")[-1].strip()
    except Exception as e:
        logger.error(f"Chat error: {e}")
        return f"Error: {str(e)}"
# Gradio UI: one tab per agent. NOTE(fix): the FEM tab was appended after the
# footer markdown, outside the gr.Tabs() container — it is now placed inside
# Tabs with the other agents, and the footer renders after all tabs.
with gr.Blocks(
    title="AI Agency Pro",
    theme=gr.themes.Soft(),
    css=".gradio-container {max-width: 900px !important}"
) as demo:
    gr.Markdown("""
# AI Agency Pro
## Vendor-Driven Multi-Agent System
Built with official HuggingFace libraries:
- `transformers.pipeline` for local inference
- `@spaces.GPU` decorator for ZeroGPU (Nvidia H200)
- Gradio Blocks for UI components
""")
    with gr.Tabs():
        with gr.TabItem("Summarizer", id="summarizer"):
            gr.Markdown("### Text Summarization\nUsing `facebook/bart-large-cnn`")
            with gr.Row():
                with gr.Column():
                    sum_input = gr.Textbox(label="Text to Summarize", lines=8, placeholder="Paste your text here (max 1024 chars)...")
                    with gr.Row():
                        sum_max = gr.Slider(50, 300, value=150, label="Max Length")
                        sum_min = gr.Slider(10, 100, value=30, label="Min Length")
                    sum_btn = gr.Button("Summarize", variant="primary")
                with gr.Column():
                    sum_output = gr.Textbox(label="Summary", lines=8)
            sum_btn.click(summarize_text, [sum_input, sum_max, sum_min], sum_output)

        with gr.TabItem("Classifier", id="classifier"):
            gr.Markdown("### Zero-Shot Classification\nUsing `facebook/bart-large-mnli`")
            with gr.Row():
                with gr.Column():
                    cls_text = gr.Textbox(label="Text to Classify", lines=4, placeholder="Enter text to classify...")
                    cls_labels = gr.Textbox(label="Labels (comma-separated)", placeholder="positive, negative, neutral")
                    cls_btn = gr.Button("Classify", variant="primary")
                with gr.Column():
                    cls_output = gr.JSON(label="Classification Results")
            cls_btn.click(classify_text, [cls_text, cls_labels], cls_output)

        with gr.TabItem("Q&A Agent", id="qa"):
            gr.Markdown("### Question Answering\nUsing `deepset/roberta-base-squad2`")
            with gr.Row():
                with gr.Column():
                    qa_context = gr.Textbox(label="Context", lines=6, placeholder="Provide context paragraph...")
                    qa_question = gr.Textbox(label="Question", placeholder="Ask a question about the context...")
                    qa_btn = gr.Button("Get Answer", variant="primary")
                with gr.Column():
                    qa_output = gr.Textbox(label="Answer", lines=4)
            qa_btn.click(answer_question, [qa_context, qa_question], qa_output)

        with gr.TabItem("Chat Agent", id="chat"):
            gr.Markdown("### AI Chat Assistant\nUsing `mistralai/Mistral-7B-Instruct-v0.3`")
            with gr.Row():
                with gr.Column():
                    chat_input = gr.Textbox(label="Your Message", lines=3, placeholder="Type your message...")
                    chat_btn = gr.Button("Send", variant="primary")
                with gr.Column():
                    chat_output = gr.Textbox(label="Response", lines=10)
            chat_btn.click(chat_completion, [chat_input, gr.State([])], chat_output)

        # FEM Solver Tab (moved inside the Tabs container — see note above).
        with gr.TabItem("FEM Solver", id="fem"):
            gr.Markdown("### Finite Element Method Solver\n\nVendor-based FEM using `scikit-fem`, `scipy`, `pygmsh`, `meshio`")
            with gr.Row():
                with gr.Column():
                    fem_problem = gr.Dropdown(
                        ["Poisson 2D", "Helmholtz 2D", "Heat Equation"],
                        label="Problem Type",
                        value="Poisson 2D"
                    )
                    fem_mesh_size = gr.Slider(5, 50, value=10, step=5, label="Mesh Density")
                    fem_source = gr.Textbox(
                        label="Source Function (Python expr)",
                        value="lambda x: 1.0",
                        placeholder="lambda x: np.sin(x[0])*np.sin(x[1])"
                    )
                    fem_bc_value = gr.Number(label="Boundary Value", value=0.0)
                    fem_solve_btn = gr.Button("Solve FEM Problem", variant="primary")
                with gr.Column():
                    fem_output = gr.Textbox(label="Solution Summary", lines=8)
                    fem_plot = gr.Plot(label="Solution Visualization")

            def solve_fem_problem(problem, mesh_size, source_str, bc_value):
                """Solve the selected FEM problem and return (summary, figure).

                Returns an (error message, None) pair on any failure so the UI
                degrades gracefully instead of raising.
                """
                try:
                    import numpy as np
                    from fem_core.mesh_generator import create_unit_square_mesh
                    from fem_core.solver import FEMSolver
                    import matplotlib.pyplot as plt
                    # Generate mesh (slider may deliver a float).
                    mesh = create_unit_square_mesh(n=int(mesh_size))
                    # SECURITY: eval of a user-supplied expression. The namespace
                    # is restricted (no builtins, numpy only), but this is still
                    # arbitrary-expression execution — acceptable only for a
                    # trusted demo Space, not for untrusted public input.
                    source_func = eval(source_str, {"__builtins__": {}, "np": np}, {})
                    solver = FEMSolver(mesh)
                    # Dispatch on problem type.
                    if problem == "Poisson 2D":
                        solution = solver.solve_poisson(source_func, dirichlet_val=bc_value)
                    elif problem == "Helmholtz 2D":
                        solution = solver.solve_helmholtz(k_squared=1.0, source=source_func, dirichlet_val=bc_value)
                    else:
                        solution = solver.solve_heat(dt=0.01, num_steps=10, initial_condition=source_func)
                    # Visualization: filled contours over the DOF locations.
                    fig, ax = plt.subplots(figsize=(8, 6))
                    points = solver.basis.doflocs
                    tri = ax.tricontourf(points[0], points[1], solution, levels=20, cmap='viridis')
                    plt.colorbar(tri, ax=ax)
                    ax.set_xlabel('x')
                    ax.set_ylabel('y')
                    ax.set_title(f'{problem} Solution')
                    ax.set_aspect('equal')
                    # Human-readable summary of the solution field.
                    summary = f"""Problem: {problem}
Mesh Points: {len(solution)}
Mesh Cells: {len(mesh.cells) if hasattr(mesh, 'cells') else 'N/A'}
Solution Range: ({solution.min():.4f}, {solution.max():.4f})
Mean Value: {solution.mean():.4f}
"""
                    return summary, fig
                except Exception as e:
                    error_msg = f"Error solving FEM problem: {str(e)}\n\nMake sure dependencies are installed."
                    return error_msg, None

            fem_solve_btn.click(
                solve_fem_problem,
                [fem_problem, fem_mesh_size, fem_source, fem_bc_value],
                [fem_output, fem_plot]
            )

    gr.Markdown("""
---
**Vendor Libraries:** `transformers`, `gradio`, `spaces`
**Hardware:** ZeroGPU (Nvidia H200)
**Best Practices:** [HuggingFace Docs](https://huggingface.co/docs)
""")
# Script entry point: launch the Gradio server when run directly.
if __name__ == "__main__":
    demo.launch()