# pro-demo/app.py
import gradio as gr
import os
import logging

# ══════════════════════════════════════════════════════════════════════════════
# 🎓 MINIMAL AI RESEARCH DEMO - GRADIO 5.0.1 COMPATIBLE
# ══════════════════════════════════════════════════════════════════════════════
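
# NOTE: Assumed runtime dependencies (not declared in this file). A minimal
# requirements.txt for this Space would plausibly be:
#   gradio==5.0.1
#   huggingface_hub
# HF_API_TOKEN must also be set in the environment (or as a Space secret).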

try:
    from huggingface_hub import InferenceClient
    HF_AVAILABLE = True
except ImportError:
    HF_AVAILABLE = False

# Simple logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# ══════════════════════════════════════════════════════════════════════════════
# 🔧 SIMPLE CLIENT - VERIFIED WORKING
# ══════════════════════════════════════════════════════════════════════════════

def get_client():
    """Build a Hugging Face InferenceClient and verify it with a test call."""
    api_token = os.getenv("HF_API_TOKEN")
    if not HF_AVAILABLE or not api_token:
        return None
    try:
        client = InferenceClient(
            provider="hf-inference",
            api_key=api_token,
        )
        # Smoke test: the same fill-mask call as the verified working example
        test_result = client.fill_mask(
            "The capital of France is [MASK].",
            model="google-bert/bert-base-uncased",
        )
        logger.info(f"✅ Client test successful: {type(test_result)}")
        return client
    except Exception as e:
        logger.error(f"❌ Client failed: {e}")
        return None

CLIENT = get_client()
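# NOTE: CLIENT is created once at import time; if HF_API_TOKEN changes, the
# app (or Space) must be restarted for the new token to take effect.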

# ══════════════════════════════════════════════════════════════════════════════
# 🤖 MINIMAL FUNCTIONS - AVOIDING GRADIO SCHEMA BUGS
# ══════════════════════════════════════════════════════════════════════════════

def run_chat(message):
    """Chat function - uses the verified working chat model."""
    if not CLIENT:
        return "❌ Client not available"
    try:
        messages = [{"role": "user", "content": message}]
        # InferenceClient exposes an OpenAI-compatible chat completion API
        completion = CLIENT.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)}"

def run_fill_mask(text):
    """Fill-mask function - uses the verified working approach."""
    if not CLIENT:
        return "❌ Client not available"
    if "[MASK]" not in text:
        return "❌ Text must contain [MASK]"
    try:
        result = CLIENT.fill_mask(text, model="google-bert/bert-base-uncased")
        if isinstance(result, list):
            output = "🎭 **Predictions:**\n"
            for i, pred in enumerate(result[:5], 1):
                # Older huggingface_hub versions return dicts; newer ones
                # return FillMaskOutputElement dataclasses. Handle both.
                if isinstance(pred, dict):
                    token = pred.get("token_str", "").strip()
                    score = pred.get("score", 0)
                else:
                    token = (getattr(pred, "token_str", "") or "").strip()
                    score = getattr(pred, "score", 0)
                output += f"{i}. **{token}** ({score:.3f})\n"
            return output
        return str(result)
    except Exception as e:
        return f"❌ Error: {str(e)}"

def run_question_answering(question, context):
    """Q&A function."""
    if not CLIENT or not question or not context:
        return "❌ Client not available or missing input"
    try:
        answer = CLIENT.question_answering(
            question=question,
            context=context,
            model="deepset/roberta-base-squad2",
        )
        # Older huggingface_hub versions return a dict; newer ones return a
        # QuestionAnsweringOutputElement dataclass. Handle both.
        if isinstance(answer, dict):
            answer_text = answer.get("answer", str(answer))
        else:
            answer_text = getattr(answer, "answer", str(answer))
        return f"💡 **Answer:** {answer_text}"
    except Exception as e:
        return f"❌ Error: {str(e)}"

def run_summarization(text):
    """Summarization function."""
    if not CLIENT or len(text.split()) < 10:
        return "❌ Client not available or text too short"
    try:
        result = CLIENT.summarization(text, model="facebook/bart-large-cnn")
        # The result may be a list of dicts, a single dict, or (in newer
        # huggingface_hub versions) a SummarizationOutput dataclass.
        if isinstance(result, list) and result:
            result = result[0]
        if isinstance(result, dict):
            summary = result.get("summary_text", str(result))
        else:
            summary = getattr(result, "summary_text", str(result))
        return f"📝 **Summary:** {summary}"
    except Exception as e:
        return f"❌ Error: {str(e)}"

# ══════════════════════════════════════════════════════════════════════════════
# 🎨 MINIMAL GRADIO INTERFACE - SCHEMA-BUG PROOF
# ══════════════════════════════════════════════════════════════════════════════

# Create the interface with minimal components to avoid schema bugs
with gr.Blocks(title="AI Research Demo") as demo:
    gr.Markdown("# 🎓 AI Research Demo\n### Working HuggingFace Inference API")

    if CLIENT:
        gr.Markdown("✅ **Status:** Connected and tested successfully")
    else:
        gr.Markdown("❌ **Status:** Set HF_API_TOKEN environment variable")

    # Chat section
    gr.Markdown("## 💬 Chat with AI")
    chat_input = gr.Textbox(label="Your Message", placeholder="Ask anything...")
    chat_output = gr.Textbox(label="AI Response", lines=5)
    chat_btn = gr.Button("Send", variant="primary")
    chat_btn.click(run_chat, inputs=chat_input, outputs=chat_output)

    gr.Markdown("---")

    # Fill-mask section
    gr.Markdown("## 🎭 Fill Mask")
    mask_input = gr.Textbox(
        label="Text with [MASK]",
        value="The capital of France is [MASK].",
        placeholder="Enter text with [MASK] token",
    )
    mask_output = gr.Textbox(label="Predictions", lines=6)
    mask_btn = gr.Button("Predict", variant="primary")
    mask_btn.click(run_fill_mask, inputs=mask_input, outputs=mask_output)

    gr.Markdown("---")

    # Q&A section
    gr.Markdown("## ❓ Question Answering")
    qa_question = gr.Textbox(label="Question", value="What is AI?")
    qa_context = gr.Textbox(
        label="Context",
        lines=3,
        value="Artificial Intelligence (AI) is the simulation of human intelligence in machines.",
    )
    qa_output = gr.Textbox(label="Answer", lines=3)
    qa_btn = gr.Button("Answer", variant="primary")
    qa_btn.click(run_question_answering, inputs=[qa_question, qa_context], outputs=qa_output)

    gr.Markdown("---")

    # Summarization section
    gr.Markdown("## 📝 Text Summarization")
    sum_input = gr.Textbox(
        label="Text to Summarize",
        lines=5,
        value="Machine learning is a method of data analysis that automates analytical model building. It is a branch of artificial intelligence based on the idea that systems can learn from data, identify patterns and make decisions with minimal human intervention. The process involves feeding data into algorithms that learn patterns and make predictions or decisions without being explicitly programmed for each specific task.",
    )
    sum_output = gr.Textbox(label="Summary", lines=3)
    sum_btn = gr.Button("Summarize", variant="primary")
    sum_btn.click(run_summarization, inputs=sum_input, outputs=sum_output)

    gr.Markdown("---")
    gr.Markdown("**🔧 Setup:** `export HF_API_TOKEN=your_token_here`")

if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )
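
# Local run (assumed workflow, matching the Setup note above):
#   export HF_API_TOKEN=your_token_here
#   python app.py
# The demo is then served at http://localhost:7860.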