import gradio as gr
import os
import logging
# ──────────────────────────────────────────────────────────────────────────────
# 🚀 MINIMAL AI RESEARCH DEMO - GRADIO 5.0.1 COMPATIBLE
# ──────────────────────────────────────────────────────────────────────────────
try:
    from huggingface_hub import InferenceClient
    HF_AVAILABLE = True
except ImportError:
    HF_AVAILABLE = False
# Simple logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ──────────────────────────────────────────────────────────────────────────────
# 🔧 SIMPLE CLIENT - VERIFIED WORKING
# ──────────────────────────────────────────────────────────────────────────────
def get_client():
    """Get HuggingFace client - exactly like your working example."""
    api_token = os.getenv("HF_API_TOKEN")
    if not HF_AVAILABLE or not api_token:
        return None
    try:
        client = InferenceClient(
            provider="hf-inference",
            api_key=api_token,
        )
        # Test with exact same call as your working example
        test_result = client.fill_mask("The capital of France is [MASK].", model="google-bert/bert-base-uncased")
        logger.info(f"✅ Client test successful: {type(test_result)}")
        return client
    except Exception as e:
        logger.error(f"❌ Client failed: {e}")
        return None
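
# One shared client, created once at import time; every handler below reuses it.
# Note: a changed HF_API_TOKEN only takes effect after a restart, since this
# initialization (and its fill_mask self-test) runs exactly once.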
CLIENT = get_client()
# ──────────────────────────────────────────────────────────────────────────────
# 🤖 MINIMAL FUNCTIONS - AVOIDING GRADIO SCHEMA BUGS
# ──────────────────────────────────────────────────────────────────────────────
def run_chat(message):
    """Chat function - using your exact working model."""
    if not CLIENT:
        return "❌ Client not available"
    try:
        messages = [{"role": "user", "content": message}]
        completion = CLIENT.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)}"
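
# The call above uses huggingface_hub's OpenAI-compatible chat interface. A
# streaming variant would look roughly like this (sketch, not wired into the UI):
#
#   for chunk in CLIENT.chat.completions.create(
#       model="Qwen/Qwen2.5-72B-Instruct", messages=messages, stream=True,
#   ):
#       print(chunk.choices[0].delta.content or "", end="")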
def run_fill_mask(text):
    """Fill mask - using your exact working approach."""
    if not CLIENT:
        return "❌ Client not available"
    if "[MASK]" not in text:
        return "❌ Text must contain [MASK]"
    try:
        result = CLIENT.fill_mask(text, model="google-bert/bert-base-uncased")
        if isinstance(result, list):
            output = "🎯 **Predictions:**\n"
            for i, pred in enumerate(result[:5], 1):
                # Older huggingface_hub versions return dicts; newer ones return
                # dataclass elements, so fall back to attribute access.
                if isinstance(pred, dict):
                    token, score = pred.get("token_str", ""), pred.get("score", 0)
                else:
                    token, score = getattr(pred, "token_str", ""), getattr(pred, "score", 0)
                output += f"{i}. **{token.strip()}** ({score:.3f})\n"
            return output
        return str(result)
    except Exception as e:
        return f"❌ Error: {str(e)}"
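
# Example (sketch): each handler can also be exercised directly, outside the UI,
# assuming HF_API_TOKEN is set in the environment:
#   print(run_fill_mask("The capital of France is [MASK]."))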
def run_question_answering(question, context):
    """Q&A function."""
    if not CLIENT or not question or not context:
        return "❌ Client not available or missing input"
    try:
        answer = CLIENT.question_answering(
            question=question,
            context=context,
            model="deepset/roberta-base-squad2",
        )
        # Dict in older huggingface_hub versions, dataclass in newer ones.
        answer_text = answer.get("answer", str(answer)) if isinstance(answer, dict) else getattr(answer, "answer", str(answer))
        return f"💡 **Answer:** {answer_text}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
def run_summarization(text):
    """Summarization function."""
    if not CLIENT or len(text.split()) < 10:
        return "❌ Client not available or text too short"
    try:
        result = CLIENT.summarization(text, model="facebook/bart-large-cnn")
        if isinstance(result, list) and result:
            return f"📝 **Summary:** {result[0].get('summary_text', str(result))}"
        # Newer huggingface_hub versions return a dataclass with .summary_text.
        return f"📝 **Summary:** {getattr(result, 'summary_text', str(result))}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
# ──────────────────────────────────────────────────────────────────────────────
# 🎨 MINIMAL GRADIO INTERFACE - SCHEMA-BUG PROOF
# ──────────────────────────────────────────────────────────────────────────────
# Create interface with minimal components to avoid schema bugs
with gr.Blocks(title="AI Research Demo") as demo:
    gr.Markdown("# 🚀 AI Research Demo\n### Working HuggingFace Inference API")
    if CLIENT:
        gr.Markdown("✅ **Status:** Connected and tested successfully")
    else:
        gr.Markdown("❌ **Status:** Set HF_API_TOKEN environment variable")

    # Chat (plain Markdown sections instead of gr.Tab keep the component tree minimal)
    gr.Markdown("## 💬 Chat with AI")
    chat_input = gr.Textbox(label="Your Message", placeholder="Ask anything...")
    chat_output = gr.Textbox(label="AI Response", lines=5)
    chat_btn = gr.Button("Send", variant="primary")
    chat_btn.click(run_chat, inputs=chat_input, outputs=chat_output)

    gr.Markdown("---")

    # Fill Mask
    gr.Markdown("## 🎭 Fill Mask")
    mask_input = gr.Textbox(
        label="Text with [MASK]",
        value="The capital of France is [MASK].",
        placeholder="Enter text with [MASK] token"
    )
    mask_output = gr.Textbox(label="Predictions", lines=6)
    mask_btn = gr.Button("Predict", variant="primary")
    mask_btn.click(run_fill_mask, inputs=mask_input, outputs=mask_output)

    gr.Markdown("---")

    # Q&A
    gr.Markdown("## ❓ Question Answering")
    qa_question = gr.Textbox(label="Question", value="What is AI?")
    qa_context = gr.Textbox(
        label="Context",
        lines=3,
        value="Artificial Intelligence (AI) is the simulation of human intelligence in machines."
    )
    qa_output = gr.Textbox(label="Answer", lines=3)
    qa_btn = gr.Button("Answer", variant="primary")
    qa_btn.click(run_question_answering, inputs=[qa_question, qa_context], outputs=qa_output)

    gr.Markdown("---")

    # Summarization
    gr.Markdown("## 📝 Text Summarization")
    sum_input = gr.Textbox(
        label="Text to Summarize",
        lines=5,
        value="Machine learning is a method of data analysis that automates analytical model building. It is a branch of artificial intelligence based on the idea that systems can learn from data, identify patterns and make decisions with minimal human intervention. The process involves feeding data into algorithms that learn patterns and make predictions or decisions without being explicitly programmed for each specific task."
    )
    sum_output = gr.Textbox(label="Summary", lines=3)
    sum_btn = gr.Button("Summarize", variant="primary")
    sum_btn.click(run_summarization, inputs=sum_input, outputs=sum_output)

    gr.Markdown("---")
    gr.Markdown("**🔧 Setup:** `export HF_API_TOKEN=your_token_here`")
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
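
# Suggested pins for requirements.txt (an assumption: the banner's Gradio
# version, plus a huggingface_hub release new enough for client.chat.completions):
#   gradio==5.0.1
#   huggingface_hub>=0.24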