import json
import logging
import os

import gradio as gr

try:
    from huggingface_hub import InferenceClient
    HF_AVAILABLE = True
except ImportError:
    HF_AVAILABLE = False

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

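# The Inference API client is created once at import time. It needs the
# HF_API_TOKEN environment variable and is smoke-tested with a fill-mask call;
# if either step fails, CLIENT stays None and the UI shows the setup hint.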
def get_client():
    """Create a HuggingFace InferenceClient and verify it with a test call."""
    api_token = os.getenv("HF_API_TOKEN")

    if not HF_AVAILABLE or not api_token:
        return None

    try:
        client = InferenceClient(
            provider="hf-inference",
            api_key=api_token,
        )
        # Quick smoke test so a bad token fails here rather than in the UI.
        test_result = client.fill_mask(
            "The capital of France is [MASK].",
            model="google-bert/bert-base-uncased",
        )
        logger.info(f"✅ Client test successful: {type(test_result)}")
        return client
    except Exception as e:
        logger.error(f"❌ Client failed: {e}")
        return None


CLIENT = get_client()

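# --- Inference task wrappers called by the Gradio UI below ---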
def run_chat(message):
    """Send a single-turn chat message to the Qwen2.5-72B-Instruct model."""
    if not CLIENT:
        return "❌ Client not available"

    try:
        messages = [{"role": "user", "content": message}]
        completion = CLIENT.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {str(e)}"

def run_fill_mask(text):
    """Predict the [MASK] token with BERT and list the top candidates."""
    if not CLIENT:
        return "❌ Client not available"

    if "[MASK]" not in text:
        return "❌ Text must contain [MASK]"

    try:
        result = CLIENT.fill_mask(text, model="google-bert/bert-base-uncased")
        if isinstance(result, list):
            output = "🔍 **Predictions:**\n"
            for i, pred in enumerate(result[:5], 1):
                # Depending on the huggingface_hub version, predictions come
                # back as dicts or as dataclass objects.
                if isinstance(pred, dict):
                    token = pred.get("token_str", "").strip()
                    score = pred.get("score", 0)
                else:
                    token = getattr(pred, "token_str", "").strip()
                    score = getattr(pred, "score", 0)
                output += f"{i}. **{token}** ({score:.3f})\n"
            return output
        return str(result)
    except Exception as e:
        return f"❌ Error: {str(e)}"

def run_question_answering(question, context):
    """Answer a question about the given context with a SQuAD2 model."""
    if not CLIENT or not question or not context:
        return "❌ Client not available or missing input"

    try:
        answer = CLIENT.question_answering(
            question=question,
            context=context,
            model="deepset/roberta-base-squad2",
        )
        # The result may be a dict or a dataclass object with an `answer` field.
        text = answer.get("answer") if isinstance(answer, dict) else getattr(answer, "answer", None)
        return f"💡 **Answer:** {text or str(answer)}"
    except Exception as e:
        return f"❌ Error: {str(e)}"

def run_summarization(text):
    """Summarize the input text with BART; require at least ten words."""
    if not CLIENT or len(text.split()) < 10:
        return "❌ Client not available or text too short"

    try:
        result = CLIENT.summarization(text, model="facebook/bart-large-cnn")
        # The result may be a list of dicts, a single dict, or a dataclass
        # object exposing `summary_text`, depending on the library version.
        if isinstance(result, list) and result:
            result = result[0]
        if isinstance(result, dict):
            summary = result.get("summary_text", str(result))
        else:
            summary = getattr(result, "summary_text", str(result))
        return f"📝 **Summary:** {summary}"
    except Exception as e:
        return f"❌ Error: {str(e)}"

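# --- Gradio UI: chat, fill-mask, question answering, and summarization demos ---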
with gr.Blocks(title="AI Research Demo") as demo:

    gr.Markdown("# 🚀 AI Research Demo\n### Working HuggingFace Inference API")

    if CLIENT:
        gr.Markdown("✅ **Status:** Connected and tested successfully")
    else:
        gr.Markdown("❌ **Status:** Set HF_API_TOKEN environment variable")

    gr.Markdown("## 💬 Chat with AI")
    chat_input = gr.Textbox(label="Your Message", placeholder="Ask anything...")
    chat_output = gr.Textbox(label="AI Response", lines=5)
    chat_btn = gr.Button("Send", variant="primary")
    chat_btn.click(run_chat, inputs=chat_input, outputs=chat_output)

    gr.Markdown("---")

    gr.Markdown("## 🔍 Fill Mask")
    mask_input = gr.Textbox(
        label="Text with [MASK]",
        value="The capital of France is [MASK].",
        placeholder="Enter text with [MASK] token",
    )
    mask_output = gr.Textbox(label="Predictions", lines=6)
    mask_btn = gr.Button("Predict", variant="primary")
    mask_btn.click(run_fill_mask, inputs=mask_input, outputs=mask_output)

    gr.Markdown("---")

    gr.Markdown("## ❓ Question Answering")
    qa_question = gr.Textbox(label="Question", value="What is AI?")
    qa_context = gr.Textbox(
        label="Context",
        lines=3,
        value="Artificial Intelligence (AI) is the simulation of human intelligence in machines.",
    )
    qa_output = gr.Textbox(label="Answer", lines=3)
    qa_btn = gr.Button("Answer", variant="primary")
    qa_btn.click(run_question_answering, inputs=[qa_question, qa_context], outputs=qa_output)

    gr.Markdown("---")

    gr.Markdown("## 📝 Text Summarization")
    sum_input = gr.Textbox(
        label="Text to Summarize",
        lines=5,
        value=(
            "Machine learning is a method of data analysis that automates analytical "
            "model building. It is a branch of artificial intelligence based on the "
            "idea that systems can learn from data, identify patterns and make "
            "decisions with minimal human intervention. The process involves feeding "
            "data into algorithms that learn patterns and make predictions or "
            "decisions without being explicitly programmed for each specific task."
        ),
    )
    sum_output = gr.Textbox(label="Summary", lines=3)
    sum_btn = gr.Button("Summarize", variant="primary")
    sum_btn.click(run_summarization, inputs=sum_input, outputs=sum_output)

    gr.Markdown("---")
    gr.Markdown("**🔧 Setup:** `export HF_API_TOKEN=your_token_here`")

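# Launch: set HF_API_TOKEN, then run this script; the app serves on 0.0.0.0:7860.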
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )