File size: 1,260 Bytes
10e9b7d
 
eccf8e4
7d65c66
3c4371f
be3b5a2
 
 
 
 
4b8b9e9
 
d59f015
e80aab9
3db6293
c89d174
b2ab5d7
 
 
 
 
 
4b8b9e9
b2ab5d7
 
 
4b8b9e9
 
b2ab5d7
 
4b8b9e9
 
 
 
 
b2ab5d7
4b8b9e9
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import os
import gradio as gr
import requests
import inspect
import pandas as pd
from smolagents.models import InferenceClientModel
from smolagents import ToolCallingAgent
from smolagents import DuckDuckGoSearchTool
from smolagents import Tool
import traceback
from huggingface_hub import InferenceClient, HfApi
from huggingface_hub.utils import HfHubHTTPError
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Read the HF access token once from the environment; never hard-code it.
# (InferenceClient and HfApi are already imported at the top of the file —
# the previous duplicate `from huggingface_hub import ...` line was removed.)
api_key = os.getenv("chatbotagenthf")
print("Token loaded:", bool(api_key))

api = HfApi()
client = InferenceClient(token=api_key)

# Example list of chat/instruction models to test.
# NOTE: the redundant `client = InferenceClient()` / `api = HfApi()` pair that
# used to follow this list was removed — it silently replaced the token-bearing
# client created above with an anonymous one, breaking access to gated models.
models = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    "HuggingFaceH4/zephyr-7b-beta",
    "google/gemma-2b-it",
    "tiiuae/falcon-7b-instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
]

# Probe each candidate model: ask the Hub whether the repo exists and is
# accessible with the configured token. Gated/private repos raise
# HfHubHTTPError, which we report without aborting the remaining checks.
for model in models:
    print(f"🔎 Testing {model}...")
    try:
        # just check if model exists and is accessible
        # BUG FIX: use the real env token instead of the literal placeholder
        # string "your_HF_token_here", which made every gated model fail.
        api.model_info(model, token=api_key)
        print(f"✅ Accessible: {model}")
    except HfHubHTTPError as e:
        print(f"❌ Not accessible: {model} -> {e}")