Spaces:
Sleeping
Sleeping
File size: 911 Bytes
842ca78 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 |
"""Smoke-test HuggingFaceM4/idefics2-8b through the HF Inference API.

Sends one image-description prompt via the raw ``text_generation`` endpoint
(Idefics2 is not served under the chat-completion task) and prints either
the model's response or the failure reason.
"""
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Pull HF_TOKEN from a local .env file (if present) into the environment.
load_dotenv()
api_key = os.getenv("HF_TOKEN")
if not api_key:
    # The Inference API rejects anonymous calls for most models — warn early
    # instead of passing a silent None to the client.
    print("Warning: HF_TOKEN is not set; the request will likely be rejected.")

client = InferenceClient(api_key=api_key)

model = "HuggingFaceM4/idefics2-8b"
print(f"Testing model: {model}")

image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"

# Idefics2 prompt format (TGI-style):
#   User: <image markdown> <text><end_of_utterance>\nAssistant:
# FIX: the original prompt never referenced the image, so the model had
# nothing to describe. TGI's vision support embeds images as ![](url).
prompt = f"User: ![]({image_url}) Describe this image.<end_of_utterance>\nAssistant:"

print("\n--- Testing with text_generation and specific prompt ---")
print(f"Prompt: {prompt}")

try:
    # Use text_generation for models that don't support the chat task.
    response = client.text_generation(
        prompt=prompt,
        model=model,
        max_new_tokens=100,
    )
    print("Response:", response)
except Exception as e:
    # Best-effort smoke test: report the failure instead of crashing.
    print("Failed:", e)
|