Hugging Face Spaces page header (scrape residue) — Space status: Sleeping.
import os

from huggingface_hub import InferenceClient
def test_ai():
    """Smoke-test the HuggingFaceH4/zephyr-7b-beta chat endpoint.

    Sends a fixed two-message conversation through the Hugging Face
    ``InferenceClient`` and prints the model's reply. Any request
    failure is reported on stdout instead of raising, since this is a
    diagnostic script.

    Returns:
        str | None: the model's reply text on success, ``None`` when
        the request failed.
    """
    print("Testing Zephyr-7B Model via InferenceClient...")

    token = os.getenv('HF_TOKEN')
    if not token:
        # Warn up front: without a token the call fails later with an
        # opaque authentication error.
        print("Warning: HF_TOKEN environment variable is not set.")

    client = InferenceClient(
        model="HuggingFaceH4/zephyr-7b-beta",
        token=token,
    )
    messages = [
        {"role": "system", "content": "You are a helpful AI."},
        {"role": "user", "content": "Hello! Are you working?"},
    ]
    try:
        response = client.chat_completion(
            messages,
            max_tokens=50,
            temperature=0.7,
        )
        print("✅ API Success!")
        content = response.choices[0].message.content
        print(f"Response: {content}")
        return content
    except Exception as e:
        # Broad catch is deliberate: this is a top-level diagnostic
        # boundary and any network/auth/model error should be printed,
        # not crash the script.
        print(f"❌ Exception: {e}")
        return None
# Script entry point: run the connectivity smoke test when executed
# directly (not on import).
if __name__ == "__main__":
    test_ai()