"""Query a chat model through the Hugging Face Inference Providers API.

Sends a single user message to meta-llama/Llama-3.3-70B-Instruct via the
"groq" provider and prints the assistant's reply message.

Requires the HF_TOKEN environment variable to hold a valid Hugging Face
access token.
"""
import os

from huggingface_hub import InferenceClient

# Route requests through the "groq" inference provider; authenticate with
# the user's Hugging Face token. A missing HF_TOKEN raises KeyError here,
# which fails fast with a clear cause rather than a confusing 401 later.
client = InferenceClient(
    provider="groq",
    api_key=os.environ["HF_TOKEN"],
)

completion = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct",
    messages=[
        {
            "role": "user",
            "content": "What is the capital of France?",
        }
    ],
)

# choices[0].message is the assistant's reply object (role + content).
print(completion.choices[0].message)