Spaces:
Sleeping
Sleeping
"""Send a single chat-completion request to a hosted Hugging Face model.

Reads the API key from the HF_API_KEY environment variable, asks
Qwen2.5-Coder-32B-Instruct one question, and prints the reply text.
"""
import os

from huggingface_hub import InferenceClient

# Fail fast with a clear local error if the key is missing, instead of
# sending an unauthenticated request and getting an opaque server error.
api_key = os.getenv("HF_API_KEY")
if not api_key:
    raise RuntimeError("HF_API_KEY environment variable is not set")

client = InferenceClient(api_key=api_key)

messages = [
    {
        "role": "user",
        "content": "What is the capital of France?",
    }
]

# OpenAI-compatible chat-completion call provided by huggingface_hub.
completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=messages,
    max_tokens=500,
)

# Print just the assistant's reply text, not the whole message object repr.
print(completion.choices[0].message.content)