andreska committed on
Commit
9e80ceb
·
verified ·
1 Parent(s): c1c4977

Test Inference with default code for Qwen

Browse files
Files changed (1) hide show
  1. app.py +27 -5
app.py CHANGED
@@ -1,12 +1,34 @@
1
  import os
2
- from huggingface_hub import InferenceApi
3
- import streamlit as st
4
 
5
- x = st.slider('Select a value')
6
- st.write(x, 'squared is', x * x)
7
 
8
  # Get the API key from the environment variable
9
  api_key = os.getenv("HF_API_KEY")
10
 
11
  # Initialize the Inference API with your model and API key
12
- api = InferenceApi(repo_id="gpt-3", token=api_key)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Send one chat-completion request to Qwen via the Hugging Face Inference API.

Reads the Hugging Face API token from the ``HF_API_KEY`` environment
variable, asks the model a single question, and prints the reply message.
"""

import os

from huggingface_hub import InferenceClient


def main() -> None:
    """Run a single chat request and print the model's response message."""
    # Get the API key from the environment variable. Fail fast with a clear
    # message instead of passing None to the client and getting an opaque
    # authentication error from the remote API later.
    api_key = os.getenv("HF_API_KEY")
    if not api_key:
        raise SystemExit("HF_API_KEY environment variable is not set")

    # Initialize the Inference client with the API key.
    client = InferenceClient(api_key=api_key)

    messages = [
        {
            "role": "user",
            "content": "What is the capital of France?",
        }
    ]

    # max_tokens bounds the length of the generated reply.
    completion = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=messages,
        max_tokens=500,
    )

    # Print the full message object (role + content), matching the
    # behavior of the original script.
    print(completion.choices[0].message)


if __name__ == "__main__":
    main()