abhinema committed on
Commit c0ad404 · 1 Parent(s): 2aa6c9f

Update with token.

Files changed (1)
  1. app.py +10 -68
app.py CHANGED
@@ -1,77 +1,19 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
 import os
+from huggingface_hub import InferenceClient
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-
-hf_token = os.getenv("HF_TOKEN")
-
-# Add this to your app's UI for debugging
-if hf_token:
-    print("✅ HF_TOKEN was found!")
-    # Optional: Display the first few characters to confirm it's not empty
-    # st.write(f"Token starts with: {hf_token[:4]}...")
-else:
-    print("❌ HF_TOKEN not found in environment variables!")
 
 client = InferenceClient(
     provider="groq",
-    api_key=hf_token,
+    api_key=os.environ["HF_TOKEN"],
 )
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+completion = client.chat.completions.create(
+    model="meta-llama/Llama-3.3-70B-Instruct",
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?"
+        }
     ],
 )
 
-
-if __name__ == "__main__":
-    demo.launch()
+print(completion.choices[0].message)
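
Note: the updated app.py reads the token with os.environ["HF_TOKEN"], which raises a KeyError if the Space secret or environment variable is missing, whereas the removed lines fell back to a printed warning via os.getenv. A minimal sketch of a lookup that keeps that explicit check while still failing loudly; this is illustrative only and not part of the commit:

    import os
    from huggingface_hub import InferenceClient

    # Mirror the removed debug check, but stop with a clear error instead of continuing.
    hf_token = os.getenv("HF_TOKEN")
    if hf_token:
        print("✅ HF_TOKEN was found!")
    else:
        raise RuntimeError("❌ HF_TOKEN not found in environment variables!")

    client = InferenceClient(provider="groq", api_key=hf_token)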