drewvid committed on
Commit
4518937
·
1 Parent(s): 6926c36

modernised code

Browse files
Files changed (2) hide show
  1. app.py +14 -10
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
@@ -6,7 +7,10 @@ For more information on `huggingface_hub` Inference API support, please check th
6
  """
7
 
8
  # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
- client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 
 
 
10
 
11
 
12
  def respond(
@@ -19,7 +23,7 @@ def respond(
19
 
20
  name = "Ernest"
21
  system_message = f"""As a virtual mentor in cybersecurity called {name}, your role is to provide expert guidance and advice on protecting information and systems from cyber threats. You are an expert in:
22
-
23
  1) Information Security;
24
  2) Network Security;
25
  3) Application Security;
@@ -47,20 +51,20 @@ def respond(
47
 
48
  response = ""
49
 
50
- for message in client.chat_completion(
 
51
  messages,
52
  max_tokens=max_tokens,
53
- stream=True,
54
  temperature=temperature,
55
  top_p=top_p,
 
56
  ):
57
- if message.choices:
58
- token = message.choices[0].delta.content
59
- if token:
60
- response += token
61
  yield response
62
- else:
63
- yield "Please clear the history and try again."
64
 
65
  """
66
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 
1
+ import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
 
7
  """
8
 
9
  # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
10
+ client = InferenceClient(
11
+ "meta-llama/Meta-Llama-3-8B-Instruct",
12
+ token=os.environ.get("HF_TOKEN")
13
+ )
14
 
15
 
16
  def respond(
 
23
 
24
  name = "Ernest"
25
  system_message = f"""As a virtual mentor in cybersecurity called {name}, your role is to provide expert guidance and advice on protecting information and systems from cyber threats. You are an expert in:
26
+
27
  1) Information Security;
28
  2) Network Security;
29
  3) Application Security;
 
51
 
52
  response = ""
53
 
54
+ # Stream the model output safely
55
+ for msg in client.chat_completion(
56
  messages,
57
  max_tokens=max_tokens,
 
58
  temperature=temperature,
59
  top_p=top_p,
60
+ stream=True,
61
  ):
62
+ if hasattr(msg, "choices") and msg.choices:
63
+ delta = msg.choices[0].delta
64
+ if hasattr(delta, "content") and delta.content:
65
+ response += delta.content
66
  yield response
67
+ # Ignore any events that do not contain content
 
68
 
69
  """
70
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
requirements.txt CHANGED
@@ -1 +1,2 @@
1
- huggingface_hub==0.22.2
 
 
1
+ gradio>=4.44.0
2
+ huggingface_hub>=0.23.0