Abu1998 committed on
Commit
c4b605c
·
verified ·
1 Parent(s): 4a32a9f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -42
app.py CHANGED
@@ -1,12 +1,9 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
-
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
@@ -31,63 +28,59 @@ def respond(
31
  response = ""
32
 
33
  # Stream the response from the model
34
- for message in client.chat_completion(
35
  messages,
36
  max_tokens=max_tokens,
37
  stream=True,
38
  temperature=temperature,
39
  top_p=top_p,
40
  ):
41
- token = message.choices[0].delta.content
42
  response += token
43
 
44
  # Validate the response format
45
- if len(response) == 13 and all(c.isdigit() or c in 'CW' for c in response):
46
  return response
47
  else:
48
  return "Invalid response format"
49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
- """
52
- For information on how to customize the ChatInterface, peruse the Gradio docs: https://www.gradio.app/docs/chatinterface
53
- """
54
  demo = gr.ChatInterface(
55
  respond,
56
  additional_inputs=[
57
  gr.Textbox(
58
- value="You are tasked with labeling text data based on both emotion temperature and text type categories. The final output must be a 13-character code that consists of the following structure:\n\n"
59
- "1. Emotion Temperature Code (2 characters):\n"
60
- " - If the emotion is purely Cold: Use CC\n"
61
- " - If the emotion is purely Warm: Use WW\n"
62
- " - If the emotion is purely Hot: Use HH\n"
63
- " - If the emotion is a mix, use one of the following:\n"
64
- " - Cold and Warm: Use CW\n"
65
- " - Warm and Hot: Use WH\n"
66
- " - Cold and Hot: Use CH\n\n"
67
- "2. Text Type Codes (next 9 digits):\n"
68
- " Assign a digit for each of the following categories based on the presence in the text. Use 0 for categories not applicable:\n"
69
- " 1: Toxic\n"
70
- " 2: Appreciation\n"
71
- " 3: Constructive Criticism\n"
72
- " 4: Genuine Questions\n"
73
- " 5: Advice/Suggestions\n"
74
- " 6: Requests\n"
75
- " 7: Spam\n"
76
- " 8: Off-Topic\n"
77
- " 9: Engagement Boosters\n\n"
78
- "3. Special Categories (last 2 digits):\n"
79
- " If the text is Neutral/General: Set the 10th digit to 1; otherwise, set it to 0.\n"
80
- " If the text contains Hate: Set the last digit (11th) to 1; otherwise, set it to 0.\n\n"
81
- "Example:\n"
82
- "For the text 'I love your videos but still something is missing':\n"
83
- " - Emotion: Cold and Warm (CW)\n"
84
- " - Types Detected: 2 (Appreciation), 3 (Constructive Criticism), 5 (Advice/Suggestions)\n"
85
- " - Special Categories: Neutral/General (set the 10th digit to 1), no Hate\n\n"
86
- "The output would be: CW02305000010\n\n"
87
- "Output Format:\n"
88
- "Always return a 13-character code following this structure.",
89
  label="Instructions",
90
- lines=10,
91
  ),
92
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
93
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ # Initialize the inference client with the model
 
 
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
 
7
  def respond(
8
  message,
9
  history: list[tuple[str, str]],
 
28
  response = ""
29
 
30
  # Stream the response from the model
31
+ for msg in client.chat_completion(
32
  messages,
33
  max_tokens=max_tokens,
34
  stream=True,
35
  temperature=temperature,
36
  top_p=top_p,
37
  ):
38
+ token = msg.choices[0].delta.content
39
  response += token
40
 
41
  # Validate the response format
42
+ if len(response) == 13 and all(c.isdigit() for c in response):
43
  return response
44
  else:
45
  return "Invalid response format"
46
 
47
+ # Instructions for the model
48
+ instructions = (
49
+ "You are tasked with labeling text data based on both emotion temperature and text type categories. "
50
+ "The final output must be a 13-character code that consists of the following structure:\n\n"
51
+ " 0 index : Emotion Temperature code\n"
52
+ " 1 index : Informative\n"
53
+ " 2 index : Hate\n"
54
+ " 3 index : Toxic\n"
55
+ " 4 index : Appreciation\n"
56
+ " 5 index : Constructive Criticism\n"
57
+ " 6 index : Genuine Questions\n"
58
+ " 7 index : Advice/Suggestions\n"
59
+ " 8 index : Requests\n"
60
+ " 9 index : Spam\n"
61
+ " 10 index : Off-Topic\n"
62
+ " 11 index : Engagement Boosters\n"
63
+ " 12 index : Neutral/General\n\n"
64
+ "Every index should have a number between 0-9. 0 means not applicable, 4 means normal, 9 means high. "
65
+ "Choose appropriate numbers to showcase how much each category is related to the text input.\n\n"
66
+ "Example:\n"
67
+ "For the text 'I love your videos but still something is missing':\n"
68
+ " - Emotion: Cold and Warm (CW)\n"
69
+ " - Types Detected: 2 (Appreciation), 3 (Constructive Criticism), 5 (Advice/Suggestions)\n"
70
+ " - Special Categories: Neutral/General (set the 10th digit to 1), no Hate\n\n"
71
+ "The output would be: CW02305000010\n\n"
72
+ "Output Format:\n"
73
+ "Always return a 13-character code following this structure."
74
+ )
75
 
76
+ # Create the Gradio interface
 
 
77
  demo = gr.ChatInterface(
78
  respond,
79
  additional_inputs=[
80
  gr.Textbox(
81
+ value=instructions,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  label="Instructions",
83
+ lines=15,
84
  ),
85
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
86
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),