Abu1998 commited on
Commit
aebf794
·
verified ·
1 Parent(s): 638cb5b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -39
app.py CHANGED
@@ -1,42 +1,106 @@
1
- # app.py
2
-
3
  import gradio as gr
4
- from transformers import AutoTokenizer, AutoModelForCausalLM
5
- import torch
6
- import numpy as np
7
-
8
- # Load the model and tokenizer
9
- model_name = "HuggingFaceH4/zephyr-7b-beta" # Replace this with the actual model path
10
- tokenizer = AutoTokenizer.from_pretrained(model_name)
11
- model = AutoModelForCausalLM.from_pretrained(model_name)
12
-
13
- def get_category_scores(text):
14
- # Tokenize input text
15
- inputs = tokenizer(text, return_tensors="pt")
16
-
17
- # Generate model outputs
18
- with torch.no_grad():
19
- outputs = model(**inputs)
20
-
21
- # Process model output to extract labels
22
- # For demonstration, we'll mock the extraction with random values
23
- # Replace this part with actual logic based on model output
24
-
25
- # Example random values for demonstration (these should be extracted from model)
26
- scores = np.random.randint(0, 10, size=13) # Replace with actual logic
27
-
28
- # Convert scores to 13-digit code
29
- result_code = ''.join(map(str, scores))
30
- return result_code
31
-
32
- # Define the Gradio interface
33
- iface = gr.Interface(
34
- fn=get_category_scores,
35
- inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
36
- outputs="text",
37
- title="Text Labeling",
38
- description="Label text data based on emotion temperature and categories. Returns a 13-digit code."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  )
40
 
41
- # Launch the interface
42
- iface.launch()
 
 
 
import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Serverless Inference API client bound to the Zephyr-7B chat model.
# NOTE(review): no token is passed, so this relies on the ambient HF
# credentials / anonymous rate limits of the Space it runs in.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
+
9
+
10
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, given the prior *history*.

    Args:
        message: The latest user message to answer.
        history: Prior (user, assistant) turn pairs; empty entries are skipped.
        system_message: System prompt placed first in the message list.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling threshold forwarded to the model.

    Yields:
        The accumulated response text after each streamed token, so the
        Gradio chat UI can render the reply incrementally.
    """
    # Start with the system prompt, then replay the conversation history.
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Add the current user message last.
    messages.append({"role": "user", "content": message})

    response = ""

    # Stream the reply. The loop variable is `chunk`, not `message` -- the
    # original shadowed the `message` parameter here.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry content=None (e.g. role-only or final
        # chunks); guard so we never do `str + None` and raise TypeError.
        response += token or ""
        yield response
44
+
45
+
46
+ """
47
+ For information on how to customize the ChatInterface, peruse the Gradio docs: https://www.gradio.app/docs/chatinterface
48
+ """
49
# Chat UI wired to `respond`. The additional inputs appear below the chat box
# and are passed to `respond` positionally as
# (system_message, max_tokens, temperature, top_p).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # System prompt: the labeling instruction the model must follow.
        # This text is sent to the model verbatim -- do not reformat it.
        gr.Textbox(
            value="""You are tasked with labeling text data based on both emotion temperature and text type categories. The final output must be a 13-character code that consists of the following structure:

1. Emotion Temperature Code (2 characters):
- If the emotion is purely Cold: Use CC
- If the emotion is purely Warm: Use WW
- If the emotion is purely Hot: Use HH
- If the emotion is a mix, use one of the following:
- Cold and Warm: Use CW
- Warm and Hot: Use WH
- Cold and Hot: Use CH

2. Text Type Codes (next 9 digits):
Assign a digit for each of the following categories based on the presence in the text. Use 0 for categories not applicable:
1: Toxic
2: Appreciation
3: Constructive Criticism
4: Genuine Questions
5: Advice/Suggestions
6: Requests
7: Spam
8: Off-Topic
9: Engagement Boosters

3. Special Categories (last 2 digits):
If the text is Neutral/General: Set the 10th digit to 1; otherwise, set it to 0.
If the text contains Hate: Set the last digit (11th) to 1; otherwise, set it to 0.

Example:
For the text "I love your videos but still something is missing":
- Emotion: Cold and Warm (CW)
- Types Detected: 2 (Appreciation), 3 (Constructive Criticism), 5 (Advice/Suggestions)
- Special Categories: Neutral/General (set the 10th digit to 1), no Hate

The output would be: CW02305000010

Output Format:
Always return a 13-character code following this structure.""",
            label="Instruction",
            lines=10,
        ),
        # Generation controls forwarded unchanged to the Inference API.
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
104
 
105
# Launch the Gradio app only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()