Kim Adams committed on
Commit
f3f4123
·
1 Parent(s): 11ebc83

slack sentiment component

Browse files
app.py CHANGED
@@ -19,8 +19,8 @@ video_analysis=ui_recording_analysis.ui
19
  slack_sentiment=ui_sentiment_analysis.ui
20
  usaa_advisor=ui_simple_chat.ui
21
 
22
- ui = gr.TabbedInterface([prompt_builder, summarize, image_generator, video_analysis, usaa_advisor],
23
- (constants.UI_1, constants.UI_2, constants.UI_3,constants.UI_4, constants.UI_6),
24
  theme=SoftBlue())
25
 
26
  ui.launch()
 
19
  slack_sentiment=ui_sentiment_analysis.ui
20
  usaa_advisor=ui_simple_chat.ui
21
 
22
+ ui = gr.TabbedInterface([prompt_builder, summarize, image_generator, video_analysis,slack_sentiment, usaa_advisor],
23
+ (constants.UI_1, constants.UI_2, constants.UI_3,constants.UI_4, constants.UI_5,constants.UI_6),
24
  theme=SoftBlue())
25
 
26
  ui.launch()
embedding_tools/__pycache__/create_embedding.cpython-311.pyc CHANGED
Binary files a/embedding_tools/__pycache__/create_embedding.cpython-311.pyc and b/embedding_tools/__pycache__/create_embedding.cpython-311.pyc differ
 
embedding_tools/__pycache__/embeddings.cpython-311.pyc CHANGED
Binary files a/embedding_tools/__pycache__/embeddings.cpython-311.pyc and b/embedding_tools/__pycache__/embeddings.cpython-311.pyc differ
 
image_gen/__pycache__/image_generation.cpython-311.pyc CHANGED
Binary files a/image_gen/__pycache__/image_generation.cpython-311.pyc and b/image_gen/__pycache__/image_generation.cpython-311.pyc differ
 
prompts/__pycache__/system_prompts.cpython-311.pyc CHANGED
Binary files a/prompts/__pycache__/system_prompts.cpython-311.pyc and b/prompts/__pycache__/system_prompts.cpython-311.pyc differ
 
prompts/__pycache__/user_prompts.cpython-311.pyc CHANGED
Binary files a/prompts/__pycache__/user_prompts.cpython-311.pyc and b/prompts/__pycache__/user_prompts.cpython-311.pyc differ
 
slack_sentiment_analysis/__pycache__/sentiment_analysis.cpython-311.pyc CHANGED
Binary files a/slack_sentiment_analysis/__pycache__/sentiment_analysis.cpython-311.pyc and b/slack_sentiment_analysis/__pycache__/sentiment_analysis.cpython-311.pyc differ
 
slack_sentiment_analysis/__pycache__/ui_sentiment_analysis.cpython-311.pyc CHANGED
Binary files a/slack_sentiment_analysis/__pycache__/ui_sentiment_analysis.cpython-311.pyc and b/slack_sentiment_analysis/__pycache__/ui_sentiment_analysis.cpython-311.pyc differ
 
slack_sentiment_analysis/sentiment_analysis.py CHANGED
@@ -1,140 +1,90 @@
1
- import openai, json
2
  import pandas as pd
3
- from utilities import prompt_constants, constants
4
- from datetime import datetime
5
 
6
- def Completion(summary_messages):
7
  response = openai.ChatCompletion.create(
8
  model="gpt-3.5-turbo",
9
- messages=summary_messages)
 
 
10
  print("response")
11
- print(response.choices)
12
- return json.dumps(response["choices"][0]["message"]["content"])
13
 
14
  def sanitize_blob(blob_str):
15
- import re
16
  return re.sub(r"(?<=: )'", '"', re.sub(r"'(?=:)", '"', blob_str))
17
 
18
- def ReduceBlob(blob):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  sanitized_blob = sanitize_blob(blob)
20
  try:
21
  response_data = json.loads(sanitized_blob)
22
  except json.JSONDecodeError:
23
  print("Invalid JSON format.")
24
  return None
 
 
 
25
 
26
- messages = []
27
  for message in response_data["messages"]:
28
  user = message["user"]
29
  message_text = message["text"]
30
  timestamp = message["timestamp"]
 
 
 
 
 
 
31
 
32
- print(f"user: {user} message: {message_text} timestamp: {timestamp}")
 
 
 
 
 
 
33
 
34
  message_obj = {
35
  "user": user,
36
  "message": f"{message_text}",
37
- "timestamp": f"{timestamp}"
38
- }
39
- messages.append(message_obj)
40
-
41
- print("messages")
42
- print(messages)
43
- jsonobj=json.dumps(messages, ensure_ascii=False)
44
- print("jsonobj")
45
- print(jsonobj)
46
- return jsonobj
47
-
48
- '''def ValidateJSON(json_str):
49
- valid_objects = []
50
- open_brackets = 0
51
- start_index = 0
52
-
53
- for i, char in enumerate(json_str):
54
- if char == '{':
55
- if open_brackets == 0:
56
- start_index = i
57
- open_brackets += 1
58
- elif char == '}':
59
- open_brackets -= 1
60
- if open_brackets == 0:
61
- try:
62
- valid_object = json.loads(json_str[start_index:i+1])
63
- valid_objects.append(valid_object)
64
- except json.JSONDecodeError:
65
- break
66
- print("returning"+ str(valid_objects))
67
- return valid_objects'''
68
-
69
- def ValidateJSON(data):
70
- stack = []
71
- well_formed_jsons = []
72
- start_index = 0
73
- json_list = []
74
-
75
- for i, c in enumerate(data):
76
- if c == '{':
77
- stack.append('{')
78
- elif c == '}':
79
- if stack and stack[-1] == '{':
80
- stack.pop()
81
-
82
- if not stack:
83
- if start_index <= i:
84
- potential_json_str = data[start_index:i+1]
85
- try:
86
- json_object = json.loads(potential_json_str)
87
- well_formed_jsons.append(json_object)
88
- start_index = i + 1
89
- except json.JSONDecodeError:
90
- start_index = i + 1
91
- continue
92
-
93
- if well_formed_jsons:
94
- json_list.append(well_formed_jsons)
95
-
96
- response = json.dumps(json_list)
97
- print("returning"+ str(response))
98
- return response
99
-
100
- def CreateSentimentDF(sentiments):
101
- users = []
102
- sentiment_labels = []
103
- sentimentsJSON = json.loads(sentiments)
104
-
105
- print("sentimentsJSON: ")
106
- print(sentimentsJSON)
107
- for sentiment_obj in sentimentsJSON:
108
- users.append(sentiment_obj["user"])
109
- sentiment_labels.append(sentiment_obj["sentiment"])
110
-
111
- # Append the user and sentiment as a new row to the DataFrame
112
- df = pd.DataFrame({"User": users, "Sentiment": sentiment_labels})
113
- return df
114
 
115
  def AnalyzeSentiment(blob):
116
- slack_blob=ReduceBlob(blob)
117
 
118
- summary_messages = []
119
- summary_messages.append({"role": "system", "content": prompt_constants.SLACK_SENTIMENT_SYSTEM_PROMPT})
120
- summary_messages.append({"role": "user", "content": slack_blob})
121
 
122
- response_message = Completion(summary_messages)
123
- verify_response=ValidateJSON(response_message)
124
- summary_messages.append({"role": "assistant", "content": verify_response})
125
-
126
- print("verify_response")
127
- print(verify_response)
128
- #3: find topics using OpenAI gpt-3.5-turbo
129
- #sentiments = SortSentiment(response_message)
130
- df=CreateSentimentDF(verify_response)
131
 
 
132
  print("DataFrame:")
133
  print(df)
134
-
135
- print("Sentiment Counts:")
136
- for sentiment, counts in df.items():
137
- print(sentiment, counts)
138
-
139
- return summary_messages, verify_response, df
140
 
 
 
 
 
 
1
+ import openai, json, re
2
  import pandas as pd
3
+ from utilities import prompt_constants
 
4
 
5
+ def Completion(slack_message):
6
  response = openai.ChatCompletion.create(
7
  model="gpt-3.5-turbo",
8
+ messages=[
9
+ {"role": "system", "content": prompt_constants.SLACK_SENTIMENT_SYSTEM_PROMPT},
10
+ {"role": "user", "content": slack_message} ])
11
  print("response")
12
+ print(response["choices"][0]["message"]["content"])
13
+ return response["choices"][0]["message"]["content"]
14
 
15
  def sanitize_blob(blob_str):
 
16
  return re.sub(r"(?<=: )'", '"', re.sub(r"'(?=:)", '"', blob_str))
17
 
18
+ def FindScore(response):
19
+ match = re.search(r"\b(0(\.\d+)?|1(\.0+)?)\b", response)
20
+ print("after match, match:"+ str(match))
21
+ if match:
22
+ #return float(match.group(1))
23
+ return round(float(match.group(1)), 2)
24
+ else:
25
+ return 0
26
+
27
+ def CheckType(response):
28
+ if isinstance(response, float):
29
+ return round(response, 2)
30
+ elif isinstance(response, str):
31
+ return FindScore(response)
32
+
33
+ def ReduceBlob(blob, summary_messages):
34
  sanitized_blob = sanitize_blob(blob)
35
  try:
36
  response_data = json.loads(sanitized_blob)
37
  except json.JSONDecodeError:
38
  print("Invalid JSON format.")
39
  return None
40
+
41
+ slack_messages = []
42
+ summary_messages.append({"role": "system", "content": prompt_constants.SLACK_SENTIMENT_SYSTEM_PROMPT})
43
 
 
44
  for message in response_data["messages"]:
45
  user = message["user"]
46
  message_text = message["text"]
47
  timestamp = message["timestamp"]
48
+ response = Completion(message_text)
49
+ summary_messages.append({"role": "user", "content": message_text})
50
+ summary_messages.append({"role": "assistant", "content": response})
51
+
52
+ sentiment_score=CheckType(response)
53
+ print("sentiment_score: " + str(sentiment_score))
54
 
55
+ sentiment="Neutral"
56
+ if sentiment_score==0:
57
+ sentiment="Undefined"
58
+ elif sentiment_score>0 and sentiment_score<0.3:
59
+ sentiment="Negative"
60
+ elif sentiment_score>0.6:
61
+ sentiment="Positive"
62
 
63
  message_obj = {
64
  "user": user,
65
  "message": f"{message_text}",
66
+ "timestamp": f"{timestamp}",
67
+ "sentiment_score": sentiment_score,
68
+ "sentiment": sentiment,
69
+ "size":5
70
+ }
71
+ slack_messages.append(message_obj)
72
+ jsonobj = json.dumps(slack_messages, ensure_ascii=False)
73
+ return jsonobj,summary_messages
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
  def AnalyzeSentiment(blob):
76
+ summary_messages = []
77
 
78
+ slack_blobs,summary_messages=ReduceBlob(blob,summary_messages)
 
 
79
 
80
+ print("after reduce")
81
+ print (slack_blobs)
 
 
 
 
 
 
 
82
 
83
+ df = pd.DataFrame(summary_messages)
84
  print("DataFrame:")
85
  print(df)
 
 
 
 
 
 
86
 
87
+ sentimentDF=pd.read_json(slack_blobs)
88
+ print("sentimentDF:")
89
+ print(sentimentDF)
90
+ return df, sentimentDF
slack_sentiment_analysis/ui_sentiment_analysis.py CHANGED
@@ -5,15 +5,12 @@ import os
5
  from utilities import constants
6
  from slack_sentiment_analysis import sentiment_analysis
7
 
 
 
8
  def Summary(code):
9
- sum_messages,sum_text,sDF = sentiment_analysis.AnalyzeSentiment(code)
10
- print("SDF")
11
- print(sDF)
12
- df = pd.DataFrame(sum_messages)
13
- return sum_text,df,{
14
- "x": ["apple", "banana", "cherry"],
15
- "y": [1, 2, 3]
16
- }
17
 
18
  def InitDF():
19
  global commDF
@@ -34,24 +31,25 @@ with gr.Blocks() as ui:
34
  with gr.Column():
35
  gr.Markdown(constants.DIRECTIONS_MD)
36
  gr.Markdown(value=constants.SLACK_SENTIMENT_ANALYSIS_DIRECTIONS)
37
- with gr.Row():
38
- with gr.Column():
39
- code=gr.Textbox(label=constants.ORIGINAL_DOC, interactive=True, placeholder=constants.SUMMARY_MD_DIRECTIONS, lines=3)
40
- with gr.Column():
41
- sentiment=gr.HTML(wrap=True, label=constants.SUMMARY)
42
  gr.Markdown(constants.CODE_DOC_EXAMPLES_MD)
43
  with gr.Row():
44
- examples = gr.Radio([constants.JSON_1, constants.JSON_2],
45
  show_label=False, info=constants.ANALYZE)
46
  with gr.Row():
47
  sentimentize=gr.Button(value=constants.ANALYZE_SENTIMENT, variant="primary")
48
  with gr.Column():
49
  gr.Markdown(constants.TOPICS_MD)
50
- sentimentPlot= gr.BarPlot ()
 
 
 
51
  with gr.Row():
52
  commDF=gr.DataFrame(type="pandas", value=pd.DataFrame({"role": [""], "content": [""] }),
53
  wrap=True, show_label=False, label=constants.OPENAI_LOG)
54
  sentimentize.click(Summary, inputs=[code], outputs=[sentiment,commDF,sentimentPlot])
55
  examples.input(UpdateWithExample,inputs=[examples], outputs=[code])
56
 
57
- InitDF()
 
5
  from utilities import constants
6
  from slack_sentiment_analysis import sentiment_analysis
7
 
8
+ sentimentData=None
9
+
10
  def Summary(code):
11
+ global sentimentData
12
+ sum_messages,sentimentData = sentiment_analysis.AnalyzeSentiment(code)
13
+ return sentimentData,sum_messages,sentimentData
 
 
 
 
 
14
 
15
  def InitDF():
16
  global commDF
 
31
  with gr.Column():
32
  gr.Markdown(constants.DIRECTIONS_MD)
33
  gr.Markdown(value=constants.SLACK_SENTIMENT_ANALYSIS_DIRECTIONS)
34
+
35
+ code=gr.Textbox(label=constants.ORIGINAL_DOC, interactive=True, placeholder=constants.SUMMARY_MD_DIRECTIONS, height="500", lines=3)
36
+ sentiment=gr.DataFrame(wrap=True, label=constants.SUMMARY, height="500")
 
 
37
  gr.Markdown(constants.CODE_DOC_EXAMPLES_MD)
38
  with gr.Row():
39
+ examples = gr.Radio([constants.JSON_1, constants.JSON_2, constants.JSON_3, constants.JSON_4],value=constants.JSON_4,
40
  show_label=False, info=constants.ANALYZE)
41
  with gr.Row():
42
  sentimentize=gr.Button(value=constants.ANALYZE_SENTIMENT, variant="primary")
43
  with gr.Column():
44
  gr.Markdown(constants.TOPICS_MD)
45
+ sentimentPlot = gr.ScatterPlot(value=sentimentData, x="user", y="sentiment",label="Sentiment",
46
+ color_legend_title="Sentiment", color="sentiment", interactive=True,
47
+ height = 500, width=500,
48
+ tooltip=["user","sentiment","sentiment_score","timestamp"])
49
  with gr.Row():
50
  commDF=gr.DataFrame(type="pandas", value=pd.DataFrame({"role": [""], "content": [""] }),
51
  wrap=True, show_label=False, label=constants.OPENAI_LOG)
52
  sentimentize.click(Summary, inputs=[code], outputs=[sentiment,commDF,sentimentPlot])
53
  examples.input(UpdateWithExample,inputs=[examples], outputs=[code])
54
 
55
+ InitDF()
translators/__pycache__/translate_pdf_to_text.cpython-311.pyc CHANGED
Binary files a/translators/__pycache__/translate_pdf_to_text.cpython-311.pyc and b/translators/__pycache__/translate_pdf_to_text.cpython-311.pyc differ
 
utilities/__pycache__/constants.cpython-311.pyc CHANGED
Binary files a/utilities/__pycache__/constants.cpython-311.pyc and b/utilities/__pycache__/constants.cpython-311.pyc differ
 
utilities/__pycache__/prompt_constants.cpython-311.pyc CHANGED
Binary files a/utilities/__pycache__/prompt_constants.cpython-311.pyc and b/utilities/__pycache__/prompt_constants.cpython-311.pyc differ
 
utilities/constants.py CHANGED
@@ -50,6 +50,8 @@ JSON_PREFIX = "utilities/data/json/"
50
 
51
  JSON_1="Slack 1"
52
  JSON_2="Slack 2"
 
 
53
 
54
  LANGUAGE_1="English"
55
  LANGUAGE_2="Spanish"
 
50
 
51
  JSON_1="Slack 1"
52
  JSON_2="Slack 2"
53
+ JSON_3="Slack 3"
54
+ JSON_4="Slack 4"
55
 
56
  LANGUAGE_1="English"
57
  LANGUAGE_2="Spanish"
utilities/data/json/slack_4.json ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "messages": [
3
+ {
4
+ "user": "Anne Smith",
5
+ "text": "Hey everyone, it's a sunny morning! Makes me feel a bit better. 🌞",
6
+ "timestamp": "2023-09-08T08:00:00Z",
7
+ "replies": [
8
+ {
9
+ "user": "David Jones",
10
+ "text": "Some positivity! That's a good change! 🌞",
11
+ "timestamp": "2023-09-08T08:10:00Z"
12
+ }
13
+ ],
14
+ "reactions": {
15
+ "🌞": 4,
16
+ "😊": 5
17
+ }
18
+ },
19
+ {
20
+ "user": "Bob Wright",
21
+ "text": "Anyone else excited for the team building event tomorrow?",
22
+ "timestamp": "2023-09-08T09:00:00Z",
23
+ "replies": [
24
+ {
25
+ "user": "Nora West",
26
+ "text": "Absolutely! It's gonna be so much fun.",
27
+ "timestamp": "2023-09-08T09:10:00Z"
28
+ }
29
+ ],
30
+ "reactions": {
31
+ "πŸ‘": 3,
32
+ "πŸ˜ƒ": 5
33
+ }
34
+ },
35
+ {
36
+ "user": "Cathy Newsome",
37
+ "text": "The internet connection is driving me nuts! Can we get this fixed? 😠",
38
+ "timestamp": "2023-09-08T10:00:00Z",
39
+ "replies": [
40
+ {
41
+ "user": "Olivia Prestley",
42
+ "text": "I second that. This is unacceptable! 😀",
43
+ "timestamp": "2023-09-08T10:10:00Z"
44
+ }
45
+ ],
46
+ "reactions": {
47
+ "😠": 6,
48
+ "😀": 4
49
+ }
50
+ },
51
+ {
52
+ "user": "David Jones",
53
+ "text": "My code just won't compile and it's super frustrating! 😑",
54
+ "timestamp": "2023-09-08T11:00:00Z",
55
+ "replies": [
56
+ {
57
+ "user": "Anne Smith",
58
+ "text": "Been there, David. Hang in there! πŸ˜–",
59
+ "timestamp": "2023-09-08T11:10:00Z"
60
+ }
61
+ ],
62
+ "reactions": {
63
+ "😑": 4,
64
+ "πŸ˜–": 3
65
+ }
66
+ },
67
+ {
68
+ "user": "Ed Norton",
69
+ "text": "Anyone else noticed that the snacks have been restocked? 😁",
70
+ "timestamp": "2023-09-08T12:00:00Z",
71
+ "replies": [
72
+ {
73
+ "user": "Felix Mars",
74
+ "text": "Finally some good news! πŸ˜ƒ",
75
+ "timestamp": "2023-09-08T12:10:00Z"
76
+ }
77
+ ],
78
+ "reactions": {
79
+ "😁": 5,
80
+ "πŸ˜ƒ": 3
81
+ }
82
+ },
83
+ {
84
+ "user": "Felix Mars",
85
+ "text": "I just finished a major project. I could really use some down time. πŸ˜“",
86
+ "timestamp": "2023-09-08T13:00:00Z",
87
+ "replies": [
88
+ {
89
+ "user": "Heidi Pierce",
90
+ "text": "You've earned it, Felix! Take a break. 😌",
91
+ "timestamp": "2023-09-08T13:10:00Z"
92
+ }
93
+ ],
94
+ "reactions": {
95
+ "πŸ˜“": 4,
96
+ "😌": 3
97
+ }
98
+ },
99
+ {
100
+ "user": "Heidi Pierce",
101
+ "text": "Who else is up for a movie night this Friday? 🍿",
102
+ "timestamp": "2023-09-08T14:00:00Z",
103
+ "replies": [
104
+ {
105
+ "user": "Kristy Klaus",
106
+ "text": "Count me in! πŸ™‹β€β™€οΈ",
107
+ "timestamp": "2023-09-08T14:10:00Z"
108
+ }
109
+ ],
110
+ "reactions": {
111
+ "🍿": 4,
112
+ "πŸ™‹β€β™€οΈ": 5
113
+ }
114
+ },
115
+ {
116
+ "user": "Ingrid Gonzales",
117
+ "text": "Another system update? Why do they always come at the worst times? 😑",
118
+ "timestamp": "2023-09-08T15:00:00Z",
119
+ "replies": [
120
+ {
121
+ "user": "Laurence Welk",
122
+ "text": "So true, Ingrid. It's like they know when we're busiest. 😀",
123
+ "timestamp": "2023-09-08T15:10:00Z"
124
+ }
125
+ ],
126
+ "reactions": {
127
+ "😑": 5,
128
+ "😀": 4
129
+ }
130
+ },
131
+ {
132
+ "user": "Jessie Jacobs",
133
+ "text": "Thank you, team, for pulling off a successful project! πŸ™Œ",
134
+ "timestamp": "2023-09-08T16:00:00Z",
135
+ "replies": [
136
+ {
137
+ "user": "Nora West",
138
+ "text": "Great teamwork! So proud of us! πŸŽ‰",
139
+ "timestamp": "2023-09-08T16:10:00Z"
140
+ }
141
+ ],
142
+ "reactions": {
143
+ "πŸ™Œ": 4,
144
+ "πŸŽ‰": 5
145
+ }
146
+ },
147
+ {
148
+ "user": "Kristy Klaus",
149
+ "text": "Traffic is terrible today. I might be late. πŸ˜‘",
150
+ "timestamp": "2023-09-08T17:00:00Z",
151
+ "replies": [
152
+ {
153
+ "user": "Pierce Frank",
154
+ "text": "No worries, Kristy. We'll cover for you. πŸ‘",
155
+ "timestamp": "2023-09-08T17:10:00Z"
156
+ }
157
+ ],
158
+ "reactions": {
159
+ "πŸ˜‘": 3,
160
+ "πŸ‘": 4
161
+ }
162
+ }
163
+ ]
164
+ }
utilities/prompt_constants.py CHANGED
@@ -47,5 +47,5 @@ ANALYSIS_SYSTEM_PROMPT="You will be provided a transcript, your task is to summa
47
  KEYWORD_SYSTEM_PROMPT="You will be provided with a block of text, and your task is to extract a list of up to 15 key concepts from it, filter to keywords that appear more than once or are relevant to the central theme. Response should be just a numbered list, no title or explanation."
48
 
49
  #slack analysis presets
50
- SLACK_SENTIMENT_SYSTEM_PROMPT="You are a sentiment analysis bot. You will be provided with a blob of Slack content as JSON, and your task is to provide sentiment analysis for each message by adding 'Sentiment' field to the message. Example: {'user': 'Olivia Prestley','text': 'I cant wait to escape this office. The weekend can't come soon enough. πŸ˜’','timestamp': '2023-09-07T17:00:00Z', 'reactions': {'πŸ˜’': 5, '😞': 3, 'πŸ˜”': 2 } }, becomes: {'user': 'Olivia Prestley','text': 'I cannot wait to escape this office. The weekend cannot come soon enough. πŸ˜’','timestamp': '2023-09-07T17:00:00Z', 'reactions': {'πŸ˜’': 5, '😞': 3, 'πŸ˜”': 2 }, 'sentiment': 'Sad' }"
51
  SLACK_THEMES_SYSTEM_PROMPT="You will be provided with a blob of JSON in this format: {'user': 'Olivia Prestley','text': 'I cannot wait to escape this office. The weekend cannot come soon enough. πŸ˜’','timestamp': '2023-09-07T17:00:00Z', 'reactions': {'πŸ˜’': 5, '😞': 3, 'πŸ˜”': 2 }, 'sentiment': 'Sad' }, and your task is to extract a list of up to 15 keywords from it, filter to keywords that appear more than once or are relevant to the central theme."
 
47
  KEYWORD_SYSTEM_PROMPT="You will be provided with a block of text, and your task is to extract a list of up to 15 key concepts from it, filter to keywords that appear more than once or are relevant to the central theme. Response should be just a numbered list, no title or explanation."
48
 
49
  #slack analysis presets
50
+ SLACK_SENTIMENT_SYSTEM_PROMPT="You are a sentiment analysis bot and your task is to provide sentiment analysis for each message. Your response be a score between 0.00 - 1.00, such that [0, 0.3)(Negative), [0.3, 0.6)(Neutral), and [0.6,1.00](Positive)."
51
  SLACK_THEMES_SYSTEM_PROMPT="You will be provided with a blob of JSON in this format: {'user': 'Olivia Prestley','text': 'I cannot wait to escape this office. The weekend cannot come soon enough. πŸ˜’','timestamp': '2023-09-07T17:00:00Z', 'reactions': {'πŸ˜’': 5, '😞': 3, 'πŸ˜”': 2 }, 'sentiment': 'Sad' }, and your task is to extract a list of up to 15 keywords from it, filter to keywords that appear more than once or are relevant to the central theme."