lunarflu HF Staff commited on
Commit
94edee7
·
1 Parent(s): 3d2ef84

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +195 -0
app.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import asyncio
import json
import os
import random
import threading
import time

import discord
import gradio as gr
from discord.ext import commands
from gradio_client import Client

# Hugging Face API token; used to authenticate the gradio client below.
HF_TOKEN = os.getenv("HF_TOKEN")
# Client for the codellama-13b-chat Space; all code generation is delegated to it.
codellama = Client("https://huggingface-projects-codellama-13b-chat.hf.space/", HF_TOKEN)


# thread.id -> user.id of the member who started that conversation
codellama_threadid_userid_dictionary = {}
# thread.id -> path of the per-thread JSON chat-history file
codellama_threadid_conversation = {}


# Message-content intent is privileged; required so on_message sees user text.
intents = discord.Intents.default()
intents.message_content = True
# Prefix bot (requires discord.ext.commands); "/" prefix mimics slash commands.
bot = commands.Bot(command_prefix="/", intents=intents)
32
+
33
@bot.event
async def on_ready():
    """Logs login info and syncs the application command tree on startup."""
    print(f"Logged in as {bot.user} (ID: {bot.user.id})")
    synced = await bot.tree.sync()
    print(f"Synced commands: {', '.join([s.name for s in synced])}.")
    # BUG FIX: the original called `event.set()` here, but no `event`
    # (e.g. a threading.Event gating a Gradio UI thread) is ever defined in
    # this file, so on_ready always died with NameError. Guard the call so
    # startup completes whether or not such an Event is injected externally.
    ready_gate = globals().get("event")
    if ready_gate is not None:
        ready_gate.set()
    print("------")
40
+
41
+
42
+
43
@bot.command(
    name="codellama",
    description="Enter a prompt to generate code!",
)
async def codellama_command(ctx, prompt: str):
    """CodeLlama generation: starts a new code-generation conversation thread.

    BUG FIX: the original coroutine was itself named ``codellama``, so the
    decorator's return value rebound the module-global gradio ``Client`` named
    ``codellama`` to a Command object, breaking every later
    ``codellama.submit(...)`` call. The Discord-facing command name is
    unchanged (pinned explicitly with name="codellama"). The docstring also
    wrongly said "Audioldm2 generation" — copy-paste from another bot.
    """
    try:
        await try_codellama(ctx, prompt)
    except Exception as e:
        # Log-and-continue: a failed generation must not crash the bot.
        print(f"Error: (app.py){e}")
53
+
54
+
55
+
56
@bot.event
async def on_message(message):
    """Checks channel and continues codellama conversation if it's the right Discord Thread"""
    try:
        if not message.author.bot:
            await continue_codellama(message)
        # BUG FIX: overriding on_message suppresses prefix-command dispatch
        # unless process_commands is called explicitly, so the "/codellama"
        # command registered above would never fire.
        await bot.process_commands(message)
    except Exception as e:
        print(f"Error: {e}")
64
+
65
+
66
+
67
+
68
+
69
+
70
+
71
+
72
async def try_codellama(ctx, prompt):
    """Kick off a fresh codellama conversation in a new Discord thread."""
    try:
        # Echo the prompt publicly, then hang a conversation thread off it
        # (thread names are capped at 100 characters by Discord).
        anchor = await ctx.send(f"**{prompt}** - {ctx.author.mention}")
        thread = await anchor.create_thread(name=prompt[:100])

        # The gradio call blocks, so push it onto the default executor to
        # keep the event loop responsive.
        loop = asyncio.get_running_loop()
        generated = await loop.run_in_executor(None, codellama_initial_generation, prompt, thread)

        # Record the thread owner so continue_codellama only answers them.
        codellama_threadid_userid_dictionary[thread.id] = ctx.author.id

        print(generated)
        await thread.send(generated)
    except Exception as e:
        print(f"try_codellama Error: {e}")
91
+
92
+
93
+
94
+
95
+
96
def codellama_initial_generation(prompt, thread):
    """Blocking first-turn generation; intended to run via run_in_executor.

    Seeds an empty per-thread chat-history JSON file, submits the prompt to
    the codellama Space, persists the (prompt, response) pair, and returns
    the response text (truncated to respect Discord's message-length limit).
    """
    global codellama_threadid_conversation

    # One JSON history file per Discord thread, keyed by the thread id.
    chat_history = f"{thread.id}.json"
    conversation = []
    with open(chat_history, "w") as json_file:
        json.dump(conversation, json_file)

    job = codellama.submit(prompt, chat_history, fn_index=0)

    # BUG FIX: the original spun in `while job.done() is False: pass`,
    # pegging a CPU core for the whole generation. Sleep between polls.
    while not job.done():
        time.sleep(0.5)

    # Last streamed output is the path of the updated history file.
    result = job.outputs()[-1]
    with open(result, "r") as json_file:
        data = json.load(json_file)
    response = data[-1][-1]
    conversation.append((prompt, response))
    with open(chat_history, "w") as json_file:
        json.dump(conversation, json_file)

    codellama_threadid_conversation[thread.id] = chat_history
    if len(response) > 1300:
        response = response[:1300] + "...\nTruncating response due to discord api limits."
    return response
122
+
123
+
124
+
125
+
126
+
127
+
128
+
129
+
130
+
131
+
132
+
133
+
134
+
135
+
136
+
137
+
138
+
139
+
140
+
141
+
142
+
143
+
144
+
145
async def continue_codellama(message):
    """Continues a given conversation based on chat_history.

    Only replies inside a thread created by the /codellama command, and only
    to the user who started it. Conversations are capped at 15000 characters.
    """
    try:
        # Guard clauses replace the original if-pyramid; same conditions.
        if message.author.bot:
            return
        if codellama_threadid_userid_dictionary.get(message.channel.id) != message.author.id:
            return

        prompt = message.content
        chat_history = codellama_threadid_conversation[message.channel.id]

        # Check to see if conversation is ongoing or ended (>15000 characters)
        with open(chat_history, "r") as json_file:
            conversation = json.load(json_file)
        total_characters = sum(len(string) for item in conversation for string in item)
        if total_characters >= 15000:
            # Matches original behavior: an ended conversation is ignored.
            return

        job = codellama.submit(prompt, chat_history, fn_index=0)
        # BUG FIX: the original busy-waited with `while job.done() is False:
        # pass` INSIDE a coroutine, blocking the whole event loop (the bot
        # froze for every user during generation). Yield while polling.
        while not job.done():
            await asyncio.sleep(0.5)

        # Last streamed output is the path of the updated history file.
        result = job.outputs()[-1]
        with open(result, "r") as json_file:
            data = json.load(json_file)
        response = data[-1][-1]
        with open(chat_history, "r") as json_file:
            conversation = json.load(json_file)
        conversation.append((prompt, response))
        with open(chat_history, "w") as json_file:
            json.dump(conversation, json_file)
        codellama_threadid_conversation[message.channel.id] = chat_history

        if len(response) > 1300:
            response = response[:1300] + "...\nTruncating response due to discord api limits."

        await message.reply(response)

        # Warn the user once the appended turn pushes past the cap.
        total_characters = sum(len(string) for item in conversation for string in item)
        if total_characters >= 15000:
            await message.reply("Conversation ending due to length, feel free to start a new one!")

    except Exception as e:
        print(f"continue_codellama Error: {e}")