izuemon committed on
Commit
7b90dc7
·
verified ·
1 Parent(s): c2bd07c

Update turbowarp-server/gpt.py

Browse files
Files changed (1) hide show
  1. turbowarp-server/gpt.py +92 -105
turbowarp-server/gpt.py CHANGED
@@ -5,8 +5,8 @@ import requests
5
  PROJECT_ID = "1290918780"
6
 
7
  SYSTEM_PROMPT = {
8
- "role":"system",
9
- "content":"You are a conversational AI that runs on Scratch. Do not use Markdown in your responses; speak in natural conversation. The creator of this assistant is Izuemon."
10
  }
11
 
12
  tw = scratchcommunication.TwCloudConnection(
@@ -15,206 +15,193 @@ tw = scratchcommunication.TwCloudConnection(
15
  contact_info="contact"
16
  )
17
 
18
- slots = [f"n{i}" for i in range(1,10)]
19
-
20
- # ---------------------
21
- # クラウド変数取得
22
- # ---------------------
23
-
24
- def get_vars():
25
- return tw.get_cloud_variables()
26
-
27
- def get_var(name):
28
- return get_vars().get(name)
29
-
30
 
31
  # ---------------------
32
  # 文字テーブル
33
  # ---------------------
34
 
35
  chars = []
36
-
37
- with open("turbowarp-server/n-chars.txt",encoding="utf8") as f:
38
  for line in f:
39
  chars.append(line.strip())
40
 
41
  def encode(text):
42
-
43
- out=""
44
-
45
  for c in text:
46
-
47
  if c in chars:
48
  i = chars.index(c)
49
  out += f"{i:02d}"
50
-
51
  return out
52
 
53
  def decode(data):
54
-
55
- text=""
56
-
57
- for i in range(0,len(data),2):
58
-
59
  num = int(data[i:i+2])
60
-
61
- if num==99:
62
- text+="\n"
63
  else:
64
- text+=chars[num]
65
-
66
  return text
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  # ---------------------
70
  # n0管理
71
  # ---------------------
72
 
73
  def get_used():
74
-
75
  v = get_var("n0")
76
-
77
  if not v:
78
  return []
79
-
80
  return list(v)
81
 
82
  def add_used(i):
83
-
84
  u = get_used()
85
-
86
  if str(i) not in u:
87
  u.append(str(i))
88
-
89
- tw.set_variable("n0","".join(u))
90
 
91
  def remove_used(i):
92
-
93
  u = get_used()
94
-
95
  if str(i) in u:
96
  u.remove(str(i))
97
-
98
- tw.set_variable("n0","".join(u))
99
-
100
 
101
  # ---------------------
102
  # API
103
  # ---------------------
104
 
105
  def ask_gpt(history):
106
-
107
- messages=[SYSTEM_PROMPT]+history
108
-
109
- r=requests.post(
110
  "https://izuemon-gpt-free-api.hf.space/v1/chat/completions",
111
  json={
112
- "model":"gpt-3.5-turbo",
113
- "messages":messages
114
- }
 
115
  )
116
-
117
- data=r.json()
118
-
119
  return data["choices"][0]["message"]["content"]
120
 
121
-
122
  # ---------------------
123
  # 送信
124
  # ---------------------
125
 
126
- def send(slot,text):
 
127
 
128
- encoded=encode(text)
 
129
 
130
- size=99996
131
-
132
- packets=[encoded[i:i+size] for i in range(0,len(encoded),size)]
133
-
134
- total=len(packets)
135
 
136
  for p in packets:
137
-
138
- packet=f"1{total}0{p}"
139
-
140
- start=time.time()
141
-
142
- tw.set_variable(slot,packet)
143
 
144
  while True:
145
-
146
- v=get_var(slot)
147
-
148
- if v and len(v) > 2 and v[2]=="1":
149
  break
150
-
151
- if time.time()-start>10:
152
  return
153
-
154
  time.sleep(0.1)
155
 
156
-
157
  # ---------------------
158
  # メインループ
159
  # ---------------------
160
 
161
- buffers={}
162
 
163
  while True:
164
-
165
- vars=get_vars()
166
-
167
- for i,slot in enumerate(slots,1):
168
-
169
- v=vars.get(slot)
170
 
171
  if not v:
172
  continue
173
 
174
- if len(v)<3:
175
  continue
176
 
177
- unread=v[2]=="0"
178
-
179
  if not unread:
180
  continue
181
 
182
  add_used(i)
183
 
184
- total=int(v[1])
185
- data=v[3:]
 
 
 
 
 
 
 
186
 
187
- tw.set_variable(slot,v[:2]+"1"+v[3:])
 
 
 
 
188
 
189
  if slot not in buffers:
190
- buffers[slot]=[]
191
 
192
  buffers[slot].append(data)
193
 
194
- if len(buffers[slot])<total:
195
  continue
196
 
197
- joined="".join(buffers[slot])
198
-
199
- decoded=decode(joined)
200
-
201
- history=[]
202
 
203
- parts=decoded.split("\n")
204
-
205
- for j in range(0,len(parts),2):
206
 
 
 
 
 
207
  history.append({
208
- "role":"user",
209
- "content":parts[j]
210
  })
211
 
212
- reply=ask_gpt(history)
213
-
214
- send(slot,reply)
215
-
216
- buffers[slot]=[]
217
 
 
 
218
  remove_used(i)
219
 
220
  time.sleep(0.2)
 
5
  PROJECT_ID = "1290918780"
6
 
7
  SYSTEM_PROMPT = {
8
+ "role": "system",
9
+ "content": "You are a conversational AI that runs on Scratch. Do not use Markdown in your responses; speak in natural conversation. The creator of this assistant is Izuemon."
10
  }
11
 
12
  tw = scratchcommunication.TwCloudConnection(
 
15
  contact_info="contact"
16
  )
17
 
18
+ slots = [f"n{i}" for i in range(1, 10)]
 
 
 
 
 
 
 
 
 
 
 
19
 
20
  # ---------------------
21
  # 文字テーブル
22
  # ---------------------
23
 
24
# One table entry per line of n-chars.txt; a character's 0-based line number
# is the two-digit code used by encode()/decode().
chars = []
with open("turbowarp-server/n-chars.txt", encoding="utf8") as f:
    chars.extend(raw.strip() for raw in f)
28
 
29
def encode(text):
    """Encode *text* as a string of two-digit indices into the `chars` table.

    Characters not present in `chars` are silently dropped (the receiving
    Scratch project only understands table characters).

    NOTE(review): assumes len(chars) <= 100 so every index fits in two
    digits, with 99 reserved by decode() for newline — confirm the table.
    """
    if not text:
        return ""
    # Build a first-occurrence index map once: O(len(chars)) total instead
    # of an O(len(chars)) list.index() scan for every character of text.
    index_of = {}
    for pos, ch in enumerate(chars):
        index_of.setdefault(ch, pos)
    # "".join avoids the quadratic cost of repeated string concatenation.
    return "".join(f"{index_of[ch]:02d}" for ch in text if ch in index_of)
36
 
37
def decode(data):
    """Inverse of encode(): turn pairs of digits back into text.

    Code 99 is reserved for a newline; every other two-digit value is a
    lookup into the module-level `chars` table.
    """
    pieces = []
    for pos in range(0, len(data), 2):
        code = int(data[pos:pos + 2])
        pieces.append("\n" if code == 99 else chars[code])
    return "".join(pieces)
46
 
47
+ # ---------------------
48
+ # TwCloudConnection の正しいラッパー
49
+ # ---------------------
50
+
51
def get_var(name):
    """Read one cloud variable, returning None on any error.

    TwCloudConnection.get_variable must be called with keyword arguments,
    so this wrapper centralizes that (switch name_literal to True if needed).
    """
    try:
        value = tw.get_variable(name=name, name_literal=False)
    except Exception:
        # Deliberate best-effort read: callers treat None as "no value".
        return None
    return value
62
+
63
def set_var(name, value):
    """Write one cloud variable via keyword arguments.

    Best-effort: a failed write yields None and is ignored by callers.
    """
    try:
        result = tw.set_variable(name=name, value=value, name_literal=False)
    except Exception:
        return None
    return result
68
 
69
  # ---------------------
70
  # n0管理
71
  # ---------------------
72
 
73
def get_used():
    """Return the busy-slot digits stored in cloud variable n0 as a list."""
    current = get_var("n0")
    return list(current) if current else []
78
 
79
def add_used(i):
    """Mark slot *i* as in use by adding its digit to n0 (idempotent)."""
    used = get_used()
    digit = str(i)
    if digit not in used:
        used.append(digit)
    set_var("n0", "".join(used))
 
84
 
85
def remove_used(i):
    """Clear slot *i*'s busy digit from n0 (no-op if it is absent)."""
    used = get_used()
    digit = str(i)
    if digit in used:
        used.remove(digit)
    set_var("n0", "".join(used))
 
 
90
 
91
  # ---------------------
92
  # API
93
  # ---------------------
94
 
95
def ask_gpt(history):
    """POST the chat history (prefixed with the system prompt) to the API
    and return the assistant's reply text.

    Raises on HTTP errors or timeout; the main loop catches any exception
    and substitutes a fallback message.
    """
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [SYSTEM_PROMPT, *history],
    }
    response = requests.post(
        "https://izuemon-gpt-free-api.hf.space/v1/chat/completions",
        json=payload,
        timeout=30,
    )
    response.raise_for_status()
    body = response.json()
    return body["choices"][0]["message"]["content"]
108
 
 
109
  # ---------------------
110
  # 送信
111
  # ---------------------
112
 
113
def send(slot, text):
    """Encode *text* and transmit it through cloud variable *slot*.

    Each packet is written as "1" + packet-count + "0" + data; the client
    flips the third character to "1" as an acknowledgement. Gives up
    silently if an ack does not arrive within 10 seconds.

    NOTE(review): a single-character count limits a message to 9 packets —
    with chunk size 99996 that is rarely hit, but confirm the client side.
    """
    encoded = encode(text)
    # Chunk size chosen to fit inside a TurboWarp cloud variable.
    chunk = 99996
    packets = [encoded[pos:pos + chunk] for pos in range(0, len(encoded), chunk)]
    total = len(packets)

    for payload in packets:
        started = time.time()
        set_var(slot, f"1{total}0{payload}")

        while True:
            current = get_var(slot)
            # Third character "1" means the client consumed this packet.
            if current and len(current) > 2 and current[2] == "1":
                break
            if time.time() - started > 10:
                # Ack timed out: abort the remaining packets.
                return
            time.sleep(0.1)
136
 
 
137
# ---------------------
# Main loop
# ---------------------

# Per-slot accumulation of packet payloads until a full message has arrived.
buffers = {}

while True:
    for i, slot in enumerate(slots, 1):
        v = get_var(slot)

        if not v:
            continue

        if len(v) < 3:
            continue

        # Third character "0" marks a packet the server has not consumed yet.
        unread = v[2] == "0"
        if not unread:
            continue

        add_used(i)

        # Expected format: flag (1 char) + total packet count (1 char) +
        # read flag + data...
        # NOTE(review): a single-character total caps a message at 9
        # packets — confirm the client never sends more.
        try:
            total = int(v[1])
        except Exception:
            # Malformed header: release the slot and skip this packet.
            remove_used(i)
            continue

        data = v[3:]

        # Flip the read flag (index 2) to "1" so the client knows
        # this packet was consumed. Done via safe string slicing.
        if len(v) >= 3:
            newv = v[:2] + "1" + v[3:]
            set_var(slot, newv)

        if slot not in buffers:
            buffers[slot] = []

        buffers[slot].append(data)

        # Wait until every packet of the message has arrived.
        if len(buffers[slot]) < total:
            continue

        joined = "".join(buffers[slot])
        decoded = decode(joined)

        history = []
        parts = decoded.split("\n")

        # parts[j] at even indices are user messages; odd indices are
        # reserved (e.g. for future extensions).
        for j in range(0, len(parts), 2):
            if parts[j].strip() == "":
                continue
            history.append({
                "role": "user",
                "content": parts[j]
            })

        # Any API failure is converted into a fixed error reply so the
        # Scratch client always receives something.
        try:
            reply = ask_gpt(history)
        except Exception:
            reply = "エラーが発生しました。"

        send(slot, reply)
        buffers[slot] = []
        remove_used(i)

    time.sleep(0.2)