Omnibus committed on
Commit
5e9b985
·
1 Parent(s): adc839e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -6
app.py CHANGED
@@ -99,17 +99,18 @@ def run_gpt(
99
  prompt_template,
100
  stop_tokens,
101
  max_tokens,
 
102
  purpose,
103
  **prompt_kwargs,
104
  ):
105
-
106
  generate_kwargs = dict(
107
  temperature=0.9,
108
  max_new_tokens=max_tokens,
109
  top_p=0.95,
110
  repetition_penalty=1.0,
111
  do_sample=True,
112
- seed=random.randint(1,1000000000),
113
  )
114
 
115
  content = PREFIX.format(
@@ -133,6 +134,8 @@ def run_gpt(
133
  return resp
134
 
135
  def compress_data(c,purpose, task, history):
 
 
136
  print (c)
137
  #tot=len(purpose)
138
  #print(tot)
@@ -142,8 +145,8 @@ def compress_data(c,purpose, task, history):
142
  print(f'chunk:: {chunk}')
143
  print(f'divr:: {divr}')
144
  print (f'divi:: {divi}')
145
- #out = []
146
- out=""
147
  s=0
148
  e=chunk
149
  print(f'e:: {e}')
@@ -157,7 +160,8 @@ def compress_data(c,purpose, task, history):
157
  resp = run_gpt(
158
  COMPRESS_DATA_PROMPT,
159
  stop_tokens=["observation:", "task:", "action:", "thought:"],
160
- max_tokens=2048,
 
161
  purpose=purpose,
162
  task=task,
163
  knowledge=new_history,
@@ -172,7 +176,8 @@ def compress_data(c,purpose, task, history):
172
  resp = run_gpt(
173
  COMPRESS_DATA_PROMPT,
174
  stop_tokens=["observation:", "task:", "action:", "thought:"],
175
- max_tokens=2048,
 
176
  purpose=purpose,
177
  task=task,
178
  knowledge=new_history,
@@ -190,6 +195,7 @@ def compress_history(purpose, task, history):
190
  COMPRESS_HISTORY_PROMPT,
191
  stop_tokens=["observation:", "task:", "action:", "thought:"],
192
  max_tokens=512,
 
193
  purpose=purpose,
194
  task=task,
195
  history=history,
@@ -203,6 +209,7 @@ def call_main(purpose, task, history, action_input):
203
  MODEL_FINDER,
204
  stop_tokens=["observation:", "task:"],
205
  max_tokens=1024,
 
206
  purpose=purpose,
207
  TASKS=f'{query.tasks}',
208
  task=task,
@@ -231,6 +238,7 @@ def call_set_task(purpose, task, history, action_input):
231
  TASK_PROMPT,
232
  stop_tokens=[],
233
  max_tokens=256,
 
234
  purpose=purpose,
235
  task=task,
236
  history=history,
 
99
  prompt_template,
100
  stop_tokens,
101
  max_tokens,
102
+ seed,
103
  purpose,
104
  **prompt_kwargs,
105
  ):
106
+ print(seed)
107
  generate_kwargs = dict(
108
  temperature=0.9,
109
  max_new_tokens=max_tokens,
110
  top_p=0.95,
111
  repetition_penalty=1.0,
112
  do_sample=True,
113
+ seed=seed,
114
  )
115
 
116
  content = PREFIX.format(
 
134
  return resp
135
 
136
  def compress_data(c,purpose, task, history):
137
+ seed=random.randint(1,1000000000)
138
+
139
  print (c)
140
  #tot=len(purpose)
141
  #print(tot)
 
145
  print(f'chunk:: {chunk}')
146
  print(f'divr:: {divr}')
147
  print (f'divi:: {divi}')
148
+ out = []
149
+ #out=""
150
  s=0
151
  e=chunk
152
  print(f'e:: {e}')
 
160
  resp = run_gpt(
161
  COMPRESS_DATA_PROMPT,
162
  stop_tokens=["observation:", "task:", "action:", "thought:"],
163
+ max_tokens=512,
164
+ seed=seed,
165
  purpose=purpose,
166
  task=task,
167
  knowledge=new_history,
 
176
  resp = run_gpt(
177
  COMPRESS_DATA_PROMPT,
178
  stop_tokens=["observation:", "task:", "action:", "thought:"],
179
+ max_tokens=512,
180
+ seed=seed,
181
  purpose=purpose,
182
  task=task,
183
  knowledge=new_history,
 
195
  COMPRESS_HISTORY_PROMPT,
196
  stop_tokens=["observation:", "task:", "action:", "thought:"],
197
  max_tokens=512,
198
+ seed=random.randint(1,1000000000),
199
  purpose=purpose,
200
  task=task,
201
  history=history,
 
209
  MODEL_FINDER,
210
  stop_tokens=["observation:", "task:"],
211
  max_tokens=1024,
212
+ seed=random.randint(1,1000000000),
213
  purpose=purpose,
214
  TASKS=f'{query.tasks}',
215
  task=task,
 
238
  TASK_PROMPT,
239
  stop_tokens=[],
240
  max_tokens=256,
241
+ seed=random.randint(1,1000000000),
242
  purpose=purpose,
243
  task=task,
244
  history=history,