Update app.py
app.py CHANGED

@@ -41,9 +41,9 @@ def format_prompt(message, history, cust_p):
     prompt+=cust_p.replace("USER_INPUT",message)
     return prompt

-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p,translate_fa
+def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p,translate_fa):
     #token max=8192
-    if(
+    if(translate_fa == True):
         if(len(prompt) > 2000):
             translatedtext1 = GoogleTranslator(source='auto', target='en').translate(prompt[0:2000])
             translatedtext2 = GoogleTranslator(source='auto', target='en').translate(prompt[2000:(len(prompt))])

@@ -131,8 +131,7 @@ with gr.Blocks() as app:
             custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
         with gr.Column(scale=1):
             with gr.Group():
-                translate_fa = gr.Checkbox(label="Translate
-                translate_prompt = gr.Checkbox(label="Translate Prompt to English", value=True)
+                translate_fa = gr.Checkbox(label="Translate to Persian", value=True)
                 rand = gr.Checkbox(label="Random Seed", value=True)
                 seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                 tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="The maximum number of tokens")

@@ -145,8 +144,8 @@ with gr.Blocks() as app:
     client_choice.change(load_models,client_choice,[chat_b])
     app.load(load_models,client_choice,[chat_b])

-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa
+    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa],[chat_b,memory])
+    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa],[chat_b,memory])

     clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
     app.queue(default_concurrency_limit=10).launch()
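The first hunk gates prompt translation behind the new translate_fa argument and splits prompts longer than 2000 characters across two GoogleTranslator calls. Below is a minimal standalone sketch of that logic, assuming GoogleTranslator comes from the deep_translator package (its source='auto'/target/translate signature matches the calls above); translate_to_english and CHUNK_SIZE are illustrative names, not part of app.py.

# Sketch of the translation gating added to chat_inf; assumes deep_translator.
# translate_to_english and CHUNK_SIZE are illustrative names, not from app.py.
from deep_translator import GoogleTranslator

CHUNK_SIZE = 2000  # app.py splits long prompts at 2000 characters

def translate_to_english(prompt: str, translate_fa: bool) -> str:
    """Translate the prompt to English only when the checkbox is ticked."""
    if not translate_fa:
        return prompt
    translator = GoogleTranslator(source='auto', target='en')
    if len(prompt) <= CHUNK_SIZE:
        return translator.translate(prompt)
    # Long prompts are translated in chunks and re-joined, mirroring the
    # translatedtext1/translatedtext2 split in the diff, generalised to any length.
    chunks = [prompt[i:i + CHUNK_SIZE] for i in range(0, len(prompt), CHUNK_SIZE)]
    return "".join(translator.translate(chunk) for chunk in chunks)

Splitting at a fixed character offset can cut a word or sentence in half; the 2000-character cut simply mirrors what the commit does, and a more careful version might split on whitespace or sentence boundaries.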
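The UI hunks add the translate_fa checkbox to the Blocks layout and append it to the inputs of the chained handlers, which is how its boolean value reaches the new chat_inf parameter. Here is a stripped-down sketch of that wiring pattern, assuming current Gradio Blocks APIs; the component and handler names (set_seed, echo, demo) are illustrative, not the ones in app.py.

# Minimal sketch of the commit's event wiring: resolve the seed first, then
# chain the main handler with .then(), forwarding the checkbox as an input.
import random
import gradio as gr

def set_seed(rand, seed):
    return random.randint(1, 10**6) if rand else seed

def echo(prompt, seed, translate_fa):
    note = " (translation enabled)" if translate_fa else ""
    return f"[seed {seed}] {prompt}{note}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Prompt")
    translate_fa = gr.Checkbox(label="Translate to Persian", value=True)
    rand = gr.Checkbox(label="Random Seed", value=True)
    seed = gr.Slider(label="Seed", minimum=1, maximum=10**6, step=1, value=1)
    out = gr.Textbox(label="Output")
    btn = gr.Button("Go")
    # Same shape as the commit: the checkbox component sits in the inputs list,
    # so Gradio passes its boolean value as the matching positional argument.
    btn.click(set_seed, [rand, seed], seed).then(echo, [inp, seed, translate_fa], out)

demo.launch()

Because translate_fa is listed last in the inputs, its value maps to the last parameter of the handler, exactly as the commit appends it before the [chat_b,memory] outputs.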