Pasula committed on
Commit
22185d4
·
1 Parent(s): cf097a6

add changes

Browse files
Files changed (1) hide show
  1. app.py +44 -43
app.py CHANGED
@@ -41,49 +41,49 @@ def generate(
41
  return output
42
 
43
 
44
- # additional_inputs=[
45
- # gr.Textbox(
46
- # label="System Prompt",
47
- # max_lines=1,
48
- # interactive=True,
49
- # ),
50
- # gr.Slider(
51
- # label="Temperature",
52
- # value=0.9,
53
- # minimum=0.0,
54
- # maximum=1.0,
55
- # step=0.05,
56
- # interactive=True,
57
- # info="Higher values produce more diverse outputs",
58
- # ),
59
- # gr.Slider(
60
- # label="Max new tokens",
61
- # value=256,
62
- # minimum=0,
63
- # maximum=1048,
64
- # step=64,
65
- # interactive=True,
66
- # info="The maximum numbers of new tokens",
67
- # ),
68
- # gr.Slider(
69
- # label="Top-p (nucleus sampling)",
70
- # value=0.90,
71
- # minimum=0.0,
72
- # maximum=1,
73
- # step=0.05,
74
- # interactive=True,
75
- # info="Higher values sample more low-probability tokens",
76
- # ),
77
- # gr.Slider(
78
- # label="Repetition penalty",
79
- # value=1.2,
80
- # minimum=1.0,
81
- # maximum=2.0,
82
- # step=0.05,
83
- # interactive=True,
84
- # info="Penalize repeated tokens",
85
- # )
86
- # ]
87
 
88
  # examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
89
  # ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
@@ -97,5 +97,6 @@ gr.ChatInterface(
97
  fn=generate,
98
  chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
99
  title="Compilerx-LLM",
 
100
  concurrency_limit=20,
101
  ).launch(show_api=True)
 
41
  return output
42
 
43
 
44
+ additional_inputs=[
45
+ gr.Textbox(
46
+ label="System Prompt",
47
+ max_lines=1,
48
+ interactive=True,
49
+ ),
50
+ gr.Slider(
51
+ label="Temperature",
52
+ value=0.9,
53
+ minimum=0.0,
54
+ maximum=1.0,
55
+ step=0.05,
56
+ interactive=True,
57
+ info="Higher values produce more diverse outputs",
58
+ ),
59
+ gr.Slider(
60
+ label="Max new tokens",
61
+ value=256,
62
+ minimum=0,
63
+ maximum=1048,
64
+ step=64,
65
+ interactive=True,
66
+ info="The maximum numbers of new tokens",
67
+ ),
68
+ gr.Slider(
69
+ label="Top-p (nucleus sampling)",
70
+ value=0.90,
71
+ minimum=0.0,
72
+ maximum=1,
73
+ step=0.05,
74
+ interactive=True,
75
+ info="Higher values sample more low-probability tokens",
76
+ ),
77
+ gr.Slider(
78
+ label="Repetition penalty",
79
+ value=1.2,
80
+ minimum=1.0,
81
+ maximum=2.0,
82
+ step=0.05,
83
+ interactive=True,
84
+ info="Penalize repeated tokens",
85
+ )
86
+ ]
87
 
88
  # examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
89
  # ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
 
97
  fn=generate,
98
  chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
99
  title="Compilerx-LLM",
100
+ additional_inputs=additional_inputs,
101
  concurrency_limit=20,
102
  ).launch(show_api=True)