requirements.txt
1  llama-cpp-python==0.1.62
app.py
CHANGED
|
@@ -26,7 +26,7 @@ llm = Llama(model_path=filename, n_ctx=512, n_batch=126)
 26  def generate_text(prompt="Who is the CEO of Apple?"):
 27      output = llm(
 28          prompt,
 29 -        max_tokens=
 30          temperature=0.1,
 31          top_p=0.5,
 32          echo=False,
|
@@ -57,4 +57,4 @@ gradio_interface = gr.Interface(
 57      examples=examples,
 58      title="Vicuna-7B",
 59  )
 60 -    gradio_interface.launch()
|
|
|
 26  def generate_text(prompt="Who is the CEO of Apple?"):
 27      output = llm(
 28          prompt,
 29 +        max_tokens=2000,
 30          temperature=0.1,
 31          top_p=0.5,
 32          echo=False,
|
|
 57      examples=examples,
 58      title="Vicuna-7B",
 59  )
 60 +    gradio_interface.launch()