Update app.py
app.py CHANGED
|
@@ -22,10 +22,10 @@ For more details, please check [our post](https://huggingface.co/blog/llama32).
|
|
| 22 |
access_token = os.getenv('HF_TOKEN')
|
| 23 |
# Download the Base model
|
| 24 |
#model_id = "./models/Llama-32-3B-Instruct"
|
| 25 |
-
model_id = "
|
| 26 |
-
MAX_MAX_NEW_TOKENS =
|
| 27 |
-
DEFAULT_MAX_NEW_TOKENS =
|
| 28 |
-
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "
|
| 29 |
|
| 30 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 31 |
|
|
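The first hunk swaps the model to the 51B Nemotron checkpoint and raises the token budgets, with the prompt cap read from the environment. For context, MAX_INPUT_TOKEN_LENGTH in chat demo Spaces like this one is typically used to trim the prompt before generation; below is a minimal runnable sketch of that guard, with a dummy tensor standing in for real tokenizer output (the truncation step is an assumption about the surrounding code, not a line from this diff):

import os

import torch

# Same env-driven cap as the new line 28: default 6144, overridable at deploy time.
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "6144"))

# Dummy prompt ids standing in for tokenizer output (batch of 1, 8000 tokens).
input_ids = torch.randint(0, 32000, (1, 8000))

# Keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens, the usual guard
# so long chat histories do not overflow the model's context window.
if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
    input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]

print(input_ids.shape)  # torch.Size([1, 6144])

Reading the cap from os.getenv means a deployer can shrink it for smaller hardware by setting a Space variable, with 6144 as the new in-code default.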
@@ -81,7 +81,7 @@ def generate(
 
 
 
-    streamer = TextIteratorStreamer(tokenizer, timeout=
+    streamer = TextIteratorStreamer(tokenizer, timeout=2000.0, skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         input_ids=input_ids,
         streamer=streamer,
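The second hunk touches the standard transformers streaming pattern: model.generate blocks until generation finishes, so it runs on a background thread while the TextIteratorStreamer is consumed as an iterator of decoded text chunks on the main thread. A minimal runnable sketch of that pattern, using a small placeholder model rather than the 51B Nemotron checkpoint the Space actually loads:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "gpt2"  # placeholder; the Space switches to nvidia/Llama-3_1-Nemotron-51B-Instruct
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

input_ids = tokenizer("The capital of France is", return_tensors="pt").input_ids

# Same construction as the edited line 84: drop the echoed prompt and special
# tokens from the stream, and give up if no new token arrives within the timeout.
streamer = TextIteratorStreamer(tokenizer, timeout=2000.0, skip_prompt=True, skip_special_tokens=True)

generate_kwargs = dict(input_ids=input_ids, streamer=streamer, max_new_tokens=32)

# generate() runs on a worker thread; this thread prints decoded text
# chunks as the streamer yields them.
Thread(target=model.generate, kwargs=generate_kwargs).start()
for chunk in streamer:
    print(chunk, end="", flush=True)

The long timeout is the notable choice: the streamer's iterator raises queue.Empty once the timeout elapses with no new token, and a 51B model on shared hardware can pause far longer between tokens than the library's examples assume, so 2000 seconds keeps a slow first token from killing the stream.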