clean up debug prints
app.py
CHANGED
@@ -66,7 +66,6 @@ if False:
 
 @spaces.GPU(duration=60)
 def extend(text, max_new_tokens, chunk_size, progress=gr.Progress()):
-    print(f"DEBUG start: {text}")
     PREFIX = "<bos>\n" # Model just repeats the last token without this
     progress(0, desc="Tokenizing...")
     token_ids = tokenizer.encode(PREFIX + text, add_special_tokens=False, return_tensors="pt")
@@ -100,7 +99,6 @@ def extend(text, max_new_tokens, chunk_size, progress=gr.Progress()):
             yield text, "New text somehow deleted existing text!\n\n" + new_text
             return
         yield new_text, f"New tokens generated: {done_tokens}/{max_new_tokens}"
-        print(f"DEBUG succeeded: {new_text}")
     except Exception as e:
         yield text, f"# ERROR: {e!r}"
 
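For context, extend is a streaming Gradio handler: it yields (text, status) pairs as generation proceeds, so the UI updates after each chunk. Below is a minimal sketch of how such a generator might be wired into a Blocks interface; the component names, labels, and slider ranges are illustrative assumptions, not taken from this commit.

import gradio as gr

# A minimal sketch, assuming a single-textbox Space. Component names,
# labels, and slider ranges are guesses for illustration; extend() is the
# generator from app.py shown in the diff above.
with gr.Blocks() as demo:
    text = gr.Textbox(label="Text", lines=10)
    status = gr.Markdown()
    max_new_tokens = gr.Slider(1, 512, value=128, step=1, label="Max new tokens")
    chunk_size = gr.Slider(1, 64, value=16, step=1, label="Chunk size")
    go = gr.Button("Extend")
    # Because extend() is a generator, each yielded (text, status) pair
    # streams into the two output components while generation is running.
    go.click(extend, inputs=[text, max_new_tokens, chunk_size], outputs=[text, status])

demo.launch()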