fix nltk for HF Spaces
Browse files
- app.py +1 -1
- download_dependencies.py +1 -1
app.py
CHANGED
|
@@ -14,7 +14,7 @@ nlp = spacy.load("en_core_web_sm")
|
|
| 14 |
annotator = errant.load('en', nlp)
|
| 15 |
errant_path = os.path.join(os.path.dirname("./"), 'errant_verbose.json')
|
| 16 |
errant_verbose = json.load(open(errant_path, "r"))
|
| 17 |
-
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
|
| 18 |
|
| 19 |
# Load text editor (TinyLlama)
|
| 20 |
text_editor = Llama(
|
|
|
|
| 14 |
annotator = errant.load('en', nlp)
|
| 15 |
errant_path = os.path.join(os.path.dirname("./"), 'errant_verbose.json')
|
| 16 |
errant_verbose = json.load(open(errant_path, "r"))
|
| 17 |
+
sent_detector = nltk.data.load('./nltk_data/tokenizers/punkt/english.pickle')
|
| 18 |
|
| 19 |
# Load text editor (TinyLlama)
|
| 20 |
text_editor = Llama(
|
download_dependencies.py
CHANGED
|
@@ -5,4 +5,4 @@ id = "1TnPssg0CkWQ_thuAH8cY3hdB2J18A0Kl"
|
|
| 5 |
output = "texteditor-model/coedit-tinyllama-chat-bnb-4bit-unsloth.Q4_K_M.gguf"
|
| 6 |
gdown.download(id=id, output=output)
|
| 7 |
|
| 8 |
-
nltk.download('punkt')  # NOTE(review): original line truncated in this diff rendering — presumably the pre-fix call without a local download_dir; verify against repo history
|
|
|
|
| 5 |
output = "texteditor-model/coedit-tinyllama-chat-bnb-4bit-unsloth.Q4_K_M.gguf"
|
| 6 |
gdown.download(id=id, output=output)
|
| 7 |
|
| 8 |
+
nltk.download('punkt', download_dir="./nltk_data")
|