Spaces:
Runtime error
Runtime error
Trying with gpt2
Browse files
app.py
CHANGED
|
@@ -21,9 +21,17 @@ from transformers import pipeline
|
|
| 21 |
#OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like WizardLM/WizardLM-7B-V1.0 is not the path to a directory containing a file named config.json.
|
| 22 |
#Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
|
| 23 |
|
| 24 |
-
|
| 25 |
# Build a text-generation pipeline backed by the falcon-7b-instruct checkpoint.
# NOTE(review): a ~7B-parameter model; later comments in this file record that
# loading it exceeded the Space's memory limit — confirm before reusing.
hub_model_id = "tiiuae/falcon-7b-instruct"
response = pipeline("text-generation", model=hub_model_id)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
def resp(text):
    # Delegate to the module-level text-generation pipeline.
    # Presumably returns the pipeline's raw output (for transformers
    # text-generation pipelines, a list of {"generated_text": ...} dicts) —
    # TODO confirm against the installed transformers version.
    return response(text)
|
|
|
|
| 21 |
#OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like WizardLM/WizardLM-7B-V1.0 is not the path to a directory containing a file named config.json.
|
| 22 |
#Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
|
| 23 |
|
|
|
|
| 24 |
# History (translated from Spanish comments): instantiating the falcon-7b
# pipeline crashed this Space twice:
#   runtime error — Memory limit exceeded (16Gi)
# and again after a hardware upgrade:
#   runtime error — Memory limit exceeded (32Gi)
#
# Keep the model id for reference, but do NOT build a pipeline from it:
# the original code eagerly loaded falcon-7b here and then immediately
# rebound `response` to the gpt2 pipeline below, so the huge load was
# pure dead work — and the cause of the memory-limit crashes.
hub_model_id = "tiiuae/falcon-7b-instruct"

# Small model that fits comfortably in the Space's memory.
response = pipeline('text-generation', model='gpt2')
# Fix the RNG so generations are reproducible across restarts.
set_seed(42)
|
| 35 |
|
| 36 |
def resp(text):
    """Generate continuations for *text* via the module-level pipeline.

    Returns whatever the pipeline returns, unmodified.
    """
    generated = response(text)
    return generated
|