vhpvmx committed on
Commit
f3fab95
·
1 Parent(s): 06e6e70

Trying with gpt2 v2

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -21,8 +21,8 @@ from transformers import pipeline
21
  #OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like WizardLM/WizardLM-7B-V1.0 is not the path to a directory containing a file named config.json.
22
  #Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
23
 
24
- hub_model_id = "tiiuae/falcon-7b-instruct"
25
- response = pipeline("text-generation", model=hub_model_id)
26
  #obtuve este error
27
  #runtime error
28
  #Memory limit exceeded (16Gi)
 
21
  #OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like WizardLM/WizardLM-7B-V1.0 is not the path to a directory containing a file named config.json.
22
  #Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
23
 
24
+ #hub_model_id = "tiiuae/falcon-7b-instruct"
25
+ #response = pipeline("text-generation", model=hub_model_id)
26
  #obtuve este error
27
  #runtime error
28
  #Memory limit exceeded (16Gi)