Kakaarot committed on
Commit
bc5db28
·
verified ·
1 Parent(s): aa8ac50

tried gemma-2b again

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -46,8 +46,8 @@ import torch
46
  # Cache the model and tokenizer to avoid reloading on every run
47
  # So the first run loads and saves the resources to the global cache; when a user interaction causes a rerun of load_model_and_tokenizer(), it reuses the cached resources from memory instead of loading them again
48
  def load_model_and_tokenizer():
49
- #model_name = "google/gemma-2b" # using gemma-2b for prototype for my GSOC Proposal. Wish me luck.
50
- model_name = "openai-community/gpt2"
51
  tokenizer = AutoTokenizer.from_pretrained(model_name)
52
  # Responsible for automatically downloading and loading the tokenizer configuration and vocabulary associated with the specified pre-trained model.
53
  # Downloads and loads the tokenizer config and vocab for the given model
 
46
  # Cache the model and tokenizer to avoid reloading on every run
47
  # So the first run loads and saves the resources to the global cache; when a user interaction causes a rerun of load_model_and_tokenizer(), it reuses the cached resources from memory instead of loading them again
48
  def load_model_and_tokenizer():
49
+ model_name = "google/gemma-2b" # using gemma-2b for prototype for my GSOC Proposal. Wish me luck.
50
+ #model_name = "openai-community/gpt2"
51
  tokenizer = AutoTokenizer.from_pretrained(model_name)
52
  # Responsible for automatically downloading and loading the tokenizer configuration and vocabulary associated with the specified pre-trained model.
53
  # Downloads and loads the tokenizer config and vocab for the given model