Loading llama-13b using 'AutoTokenizer, AutoModelForCausalLM'

#1
by Teink - opened

I am trying to load this tokenizer and model using the code below:
'''
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("https://huggingface.co/circulus/llama-13b/tree/main/tokenizer.model")
model = AutoModelForCausalLM.from_pretrained("https://huggingface.co/circulus/llama-13b/tree/main/pytorch_model-00041-of-00041.bin")
'''

However, it gives me the following error:
'''

KeyError Traceback (most recent call last)
/tmp/ipykernel_1596897/927730692.py in
8
9 # Instantiate a text-generation pipeline
---> 10 text_generator = pipeline("text-generation", model="circulus/llama-13b")
11
12 # Generate text using the model

~/.local/lib/python3.10/site-packages/transformers/pipelines/init.py in pipeline(task, model, config, tokenizer, feature_extractor, image_processor, framework, revision, use_fast, use_auth_token, device, device_map, torch_dtype, trust_remote_code, model_kwargs, pipeline_class, **kwargs)
690 hub_kwargs["_commit_hash"] = config._commit_hash
691 elif config is None and isinstance(model, str):
--> 692 config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
693 hub_kwargs["_commit_hash"] = config._commit_hash
694

~/.local/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py in from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
915 return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
916 elif "model_type" in config_dict:
--> 917 config_class = CONFIG_MAPPING[config_dict["model_type"]]
918 return config_class.from_dict(config_dict, **unused_kwargs)
919 else:

~/.local/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py in getitem(self, key)
621 return self._extra_content[key]
622 if key not in self._mapping:
--> 623 raise KeyError(key)
624 value = self._mapping[key]
625 module_name = model_type_to_module_name(key)

KeyError: 'llama'
'''
This suggests it cannot find the model and tokenizer.
May I ask what the correct way is to import the model from Hugging Face and run it, without downloading it to my local machine?

Many thanks!

Sign up or log in to comment