Upload modeling_mic21.py with huggingface_hub

modeling_mic21.py  CHANGED  (+11 -4)
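The commit title indicates the file was pushed with the huggingface_hub client. A minimal sketch of that kind of single-file upload, assuming a placeholder repo id and a token already configured via `huggingface-cli login` (neither is taken from this commit):

# Hedged sketch of a single-file upload with huggingface_hub.
# The repo_id below is a placeholder assumption, not part of this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="modeling_mic21.py",
    path_in_repo="modeling_mic21.py",
    repo_id="your-username/mic21-summarizer",  # placeholder repo id
    commit_message="Upload modeling_mic21.py with huggingface_hub",
)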
@@ -13,17 +13,24 @@ class MIC21SummarizerModel(PreTrainedModel):
     model_parallel = True
     place_model_on_device = False
     model_wrapped = {}
+
+    def init_components(self):
+        self.components["image_model"] = ResNetForImageClassification.from_pretrained(config.hf_image_model).cuda()
+        self.components["image_processor"] = AutoImageProcessor.from_pretrained(config.hf_image_model)
+
+        self.components["llm"] = AutoModelForCausalLM.from_pretrained(config.hf_text_model,torch_dtype=torch.float16).cuda()
+        self.components["tokenizer"] = AutoTokenizer.from_pretrained(config.hf_text_model)
 
     def __init__(self,config):
         super().__init__(config)
         #Init Image Processing Model
         self.components = {"image_model":None,"llm":None,"tokenizer":None,"image_processor":None}
         #self.components["image_model"] = ResNetForImageClassification.from_pretrained(config.hf_image_model,device_map=f"cuda:{config.im_model_cuda_id}")
-        self.components["image_model"] = ResNetForImageClassification.from_pretrained(config.hf_image_model).cpu().cuda()
+        #self.components["image_model"] = ResNetForImageClassification.from_pretrained(config.hf_image_model).cpu().cuda()
 
-        self.components["image_processor"] = AutoImageProcessor.from_pretrained(config.hf_image_model)
+        #self.components["image_processor"] = AutoImageProcessor.from_pretrained(config.hf_image_model)
 
-        self.components["llm"] = AutoModelForCausalLM.from_pretrained(config.hf_text_model,torch_dtype=torch.float16).cpu().cuda()
+        #self.components["llm"] = AutoModelForCausalLM.from_pretrained(config.hf_text_model,torch_dtype=torch.float16).cpu().cuda()
 
         #self.quantization_config = BitsAndBytesConfig(load_in_4bit=True,bnb_4bit_compute_dtype=torch.bfloat16)
         #self.components["llm"] = AutoModelForCausalLM.from_pretrained(
@@ -34,7 +41,7 @@ class MIC21SummarizerModel(PreTrainedModel):
         #    attn_implementation=config.attn_implementation,
         #    #quantization_config=self.quantization_config
         #)
-        self.components["tokenizer"] = AutoTokenizer.from_pretrained(config.hf_text_model)
+        #self.components["tokenizer"] = AutoTokenizer.from_pretrained(config.hf_text_model)
 
         #self.in_device = config.in_device
         #self.out_device = config.out_device
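Read as a whole, the change moves the heavy component loading out of __init__ and into a new init_components method, so the wrapper can be constructed without touching the GPU. Below is a rough sketch of how the refactored section reads as plain Python; the imports are inferred from the calls in the hunks, and the bare config name in the committed init_components is assumed to mean self.config (set by PreTrainedModel.__init__), since the method takes no config argument.

# Reconstructed excerpt, not the full file. Imports are inferred from the
# calls shown in the diff; the bare `config` in the committed init_components
# is assumed to mean `self.config`.
import torch
from transformers import (AutoImageProcessor, AutoModelForCausalLM,
                          AutoTokenizer, PreTrainedModel,
                          ResNetForImageClassification)


class MIC21SummarizerModel(PreTrainedModel):
    model_parallel = True
    place_model_on_device = False
    model_wrapped = {}

    def init_components(self):
        cfg = self.config  # assumption: stands in for the committed bare `config`
        self.components["image_model"] = ResNetForImageClassification.from_pretrained(cfg.hf_image_model).cuda()
        self.components["image_processor"] = AutoImageProcessor.from_pretrained(cfg.hf_image_model)
        self.components["llm"] = AutoModelForCausalLM.from_pretrained(cfg.hf_text_model, torch_dtype=torch.float16).cuda()
        self.components["tokenizer"] = AutoTokenizer.from_pretrained(cfg.hf_text_model)

    def __init__(self, config):
        super().__init__(config)
        # __init__ now only records empty component slots; loading is deferred.
        self.components = {"image_model": None, "llm": None,
                           "tokenizer": None, "image_processor": None}

Under that assumption, a caller would instantiate MIC21SummarizerModel from its config as before and call init_components() only when the ResNet and causal-LM weights actually need to be loaded onto the GPU.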