# OpenVINO / Optimum-Intel ChatGLM2 loading script (recovered from a dataset-viewer preview export).
# Standard library
import argparse
import time
from pathlib import Path
from threading import Thread
from time import perf_counter
from typing import Dict, List, Optional, Tuple, Union

# Third-party
import torch
from optimum.intel.openvino import OVModelForCausalLM
from optimum.intel.openvino.utils import OV_XML_FILE_NAME
from optimum.utils import NormalizedConfigManager, NormalizedTextConfig
from transformers import (AutoConfig, AutoTokenizer, PretrainedConfig,
                          StoppingCriteria, StoppingCriteriaList,
                          TextIteratorStreamer)
class StopOnTokens(StoppingCriteria):
    """Stopping criterion that ends generation as soon as the most recently
    generated token matches one of the configured stop-token ids."""

    def __init__(self, token_ids):
        # Token ids that should terminate generation.
        self.token_ids = token_ids

    def __call__(
        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
    ) -> bool:
        # Only the last token of the first (and, for greedy decoding,
        # only) sequence in the batch is inspected.
        last_token = input_ids[0][-1]
        return any(last_token == stop_id for stop_id in self.token_ids)
class OVCHATGLMModel(OVModelForCausalLM):
    """
    Optimum-Intel compatible model wrapper for ChatGLM2.

    Overrides ``_reshape`` because ChatGLM2's input layout differs from the
    default OVModelForCausalLM assumptions (notably the past_key_values
    tensors, whose dim 2 is fixed at 2).
    """

    def _reshape(self, model: "Model", *args, **kwargs):
        # Map each model input to a partial shape with dynamic dims.
        new_shapes = {}
        for model_input in model.inputs:
            # NOTE: get_partial_shape() returns an object we mutate in place;
            # the dict entry and `shape` alias the same object.
            shape = model_input.get_partial_shape()
            new_shapes[model_input] = shape
            shape[0] = -1  # dynamic first dim for every input, incl. beam_idx
            name = model_input.get_any_name()
            if name.startswith('beam_idx'):
                # beam_idx only needs the dynamic first dim set above.
                continue
            if name.startswith('past_key_values'):
                shape[1] = -1  # presumably the sequence axis — TODO confirm
                shape[2] = 2
            elif shape.rank.get_length() > 1:
                shape[1] = -1
        model.reshape(new_shapes)
        return model
if __name__ == "__main__":
    # CLI: the model path is required; sequence length and device are
    # optional with defaults (help text previously mislabeled them as
    # "Required." and misspelled "maximum").
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-h',
                        '--help',
                        action='help',
                        help='Show this help message and exit.')
    parser.add_argument('-m',
                        '--model_path',
                        required=True,
                        type=str,
                        help='Required. model path')
    parser.add_argument('-l',
                        '--max_sequence_length',
                        default=256,
                        required=False,
                        type=int,
                        help='Optional. maximum length of output (default: 256)')
    parser.add_argument('-d',
                        '--device',
                        default='CPU',
                        required=False,
                        type=str,
                        help='Optional. device for inference (default: CPU)')
    args = parser.parse_args()
    model_dir = args.model_path

    # Latency-oriented OpenVINO configuration; empty CACHE_DIR disables
    # the on-disk compiled-model cache.
    ov_config = {"PERFORMANCE_HINT": "LATENCY",
                 "NUM_STREAMS": "1", "CACHE_DIR": ""}

    # Time tokenizer loading + model compilation so load cost is reported
    # separately from inference. Uses the perf_counter imported at the top
    # of the file (the original mixed time.perf_counter with it).
    model_start_time = perf_counter()
    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    print("====Compiling model====")
    ov_model = OVCHATGLMModel.from_pretrained(
        model_dir,
        device=args.device,
        ov_config=ov_config,
        config=AutoConfig.from_pretrained(model_dir, trust_remote_code=True),
        trust_remote_code=True,
    )
    model_end_time = perf_counter()
    print("Model_loading_before Inference:::: ", model_end_time - model_start_time)
# NOTE: the dataset preview was truncated here; the original script likely
# continues (tokenization, streaming generation, and latency reporting).