code stringlengths 101 5.91M |
|---|
@lru_cache(None)
def cuda_toolkit_available():
    """Return True if the ``nvcc`` CUDA compiler is on the PATH, else False.

    The result is cached (the decorator was garbled to ``_cache(None)`` in
    this copy; restored to ``@lru_cache(None)``) because PATH contents do
    not change during a run.
    """
    try:
        # We only care whether the binary exists; discard its output.
        call(['nvcc'], stdout=DEVNULL, stderr=DEVNULL)
        return True
    except FileNotFoundError:
        return False
def random_normal(size=(1,), trunc_val=2.5, rnd_state=None):
    """Sample a truncated standard normal, rescaled into roughly [-1, 1].

    Each value is rejection-sampled from N(0, 1) until it falls within
    +/- trunc_val, then divided by trunc_val.

    Args:
        size: output array shape.
        trunc_val: truncation bound (and rescaling divisor).
        rnd_state: numpy random state; defaults to the global ``np.random``.

    Returns:
        float32 ndarray of the requested shape with values in [-1, 1].
    """
    if rnd_state is None:
        rnd_state = np.random
    # Renamed from `len`, which shadowed the builtin.
    count = int(np.array(size).prod())
    result = np.empty((count,), dtype=np.float32)
    for i in range(count):
        # Rejection sampling: redraw until the sample is inside the bound.
        while True:
            x = rnd_state.normal()
            if -trunc_val <= x <= trunc_val:
                break
        result[i] = x / trunc_val
    return result.reshape(size)
def create_custom_splits_for_experiments(task_path):
    """Append three cross-scanner folds to the task's splits_final.pkl.

    Patients are partitioned by scanner ('_A_' vs '_B_' in the case key),
    80% of each scanner's patients go to training (seeded RandomState for
    reproducibility), and three folds are appended: train-on-A, train-on-B,
    and train-on-both, all validated on the held-out A+B patients.
    """
    preprocessed_dir = os.path.join(task_path, 'nnUNetData_plans_v2.1_2D_stage0')
    data_keys = [name[:-4] for name in subfiles(preprocessed_dir, join=False, suffix='npz')]
    splits_file = os.path.join(task_path, 'splits_final.pkl')
    # Keep only the five original cross-validation folds before appending.
    splits = load_pickle(splits_file)[:5]

    patients_a = np.unique([k.split('_')[0] for k in data_keys if '_A_' in k])
    patients_b = np.unique([k.split('_')[0] for k in data_keys if '_B_' in k])

    rng = RandomState(1234)
    n_train_a = int(np.round(0.8 * len(patients_a)))
    n_train_b = int(np.round(0.8 * len(patients_b)))
    train_a = [patients_a[i] for i in rng.choice(len(patients_a), n_train_a, replace=False)]
    train_b = [patients_b[i] for i in rng.choice(len(patients_b), n_train_b, replace=False)]
    val_a = [p for p in patients_a if p not in train_a]
    val_b = [p for p in patients_b if p not in train_b]

    def keys_of(patients):
        # All case keys whose patient identifier is in the given set.
        return [k for k in data_keys if k.split('_')[0] in patients]

    splits.append({'train': keys_of(train_a), 'val': keys_of(val_a) + keys_of(val_b)})
    splits.append({'train': keys_of(train_b), 'val': keys_of(val_a) + keys_of(val_b)})
    splits.append({'train': keys_of(train_b) + keys_of(train_a), 'val': keys_of(val_a) + keys_of(val_b)})
    save_pickle(splits, splits_file)
def _config_dict_write(fp: IO[str], config: ConfigDict) -> None:
    """Serialize *config* as 4-space-indented JSON into the open text stream *fp*."""
    serialized = config.to_json(indent=4)
    fp.write(serialized)
def get_rap_jitter(frequencies, p_floor, p_ceil, max_p_factor):
    """Relative Average Perturbation (RAP) jitter of a frequency contour.

    For every valid window of three consecutive frequencies, accumulates the
    absolute difference between the middle period and the three-period mean,
    then normalizes by the contour's mean period. Returns None when no valid
    window exists or the mean period is zero.
    """
    mean_period = get_mean_period(frequencies, p_floor, p_ceil, max_p_factor)
    total = 0
    n_valid = 0
    for window in shifted_sequence(frequencies, 3):
        f1, f2, f3 = window
        if not validate_frequencies([f1, f2, f3], p_floor, p_ceil, max_p_factor):
            continue
        three_period_mean = (1 / f1 + 1 / f2 + 1 / f3) / 3
        total += np.abs(1 / f2 - three_period_mean)
        n_valid += 1
    if n_valid == 0:
        return None
    return (total / n_valid) / mean_period if mean_period != 0 else None
def main(args):
    """Passkey-retrieval benchmark driver.

    For each model in ``args.model``, measures how often the model can
    reproduce a pass key hidden in prompts of growing token length, then
    optionally writes a CSV of per-length accuracies to ``args.output_file``.
    """
    # args.model is a list of single-element lists (argparse append style).
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, padding_side='right', trust_remote_code=True)
    if args.fixed_length:
        # Single, user-supplied prompt length.
        lengths = [args.fixed_length]
        tokens = [len(tokenizer.encode(generate_prompt(args.fixed_length)[0]))]
        print(f'Prompt is {tokens[0]} tokens')
    else:
        if args.tokens_step:
            # Linear schedule of token targets.
            tokens = [x for x in range(args.min_tokens, (args.max_tokens + 1), args.tokens_step)]
        else:
            # Doubling schedule of token targets, capped at max_tokens.
            tokens = [args.min_tokens]
            while (args.min_tokens < args.max_tokens):
                point = (tokens[(- 1)] * 2)
                if (point <= args.max_tokens):
                    tokens.append(point)
                else:
                    break
        lengths = []
        last_n = 0
        # For each token target, grow the prompt-length parameter `n` in
        # `length_step` increments until the encoded prompt reaches the
        # target; record the largest n that was still below the target.
        for target in tqdm(tokens, desc='Determining sequence lengths'):
            num_tokens = 0
            n = last_n
            while (num_tokens < target):
                last_n = n
                n += args.length_step
                prompt = generate_prompt(n)[0]
                num_tokens = len(tokenizer.encode(prompt))
            lengths.append(last_n)
    results = []
    for model in tqdm(models, desc='Model', leave=False):
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        pipe = pipeline('text-generation', model=loaded, tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
        # result[i] counts successes for lengths[i]; it is normalized to an
        # accuracy in [0, 1] after the iteration loop.
        result = ([0] * len(lengths))
        for (i, length) in tenumerate(lengths, desc='Lengths', leave=False):
            for _ in trange(0, args.iterations, desc='Iterations', leave=False):
                (prompt_text, pass_key) = generate_prompt(length)
                num_tokens = len(pipe.tokenizer.encode(prompt_text))
                answer = test_model(pipe, prompt_text, pass_key)
                if (answer == pass_key):
                    result[i] += 1
            result[i] /= args.iterations
            print(f'{model}: {tokens[i]}={int((result[i] * 100))}%')
        # Prepend the model name so each row is self-describing in the CSV.
        result.insert(0, model)
        results.append(result)
    if args.output_file:
        # CSV layout: header row of token counts, then one row per model.
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write(f''',{','.join([str(x) for x in tokens])}
''')
            for result in results:
                f.write(f'''{','.join([str(x) for x in result])}
''')
def wind(cost, data, params, width):
    """Sliding-window change-point detection over a list of datasets.

    Each dataset is standardized (excluding its 'changepoint' column), a
    ruptures Window detector predicts as many breakpoints as the dataset has
    labeled change points, and the predictions are scored against the labels
    with the NAB metric.
    """
    predictions = []
    for frame in data:
        scaler = StandardScaler()
        signal = scaler.fit_transform(frame.drop('changepoint', axis=1))
        detector = rpt.Window(model=cost, params=params, width=width, jump=1)
        detector.fit(signal)
        n_true = len(frame[frame['changepoint'] == 1])
        breakpoints = detector.predict(n_bkps=n_true)
        # Mark predicted change points (the trailing breakpoint is the
        # series end, so it is dropped).
        marks = pd.Series(data=0, index=frame.index)
        marks[marks.index[breakpoints[:-1]]] = 1
        predictions.append(marks)
    ground_truth = [frame.changepoint for frame in data]
    nab = evaluating_change_point(ground_truth, predictions, metric='nab', numenta_time='30 sec')
    return nab
def main():
    """Interactive entry point: run this process as a client or a server.

    Prompts for a role; the server additionally asks how many random
    trinomial systems to generate and how many clients to expect.
    """
    clorsv = input('client or server ? (type c or s) ')
    if (clorsv == 'c'):
        client()
    else:
        from phcpy import solver
        if (clorsv == 's'):
            # Security fix: use int() instead of eval() — never evaluate raw
            # user input as Python code.
            nbsys = int(input('-> how many systems ? '))
            probs = [solver.random_trinomials() for i in range(0, nbsys)]
            nbclt = int(input('-> how many clients ? '))
            start_server(probs, nbclt)
        else:
            print('sorry, expected c or s, please try again')
def saliency_map_I_gradient(numpy_image, model, attr_objective, baseline='gaus', fold=10, interp='linear'):
    """Integrated-gradients saliency map for a CHW image in [0, 1].

    Builds a baseline image with ``IG_baseline`` (which works in HWC space
    and a 0-255 range, hence the moveaxis/scale round trip), averages the
    interpolated gradients, and scales by the input-baseline difference.

    Returns:
        (final_grad, last_result): the attribution map and the last model
        output from the interpolation sweep.
    """
    numpy_baseline = np.moveaxis((IG_baseline((np.moveaxis(numpy_image, 0, 2) * 255.0), mode=baseline) / 255.0), 2, 0)
    # Bug fix: forward the caller's `interp` instead of hard-coding 'linear',
    # which silently ignored the parameter.
    (grad_list, result_list, _) = I_gradient(numpy_image, numpy_baseline, model, attr_objective, fold, interp=interp)
    final_grad = (grad_list.mean(axis=0) * (numpy_image - numpy_baseline))
    return (final_grad, result_list[(- 1)])
class UserAgent(object):
    """Configuration container for one federated-learning user/client.

    Optimizer settings may come either from a structured DictConfig or from
    the flat keyword arguments; the trained model is attached later.
    """

    def __init__(self, id: str, group=None, batch_size=0, learning_rate=0, beta=0, lamda=0, local_epochs=0, optimizer='sgd', K=None, personal_learning_rate=None, loss='xent', label_mode='supervised'):
        self.id = id
        # `group` is expected as a (group, group_name) pair. Robustness fix:
        # the default None previously crashed on tuple unpacking.
        if group is None:
            self.group = None
            self.group_name = None
        else:
            (self.group, self.group_name) = group
        self.label_mode = label_mode
        self.batch_size = batch_size
        self.beta = beta
        self.lamda = lamda
        self.local_epochs = local_epochs
        if isinstance(optimizer, DictConfig):
            # Structured config: extract fields and keep the raw config too.
            self.optimizer_name = optimizer.name
            self.learning_rate = optimizer.learning_rate
            self.personal_learning_rate = optimizer.personal_learning_rate
            self.optimizer_config = optimizer
        else:
            self.optimizer_name = optimizer
            self.learning_rate = learning_rate
            self.personal_learning_rate = personal_learning_rate
        self.K = K
        # NOTE(review): the `loss` argument is accepted but never stored —
        # kept for interface compatibility; confirm whether callers rely on it.
        self.model = None
def load_state_dict(checkpoint_file: Union[(str, os.PathLike)], variant: Optional[str]=None):
    """Load a checkpoint, dispatching on filename between torch and safetensors.

    Files whose basename matches the (possibly variant-suffixed)
    ``WEIGHTS_NAME`` are loaded with ``torch.load``; anything else is
    assumed to be a safetensors file. On failure, inspects the file to
    produce an actionable OSError/ValueError.
    """
    try:
        if (os.path.basename(checkpoint_file) == _add_variant(WEIGHTS_NAME, variant)):
            # Plain PyTorch pickle checkpoint.
            return torch.load(checkpoint_file, map_location='cpu')
        else:
            return safetensors.torch.load_file(checkpoint_file, device='cpu')
    except Exception as e:
        try:
            # A text file starting with 'version' is a git-lfs pointer stub,
            # i.e. the real weights were never downloaded.
            with open(checkpoint_file) as f:
                if f.read().startswith('version'):
                    raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.')
                else:
                    raise ValueError(f'Unable to locate the file {checkpoint_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e
        except (UnicodeDecodeError, ValueError):
            # Undecodable binary content, or the ValueError raised just above:
            # fall through to a generic load-failure message.
            raise OSError(f"Unable to load weights from checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.")
class STRes16UNetIN50(STRes16UNet50):
    # Variant of STRes16UNet50 that uses sparse instance normalization and
    # the matching instance-norm bottleneck block; everything else is
    # inherited unchanged.
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BottleneckIN
class MinitaurAlternatingLegsEnv(minitaur_gym_env.MinitaurGymEnv):
    """Minitaur gym environment for an alternating-legs (bounding) gait.

    The 8-dim action perturbs a hand-designed open-loop leg signal; the
    reward is constant (1.0 per step) and the episode is judged fallen when
    the base tilts too far from upright.
    """
    metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 66}

    def __init__(self, urdf_version=None, control_time_step=0.006, action_repeat=6, control_latency=0, pd_latency=0, on_rack=False, motor_kp=1.0, motor_kd=0.02, remove_default_joint_damping=False, render=False, num_steps_to_log=1000, env_randomizer=None, log_path=None):
        # Per-leg offsets added to the initial pose and to every action.
        self._swing_offset = np.zeros(NUM_LEGS)
        self._extension_offset = np.zeros(NUM_LEGS)
        super(MinitaurAlternatingLegsEnv, self).__init__(urdf_version=urdf_version, accurate_motor_model_enabled=True, motor_overheat_protection=True, hard_reset=False, motor_kp=motor_kp, motor_kd=motor_kd, remove_default_joint_damping=remove_default_joint_damping, control_latency=control_latency, pd_latency=pd_latency, on_rack=on_rack, render=render, num_steps_to_log=num_steps_to_log, env_randomizer=env_randomizer, log_path=log_path, control_time_step=control_time_step, action_repeat=action_repeat)
        action_dim = 8
        action_high = np.array(([0.1] * action_dim))
        self.action_space = spaces.Box((- action_high), action_high)
        self._cam_dist = 1.0
        self._cam_yaw = 30
        self._cam_pitch = (- 30)

    def _reset(self):
        self.desired_pitch = DESIRED_PITCH
        # Initial pose: per-leg swing then per-leg extension, with offsets.
        init_pose = [(INIT_SWING_POS + self._swing_offset[0]), (INIT_SWING_POS + self._swing_offset[1]), (INIT_SWING_POS + self._swing_offset[2]), (INIT_SWING_POS + self._swing_offset[3]), (INIT_EXTENSION_POS + self._extension_offset[0]), (INIT_EXTENSION_POS + self._extension_offset[1]), (INIT_EXTENSION_POS + self._extension_offset[2]), (INIT_EXTENSION_POS + self._extension_offset[3])]
        initial_motor_angles = self._convert_from_leg_model(init_pose)
        super(MinitaurAlternatingLegsEnv, self)._reset(initial_motor_angles=initial_motor_angles, reset_duration=0.5)
        return self._get_observation()

    def _convert_from_leg_model(self, leg_pose):
        """Map leg-space pose [swing x4, extension x4] to motor angles.

        Bug fix: the sign exponent must use floor division (i // 2).  With
        Python 3's true division, (-1) ** (i / 2) produces a complex number
        for odd i and crashes the float array assignment.
        """
        motor_pose = np.zeros(NUM_MOTORS)
        for i in range(NUM_LEGS):
            motor_pose[(2 * i)] = (leg_pose[(NUM_LEGS + i)] - (((- 1) ** (i // 2)) * leg_pose[i]))
            motor_pose[((2 * i) + 1)] = (leg_pose[(NUM_LEGS + i)] + (((- 1) ** (i // 2)) * leg_pose[i]))
        return motor_pose

    def _signal(self, t):
        """Open-loop alternating-legs signal at time *t* (leg space)."""
        initial_pose = np.array([INIT_SWING_POS, INIT_SWING_POS, INIT_SWING_POS, INIT_SWING_POS, INIT_EXTENSION_POS, INIT_EXTENSION_POS, INIT_EXTENSION_POS, INIT_EXTENSION_POS])
        amplitude = STEP_AMPLITUDE
        period = STEP_PERIOD
        # Cosine extension profile, alternating between the two leg pairs
        # every period.
        extension = (amplitude * ((- 1.0) + math.cos((((2 * math.pi) / period) * t))))
        ith_leg = (int((t / period)) % 2)
        first_leg = np.array([0, 0, 0, 0, 0, extension, extension, 0])
        second_leg = np.array([0, 0, 0, 0, extension, 0, 0, extension])
        if ith_leg:
            signal = (initial_pose + second_leg)
        else:
            signal = (initial_pose + first_leg)
        return signal

    def _transform_action_to_motor_command(self, action):
        """Add offsets and the open-loop signal, then convert to motor space."""
        action[0:4] += self._swing_offset
        action[4:8] += self._extension_offset
        action += self._signal(self.minitaur.GetTimeSinceReset())
        action = self._convert_from_leg_model(action)
        return action

    def is_fallen(self):
        """True when the base's local up-axis deviates too far from world up."""
        orientation = self.minitaur.GetBaseOrientation()
        rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
        local_up = rot_mat[6:]
        return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85)

    def _reward(self):
        # Constant survival reward; episode length is the learning signal.
        return 1.0

    def _get_true_observation(self):
        """Noise-free observation: [roll, pitch - desired, roll rate, pitch rate]."""
        observation = []
        (roll, pitch, _) = self.minitaur.GetTrueBaseRollPitchYaw()
        (roll_rate, pitch_rate, _) = self.minitaur.GetTrueBaseRollPitchYawRate()
        observation.extend([roll, pitch, roll_rate, pitch_rate])
        observation[1] -= self.desired_pitch
        self._true_observation = np.array(observation)
        return self._true_observation

    def _get_observation(self):
        """Sensor observation: [roll, pitch - desired, roll rate, pitch rate]."""
        observation = []
        (roll, pitch, _) = self.minitaur.GetBaseRollPitchYaw()
        (roll_rate, pitch_rate, _) = self.minitaur.GetBaseRollPitchYawRate()
        observation.extend([roll, pitch, roll_rate, pitch_rate])
        observation[1] -= self.desired_pitch
        self._observation = np.array(observation)
        return self._observation

    def _get_observation_upper_bound(self):
        upper_bound = np.zeros(self._get_observation_dimension())
        upper_bound[0:2] = (2 * math.pi)  # angles
        upper_bound[2:4] = ((2 * math.pi) / self._time_step)  # angular rates
        return upper_bound

    def _get_observation_lower_bound(self):
        lower_bound = (- self._get_observation_upper_bound())
        return lower_bound

    def set_swing_offset(self, value):
        """Set the per-leg swing offsets (length-NUM_LEGS array)."""
        self._swing_offset = value

    def set_extension_offset(self, value):
        """Set the per-leg extension offsets (length-NUM_LEGS array)."""
        self._extension_offset = value

    def set_desired_pitch(self, value):
        """Set the pitch target subtracted from observations."""
        self.desired_pitch = value
def drop_table():
    """Drop the `class` and `method` tables from the project database."""
    db = database()
    # Two DROP statements executed as one script.
    sql_script = '\n DROP TABLE `class`;' + '\n DROP TABLE `method`;' + '\n '
    db.execute(sql_script)
def load_tokens(filename):
    """Read a JSON list of examples and concatenate their 'token' lists.

    Prints a summary and returns the flat token list.
    """
    with open(filename) as infile:
        examples = json.load(infile)
    tokens = []
    for example in examples:
        tokens.extend(example['token'])
    print('{} tokens from {} examples loaded from {}.'.format(len(tokens), len(examples), filename))
    return tokens
class PolyBUnit(PolyBaseUnit):
    # PolyNet "B"-type unit: wires the generic PolyBaseUnit with the
    # B-variant two-way / poly blocks; only the two scale factors are
    # configurable.
    def __init__(self, two_way_scale, poly_scale):
        super(PolyBUnit, self).__init__(two_way_scale=two_way_scale, two_way_block=TwoWayBBlock, poly_scale=poly_scale, poly_res_block=poly_res_b_block, poly_pre_block=PolyPreBBlock)
class AuxiliaryLossAverageMeter(object):
    """Tracks running averages of named auxiliary losses.

    Lazily creates one AverageMeter per loss key on first sight.
    """

    def __init__(self):
        self.aux_loss = {}

    def update(self, value_dict, accu_steps, n=1):
        """Fold ``value_dict`` (name -> loss value) into the running averages.

        Values are divided by ``accu_steps`` (gradient-accumulation factor)
        and weighted by ``n``. A None value_dict is a no-op.
        """
        if value_dict is None:
            return
        for (key, value) in value_dict.items():
            # The original had two identical update branches; collapsed here:
            # create the meter if missing, then update it.
            if key not in self.aux_loss:
                self.aux_loss[key] = AverageMeter()
            self.aux_loss[key].update((value / accu_steps), n)

    def reset(self):
        """Reset every tracked meter (no-op when empty)."""
        for meter in self.aux_loss.values():
            meter.reset()

    def avg_infos(self):
        """Formatted ', key: avg' summary string, or '' when nothing tracked."""
        if len(self.aux_loss) == 0:
            return ''
        infos = [('%s: %.3f' % (key, meter.avg)) for (key, meter) in self.aux_loss.items()]
        return (', ' + ', '.join(infos))
def load_state_from_resource(model, resource: str):
    """Load pretrained weights described by a ``url::...`` or ``local::...`` string.

    Raises ValueError for any other resource prefix.
    """
    print(f'Using pretrained resource {resource}')
    if resource.startswith('url::'):
        url = resource.split('url::')[1]
        return load_state_dict_from_url(model, url, progress=True)
    if resource.startswith('local::'):
        path = resource.split('local::')[1]
        return load_wts(model, path)
    raise ValueError('Invalid resource type, only url:: and local:: are supported')
class BartTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style).

    Text is encoded to UTF-8 bytes, bytes are mapped to printable unicode
    characters, and BPE merges ranked by ``merges_file`` are applied;
    ``vocab_file`` maps the resulting tokens to ids.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        # Wrap plain-string special tokens as AddedToken so strip behavior is
        # explicit; only the mask token strips a leading space (lstrip=True).
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        # Byte <-> printable-unicode tables for lossless byte-level BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # First line is the '#version' header; a trailing empty line is
            # dropped by the [1:-1] slice.
            bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Lower rank = earlier (higher-priority) merge.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Memoizes bpe() results per input token.
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # GPT-2 pre-tokenization pattern: contractions, letter runs, digit
        # runs, other symbols, and whitespace. NOTE(review): \p{...} classes
        # require the third-party `regex` module imported as `re` — confirm
        # the file's imports.
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")

    # NOTE(review): upstream implementations declare this as a @property;
    # the decorator may have been lost in this copy — verify against callers.
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        # Base vocab plus any tokens added after construction.
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply ranked BPE merges to one pre-tokenized token (memoized)."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Greedily merge the best-ranked adjacent pair still present.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`; keep the tail as-is.
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    # Merge the pair into a single symbol.
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Pre-tokenize with the GPT-2 regex, byte-encode, then BPE-split."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        # Unknown tokens fall back to the unk id.
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Invert the byte-encoding to recover the original text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Merges are written in rank order; warn if ranks have gaps.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Single sequence: <s> A </s>; pair: <s> A </s></s> B </s>."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        # BART does not use token type ids; return all zeros of the right length.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Prepend a space so the first word tokenizes like a mid-sentence word
        # when add_prefix_space (or pre-split input) is requested.
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
            text = (' ' + text)
        return (text, kwargs)
def get_new_pallete(num_cls):
    """Build a flat RGB palette with 3 values per class index.

    Uses the standard PASCAL-VOC bit-spreading scheme: the class label's
    bits are distributed across the high bits of the R, G and B channels,
    three bits per round.

    Returns:
        list of num_cls * 3 ints in [0, 255], ordered R, G, B per class.
    """
    pallete = [0] * (num_cls * 3)
    for cls_idx in range(num_cls):
        base = cls_idx * 3
        label = cls_idx
        shift = 7
        # Consume the label three bits at a time, one bit per channel,
        # filling channel bytes from the most significant bit down.
        while label > 0:
            pallete[base + 0] |= ((label >> 0) & 1) << shift
            pallete[base + 1] |= ((label >> 1) & 1) << shift
            pallete[base + 2] |= ((label >> 2) & 1) << shift
            shift -= 1
            label >>= 3
    return pallete
class TimeSeriesData():
    # Attribute wrapper that stores every attribute under a leading-underscore
    # key (via __setattr__) and, on access (via __getattr__), slices iterable
    # attributes to the configured [start_time_step, end_time_step] window.
    def __init__(self, variable: Iterable=None, start_time_step: int=None, end_time_step: int=None):
        # These assignments route through __setattr__ below, so the values are
        # actually stored as _variable, _start_time_step, _end_time_step.
        self.variable = (variable if (variable is None) else np.array(variable))
        self.start_time_step = start_time_step
        self.end_time_step = end_time_step

    def __getattr__(self, name: str, start_time_step: int=None, end_time_step: int=None):
        # Only called for names not found normally — every attribute, since
        # they are all stored under the underscore-prefixed key.
        try:
            variable = self.__dict__[f'_{name}']
        except KeyError:
            raise AttributeError(f'_{name}')
        if isinstance(variable, Iterable):
            # Window the iterable: default indices come from the instance's
            # configured start/end steps; end is inclusive (+1 on the slice).
            start_time_step = (self.start_time_step if (start_time_step is None) else start_time_step)
            start_index = (0 if (start_time_step is None) else start_time_step)
            end_time_step = (self.end_time_step if (end_time_step is None) else end_time_step)
            end_index = (len(variable) if (end_time_step is None) else (end_time_step + 1))
            return variable[start_index:end_index]
        else:
            # Scalars (and None) are returned unwindowed.
            return variable

    def __setattr__(self, name: str, value: Any):
        # Store under '_<name>' so that plain attribute reads always go
        # through __getattr__ above.
        self.__dict__[f'_{name}'] = value
class MMapIndexedDatasetBuilder(object):
    """Incrementally writes tensors into a flat binary data file.

    Item sizes are tracked so that finalize() can emit the companion index
    file understood by MMapIndexedDataset.
    """

    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []

    def add_item(self, tensor):
        """Append one tensor's raw bytes (cast to the builder dtype)."""
        array = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(array.tobytes(order='C'))
        self._sizes.append(array.size)

    def merge_file_(self, another_file):
        """Append another dataset's data file and adopt its item sizes."""
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert (index.dtype == self._dtype)
        self._sizes.extend(index.sizes)
        with open(data_file_path(another_file), 'rb') as source:
            shutil.copyfileobj(source, self._data_file)

    def finalize(self, index_file):
        """Close the data file and write the index of item sizes."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
def _download_bsd300(dest='dataset'):
    # Download and unpack the BSDS300 image dataset into *dest* and return
    # the images directory. Skips the download when the directory exists.
    output_image_dir = join(dest, 'BSDS300/images')
    if (not exists(output_image_dir)):
        makedirs(dest, exist_ok=True)
        url = '
        # NOTE(review): the URL string literal above is truncated in this
        # copy of the source (lost during extraction) — restore it before use.
        print('downloading url ', url)
        data = urllib.request.urlopen(url)
        file_path = join(dest, basename(url))
        with open(file_path, 'wb') as f:
            f.write(data.read())
        print('Extracting data')
        # NOTE(review): members are extracted without path sanitization;
        # only safe for a trusted archive.
        with tarfile.open(file_path) as tar:
            for item in tar:
                tar.extract(item, dest)
        # Remove the downloaded tarball after extraction.
        remove(file_path)
    return output_image_dir
def split_dataset(filenames, train_set, val_set, test_set, val_len=None, test_len=None):
    """Distribute newly-added files among existing train/val/test splits.

    Files in *filenames* that are not yet in any split are appended so that
    the val/test sets reach their expected lengths (val_len / test_len,
    defaulting to the current set lengths); the remainder goes to training.

    Returns:
        (train_set, val_set, test_set) tuple.

    Raises:
        Exception: if the splits already contain more files than *filenames*.
        ValueError: if val/test are empty and no target length was given.
        RuntimeError: if an existing split exceeds its expected length.
    """
    len_filenames = len(filenames)
    len_all_sets = len(train_set) + len(val_set) + len(test_set)
    files_added = len_filenames - len_all_sets
    print('Total {} files, with {} files already in txt files.'.format(len_filenames, len_all_sets))
    print('{} files added'.format(files_added))
    # Fixed: the original compared small ints with `is`, which only works by
    # accident of CPython's int caching.
    if files_added == 0:
        print('No need to split. Returning original results.')
        return (train_set, val_set, test_set)
    if files_added < 0:
        raise Exception('split_dataset: Total file count is smaller than combined length of train/val/test set!')
    if len(test_set) == 0 and test_len is None:
        raise ValueError('split_dataset: test_set is empty and no test_len is specified!')
    if len(val_set) == 0 and val_len is None:
        raise ValueError('split_dataset: val_set is empty and no val_len is specified!')
    if test_len is None:
        test_len = len(test_set)
    if val_len is None:
        val_len = len(val_set)
    if len(val_set) == 0 and len(test_set) == 0:
        # Fresh val/test: carve them from the files not already in training.
        if len(train_set) != 0:
            not_train_set = [filename for filename in filenames if filename not in train_set]
        else:
            not_train_set = filenames
        val_set = not_train_set[0:val_len]
        test_set = not_train_set[val_len:(val_len + test_len)]
        train_set += not_train_set[(val_len + test_len):]
    else:
        # Top up existing val/test sets, then send the rest to training.
        not_val_or_test_set = [filename for filename in filenames if ((filename not in val_set) and (filename not in test_set))]
        len_diff = val_len - len(val_set)
        if len_diff > 0:
            val_set += not_val_or_test_set[0:len_diff]
            not_val_or_test_set = not_val_or_test_set[len_diff:]
            print('Expected val set length: {}, current val set length: {}'.format(val_len, len(val_set)))
            print('Added %d files to val set.' % len_diff)
            print('Unusual behavior. Do you really want to add files to val set?')
        elif len_diff < 0:
            print('Expected val set length: {}, current val set length: {}'.format(val_len, len(val_set)))
            raise RuntimeError('split_dataset: Expected val length is smaller than current length!')
        len_diff = test_len - len(test_set)
        if len_diff > 0:
            test_set += not_val_or_test_set[0:len_diff]
            not_val_or_test_set = not_val_or_test_set[len_diff:]
            # Fixed copy-paste bug: these messages previously reported the
            # val-set numbers instead of the test-set numbers.
            print('Expected test set length: {}, current test set length: {}'.format(test_len, len(test_set)))
            print('Added %d files to test set.' % len_diff)
            print('Unusual behavior. Do you really want to add files to test set?')
        elif len_diff < 0:
            print('Expected test set length: {}, current test set length: {}'.format(test_len, len(test_set)))
            raise RuntimeError('split_dataset: Expected test length is smaller than current length!')
        train_set = not_val_or_test_set
    return (train_set, val_set, test_set)
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention.

    forward(q, k, v) returns (output, attention weights after dropout,
    log-softmax of the scaled scores).
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v):
        # Scaled similarity scores between queries and keys.
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        log_attn = F.log_softmax(scores, 2)
        # Dropout is applied to the normalized weights (train mode only).
        weights = self.dropout(self.softmax(scores))
        output = torch.bmm(weights, v)
        return (output, weights, log_attn)
def build_pose_evaluator(args):
    """Construct the dataset-specific 6D-pose evaluator.

    Loads class names, 3D models (plus metadata) and symmetry info from the
    dataset directory, then instantiates the evaluator matching
    ``args.dataset`` ('ycbv' or 'lmo').
    """
    classes = load_classes(args.dataset_path + args.class_info)
    (models, models_info) = load_models(args.dataset_path + args.models, classes)
    model_symmetry = load_model_symmetry(args.dataset_path + args.model_symmetry, classes)
    # Flatten the class mapping into an ordered list of names.
    class_names = [classes[k] for k in classes]
    if args.dataset == 'ycbv':
        return PoseEvaluator(models, class_names, models_info, model_symmetry)
    if args.dataset == 'lmo':
        return PoseEvaluatorLMO(models, class_names, models_info, model_symmetry)
    raise ValueError('Unknown dataset.')
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy weights from a Flax parameter tree into a PyTorch model, in place.

    Handles base-model prefix mismatches between the two checkpoints,
    transposes dense/conv kernels to PyTorch layout, and logs unexpected
    and missing keys. Returns the (mutated) PyTorch model.
    """
    try:
        import torch
    except ImportError:
        # NOTE(review): the installation URLs in this message appear to have
        # been stripped from this copy of the source.
        logger.error('Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see and for installation instructions.')
        raise
    # numpy has no bfloat16, so bf16 Flax params must be upcast to float32
    # before torch.from_numpy can consume them.
    is_type_bf16 = flatten_dict(jax.tree_map((lambda x: (x.dtype == jnp.bfloat16)), flax_state)).values()
    if any(is_type_bf16):
        logger.warning('Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` before loading those in PyTorch model.')
        flax_state = jax.tree_map((lambda params: (params.astype(np.float32) if (params.dtype == jnp.bfloat16) else params)), flax_state)
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
    # Detect whether exactly one side carries the base-model prefix
    # (head-model checkpoint into base model, or vice versa).
    load_model_with_head_into_base_model = ((pt_model.base_model_prefix in flax_state) and (pt_model.base_model_prefix not in set([k.split('.')[0] for k in pt_model_dict.keys()])))
    load_base_model_into_model_with_head = ((pt_model.base_model_prefix not in flax_state) and (pt_model.base_model_prefix in set([k.split('.')[0] for k in pt_model_dict.keys()])))
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for (flax_key_tuple, flax_tensor) in flax_state_dict.items():
        has_base_model_prefix = (flax_key_tuple[0] == pt_model.base_model_prefix)
        require_base_model_prefix = ('.'.join(((pt_model.base_model_prefix,) + flax_key_tuple)) in pt_model_dict)
        if (load_model_with_head_into_base_model and has_base_model_prefix):
            # Strip the prefix the PyTorch model does not use.
            flax_key_tuple = flax_key_tuple[1:]
        elif (load_base_model_into_model_with_head and require_base_model_prefix):
            # Add the prefix the PyTorch model expects.
            flax_key_tuple = ((pt_model.base_model_prefix,) + flax_key_tuple)
        if ((flax_key_tuple[(- 1)] == 'kernel') and (flax_tensor.ndim == 4) and ('.'.join(flax_key_tuple) not in pt_model_dict)):
            # Conv kernel: Flax stores (H, W, in, out); PyTorch wants
            # (out, in, H, W).
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('weight',))
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif ((flax_key_tuple[(- 1)] == 'kernel') and ('.'.join(flax_key_tuple) not in pt_model_dict)):
            # Dense kernel: Flax stores the transpose of PyTorch's weight.
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('weight',))
            flax_tensor = flax_tensor.T
        elif (flax_key_tuple[(- 1)] in ['scale', 'embedding']):
            # LayerNorm scale / embedding table map to 'weight' unchanged.
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('weight',))
        flax_key = '.'.join(flax_key_tuple)
        if (flax_key in pt_model_dict):
            if (flax_tensor.shape != pt_model_dict[flax_key].shape):
                raise ValueError(f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.')
            else:
                flax_tensor = (np.asarray(flax_tensor) if (not isinstance(flax_tensor, np.ndarray)) else flax_tensor)
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                missing_keys.remove(flax_key)
        else:
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    missing_keys = list(missing_keys)
    if (len(unexpected_keys) > 0):
        logger.warning(f'''Some weights of the Flax model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).
- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model).''')
    else:
        logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.
''')
    if (len(missing_keys) > 0):
        logger.warning(f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly initialized: {missing_keys}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    else:
        logger.warning(f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.
If your task is similar to the task the model of the checkpoint was trained on, you can already use {pt_model.__class__.__name__} for predictions without further training.''')
    return pt_model
class LookUpTable(object):
    """Bidirectional mapping between objects and their integer indices."""

    def __init__(self, objlist):
        self.idx2obj = dict(enumerate(objlist))
        # Modernized: dict.items() replaces six.iteritems — the six shim is
        # unnecessary on Python 3.
        self.obj2idx = {v: k for (k, v) in self.idx2obj.items()}

    def size(self):
        """Number of entries in the table."""
        return len(self.idx2obj)

    def get_obj(self, idx):
        """Object stored at *idx*; raises KeyError when absent."""
        return self.idx2obj[idx]

    def get_idx(self, obj):
        """Index assigned to *obj*; raises KeyError when absent."""
        return self.obj2idx[obj]

    def __str__(self):
        return self.idx2obj.__str__()
class Morphology(lazylist):
    """Brill-style lexical (morphological) tagging rules.

    Each rule is a list of fields, e.g. ['ly', 'hassuf', 'RB', 'x'] meaning
    "words with suffix -ly get tag RB". Commands prefixed with 'f' also
    constrain the token's current tag.
    """

    def __init__(self, path='', known={}):
        # NOTE(review): `known` is a mutable default argument shared by all
        # instances that do not pass their own lexicon dict — kept for
        # interface compatibility.
        self.known = known
        self._path = path
        self._cmd = set(('word', 'char', 'haspref', 'hassuf', 'addpref', 'addsuf', 'deletepref', 'deletesuf', 'goodleft', 'goodright'))
        # Also accept the 'f'-prefixed (tag-constrained) variant of each command.
        self._cmd.update([('f' + x) for x in self._cmd])

    def path(self):
        """Path of the rules file this morphology was loaded from."""
        return self._path

    def load(self):
        # One rule per line, whitespace-separated fields.
        list.extend(self, (x.split() for x in _read(self._path)))

    def apply(self, token, previous=(None, None), next=(None, None)):
        """Apply all matching rules, in order, to a [word, tag] token."""
        w = token[0]
        for r in self:
            # Unconstrained form: [affix, CMD, ..., tag, 'x'].
            if (r[1] in self._cmd):
                (f, x, pos, cmd) = (bool(0), r[0], r[(- 2)], r[1].lower())
            # Tag-constrained form: [tag, affix, fCMD, ..., tag, 'x'].
            if (r[2] in self._cmd):
                (f, x, pos, cmd) = (bool(1), r[1], r[(- 2)], r[2].lower().lstrip('f'))
            if (f and (token[1] != r[0])):
                continue
            if (((cmd == 'word') and (x == w)) or ((cmd == 'char') and (x in w)) or ((cmd == 'haspref') and w.startswith(x)) or ((cmd == 'hassuf') and w.endswith(x)) or ((cmd == 'addpref') and ((x + w) in self.known)) or ((cmd == 'addsuf') and ((w + x) in self.known)) or ((cmd == 'deletepref') and w.startswith(x) and (w[len(x):] in self.known)) or ((cmd == 'deletesuf') and w.endswith(x) and (w[:(- len(x))] in self.known)) or ((cmd == 'goodleft') and (x == next[0])) or ((cmd == 'goodright') and (x == previous[0]))):
                token[1] = pos
        return token

    def insert(self, i, tag, affix, cmd='hassuf', tagged=None):
        """Insert a rule; '-x-', '-x' and 'x-' shorthand select the command."""
        if (affix.startswith('-') and affix.endswith('-')):
            (affix, cmd) = (affix[1:(- 1)], 'char')
        if affix.startswith('-'):
            # Bug fix: the upper bound was written as -0, making the slice
            # empty; the intent is to strip only the leading '-'.
            (affix, cmd) = (affix[1:], 'hassuf')
        if affix.endswith('-'):
            (affix, cmd) = (affix[0:(- 1)], 'haspref')
        if tagged:
            r = [tagged, affix, ('f' + cmd.lstrip('f')), tag, 'x']
        else:
            r = [affix, cmd.lstrip('f'), tag, 'x']
        lazylist.insert(self, i, r)

    def append(self, *args, **kwargs):
        # New rules go just before the trailing sentinel entry.
        self.insert((len(self) - 1), *args, **kwargs)

    def extend(self, rules=[]):
        # NOTE(review): mutable default argument; harmless here since the
        # list is only iterated, never mutated.
        for r in rules:
            self.append(*r)
def process(stride, path, scene_name):
    """Extract per-instance parking-intent features/labels from one scene.

    Walks the scene's frame chain, samples every ``stride``-th frame, and for
    each incoming, non-pedestrian agent builds a (NUM_FEATURE, n_spots)
    feature matrix plus an intent label. Results are pickled under DATA_PATH
    as <scene_name>_feature.pkl / <scene_name>_label.pkl.
    """
    ds = Dataset()
    ds.load(path + scene_name)
    features = []
    labels = []
    processor = IrlDataProcessor(ds)
    scene = ds.get('scene', ds.list_scenes()[0])
    # Collect the full frame chain once so we can stride over it by index.
    frame_list = []
    frame_token = scene['first_frame']
    while frame_token:
        frame_list.append(frame_token)
        frame = ds.get('frame', frame_token)
        frame_token = frame['next']
    for frame_idx in tqdm(range(0, len(frame_list), stride)):
        # BUG FIX (dead code removed): the original also reassigned
        # frame_token = frame['next'] at the end of this loop, but the value
        # was never read — the token is always taken from frame_list.
        frame = ds.get('frame', frame_list[frame_idx])
        for inst_token in frame['instances']:
            instance = ds.get('instance', inst_token)
            agent = ds.get('agent', instance['agent_token'])
            # Skip pedestrians and untyped agents (guard clause).
            if agent['type'] in {'Pedestrian', 'Undefined'}:
                continue
            try:
                # Only vehicles still approaching ('incoming') are of interest.
                if ds.get_inst_mode(inst_token) != 'incoming':
                    continue
                spot_centers = processor.detect_center(inst_token, 'spot')
                if not spot_centers:
                    continue
                agent_centers = processor.detect_center(inst_token, 'agent')
                # Agents closer than this to the A* path count as "nearby"
                # (units presumably meters — TODO confirm).
                thres = 3.5
                ego_speed = instance['speed']
                feature = np.zeros((NUM_FEATURE, len(spot_centers)))
                for center_idx, center_coords in enumerate(spot_centers):
                    local_offset = processor.compute_relative_offset(inst_token, center_coords)
                    astar_dist, astar_dir, astar_graph = processor.compute_Astar_dist_dir(inst_token, center_coords)
                    nearby_agents = sum(1 for center_agent in agent_centers
                                        if astar_graph.dist_to_graph(center_agent) < thres)
                    feature[0, center_idx] = local_offset[0]
                    feature[1, center_idx] = local_offset[1]
                    feature[2, center_idx] = astar_dist
                    feature[3, center_idx] = astar_dir
                    feature[4, center_idx] = nearby_agents
                    feature[5, center_idx] = ego_speed
                label = processor.get_intent_label(inst_token, spot_centers)
                features.append(feature)
                labels.append(label)
            except Exception:
                # Best-effort: log and continue with the next instance.
                print(('\nError occured for instance %s' % inst_token))
                traceback.print_exc()
    # BUG FIX: exists()+mkdir() had a check-then-create race; makedirs with
    # exist_ok is atomic with respect to concurrent creators.
    os.makedirs(DATA_PATH, exist_ok=True)
    with open(DATA_PATH + ('/%s_feature.pkl' % scene_name), 'wb') as f:
        pickle.dump(features, f)
    with open(DATA_PATH + ('/%s_label.pkl' % scene_name), 'wb') as f:
        pickle.dump(labels, f)
class RelaxedOneHotCategoricalStraightThrough2D(RelaxedOneHotCategorical2D):
    """Straight-through variant: sampling yields hard (quantized) one-hot
    values in the forward pass, while gradients flow through the relaxed
    (soft) sample."""

    event_dim = 3

    def rsample(self, sample_shape=torch.Size()):
        # Draw a relaxed sample, clamp away exact 0/1 probabilities, then
        # quantize it through the straight-through estimator.
        relaxed = clamp_probs(super().rsample(sample_shape))
        return QuantizeCategorical2D.apply(relaxed)

    def log_prob(self, value):
        # Score the soft sample stashed by the quantizer when present;
        # otherwise score the value itself.
        return super().log_prob(getattr(value, '_unquantize', value))
class Boxban_Env1(BoxobanEnv):
    """Boxoban environment preset: medium difficulty, 200-step episode cap."""

    metadata = {'render.modes': ['human', 'rgb_array', 'tiny_human', 'tiny_rgb_array']}

    def __init__(self):
        # Fixed configuration; everything else comes from the base class.
        super().__init__(max_steps=200, difficulty='medium')
class DinoV2Features(TorchFeatureExtractor):
    """Feature extractor backed by a DINOv2 vision-transformer checkpoint."""

    tag = 'dinov2'
    license = 'Apache-2.0'
    citation = '\{oquab2023dinov2,\n title={DINOv2: Learning Robust Visual Features without Supervision},\n author={Oquab, Maxime and Darcet, Timothee and Moutakanni, Theo and Vo, Huy V. and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr},\n journal={arXiv:2304.07193},\n year={2023}\n}\n'

    def __init__(self, cfg, weights, device=None, center_crop=False):
        super().__init__()
        self.cfg = cfg
        self.weights = weights
        self._center_crop = center_crop
        self.device = torch_utils.get_device(device)
        # Build the model from its OmegaConf config and put it on the target
        # device in inference mode.
        self.model = build_model_for_eval(OmegaConf.load(cfg), weights)
        self.model.to(self.device)
        self.model.eval()
        self.num_features = 1024
        # Optional 224x224 center crop, then scale to [0, 1] and apply
        # ImageNet normalization.
        pipeline = [transforms.CenterCrop(224)] if center_crop else []
        pipeline.append(transforms.Lambda((lambda x: (x / 255.0))))
        pipeline.append(transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)))
        self.transform = transforms.Compose(pipeline)
        self.preprocess_kwargs = dict(standardize=False)

    def dump_config(self):
        """Return a serializable description sufficient to rebuild this extractor."""
        cls_name = self.__class__.__name__
        return {
            'class': f'slideflow.model.extractors.dinov2.{cls_name}',
            'kwargs': {'center_crop': self._center_crop, 'cfg': self.cfg, 'weights': self.weights},
        }
class BackEnd(Model):
    """Decoder head that fuses multi-scale encoder features via bilinear
    upsampling and channel-wise concatenation (skip connections).

    When ``half_res`` is False an extra stage restores full resolution using
    the finest encoder map (conv1_2)."""

    def __init__(self, half_res=False):
        super(BackEnd, self).__init__()
        self.half_res = half_res
        self.upsample = layers.UpSampling2D(2, interpolation='bilinear')
        # Per scale: a 1x1 squeeze conv followed by a 3x3 conv.
        self.conv1 = BaseConv(256, 1, 1, activation='relu', use_bn=True)
        self.conv2 = BaseConv(256, 3, 1, activation='relu', use_bn=True)
        self.conv3 = BaseConv(128, 1, 1, activation='relu', use_bn=True)
        self.conv4 = BaseConv(128, 3, 1, activation='relu', use_bn=True)
        self.conv5 = BaseConv(64, 1, 1, activation='relu', use_bn=True)
        self.conv6 = BaseConv(64, 3, 1, activation='relu', use_bn=True)
        self.conv7 = BaseConv(32, 3, 1, activation='relu', use_bn=True)
        if not self.half_res:
            # Full-resolution refinement stage.
            self.conv8 = BaseConv(32, 1, 1, activation='relu', use_bn=True)
            self.conv9 = BaseConv(32, 3, 1, activation='relu', use_bn=True)
            self.conv10 = BaseConv(32, 3, 1, activation='relu', use_bn=True)

    def call(self, inputs):
        """Fuse the encoder feature maps (coarsest to finest) into one tensor."""
        if self.half_res:
            conv2_2, conv3_3, conv4_3, conv5_3 = inputs
        else:
            conv1_2, conv2_2, conv3_3, conv4_3, conv5_3 = inputs
        feat = self.upsample(conv5_3)
        feat = self.conv2(self.conv1(tf.concat([feat, conv4_3], axis=-1)))
        feat = self.upsample(feat)
        feat = self.conv4(self.conv3(tf.concat([feat, conv3_3], axis=-1)))
        feat = self.upsample(feat)
        feat = self.conv6(self.conv5(tf.concat([feat, conv2_2], axis=-1)))
        feat = self.conv7(feat)
        if not self.half_res:
            feat = self.upsample(feat)
            feat = tf.concat([feat, conv1_2], axis=-1)
            feat = self.conv10(self.conv9(self.conv8(feat)))
        return feat
class DepthFirstSearch(AbstractSearch):
    """Depth-first search over a tree of nodes, tracking the best terminal
    node by accumulated reward and returning the root-to-best path."""

    def __init__(self, policies):
        self.policies = policies

    def __call__(self, root, max_expansions=10, *args, **kwargs):
        """Run DFS from ``root``.

        Returns ``(elapsed_seconds, path)`` where ``path`` lists nodes from
        the root down to the best terminal node, or is empty when no terminal
        node was reached.
        """
        start_time = timeit.default_timer()
        self.policies.initialize(root)
        nodes_to_visit = [root]
        best_reward = -float('inf')
        best_node = None
        while nodes_to_visit:
            node = nodes_to_visit.pop()
            if node.terminal:
                if node.accumulatedReward() > best_reward:
                    best_reward = node.accumulatedReward()
                    best_node = node
            else:
                expanded = 0
                for child in node.children:
                    expanded += 1
                    self.policies.instantiate(node, child)
                    nodes_to_visit.append(child)
                    # NOTE(review): '>' admits max_expansions + 1 children;
                    # confirm whether '>=' was intended.
                    if expanded > max_expansions:
                        break
        path = []
        if best_node is not None:
            # BUG FIX: the original walked up through the parents without
            # appending them, so (despite the reverse()) the returned path
            # only ever contained the terminal node. Collect every ancestor
            # and reverse to get root -> best order.
            path.append(best_node)
            while best_node.parent is not None:
                best_node = best_node.parent
                path.append(best_node)
            path.reverse()
        elapsed = timeit.default_timer() - start_time
        return (elapsed, path)
def mobilenet_v2(pretrained=False, progress=True, quantize=False, **kwargs):
    """Construct a MobileNetV2, optionally quantized (qnnpack backend) and
    optionally initialized from the matching pretrained checkpoint URL."""
    model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
    _replace_relu(model)
    if quantize:
        backend = 'qnnpack'
        quantize_model(model, backend)
    else:
        assert pretrained in [True, False]
    if pretrained:
        # Quantized weights live under a backend-specific key.
        if quantize:
            model_url = quant_model_urls['mobilenet_v2_' + backend]
        else:
            model_url = model_urls['mobilenet_v2']
        state_dict = load_state_dict_from_url(model_url, progress=progress)
        model.load_state_dict(state_dict)
    return model
class ClMetrics(_FullMetric):
    """Classification metric bundle: tracks the mean accuracy of the output."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Single tracked metric, reported as 'mean_accu_out'.
        self.metrics = [average.MeanAccuracy(name='mean_accu_out')]
def qresnet18(pretrained=None, num_classes=1000):
    """Build a QResNet-18; if ``pretrained`` is a checkpoint path, load it."""
    model = QResNet(block=BasicBlock, layers=[2, 2, 2, 2], num_classes=num_classes)
    if pretrained:
        checkpoint = torch.load(pretrained)
        # Strip the DataParallel 'module.' prefix from every parameter name.
        state = {}
        for name, tensor in checkpoint['state_dict'].items():
            state[name.replace('module.', '')] = tensor
        model.load_state_dict(state)
        print('loading pretrained')
    return model
class SigmoidFocalClassificationLossTest(tf.test.TestCase):
    """Tests SigmoidFocalClassificationLoss against plain weighted sigmoid
    cross-entropy: focal loss should strongly down-weight easy examples,
    roughly match sigmoid x-entropy on hard examples, reduce exactly to it
    when alpha=None/gamma=0, and use alpha to gate positive vs. negative
    example losses.
    """

    def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
        """Confident (easy) predictions are down-weighted by orders of magnitude."""
        prediction_tensor = tf.constant([[[_logit(0.97)], [_logit(0.9)], [_logit(0.73)], [_logit(0.27)], [_logit(0.09)], [_logit(0.03)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0], [0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=True, gamma=2.0, alpha=None)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=True)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            # Only the order of magnitude of the ratio is asserted: the more
            # confident the prediction, the larger the down-weighting.
            order_of_ratio = np.power(10, np.floor(np.log10((sigmoid_loss / focal_loss))))
            self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])

    def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
        """Near-0.5 (hard) predictions keep a loss ratio of order 1."""
        prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.5)], [_logit(0.48)], [_logit(0.45)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=True, gamma=2.0, alpha=None)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=True)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            order_of_ratio = np.power(10, np.floor(np.log10((sigmoid_loss / focal_loss))))
            self.assertAllClose(order_of_ratio, [[1.0, 1.0, 1.0, 1.0, 1.0]])

    def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
        """Same order-1 ratio holds when losses are reduced to scalars."""
        prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.5)], [_logit(0.48)], [_logit(0.45)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=False, gamma=2.0, alpha=None)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=False)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            order_of_ratio = np.power(10, np.floor(np.log10((sigmoid_loss / focal_loss))))
            self.assertAlmostEqual(order_of_ratio, 1.0)

    def testIgnoreNegativeExampleLossViaAlphaMultiplier(self):
        """alpha=1.0 zeroes the loss on negative (target 0) examples."""
        prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.5)], [_logit(0.48)], [_logit(0.45)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=True, gamma=2.0, alpha=1.0)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=True)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            # Last two entries are the negatives; their focal loss vanishes.
            self.assertAllClose(focal_loss[0][3:], [0.0, 0.0])
            order_of_ratio = np.power(10, np.floor(np.log10((sigmoid_loss[0][:3] / focal_loss[0][:3]))))
            self.assertAllClose(order_of_ratio, [1.0, 1.0, 1.0])

    def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
        """alpha=0.0 zeroes the loss on positive (target 1) examples."""
        prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.5)], [_logit(0.48)], [_logit(0.45)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=True, gamma=2.0, alpha=0.0)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=True)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            # First three entries are the positives; their focal loss vanishes.
            self.assertAllClose(focal_loss[0][:3], [0.0, 0.0, 0.0])
            order_of_ratio = np.power(10, np.floor(np.log10((sigmoid_loss[0][3:] / focal_loss[0][3:]))))
            self.assertAllClose(order_of_ratio, [1.0, 1.0])

    def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self):
        """With alpha=0.5 and gamma=0, focal loss is exactly half of x-entropy."""
        prediction_tensor = tf.constant([[[(- 100), 100, (- 100)], [100, (- 100), (- 100)], [100, 0, (- 100)], [(- 100), (- 100), 100]], [[(- 100), 0, 100], [(- 100), 100, (- 100)], [100, 100, 100], [0, 0, (- 1)]]], tf.float32)
        target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1], [1, 1, 1, 0]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=True, alpha=0.5, gamma=0.0)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=True)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            self.assertAllClose(sigmoid_loss, (focal_loss * 2))

    def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
        """With alpha=None and gamma=0, focal loss equals sigmoid x-entropy."""
        prediction_tensor = tf.constant([[[(- 100), 100, (- 100)], [100, (- 100), (- 100)], [100, 0, (- 100)], [(- 100), (- 100), 100]], [[(- 100), 0, 100], [(- 100), 100, (- 100)], [100, 100, 100], [0, 0, (- 1)]]], tf.float32)
        target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1], [1, 1, 1, 0]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=True, alpha=None, gamma=0.0)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(anchorwise_output=True)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            (sigmoid_loss, focal_loss) = sess.run([sigmoid_loss, focal_loss])
            self.assertAllClose(sigmoid_loss, focal_loss)

    def testExpectedLossWithAlphaOneAndZeroGamma(self):
        """Zero logits, alpha=1, gamma=0: only the 8 positive entries contribute -log(0.5)."""
        prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32)
        target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1], [1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=False, alpha=1.0, gamma=0.0)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            focal_loss = sess.run(focal_loss)
            self.assertAllClose((((- math.log(0.5)) * 1.0) * 8), focal_loss)

    def testExpectedLossWithAlpha75AndZeroGamma(self):
        """Zero logits, alpha=0.75: positives weigh 0.75, negatives 0.25 (8 + 16 entries)."""
        prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32)
        target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1], [1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(anchorwise_output=False, alpha=0.75, gamma=0.0)
        focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
        with self.test_session() as sess:
            focal_loss = sess.run(focal_loss)
            self.assertAllClose(((- math.log(0.5)) * ((0.75 * 8) + ((0.25 * 8) * 2))), focal_loss)
def detokenize(tokens, src_dict, idx2bpe):
    """Map token ids back to a raw string: decode ids via ``src_dict``, expand
    each id to its BPE unit via ``idx2bpe``, and turn the 'G' word-boundary
    marker into spaces."""
    index_strings = src_dict.string(tokens).split()
    pieces = [idx2bpe[int(s)] for s in index_strings]
    return ''.join(pieces).replace('G', ' ')
class EntropyMinTrainerHook(TrainerHook):
    """Trainer hook adding an entropy-minimization objective with a fixed weight."""

    def __init__(self, name: str, weight: float):
        super().__init__(name)
        self._weight = weight
        self._criterion = Entropy()

    def __call__(self):
        # Hand the epocher a hook carrying this hook's name, weight and
        # entropy criterion.
        return _EntropyEpocherHook(
            name=self._hook_name,
            weight=self._weight,
            criterion=self._criterion,
        )
def minimum_spanning_tree(weight_by_edge):
    """Kruskal's algorithm: return the set of MST edges of a weighted graph.

    ``weight_by_edge`` maps (u, v) edge tuples to numeric weights. Edges are
    scanned in nondecreasing weight order; an edge is kept when its endpoints
    currently lie in different components.
    """
    group_by_node = {}
    mst_edges = set()
    for edge in sorted(weight_by_edge, key=weight_by_edge.__getitem__):
        (u, v) = edge
        if group_by_node.setdefault(u, {u}) != group_by_node.setdefault(v, {v}):
            mst_edges.add(edge)
            group_by_node[u].update(group_by_node[v])
            # BUG FIX: every node of v's old component must now share u's
            # merged set *object*. The original called .update() on each
            # member's set instead, which both mutated the set being iterated
            # (RuntimeError: set changed size during iteration) and left
            # components inconsistent. Rebinding the dict entry is the
            # correct union step.
            for node in group_by_node[v]:
                group_by_node[node] = group_by_node[u]
    return mst_edges
class Combinator(abc.ABC):
    """Interface for strategies that merge several arrays into a single array."""

    def combine(self, data: List[np.ndarray]) -> np.ndarray:
        """Merge ``data`` into one array; concrete subclasses must override."""
        raise NotImplementedError()
def get_pi_trainable_variables(scope):
    """Return the trainable variables under ``scope`` whose name has a 'pi'
    path component (i.e. policy-network variables)."""
    selected = []
    for var in get_trainable_variables(scope):
        # Strip the scope prefix, then look for 'pi' as a '/'-separated segment.
        if 'pi' in var.name[len(scope):].split('/'):
            selected.append(var)
    return selected
def isstring(obj):
    """True if ``obj`` is a string on Python 2 (basestring) or Python 3 (str)."""
    try:
        string_type = basestring
    except NameError:
        # Python 3: the basestring builtin does not exist.
        string_type = str
    return isinstance(obj, string_type)
def test_passed_sym_captured_as_dep_for_mutated_obj():
    """A symbol passed to a function that mutates an object becomes a
    dependency of the mutated attribute: once `y` is reassigned, reading
    `foo.x` must be flagged as stale."""
    run_cell('\n class Foo:\n def __init__(self, x):\n self.x = x\n\n def mutate(foo, x):\n foo.x = x\n ')
    run_cell('foo = Foo(5)')
    run_cell('y = 7')
    run_cell('mutate(foo, y)')
    # foo.x now derives from y; nothing has changed yet, so no staleness.
    run_cell('logging.info(foo.x)')
    assert_not_detected()
    # Rebinding y invalidates foo.x's captured dependency.
    run_cell('y = 42')
    run_cell('logging.info(foo.x)')
    assert_detected('`foo.x` depends on old value of `y`')
def main(cfg, gpu, save_dir):
    """Train a (multi-modal) segmentation model according to ``cfg``.

    Supports optional DDP, AMP, checkpoint resume, periodic evaluation, and
    keeps only the best-mIoU weights/checkpoint under ``save_dir``. TensorBoard
    summaries and per-epoch logging happen on rank 0 (or always without DDP).
    """
    start = time.time()
    best_mIoU = 0.0
    best_epoch = 0
    num_workers = 8
    device = torch.device(cfg['DEVICE'])
    # Unpack config sections once up front.
    (train_cfg, eval_cfg) = (cfg['TRAIN'], cfg['EVAL'])
    (dataset_cfg, model_cfg) = (cfg['DATASET'], cfg['MODEL'])
    (loss_cfg, optim_cfg, sched_cfg) = (cfg['LOSS'], cfg['OPTIMIZER'], cfg['SCHEDULER'])
    (epochs, lr) = (train_cfg['EPOCHS'], optim_cfg['LR'])
    resume_path = cfg['MODEL']['RESUME']
    # World size set by the distributed launcher; raises KeyError when absent.
    gpus = int(os.environ['WORLD_SIZE'])
    traintransform = get_train_augmentation(train_cfg['IMAGE_SIZE'], seg_fill=dataset_cfg['IGNORE_LABEL'])
    valtransform = get_val_augmentation(eval_cfg['IMAGE_SIZE'])
    # NOTE(review): eval() on config-supplied class names — only safe with
    # trusted config files.
    trainset = eval(dataset_cfg['NAME'])(dataset_cfg['ROOT'], 'train', traintransform, dataset_cfg['MODALS'])
    valset = eval(dataset_cfg['NAME'])(dataset_cfg['ROOT'], 'val', valtransform, dataset_cfg['MODALS'])
    class_names = trainset.CLASSES
    model = eval(model_cfg['NAME'])(model_cfg['BACKBONE'], trainset.n_classes, dataset_cfg['MODALS'])
    resume_checkpoint = None
    if os.path.isfile(resume_path):
        # Resume model weights; optimizer/scheduler state is restored below
        # only after they are constructed.
        resume_checkpoint = torch.load(resume_path, map_location=torch.device('cpu'))
        msg = model.load_state_dict(resume_checkpoint['model_state_dict'])
        logger.info(msg)
    else:
        model.init_pretrained(model_cfg['PRETRAINED'])
    model = model.to(device)
    iters_per_epoch = ((len(trainset) // train_cfg['BATCH_SIZE']) // gpus)
    loss_fn = get_loss(loss_cfg['NAME'], trainset.ignore_label, None)
    start_epoch = 0
    optimizer = get_optimizer(model, optim_cfg['NAME'], lr, optim_cfg['WEIGHT_DECAY'])
    scheduler = get_scheduler(sched_cfg['NAME'], optimizer, int(((epochs + 1) * iters_per_epoch)), sched_cfg['POWER'], (iters_per_epoch * sched_cfg['WARMUP']), sched_cfg['WARMUP_RATIO'])
    if train_cfg['DDP']:
        sampler = DistributedSampler(trainset, dist.get_world_size(), dist.get_rank(), shuffle=True)
        sampler_val = None
        model = DDP(model, device_ids=[gpu], output_device=0, find_unused_parameters=True)
    else:
        sampler = RandomSampler(trainset)
        sampler_val = None
    if resume_checkpoint:
        # Restore training progress; epoch is stored 1-based in the checkpoint.
        start_epoch = (resume_checkpoint['epoch'] - 1)
        optimizer.load_state_dict(resume_checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(resume_checkpoint['scheduler_state_dict'])
        # NOTE(review): 'loss' is loaded but never read afterwards.
        loss = resume_checkpoint['loss']
        best_mIoU = resume_checkpoint['best_miou']
    trainloader = DataLoader(trainset, batch_size=train_cfg['BATCH_SIZE'], num_workers=num_workers, drop_last=True, pin_memory=False, sampler=sampler)
    valloader = DataLoader(valset, batch_size=eval_cfg['BATCH_SIZE'], num_workers=num_workers, pin_memory=False, sampler=sampler_val)
    scaler = GradScaler(enabled=train_cfg['AMP'])
    # Rank-0-only (or non-DDP) setup: TensorBoard writer and one-time logging.
    if ((train_cfg['DDP'] and (torch.distributed.get_rank() == 0)) or (not train_cfg['DDP'])):
        writer = SummaryWriter(str(save_dir))
        logger.info(' model complexity ')
        cal_flops(model, dataset_cfg['MODALS'], logger)
        logger.info(' model structure ')
        logger.info(model)
        logger.info(' training config ')
        logger.info(cfg)
    for epoch in range(start_epoch, epochs):
        model.train()
        if train_cfg['DDP']:
            # Reshuffle shards per epoch for the distributed sampler.
            sampler.set_epoch(epoch)
        train_loss = 0.0
        lr = scheduler.get_lr()
        lr = (sum(lr) / len(lr))
        pbar = tqdm(enumerate(trainloader), total=iters_per_epoch, desc=f'Epoch: [{(epoch + 1)}/{epochs}] Iter: [{0}/{iters_per_epoch}] LR: {lr:.8f} Loss: {train_loss:.8f}')
        for (iter, (sample, lbl)) in pbar:
            optimizer.zero_grad(set_to_none=True)
            # ``sample`` is a list of per-modality tensors.
            sample = [x.to(device) for x in sample]
            lbl = lbl.to(device)
            with autocast(enabled=train_cfg['AMP']):
                logits = model(sample)
                loss = loss_fn(logits, lbl)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()
            torch.cuda.synchronize()
            # Average LR across parameter groups, floored for display.
            lr = scheduler.get_lr()
            lr = (sum(lr) / len(lr))
            if (lr <= 1e-08):
                lr = 1e-08
            train_loss += loss.item()
            pbar.set_description(f'Epoch: [{(epoch + 1)}/{epochs}] Iter: [{(iter + 1)}/{iters_per_epoch}] LR: {lr:.8f} Loss: {(train_loss / (iter + 1)):.8f}')
        # ``iter`` holds the last loop index here; mean loss over the epoch.
        train_loss /= (iter + 1)
        if ((train_cfg['DDP'] and (torch.distributed.get_rank() == 0)) or (not train_cfg['DDP'])):
            writer.add_scalar('train/loss', train_loss, epoch)
        torch.cuda.empty_cache()
        # Evaluate every EVAL_INTERVAL epochs once past EVAL_START, and on the
        # final epoch; evaluation runs on rank 0 only.
        if (((((epoch + 1) % train_cfg['EVAL_INTERVAL']) == 0) and ((epoch + 1) > train_cfg['EVAL_START'])) or ((epoch + 1) == epochs)):
            if ((train_cfg['DDP'] and (torch.distributed.get_rank() == 0)) or (not train_cfg['DDP'])):
                (acc, macc, _, _, ious, miou) = evaluate(model, valloader, device)
                writer.add_scalar('val/mIoU', miou, epoch)
                if (miou > best_mIoU):
                    # Drop the previous best weights/checkpoint before saving
                    # the new best pair.
                    prev_best_ckp = (save_dir / f"{model_cfg['NAME']}_{model_cfg['BACKBONE']}_{dataset_cfg['NAME']}_epoch{best_epoch}_{best_mIoU}_checkpoint.pth")
                    prev_best = (save_dir / f"{model_cfg['NAME']}_{model_cfg['BACKBONE']}_{dataset_cfg['NAME']}_epoch{best_epoch}_{best_mIoU}.pth")
                    if os.path.isfile(prev_best):
                        os.remove(prev_best)
                    if os.path.isfile(prev_best_ckp):
                        os.remove(prev_best_ckp)
                    best_mIoU = miou
                    best_epoch = (epoch + 1)
                    cur_best_ckp = (save_dir / f"{model_cfg['NAME']}_{model_cfg['BACKBONE']}_{dataset_cfg['NAME']}_epoch{best_epoch}_{best_mIoU}_checkpoint.pth")
                    cur_best = (save_dir / f"{model_cfg['NAME']}_{model_cfg['BACKBONE']}_{dataset_cfg['NAME']}_epoch{best_epoch}_{best_mIoU}.pth")
                    # Unwrap the DDP module so weights load without the wrapper.
                    torch.save((model.module.state_dict() if train_cfg['DDP'] else model.state_dict()), cur_best)
                    torch.save({'epoch': best_epoch, 'model_state_dict': (model.module.state_dict() if train_cfg['DDP'] else model.state_dict()), 'optimizer_state_dict': optimizer.state_dict(), 'loss': train_loss, 'scheduler_state_dict': scheduler.state_dict(), 'best_miou': best_mIoU}, cur_best_ckp)
                logger.info(print_iou(epoch, ious, miou, acc, macc, class_names))
                logger.info(f'Current epoch:{epoch} mIoU: {miou} Best mIoU: {best_mIoU}')
    if ((train_cfg['DDP'] and (torch.distributed.get_rank() == 0)) or (not train_cfg['DDP'])):
        writer.close()
    pbar.close()
    end = time.gmtime((time.time() - start))
    table = [['Best mIoU', f'{best_mIoU:.2f}'], ['Total Training Time', time.strftime('%H:%M:%S', end)]]
    logger.info(tabulate(table, numalign='right'))
def wrap_time(func):
    """Decorator that prints ``func``'s wall-clock execution time per call.

    The wrapped function's return value is propagated and its metadata
    (__name__, __doc__) preserved.
    """
    from functools import wraps
    from time import time

    @wraps(func)  # BUG FIX: keep the wrapped function's identity/metadata
    def wrapped_func(*args, **kwargs):
        begin = time()
        # BUG FIX: the original discarded func's return value, making the
        # decorator unusable on anything that returns a result.
        result = func(*args, **kwargs)
        end = time()
        print(f"func {func.__name__}'s execution time: {(end - begin)}")
        return result
    return wrapped_func
def get_rank():
    """Rank of this process in the default process group, or 0 when
    distributed execution is unavailable or not initialized."""
    # NOTE(review): gating on is_nccl_available() rather than
    # dist.is_available() forces rank 0 for non-NCCL backends (e.g. gloo on
    # CPU) — confirm this is intentional.
    if not (dist.is_nccl_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
class UNetMidBlock2DCrossAttn(nn.Module):
    """UNet middle block: an opening ResNet followed by ``num_layers`` pairs of
    (cross-attention Transformer, ResNet), all at constant channel width
    ``in_channels``. Used as the bottleneck of diffusion UNets.
    """

    def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[(int, Tuple[int])]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, dual_cross_attention: bool=False, use_linear_projection: bool=False, upcast_attention: bool=False, attention_type: str='default'):
        super().__init__()
        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        # Fall back to a group count that divides the channel width.
        resnet_groups = (resnet_groups if (resnet_groups is not None) else min((in_channels // 4), 32))
        # Broadcast a scalar depth to one entry per transformer block.
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = ([transformer_layers_per_block] * num_layers)
        # One extra ResNet at the front: the block interleaves as
        # resnet, (attn, resnet) * num_layers.
        resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)]
        attentions = []
        for i in range(num_layers):
            if (not dual_cross_attention):
                attentions.append(Transformer2DModel(num_attention_heads, (in_channels // num_attention_heads), in_channels=in_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type))
            else:
                # Dual variant always uses a single transformer layer.
                attentions.append(DualTransformer2DModel(num_attention_heads, (in_channels // num_attention_heads), in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups))
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
        """Run resnet, then (attention, resnet) pairs; returns the transformed states."""
        # LoRA scaling factor forwarded to the ResNet blocks.
        lora_scale = (cross_attention_kwargs.get('scale', 1.0) if (cross_attention_kwargs is not None) else 1.0)
        hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
        for (attn, resnet) in zip(self.attentions, self.resnets[1:]):
            if (self.training and self.gradient_checkpointing):
                def create_custom_forward(module, return_dict=None):
                    # Wrap the module so torch.utils.checkpoint can call it
                    # with positional args only.
                    def custom_forward(*inputs):
                        if (return_dict is not None):
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                ckpt_kwargs: Dict[(str, Any)] = ({'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {})
                # NOTE(review): under checkpointing only the resnet is wrapped
                # in checkpoint(); the attention runs normally — confirm
                # intended.
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0]
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs)
            else:
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0]
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
        return hidden_states
def main():
    """Generate (problem, solution) training pairs from seed code snippets by
    prompting an OpenAI chat model, writing accepted pairs to a JSONL file.

    Supports resuming a previous run (``--continue_from``) by skipping already
    processed dataset indices.
    """
    (args, *_) = cast(tuple[(Args, ...)], HfArgumentParser(Args).parse_args_into_dataclasses())
    # Optionally cap how much of the source dataset is even considered.
    split = (f'train[:{args.max_considered_data}]' if (args.max_considered_data is not None) else 'train')
    assert (magicoder.utils.OPENAI_CLIENT is not None)
    dataset: Dataset = load_dataset(args.dataset_name, data_dir=args.data_dir, split=split, num_proc=magicoder.utils.N_CORES)
    random.seed(args.seed)
    dataset = dataset.map(function=map_dataset, fn_kwargs=dict(args=args), with_indices=True, batched=True, batch_size=args.chunk_size)
    dataset = dataset.shuffle(seed=args.seed)
    # Record each example's post-shuffle position so runs are resumable.
    dataset = dataset.map((lambda _, index: {'index': index}), with_indices=True)
    start_index = args.seed_code_start_index
    end_index = min((start_index + args.max_new_data), len(dataset))
    dataset = dataset.select(range(start_index, end_index))
    prompt_template = Path('data/prompt.txt').read_text()
    timestamp = magicoder.utils.timestamp()
    # Fingerprint ties the output file to the exact prompt/config used.
    data_fingerprint = args.fingerprint(prompt_template)
    if (args.continue_from is not None):
        # Resuming: the old file name must embed the same fingerprint and
        # index range; skip everything up to the last written index.
        assert (data_fingerprint in args.continue_from), 'Fingerprint mismatch'
        assert (f'{start_index}_{end_index}' in args.continue_from), 'Index mismatch'
        old_path = Path(args.continue_from)
        assert old_path.exists()
        old_data = magicoder.utils.read_jsonl(old_path)
        assert (len(old_data) > 0)
        last_index = old_data[(- 1)]['index']
        n_skipped = ((last_index - start_index) + 1)
        print('Continuing from', old_path)
        f_out = old_path.open('a')
    else:
        tag = ('' if (args.tag == '') else f'-{args.tag}')
        path = Path(f'data{tag}-{data_fingerprint}-{start_index}_{end_index}-{timestamp}.jsonl')
        assert (not path.exists())
        f_out = path.open('w')
        print('Saving to', path)
        n_skipped = 0
    # NOTE(review): f_out is never closed/flushed explicitly; data is only
    # guaranteed on disk at interpreter exit.
    for (index, example) in enumerate(tqdm(dataset)):
        if (index < n_skipped):
            continue
        assert ((index + start_index) == example['index'])
        prompt = prompt_template.format(code=example['seed'])
        # Leave ERROR_MARGIN tokens of headroom under the model's context size.
        max_new_tokens = min(args.max_new_tokens, ((args.model_max_tokens - magicoder.utils.num_tokens_from_string(prompt, args.model)) - ERROR_MARGIN))
        if (max_new_tokens <= 0):
            continue
        messages = [{'role': 'system', 'content': SYSTEM}, {'role': 'user', 'content': prompt}]
        # Deterministic per-example seed for reproducible completions.
        openai_seed = (args.seed + example['index'])
        response = magicoder.utils.chat_completions_with_backoff(model=args.model, messages=messages, max_tokens=max_new_tokens, n=1, temperature=args.temperature, seed=openai_seed)
        print(openai_seed)
        choice = response.choices[0]
        # Skip truncated or filtered completions.
        if (choice.finish_reason != 'stop'):
            continue
        parsing_result = parse_problem_solution(choice.message.content)
        if (parsing_result is None):
            continue
        (problem, solution) = parsing_result
        if ((len(problem) == 0) or (len(solution) == 0)):
            continue
        fingerprint = response.system_fingerprint
        assert (fingerprint is not None)
        data = dict(raw_index=example['raw_index'], index=example['index'], seed=example['seed'], openai_fingerprint=fingerprint, problem=problem, solution=solution)
        print('[Problem Description]', problem, sep='\n', end='\n\n')
        print('[Solution]', solution, sep='\n')
        f_out.write((json.dumps(data) + '\n'))
class RecursiveListEnv(Environment):
def __init__(self, length=10, encoding_dim=32):
assert (length > 1), 'length must be a positive integer superior to 1'
self.length = length
self.start_pos = 0
self.end_pos = (length - 1)
self.scratchpad_ints = np.zeros((length,))
self.p1_pos = 0
self.p2_pos = 0
self.encoding_dim = encoding_dim
self.has_been_reset = False
self.programs_library = {'PTR_1_LEFT': {'level': 0, 'recursive': False}, 'STOP': {'level': (- 1), 'recursive': False}, 'PTR_2_LEFT': {'level': 0, 'recursive': False}, 'PTR_1_RIGHT': {'level': 0, 'recursive': False}, 'PTR_2_RIGHT': {'level': 0, 'recursive': False}, 'SWAP': {'level': 0, 'recursive': False}, 'RSHIFT': {'level': 1, 'recursive': False}, 'LSHIFT': {'level': 1, 'recursive': False}, 'COMPSWAP': {'level': 1, 'recursive': False}, 'RESET': {'level': 2, 'recursive': True}, 'BUBBLE': {'level': 2, 'recursive': True}, 'BUBBLESORT': {'level': 3, 'recursive': True}}
for (i, key) in enumerate(sorted(list(self.programs_library.keys()))):
self.programs_library[key]['index'] = i
self.prog_to_func = {'STOP': self._stop, 'PTR_1_LEFT': self._ptr_1_left, 'PTR_2_LEFT': self._ptr_2_left, 'PTR_1_RIGHT': self._ptr_1_right, 'PTR_2_RIGHT': self._ptr_2_right, 'SWAP': self._swap}
self.prog_to_precondition = {'STOP': self._stop_precondition, 'RSHIFT': self._rshift_precondition, 'LSHIFT': self._lshift_precondition, 'COMPSWAP': self._compswap_precondition, 'RESET': self._reset_precondition, 'BUBBLE': self._bubble_precondition, 'BUBBLESORT': self._bubblesort_precondition, 'PTR_1_LEFT': self._ptr_1_left_precondition, 'PTR_2_LEFT': self._ptr_2_left_precondition, 'PTR_1_RIGHT': self._ptr_1_right_precondition, 'PTR_2_RIGHT': self._ptr_2_right_precondition, 'SWAP': self._swap_precondition}
self.prog_to_postcondition = {'RSHIFT': self._rshift_postcondition, 'LSHIFT': self._lshift_postcondition, 'COMPSWAP': self._compswap_postcondition, 'RESET': self._reset_postcondition, 'BUBBLE': self._bubble_postcondition, 'BUBBLESORT': self._bubblesort_postcondition}
super(RecursiveListEnv, self).__init__(self.programs_library, self.prog_to_func, self.prog_to_precondition, self.prog_to_postcondition)
def _decr_length_right(self):
assert self._decr_length_right_precondition(), 'precondition not verified'
if (self.end_pos > self.start_pos):
self.end_pos -= 1
def _decr_length_right_precondition(self):
return ((self.end_pos > self.start_pos) and ((self.p1_pos < self.end_pos) and (self.p2_pos < self.end_pos)))
def _decr_length_left(self):
assert self._decr_length_left_precondition(), 'precondition not verified'
if (self.start_pos < self.end_pos):
self.start_pos += 1
def _decr_length_left_precondition(self):
return ((self.start_pos < self.end_pos) and ((self.p1_pos > self.start_pos) and (self.p2_pos > self.start_pos)))
def _incr_length_left(self):
assert self._incr_length_left_precondition(), 'precondition not verified'
if (self.start_pos > 0):
self.start_pos -= 1
def _incr_length_left_precondition(self):
return (self.start_pos > 0)
def _incr_length_right(self):
assert self._incr_length_right_precondition(), 'precondition not verified'
if (self.end_pos < (self.length - 1)):
self.end_pos += 1
def _incr_length_right_precondition(self):
return (self.end_pos < (self.length - 1))
def _ptr_1_left(self):
assert self._ptr_1_left_precondition(), 'precondition not verified'
if (self.p1_pos > self.start_pos):
self.p1_pos -= 1
def _ptr_1_left_precondition(self):
return (self.p1_pos > self.start_pos)
def _stop(self):
assert self._stop_precondition(), 'precondition not verified'
pass
def _stop_precondition(self):
return True
def _ptr_2_left(self):
assert self._ptr_2_left_precondition(), 'precondition not verified'
if (self.p2_pos > self.start_pos):
self.p2_pos -= 1
def _ptr_2_left_precondition(self):
return (self.p2_pos > self.start_pos)
def _ptr_1_right(self):
assert self._ptr_1_right_precondition(), 'precondition not verified'
if (self.p1_pos < self.end_pos):
self.p1_pos += 1
def _ptr_1_right_precondition(self):
return (self.p1_pos < self.end_pos)
def _ptr_2_right(self):
assert self._ptr_2_right_precondition(), 'precondition not verified'
if (self.p2_pos < self.end_pos):
self.p2_pos += 1
def _ptr_2_right_precondition(self):
return (self.p2_pos < self.end_pos)
def _swap(self):
assert self._swap_precondition(), 'precondition not verified'
self.scratchpad_ints[[self.p1_pos, self.p2_pos]] = self.scratchpad_ints[[self.p2_pos, self.p1_pos]]
def _swap_precondition(self):
return (self.p1_pos != self.p2_pos)
def _compswap_precondition(self):
list_length = ((self.end_pos - self.start_pos) + 1)
bool = (list_length > 1)
bool &= (self.p1_pos < self.end_pos)
bool &= ((self.p2_pos == self.p1_pos) or (self.p2_pos == (self.p1_pos + 1)))
return bool
def _compswap_postcondition(self, init_state, state):
(new_scratchpad_ints, new_p1_pos, new_p2_pos, new_start_pos, new_end_pos) = init_state
new_scratchpad_ints = np.copy(new_scratchpad_ints)
if ((new_p1_pos == new_p2_pos) and (new_p2_pos < new_end_pos)):
new_p2_pos += 1
idx_left = min(new_p1_pos, new_p2_pos)
idx_right = max(new_p1_pos, new_p2_pos)
if (new_scratchpad_ints[idx_left] > new_scratchpad_ints[idx_right]):
new_scratchpad_ints[[idx_left, idx_right]] = new_scratchpad_ints[[idx_right, idx_left]]
new_state = (new_scratchpad_ints, new_p1_pos, new_p2_pos, new_start_pos, new_end_pos)
return self.compare_state(state, new_state)
def _lshift_postcondition(self, init_state, state):
(init_scratchpad_ints, init_p1_pos, init_p2_pos, init_start_pos, init_end_pos) = init_state
(scratchpad_ints, p1_pos, p2_pos, start_pos, end_pos) = state
bool = np.array_equal(init_scratchpad_ints, scratchpad_ints)
bool &= (init_start_pos == start_pos)
bool &= (init_end_pos == end_pos)
if (init_p1_pos > init_start_pos):
bool &= (p1_pos == (init_p1_pos - 1))
else:
bool &= (p1_pos == init_p1_pos)
if (init_p2_pos > init_start_pos):
bool &= (p2_pos == (init_p2_pos - 1))
else:
bool &= (p2_pos == init_p2_pos)
return bool
def _rshift_postcondition(self, init_state, state):
(init_scratchpad_ints, init_p1_pos, init_p2_pos, init_start_pos, init_end_pos) = init_state
(scratchpad_ints, p1_pos, p2_pos, start_pos, end_pos) = state
bool = np.array_equal(init_scratchpad_ints, scratchpad_ints)
bool &= (init_start_pos == start_pos)
bool &= (init_end_pos == end_pos)
if (init_p1_pos < init_end_pos):
bool &= (p1_pos == (init_p1_pos + 1))
else:
bool &= (p1_pos == init_p1_pos)
if (init_p2_pos < init_end_pos):
bool &= (p2_pos == (init_p2_pos + 1))
else:
bool &= (p2_pos == init_p2_pos)
return bool
def _reset_postcondition(self, init_state, state):
(init_scratchpad_ints, init_p1_pos, init_p2_pos, init_start_pos, init_end_pos) = init_state
(scratchpad_ints, p1_pos, p2_pos, start_pos, end_pos) = state
bool = np.array_equal(init_scratchpad_ints, scratchpad_ints)
bool &= (init_start_pos == start_pos)
bool &= (init_end_pos == end_pos)
bool &= ((p1_pos == start_pos) and (p2_pos == start_pos))
return bool
def _bubblesort_postcondition(self, init_state, state):
(init_scratchpad_ints, init_p1_pos, init_p2_pos, init_start_pos, init_end_pos) = init_state
(scratchpad_ints, p1_pos, p2_pos, start_pos, end_pos) = state
bool = (init_start_pos == start_pos)
bool &= (init_end_pos == end_pos)
bool &= np.all((scratchpad_ints[:end_pos] <= scratchpad_ints[(start_pos + 1):(end_pos + 1)]))
return bool
def _bubble_postcondition(self, init_state, state):
(new_scratchpad_ints, new_p1_pos, new_p2_pos, new_start_pos, new_end_pos) = init_state
new_scratchpad_ints = np.copy(new_scratchpad_ints)
for idx in range(new_start_pos, new_end_pos):
if (new_scratchpad_ints[(idx + 1)] < new_scratchpad_ints[idx]):
new_scratchpad_ints[[idx, (idx + 1)]] = new_scratchpad_ints[[(idx + 1), idx]]
new_p1_pos = new_end_pos
new_p2_pos = new_end_pos
new_state = (new_scratchpad_ints, new_p1_pos, new_p2_pos, new_start_pos, new_end_pos)
return self.compare_state(state, new_state)
def _lshift_precondition(self):
return ((self.p1_pos > self.start_pos) or (self.p2_pos > self.start_pos))
def _rshift_precondition(self):
return ((self.p1_pos < self.end_pos) or (self.p2_pos < self.end_pos))
def _bubble_precondition(self):
bubble_index = self.programs_library['BUBBLE']['index']
if (self.current_task_index != bubble_index):
bool = (self.p1_pos == self.start_pos)
bool &= ((self.p2_pos == self.start_pos) or (self.p2_pos == (self.start_pos + 1)))
else:
bool = (self.p1_pos == (self.start_pos + 1))
bool &= ((self.p2_pos == self.start_pos) or (self.p2_pos == (self.start_pos + 1)))
bool &= self._decr_length_left_precondition()
return bool
def _reset_precondition(self):
bool = ((self.p1_pos > self.start_pos) or (self.p2_pos > self.start_pos))
reset_index = self.programs_library['RESET']['index']
if (self.current_task_index == reset_index):
bool &= self._decr_length_right_precondition()
return bool
def _bubblesort_precondition(self):
bool = (self.p1_pos == self.start_pos)
bool &= (self.p2_pos == self.start_pos)
bubblesort_index = self.programs_library['BUBBLESORT']['index']
if (self.current_task_index == bubblesort_index):
bool &= self._decr_length_right_precondition()
return bool
def _one_hot_encode(self, digit, basis=10):
encoding = np.zeros(basis)
encoding[digit] = 1
return encoding
def _one_hot_decode(self, one_encoding):
return np.argmax(one_encoding)
def reset_env(self):
    """Reset data and pointers for the current task.

    Draws a fresh random scratchpad of digits 0-9 and places the two
    pointers in a task-dependent initial configuration.  Left unchanged:
    the randomized rejection loops depend on the exact RNG call order.
    """
    assert (self.length > 1), 'list length must be greater than 1'
    self.start_pos = 0
    self.end_pos = (self.length - 1)
    self.scratchpad_ints = np.random.randint(10, size=self.length)
    current_task_name = self.get_program_from_index(self.current_task_index)
    if ((current_task_name == 'BUBBLE') or (current_task_name == 'BUBBLESORT')):
        # Bubble-style tasks always start with both pointers on the head.
        init_pointers_pos1 = 0
        init_pointers_pos2 = 0
    elif (current_task_name == 'RESET'):
        # RESET needs at least one pointer away from index 0, otherwise the
        # task would already be solved; resample until that holds.
        while True:
            init_pointers_pos1 = int(np.random.randint(0, self.length))
            init_pointers_pos2 = int(np.random.randint(0, self.length))
            if (not ((init_pointers_pos1 == 0) and (init_pointers_pos2 == 0))):
                break
    elif (current_task_name == 'LSHIFT'):
        # LSHIFT: at least one pointer must be able to move left.
        while True:
            init_pointers_pos1 = int(np.random.randint(0, self.length))
            init_pointers_pos2 = int(np.random.randint(0, self.length))
            if (not ((init_pointers_pos1 == 0) and (init_pointers_pos2 == 0))):
                break
    elif (current_task_name == 'RSHIFT'):
        # RSHIFT: at least one pointer must be able to move right.
        while True:
            init_pointers_pos1 = int(np.random.randint(0, self.length))
            init_pointers_pos2 = int(np.random.randint(0, self.length))
            if (not ((init_pointers_pos1 == (self.length - 1)) and (init_pointers_pos2 == (self.length - 1)))):
                break
    elif (current_task_name == 'COMPSWAP'):
        # COMPSWAP: p2 sits on p1 or directly to its right.
        init_pointers_pos1 = int(np.random.randint(0, (self.length - 1)))
        init_pointers_pos2 = int(np.random.choice([init_pointers_pos1, (init_pointers_pos1 + 1)]))
    else:
        raise NotImplementedError('Unable to reset env for this program...')
    self.p1_pos = init_pointers_pos1
    self.p2_pos = init_pointers_pos2
    self.has_been_reset = True
def get_observation(self):
    """Encode the state as two 10-way one-hot pointer values plus 6 flags."""
    assert self.has_been_reset, 'Need to reset the environment before getting observations'
    v1 = self.scratchpad_ints[self.p1_pos]
    v2 = self.scratchpad_ints[self.p2_pos]
    # Flag order: p1-at-start, p1-at-end, p2-at-start, p2-at-end, same-pos, sorted.
    flags = np.array([
        int(self.p1_pos == self.start_pos),
        int(self.p1_pos == self.end_pos),
        int(self.p2_pos == self.start_pos),
        int(self.p2_pos == self.end_pos),
        int(self.p1_pos == self.p2_pos),
        int(self._is_sorted()),
    ])
    one_hots = np.eye(10)[[v1, v2]].reshape(-1)
    return np.concatenate((one_hots, flags), axis=0)
def get_observation_dim(self):
    """Observation size: two 10-way one-hot encodings plus 6 boolean flags."""
    return 2 * 10 + 6
def get_state(self):
    """Return an independent snapshot: (scratchpad copy, p1, p2, start, end)."""
    assert self.has_been_reset, 'Need to reset the environment before getting states'
    snapshot = (np.copy(self.scratchpad_ints), self.p1_pos, self.p2_pos, self.start_pos, self.end_pos)
    return snapshot
def reset_to_state(self, state):
    """Restore the environment from a snapshot produced by `get_state`."""
    scratch, p1, p2, start, end = state
    self.scratchpad_ints = scratch.copy()
    self.p1_pos = p1
    self.p2_pos = p2
    self.start_pos = start
    self.end_pos = end
def get_state_str(self, state):
    """Human-readable rendering of a state tuple (active slice only).

    Note: the original local was named ``str``, shadowing the builtin.
    """
    scratch, p1_pos, p2_pos, start_pos, end_pos = state
    visible = scratch.copy()[start_pos:(end_pos + 1)]
    return 'list: {}, p1 : {}, p2 : {}, start_pos: {}, end_pos: {}'.format(visible, p1_pos, p2_pos, start_pos, end_pos)
def compare_state(self, state1, state2):
    """Deep-compare two state tuples: array contents and all four positions."""
    arrays_equal = np.array_equal(state1[0], state2[0])
    return arrays_equal and state1[1:] == state2[1:]
def _is_sorted(self):
arr = self.scratchpad_ints
return np.all((arr[:(self.length - 1)] <= arr[1:self.length]))
def start_task(self, task_index):
    """Enter `task_index`; recursive re-entry first shrinks the active list."""
    if self.tasks_list.count(task_index) > 0:
        # We are re-entering a task already on the stack: recurse on a
        # shorter list (RESET/BUBBLESORT shrink right, BUBBLE shrinks left).
        task = self.get_program_from_index(task_index)
        if task in ('RESET', 'BUBBLESORT'):
            self._decr_length_right()
        if task == 'BUBBLE':
            self._decr_length_left()
    return super(RecursiveListEnv, self).start_task(task_index)
def end_task(self):
    """Leave the current task, undoing the boundary shrink of a recursive entry."""
    current_task = self.get_program_from_index(self.current_task_index)
    recursing = self.tasks_list.count(self.current_task_index) > 1
    if current_task in ('RESET', 'BUBBLESORT') and recursing:
        self._incr_length_right()
    if current_task == 'BUBBLE' and recursing:
        self._incr_length_left()
    super(RecursiveListEnv, self).end_task()
# NOTE(review): this bare `_registry` expression looks like a mangled decorator
# line (presumably an adaptor-registry decorator on the class below) — confirm
# against the original source.
_registry
class TensorFlowAdaptor(Adaptor):
    """Framework adaptor handling quantization/evaluation of TensorFlow models."""

    # Maps raw TF op names onto the unified op-type vocabulary used by the tuner.
    unify_op_type_mapping = {'Conv2D': 'conv2d', 'Conv3D': 'conv3d', 'DepthwiseConv2dNative': 'conv2d', 'FusedBatchNormV3': 'batchnorm', '_MklFusedInstanceNorm': 'instancenorm', 'MaxPool': 'pooling', 'MaxPool3D': 'pooling', 'AvgPool': 'pooling', 'ConcatV2': 'concat', 'MatMul': 'matmul', 'BatchMatMul': 'matmul', 'BatchMatMulV2': 'matmul', 'Pad': 'pad', 'Conv2DBackpropInput': 'deconv2d', 'Conv3DBackpropInputV2': 'deconv3d'}
def __init__(self, framework_specific_info):
    """Set up the TF adaptor from the framework-specific config dict.

    Forces CPU execution, prepares the workspace directory, loads the
    capability YAML and derives the quantization-mode flags (ITEX/QDQ).
    """
    super().__init__(framework_specific_info)
    # Force CPU visibility off — this adaptor's passes run on CPU only.
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    self.quantize_config = {'op_wise_config': {}}
    self.framework_specific_info = framework_specific_info
    self.approach = deep_get(self.framework_specific_info, 'approach', False)
    self.device = self.framework_specific_info['device']
    self.work_dir = os.path.abspath(self.framework_specific_info['workspace_path'])
    self.recipes = deep_get(self.framework_specific_info, 'recipes', {})
    self.performance_only = deep_get(self.framework_specific_info, 'performance_only', False)
    self.use_bf16 = deep_get(self.framework_specific_info, 'use_bf16', False)
    self.backend = self.framework_specific_info['backend']
    self.format = self.framework_specific_info['format']
    os.makedirs(self.work_dir, exist_ok=True)
    self.model = None
    self.pre_optimized_model = None
    self.pre_optimizer_handle = None
    self.bf16_ops = []
    self.fp32_ops = []
    self.smooth_quant_mul_ops = []
    # Count of tensorboard evaluation dumps written so far.
    self.dump_times = 0
    # Capability YAML is named after the concrete adaptor class, e.g. "tensorflow.yaml".
    cfg_yaml_name = '{}.yaml'.format(self.__class__.__name__[:(- len('Adaptor'))].lower())
    self.itex_mode = ((self.backend == 'itex') or (cfg_yaml_name == 'tensorflow_itex.yaml'))
    if self.itex_mode:
        self._check_itex()
    self.query_handler = TensorflowQuery(local_config_file=os.path.join(os.path.dirname(__file__), cfg_yaml_name), performance_only=self.performance_only, itex_mode=self.itex_mode)
    import tensorflow as tf
    from pkg_resources import parse_version
    self.new_api = (tf.version.VERSION in spr_base_verions)
    self.qdq_enabled = (self.itex_mode or (self.format == 'QDQ') or self.new_api)
    self.op_wise_sequences = self.query_handler.get_eightbit_patterns(self.qdq_enabled)
    self.fp32_results = []
    self.fp32_preds_as_label = False
    self.benchmark = (GLOBAL_STATE.STATE == MODE.BENCHMARK)
    self.callbacks = []
    self.optype_statistics = None
    self._last_dequantize_ops = None
    self.smooth_quant_model = None
def _check_itex(self):
try:
import intel_extension_for_tensorflow
except:
raise ImportError('The Intel Extension for TensorFlow is not installed. Please install it to run models on ITEX backend')
def _log_histogram(self, writer, tag, values, step=0, bins=1000):
    """Write `values` as a TensorBoard histogram summary under `tag`.

    Note: `bins` is accepted for interface compatibility but is not used.
    """
    import tensorflow as tf
    data = np.array(values)
    with writer.as_default():
        tf.summary.histogram(tag, data, step)
    writer.flush()
def _pre_hook_for_hvd(self, dataloader=None):
    """Initialize Horovod for distributed training and keep a handle on it."""
    import horovod.tensorflow as hvd
    self.hvd = hvd
    self.hvd.init()
# NOTE(review): this bare call looks like a mangled decorator line
# (presumably an elapsed-time decorator on `train`) — confirm against the
# original source.
_elapsed_time(customized_msg='Model training')
def train(self, model, dataloader, optimizer_tuple, criterion_tuple, hooks, postprocess, **kwargs):
    """Train (prune or distill) the model, optionally distributed via Horovod.

    Args:
        model: wrapped model; `model._model` is a saved-model path or Keras model.
        dataloader: yields training batches; may be Horovod-distributed.
        optimizer_tuple: (optimizer_class, kwargs_dict).
        criterion_tuple: (loss_class, kwargs_dict).
        hooks: training callbacks (on_train_begin, on_step_begin, ...).
        postprocess: optional callable mapping a raw batch to (x, y).
        **kwargs: expects kwargs['kwargs'] carrying epoch/iteration/callbacks/
            execution_mode/start_epoch/end_epoch.
    """
    import tensorflow as tf
    from neural_compressor.model.tensorflow_model import get_model_type
    tf.random.set_seed(1)
    self.model_type = get_model_type(model._model)
    optimizer = optimizer_tuple[0](**optimizer_tuple[1])
    criterion = criterion_tuple[0](**criterion_tuple[1])
    start_epochs = kwargs['kwargs'].get('start_epoch', None)
    end_epochs = kwargs['kwargs'].get('end_epoch', None)
    epochs = kwargs['kwargs'].get('epoch', None)
    iters = kwargs['kwargs'].get('iteration', None)
    callbacks = kwargs['kwargs'].get('callbacks', None)
    execution_mode = kwargs['kwargs'].get('execution_mode', None)
    distributed = getattr(dataloader, 'distributed', False)
    from neural_compressor.compression.distillation.criterions import TensorflowKnowledgeDistillationLoss
    if isinstance(criterion, TensorflowKnowledgeDistillationLoss):
        # Distillation path: train the in-memory model directly.
        input_model = model._model
    else:
        # Presumably the pruning path: reload the Keras model and wrap the
        # hooks with the pruning callbacks — TODO confirm indentation of the
        # `hooks = ...` line against the original source.
        input_model = tf.keras.models.load_model(model._model)
        hooks = callbacks['tf_pruning'](model, input_model, hooks)
    hooks['on_train_begin']()
    train_loss_results = []
    if distributed:
        # Sanity check: every rank must iterate the same number of batches.
        try:
            len_dataloader = len(dataloader)
        except:
            logger.info('The length of the distributed training dataloader is unknown.When the iteration of training dataloader in each process is inconsistent, an error may occur.')
        else:
            list_len_dataloader = self.hvd.allgather_object(len_dataloader)
            if (self.hvd.rank() == 0):
                for i in range((len(list_len_dataloader) - 1)):
                    if (list_len_dataloader[i] != list_len_dataloader[(i + 1)]):
                        raise AttributeError("The training dataloader's iteration isdifferent between processes, please reset dataloader's batch_size.")
    def training_step(x, y, first_batch):
        # One forward/backward pass; broadcasts initial variables on the
        # first batch when running distributed.
        with tf.GradientTape() as tape:
            tape.watch(input_model.trainable_variables)
            y_ = input_model(x, training=True)
            loss_value = criterion(y, y_)
            loss_value = hooks['on_after_compute_loss'](x, y_, loss_value)
        tape = (self.hvd.DistributedGradientTape(tape) if distributed else tape)
        grads = tape.gradient(loss_value, input_model.trainable_variables)
        optimizer.apply_gradients(zip(grads, input_model.trainable_variables))
        if (distributed and first_batch):
            self.hvd.broadcast_variables(input_model.variables, root_rank=0)
            self.hvd.broadcast_variables(optimizer.variables(), root_rank=0)
        return loss_value
    # Graph-compile the step unless eager execution was requested.
    training_step = (training_step if (execution_mode == 'eager') else tf.function(training_step))
    if ((start_epochs is not None) and (end_epochs is not None)):
        epochs = (end_epochs - start_epochs)
    for epoch in range(epochs):
        cnt = 0
        epoch_loss_avg = tf.keras.metrics.Mean()
        hooks['on_epoch_begin'](epoch)
        for (iter, data) in enumerate(dataloader):
            (x, y) = (postprocess(data) if (postprocess is not None) else data)
            hooks['on_step_begin'](iter)
            cnt += 1
            loss_value = training_step(x, y, (iter == 0))
            epoch_loss_avg.update_state(loss_value)
            hooks['on_step_end']()
            if ((iters is not None) and (cnt >= iters)):
                break
        model._sess = None
        hooks['on_epoch_end']()
        train_loss_results.append(epoch_loss_avg.result())
        if distributed:
            logger.info('Epoch-{:03d} training on rank {!s} have been done.'.format((epoch + 1), self.hvd.allgather_object(self.hvd.rank())))
        logger.info('Epoch {:03d}: Loss: {:.3f}'.format((epoch + 1), epoch_loss_avg.result()))
    hooks['on_train_end']()
    model._sess = None
    if (not isinstance(criterion, TensorflowKnowledgeDistillationLoss)):
        if distributed:
            if (self.hvd.rank() == 0):
                # Only rank 0 writes the pruned model; the allgather acts as a barrier.
                input_model.save(model._model)
                rank_list = self.hvd.allgather_object(self.hvd.rank())
                logger.info(f"rank 0 has saved the pruned model to '{model._model}',all ranks {rank_list} ready.")
        else:
            input_model.save(model._model)
    else:
        input_model.save('distillation_model')
# NOTE(review): this bare call looks like a mangled decorator line
# (presumably an elapsed-time decorator on `evaluate`) — confirm against the
# original source.
_elapsed_time(customized_msg='Model inference')
def evaluate(self, model, dataloader, postprocess=None, metrics=None, measurer=None, iteration=(- 1), tensorboard=False, fp32_baseline=False):
    """Run the TF model over `dataloader` and return the metric result(s).

    Args:
        model: wrapped TF model exposing sess/input_tensor/output_tensor_names.
        dataloader: yields (inputs, labels) batches.
        postprocess: optional callable applied to (predictions, labels).
        metrics: optional list of metric objects with reset/update/result.
        measurer: optional latency measurer with start()/end().
        iteration: stop after this many batches (-1 means all).
        tensorboard: when True, dump per-node histograms under ./runs/eval.
        fp32_baseline: whether this run produces the fp32 reference predictions.

    Returns:
        A single metric value, or a list when more than one metric is given.
    """
    import tensorflow as tf
    from .tf_utils.util import iterator_sess_run
    outputs = model.output_tensor_names
    if getattr(dataloader, 'distributed', False):
        # Distributed evaluation: every rank must see the same number of batches.
        import horovod.tensorflow as hvd
        hvd.init()
        for metric in metrics:
            metric.hvd = hvd
        try:
            len_dataloader = len(dataloader)
        except:
            logger.info('The length of the distributed evaluation dataloader is unknown.When the iteration of evaluation dataloader in each process is inconsistent, an error may occur.')
        else:
            list_len_dataloader = hvd.allgather_object(len_dataloader)
            if (hvd.rank() == 0):
                for i in range((len(list_len_dataloader) - 1)):
                    if (list_len_dataloader[i] != list_len_dataloader[(i + 1)]):
                        raise AttributeError("The evaluation dataloader's iteration isdifferent between processes, please reset dataloader's batch_size.")
        logger.info("Rank {!s} dataloaders' data distribution balance check for evaluation have been finished.".format(hvd.allgather_object(hvd.rank())))
    if tensorboard:
        # Collect fp32/int8 nodes to inspect and set up the summary writer.
        from tensorflow.python.framework import tensor_util
        from .tf_utils.graph_util import GraphAnalyzer
        output_postfix = '_fp32.output'
        inspect_node_types = ['Conv2D', 'DepthwiseConv2dNative', 'MaxPool', 'AvgPool', 'ConcatV2', 'MatMul', 'FusedBatchNormV3', 'FusedBatchNorm', 'BiasAdd', '_MklFusedInstanceNorm', 'Relu', 'Relu6', 'Dequantize']
        fp32_inspect_node_name = []
        int8_inspect_node_name = []
        q_node_scale = {}
        if (self.dump_times == 0):
            temp_dir = './runs/eval/baseline'
        else:
            temp_dir = ('./runs/eval/tune_' + str(self.dump_times))
        if os.path.isdir(temp_dir):
            import shutil
            shutil.rmtree(temp_dir, ignore_errors=True)
        writer = tf.summary.create_file_writer(temp_dir)
        with writer.as_default():
            tf.summary.graph(model.graph)
        cur_graph = GraphAnalyzer()
        cur_graph.graph = model.graph_def
        cur_graph.parse_graph()
        graph_info = cur_graph.node_name_details
        for node in model.graph_def.node:
            if (node.op in inspect_node_types):
                fp32_inspect_node_name.append(node.name)
            elif (node.op.find('Requantize') != (- 1)):
                # Requantize ops carry their output range as const inputs;
                # Sum-fused variants keep them at different input offsets.
                out_min = (- 2)
                out_max = (- 1)
                if (node.op.find('Sum') != (- 1)):
                    out_min = (- 5)
                    out_max = (- 4)
                q_out_min = graph_info[node.input[out_min]].node.attr['value'].tensor.float_val[0]
                q_out_max = graph_info[node.input[out_max]].node.attr['value'].tensor.float_val[0]
                q_node_scale[node.name] = (node.op, q_out_min, q_out_max)
                int8_inspect_node_name.append(node.name)
            if ((node.op == 'Const') and (graph_info[graph_info[node.name].outputs[0]].node.op in ['Conv2D', 'DepthwiseConv2dNative', 'MatMul', 'FusedBatchNormV3', '_MklFusedInstanceNorm', 'BiasAdd'])):
                # Also histogram constants feeding the inspected op types.
                const_value = tensor_util.MakeNdarray(node.attr.get('value').tensor).astype(np.float32)
                self._log_histogram(writer, node.name, const_value)
        outputs.extend(fp32_inspect_node_name)
        if (len(int8_inspect_node_name) > 0):
            output_postfix = '_int8.output'
            outputs.extend(int8_inspect_node_name)
    if metrics:
        for metric in metrics:
            metric.reset()
        self.fp32_preds_as_label = any([(hasattr(metric, 'compare_label') and (not metric.compare_label)) for metric in metrics])
    origin_output_tensor_names = model.output_tensor_names
    model.output_tensor_names = outputs
    input_tensor = model.input_tensor
    output_tensor = (model.output_tensor if (len(model.output_tensor) > 1) else model.output_tensor[0])
    logger.info('Start to evaluate the TensorFlow model.')
    def eval_func(dataloader):
        # Inner loop shared by the normal path and the batch-size-1 fallback.
        results = []
        for (idx, (inputs, labels)) in enumerate(dataloader):
            if (len(input_tensor) == 1):
                feed_dict = {}
                if (isinstance(inputs, dict) or isinstance(inputs, OrderedDict) or isinstance(inputs, UserDict)):
                    # Match dict keys against tensor names with the ":N" suffix stripped.
                    for name in inputs:
                        for tensor in input_tensor:
                            pos = tensor.name.rfind(':')
                            t_name = (tensor.name if (pos < 0) else tensor.name[:pos])
                            if (name == t_name):
                                feed_dict[tensor] = inputs[name]
                                break
                else:
                    feed_dict = {input_tensor[0]: inputs}
            else:
                assert (len(input_tensor) == len(inputs)), 'inputs len must equal with input_tensor'
                feed_dict = {}
                if (isinstance(inputs, dict) or isinstance(inputs, OrderedDict) or isinstance(inputs, UserDict)):
                    for name in inputs:
                        for tensor in input_tensor:
                            pos = tensor.name.rfind(':')
                            t_name = (tensor.name if (pos < 0) else tensor.name[:pos])
                            if (name == t_name):
                                feed_dict[tensor] = inputs[name]
                                break
                else:
                    feed_dict = dict(zip(input_tensor, inputs))
            if model.iter_op:
                predictions = iterator_sess_run(model.sess, model.iter_op, feed_dict, output_tensor, iteration, measurer)
            elif (measurer is not None):
                measurer.start()
                predictions = model.sess.run(output_tensor, feed_dict)
                measurer.end()
            else:
                predictions = model.sess.run(output_tensor, feed_dict)
            if self.fp32_preds_as_label:
                (self.fp32_results.append(predictions) if fp32_baseline else results.append(predictions))
            if ((idx == 0) and tensorboard):
                # Dump inspected tensors once, for the first batch only.
                for (index, node_name) in enumerate(outputs):
                    tensor = predictions[index]
                    if (node_name in int8_inspect_node_name):
                        tensor = Dequantize(predictions[index], q_node_scale[node_name])
                    self._log_histogram(writer, (node_name + output_postfix), tensor.astype(np.float32), idx)
                writer.close()
            if isinstance(predictions, list):
                if (len(origin_output_tensor_names) == 1):
                    predictions = predictions[0]
                elif (len(origin_output_tensor_names) > 1):
                    predictions = predictions[:len(origin_output_tensor_names)]
            if (postprocess is not None):
                (predictions, labels) = postprocess((predictions, labels))
            if metrics:
                for metric in metrics:
                    if ((not hasattr(metric, 'compare_label')) or (hasattr(metric, 'compare_label') and metric.compare_label)):
                        metric.update(predictions, labels)
            if ((idx + 1) == iteration):
                break
        return results
    if (isinstance(dataloader, BaseDataLoader) and (not self.benchmark)):
        try:
            results = eval_func(dataloader)
        except Exception:
            # Fall back to batch size 1 when the configured batch does not fit.
            logger.warning('Fail to forward with batch size={}, set to {} now.'.format(dataloader.batch_size, 1))
            dataloader.batch(1)
            results = eval_func(dataloader)
    else:
        results = eval_func(dataloader)
    if self.fp32_preds_as_label:
        # Metrics comparing against fp32 predictions instead of ground truth.
        from .tf_utils.util import collate_tf_preds
        if fp32_baseline:
            results = collate_tf_preds(self.fp32_results)
            reference = results
        else:
            reference = collate_tf_preds(self.fp32_results)
            results = collate_tf_preds(results)
        for metric in metrics:
            if (hasattr(metric, 'compare_label') and (not metric.compare_label)):
                metric.update(results, reference)
    acc = (0 if (metrics is None) else [metric.result() for metric in metrics])
    if tensorboard:
        # Rename the dump directory to embed the achieved accuracy.
        new_dir = ((temp_dir + '_acc_') + str(acc))
        writer.close()
        if os.path.isdir(new_dir):
            import shutil
            shutil.rmtree(new_dir, ignore_errors=True)
        os.rename(temp_dir, new_dir)
        self.dump_times += 1
    model.output_tensor_names = origin_output_tensor_names
    return (acc if ((not isinstance(acc, list)) or (len(acc) > 1)) else acc[0])
def _tuning_cfg_to_fw(self, tuning_cfg):
    """Translate a tuning config into `self.quantize_config` plus fp32/bf16 op lists.

    Ops whose activation dtype is fp32/bf16 are removed from the op-wise
    config and collected into `self.fp32_ops` / `self.bf16_ops`; remaining
    ops get a (per_channel, algorithm, asymmetric, weight_bit) tuple.
    """
    self.quantize_config['calib_iteration'] = tuning_cfg['calib_iteration']
    self.quantize_config['device'] = self.device
    self.quantize_config['advance'] = deep_get(tuning_cfg, 'advance')
    fp32_ops = []
    bf16_ops = []
    dispatched_op_names = [j[0] for j in tuning_cfg['op']]
    # Drop stale per-op entries that are no longer dispatched.
    invalid_op_names = [i for i in self.quantize_config['op_wise_config'] if (i not in dispatched_op_names)]
    for op_name in invalid_op_names:
        self.quantize_config['op_wise_config'].pop(op_name)
    for each_op_info in tuning_cfg['op']:
        op_name = each_op_info[0]
        if (tuning_cfg['op'][each_op_info]['activation']['dtype'] in ['fp32', 'bf16']):
            if (op_name in self.quantize_config['op_wise_config']):
                self.quantize_config['op_wise_config'].pop(op_name)
            if (tuning_cfg['op'][each_op_info]['activation']['dtype'] == 'fp32'):
                fp32_ops.append(op_name)
            if (tuning_cfg['op'][each_op_info]['activation']['dtype'] == 'bf16'):
                bf16_ops.append(op_name)
            continue
        is_perchannel = False
        # NOTE(review): `bit` is never assigned a non-None value, so
        # `weight_bit` is always 7.0 — confirm whether this is intentional.
        bit = None
        if ('weight' in tuning_cfg['op'][each_op_info]):
            is_perchannel = (tuning_cfg['op'][each_op_info]['weight']['granularity'] == 'per_channel')
        weight_bit = (bit if bit else 7.0)
        algorithm = tuning_cfg['op'][each_op_info]['activation']['algorithm']
        is_asymmetric = False
        if ('activation' in tuning_cfg['op'][each_op_info]):
            is_asymmetric = (tuning_cfg['op'][each_op_info]['activation']['scheme'] == 'asym')
        self.quantize_config['op_wise_config'][op_name] = (is_perchannel, algorithm, is_asymmetric, weight_bit)
    self.fp32_ops = fp32_ops
    self.bf16_ops = bf16_ops
# NOTE(review): this bare call looks like a mangled decorator line
# (presumably an elapsed-time decorator on `quantize`) — confirm against the
# original source.
_elapsed_time('Pass quantize model')
def quantize(self, tune_cfg, model, data_loader, q_func=None):
    """Quantize `model` according to `tune_cfg` and return the converted model.

    Args:
        tune_cfg: tuning config with per-op dtypes and calibration settings.
        model: wrapped TF model to convert.
        data_loader: calibration dataloader.
        q_func: training function, required only for quant-aware training.

    Returns:
        The converted (quantized) model.
    """
    assert (self.approach != 'post_training_dynamic_quant'), 'Dynamic quantization is not supported on TensorFlow framework now!'
    if (self.approach == 'quant_aware_training'):
        assert (q_func is not None), 'quantization aware training mode is not configured correctly'
        from neural_compressor.model import Model
        qat_model = q_func(model)
        return self.convert(Model(qat_model), 'QAT', 'default')
    assert (q_func is None), 'post-training quantization mode is not support calibration function for Tensorflow!'
    self._tuning_cfg_to_fw(tune_cfg)
    self.bf16_ops.extend(self.smooth_quant_mul_ops)
    logger.debug('Dump quantization configurations:')
    logger.debug(self.quantize_config)
    from .tf_utils.graph_converter import GraphConverter
    calib_sampling_size = tune_cfg.get('calib_sampling_size', 1)
    if isinstance(data_loader, BaseDataLoader):
        batch_size = data_loader.batch_size
        try:
            # Pick the largest batch size <= the configured one that divides
            # the calibration sampling size exactly.
            for i in range(batch_size):
                if ((calib_sampling_size % (batch_size - i)) == 0):
                    calib_batch_size = (batch_size - i)
                    if (i != 0):
                        logger.warning(('Reset `calibration.dataloader.batch_size` field to {}'.format(calib_batch_size) + ' to make sure the sampling_size is divisible exactly by batch size'))
                    break
            tmp_iterations = int(math.ceil((calib_sampling_size / calib_batch_size)))
            data_loader.batch(calib_batch_size)
            self.quantize_config['calib_iteration'] = tmp_iterations
            converted_model = GraphConverter(model, qt_config=self.quantize_config, recipes=self.recipes, int8_sequences=self.op_wise_sequences, fp32_ops=self.fp32_ops, bf16_ops=self.bf16_ops, data_loader=data_loader, calib_func=q_func, qdq_enabled=self.qdq_enabled, new_api=self.new_api, performance_only=self.performance_only, use_bf16=self.use_bf16).convert()
        except Exception:
            # On forward failure, fall back to the model's own input batch size.
            from .tf_utils.util import get_model_input_shape
            batch_size = get_model_input_shape(model)
            logger.warning('Fail to forward with batch size={}, set to {} now.'.format(data_loader.batch_size, batch_size))
            data_loader.batch(batch_size)
            self.quantize_config['calib_iteration'] = calib_sampling_size
            converted_model = GraphConverter(model, qt_config=self.quantize_config, recipes=self.recipes, int8_sequences=self.op_wise_sequences, fp32_ops=self.fp32_ops, bf16_ops=self.bf16_ops, data_loader=data_loader, calib_func=q_func, qdq_enabled=self.qdq_enabled, new_api=self.new_api, performance_only=self.performance_only, use_bf16=self.use_bf16).convert()
    else:
        if (hasattr(data_loader, 'batch_size') and ((calib_sampling_size % data_loader.batch_size) != 0)):
            iter = self.quantize_config['calib_iteration']
            logger.warning("Please note that calibration sampling size {} isn't divisible exactly by batch size {}. So the real sampling size is {}.".format(calib_sampling_size, data_loader.batch_size, (data_loader.batch_size * iter)))
        converted_model = GraphConverter(model, qt_config=self.quantize_config, recipes=self.recipes, int8_sequences=self.op_wise_sequences, fp32_ops=self.fp32_ops, bf16_ops=self.bf16_ops, data_loader=data_loader, calib_func=q_func, qdq_enabled=self.qdq_enabled, new_api=self.new_api, performance_only=self.performance_only, use_bf16=self.use_bf16).convert()
    converted_model.q_config.update({'framework_specific_info': self.framework_specific_info})
    self._dump_model_op_stats(converted_model.graph_def)
    return converted_model
def _dump_model_op_stats(self, model_graphdef):
    """Print and store per-op-type INT8/BF16/FP32 counts for the converted graph."""
    fp32_op_list_uint8 = copy.deepcopy(self.query_handler.get_op_types_by_precision(precision='uint8'))
    fp32_op_list_int8 = copy.deepcopy(self.query_handler.get_op_types_by_precision(precision='int8'))
    fp32_op_list = list(set(fp32_op_list_uint8).union(set(fp32_op_list_int8)))
    # Quantized-op name prefixes used to recognize INT8 nodes in the graph.
    int8_op_prefix_list = ['QuantizedConv2D', '_FusedQuantizedConv3D', 'QuantizedDepthwise', 'QuantizedMaxPool', 'QuantizedAvgPool', 'QuantizedConcatV2', 'QuantizedMatMul', '_QuantizedFusedBatchNorm', '_QuantizedMatMul', '_QuantizedBatchMatMul', '_QuantizedFusedInstanceNorm', '_FusedQuantizedDeconv2D', '_FusedQuantizedDeconv3D']
    from tensorflow.python.framework import dtypes
    res = {}
    for op_type in fp32_op_list:
        res[op_type] = {'INT8': 0, 'BF16': 0, 'FP32': 0}
    res['QuantizeV2'] = {'INT8': 0, 'BF16': 0, 'FP32': 0}
    res['Dequantize'] = {'INT8': 0, 'BF16': 0, 'FP32': 0}
    res['Cast'] = {'INT8': 0, 'BF16': 0, 'FP32': 0}
    fp32_op_list.extend(['QuantizeV2', 'Dequantize', 'Cast'])
    for i in model_graphdef.node:
        if (i.op == 'Const'):
            continue
        possible_int8_res = [name for name in int8_op_prefix_list if (i.op.find(name) != (- 1))]
        if any(possible_int8_res):
            # Map the quantized op name back onto its original fp32 op type.
            origin_op_type = possible_int8_res[0].split('Quantized')[(- 1)]
            if (origin_op_type == 'FusedBatchNorm'):
                origin_op_type = 'FusedBatchNormV3'
            if (origin_op_type == 'FusedInstanceNorm'):
                origin_op_type = '_MklFusedInstanceNorm'
            if (origin_op_type == 'Depthwise'):
                origin_op_type = 'DepthwiseConv2dNative'
            if (origin_op_type == 'BatchMatMul'):
                origin_op_type = 'BatchMatMulV2'
            if (origin_op_type == 'FusedBatchMatMulV2'):
                origin_op_type = '_MklFusedBatchMatMulV2'
            if (origin_op_type == 'Deconv2D'):
                origin_op_type = 'Conv2DBackpropInput'
            if (origin_op_type == 'Deconv3D'):
                origin_op_type = 'Conv3DBackpropInputV2'
            res[origin_op_type]['INT8'] += 1
        if (i.op in fp32_op_list):
            # Classify by the node's dtype attribute ('DstT' for Cast, else 'T').
            if (('T' not in i.attr) and (i.op != 'Cast')):
                continue
            if (i.op == 'Cast'):
                if (i.attr['DstT'].type == dtypes.bfloat16):
                    res[i.op]['BF16'] += 1
                elif (i.attr['DstT'].type == dtypes.float32):
                    res[i.op]['FP32'] += 1
            elif (i.attr['T'].type == dtypes.bfloat16):
                res[i.op]['BF16'] += 1
            elif (i.attr['T'].type in (dtypes.quint8, dtypes.qint8)):
                res[i.op]['INT8'] += 1
            else:
                res[i.op]['FP32'] += 1
    field_names = ['Op Type', 'Total', 'INT8', 'BF16', 'FP32']
    output_data = [[op_type, sum(res[op_type].values()), res[op_type]['INT8'], res[op_type]['BF16'], res[op_type]['FP32']] for op_type in fp32_op_list]
    Statistics(output_data, header='Mixed Precision Statistics', field_names=field_names).print_stat()
    self.optype_statistics = (field_names, output_data)
def _query_bf16_ops(self, matched_nodes):
self.bf16_op_details = OrderedDict()
valid_precision = self.query_handler.get_mixed_precision_combination()
if ((('bf16' in valid_precision) and CpuInfo().bf16) or (os.getenv('FORCE_BF16') == '1')):
for details in matched_nodes:
node_op = details[(- 1)][0]
node_name = details[0]
self.bf16_op_details[(node_name, node_op)] = [{'weight': {'dtype': ['bf16']}, 'activation': {'dtype': ['bf16']}}, {'weight': {'dtype': 'fp32'}, 'activation': {'dtype': 'fp32'}}]
def _query_quantizable_ops(self, matched_nodes):
    """Build ``self.quantizable_op_details`` from fusion-pattern matches.

    For every matched node whose op type is int8/uint8-capable, record the
    candidate configs (op-specific int8 config first, optional bf16 config,
    fp32 fallback last) and seed ``self.quantize_config['op_wise_config']``.

    Args:
        matched_nodes: pattern-match results; each entry is a list whose
            first element is the node name and whose last element is the
            fused-op-type list (its first item is the node's own op type).

    Returns:
        OrderedDict mapping ``(node_name, unified_op_type)`` to the list of
        candidate configurations.
    """
    # Fallback configurations shared by every op.
    bf16_common_config = {'weight': {'dtype': 'bf16'}, 'activation': {'dtype': 'bf16'}}
    fp32_common_config = {'weight': {'dtype': 'fp32'}, 'activation': {'dtype': 'fp32'}}
    # Op types the query table marks as quantizable to uint8 / int8 / bf16.
    uint8_type = self.query_handler.get_op_types_by_precision(precision='uint8')
    int8_type = self.query_handler.get_op_types_by_precision(precision='int8')
    bf16_type = self.query_handler.get_op_types_by_precision(precision='bf16')
    tf_quantizable_op_type = list(set(uint8_type).union(set(int8_type)))
    valid_precision = self.query_handler.get_mixed_precision_combination()
    op_capability = self.query_handler.get_quantization_capability()
    conv_config = copy.deepcopy(op_capability['Conv2D'])
    conv3d_config = (copy.deepcopy(op_capability['Conv3D']) if ('Conv3D' in op_capability) else None)
    matmul_config = copy.deepcopy(op_capability['MatMul'])
    other_config = copy.deepcopy(op_capability['default'])
    self.quantizable_op_details = OrderedDict()
    self.recipes_ops = {}
    self._init_op_stat = {i: [] for i in tf_quantizable_op_type}
    # When the 'first_conv_or_matmul_quantization' recipe is explicitly
    # disabled, the first conv/matmul encountered stays in fp32.
    exclude_first_quantizable_op = (True if (('first_conv_or_matmul_quantization' in self.recipes) and (not self.recipes['first_conv_or_matmul_quantization'])) else False)
    for details in matched_nodes:
        node_op = details[(- 1)][0]
        node_name = details[0]
        patterns = details[(- 1)]
        pat_length = len(patterns)
        # Keep only the longest fused sequence for this match.
        pattern_info = {'sequence': [[','.join(patterns[:(pat_length - i)]) for i in range(pat_length)][0]], 'precision': ['int8']}
        # NOTE(review): reset on every iteration, so the len()==0 guard below
        # is always true and the recipe entry is overwritten by each
        # conv/matmul match -- confirm whether this should precede the loop.
        first_conv_or_matmul_node = []
        if ((node_op in tf_quantizable_op_type) and (node_name not in self.exclude_node_names) and ((node_name, self.unify_op_type_mapping[node_op]) not in self.quantizable_op_details)):
            if (((self.unify_op_type_mapping[node_op].find('conv2d') != (- 1)) or (self.unify_op_type_mapping[node_op].find('matmul') != (- 1))) and (len(first_conv_or_matmul_node) == 0)):
                first_conv_or_matmul_node.append((node_name, self.unify_op_type_mapping[node_op]))
                self.recipes_ops['first_conv_or_matmul_quantization'] = first_conv_or_matmul_node
            if (exclude_first_quantizable_op and ((self.unify_op_type_mapping[node_op].find('conv2d') != (- 1)) or (self.unify_op_type_mapping[node_op].find('matmul') != (- 1)))):
                # Skip only this first conv/matmul, then clear the flag.
                exclude_first_quantizable_op = False
                self.exclude_node_names.append(node_name)
                continue
            self._init_op_stat[node_op].append(node_name)
            if (self.unify_op_type_mapping[node_op].find('conv2d') != (- 1)):
                conv2d_int8_config = copy.deepcopy(conv_config)
                conv2d_int8_config['pattern'] = pattern_info
                self.quantizable_op_details[(node_name, self.unify_op_type_mapping[node_op])] = [conv2d_int8_config, fp32_common_config]
            elif (self.unify_op_type_mapping[node_op].find('conv3d') != (- 1)):
                conv3d_int8_config = copy.deepcopy(conv3d_config)
                conv3d_int8_config['pattern'] = pattern_info
                self.quantizable_op_details[(node_name, self.unify_op_type_mapping[node_op])] = [conv3d_int8_config, fp32_common_config]
            elif (self.unify_op_type_mapping[node_op].find('matmul') != (- 1)):
                matmul_int8_config = copy.deepcopy(matmul_config)
                matmul_int8_config['pattern'] = pattern_info
                # MatMul activations are forced to asymmetric quantization here.
                matmul_scheme = ['asym']
                matmul_int8_config['activation']['scheme'] = matmul_scheme
                self.quantizable_op_details[(node_name, self.unify_op_type_mapping[node_op])] = [matmul_int8_config, fp32_common_config]
            else:
                self.quantizable_op_details[(node_name, self.unify_op_type_mapping[node_op])] = [copy.deepcopy(other_config), fp32_common_config]
            # Insert bf16 as a middle option when the op and platform allow it.
            if ((node_op in bf16_type) and ((('bf16' in valid_precision) and CpuInfo().bf16) or (os.getenv('FORCE_BF16') == '1'))):
                self.quantizable_op_details[(node_name, self.unify_op_type_mapping[node_op])].insert(1, bf16_common_config)
            # Default op-wise tuple: (per-channel, algorithm, asymmetric).
            self.quantize_config['op_wise_config'][node_name] = (False, 'minmax', False)
    return self.quantizable_op_details
def _filter_unquantizable_concat(self, matched_nodes):
    """Drop ConcatV2 matches whose inputs are not known to be positive.

    A ConcatV2 match is removed (in place from ``matched_nodes``) when none
    of the concat's data inputs comes from a Relu/Relu6 or is otherwise
    proven positive by graph analysis.
    """
    target_concat_nodes = [i[0] for i in matched_nodes if (i[(- 1)][0] == 'ConcatV2')]
    from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper
    from neural_compressor.adaptor.tf_utils.util import GraphAnalyzer
    g = GraphAnalyzer()
    g.graph = self.pre_optimized_model.graph_def
    graph_info = g.parse_graph()
    concat_nodes = g.query_fusion_pattern_nodes([['ConcatV2']])
    for i in concat_nodes:
        concat_node_name = i[0]
        if (concat_node_name not in target_concat_nodes):
            continue
        input_positive_status = []
        # Attr 'N' holds the number of data inputs of the ConcatV2 node.
        for index in range(graph_info[concat_node_name].node.attr['N'].i):
            each_input_name = GraphRewriterHelper.node_name_from_input(graph_info[concat_node_name].node.input[index])
            each_input_node = graph_info[each_input_name].node
            positive_input = False
            if (each_input_node.op in ('Relu', 'Relu6')):
                positive_input = True
            else:
                positive_input = g.has_positive_input(each_input_node.name)
            input_positive_status.append(positive_input)
        # Remove the match when no input is known to be positive.
        if (not any(input_positive_status)):
            matched_nodes.remove(i)
def _filter_unquantizable_concat_performance_only(self, matched_nodes):
    """Remove ConcatV2 matches that sit behind control-flow (performance-only mode).

    A ConcatV2 whose data inputs come from a ``Switch`` node lives inside a
    control-flow construct and is not safe to quantize, so its match is
    dropped from ``matched_nodes`` in place.
    """
    target_concat_nodes = [i[0] for i in matched_nodes if i[(- 1)][0] == 'ConcatV2']
    from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper
    from neural_compressor.adaptor.tf_utils.util import GraphAnalyzer
    g = GraphAnalyzer()
    g.graph = self.pre_optimized_model.graph_def
    graph_info = g.parse_graph()
    concat_nodes = g.query_fusion_pattern_nodes([['ConcatV2']])
    for i in concat_nodes:
        concat_node_name = i[0]
        if concat_node_name not in target_concat_nodes:
            continue
        control_flow = False
        # Attr 'N' holds the number of data inputs of the ConcatV2 node.
        for index in range(graph_info[concat_node_name].node.attr['N'].i):
            each_input_name = GraphRewriterHelper.node_name_from_input(graph_info[concat_node_name].node.input[index])
            each_input_node = graph_info[each_input_name].node
            # BUG FIX: the original used ``each_input_node.op in 'Switch'``,
            # a substring test that also matched ops whose name happens to be
            # a substring of "Switch"; equality is the intended check.
            if each_input_node.op == 'Switch':
                control_flow = True
                break
        if control_flow:
            matched_nodes.remove(i)
def query_fw_capability(self, model):
    """Collect op-wise and op-type-wise quantization capability for ``model``.

    Pre-optimizes the graph once, matches the int8 and bf16 fusion patterns,
    filters out unquantizable matches, then delegates to
    ``_query_quantizable_ops`` / ``_query_bf16_ops``.

    Returns:
        dict with 'optypewise', 'opwise' and 'recipes_ops' entries.
    """
    if (self.pre_optimized_model is None):
        from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization
        self.pre_optimizer_handle = PreOptimization(model, self.new_api, self.device)
        self.pre_optimized_model = self.pre_optimizer_handle.get_optimized_model(self.itex_mode)
    model.graph_def = self.pre_optimized_model.graph_def
    self.exclude_node_names = self.pre_optimizer_handle.get_excluded_node_names()
    patterns = self.query_handler.generate_internal_patterns()
    bf16_patterns = self.query_handler.get_bf16_patterns()
    matched_nodes = self.pre_optimizer_handle.get_matched_nodes(patterns)
    matched_bf16_nodes = self.pre_optimizer_handle.get_matched_nodes(bf16_patterns)
    original_graph_node_name = [i.name for i in model.graph_def.node]
    # Sort descending by (graph position, fused-pattern length).
    matched_nodes = sorted(matched_nodes, reverse=True, key=(lambda i: (original_graph_node_name.index(i[0]), len(i[(- 1)]))))

    def check_match(patterns, input_pattern):
        # True when input_pattern equals one of the '+'-joined pattern strings.
        for i in patterns:
            if (input_pattern == [i for i in i.replace('+', ' ').strip().split(' ') if i]):
                return True
        return False

    if ((self.new_api and self.performance_only) or self.itex_mode or (os.getenv('TF_FORCE_CONCAT_OPTS') == '1')):
        self._filter_unquantizable_concat_performance_only(matched_nodes)
    else:
        self._filter_unquantizable_concat(matched_nodes)
    # Remove matches that need a positive input but don't have one, unless
    # the exact fusion is listed in the int8 fuse-pattern table.
    copied_matched_nodes = copy.deepcopy(matched_nodes)
    for i in copied_matched_nodes:
        if (i[(- 1)][0] in self.query_handler.get_op_types()['int8']):
            continue
        if ((not self.pre_optimizer_handle.has_positive_input(i[0])) and (not check_match(self.query_handler.get_fuse_patterns()['int8'], i[(- 1)]))):
            matched_nodes.remove(i)
    del copied_matched_nodes
    # A node already matched for int8 must not also be treated as bf16.
    copied_matched_nodes = copy.deepcopy(matched_bf16_nodes)
    for i in copied_matched_nodes:
        for j in matched_nodes:
            if ((i[0] == j[0]) and (i in matched_bf16_nodes)):
                matched_bf16_nodes.remove(i)
    del copied_matched_nodes
    self._query_quantizable_ops(matched_nodes)
    self._query_bf16_ops(matched_bf16_nodes)
    capability = {'optypewise': self.get_optype_wise_ability(), 'recipes_ops': self.recipes_ops}
    capability['opwise'] = copy.deepcopy(self.quantizable_op_details)
    capability['opwise'].update(self.bf16_op_details)
    logger.debug('Dump framework quantization capability:')
    logger.debug(capability)
    return capability
def set_tensor(self, model, tensor_dict):
    """Write new tensor values (bias / weight constants) into a quantized graph.

    Args:
        model: model whose ``graph_def`` is edited in place.
        tensor_dict: maps a const node name to its new numpy value.
    """
    from .tf_utils.graph_util import GraphAnalyzer
    g = GraphAnalyzer()
    g.graph = model.graph_def
    graph_info = g.parse_graph()

    def _get_fp32_op_name(model, tensor_name):
        # Locate the quantized op consuming ``tensor_name`` and classify the
        # tensor's role; input index 2 of a Quantized* op is the bias.
        is_weight = False
        is_biasadd = False
        last_node_name = None
        current_node_name = None
        for each_node in model.graph_def.node:
            if (tensor_name in each_node.input):
                tensor_index = list(each_node.input).index(tensor_name)
                if ((each_node.op.find('Quantized') != (- 1)) and (tensor_index == 2)):
                    is_biasadd = True
                    last_node_name = each_node.input[0]
                    current_node_name = each_node.name
            # NOTE(review): is_weight is never set to True -- this branch is a
            # no-op, so the weight path below is effectively dead. Confirm.
            if ((tensor_name + '_qint8_const') in each_node.input):
                pass
        return (is_weight, is_biasadd, current_node_name, last_node_name)

    from tensorflow.core.framework import attr_value_pb2
    from tensorflow.python.framework import dtypes, tensor_util
    from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper
    qint32_type = dtypes.qint32.as_datatype_enum
    for (tensor_name, tensor_content) in tensor_dict.items():
        (is_weight, is_biasadd, current_node_name, last_node_name) = _get_fp32_op_name(model, tensor_name)
        if is_biasadd:
            is_biasadd_dtype_is_fp32 = (graph_info[current_node_name].node.attr['Tbias'] == attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
            current_node = graph_info[current_node_name].node
            bias_add_node = graph_info[current_node.input[2]].node
            if is_biasadd_dtype_is_fp32:
                # fp32 bias: overwrite the const value directly.
                bias_add_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(tensor_content, dtypes.float32, tensor_content.shape)))
            else:
                # qint32 bias: requantize the fp32 bias using the input and
                # filter ranges stored beside the quantized op.
                last_node = graph_info[last_node_name].node
                min_input = graph_info[last_node.input[(- 2)]].node.attr['value'].tensor.float_val[0]
                max_input = graph_info[last_node.input[(- 1)]].node.attr['value'].tensor.float_val[0]
                channel_size = tensor_content.shape[0]
                max_filter_node = graph_info[current_node.input[6]].node
                min_filter_node = graph_info[current_node.input[5]].node
                if max_filter_node.attr['value'].tensor.float_val:
                    # Per-tensor range: a single scalar min/max pair.
                    max_filter_tensor = []
                    min_filter_tensor = []
                    max_filter_tensor.append(max_filter_node.attr['value'].tensor.float_val[0])
                    min_filter_tensor.append(min_filter_node.attr['value'].tensor.float_val[0])
                else:
                    # Per-channel range.
                    # NOTE(review): both tensors are read from min_filter_node;
                    # max_filter_tensor likely should come from max_filter_node.
                    max_filter_tensor = tensor_util.MakeNdarray(min_filter_node.attr['value'].tensor)
                    min_filter_tensor = tensor_util.MakeNdarray(min_filter_node.attr['value'].tensor)
                activation_range = (127.0 if (current_node.attr['Tinput'].type == dtypes.qint8) else 255.0)
                updated_bias = Helper.generate_int32_bias_for_conv(tensor_content, channel_size, max_input, min_input, max_filter_tensor, min_filter_tensor, activation_range)
                bias_add_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=qint32_type))
                bias_add_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(updated_bias, dtypes.int32, tensor_content.shape)))
                bias_add_node.attr['value'].tensor.dtype = qint32_type
                current_node.attr['Tbias'].CopyFrom(attr_value_pb2.AttrValue(type=qint32_type))
        if is_weight:
            # Re-quantize and splice in a new weight const (currently dead in
            # practice -- see _get_fp32_op_name above).
            tmp_const_node = Helper.create_constant_node((current_node.name + '_weights_tmp'), tensor_content.transpose(2, 3, 1, 0), dtypes.float32)
            min_filter_node = graph_info[current_node.input[5]].node
            per_channel = (True if min_filter_node.attr['value'].tensor.tensor_shape else False)
            from .tf_utils.quantize_graph_common import QuantizeGraphHelper
            original_fp32_op = current_node.op.split('With')[0].split('Quantized')[(- 1)]
            if (original_fp32_op.find('Depthwise') != (- 1)):
                original_fp32_op = 'DepthwiseConv2dNative'
            (qint8_const_node, min_node, max_node) = QuantizeGraphHelper.generate_quantized_weight_node(original_fp32_op, tmp_const_node, per_channel)
            g.add_node(qint8_const_node, [], [current_node.name])
            g.add_node(min_node, [], [current_node.name])
            g.add_node(max_node, [], [current_node.name])
            g.replace_constant_graph_with_constant_node(qint8_const_node, tensor_name)
            g.replace_constant_graph_with_constant_node(min_node, current_node.input[5])
            g.replace_constant_graph_with_constant_node(max_node, current_node.input[6])
def inspect_weight_and_bias(self, node_list, graph_def, graph_info, graph_node_name_mapping):
    """Return the (dequantized) weight tensors of the requested Conv/Mul nodes.

    Args:
        node_list: fp32-side op names to inspect.
        graph_def: GraphDef to scan.
        graph_info: parsed graph info (name -> node details).
        graph_node_name_mapping: name -> NodeDef mapping.

    Returns:
        dict: {op_name: {weight_node_name: float32 ndarray}}.
    """
    import tensorflow as tf
    from neural_compressor.adaptor.tf_utils.util import get_tensor_val_from_graph_node
    from neural_compressor.utils.utility import dequantize_weight
    from .tf_utils.util import int8_node_name_reverse
    weights_result = {}
    inspect_nodes = []
    node_set = set(node_list)
    for node in graph_def.node:
        node_name = node.name
        if ('Quantized' in node.op):
            # Quantized node names carry int8 suffixes; map back to fp32 name.
            node_name = int8_node_name_reverse(node)
        if ((node_name in node_set) and (('Conv' in node.op) or ('Mul' in node.op))):
            inspect_nodes.append(node)
    logger.debug(f'Start to inspect weight and bias for: {[node.name for node in inspect_nodes]}.')
    for node in inspect_nodes:
        node_name = node.name
        # By convention the second input is the weight tensor.
        weight_node_name = node.input[1]
        weight_node = graph_node_name_mapping[weight_node_name]
        if (weight_node.op != 'Const'):
            continue
        weight_node_val = get_tensor_val_from_graph_node(graph_node_name_mapping, weight_node_name)
        weight_node_val = weight_node_val.astype('float32')
        if ('Quantized' in node.op):
            node_name = int8_node_name_reverse(node)
            # The qint8 const is paired with '<name>_min' / '<name>_max' ranges.
            weight_node_name_pre = weight_node_name.split('_qint8_const')[0]
            min_filter_node = (weight_node_name_pre + '_min')
            max_filter_node = (weight_node_name_pre + '_max')
            if graph_info[min_filter_node].node.attr['value'].tensor.float_val:
                # Scalar (per-tensor) ranges stored inline.
                min_filter_val = graph_info[min_filter_node].node.attr['value'].tensor.float_val
                max_filter_val = graph_info[max_filter_node].node.attr['value'].tensor.float_val
            else:
                # Per-channel ranges stored as full tensors.
                min_filter_val = get_tensor_val_from_graph_node(graph_node_name_mapping, min_filter_node)
                max_filter_val = get_tensor_val_from_graph_node(graph_node_name_mapping, max_filter_node)
            weight_node_val = dequantize_weight(weight_node_val, min_filter_val, max_filter_val)
        weights_result[node_name] = {weight_node_name: weight_node_val}
    return weights_result
def fused_node_mapping(self, node_list, pattern_mapping, graph_info, graph_node_name_mapping):
    """Map each fused-pattern head node to the last node of its fusion.

    Args:
        node_list: head node names.
        pattern_mapping: node name -> {'sequence': 'Op1,Op2,...'}.
        graph_info: parsed graph info (name -> details with ``.outputs``).
        graph_node_name_mapping: name -> NodeDef.

    Returns:
        (fused_mapping, fused_mapping_reverse): head -> tail and tail -> head.
    """
    fused_mapping = {}
    fused_mapping_reverse = {}
    for node_name in node_list:
        fused_seq = pattern_mapping[node_name]['sequence'].split(',')
        if (len(fused_seq) == 1):
            # Not fused: the node maps to itself.
            fused_mapping[node_name] = node_name
            fused_mapping_reverse[node_name] = node_name
            continue
        # Walk the remaining ops of the fused sequence through the graph.
        _next_node_name = node_name
        for _next_node_op_type in fused_seq[1:]:
            node_details = graph_info[_next_node_name]
            for node_output_name in node_details.outputs:
                # Skip over Cast nodes inserted between fused ops.
                if (graph_node_name_mapping[node_output_name].op == 'Cast'):
                    cast_node = graph_node_name_mapping[node_output_name]
                    node_output_name = graph_info[cast_node.name].outputs[0]
                if (graph_node_name_mapping[node_output_name].op in [_next_node_op_type, 'Cast']):
                    _next_node_name = node_output_name
        fused_mapping[node_name] = _next_node_name
        fused_mapping_reverse[_next_node_name] = node_name
    return (fused_mapping, fused_mapping_reverse)
def _inspect_tensor_inference(self, inspect_node_dict, model, dataloader, iteration_list):
    """Run inference and fetch the activation tensors of the inspect nodes.

    Quantized/requantized nodes yield three tensors (value, min, max);
    dequantize and fp32 nodes yield a single output.  Only the 1-based
    iterations listed in ``iteration_list`` are executed.
    """
    fetches = []
    for name in inspect_node_dict['qreq_node']:
        fetches.append({name: [name + ':' + str(out_idx) for out_idx in range(3)]})
    for name in inspect_node_dict['qdq_node']:
        fetches.append({name: name + ':0'})
    for name in inspect_node_dict['f_node']:
        fetches.append({name: name + ':0'})
    out_cnt = len(fetches)
    wanted_iters = set(iteration_list)
    input_tensor = model.input_tensor
    logger.info('Start to do inference for inspect activation.')
    activation_result = []
    for batch_idx, (inputs, labels) in enumerate(dataloader):
        batch_out = []
        iter_no = batch_idx + 1
        if iter_no > max(wanted_iters):
            break
        if iter_no not in wanted_iters:
            continue
        # Build the feed dict: single input fast path, else zip by position.
        if len(input_tensor) == 1:
            feed_dict = {input_tensor[0]: inputs}
        else:
            assert len(input_tensor) == len(inputs), 'inputs len must equal with input_tensor'
            feed_dict = dict(zip(input_tensor, inputs))
        for pos, out_t in enumerate(fetches):
            logger.debug(f'Finished inspect {pos}/{out_cnt} nodes, current inspect node {out_t.keys()}.')
            batch_out.append(model.sess.run(out_t, feed_dict))
        activation_result.append(batch_out)
    return activation_result
def inspect_activation(self, node_list, graph_def, graph_node_name_mapping, quantization_cfg, dataloader, iteration_list, graph_info):
    """Collect activation values of ``node_list`` over selected iterations.

    Returns:
        list (one entry per inspected iteration) of
        {op_name: {op_name: ndarray}} dicts, with quantized outputs
        dequantized back to float32.
    """
    from neural_compressor.model import Model
    original_graph_node_mapping = {}
    for node in graph_def.node:
        original_graph_node_mapping[node.name] = node
    # Partition the inspect nodes: fused quantize+dequantize ops, quantized /
    # requantized ops (3 outputs: value/min/max), and plain fp32 ops.
    inspect_node_dict = {'qdq_node': [], 'qreq_node': [], 'f_node': []}
    for node_name in node_list:
        node = graph_node_name_mapping[node_name]
        if (('Quantized' in node.op) and ('Dequantize' in node.op)):
            inspect_node_dict['qdq_node'].append(node.name)
        elif (('Quantized' in node.op) or ('_Quantized' in node.op) or ('Requantize' in node.op)):
            inspect_node_dict['qreq_node'].append(node.name)
        else:
            inspect_node_dict['f_node'].append(node_name)
    # Rebuild fusion sequences so fp32 heads can be traced to the fused tail.
    pattern_mapping = {}
    node_dict = quantization_cfg['op']
    for node_name_and_type in node_dict.keys():
        (node_name, _) = node_name_and_type
        if ('pattern' in node_dict[node_name_and_type]):
            pattern_mapping[node_name] = node_dict[node_name_and_type]['pattern']
        else:
            pattern_mapping[node_name] = {'sequence': node_name}
    if inspect_node_dict['f_node']:
        (fuse_map, fuse_map_reverse) = self.fused_node_mapping(inspect_node_dict['f_node'], pattern_mapping, graph_info, graph_node_name_mapping)
        inspect_node_dict['f_node'] = [fuse_map[n] for n in inspect_node_dict['f_node']]
    model = Model(graph_def)
    activation_result = self._inspect_tensor_inference(inspect_node_dict, model, dataloader, iteration_list)
    final_result = []
    int8_postfix = '_eightbit'
    for iter_res in activation_result:
        tmp_iter_result = {}
        for res in iter_res:
            (node_name, val) = (list(res.keys())[0], list(res.values())[0])
            # Three values mean (tensor, min, max) from a quantized op.
            val = (Dequantize(val[0], (node_name, val[1], val[2])) if (len(val) == 3) else val)
            val = val.astype(np.float32)
            index_postfix = node_name.find(int8_postfix)
            if (index_postfix != (- 1)):
                # Strip the int8 suffix to recover the fp32 op name.
                node_name = node_name[:index_postfix]
                tmp_iter_result[node_name] = {node_name: val}
            else:
                # NOTE(review): fuse_map_reverse is only bound when f_node was
                # non-empty above -- this branch would raise otherwise. Confirm.
                tmp_iter_result[fuse_map_reverse[node_name]] = {fuse_map_reverse[node_name]: val}
        final_result.append(tmp_iter_result)
    return final_result
def inspect_tensor(self, model, dataloader=None, op_list=None, iteration_list=None, inspect_type='activation', save_to_disk=False, save_path=None, quantization_cfg=None):
    """Dump weight/bias and/or activation tensors of the given ops.

    Args:
        model: TensorflowBaseModel or a GraphDef to inspect.
        dataloader: calibration dataloader (needed for activation dump).
        op_list: op names to inspect; defaults to an empty list.
        iteration_list: 1-based dataloader iterations to run.
        inspect_type: 'weight', 'activation' or 'all'.
        save_to_disk: when True, pickle the result under ``save_path``.
        save_path: dump directory; defaults to './nc_workspace/tmp/'.
        quantization_cfg: tuning config; loaded from the workspace if None.

    Returns:
        dict with optional 'weight' and 'activation' entries.
    """
    import tensorflow as tf
    from neural_compressor.adaptor.tf_utils.graph_util import GraphAnalyzer
    from neural_compressor.model.tensorflow_model import TensorflowBaseModel
    from neural_compressor.utils.utility import dump_data_to_local, load_data_from_pkl
    from .tf_utils.util import int8_node_name_reverse

    # BUG FIX: the original signature used op_list=[] / iteration_list=[]
    # (shared mutable default arguments); use None sentinels instead.
    if op_list is None:
        op_list = []
    if iteration_list is None:
        iteration_list = []
    if isinstance(model, TensorflowBaseModel):
        model = model.graph_def
    if not quantization_cfg:
        # TODO(review): assumes the tuning cfg was dumped to the default workspace.
        quantization_cfg = load_data_from_pkl('./nc_workspace/', 'cfg.pkl')
    node_list = op_list
    graph_node_name_mapping = {}
    quan_model_flag = False
    for node in model.node:
        node_name = int8_node_name_reverse(node)
        if 'Quantized' in node.op:
            # A quantized op means the graph is a quantized model.
            quan_model_flag = True
        if node.attr['value'].tensor.dtype == tf.dtypes.bfloat16.as_datatype_enum:
            quan_model_flag = True
        graph_node_name_mapping[node_name] = node
    if quan_model_flag:
        logger.info('Dump the tensor for quantized model.')
    g = GraphAnalyzer()
    g.graph = model
    graph_info = g.parse_graph()
    inspect_result = {}
    if inspect_type == 'weight' or inspect_type == 'all':
        logger.info('Start to inspect weight and bias.')
        weights_result = self.inspect_weight_and_bias(node_list, model, graph_info, graph_node_name_mapping)
        inspect_result['weight'] = weights_result
    if inspect_type == 'activation' or inspect_type == 'all':
        logger.info('Start to inspect activation.')
        activation_result = self.inspect_activation(node_list, model, graph_node_name_mapping, quantization_cfg, dataloader, iteration_list, graph_info)
        inspect_result['activation'] = activation_result
    if save_to_disk:
        if not save_path:
            save_path = './nc_workspace/tmp/'
        dump_data_to_local(inspect_result, save_path, 'inspect_result.pkl')
        logger.info(f'Dumped the inspect tensor to {save_path}')
    return inspect_result
def quantize_input(self, model):
    """Fold the input QuantizeV2 into the graph so the model takes qint8 input.

    Supports exactly one QuantizeV2 fed (optionally through a Pad) by a
    Placeholder.  A Pad, if present, is folded into the consuming Conv2D
    via the 'padding_list' attr.

    Returns:
        (graph, scale): the rewritten Graph and the input scale factor
        (127 / max(|min|, |max|)), or (model, None) on TF older than 2.1.0.
    """
    scale = None
    import tensorflow as tf
    if version1_lt_version2(tf.version.VERSION, '2.1.0'):
        logger.warning('Quantize input needs tensorflow 2.1.0 and newer.')
        return (model, scale)
    graph_def = model.as_graph_def()
    node_name_mapping = {}
    quantize_nodes = []
    for node in graph_def.node:
        node_name_mapping[node.name] = node
        if (node.op == 'QuantizeV2'):
            quantize_nodes.append(node)
    # Keep only QuantizeV2 nodes fed by Placeholder or Pad(Placeholder).
    target_quantize_nodes = []
    for node in quantize_nodes:
        if (((node_name_mapping[node.input[0]].op == 'Pad') and (node_name_mapping[node_name_mapping[node.input[0]].input[0]].op == 'Placeholder')) or (node_name_mapping[node.input[0]].op == 'Placeholder')):
            target_quantize_nodes.append(node)
    assert (len(target_quantize_nodes) == 1), 'only support 1 QuantizeV2 from Placeholder'
    quantize_node = target_quantize_nodes[0]
    quantize_node_input = node_name_mapping[quantize_node.input[0]]
    quantize_node_outputs = [node for node in graph_def.node if (quantize_node.name in node.input)]
    from .tf_utils.graph_util import GraphRewriterHelper
    if (quantize_node_input.op == 'Pad'):
        pad_node_input = node_name_mapping[quantize_node_input.input[0]]
        assert (pad_node_input.op == 'Placeholder'), 'only support Pad between QuantizeV2 and Placeholder'
        from tensorflow.python.framework import tensor_util
        paddings_tensor = tensor_util.MakeNdarray(node_name_mapping[quantize_node_input.input[1]].attr['value'].tensor).flatten()
        # Bypass the Pad and push its paddings into the Conv2D consumers.
        quantize_node.input[0] = quantize_node_input.input[0]
        for conv_node in quantize_node_outputs:
            assert ('Conv2D' in conv_node.op), 'only support QuantizeV2 to Conv2D'
            GraphRewriterHelper.set_attr_int_list(conv_node, 'padding_list', paddings_tensor)
        graph_def.node.remove(quantize_node_input)
    from tensorflow.python.framework import dtypes
    # The Placeholder now directly produces qint8 data.
    GraphRewriterHelper.set_attr_dtype(node_name_mapping[quantize_node.input[0]], 'dtype', dtypes.qint8)
    for conv_node in quantize_node_outputs:
        for (index, conv_input) in enumerate(conv_node.input):
            # Rewire value/min/max edges from the QuantizeV2 to its inputs.
            if (conv_input == quantize_node.name):
                conv_node.input[index] = quantize_node.input[0]
            elif (conv_input == (quantize_node.name + ':1')):
                conv_node.input[index] = quantize_node.input[1]
            elif (conv_input == (quantize_node.name + ':2')):
                conv_node.input[index] = quantize_node.input[2]
    max_node = node_name_mapping[quantize_node.input[2]]
    min_node = node_name_mapping[quantize_node.input[1]]
    max_value = max_node.attr['value'].tensor.float_val[0]
    min_value = min_node.attr['value'].tensor.float_val[0]
    # Symmetric qint8 scale from the recorded calibration range.
    scale = (127.0 / max(abs(max_value), abs(min_value)))
    graph_def.node.remove(quantize_node)
    graph = tensorflow.Graph()
    with graph.as_default():
        tensorflow.import_graph_def(graph_def, name='')
    return (graph, scale)
def get_optype_wise_ability(self):
    """Aggregate per-op capabilities into a per-op-type capability table.

    The first quantizable config seen for each op type wins; op types that
    appear only in the bf16 details get plain bf16 activation/weight configs.

    Returns:
        OrderedDict: op type -> {'activation': ..., 'weight': ...}.
    """
    ability = OrderedDict()
    for op_key, configs in self.quantizable_op_details.items():
        op_type = op_key[1]
        if op_type in ability:
            continue
        entry = {'activation': configs[0]['activation']}
        if 'weight' in configs[0]:
            entry['weight'] = configs[0]['weight']
        ability[op_type] = entry
    for op_key in self.bf16_op_details:
        op_type = op_key[1]
        if op_type not in ability:
            ability[op_type] = {'activation': {'dtype': ['bf16']}, 'weight': {'dtype': ['bf16']}}
    return ability
def _pre_hook_for_qat(self, dataloader=None):
    # Swap the wrapped keras model for its QAT counterpart before training.
    self.model.model = self.qat_convert(self.model.model)
def _post_hook_for_qat(self):
    """Hook run after QAT training; intentionally a no-op here."""
    pass
def _pre_eval_hook(self, model):
return model
def _post_eval_hook(self, model, **kwargs):
    """Hook run after evaluation; intentionally a no-op here."""
    pass
def save(self, model, path):
    """Persist the model to ``path``; intentionally a no-op for this adaptor."""
    pass
def convert(self, model, source, destination):
    """Convert a QAT (fake-quant) model into a really-quantized model.

    Args:
        model: QAT model to convert.
        source: must be 'qat'.
        destination: must be 'default'.
    """
    assert ((source.lower() == 'qat') and (destination.lower() == 'default'))
    capability = self.query_fw_capability(model)
    quantize_config = {'op_wise_config': {}}
    for each_op_info in capability['opwise']:
        is_perchannel = False
        weight_bit = 7.0
        for op_cap in capability['opwise'][each_op_info]:
            # Only entries with a 'quant_mode' describe a quantizable config.
            if (('activation' in op_cap) and ('quant_mode' in op_cap['activation'])):
                activation = op_cap['activation']
                if ('weight' in op_cap):
                    weight = op_cap['weight']
                    is_perchannel = (True if (weight['granularity'][0] == 'per_channel') else False)
                algorithm = activation['algorithm'][0]
                is_asymmetric = False
                if ('activation' in op_cap):
                    is_asymmetric = (True if (activation['scheme'][0] == 'asym') else False)
        # NOTE(review): ``algorithm`` / ``is_asymmetric`` are only bound when
        # some capability entry had a 'quant_mode'; a NameError would occur
        # otherwise. Confirm every opwise entry provides one.
        quantize_config['op_wise_config'][each_op_info[0]] = (is_perchannel, algorithm, is_asymmetric, weight_bit)
    from .tf_utils.graph_converter import GraphConverter
    tmp_graphdef = copy.deepcopy(model.graph_def)
    for i in tmp_graphdef.node:
        # Const nodes must not carry (control) inputs into the converter.
        if ((i.op == 'Const') and i.input):
            i.ClearField('input')
    model.graph_def = tmp_graphdef
    converter = GraphConverter(model, qt_config=quantize_config, int8_sequences=self.op_wise_sequences, fake_quant=True, new_api=self.new_api, performance_only=self.performance_only, use_bf16=self.use_bf16)
    return converter.convert()
def qat_convert(self, model, quantize_recipe=None):
    """Clone a keras model, wrapping quantizable layers for QAT.

    Args:
        model: ``tf.keras.Model`` (Functional or Sequential only).
        quantize_recipe: optional per-layer quantization recipe.

    Returns:
        the cloned, quantize-annotated keras model.
    """
    import tensorflow as tf
    assert isinstance(model, tf.keras.Model), 'The model to be converted is expected to be a `tf.keras.Model` instance. You should not pass an instance of type: {input}.'.format(input=model.__class__.__name__)
    assert (model.__class__.__name__ in ['Functional', 'Sequential']), 'Only `Functional` or `Sequential` keras model is supported for QAT.'
    from .tf_utils.quantize_graph.qat.quantize_config import global_config
    from .tf_utils.quantize_graph.qat.quantize_helper import init_quantize_config, qat_clone_function
    config = init_quantize_config(model, quantize_recipe)
    q_model = tf.keras.models.clone_model(model, input_tensors=None, clone_function=qat_clone_function)
    # The clone function reads global state; clear it so a later conversion
    # starts clean.
    global_config.clear()
    return q_model
# NOTE(review): this bare call looks like a decorator that lost its leading
# '@' (e.g. ``@dump_elapsed_time('Pass recover model')``) -- confirm upstream.
_elapsed_time('Pass recover model')
def recover_tuned_model(self, model, q_config):
    """Re-quantize ``model`` from a recorded tuning config, without calibration.

    Args:
        model: fp32 model to recover from.
        q_config: previously recorded quantization configuration.

    Returns:
        the recovered quantized model.
    """
    from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization
    self.pre_optimizer_handle = PreOptimization(model, self.new_api, self.device)
    self.pre_optimized_model = self.pre_optimizer_handle.get_optimized_model(self.itex_mode)
    model.graph_def = self.pre_optimized_model.graph_def
    from .tf_utils.graph_converter_without_calib import GraphConverterWithoutCalib
    converter = GraphConverterWithoutCalib(model, recover_config=q_config, new_api=self.new_api, performance_only=self.performance_only, use_bf16=self.use_bf16)
    return converter.convert_without_calib()
def diagnosis_helper(self, fp32_model, quan_model, tune_cfg, save_path):
    """Dump diagnosis artifacts comparing the fp32 and quantized models."""
    from .tf_utils.util import tf_diagnosis_helper
    return tf_diagnosis_helper(fp32_model, quan_model, tune_cfg, save_path)
def get_output_op_names(self, qmodel):
    """Collect the op names that feed the quantized model's outputs.

    Walks backwards from each output node until a Dequantize op (or a node
    with no inputs / a name outside the graph) is reached.  Dequantize ops
    named '<op>_dequantize' contribute '<op>'; internal '__dequant' helpers
    are skipped.

    Returns:
        list of de-duplicated op names.
    """
    from .tf_utils.graph_util import GraphAnalyzer

    graph_def = GraphAnalyzer().parse_graph(qmodel.graph_def)
    output_op_names = set()

    def _add_output_op_name(opname):
        if opname.endswith('_dequantize'):
            output_op_names.add(opname[: -len('_dequantize')])
        elif opname.endswith('__dequant'):
            # Internal helper dequant nodes are intentionally ignored.
            pass
        else:
            output_op_names.add(opname)

    for output_opname in qmodel.output_node_names:
        # Depth-first walk; extra inputs are pushed on the stack and the
        # first input is followed in place.  (The original also kept an
        # ``op_count`` counter that was never read; it has been removed.)
        stack = [output_opname]
        while stack:
            opname = stack.pop()
            while True:
                if opname not in graph_def:
                    break
                op = graph_def[opname]
                if op.node.op == 'Dequantize':
                    _add_output_op_name(opname)
                    break
                next_opnames = op.node.input
                if not next_opnames:
                    break
                elif len(next_opnames) > 1:
                    stack += next_opnames[1:]
                opname = next_opnames[0]
    output_op_names = list(output_op_names)
    logger.debug(f'output op names: {output_op_names}')
    return output_op_names
def calculate_op_sensitivity(self, model, dataloader, tune_cfg, output_op_names, confidence_batches, fallback=True, requantize_cfgs=None):
    """Rank ops by the output MSE that flipping their precision introduces.

    With ``fallback=True`` the candidates are currently-quantized ops
    (flipped to fp32); otherwise they are fp32 ops present in
    ``requantize_cfgs`` (flipped back to their quantized configs).

    Returns:
        list of ops sorted ascending by MSE.
    """
    from copy import deepcopy

    fp32_op_cfg = {'activation': {'dtype': 'fp32', 'quant_mode': 'fp32'}, 'weight': {'dtype': 'fp32'}}
    if fallback:
        ops_list = []
        for op, config in tune_cfg['op'].items():
            if config['activation']['quant_mode'] in ('static', 'dynamic'):
                ops_list.append(op)
        replace_cfgs = {op: fp32_op_cfg for op in tune_cfg['op']}
    else:
        ops_list = []
        for op, config in tune_cfg['op'].items():
            if config['activation']['quant_mode'] == 'fp32' and op in requantize_cfgs:
                ops_list.append(op)
        replace_cfgs = requantize_cfgs
    mse_result = self._get_mse_order(model, deepcopy(tune_cfg), replace_cfgs, ops_list, dataloader, output_op_names, confidence_batches)
    ranked = sorted(mse_result.items(), key=lambda item: item[1])
    mse_order = [op for op, _ in ranked]
    logger.debug('Dump MSE order:')
    for op in mse_order:
        logger.debug(f'{op}: {mse_result[op]}')
    return mse_order
def _get_mse_order(self, fp32_model, tune_cfg, replace_cfgs, ops_lst, dataloader, output_op_names, confidence_batches):
    """Measure the output MSE introduced by flipping each candidate op.

    For every op in ``ops_lst``: temporarily swap its config for the
    replacement, re-quantize on a partial dataloader, compare outputs with
    the fp32 reference, then restore the original config.

    Returns:
        dict: op -> mean squared error.
    """
    op_cfg = tune_cfg['op']
    mse_result = {}
    partial_dataloader = self._partial_dataloader(dataloader, confidence_batches)
    # fp32 reference outputs, computed once.
    fp32_output = self._inference_model_on_batches(fp32_model, tune_cfg, partial_dataloader, output_op_names)
    for op in ops_lst:
        # Swap in the replacement config, quantize, measure, restore.
        backup_cfg = op_cfg[op]
        op_cfg[op] = replace_cfgs[op]
        q_model = self.quantize(tune_cfg, fp32_model, partial_dataloader)
        q_output = self._inference_model_on_batches(q_model, tune_cfg, partial_dataloader, output_op_names)
        mse_result[op] = self._calculate_mse(fp32_output, q_output)
        op_cfg[op] = backup_cfg
    return mse_result
def _partial_dataset_of(self, dataloader, confidence_batches):
    """Return the first ``confidence_batches`` samples of the dataloader's dataset."""
    from neural_compressor.data.datasets.dummy_dataset import DummyDataset
    # NOTE(review): both names resolve to the same class here -- the second
    # import was presumably meant to come from a different (v2.x) module.
    from neural_compressor.data.datasets.dummy_dataset import DummyDataset as DummyDataset_v2_x
    if (isinstance(dataloader.dataset, DummyDataset) or isinstance(dataloader.dataset, DummyDataset_v2_x)):
        assert isinstance(confidence_batches, int)
        # Dummy datasets are sliceable; deep-copy so the original is untouched.
        ds = copy.deepcopy(dataloader.dataset)
        ds.dataset = ds.dataset[:confidence_batches]
        return ds
    else:
        # Other datasets are expected to expose a take() method.
        return dataloader.dataset.take(confidence_batches)
def _partial_dataloader(self, dataloader, confidence_batches):
    """Clone the dataloader, restricted to its first ``confidence_batches`` batches."""
    return type(dataloader)(dataset=self._partial_dataset_of(dataloader, confidence_batches), batch_size=dataloader.batch_size, last_batch=dataloader.last_batch, collate_fn=dataloader.collate_fn, sampler=dataloader.sampler, batch_sampler=dataloader.batch_sampler, num_workers=dataloader.num_workers, pin_memory=dataloader.pin_memory, shuffle=dataloader.shuffle, distributed=dataloader.distributed)
def _calculate_mse(self, fp32_output, q_output):
result = []
for (i, j) in zip(fp32_output, q_output):
result.append(np.square((i - j)).mean())
return np.array(result).mean()
def _inference_model_on_batches(self, model, tune_cfg, dataloader, output_op_names):
    """Run the model over the dataloader and gather the listed ops' outputs.

    Returns a flat list: every output tensor of every listed op, for every
    batch, in iteration order.
    """
    from .tf_utils.util import generate_feed_dict

    input_tensors = model.input_tensor
    output_tensors = []
    for op_name in output_op_names:
        output_tensors.extend(model.graph.get_operation_by_name(op_name).outputs)
    predictions = []
    for batch_index, (batch_inputs, _) in enumerate(dataloader):
        feed = generate_feed_dict(input_tensors, batch_inputs)
        predictions.extend(model.sess.run(output_tensors, feed))
    return predictions
def smooth_quant(self, model, dataloader, calib_iter=1, alpha=0.5, folding=False, percentile=99.999, op_types=None, scales_per_op=True, record_max_info=False, weight_clip=True, auto_alpha_args=None, default_alpha=0.5):
    """Apply the SmoothQuant transform to the model (cached after first call).

    Args:
        model: model to smooth; LLM saved-models are routed to smooth_quant_LLM.
        dataloader: calibration dataloader.
        calib_iter: number of calibration iterations.
        alpha: smoothing strength.
        folding: accepted for interface compatibility; not used here.
        percentile: calibration percentile for activation ranges.
        op_types: op types to smooth; defaults to ['MatMul', 'Conv2D'].
        scales_per_op: compute one scale per op rather than per tensor.
        record_max_info, weight_clip, auto_alpha_args, default_alpha:
            accepted for interface compatibility; not used here.

    Returns:
        the smoothed model.
    """
    logger.info('Start Smoothing process for Smooth Quantization.')
    # BUG FIX: the original defaults were a shared list / dict literal
    # (mutable default arguments); use None sentinels instead.
    if op_types is None:
        op_types = ['MatMul', 'Conv2D']
    if auto_alpha_args is None:
        auto_alpha_args = {'alpha_min': 0.0, 'alpha_max': 1.0, 'alpha_step': 0.1, 'shared_criterion': 'mean', 'do_blockwise': False}
    if self.smooth_quant_model is not None:
        # Already smoothed once; reuse the cached result.
        return self.smooth_quant_model
    if model.model_type == 'llm_saved_model':
        return self.smooth_quant_LLM(model, dataloader, calib_iter, alpha, folding, percentile, op_types, scales_per_op)
    from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization
    self.pre_optimizer_handle = PreOptimization(model, self.new_api, self.device)
    self.pre_optimized_model = self.pre_optimizer_handle.get_optimized_model(self.itex_mode)
    model.graph_def = self.pre_optimized_model.graph_def
    # Calibrate the per-channel activation maxima for the target ops.
    from .tf_utils.smooth_quant_calibration import SmoothQuantCalibration
    calibration = SmoothQuantCalibration(model, dataloader, calib_iter, op_types, percentile)
    (max_vals_per_channel, sq_weight_node_names) = calibration()
    from neural_compressor.adaptor.tf_utils.util import get_weight_from_input_tensor
    (sq_weight_tensors, sq_weights_nodes) = get_weight_from_input_tensor(model, max_vals_per_channel.keys(), op_types)
    # Scale weights/activations and remember the inserted Mul ops.
    from .tf_utils.smooth_quant_scaler import SmoothQuantScaler
    scaler = SmoothQuantScaler(model, dataloader, alpha, scales_per_op)
    (model, mul_list) = scaler.transform(max_vals_per_channel, sq_weight_tensors, sq_weights_nodes, sq_weight_node_names)
    self.smooth_quant_mul_ops.extend(mul_list)
    self.smooth_quant_model = model
    return self.smooth_quant_model
def smooth_quant_LLM(self, model, dataloader, calib_iter=1, alpha=0.5, folding=False, percentile=99.999, op_types=['MatMul', 'Conv2D'], scales_per_op=True):
from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization
self.pre_optimizer_handle = PreOptimization(model, self.new_api, self.device)
self.pre_optimized_model = self.pre_optimizer_handle.get_optimized_model(self.itex_mode)
model.graph_def = self.pre_optimized_model.graph_def
op_types = ['MatMul']
llm_temp_dir = (self.work_dir + '/temp_saved_model')
from .tf_utils.smooth_quant_calibration import SmoothQuantCalibrationLLM
calibration = SmoothQuantCalibrationLLM(model._model, dataloader, calib_iter, op_types, percentile, llm_temp_dir, model.weight_name_mapping)
(max_vals_per_channel, sq_target_node_names, sq_weight_tensor_dict, sq_graph_def) = calibration(model.input_node_names, model.output_node_names)
from .tf_utils.smooth_quant_scaler import SmoothQuantScalerLLM
scaler = SmoothQuantScalerLLM(sq_graph_def, alpha, scales_per_op, op_types)
(sq_graph_def, sq_weight_scale_dict, mul_list) = scaler.transform(max_vals_per_channel, sq_weight_tensor_dict, sq_target_node_names)
model.graph_def = sq_graph_def
model.model_path = llm_temp_dir
model.sq_weight_scale_dict = sq_weight_scale_dict
self.smooth_quant_mul_ops.extend(mul_list)
self.smooth_quant_model = model
return self.smooth_quant_model |
def seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs (including every CUDA device
    when CUDA is available) so runs are reproducible."""
    for seeder in (random.seed, numpy.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def train(args):
    """Train dummy image/caption encoders with a triplet loss.

    Logs to a timestamped folder under ``args.save``, evaluates every
    ``args.log_step`` steps, checkpoints on dev-score improvement, and halves
    the learning rate after ``args.patient`` stale evaluations; training stops
    once the LR drops below ``learning_rate / 1000``.
    """
    # Augmented transform for training; deterministic center-crop for dev.
    transform = transforms.Compose([transforms.RandomCrop(args.crop_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    transform_dev = transforms.Compose([transforms.CenterCrop(args.crop_size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    vocab = Vocabulary()
    vocab.load(DICT.format(args.data_set))
    data_loader = get_loader(IMAGE_ROOT.format(args.data_set), CAPT.format(args.data_set, 'train'), vocab, transform, args.batch_size, shuffle=True, return_target=True, num_workers=args.num_workers)
    data_loader_dev = get_loader(IMAGE_ROOT.format(args.data_set), CAPT.format(args.data_set, 'val'), vocab, transform_dev, args.batch_size, shuffle=False, return_target=True, num_workers=args.num_workers)
    ranker = Ranker(root=IMAGE_ROOT.format(args.data_set), image_split_file=SPLIT.format(args.data_set, 'val'), transform=transform_dev, num_workers=args.num_workers)
    save_folder = '{}/{}-{}'.format(args.save, args.data_set, time.strftime('%Y%m%d-%H%M%S'))
    create_exp_dir(save_folder, scripts_to_save=['models.py', 'data_loader.py', 'train.py', 'build_vocab.py', 'utils.py'])

    def logging(s, print_=True, log_=True):
        # Echo to stdout and append to the run's log file.
        if print_:
            print(s)
        if log_:
            with open(os.path.join(save_folder, 'log.txt'), 'a+') as f_log:
                f_log.write((s + '\n'))
    logging(str(args))
    image_encoder = DummyImageEncoder(args.embed_size).to(device)
    caption_encoder = DummyCaptionEncoder(vocab_size=len(vocab), vocab_embed_size=(args.embed_size * 2), embed_size=args.embed_size).to(device)
    image_encoder.train()
    caption_encoder.train()
    params = (image_encoder.get_trainable_parameters() + caption_encoder.get_trainable_parameters())
    current_lr = args.learning_rate
    optimizer = torch.optim.Adam(params, lr=current_lr)
    cur_patient = 0
    best_score = float('-inf')
    stop_train = False
    total_step = len(data_loader)
    # Single pass over the data; LR decay / early stopping ends training.
    for epoch in range(1):
        for (i, (target_images, candidate_images, captions, lengths, meta_info)) in enumerate(data_loader):
            target_images = target_images.to(device)
            target_ft = image_encoder.forward(target_images)
            candidate_images = candidate_images.to(device)
            candidate_ft = image_encoder.forward(candidate_images)
            captions = captions.to(device)
            caption_ft = caption_encoder(captions, lengths)
            # Negatives are the batch's target features in reversed order
            # (deterministic, despite the name `random_index`).
            m = target_images.size(0)
            random_index = [((m - 1) - n) for n in range(m)]
            random_index = torch.LongTensor(random_index)
            negative_ft = target_ft[random_index]
            # Anchor = candidate image feature + modifying caption feature.
            loss = triplet_avg(anchor=(candidate_ft + caption_ft), positive=target_ft, negative=negative_ft)
            caption_encoder.zero_grad()
            image_encoder.zero_grad()
            loss.backward()
            optimizer.step()
            if ((i % args.log_step) == 0):
                logging('| epoch {:3d} | step {:6d}/{:6d} | lr {:06.6f} | train loss {:8.3f}'.format(epoch, i, total_step, current_lr, loss.item()))
                # Periodic dev evaluation, checkpointing and LR schedule.
                image_encoder.eval()
                caption_encoder.eval()
                logging(('-' * 77))
                metrics = eval_batch(data_loader_dev, image_encoder, caption_encoder, ranker)
                logging('| eval loss: {:8.3f} | score {:8.5f} / {:8.5f} '.format(metrics['loss'], metrics['score'], best_score))
                logging(('-' * 77))
                image_encoder.train()
                caption_encoder.train()
                dev_score = metrics['score']
                if (dev_score > best_score):
                    best_score = dev_score
                    # Detach the (large, frozen) resnet so the checkpoint
                    # stores only the trainable head, then restore it.
                    resnet = image_encoder.delete_resnet()
                    torch.save(image_encoder.state_dict(), os.path.join(save_folder, 'image-{}.th'.format(args.embed_size)))
                    image_encoder.load_resnet(resnet)
                    torch.save(caption_encoder.state_dict(), os.path.join(save_folder, 'cap-{}.th'.format(args.embed_size)))
                    cur_patient = 0
                else:
                    cur_patient += 1
                    if (cur_patient >= args.patient):
                        # Halve the LR after `patient` stale evals; stop once
                        # it falls below learning_rate / 1000.
                        current_lr /= 2
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = current_lr
                        if (current_lr < (args.learning_rate * 0.001)):
                            stop_train = True
                            break
        if stop_train:
            break
    logging('best_dev_score: {}'.format(best_score))
def add_eval_lm_args(parser):
    """Register the language-model evaluation CLI options on `parser`."""
    group = parser.add_argument_group('LM Evaluation')
    # Shared evaluation flags first, then LM-specific ones.
    add_common_eval_args(group)
    lm_flags = [
        ('--output-word-probs', dict(action='store_true', help='if set, outputs words and their predicted log probabilities to standard output')),
        ('--output-word-stats', dict(action='store_true', help='if set, outputs word statistics such as word count, average probability, etc')),
        ('--target-idx', dict(type=int, default=0, help='if set and the language model has multiple targets, evaluates language model for this particular target')),
    ]
    for flag, options in lm_flags:
        group.add_argument(flag, **options)
class TestCleaner():
    """Unit tests for the Cleaner multi-agent grid environment.

    NOTE(review): `cleaner` and `key` are consumed as injected test arguments,
    which normally requires `@pytest.fixture` decorators — none are visible in
    this chunk; confirm they were not stripped.
    """

    def cleaner(self) -> Cleaner:
        # Environment under test, built from a deterministic dummy generator.
        generator = DummyGenerator()
        return Cleaner(generator=generator)

    def key(self) -> chex.PRNGKey:
        # Fixed PRNG key for reproducible tests.
        return jax.random.PRNGKey(0)

    def test_cleaner__reset_jit(self, cleaner: Cleaner) -> None:
        """reset must trace at most once under jit."""
        chex.clear_trace_counter()
        reset_fn = jax.jit(chex.assert_max_traces(cleaner.reset, n=1))
        key = jax.random.PRNGKey(0)
        (state, timestep) = reset_fn(key)
        # Second call with the same key must be served from the jit cache.
        (state, timestep) = reset_fn(key)
        assert isinstance(timestep, TimeStep)
        assert isinstance(state, State)

    def test_cleaner__reset(self, cleaner: Cleaner, key: chex.PRNGKey) -> None:
        """reset yields all agents at the origin on an all-dirty grid."""
        reset_fn = jax.jit(cleaner.reset)
        (state, timestep) = reset_fn(key)
        assert isinstance(timestep, TimeStep)
        assert isinstance(state, State)
        assert jnp.all((state.agents_locations == jnp.zeros((cleaner.num_agents, 2))))
        # Only the starting tile is clean after reset.
        assert (jnp.sum((state.grid == CLEAN)) == 1)
        assert (state.step_count == 0)
        assert_is_jax_array_tree(state)

    def test_cleaner__step_jit(self, cleaner: Cleaner) -> None:
        """step must trace at most once under jit."""
        key = jax.random.PRNGKey(0)
        (state, timestep) = cleaner.reset(key)
        action = jnp.array([1, 2, 3], jnp.int32)
        chex.clear_trace_counter()
        step_fn = jax.jit(chex.assert_max_traces(cleaner.step, n=1))
        (next_state, next_timestep) = step_fn(state, action)
        (next_state, next_timestep) = step_fn(state, action)
        assert isinstance(next_timestep, TimeStep)
        assert isinstance(next_state, State)

    def test_cleaner__step(self, cleaner: Cleaner, key: chex.PRNGKey) -> None:
        """Two steps clean the expected tiles and move agents as expected."""
        (initial_state, timestep) = cleaner.reset(key)
        step_fn = jax.jit(cleaner.step)
        # All agents move in direction 1; they overlap, so one tile changes.
        actions = jnp.array(([1] * cleaner.num_agents))
        (state, timestep) = step_fn(initial_state, actions)
        assert (jnp.sum((state.grid != initial_state.grid)) == 1)
        assert (state.grid[(0, 1)] == CLEAN)
        assert (timestep.reward == (1 - cleaner.penalty_per_timestep))
        assert jnp.all((state.agents_locations == jnp.array([0, 1])))
        actions = jnp.array([2, 3, 2])
        (state, timestep) = step_fn(state, actions)
        assert (jnp.sum((state.grid != initial_state.grid)) == 2)
        assert (state.grid[(0, 1)] == CLEAN)
        assert (state.grid[(1, 1)] == CLEAN)
        assert (timestep.reward == (1 - cleaner.penalty_per_timestep))
        assert (timestep.step_type == StepType.MID)
        assert jnp.all((state.agents_locations[0] == jnp.array([1, 1])))
        assert jnp.all((state.agents_locations[1] == jnp.array([0, 0])))
        assert jnp.all((state.agents_locations[2] == jnp.array([1, 1])))

    def test_cleaner__step_invalid_action(self, cleaner: Cleaner, key: chex.PRNGKey) -> None:
        """An invalid action (agent 0: action 0) terminates the episode."""
        (state, _) = cleaner.reset(key)
        step_fn = jax.jit(cleaner.step)
        actions = jnp.array([0, 1, 1])
        (state, timestep) = step_fn(state, actions)
        assert (timestep.step_type == StepType.LAST)
        # The invalid agent stays put; the others moved to (0, 1).
        assert jnp.all((state.agents_locations[0] == jnp.array([0, 0])))
        assert jnp.all((state.agents_locations[1] == jnp.array([0, 1])))
        assert jnp.all((state.agents_locations[2] == jnp.array([0, 1])))
        assert (timestep.reward == (1 - cleaner.penalty_per_timestep))

    def test_cleaner__initial_action_mask(self, cleaner: Cleaner, key: chex.PRNGKey) -> None:
        """At reset only action 1 is available to every agent."""
        (state, _) = cleaner.reset(key)
        expected_action_mask = jnp.array([[False, True, False, False] for _ in range(cleaner.num_agents)])
        assert jnp.all((state.action_mask == expected_action_mask))
        # The mask recomputed from the grid must match the stored one.
        action_mask = cleaner._compute_action_mask(state.grid, state.agents_locations)
        assert jnp.all((action_mask == expected_action_mask))

    def test_cleaner__action_mask(self, cleaner: Cleaner, key: chex.PRNGKey) -> None:
        """Masks for hand-picked agent locations match the maze layout."""
        (state, _) = cleaner.reset(key)
        agents_locations = jnp.array([[1, 1], [2, 2], [4, 4]])
        action_mask = cleaner._compute_action_mask(state.grid, agents_locations)
        assert jnp.all((action_mask[0] == jnp.array([True, False, True, False])))
        assert jnp.all((action_mask[1] == jnp.array([False, True, False, True])))
        assert jnp.all((action_mask[2] == jnp.array([False, False, False, True])))

    def test_cleaner__does_not_smoke(self, cleaner: Cleaner) -> None:
        """Random masked-action rollout through the standard smoke test."""
        def select_actions(key: chex.PRNGKey, observation: Observation) -> chex.Array:
            def select_action(key: chex.PRNGKey, agent_action_mask: chex.Array) -> chex.Array:
                # Sample uniformly among the allowed actions.
                return jax.random.choice(key, jnp.arange(4), p=agent_action_mask.flatten())
            subkeys = jax.random.split(key, cleaner.num_agents)
            return select_action(subkeys, observation.action_mask)
        check_env_does_not_smoke(cleaner, select_actions)

    def test_cleaner__compute_extras(self, cleaner: Cleaner, key: chex.PRNGKey) -> None:
        """Extras expose the dirty-tile ratio and count, consistent with the grid."""
        (state, _) = cleaner.reset(key)
        extras = cleaner._compute_extras(state)
        assert (list(extras.keys()) == ['ratio_dirty_tiles', 'num_dirty_tiles'])
        assert (0 <= extras['ratio_dirty_tiles'] <= 1)
        grid = state.grid
        assert (extras['ratio_dirty_tiles'] == (jnp.sum((grid == DIRTY)) / jnp.sum((grid != WALL))))
        assert (extras['num_dirty_tiles'] == jnp.sum((grid == DIRTY)))
class SigmoidFocalClassificationLoss(nn.Module):
    """Sigmoid focal loss (Lin et al., "Focal Loss for Dense Object Detection")."""

    def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
        """
        Args:
            gamma: focusing exponent applied to the modulating factor.
            alpha: class-balancing weight for the positive class.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
        """Numerically stable BCE-with-logits: max(x,0) - x*t + log(1 + exp(-|x|)).

        Fix: declared @staticmethod — the original had neither the decorator
        nor a `self` parameter, so calling it via `self.` in forward() passed
        the instance as `input` and raised a TypeError.
        """
        loss = ((torch.clamp(input, min=0) - (input * target)) + torch.log1p(torch.exp((- torch.abs(input)))))
        return loss

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """Element-wise focal loss, scaled by per-anchor weights.

        Args:
            input: raw logits, shape (..., num_classes).
            target: binary targets with the same shape as `input`.
            weights: per-anchor weights, one dim less than `input`.

        Returns:
            Tensor of the same shape as `input` with the weighted focal loss.
        """
        pred_sigmoid = torch.sigmoid(input)
        # alpha for positives, (1 - alpha) for negatives.
        alpha_weight = ((target * self.alpha) + ((1 - target) * (1 - self.alpha)))
        # pt is the probability assigned to the *wrong* outcome.
        pt = ((target * (1.0 - pred_sigmoid)) + ((1.0 - target) * pred_sigmoid))
        focal_weight = (alpha_weight * torch.pow(pt, self.gamma))
        bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
        loss = (focal_weight * bce_loss)
        weights = weights.unsqueeze((- 1))
        assert (weights.shape.__len__() == loss.shape.__len__())
        return (loss * weights)
def check_collisions():
    """Print every model tag that is defined by more than one registered module."""
    tag_owners = defaultdict(list)
    for module in _MODEL_TAG_MODULES:
        module_name = module.__name__
        for tag in extract_tags(module):
            tag_owners[tag].append(module_name)
    # Insertion order is preserved, so output order matches registration order.
    for tag, owners in tag_owners.items():
        if len(owners) > 1:
            print('Variable {} defined multiple times'.format(tag))
def flatten(xs: Iterable[Any]) -> List[Any]:
    """Recursively flatten arbitrarily nested iterables into one flat list.

    Strings and bytes are treated as atoms. Fix: the original recursed into
    every Iterable — a 1-character string is an iterable of 1-character
    strings, so any string element caused infinite recursion.
    """
    result: List[Any] = []
    for x in xs:
        if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
            result.extend(flatten(x))
        else:
            result.append(x)
    return result
def check_existing_pt_files(opt):
    """Exit with status 1 if preprocessed .pt shards already exist.

    Checks the train/valid/vocab shard patterns derived from
    ``opt.save_data`` and aborts on the first match so existing artifacts are
    never silently overwritten. Returns None when no shards exist.
    """
    for shard_type in ['train', 'valid', 'vocab']:
        pattern = opt.save_data + '.' + shard_type + '*.pt'
        if glob.glob(pattern):
            # Typo fix in the user-facing message: "exisiting" -> "existing".
            sys.stderr.write('Please backup existing pt file: %s, to avoid tampering!\n' % pattern)
            sys.exit(1)
class BaseDHDataset(BaseDataset):
    """Base dataset that scans an image folder and averages evaluation metrics."""

    def __init__(self, pipeline, test_mode=False):
        """
        Args:
            pipeline: data-processing pipeline applied in __getitem__.
            test_mode: forwarded to BaseDataset.
        """
        super().__init__(pipeline, test_mode)

    @staticmethod
    def scan_folder(path):
        """Return the sorted image paths found directly inside `path`.

        Fix: declared @staticmethod — the original took only `path` with no
        `self`, so an instance call (`self.scan_folder(dir)`) would bind the
        instance to `path` and always raise the TypeError below.

        Args:
            path: folder as str or pathlib.Path.

        Raises:
            TypeError: if `path` is neither str nor Path.
            AssertionError: if the folder contains no valid image file.
        """
        if isinstance(path, (str, Path)):
            path = str(path)
        else:
            raise TypeError(f"'path' must be a str or a Path object, but received {type(path)}.")
        images = list(scandir(path, suffix=IMG_EXTENSIONS, recursive=False))
        images = [osp.join(path, v) for v in images]
        images.sort()
        assert images, f'{path} has no valid image file.'
        return images

    def __getitem__(self, idx):
        """Deep-copy the idx-th record, pin scale=1 and run the pipeline."""
        results = copy.deepcopy(self.data_infos[idx])
        results['scale'] = 1
        return self.pipeline(results)

    def evaluate(self, results, logger=None):
        """Average each metric in `results` over the whole dataset.

        Args:
            results: list (len == len(self)) of dicts with an 'eval_result'
                mapping metric name -> value.
            logger: unused; kept for API compatibility.

        Returns:
            dict mapping metric name -> dataset-mean value.
        """
        if (not isinstance(results, list)):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        results = [res['eval_result'] for res in results]
        eval_result = defaultdict(list)
        for res in results:
            for (metric, val) in res.items():
                eval_result[metric].append(val)
        for (metric, val_list) in eval_result.items():
            assert (len(val_list) == len(self)), f'Length of evaluation result of {metric} is {len(val_list)}, should be {len(self)}'
        eval_result = {metric: (sum(values) / len(self)) for (metric, values) in eval_result.items()}
        return eval_result
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` sub-blocks of a transformers-style __init__.

    Returns True when `check_only` is set and the file would change; otherwise
    rewrites the file in place. Files without `_import_structure` are skipped
    (returns None).
    """
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if ('_import_structure' not in code):
        return
    # Split the module body into top-level blocks between the
    # `_import_structure = {` definition and the TYPE_CHECKING branch.
    main_blocks = split_code_in_indented_blocks(code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # The first and last blocks are outside the import structure; skip them.
    for block_idx in range(1, (len(main_blocks) - 1)):
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Advance to the first line mentioning _import_structure; a dummy
        # import inside the block disqualifies the whole block.
        line_idx = 0
        while ((line_idx < len(block_lines)) and ('_import_structure' not in block_lines[line_idx])):
            if ('import dummy' in block_lines[line_idx]):
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if (line_idx >= len(block_lines)):
            continue
        # The sortable region excludes the header lines and the closing line.
        internal_block_code = '\n'.join(block_lines[line_idx:(- 1)])
        indent = get_indent(block_lines[1])
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # Key-extraction pattern differs for direct (`= {`) vs indirect blocks.
        pattern = (_re_direct_key if ('_import_structure = {' in block_lines[0]) else _re_indirect_key)
        # keys[i] is None for unkeyed sub-blocks, which keep their position.
        keys = [(pattern.search(b).groups()[0] if (pattern.search(b) is not None) else None) for b in internal_blocks]
        keys_to_sort = [(i, key) for (i, key) in enumerate(keys) if (key is not None)]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=(lambda x: x[1]))]
        # Rebuild: keyed sub-blocks in sorted order (objects inside each also
        # sorted); unkeyed sub-blocks stay where they were.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if (keys[i] is None):
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        main_blocks[block_idx] = '\n'.join(((block_lines[:line_idx] + reorderded_blocks) + [block_lines[(- 1)]]))
    # Only report/rewrite when sorting actually changed something.
    if (code != '\n'.join(main_blocks)):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
class mit_b4_kd(Segformer):
    """SegFormer MiT-B4 backbone configuration used for knowledge distillation."""

    def __init__(self, **kwargs):
        # Fixed B4 hyper-parameters; note that `kwargs` is intentionally
        # ignored, exactly as in the original implementation.
        b4_config = dict(
            patch_size=4,
            embed_dims=[64, 128, 320, 512],
            num_heads=[1, 2, 5, 8],
            mlp_ratios=[4, 4, 4, 4],
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            depths=[3, 8, 27, 3],
            sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0,
            drop_path_rate=0.1,
            decoder_dim=768,
        )
        super(mit_b4_kd, self).__init__(**b4_config)
def test_visualize_transforms_n_2(download_functional_test_files):
    """End-to-end run of visualize_transforms with -n 2: expect 10 PNG slices."""
    data_dir = Path(__tmp_dir__, 'data_functional_testing')
    input_file = Path(data_dir, 'sub-unf01/anat/sub-unf01_T1w.nii.gz')
    output_dir = Path(__tmp_dir__, 'output_visualize_transforms_n_2')
    config_file = Path(data_dir, 'model_config.json')
    label_file = Path(data_dir, 'derivatives/labels/sub-test001/anat/sub-unf01_T1w_seg-manual.nii.gz')
    cli_args = ['--input', str(input_file), '--output', str(output_dir), '--config', str(config_file), '-r', str(label_file), '-n', '2']
    visualize_transforms.main(args=cli_args)
    assert output_dir.exists()
    produced = [entry.name for entry in output_dir.iterdir()]
    assert len(produced) == 10
    # Every produced file is a per-slice PNG of the Resample transform.
    for filename in produced:
        assert 'Resample' in filename
        assert 'slice' in filename
        assert '.png' in filename
def plot_clep_median_error(metric, all_errors):
    """Plot the median ensemble error over 1..21-day horizons and save a PDF.

    Uses the first 71 entries of all_errors['ensemble'][horizon] per horizon;
    the y-axis label depends on `metric` ('mae', 'mape' or 'sqrt').
    """
    medians = [np.median(all_errors['ensemble'][horizon][:71]) for horizon in range(1, 22)]
    plt.figure(figsize=(4, 3), dpi=200)
    ax = plt.subplot(111)
    plt.plot(np.arange(1, 22, 1), medians, color='lightcoral', linewidth=3)
    # Only the three known metrics get a y-label, as in the original.
    ylabel_by_metric = {
        'mae': 'Median of raw scale MAE',
        'mape': 'Median of MAPE',
        'sqrt': 'Median of square root MAE',
    }
    if metric in ylabel_by_metric:
        plt.ylabel(ylabel_by_metric[metric], fontsize=15)
    plt.xlabel('Horizon', fontsize=15)
    plt.yticks(fontsize=12)
    plt.xticks(fontsize=12)
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    plt.xticks([1, 6, 11, 16, 21])
    plt.tight_layout()
    filename = os.path.join(result_dir, f'median_{metric}_clep_21_day.pdf')
    plt.savefig(filename)
def main():
    """transformers-cli entry point: register all subcommands and dispatch."""
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')
    # Registration order is preserved exactly as in the original CLI.
    for command in (
        ConvertCommand,
        DownloadCommand,
        EnvironmentCommand,
        RunCommand,
        ServeCommand,
        UserCommands,
        AddNewModelCommand,
        AddNewModelLikeCommand,
        LfsCommands,
        PTtoTFCommand,
    ):
        command.register_subcommand(commands_parser)
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # No subcommand given: show usage and exit with an error code.
        parser.print_help()
        exit(1)
    service = args.func(args)
    service.run()
def loss_function(real, pred, loss_object):
    """Mean over all elements of the per-element loss from `loss_object`."""
    return tf.reduce_mean(loss_object(real, pred))
def KL_Divergence(summary_freq, doc_freq):
    """KL-divergence contribution of summary terms also present in the document.

    Terms missing from `doc_freq` are skipped (no smoothing). Raises if the
    accumulated value is NaN.
    """
    total = sum(
        freq * math.log(freq / float(doc_freq[word]))
        for word, freq in summary_freq.items()
        if word in doc_freq
    )
    if np.isnan(total):
        raise Exception('KL_Divergence returns NaN')
    return total
def switchInterface(screen, cfg):
    """Show a 'next'/'exit' menu screen; return on 'next', quit the process on 'exit'.

    Blocks in a 60 FPS pygame event loop until the player clicks one of the
    buttons or closes the window.
    """
    screen.fill(cfg.BACKGROUNDCOLOR)
    clock = pygame.time.Clock()
    while True:
        # Buttons are (re)constructed every frame; Button presumably draws
        # itself onto `screen` when constructed — confirm in Button's code.
        button_1 = Button(screen, (95, 150), 'next', cfg)
        button_2 = Button(screen, (95, 305), 'exit', cfg)
        for event in pygame.event.get():
            if (event.type == pygame.QUIT):
                pygame.quit()
                sys.exit()
            if (event.type == pygame.MOUSEBUTTONDOWN):
                if button_1.collidepoint(pygame.mouse.get_pos()):
                    # 'next' pressed: hand control back to the caller.
                    return
                elif button_2.collidepoint(pygame.mouse.get_pos()):
                    pygame.quit()
                    sys.exit(0)
        clock.tick(60)
        pygame.display.update()
def get_activation(model, activation, name):
    """Build a forward hook that stores a layer's output in `activation[name]`.

    `model` is unused (kept for interface compatibility); the returned hook
    matches PyTorch's (module, inputs, output) signature.
    """
    def hook(module, inputs, output):
        # Detach and move to CPU so the stored tensor holds no graph/device ties.
        activation[name] = output.cpu().detach()
    return hook
class BigdlNativeLLM(LLM):
    """LangChain LLM wrapper around BigDL-LLM native (GGML) model backends.

    Deprecated in favor of the per-model LLM API. NOTE(review): the
    `logging.warning(...)` below sits directly in the class body, so it fires
    once at class-definition (import) time, not per instantiation; the string
    typo "sepcific" is a runtime message and is preserved as-is.
    """
    logging.warning('BigdlNativeLLM has been deprecated, please switch to the new LLM API for sepcific models.')
    # Native model family to load; must be a key of `family_info`.
    model_family: str = 'llama'
    # Maps each family to the module/class implementing its native binding.
    family_info = {'llama': {'module': 'bigdl.llm.models', 'class': 'Llama'}, 'bloom': {'module': 'bigdl.llm.models', 'class': 'Bloom'}, 'gptneox': {'module': 'bigdl.llm.models', 'class': 'Gptneox'}, 'starcoder': {'module': 'bigdl.llm.models', 'class': 'Starcoder'}, 'chatglm': {'module': 'bigdl.llm.ggml.model.chatglm', 'class': 'ChatGLM'}}
    # Loaded native model instance (populated by validate_environment).
    client: Any
    # Path to the GGML model file on disk.
    model_path: str
    lora_base: Optional[str] = None
    lora_path: Optional[str] = None
    # Model-construction parameters (forwarded to the native class).
    n_ctx: int = Field(512, alias='n_ctx')
    n_parts: int = Field((- 1), alias='n_parts')
    seed: int = Field((- 1), alias='seed')
    f16_kv: bool = Field(True, alias='f16_kv')
    logits_all: bool = Field(False, alias='logits_all')
    vocab_only: bool = Field(False, alias='vocab_only')
    use_mlock: bool = Field(False, alias='use_mlock')
    n_threads: Optional[int] = Field((- 1), alias='n_threads')
    n_batch: Optional[int] = Field(512, alias='n_batch')
    n_gpu_layers: Optional[int] = Field(0, alias='n_gpu_layers')
    # Generation parameters (see _default_params).
    suffix: Optional[str] = Field(None)
    max_tokens: Optional[int] = 256
    temperature: Optional[float] = 0.8
    top_p: Optional[float] = 0.95
    logprobs: Optional[int] = Field(None)
    echo: Optional[bool] = False
    stop: Optional[List[str]] = []
    repeat_penalty: Optional[float] = 1.1
    top_k: Optional[int] = 40
    last_n_tokens_size: Optional[int] = 64
    use_mmap: Optional[bool] = True
    # Whether _call streams tokens instead of one-shot generation.
    streaming: bool = True
    # NOTE(review): this looks like a stripped pydantic decorator (e.g.
    # `@root_validator`) for validate_environment below — confirm upstream.
    _validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Load the native model for the configured family into values['client'].

        Raises ValueError for an unknown family or a load failure, and
        ModuleNotFoundError when bigdl-llm is not installed.
        """
        model_path = values['model_path']
        model_param_names = ['lora_path', 'lora_base', 'n_ctx', 'n_parts', 'seed', 'f16_kv', 'logits_all', 'vocab_only', 'use_mlock', 'n_threads', 'n_batch', 'use_mmap', 'last_n_tokens_size']
        model_params = {k: values[k] for k in model_param_names}
        if (values['n_gpu_layers'] is not None):
            model_params['n_gpu_layers'] = values['n_gpu_layers']
        model_family = values['model_family'].lower()
        if (model_family not in list(values['family_info'].keys())):
            raise ValueError(("Model family '%s' is not supported. Valid values are %s" % (values['model_family'], ','.join(list(values['family_info'].keys())))))
        try:
            # Resolve and instantiate the family's native binding class.
            b_info = values['family_info'][model_family]
            module = importlib.import_module(b_info['module'])
            class_ = getattr(module, b_info['class'])
            values['client'] = class_(model_path, **model_params)
        except ImportError:
            raise ModuleNotFoundError('Could not import bigdl-llm library. Please install the bigdl-llm library to use this embedding model: pip install bigdl-llm')
        except Exception as e:
            raise ValueError(f'Could not load model from path: {model_path}. Please make sure the model family {model_family} matches the model you want to load.Received error {e}')
        return values
    # NOTE(review): _default_params and _identifying_params are read as
    # attributes (no call parentheses) elsewhere in this class, which implies
    # stripped @property decorators — confirm upstream.
    def _default_params(self) -> Dict[(str, Any)]:
        """Default generation parameters passed to the native client."""
        return {'suffix': self.suffix, 'max_tokens': self.max_tokens, 'temperature': self.temperature, 'top_p': self.top_p, 'logprobs': self.logprobs, 'echo': self.echo, 'stop_sequences': self.stop, 'repeat_penalty': self.repeat_penalty, 'top_k': self.top_k}
    def _identifying_params(self) -> Dict[(str, Any)]:
        """Parameters that identify this LLM instance (for LangChain caching)."""
        return {**{'model_path': self.model_path, 'model_family': self.model_family}, **self._default_params}
    def _llm_type(self) -> str:
        """LangChain LLM type tag."""
        return 'BigDL'
    def _get_parameters(self, stop: Optional[List[str]]=None) -> Dict[(str, Any)]:
        """Merge default params with the effective stop sequences.

        Raises ValueError when stop sequences are supplied both at
        construction time and per call.
        """
        if (self.stop and (stop is not None)):
            raise ValueError('`stop` found in both the input and default params.')
        params = self._default_params
        # The native client expects 'stop', not 'stop_sequences'.
        params.pop('stop_sequences')
        params['stop'] = (self.stop or stop or [])
        return params
    def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None) -> str:
        """Generate a completion; streams token-by-token when self.streaming."""
        if self.streaming:
            combined_text_output = ''
            for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager):
                combined_text_output += token['choices'][0]['text']
            return combined_text_output
        else:
            params = self._get_parameters(stop)
            result = self.client(prompt=prompt, **params)
            return result['choices'][0]['text']
    def stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None) -> Generator[(Dict, None, None)]:
        """Yield raw completion chunks, notifying the run manager per token."""
        params = self._get_parameters(stop)
        result = self.client(prompt=prompt, stream=True, **params)
        for chunk in result:
            token = chunk['choices'][0]['text']
            log_probs = chunk['choices'][0].get('logprobs', None)
            if run_manager:
                run_manager.on_llm_new_token(token=token, verbose=self.verbose, log_probs=log_probs)
            (yield chunk)
    def get_num_tokens(self, text: str) -> int:
        """Token count of `text` under the native model's tokenizer."""
        tokenized_text = self.client.tokenize(text.encode('utf-8'))
        return len(tokenized_text)
def rollouts_to_dataframe(rollouts: Iterable[Rollout], avg_over_repeats: bool=True, index_value_precision: Optional[int]=None) -> pd.DataFrame:
    """Tabulate rollouts as a DataFrame indexed by their rollout parameters.

    Each row merges a rollout's params and metrics. When
    `index_value_precision` is given, param columns are rounded and stringified
    before indexing; when `avg_over_repeats` is set, repeated param combos are
    mean-aggregated.
    """
    pairs = [(rollout.rollout_params, rollout.metrics) for rollout in rollouts]
    index_cols = list(pairs[0][0].keys())
    df = pd.DataFrame([{**params, **metrics} for params, metrics in pairs])
    if index_value_precision is not None:
        # Round-and-stringify so float params group reliably.
        for col in index_cols:
            df[col] = df[col].round(index_value_precision).astype(str)
    if index_cols:
        if avg_over_repeats:
            df = df.groupby(index_cols).mean().reset_index()
        df = df.set_index(index_cols)
    return df
def fastfood_torched(x, DD, param_list=None, device=0):
    """Project vector `x` (dim dd) to dimension DD via the Fastfood transform.

    `param_list`, when truthy, supplies cached (BB, Pi, GG, divisor, LL)
    parameters; otherwise fresh ones are sampled via fastfood_vars.
    """
    dd = x.size(0)
    if param_list:
        (BB, Pi, GG, divisor, LL) = param_list
    else:
        (BB, Pi, GG, divisor, LL) = fastfood_vars(DD, device=device)
    # Zero-pad up to the transform length LL.
    padded = F.pad(x, pad=(0, (LL - dd)), value=0, mode='constant')
    # H G Pi H B x — two Walsh-Hadamard passes around permutation/scaling.
    v = torch.mul(BB, padded)
    v = fast_walsh_hadamard_torched(v, 0, normalize=False)
    v = v[Pi]
    v = torch.mul(v, GG)
    v = fast_walsh_hadamard_torched(v, 0, normalize=False)
    return torch.div(v[:DD], (divisor * np.sqrt((float(DD) / LL))))
class BasicConv(nn.Module):
    """Conv (2d/3d, optionally transposed) -> optional BatchNorm -> optional LeakyReLU."""

    def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, bn=True, relu=True, **kwargs):
        """
        Args:
            in_channels, out_channels: channel counts for the conv layer.
            deconv: use a transposed convolution.
            is_3d: use 3D conv/norm variants.
            bn: apply batch normalization in forward.
            relu: apply LeakyReLU in forward.
            **kwargs: forwarded to the conv constructor (kernel_size, ...).
        """
        super(BasicConv, self).__init__()
        self.relu = relu
        self.use_bn = bn
        # Pick the conv/norm flavors; modules are created in the same order
        # (conv, then bn) so parameter initialization matches the original.
        if is_3d:
            conv_cls = nn.ConvTranspose3d if deconv else nn.Conv3d
            norm_cls = nn.BatchNorm3d
        else:
            conv_cls = nn.ConvTranspose2d if deconv else nn.Conv2d
            norm_cls = nn.BatchNorm2d
        self.conv = conv_cls(in_channels, out_channels, bias=False, **kwargs)
        self.bn = norm_cls(out_channels)
        self.LeakyReLU = nn.LeakyReLU()

    def forward(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.relu:
            out = self.LeakyReLU(out)
        return out
def read_data(filenames, is_test=False):
    """Load annotation instances for each dataset basename in `filenames`.

    Each basename (filename with its known suffix stripped) contributes its
    ascii text, tokenized text (with '<s>' prepended and trailing '</s>'
    removed), line info from lines_to_info, and one instance per annotation
    link (or per line in the configured test range when `is_test`).

    Fix: all three data files were previously opened via bare `open(...)`
    without ever being closed (handle leaks); reads now use `with` blocks.
    Relies on module-level `args` (test_start/test_end) and `lines_to_info`.
    """
    instances = []
    done = set()
    for filename in filenames:
        # Strip a known suffix to recover the shared basename.
        name = filename
        for ending in ['.annotation.txt', '.ascii.txt', '.raw.txt', '.tok.txt']:
            if filename.endswith(ending):
                name = filename[:(- len(ending))]
        if (name in done):
            continue
        done.add(name)
        with open(name + '.ascii.txt') as ascii_file:
            text_ascii = [l.strip().split() for l in ascii_file]
        text_tok = []
        with open(name + '.tok.txt') as tok_file:
            for l in tok_file:
                l = l.strip().split()
                # Normalize sentence markers: drop trailing </s>, ensure leading <s>.
                if ((len(l) > 0) and (l[(- 1)] == '</s>')):
                    l = l[:(- 1)]
                if ((len(l) == 0) or (l[0] != '<s>')):
                    l.insert(0, '<s>')
                text_tok.append(l)
        (info, target_info) = lines_to_info(text_ascii)
        links = {}
        if is_test:
            # Test mode: one empty link list per line in the evaluation range.
            for i in range(args.test_start, min(args.test_end, len(text_ascii))):
                links[i] = []
        else:
            # Each annotation line links its max line number to its min.
            with open(name + '.annotation.txt') as ann_file:
                for line in ann_file:
                    nums = [int(v) for v in line.strip().split() if (v != '-')]
                    links.setdefault(max(nums), []).append(min(nums))
        for (link, nums) in links.items():
            instances.append(((name + '.annotation.txt'), link, nums, text_ascii, text_tok, info, target_info))
    return instances
def get_tensorflow_node_shape_attr(node):
    """Return the node's static shape as a list of ints, or None when unknown.

    Unknown rank, a missing 'shape' attribute, or a malformed attribute all
    yield None (the shape lookup is deliberately best-effort).
    """
    dims = None
    try:
        shape = get_tensorflow_node_attr(node, 'shape')
        if (not shape.unknown_rank):
            dims = [int(d.size) for d in shape.dim]
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; anything else keeps dims = None.
        pass
    return dims
def alpha_repair_compilation_rates() -> CompilationRateResult:
    """Mean AlphaRepair patch compilation rates at top-30/100/1000 and overall (<=5000).

    Scans per-bug '*result.txt' files; for each, counts patches without a
    'Compiled Error'/'Syntax Error' marker within several rank cutoffs. The
    top-30/100/1000 rates divide by the cutoff itself; the 5000 rate divides
    by the number of patches actually seen. Returns four formatted
    percentage strings.
    """
    compile_rates_1000: list[float] = []
    compile_rates_100: list[float] = []
    compile_rates_30: list[float] = []
    compile_rates_5000: list[float] = []
    for file in Path('data/large/uniapr_pfl').glob('*result.txt'):
        # Only bugs from the considered Defects4J v1 subset are expected.
        assert any((file.name.startswith(bug_id) for bug_id in rq_utils.D4J1_CONSIDERED_BUGS))
        t = file.read_text()
        # NOTE(review): str.split('') raises ValueError (empty separator) for
        # any non-empty string — the patch delimiter was probably a
        # non-printable character lost in transit; restore it before running.
        patches = t.split('')
        top_100_compile = 0
        top_30_compile = 0
        top_1000_compile = 0
        compiled = 0
        total = 0
        for patch in patches:
            patch_number = 0
            if ('Patch Number :' in patch):
                # The last 'Patch Number :' line in the chunk wins.
                for line in patch.splitlines():
                    if ('Patch Number :' in line):
                        patch_number = int(line.split('Patch Number :')[(- 1)].strip())
                if (('Compiled Error' not in patch) and ('Syntax Error' not in patch)):
                    assert (patch_number != 0)
                    if (patch_number <= 100):
                        top_100_compile += 1
                    if (patch_number <= 30):
                        top_30_compile += 1
                    if (patch_number <= 1000):
                        top_1000_compile += 1
                    compiled += 1
                # Stop counting this bug past rank 5000.
                if (patch_number > 5000):
                    break
                total += 1
        if (total == 0):
            continue
        compile_rates_5000.append((compiled / total))
        # Per-cutoff rates are normalized by the cutoff, not by `total`.
        compile_rates_1000.append((top_1000_compile / 1000))
        compile_rates_30.append((top_30_compile / 30))
        compile_rates_100.append((top_100_compile / 100))
    return (f'{(st.mean(compile_rates_30) * 100):.0f}%', f'{(st.mean(compile_rates_100) * 100):.0f}%', f'{(st.mean(compile_rates_1000) * 100):.0f}%', f'{(st.mean(compile_rates_5000) * 100):.0f}%')
def create_path(node1, node2, corridor_radius, map):
    """Carve an L-shaped free corridor (cells set to 0) between two grid nodes, in place.

    The corridor has half-width `corridor_radius`; a coin flip chooses which of
    the two possible corner orientations the L takes. `get_constellation`
    distinguishes the two diagonal layouts of the node pair, and each branch
    clears one horizontal strip and one vertical strip of the grid.
    NOTE: the parameter `map` shadows the builtin; renaming it would break
    keyword callers, so it is kept as-is.
    """
    coin_flip = np.random.random()
    # Sort endpoint coordinates so all ranges/slices run low -> high.
    (x1, x2) = sorted([node1[0], node2[0]])
    (y1, y2) = sorted([node1[1], node2[1]])
    if (get_constellation(node1, node2) == 1):
        if (coin_flip >= 0.5):
            # Corner at (x1, y1): horizontal strip along x1, vertical along y1.
            map[(slice((x1 - corridor_radius), ((x1 + corridor_radius) + 1)), range((y1 - corridor_radius), ((y2 + 1) + corridor_radius), 1))] = 0
            map[(range((x1 - corridor_radius), ((x2 + 1) + corridor_radius), 1), slice((y1 - corridor_radius), ((y1 + corridor_radius) + 1)))] = 0
        else:
            # Corner at (x2, y2).
            map[(slice((x2 - corridor_radius), ((x2 + corridor_radius) + 1)), range((y1 - corridor_radius), ((y2 + 1) + corridor_radius), 1))] = 0
            map[(range((x1 - corridor_radius), ((x2 + 1) + corridor_radius), 1), slice((y2 - corridor_radius), ((y2 + corridor_radius) + 1)))] = 0
    elif (coin_flip >= 0.5):
        # Other constellation, corner at (x1, y2).
        map[(slice((x1 - corridor_radius), ((x1 + corridor_radius) + 1)), range((y1 - corridor_radius), ((y2 + 1) + corridor_radius), 1))] = 0
        map[(range((x1 - corridor_radius), ((x2 + 1) + corridor_radius), 1), slice((y2 - corridor_radius), ((y2 + corridor_radius) + 1)))] = 0
    else:
        # Other constellation, corner at (x2, y1).
        map[(slice((x2 - corridor_radius), ((x2 + corridor_radius) + 1)), range((y1 - corridor_radius), ((y2 + 1) + corridor_radius), 1))] = 0
        map[(range((x1 - corridor_radius), ((x2 + 1) + corridor_radius), 1), slice((y1 - corridor_radius), ((y1 + corridor_radius) + 1)))] = 0
def import_fns(path, file, fns_name='StaticFns'):
    """Import module `path/file` (as a dotted path) and return its `fns_name` attribute."""
    module_path = os.path.join(path, file).replace('/', '.')
    module = importlib.import_module(module_path)
    return getattr(module, fns_name)
class TuckER(torch.nn.Module):
    """TuckER knowledge-graph embedding model (Balazevic et al., 2019)."""

    def __init__(self, d, d1, d2, **kwargs):
        """
        Args:
            d: dataset object exposing `entities` and `relations` sequences.
            d1: entity embedding dimension.
            d2: relation embedding dimension.
            **kwargs: requires 'input_dropout', 'hidden_dropout1' and
                'hidden_dropout2' rates; optionally 'device' for the core
                tensor W (default 'cuda' — generalized from the previously
                hard-coded CUDA-only placement so the model can also be
                built on CPU; default behavior is unchanged).
        """
        super(TuckER, self).__init__()
        device = kwargs.get('device', 'cuda')
        self.E = torch.nn.Embedding(len(d.entities), d1, padding_idx=0)
        self.R = torch.nn.Embedding(len(d.relations), d2, padding_idx=0)
        # Core interaction tensor W ~ U[-1, 1), shape (d2, d1, d1).
        self.W = torch.nn.Parameter(torch.tensor(np.random.uniform((- 1), 1, (d2, d1, d1)), dtype=torch.float, device=device, requires_grad=True))
        self.input_dropout = torch.nn.Dropout(kwargs['input_dropout'])
        self.hidden_dropout1 = torch.nn.Dropout(kwargs['hidden_dropout1'])
        self.hidden_dropout2 = torch.nn.Dropout(kwargs['hidden_dropout2'])
        self.loss = torch.nn.BCELoss()
        self.bn0 = torch.nn.BatchNorm1d(d1)
        self.bn1 = torch.nn.BatchNorm1d(d1)

    def init(self):
        """Xavier-initialize the entity and relation embedding tables."""
        xavier_normal_(self.E.weight.data)
        xavier_normal_(self.R.weight.data)

    def forward(self, e1_idx, r_idx):
        """Score every entity as the object of (e1, r).

        Args:
            e1_idx: LongTensor of subject-entity indices, shape (batch,).
            r_idx: LongTensor of relation indices, shape (batch,).

        Returns:
            Sigmoid scores over all entities, shape (batch, num_entities).
        """
        e1 = self.E(e1_idx)
        x = self.bn0(e1)
        x = self.input_dropout(x)
        x = x.view((- 1), 1, e1.size(1))
        r = self.R(r_idx)
        # Contract W with the relation embedding: per-example (d1, d1) matrix.
        W_mat = torch.mm(r, self.W.view(r.size(1), (- 1)))
        W_mat = W_mat.view((- 1), e1.size(1), e1.size(1))
        W_mat = self.hidden_dropout1(W_mat)
        x = torch.bmm(x, W_mat)
        x = x.view((- 1), e1.size(1))
        x = self.bn1(x)
        x = self.hidden_dropout2(x)
        # Score against every entity embedding.
        x = torch.mm(x, self.E.weight.transpose(1, 0))
        pred = torch.sigmoid(x)
        return pred
_func
def ClearAllIntegers(data):
    """Recursively zero every int in a nested structure of lists and dicts.

    Lists and dicts are mutated in place (and also returned); any other leaf
    is returned unchanged. Exact `type(...) == int` checks are kept on purpose
    so bools (a subclass of int) pass through untouched, as before.
    """
    if (type(data) == int):
        return 0
    if (type(data) == list):
        for i in range(0, len(data)):
            data[i] = ClearAllIntegers(data[i])
    if (type(data) == dict):
        # Fix: iterating a dict directly yields only keys, so the original
        # `for (k, v) in data:` raised on every dict (cannot unpack a key).
        for (k, v) in data.items():
            data[k] = ClearAllIntegers(v)
    return data
class _PostSampledEmdAutoSavingManager(_EmdAutoSavingManager):
    """EMD auto-saving manager for clouds sampled after inference (post-sampled)."""

    def path(self):
        # Result file keyed by sample count and model id.
        filename = ('%s.json' % self._model_id)
        return get_eval_path('emd', 'postsampled', str(self._n_samples), filename)

    def get_inferred_cloud_dataset(self):
        # pre_sampled=False distinguishes this manager from the pre-sampled one.
        return clouds.get_inferred_cloud_dataset(
            pre_sampled=False,
            model_id=self._model_id,
            n_samples=self._n_samples,
            **self._kwargs,
        )
def compute_nas_score(gpu, model, resolution, batch_size):
    """Zero-cost NAS proxy: gradient norm after one random forward/backward pass.

    Args:
        gpu: CUDA device index, or None to run on CPU.
        model: network to score; modified in place (gaussian re-init, gradients).
        resolution: square spatial size of the random probe input.
        batch_size: number of random samples in the probe batch.

    Returns:
        float: L2 norm over all parameter gradients after one backward pass.
    """
    model.train()
    model.requires_grad_(True)
    model.zero_grad()
    if (gpu is not None):
        torch.cuda.set_device(gpu)
        model = model.cuda(gpu)
    network_weight_gaussian_init(model)
    input = torch.randn(size=[batch_size, 3, resolution, resolution])
    if (gpu is not None):
        input = input.cuda(gpu)
    output = model(input)
    num_classes = output.shape[1]
    # Random one-hot targets: the loss value itself is irrelevant, only the
    # magnitude of the resulting gradients is used for scoring.
    y = torch.randint(low=0, high=num_classes, size=[batch_size])
    one_hot_y = F.one_hot(y, num_classes).float()
    if (gpu is not None):
        one_hot_y = one_hot_y.cuda(gpu)
    loss = cross_entropy(output, one_hot_y)
    loss.backward()
    # Accumulate squared norms as a Python float so the final sqrt is valid
    # even if the model has no gradients at all (the original called
    # torch.sqrt on the int 0 in that case, which raises a TypeError).
    norm2_sum = 0.0
    with torch.no_grad():
        for p in model.parameters():
            if (hasattr(p, 'grad') and (p.grad is not None)):
                norm2_sum += float(torch.norm(p.grad) ** 2)
    grad_norm = norm2_sum ** 0.5
    return grad_norm
class LayoutLMv3FeatureExtractor(LayoutLMv3ImageProcessor):
    """Deprecated alias of LayoutLMv3ImageProcessor; warns on construction."""

    def __init__(self, *args, **kwargs) -> None:
        message = 'The class LayoutLMv3FeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use LayoutLMv3ImageProcessor instead.'
        warnings.warn(message, FutureWarning)
        super().__init__(*args, **kwargs)
class DataArguments():
    # NOTE(review): these are dataclass-style field() declarations but no
    # @dataclass decorator is visible here -- presumably one is applied at
    # the use site (e.g. via HfArgumentParser); confirm, otherwise the bare
    # field() calls have no effect.
    corpus_path: str = field()        # input corpus file
    out_corpus_dir: str = field()     # output directory for processed corpus
    query_path: str = field()         # input query file
    out_query_dir: str = field()      # output directory for processed queries
    qrel_path: str = field(default=None)  # optional relevance-judgments file
def postprocess(B, graph_thres=0.3):
    """Zero out weak edges of B, then prune it until it forms a DAG."""
    pruned = np.copy(B)
    weak_edges = (np.abs(pruned) <= graph_thres)
    pruned[weak_edges] = 0
    (dag, _) = threshold_till_dag(pruned)
    return dag
def parse_log(root: Path, name: str='') -> Dict:
    """Parse a training ``log.txt`` under *root* into a result dict.

    Recognised line formats:
      * indented ``- key : value`` pairs      -> result['params']
      * lines starting with ``| epoch``       -> per-step training metrics
      * lines starting with ``| Eval``        -> validation metrics

    Args:
        root: directory containing ``log.txt``.
        name: optional label prepended (in parentheses) to the run name.

    Returns:
        dict with keys 'path', 'name', 'params', 'train', 'valid'.
    """
    # Hoisted and raw: the original built f'{field}\s+...' patterns, where
    # \s and \d are invalid escape sequences in a non-raw string
    # (DeprecationWarning, and a SyntaxWarning/error in newer Pythons).
    param_re = re.compile(r'^\s+- (\w+) : (.+)')

    def _metric(field: str, line: str) -> float:
        # Extract the first number following the field label on the line.
        return float(re.search(rf'{field}\s+(\d+\.?\d*)', line).groups()[0])

    with root.joinpath('log.txt').open() as f:
        result = {'path': root, 'name': (f'({name}) {root.name}' if name else root.name), 'params': {}, 'train': [], 'valid': []}
        for line in f:
            param_match = param_re.match(line)
            if param_match:
                (key, value) = param_match.groups()
                result['params'][key] = value
            elif line.startswith('| epoch'):
                result['train'].append({field: _metric(field, line) for field in ['step', 'epoch', 'lr', 'loss', 'ppl']})
            elif line.startswith('| Eval'):
                result['valid'].append({field: _metric(field, line) for field in ['Eval', 'step', 'valid loss', 'valid ppl']})
    return result
def generate_problem_data(P, q, A, l, u, problem_name, sols_data={}):
    """Generate a C header ``<problem_name>/data.h`` embedding a QP problem for OSQP tests.

    Args:
        P: quadratic cost matrix (n x n).
        q: linear cost vector.
        A: constraint matrix (m x n).
        l, u: lower / upper constraint bounds.
        problem_name: output directory and symbol prefix for generated code.
        sols_data: extra named solution values to embed; supported value types
            are status strings, numpy arrays, ints and floats. (The shared
            default dict is never mutated here, so it is safe.)
    """
    n = P.shape[0]
    m = A.shape[0]
    # Map solver status strings onto the corresponding OSQP status macros.
    status_macros = {
        'optimal': 'OSQP_SOLVED',
        'optimal_inaccurate': 'OSQP_SOLVED_INACCURATE',
        'primal_infeasible': 'OSQP_PRIMAL_INFEASIBLE',
        'primal_infeasible_inaccurate': 'OSQP_PRIMAL_INFEASIBLE_INACCURATE',
        'dual_infeasible': 'OSQP_DUAL_INFEASIBLE',
        'dual_infeasible_inaccurate': 'OSQP_DUAL_INFEASIBLE_INACCURATE',
    }
    # Context manager guarantees the header is closed even if a writer raises
    # (the original relied on an explicit f.close() and could leak on error).
    with open((problem_name + '/data.h'), 'w') as f:
        f.write((('#ifndef ' + problem_name.upper()) + '_DATA_H\n'))
        f.write((('#define ' + problem_name.upper()) + '_DATA_H\n'))
        f.write('#include "osqp.h"\n')
        f.write('\n\n')
        f.write('/* create additional data and solutions structure */\n')
        f.write('typedef struct {\n')
        # Struct member declaration for each extra solution value.
        for (key, value) in sols_data.items():
            if isinstance(value, str):
                # Status strings are stored as c_int status codes.
                f.write(('c_int %s;\n' % key))
            elif isinstance(value, np.ndarray):
                # NOTE(review): numpy scalars are not instances of builtin
                # int/float, so integer numpy arrays typically fail both of
                # these checks -- this mirrors the original behaviour; confirm
                # it matches the intended codegen.
                if isinstance(value.flatten(order='F')[0], int):
                    f.write(('c_int * %s;\n' % key))
                elif isinstance(value.flatten(order='F')[0], float):
                    f.write(('c_float * %s;\n' % key))
            elif isinstance(value, int):
                f.write(('c_int %s;\n' % key))
            elif isinstance(value, float):
                f.write(('c_float %s;\n' % key))
        f.write(('} %s_sols_data;\n\n' % problem_name))
        f.write('/* function prototypes */\n')
        f.write(('OSQPData * generate_problem_%s();\n' % problem_name))
        f.write(('void clean_problem_%s(OSQPData * data);\n' % problem_name))
        f.write(('%s_sols_data * generate_problem_%s_sols_data();\n' % (problem_name, problem_name)))
        f.write(('void clean_problem_%s_sols_data(%s_sols_data * data);\n' % (problem_name, problem_name)))
        f.write('\n\n')
        # --- generate_problem_<name>(): allocates and fills OSQPData ---
        f.write('/* function to generate QP problem data */\n')
        f.write(('OSQPData * generate_problem_%s(){\n\n' % problem_name))
        f.write('OSQPData * data = (OSQPData *)c_malloc(sizeof(OSQPData));\n\n')
        f.write('// Problem dimensions\n')
        write_int(f, n, 'n', 'data')
        write_int(f, m, 'm', 'data')
        f.write('\n')
        f.write('// Problem vectors\n')
        write_vec_float(f, l, 'l', 'data')
        write_vec_float(f, u, 'u', 'data')
        write_vec_float(f, q, 'q', 'data')
        f.write('\n')
        write_mat_sparse(f, A, 'A', 'data')
        write_mat_sparse(f, P, 'P', 'data')
        f.write('return data;\n\n')
        f.write('}\n\n')
        # --- clean_problem_<name>(): frees everything allocated above ---
        f.write('/* function to clean problem data structure */\n')
        f.write(('void clean_problem_%s(OSQPData * data){\n\n' % problem_name))
        f.write('// Clean vectors\n')
        clean_vec(f, 'l', 'data')
        clean_vec(f, 'u', 'data')
        clean_vec(f, 'q', 'data')
        f.write('\n')
        f.write('//Clean Matrices\n')
        clean_mat(f, 'A', 'data')
        clean_mat(f, 'P', 'data')
        f.write('\n')
        f.write('c_free(data);\n\n')
        f.write('}\n\n')
        # --- generate_problem_<name>_sols_data(): fills the sols struct ---
        f.write('/* function to define solutions and additional data struct */\n')
        f.write(('%s_sols_data * generate_problem_%s_sols_data(){\n\n' % (problem_name, problem_name)))
        f.write(('%s_sols_data * data = (%s_sols_data *)c_malloc(sizeof(%s_sols_data));\n\n' % (problem_name, problem_name, problem_name)))
        for (key, value) in sols_data.items():
            if isinstance(value, str):
                # Unknown status strings are silently skipped, as before.
                if (value in status_macros):
                    f.write(('data->%s = %s;\n' % (key, status_macros[value])))
            elif isinstance(value, np.ndarray):
                if isinstance(value.flatten(order='F')[0], int):
                    write_vec_int(f, value.flatten(order='F'), key, 'data')
                elif isinstance(value.flatten(order='F')[0], float):
                    write_vec_float(f, value.flatten(order='F'), key, 'data')
            elif isinstance(value, int):
                write_int(f, value, key, 'data')
            elif isinstance(value, float):
                write_float(f, value, key, 'data')
        f.write('\nreturn data;\n\n')
        f.write('}\n\n')
        # --- clean_problem_<name>_sols_data(): frees embedded arrays ---
        f.write('/* function to clean solutions and additional data struct */\n')
        f.write(('void clean_problem_%s_sols_data(%s_sols_data * data){\n\n' % (problem_name, problem_name)))
        for (key, value) in sols_data.items():
            if isinstance(value, np.ndarray):
                clean_vec(f, key, 'data')
        f.write('\nc_free(data);\n\n')
        f.write('}\n\n')
        f.write('#endif\n')
class FTB_block(nn.Module):
    """Feature-transform block: a 1x1 channel projection followed by a
    residual pair of dilated 3x3 convolutions (dilation 2, padding 2)."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        # 1x1 projection from dim_in to dim_out channels (no bias).
        self.conv1 = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=2, dilation=2, bias=True)
        self.bn1 = nn.BatchNorm2d(self.dim_out, momentum=0.5)
        self.relu = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=2, dilation=2, bias=False)

    def forward(self, x):
        # The 1x1 projection output doubles as the residual shortcut.
        projected = self.conv1(x)
        y = self.relu(self.bn1(self.conv2(projected)))
        y = self.conv3(y)
        # Residual add, then the final activation.
        return self.relu(y + projected)
def get_memory(tn_orig, rpn):
    """Estimate the peak memory of contracting a tensor network in a given order.

    Args:
        tn_orig: tensor network; cloned so the caller's network is untouched.
        rpn: contraction sequence in reverse Polish notation -- tensor indices
            are operands and -1 means "contract the two most recent results".

    Returns:
        Peak memory requirement of the full contraction (cost[0][1]).

    Each stack entry is a tuple (tensor_index, peak, mem_start, mem_end):
    the surviving tensor index, the peak memory seen while producing it,
    and the memory occupied at the start/end of producing it.
    """
    tn = tn_orig.clone()
    cost = []
    for item in rpn:
        if (item == (- 1)):
            # Operator: pop the two most recent operands and contract them.
            c1 = cost.pop()
            c0 = cost.pop()
            index1 = c1[0]
            index0 = c0[0]
            t0 = tn.tensors[index0]
            t1 = tn.tensors[index1]
            # bc: bonds contracted away; br0/br1: bonds remaining on each side.
            (bc, br0, br1) = tn.find_bonds(index0, index1)
            mem_start = (c0[2] + c1[2])
            # Result size = product of the dimensions of all remaining bonds.
            mem_end = 1.0
            for b in (br0 + br1):
                mem_end *= BOND_DIMS[b]
            # Peak: presumably the max over interleavings of building each
            # operand while holding the other, plus holding the result with
            # both finished operands -- confirm against the cost model used
            # elsewhere in this project.
            mem_req = max((c0[1] + c1[2]), (c0[1] + c1[3]), (c0[2] + c1[1]), (c0[3] + c1[1]), ((mem_end + c0[3]) + c1[3]))
            tn = tn.contract(index0, index1, bc, br0, br1)
            # The contracted result keeps the first operand's index.
            cost.append((index0, mem_req, mem_start, mem_end))
        else:
            # Operand: a leaf tensor; its size is the product of its bond dims.
            t = tn.tensors[item]
            val = 1.0
            for b in t.bonds:
                val *= BOND_DIMS[b]
            cost.append((item, val, val, val))
    return cost[0][1]
class Gather(Function):
    # Autograd op that gathers CUDA tensors from several GPUs onto one target
    # device along `dim`, scattering gradients back in backward.
    # NOTE(review): forward/backward are written without @staticmethod, i.e.
    # in the legacy autograd.Function style -- confirm this matches the torch
    # version this project pins.
    def forward(ctx, target_device, dim, *inputs):
        # All inputs must already live on CUDA devices.
        assert all(map((lambda i: i.is_cuda), inputs))
        ctx.target_device = target_device
        ctx.dim = dim
        # Remember each input's source device and its size along `dim` so
        # backward can split the gradient back into per-device pieces.
        ctx.input_gpus = tuple(map((lambda i: i.get_device()), inputs))
        ctx.input_sizes = tuple(map((lambda i: i.size(ctx.dim)), inputs))
        return comm.gather(inputs, ctx.dim, ctx.target_device)
    def backward(ctx, grad_output):
        # Two Nones correspond to the non-tensor args (target_device, dim);
        # the gradient is scattered back to the original devices/sizes.
        return ((None, None) + Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output))
class TensorflowFeatureExtractor(BaseFeatureExtractor):
    """Feature extractor backed by a TensorFlow model."""

    def __init__(self, device: Optional[str]=None):
        super().__init__(backend='tensorflow')
        self.device = device
        if isinstance(device, str):
            # Translate torch-style device strings ('cuda:0') to TF ('gpu:0').
            self.device = device.replace('cuda', 'gpu')

    def _predict(self, batch_images):
        # uint8 batches are normalised per-image via self.transform first.
        if (batch_images.dtype == tf.uint8):
            batch_images = tf.map_fn(self.transform, batch_images, dtype=tf.float32)
        scope = (tf.device(self.device) if self.device else no_scope())
        with scope:
            return self.model(batch_images, training=False)[0]

    def __call__(self, obj, **kwargs):
        import tensorflow as tf
        if isinstance(obj, sf.WSI):
            # Whole-slide input: compute a feature grid and mask empty tiles.
            grid = features_from_slide(self, obj, preprocess_fn=self.transform, **kwargs)
            return np.ma.masked_where((grid == sf.heatmap.MASK), grid)
        if kwargs:
            # Keyword arguments are only meaningful for slide-level extraction.
            raise ValueError(f'{self.__class__.__name__} does not accept keyword arguments when extracting features from a batch of images.')
        assert (obj.dtype in (tf.float32, tf.uint8))
        return self._predict(obj)
class SoftCrossEntropyLoss(nn.Module):
    """Cross-entropy with label smoothing, delegating to label_smoothed_nll_loss."""

    __constants__ = ['reduction', 'ignore_index', 'smooth_factor']

    def __init__(self, reduction: str='mean', smooth_factor: Optional[float]=None, ignore_index: Optional[int]=(- 100), dim: int=1):
        super().__init__()
        # Smoothing epsilon; None means the downstream helper's default.
        self.smooth_factor = smooth_factor
        # Target value to exclude from the loss (e.g. padding).
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.dim = dim

    def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        # Smoothed NLL over log-probabilities along self.dim.
        log_prob = F.log_softmax(y_pred, dim=self.dim)
        return label_smoothed_nll_loss(
            log_prob,
            y_true,
            epsilon=self.smooth_factor,
            ignore_index=self.ignore_index,
            reduction=self.reduction,
            dim=self.dim,
        )
def train(opt):
    """Main training loop for an image-captioning model with teacher distillation.

    Builds the data loader, a (possibly different, frozen) teacher model and
    the student model, then trains until ``opt.max_epochs``, periodically
    validating and checkpointing ('model-best' tracks the best CIDEr, or
    -val_loss when language eval is off). Runs are resumable via
    ``opt.start_from``.
    """
    (opt.use_fc, opt.use_att) = utils.if_use_feat(opt.caption_model)
    # Force both feature types on regardless of the model default above.
    opt.use_att = True
    opt.use_fc = True
    if opt.use_box:
        # Bounding-box features append 5 extra values per region.
        opt.att_feat_size = (opt.att_feat_size + 5)
    loader = DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length
    tb_summary_writer = (tb and tb.SummaryWriter(opt.checkpoint_path))
    infos = {}
    histories = {}
    if (opt.start_from is not None):
        # Resume: load infos and check the saved model is compatible.
        with open(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl')), 'rb') as f:
            infos = utils.pickle_load(f)
            saved_model_opt = infos['opt']
            need_be_same = ['caption_model', 'rnn_type', 'rnn_size', 'num_layers']
            for checkme in need_be_same:
                assert (vars(saved_model_opt)[checkme] == vars(opt)[checkme]), ("Command line argument and saved model disagree on '%s' " % checkme)
        if os.path.isfile(os.path.join(opt.start_from, (('histories_' + opt.id) + '.pkl'))):
            with open(os.path.join(opt.start_from, (('histories_' + opt.id) + '.pkl')), 'rb') as f:
                histories = utils.pickle_load(f)
    else:
        # Fresh run: initialise bookkeeping state.
        infos['iter'] = 0
        infos['epoch'] = 0
        infos['iterators'] = loader.iterators
        infos['split_ix'] = loader.split_ix
        infos['vocab'] = loader.get_vocab()
        infos['opt'] = opt
    iteration = infos.get('iter', 0)
    epoch = infos.get('epoch', 0)
    val_result_history = histories.get('val_result_history', {})
    loss_history = histories.get('loss_history', {})
    lr_history = histories.get('lr_history', {})
    ss_prob_history = histories.get('ss_prob_history', {})
    loader.iterators = infos.get('iterators', loader.iterators)
    loader.split_ix = infos.get('split_ix', loader.split_ix)
    if (opt.load_best_score == 1):
        # NOTE(review): best_val_score is only bound when load_best_score == 1
        # but is read unconditionally during checkpointing below -- confirm
        # load_best_score is always 1 in practice.
        best_val_score = infos.get('best_val_score', None)
    if (opt.teacher_caption_model != opt.caption_model):
        # Teacher differs from the student: clone opt and patch architecture
        # hyper-parameters to match the published teacher checkpoints.
        teacher_opt = copy.deepcopy(opt)
        teacher_opt.caption_model = opt.teacher_caption_model
        teacher_opt.start_from = None
        if (opt.teacher_caption_model == 'topdown'):
            teacher_opt.num_layers = 1
            teacher_opt.rnn_size = 1024
        if (opt.teacher_caption_model == 'show_tell'):
            teacher_opt.num_layers = 1
            teacher_opt.rnn_size = 1024
        if (opt.teacher_caption_model == 'transformer'):
            teacher_opt.num_layers = 6
            teacher_opt.rnn_size = 2048
    else:
        teacher_opt = opt
    teacher = models.setup(teacher_opt).cuda()
    # Load the pretrained teacher weights from fixed checkpoint paths.
    if (opt.teacher_caption_model == 'topdown'):
        teacher.load_state_dict(torch.load('save/up-down-baseline/model-best.pth'))
    elif (opt.teacher_caption_model == 'show_tell'):
        teacher.load_state_dict(torch.load('save/student-showtell-teacher-updown-1.5/model-best.pth'))
    elif (opt.teacher_caption_model == 'transformer'):
        teacher.load_state_dict(torch.load('save/nsc-transformer-baseline/model-best.pth'))
    teacher = torch.nn.DataParallel(teacher)
    teacher.eval()
    model = models.setup(opt).cuda()
    if ((opt.start_from == None) and (opt.caption_model == 'sat') and (opt.K > 1)):
        # Warm-start the 'sat' student from the transformer baseline weights.
        model.load_state_dict(torch.load('save/nsc-transformer-baseline/model-best.pth'))
    dp_model = torch.nn.DataParallel(model)
    lw_model = LossWrapper(model, teacher, opt)
    dp_lw_model = torch.nn.DataParallel(lw_model)
    epoch_done = True
    dp_lw_model.train()
    # Optimiser selection: Noam schedule for transformer-style models,
    # optional reduce-on-plateau, otherwise plain optimiser + manual decay.
    if opt.noamopt:
        assert ((opt.caption_model == 'transformer') or (opt.caption_model == 'sat')), 'noamopt can only work with transformer'
        optimizer = utils.get_std_opt(model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
        optimizer._step = iteration
    elif opt.reduce_on_plateau:
        optimizer = utils.build_optimizer(model.parameters(), opt)
        optimizer = utils.ReduceLROnPlateau(optimizer, factor=0.5, patience=3)
    else:
        optimizer = utils.build_optimizer(model.parameters(), opt)
    if ((vars(opt).get('start_from', None) is not None) and os.path.isfile(os.path.join(opt.start_from, 'optimizer.pth'))):
        optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))
    def save_checkpoint(model, infos, optimizer, histories=None, append=''):
        # Persist model weights, optimiser state, infos and (optionally)
        # histories under opt.checkpoint_path, suffixed with `append`.
        if (len(append) > 0):
            append = ('-' + append)
        if (not os.path.isdir(opt.checkpoint_path)):
            os.makedirs(opt.checkpoint_path)
        checkpoint_path = os.path.join(opt.checkpoint_path, ('model%s.pth' % append))
        torch.save(model.state_dict(), checkpoint_path)
        print('model saved to {}'.format(checkpoint_path))
        optimizer_path = os.path.join(opt.checkpoint_path, ('optimizer%s.pth' % append))
        torch.save(optimizer.state_dict(), optimizer_path)
        with open(os.path.join(opt.checkpoint_path, (('infos_' + opt.id) + ('%s.pkl' % append))), 'wb') as f:
            utils.pickle_dump(infos, f)
        if histories:
            with open(os.path.join(opt.checkpoint_path, (('histories_' + opt.id) + ('%s.pkl' % append))), 'wb') as f:
                utils.pickle_dump(histories, f)
    try:
        while True:
            if epoch_done:
                # Per-epoch schedule updates: LR decay, scheduled-sampling
                # probability, and enabling self-critical training.
                if ((not opt.noamopt) and (not opt.reduce_on_plateau)):
                    if ((epoch > opt.learning_rate_decay_start) and (opt.learning_rate_decay_start >= 0)):
                        frac = ((epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every)
                        decay_factor = (opt.learning_rate_decay_rate ** frac)
                        opt.current_lr = (opt.learning_rate * decay_factor)
                    else:
                        opt.current_lr = opt.learning_rate
                    utils.set_lr(optimizer, opt.current_lr)
                if ((epoch > opt.scheduled_sampling_start) and (opt.scheduled_sampling_start >= 0)):
                    frac = ((epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every)
                    opt.ss_prob = min((opt.scheduled_sampling_increase_prob * frac), opt.scheduled_sampling_max_prob)
                    model.ss_prob = opt.ss_prob
                if ((opt.self_critical_after != (- 1)) and (epoch >= opt.self_critical_after)):
                    sc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    sc_flag = False
                epoch_done = False
            start = time.time()
            data = loader.get_batch('train')
            print('Read data:', (time.time() - start))
            torch.cuda.synchronize()
            start = time.time()
            # Move the batch to GPU (None entries, e.g. att_masks, stay None).
            tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
            tmp = [(_ if (_ is None) else _.cuda()) for _ in tmp]
            (fc_feats, att_feats, labels, masks, att_masks) = tmp
            optimizer.zero_grad()
            model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag)
            loss = model_out['loss'].mean()
            loss.backward()
            utils.clip_gradient(optimizer, opt.grad_clip)
            optimizer.step()
            train_loss = loss.item()
            torch.cuda.synchronize()
            end = time.time()
            if (not sc_flag):
                print('iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, train_loss, (end - start)))
            else:
                print('iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, model_out['reward'].mean(), (end - start)))
            iteration += 1
            if data['bounds']['wrapped']:
                # Loader wrapped around: one full pass over the split is done.
                epoch += 1
                epoch_done = True
            if ((iteration % opt.losses_log_every) == 0):
                # Tensorboard + in-memory history logging.
                add_summary_value(tb_summary_writer, 'train_loss', train_loss, iteration)
                if opt.noamopt:
                    opt.current_lr = optimizer.rate()
                elif opt.reduce_on_plateau:
                    opt.current_lr = optimizer.current_lr
                add_summary_value(tb_summary_writer, 'learning_rate', opt.current_lr, iteration)
                add_summary_value(tb_summary_writer, 'scheduled_sampling_prob', model.ss_prob, iteration)
                if sc_flag:
                    add_summary_value(tb_summary_writer, 'avg_reward', model_out['reward'].mean(), iteration)
                loss_history[iteration] = (train_loss if (not sc_flag) else model_out['reward'].mean())
                lr_history[iteration] = opt.current_lr
                ss_prob_history[iteration] = model.ss_prob
            infos['iter'] = iteration
            infos['epoch'] = epoch
            infos['iterators'] = loader.iterators
            infos['split_ix'] = loader.split_ix
            if ((iteration % opt.save_checkpoint_every) == 0):
                # Periodic validation + checkpointing.
                eval_kwargs = {'split': 'val', 'dataset': opt.input_json}
                eval_kwargs.update(vars(opt))
                (val_loss, predictions, lang_stats) = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)
                if opt.reduce_on_plateau:
                    if ('CIDEr' in lang_stats):
                        optimizer.scheduler_step((- lang_stats['CIDEr']))
                    else:
                        optimizer.scheduler_step(val_loss)
                add_summary_value(tb_summary_writer, 'validation loss', val_loss, iteration)
                if (lang_stats is not None):
                    for (k, v) in lang_stats.items():
                        add_summary_value(tb_summary_writer, k, v, iteration)
                val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
                # Model-selection score: CIDEr when language eval is on,
                # otherwise negative validation loss.
                if (opt.language_eval == 1):
                    current_score = lang_stats['CIDEr']
                else:
                    current_score = (- val_loss)
                best_flag = False
                if ((best_val_score is None) or (current_score > best_val_score)):
                    best_val_score = current_score
                    best_flag = True
                infos['best_val_score'] = best_val_score
                histories['val_result_history'] = val_result_history
                histories['loss_history'] = loss_history
                histories['lr_history'] = lr_history
                histories['ss_prob_history'] = ss_prob_history
                save_checkpoint(model, infos, optimizer, histories)
                if opt.save_history_ckpt:
                    save_checkpoint(model, infos, optimizer, append=str(iteration))
                if best_flag:
                    save_checkpoint(model, infos, optimizer, append='best')
            if ((epoch >= opt.max_epochs) and (opt.max_epochs != (- 1))):
                break
    except (RuntimeError, KeyboardInterrupt):
        # Best-effort checkpoint on crash or Ctrl-C, then print the trace.
        print('Save ckpt on exception ...')
        save_checkpoint(model, infos, optimizer)
        print('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
class AlbuSeg3d(Repr):
    """Apply a 2D albumentations transform slice-by-slice to a 3D volume,
    replaying the exact same sampled parameters on every slice and mask."""

    def __init__(self, albumentation: Callable):
        self.albumentation = A.ReplayCompose([albumentation])

    def __call__(self, inp: np.ndarray, tar: np.ndarray):
        tar = tar.astype(np.uint8)
        out_inp = np.copy(inp)
        out_tar = np.copy(tar)
        # Sample the augmentation parameters once on the first slice...
        replay_dict = self.albumentation(image=inp[0])['replay']
        # ...then replay them identically on every (image, mask) slice pair.
        for (idx, (img_slice, msk_slice)) in enumerate(zip(inp, tar)):
            replayed = A.ReplayCompose.replay(replay_dict, image=img_slice, mask=msk_slice)
            out_inp[idx] = replayed['image']
            out_tar[idx] = replayed['mask']
        return (out_inp, out_tar)
class TestWindowedIterator(TestBase):
    """WindowedIterator yields sliding 3-windows and restores from checkpoints."""

    def test(self):
        for n in [0, 2, 3, 8, 9, 10, 11, 12]:
            seq = list(range(n))
            it = WindowedIterator(NativeCheckpointableIterator(seq), 3)
            # Consume a prefix, snapshot the state, then drain twice from it.
            prefix = list(itertools.islice(it, ((n * 3) // 10)))
            checkpoint = it.getstate()
            tail_first = list(it)
            it.setstate(checkpoint)
            tail_second = list(it)
            expected = list(zip(seq, itertools.islice(seq, 1, None), itertools.islice(seq, 2, None)))
            self.assertListEqual((prefix + tail_first), expected)
            # Restoring the checkpoint must reproduce exactly the same tail.
            self.assertListEqual(tail_first, tail_second)
class TestDatasets(unittest.TestCase):
    """Smoke tests for the dataset wrappers: sizes, shapes and determinism."""

    def _test_dset(self, dset, n_train, n_test, shape, output_size):
        # Generic checks: split lengths, sample shape, output dimensionality.
        dset = dset()
        self.assertEqual(len(dset.train_data), n_train)
        self.assertEqual(len(dset.test_data), n_test)
        self.assertEqual(dset.shape, shape)
        self.assertEqual(dset.train_data[[0]][0][0].shape, shape)
        self.assertEqual(dset.output_size, output_size)

    # BUG FIX: this line was a bare tuple expression with no effect -- it was
    # clearly meant to be the skipUnless decorator gating this slow test
    # behind the TEST_DATASETS environment variable.
    @unittest.skipUnless(os.getenv('TEST_DATASETS', False), 'Datasets need not be tested all the time')
    def test_datasets(self):
        # (dataset factory, n_train, n_test, sample shape, output size)
        datasets = [(CIFAR10, 50000, 10000, (32, 32, 3), 10), (CIFARSanityCheck, 40000, 2000, (32, 32, 3), 2), (MNIST, 60000, 10000, (28, 28, 1), 10), (partial(CanevetICML2016, N=256), (int(((256 ** 2) - ((256 ** 2) * 0.33))) + 1), int(((256 ** 2) * 0.33)), (2,), 2), (compose(partial(OntheflyAugmentedImages, augmentation_params=dict(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)), CIFAR10), 500000, 10000, (32, 32, 3), 10), (partial(PennTreeBank, 20), 887521, 70390, (20,), 10000)]
        for args in datasets:
            self._test_dset(*args)

    def test_zca_whitening(self):
        dset = ZCAWhitening(InMemoryImageDataset(np.random.rand(1000, 32, 32, 3), (np.random.rand(1000, 1) * 10).astype(np.int32), np.random.rand(1000, 32, 32, 3), (np.random.rand(1000, 1) * 10).astype(np.int32)))
        self.assertEqual(len(dset.train_data), 1000)
        self.assertEqual(len(dset.test_data), 1000)
        idxs = np.random.choice(len(dset.train_data), 100)
        (x_r, y_r) = dset.train_data[idxs]
        self.assertEqual(x_r.shape, (100, 32, 32, 3))
        self.assertEqual(y_r.shape, (100, 10))
        # Repeated indexing must be deterministic.
        for i in range(10):
            (x, y) = dset.train_data[idxs]
            self.assertTrue(np.all((x_r == x)))
            self.assertTrue(np.all((y_r == y)))

    def test_image_augmentation(self):
        orig_dset = InMemoryImageDataset(np.random.rand(1000, 32, 32, 3), (np.random.rand(1000, 1) * 10).astype(np.int32), np.random.rand(1000, 32, 32, 3), (np.random.rand(1000, 1) * 10).astype(np.int32))
        # Augmented views of the same indices must also be deterministic.
        dset = OntheflyAugmentedImages(orig_dset, dict(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False))
        idxs = np.random.choice(len(dset.train_data), 100)
        (x_r, y_r) = dset.train_data[idxs]
        for i in range(10):
            (x, y) = dset.train_data[idxs]
            self.assertTrue(np.all((x_r == x)))
            self.assertTrue(np.all((y_r == y)))
        # Same check with feature-wise centering and ZCA whitening enabled.
        dset = OntheflyAugmentedImages(orig_dset, dict(featurewise_center=True, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=True, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False))
        idxs = np.random.choice(len(dset.train_data), 100)
        (x_r, y_r) = dset.train_data[idxs]
        for i in range(10):
            (x, y) = dset.train_data[idxs]
            self.assertTrue(np.all((x_r == x)))
            self.assertTrue(np.all((y_r == y)))

    def test_generator(self):
        def data():
            while True:
                (yield (np.random.rand(32, 10), np.random.rand(32, 1)))
        # Infinite train stream: length is undefined, slicing/indexing works.
        dset = GeneratorDataset(data())
        self.assertEqual(dset.shape, (10,))
        self.assertEqual(dset.output_size, 1)
        with self.assertRaises(RuntimeError):
            len(dset.train_data)
        (x, y) = dset.train_data[:10]
        self.assertEqual(10, len(x))
        self.assertEqual(10, len(y))
        (x, y) = dset.train_data[:100]
        self.assertEqual(100, len(x))
        self.assertEqual(100, len(y))
        (x, y) = dset.train_data[[1, 2, 17]]
        self.assertEqual(3, len(x))
        self.assertEqual(3, len(y))
        (x, y) = dset.train_data[99]
        self.assertEqual(1, len(x))
        self.assertEqual(1, len(y))
        # Without test data, any test access must raise.
        with self.assertRaises(RuntimeError):
            len(dset.test_data)
        with self.assertRaises(RuntimeError):
            dset.test_data[0]
        # In-memory test data alongside the generator train stream.
        test_data = (np.random.rand(120, 10), np.random.rand(120, 1))
        dset = GeneratorDataset(data(), test_data)
        self.assertEqual(dset.shape, (10,))
        self.assertEqual(dset.output_size, 1)
        with self.assertRaises(RuntimeError):
            len(dset.train_data)
        (x, y) = dset.train_data[:10]
        self.assertEqual(10, len(x))
        self.assertEqual(10, len(y))
        (x, y) = dset.train_data[:100]
        self.assertEqual(100, len(x))
        self.assertEqual(100, len(y))
        (x, y) = dset.train_data[[1, 2, 17]]
        self.assertEqual(3, len(x))
        self.assertEqual(3, len(y))
        (x, y) = dset.train_data[99]
        self.assertEqual(1, len(x))
        self.assertEqual(1, len(y))
        self.assertEqual(120, len(dset.test_data))
        self.assertTrue(np.all((test_data[0] == dset.test_data[:][0])))
        idxs = [10, 20, 33]
        self.assertTrue(np.all((test_data[1][idxs] == dset.test_data[idxs][1])))
        # Generator test data with an explicit declared length.
        dset = GeneratorDataset(data(), data(), 120)
        self.assertEqual(120, len(dset.test_data))
        (x, y) = dset.test_data[:10]
        self.assertEqual(10, len(x))
        self.assertEqual(10, len(y))
        (x, y) = dset.test_data[:100]
        self.assertEqual(100, len(x))
        self.assertEqual(100, len(y))
        (x, y) = dset.test_data[[1, 2, 17]]
        self.assertEqual(3, len(x))
        self.assertEqual(3, len(y))
        (x, y) = dset.test_data[99]
        self.assertEqual(1, len(x))
        self.assertEqual(1, len(y))
_cache()
def setup_logger(name='log', logging_level='debug') -> logging.Logger:
    """Create (or fetch) a named logger that prints to stdout.

    Args:
        name: logger name passed to logging.getLogger.
        logging_level: level name, case-insensitive ('debug', 'info',
            'warning', ...), or a numeric logging level. Unknown names raise
            AttributeError.

    Returns:
        Configured logging.Logger with propagation disabled.
    """
    if isinstance(logging_level, str):
        # Accept any standard level name, not just 'debug'/'info' (the
        # original left other strings unmapped, which made
        # logger.setLevel('warning') raise ValueError).
        logging_level = getattr(logging, logging_level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(logging_level)
    logger.propagate = False
    # Only attach a handler the first time this name is configured, so
    # repeated calls do not stack duplicate handlers (and duplicate output).
    if (not logger.handlers):
        formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt='%m/%d %H:%M:%S')
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging_level)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
def main(exp, args, num_gpu):
    """Run MOT evaluation with a SORT tracker, then score MOTChallenge metrics.

    Loads the model described by ``exp``, runs tracking over the validation
    loader (writing one result file per sequence), and evaluates the results
    against the half-val ground truth with motmetrics.
    """
    if (args.seed is not None):
        # Deterministic run: seed RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed testing. This will turn on the CUDNN deterministic setting, ')
    is_distributed = (num_gpu > 1)
    cudnn.benchmark = True
    rank = args.local_rank
    file_name = os.path.join(exp.output_dir, args.experiment_name)
    if (rank == 0):
        # Only rank 0 creates the output directories.
        # NOTE(review): results_folder is only bound when rank == 0 but is
        # used unconditionally below -- presumably evaluation is launched
        # such that rank 0 runs this; confirm for multi-process use.
        os.makedirs(file_name, exist_ok=True)
        results_folder = os.path.join(file_name, 'track_results_sort')
        os.makedirs(results_folder, exist_ok=True)
    setup_logger(file_name, distributed_rank=rank, filename='val_log.txt', mode='a')
    logger.info('Args: {}'.format(args))
    # Command-line overrides for test-time thresholds and input size.
    if (args.conf is not None):
        exp.test_conf = args.conf
    if (args.nms is not None):
        exp.nmsthre = args.nms
    if (args.tsize is not None):
        exp.test_size = (args.tsize, args.tsize)
    model = exp.get_model()
    logger.info('Model Summary: {}'.format(get_model_info(model, exp.test_size)))
    val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test)
    evaluator = MOTEvaluator(args=args, dataloader=val_loader, img_size=exp.test_size, confthre=exp.test_conf, nmsthre=exp.nmsthre, num_classes=exp.num_classes)
    torch.cuda.set_device(rank)
    model.cuda(rank)
    model.eval()
    if ((not args.speed) and (not args.trt)):
        # Load checkpoint weights unless benchmarking speed or using TensorRT.
        if (args.ckpt is None):
            ckpt_file = os.path.join(file_name, 'best_ckpt.pth.tar')
        else:
            ckpt_file = args.ckpt
        logger.info('loading checkpoint')
        loc = 'cuda:{}'.format(rank)
        ckpt = torch.load(ckpt_file, map_location=loc)
        model.load_state_dict(ckpt['model'])
        logger.info('loaded checkpoint done.')
    if is_distributed:
        model = DDP(model, device_ids=[rank])
    if args.fuse:
        # Fuse conv+bn layers for faster inference.
        logger.info('\tFusing model...')
        model = fuse_model(model)
    if args.trt:
        assert ((not args.fuse) and (not is_distributed) and (args.batch_size == 1)), 'TensorRT model is not support model fusing and distributed inferencing!'
        trt_file = os.path.join(file_name, 'model_trt.pth')
        assert os.path.exists(trt_file), 'TensorRT model is not found!\n Run tools/trt.py first!'
        # Decoding happens outside the TRT engine via the head's decoder.
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
    else:
        trt_file = None
        decoder = None
    # Run tracking; per-sequence result files are written to results_folder.
    (*_, summary) = evaluator.evaluate_sort(model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder)
    logger.info(('\n' + summary))
    # ---- MOTChallenge metric computation over the written results ----
    mm.lap.default_solver = 'lap'
    gt_type = '_val_half'
    print('gt_type', gt_type)
    gtfiles = glob.glob(os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type)))
    print('gt_files', gtfiles)
    # Result files, excluding any eval* bookkeeping files.
    tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if (not os.path.basename(f).startswith('eval'))]
    logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
    logger.info("Default LAP solver '{}'".format(mm.lap.default_solver))
    logger.info('Loading files.')
    # Ground truth keyed by sequence directory; results keyed by file stem.
    gt = OrderedDict([(Path(f).parts[(- 3)], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles])
    ts = OrderedDict([(os.path.splitext(Path(f).parts[(- 1)])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=(- 1))) for f in tsfiles])
    mh = mm.metrics.create()
    (accs, names) = compare_dataframes(gt, ts)
    logger.info('Running metrics')
    metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']
    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
    # Normalise count metrics into rates for readability.
    div_dict = {'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}
    for divisor in div_dict:
        for divided in div_dict[divisor]:
            summary[divided] = (summary[divided] / summary[divisor])
    fmt = mh.formatters
    change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', 'partially_tracked', 'mostly_lost']
    for k in change_fmt_list:
        # Render the normalised counts with the 'mota' percentage format.
        fmt[k] = fmt['mota']
    print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))
    # Second pass with the standard MOTChallenge metric set.
    metrics = (mm.metrics.motchallenge_metrics + ['num_objects'])
    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
    logger.info('Completed')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.