code stringlengths 101 5.91M |
|---|
def TrainSVM(Xtrain, ytrain):
    """Grid-search an SVM classifier over RBF / linear / poly kernels.

    Args:
        Xtrain: training feature matrix.
        ytrain: training labels.

    Returns:
        The fitted GridSearchCV object (refit on the best parameters);
        best parameters are printed as a side effect.
    """
    SVM_GRID_PARAMS = [
        {'kernel': ['rbf'], 'gamma': [0.001, 0.01, 0.1, 1], 'C': [0.1, 1, 10, 100, 1000]},
        {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000]},
        {'kernel': ['poly'], 'degree': [3], 'gamma': [0.1, 0.01, 0.001]},
    ]
    # 'balanced' reweights classes inversely to their frequency; the base
    # kernel/gamma here are placeholders overridden by the grid search.
    clf = sklearn.svm.SVC(class_weight='balanced', probability=True, gamma='scale', kernel='linear')
    # BUGFIX: `iid` was deprecated in scikit-learn 0.22 and removed in 0.24;
    # passing it raises TypeError on modern releases, so it is dropped.
    clf = sklearn.model_selection.GridSearchCV(
        clf, SVM_GRID_PARAMS, scoring=None, n_jobs=-1, refit=True, cv=3,
        verbose=3, pre_dispatch='2*n_jobs', error_score='raise',
        return_train_score=True)
    clf.fit(Xtrain, ytrain)
    print(clf.best_params_)
    return clf
def eval_noise_wer(trans_path, result_path):
    """Compute WER of Whisper transcripts under additive noise.

    Groups transcript files by SNR level (dB) and noise class, compares each
    against its ground-truth transcript, and writes a CSV of shape
    [num_snr_levels x 50 classes] to `result_path`.

    Transcript filenames are expected to look like
    '<db>_<class>_<utt-id>_mix_...' so db/class/utt-id can be parsed out.
    """
    whisper_trans = fileList(trans_path)
    truth_path = '/data/sls/scratch/yuangong/whisper-a/src/noisy_exp/ground_truth_trans/'
    truth_trans = fileList(truth_path)
    print(len(whisper_trans), len(truth_trans))

    def preprocess_text(cur_trans):
        # Normalize case and strip punctuation so WER ignores both.
        cur_trans = jiwer.ToUpperCase()(cur_trans)
        cur_trans = jiwer.RemovePunctuation()(cur_trans)
        return cur_trans

    all_wer_list = []
    for db in [-20, -15, -10, -5, 0, 5, 10, 15, 20]:
        wer_list = []
        for cla in range(50):
            cur_trans_list, cur_truth_list = [], []
            for trans_name in whisper_trans:
                fname = trans_name.split('/')[-1]
                if int(fname.split('_')[0]) == db and int(fname.split('_')[1]) == cla:
                    with open(trans_name, 'r') as f:
                        cur_trans = f.read()
                    cur_trans_list.append(preprocess_text(cur_trans))
                    # BUGFIX(consistency): reuse truth_path instead of
                    # repeating the same literal a second time.
                    cur_truth_name = truth_path + fname.split('_mix_')[0].split('_')[2] + '.txt'
                    with open(cur_truth_name, 'r') as f:
                        cur_truth = f.read()
                    cur_truth_list.append(preprocess_text(cur_truth))
            wer = calculate_wer(cur_trans_list, cur_truth_list)
            wer_list.append(wer)
        all_wer_list.append(wer_list)
    np.savetxt(result_path, all_wer_list, delimiter=',')
def get_frame_shift(data_folder):
    """Return the frame shift (seconds) reported by Kaldi's
    utils/data/get_frame_shift.sh for `data_folder`."""
    # NOTE(review): `run` must capture stdout for this to work — a plain
    # subprocess.run call without stdout=PIPE / capture_output=True leaves
    # process.stdout as None and the decode below would fail; confirm which
    # `run` is imported at file level.
    process = run(['utils/data/get_frame_shift.sh', data_folder])
    # stdout is bytes; the script prints a single float.
    return float(process.stdout.decode('utf-8'))
class SigmoidActivationMixin():
    """Mixin providing a sigmoid activation scaled to lie in (0, upperbound),
    together with its log-space counterpart."""

    def init_activation(self, upperbound=1.0, eps=0.0001, **kwargs):
        """Set up the scaled sigmoid and log-sigmoid callables.

        Args:
            upperbound: scale factor applied to the sigmoid output.
            eps: slack added in `upperbound_cond_int`.
            **kwargs: ignored (accepted for mixin-chaining compatibility).
        """
        self.eps = eps
        self._activation_func = nn.Sigmoid()
        self._log_activation_func = nn.LogSigmoid()
        self.upperbound = torch.tensor(upperbound)

        def _scaled(x):
            # upperbound * sigmoid(x)
            return self.upperbound * self._activation_func(x)

        def _log_scaled(x):
            # log(upperbound * sigmoid(x)) computed stably in log space.
            return torch.log(self.upperbound) + self._log_activation_func(x)

        self.activation_func = _scaled
        self.log_activation_func = _log_scaled

    def upperbound_cond_int(self, history=None, dim=None) -> float:
        """Return (upperbound + eps) * dim; `dim` defaults to self.dim.
        `history` is unused (kept for interface compatibility)."""
        if dim is None:
            dim = self.dim
        return (self.upperbound + self.eps) * dim
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax/JAX scheduler implementing a Karras-style variance-exploding
    sampler: a geometric sigma schedule from sigma_max to sigma_min, a
    stochastic "churn" step (`add_noise_to_input`), an Euler predictor
    (`step`) and a second-order corrector (`step_correct`).

    All mutable data lives in an explicit `KarrasVeSchedulerState`, keeping
    the scheduler object itself stateless (JAX-friendly).
    """

    def has_state(self):
        # This scheduler carries explicit functional state.
        return True

    # NOTE(review): bare expression below is almost certainly a mangled
    # decorator (e.g. `@register_to_config`) that should wrap __init__ —
    # confirm against the original source.
    _to_config

    def __init__(self, sigma_min: float=0.02, sigma_max: float=100, s_noise: float=1.007, s_churn: float=80, s_min: float=0.05, s_max: float=50):
        # Body intentionally empty; presumably the config decorator records
        # the arguments on `self.config` — TODO confirm.
        pass

    def create_state(self):
        """Return a fresh scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple=()) -> KarrasVeSchedulerState:
        """Populate `state` with descending timesteps and the sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::(- 1)].copy()
        # sigma_i = sigma_max^2 * (sigma_min^2 / sigma_max^2)^(i / (N - 1)):
        # geometric interpolation between sigma_max^2 and sigma_min^2.
        schedule = [((self.config.sigma_max ** 2) * (((self.config.sigma_min ** 2) / (self.config.sigma_max ** 2)) ** (i / (num_inference_steps - 1)))) for i in timesteps]
        return state.replace(num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps)

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray) -> Tuple[(jnp.ndarray, float)]:
        """Churn step: raise the noise level sigma -> sigma_hat and add the
        matching amount of fresh noise to `sample`.

        Returns:
            (sample_hat, sigma_hat).
        """
        # Churn only inside the configured [s_min, s_max] sigma band.
        if (self.config.s_min <= sigma <= self.config.s_max):
            gamma = min((self.config.s_churn / state.num_inference_steps), ((2 ** 0.5) - 1))
        else:
            gamma = 0
        # NOTE(review): random.split(key, num=1) returns a key array of shape
        # (1, 2); passing that directly to random.normal may not be accepted
        # by newer JAX versions — confirm.
        key = random.split(key, num=1)
        eps = (self.config.s_noise * random.normal(key=key, shape=sample.shape))
        sigma_hat = (sigma + (gamma * sigma))
        # Add just enough noise that total variance goes sigma^2 -> sigma_hat^2.
        sample_hat = (sample + ((((sigma_hat ** 2) - (sigma ** 2)) ** 0.5) * eps))
        return (sample_hat, sigma_hat)

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool=True) -> Union[(FlaxKarrasVeOutput, Tuple)]:
        """Euler step from sigma_hat to sigma_prev.

        Reconstructs the denoised estimate as
        sample_hat + sigma_hat * model_output, then follows the implied
        derivative to the previous noise level.
        """
        pred_original_sample = (sample_hat + (sigma_hat * model_output))
        derivative = ((sample_hat - pred_original_sample) / sigma_hat)
        sample_prev = (sample_hat + ((sigma_prev - sigma_hat) * derivative))
        if (not return_dict):
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool=True) -> Union[(FlaxKarrasVeOutput, Tuple)]:
        """Second-order correction: recompute the derivative at the predicted
        point and redo the step with the average of both derivatives."""
        pred_original_sample = (sample_prev + (sigma_prev * model_output))
        derivative_corr = ((sample_prev - pred_original_sample) / sigma_prev)
        sample_prev = (sample_hat + ((sigma_prev - sigma_hat) * ((0.5 * derivative) + (0.5 * derivative_corr))))
        if (not return_dict):
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        # Not supported by this scheduler.
        raise NotImplementedError()
def _get_plugin():
    """Compile and return the TensorFlow CUDA plugin built from tf_all.cu
    (located next to this module), with the GL options plus the
    NVDR_TENSORFLOW define passed to nvcc."""
    cuda_source = os.path.join(os.path.dirname(__file__), 'tf_all.cu')
    nvcc_options = _get_gl_opts() + ['-DNVDR_TENSORFLOW']
    return plugin_loader.get_plugin(cuda_source, extra_nvcc_options=nvcc_options)
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool=False, cache_dir: Optional[str]=None):
    """Build the LM dataset described by `args`.

    Chooses between line-by-line datasets (optionally with a Chinese
    whole-word-mask reference file) and a block TextDataset, for either the
    eval split or one-or-many training files.
    """

    def _dataset(file_path, ref_path=None):
        # Guard-clause form of the original nested conditionals.
        if not args.line_by_line:
            return TextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir)
        if ref_path is None:
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        if not (args.whole_word_mask and args.mlm):
            raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
        return LineByLineWithRefDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path)

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    if args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    return _dataset(args.train_data_file, args.train_ref_file)
def switch_interpolation(transforms: Callable[([T], Union[(T, Tensor)])], *, interp: str):
    """Generator-based context: temporarily set the interpolation mode of
    every transform that has an `interpolation` attribute, then restore the
    previous modes after the `yield`.

    Args:
        transforms: transform (pipeline) passed through get_transform().
        interp: 'bilinear' or 'nearest'.
    """
    assert interp in ('bilinear', 'nearest'), interp
    previous_inters = OrderedDict()
    transforms = get_transform(transforms)
    interpolation = get_interpolation(interp)
    for id_, t in enumerate(transforms):
        if hasattr(t, 'interpolation'):
            previous_inters[id_] = t.interpolation
            t.interpolation = interpolation
    # BUGFIX: restore in a finally block so the original interpolation modes
    # come back even if the code running during the yield raises.
    try:
        yield
    finally:
        transforms = get_transform(transforms)
        for id_, t in enumerate(transforms):
            if hasattr(t, 'interpolation'):
                t.interpolation = previous_inters[id_]
class ResNet18(nn.Module):
    """ResNet-18 backbone followed by a single linear classification head."""

    def __init__(self, num_classes, pretrained=True, include_top=False, freeze=True):
        """Build the backbone (optionally pretrained/frozen, without its own
        top) and attach a Linear(backbone_output, num_classes) head."""
        super().__init__()
        trunk = vision.resnet18(pretrained=pretrained, include_top=include_top, freeze=freeze)
        classifier = nn.Linear(trunk.get_output_size(), num_classes)
        self.model = nn.Sequential(trunk, classifier)

    def forward(self, x):
        """Run backbone + head on `x` and return the logits."""
        return self.model(x)
class MILFeatures():
    """Computes and stores last-layer activations, predictions and attention
    scores from a multiple-instance-learning (MIL) model over a set of
    feature bags, keyed by slide name.

    With model=None an empty container is created (used by `from_df`).
    """

    uq = None

    def __init__(self, model: Optional[Union[(str, 'torch.nn.Module')]], bags: Union[(np.ndarray, List[str], str)], *, slides: Optional[list]=None, config: Optional[_TrainerConfig]=None, dataset: Optional['sf.Dataset']=None, attention_pooling: Optional[str]='avg', device: Optional[Any]=None) -> None:
        """Resolve bag files, load the model (path or module) and compute
        activations/predictions/attention for every bag.

        Raises:
            ValueError: if attention_pooling is not None, 'avg' or 'max'.
            errors.ModelError: if the model has no get_last_layer_activations.
        """
        if ((attention_pooling is not None) and (attention_pooling not in ('avg', 'max'))):
            raise ValueError("Unrecognized attention pooling strategy '{}'".format(attention_pooling))
        self.attention_pooling = attention_pooling
        # BUGFIX: guard against bags=None so the from_df path (cls(None, None))
        # does not crash while resolving bag files.
        if bags is not None:
            bags = self._find_bags(bags, dataset, slides)
            self.slides = np.array([path_to_name(b) for b in bags])
        else:
            self.slides = None
        if (model is not None):
            (self.model, self.use_lens) = self._load_model(model, config)
            self.set_device(device)
            self.model.to(self.device)
            if (not hasattr(self.model, 'get_last_layer_activations')):
                raise errors.ModelError(f"Model {model.__class__.__name__} is not supported; could not find method 'get_last_layer_activations'")
            (self.num_features, self.predictions, self.attentions, self.activations) = self._get_mil_activations(bags)
            self.locations = self._get_bag_locations(bags)
        else:
            # Empty container; attributes are filled externally (see from_df).
            self.model = None
            self.use_lens = None
            self.device = None
            self.num_features = None
            self.predictions = None
            self.attentions = None
            self.activations = None
            self.locations = None

    def _find_bags(self, bags: Union[(np.ndarray, List[str], str)], dataset: Optional['sf.Dataset'], slides: Optional[List[str]]) -> np.ndarray:
        """Resolve `bags` (directory, dataset or explicit list) to an array of
        .pt bag paths, optionally filtered by slide names or dataset slides."""
        if (isinstance(bags, str) and (dataset is not None)):
            return dataset.pt_files(bags)
        elif (isinstance(bags, str) and slides):
            return np.array([join(bags, f) for f in os.listdir(bags) if (f.endswith('.pt') and (path_to_name(f) in slides))])
        elif isinstance(bags, str):
            return np.array([join(bags, f) for f in os.listdir(bags) if f.endswith('.pt')])
        elif slides:
            return np.array([b for b in bags if (path_to_name(b) in slides)])
        elif dataset:
            return np.array([b for b in bags if (path_to_name(b) in dataset.slides())])
        else:
            return np.array(bags)

    def _get_bag_locations(self, bags: List[str]) -> Optional[Dict[(str, np.ndarray)]]:
        """Load the tile-location index (<slide>.index.npz) next to each bag;
        returns None if any index file is missing."""
        if (bags is None):
            return None
        locations = {}
        for bag in bags:
            slide = path_to_name(bag)
            bag_index = join(dirname(bag), f'{slide}.index.npz')
            if (not exists(bag_index)):
                log.warning(f'Could not find index file for bag {bag}. Unable to determine tile location information.')
                return None
            locations[slide] = np.load(bag_index)['arr_0']
        return locations

    def _load_model(self, model: Union[(str, 'torch.nn.Module')], config: Optional[_TrainerConfig]) -> Tuple[(Callable, bool)]:
        """Load model weights if a path was given; return (model, use_lens)."""
        if isinstance(model, str):
            (model, config) = load_model_weights(model, config)
            if (isinstance(model, Attention_MIL) or isinstance(model, TransMIL)):
                use_lens = config.model_config.use_lens
            else:
                use_lens = False
        else:
            use_lens = isinstance(model, Attention_MIL)
        return (model, use_lens)

    def _get_mil_activations(self, bags: Union[(np.ndarray, List[str])]):
        """Run the model over every bag, collecting last-layer activations,
        softmax predictions and (model-dependent) attention scores.

        Returns:
            (num_features, predictions, attentions, activations).
        """
        if (not self.model):
            return (None, None, None, None)
        y_pred = []
        y_att = []
        hs = []
        log.info('Calculating layer activations...')
        for bag in bags:
            loaded = torch.load(bag).to(self.device)
            loaded = torch.unsqueeze(loaded, dim=0)
            with torch.no_grad():
                if self.use_lens:
                    lens = torch.from_numpy(np.array([loaded.shape[1]])).to(self.device)
                    model_args = (loaded, lens)
                else:
                    model_args = (loaded,)
                if isinstance(self.model, (Attention_MIL, TransMIL)):
                    model_out = self.model(*model_args)
                    h = self.model.get_last_layer_activations(*model_args)
                    att = torch.squeeze(self.model.calculate_attention(*model_args))
                    if ((len(att.shape) == 2) and (not self.attention_pooling)):
                        raise ValueError('Attention pooling required for 2D attention')
                    elif (len(att.shape) == 2):
                        att = self._attention_pool(att)
                    y_att.append(att.cpu().numpy())
                elif isinstance(self.model, (MIL_fc, MIL_fc_mc)):
                    model_out = self.model(*model_args)
                    h = self.model.get_last_layer_activations(*model_args)
                    y_att = None
                else:
                    model_out = self.model(*model_args)[0]
                    (h, A) = self.model.get_last_layer_activations(*model_args)
                    if (A.shape[0] == 1):
                        y_att.append(A.cpu().numpy()[0])
                    else:
                        y_att.append(A.cpu().numpy())
                hs.append(h.cpu())
                yp = torch.nn.functional.softmax(model_out, dim=1).cpu().numpy()
                y_pred.append(yp)
        yp = np.concatenate(y_pred, axis=0)
        (num_features, acts) = self._get_activations(hs)
        atts = self._get_attentions(y_att)
        preds = self._get_predictions(yp)
        return (num_features, preds, atts, acts)

    def _attention_pool(self, att):
        """Pool 2D attention over the last axis using the configured strategy."""
        assert (len(att.shape) == 2)
        if (self.attention_pooling == 'avg'):
            return torch.mean(att, dim=(- 1))
        elif (self.attention_pooling == 'max'):
            return torch.amax(att, dim=(- 1))
        else:
            raise ValueError(f'Unknown attention pooling strategy {self.attention_pooling}')

    def _get_activations(self, hlw):
        """Map slide -> activation array; returns (num_features, dict)."""
        if ((hlw is None) or (self.slides is None)):
            return (None, {})
        activations = {}
        for (slide, h) in zip(self.slides, hlw):
            activations[slide] = h.numpy()
        num_features = hlw[0].shape[1]
        return (num_features, activations)

    def _get_annotations(self, annotations):
        """Return annotations as a DataFrame (reading a CSV path if needed)."""
        if (annotations is None):
            return None
        elif isinstance(annotations, str):
            return pd.read_csv(annotations)
        else:
            return annotations

    def _get_attentions(self, atts):
        """Map slide -> attention array (None passes through)."""
        if ((atts is None) or (self.slides is None)):
            return None
        attentions = {}
        for (slide, att) in zip(self.slides, atts):
            attentions[slide] = att
        return attentions

    def _get_predictions(self, preds):
        """Map slide -> prediction array (None passes through)."""
        if ((preds is None) or (self.slides is None)):
            return None
        predictions = {}
        for (slide, pred) in zip(self.slides, preds):
            predictions[slide] = pred
        return predictions

    def _format(self, column):
        """Parse a stringified float array back into np.ndarray; pass any
        non-string value through unchanged."""
        # BUGFIX: the original compared `type(column) != 'str'` (a type object
        # against a string literal), which is always True and made this
        # method a no-op; use isinstance instead.
        if not isinstance(column, str):
            return column
        numbers = re.findall('-?\\d+\\.\\d+', column)
        return np.array([float(num) for num in numbers])

    def set_device(self, device: Any) -> None:
        """Set self.device: explicit value, None if no model, else the device
        of the model's parameters."""
        if (device is not None):
            self.device = device
        elif (self.model is None):
            self.device = None
        else:
            self.device = next(self.model.parameters()).device
        log.debug(f'Using device {self.device}')

    # BUGFIX: this alternate constructor takes `cls` and calls cls(...), but
    # the original was missing the @classmethod decorator.
    @classmethod
    def from_df(cls, df: 'pd.core.frame.DataFrame', *, annotations: Union[(str, 'pd.core.frame.DataFrame')]=None) -> 'MILFeatures':
        """Rebuild a MILFeatures container from a DataFrame produced by
        to_df() (single- or multi-branch activation/attention columns)."""
        obj = cls(None, None)
        if ('slide' in df.columns):
            obj.slides = df['slide'].values
        elif (df.index.name == 'slide'):
            obj.slides = df.index.values
            df['slide'] = df.index
        else:
            raise ValueError('No slides in DataFrame columns')
        if ('activations' in df.columns):
            df['activations'] = df['activations'].apply(obj._format)
            obj.activations = {s: np.stack(df.loc[(df.slide == s)].activations.values) for s in obj.slides}
        else:
            act_cols = [col for col in df.columns if ('activations_' in col)]
            if act_cols:
                obj.activations = {}
                for c in act_cols:
                    df[c] = df[c].apply(obj._format)
                for s in obj.slides:
                    r = [df.loc[(df.slide == s)][act_cols].values.tolist()[0]]
                    if (len(r[0]) > 2):
                        raise NotImplementedError('More than 1 attention branches not implemented')
                    obj.activations[s] = np.vstack((r[0][0], r[0][1]))
            else:
                raise ValueError('No activations in DataFrame columns')
        if ('predictions' in df.columns):
            df['predictions'] = df['predictions'].apply(obj._format)
            obj.predictions = {s: np.stack(df.loc[(df.slide == s)].predictions.values[0]) for s in obj.slides}
        if ('attentions' in df.columns):
            df['attentions'] = df['attentions'].apply(obj._format)
            obj.attentions = {s: np.stack(df.loc[(df.slide == s)].attentions.values[0]) for s in obj.slides}
        else:
            att_cols = [col for col in df.columns if ('attentions_' in col)]
            if att_cols:
                obj.attentions = {}
                for c in att_cols:
                    df[c] = df[c].apply(obj._format)
                for s in obj.slides:
                    r = [df.loc[(df.slide == s)][att_cols].values.tolist()[0]]
                    if (len(r[0]) > 2):
                        raise NotImplementedError('More than 1 attention branches not implemented')
                    obj.attentions[s] = np.vstack((r[0][0], r[0][1]))
        # BUGFIX: `if annotations:` truth-tests a possible DataFrame, which
        # raises ValueError; compare against None explicitly.
        if annotations is not None:
            obj.annotations = obj._get_annotations(annotations)
        return obj

    def to_df(self, predictions: bool=True, attentions: bool=True) -> pd.core.frame.DataFrame:
        """Export activations (and optionally predictions/attentions) to a
        DataFrame indexed by slide, with per-branch columns when needed."""
        assert (self.activations is not None)
        assert (self.slides is not None)
        index = [s for s in self.slides]
        df_dict = {}
        branches = list(self.activations.values())[0].shape
        if (len(branches) == 1):
            branches = 1
        else:
            branches = branches[0]
        if (branches == 1):
            df_dict.update({'activations': pd.Series([self.activations[s][0] for s in self.slides], index=index)})
        else:
            for b in range(branches):
                name = 'activations_{}'.format(b)
                df_dict.update({name: pd.Series([self.activations[s][b] for s in self.slides], index=index)})
        if (predictions and self.predictions):
            df_dict.update({'predictions': pd.Series([self.predictions[s] for s in self.slides], index=index)})
        if (attentions and self.attentions):
            if (branches == 1):
                df_dict.update({'attentions': pd.Series([(list(self.attentions[s]) if (len(self.attentions[s]) == 1) else self.attentions[s]) for s in self.slides], index=index)})
            else:
                for b in range(branches):
                    name = 'attentions_{}'.format(b)
                    df_dict.update({name: pd.Series([self.attentions[s][b] for s in self.slides], index=index)})
        df = pd.DataFrame.from_dict(df_dict)
        df['slide'] = df.index
        return df

    def map_activations(self, **kwargs) -> 'sf.SlideMap':
        """Build a SlideMap from these features (see sf.SlideMap.from_features)."""
        return sf.SlideMap.from_features(self, **kwargs)
def build_fake_yaml():
    """Write a fake Neural Compressor gradient-sensitivity pruning config
    (four !Pruner entries over BERT attention/intermediate/output weights)
    to ./fake.yaml for workflow tests."""
    # NOTE(review): this literal was wrapped mid-string in the source dump
    # (around "...10%\n exit_policy..."); exact whitespace at the join point
    # may have been lost — confirm against the original test fixture.
    fake_yaml = "\n model:\n name: gradient_sensitivity_prune\n framework: pytorch\n pruning:\n approach:\n weight_compression:\n start_epoch: 0\n end_epoch: 1\n pruners:\n - !Pruner\n start_epoch: 0\n end_epoch: 1\n prune_type: gradient_sensitivity\n update_frequency: 1\n names: [\n 'bert.encoder.layer.*.attention.self.query.weight',\n 'bert.encoder.layer.*.attention.self.query.bias',\n 'bert.encoder.layer.*.attention.self.key.weight',\n 'bert.encoder.layer.*.attention.self.key.bias',\n 'bert.encoder.layer.*.attention.self.value.weight',\n 'bert.encoder.layer.*.attention.self.value.bias',\n ]\n parameters: {\n target: 8,\n normalize: True,\n stride: 64,\n transpose: False,\n importance_inputs: ['head_mask'],\n importance_metric: abs_gradient,\n }\n\n - !Pruner\n start_epoch: 0\n end_epoch: 1\n prune_type: gradient_sensitivity\n update_frequency: 1\n names: [\n 'bert.encoder.layer.*.attention.output.dense.weight',\n ]\n parameters: {\n target: 8,\n normalize: True,\n stride: 64,\n transpose: True,\n importance_inputs: ['head_mask'],\n importance_metric: abs_gradient,\n }\n\n - !Pruner\n prune_type: gradient_sensitivity\n names: [\n 'bert.encoder.layer.*.intermediate.dense.weight',\n 'bert.encoder.layer.*.intermediate.dense.bias',\n ]\n parameters: {\n target: 600,\n normalize: False,\n stride: 1,\n transpose: False,\n importance_inputs: [\n 'bert.encoder.layer.*.intermediate.dense.weight',\n 'bert.encoder.layer.*.intermediate.dense.bias',\n ],\n importance_metric: 'weighted_gradient',\n }\n\n - !Pruner\n prune_type: gradient_sensitivity\n names: [\n 'bert.encoder.layer.*.output.dense.weight',\n ]\n parameters: {\n target: 600,\n normalize: False,\n stride: 1,\n transpose: True,\n importance_inputs: [\n 'bert.encoder.layer.*.intermediate.dense.weight',\n 'bert.encoder.layer.*.intermediate.dense.bias',\n ],\n importance_metric: 'weighted_gradient',\n }\n\n tuning:\n accuracy_criterion:\n relative: 0.1 # only verifying workflow, accuracy loss percentage: 10%\n exit_policy:\n timeout: 0 # tuning timeout (seconds)\n random_seed: 9527 # random seed\n "
    with open('fake.yaml', 'w', encoding='utf-8') as f:
        f.write(fake_yaml)
def set_default_style(color_scheme='dark', spacing=9, indent=23, scrollbar=27):
    """Apply the application's default imgui style.

    Sets padding/spacing metrics, uniform 1px borders, rounding, the chosen
    imgui color scheme, and a popup background blended from the menubar and
    frame background colors.
    """
    style = imgui.get_style()
    for attr in ('window_padding', 'item_spacing', 'item_inner_spacing'):
        setattr(style, attr, [spacing, spacing])
    style.columns_min_spacing = spacing
    style.indent_spacing = indent
    style.scrollbar_size = scrollbar
    style.frame_padding = [4, 3]
    for attr in ('window_border_size', 'child_border_size', 'popup_border_size', 'frame_border_size'):
        setattr(style, attr, 1)
    style.window_rounding = 0
    style.child_rounding = 0
    for attr in ('popup_rounding', 'frame_rounding', 'scrollbar_rounding', 'grab_rounding'):
        setattr(style, attr, 3)
    # Pick the requested color scheme (style_colors_dark / style_colors_light / ...).
    getattr(imgui, f'style_colors_{color_scheme}')(style)
    # Popup background: 70/30 blend of menubar and frame backgrounds, fully opaque.
    menubar_bg = style.colors[imgui.COLOR_MENUBAR_BACKGROUND]
    frame_bg = style.colors[imgui.COLOR_FRAME_BACKGROUND]
    blended = [(m * 0.7) + (f * 0.3) for (m, f) in zip(menubar_bg, frame_bg)]
    style.colors[imgui.COLOR_POPUP_BACKGROUND] = blended[:3] + [1]
def is_flaky(max_attempts: int=5, wait_before_retry: Optional[float]=None, description: Optional[str]=None):
    """Decorator factory that retries a flaky test function.

    The wrapped function is attempted up to `max_attempts` times; failures
    are logged to stderr and (optionally) followed by a sleep of
    `wait_before_retry` seconds. The final attempt lets any exception
    propagate to the caller.

    Args:
        max_attempts: total number of attempts (>= 1).
        wait_before_retry: seconds to sleep between attempts, or None.
        description: unused; kept for interface compatibility.
    """
    import functools

    def decorator(test_func_ref):
        # BUGFIX: the original had a bare `(test_func_ref)` expression here,
        # almost certainly a mangled `@functools.wraps(test_func_ref)`;
        # restored so the wrapper keeps the test's name/docstring.
        @functools.wraps(test_func_ref)
        def wrapper(*args, **kwargs):
            retry_count = 1
            while retry_count < max_attempts:
                try:
                    return test_func_ref(*args, **kwargs)
                except Exception as err:
                    print(f'Test failed with {err} at try {retry_count}/{max_attempts}.', file=sys.stderr)
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
                    retry_count += 1
            # Final attempt: exceptions propagate.
            return test_func_ref(*args, **kwargs)
        return wrapper
    return decorator
def dis_down_noins(images, kernel_size, stride, n_scale, ch, name):
    """Replace each entry of `images` in place with a processed version:
    scale i takes the next-finer scale's image through one_step_noins, while
    the coarsest scale processes the original images[0] through
    num_steps_noins. Returns the mutated list."""
    original_finest = images[0]
    coarsest = n_scale - 1
    for idx in range(n_scale):
        scope = name + str(idx)
        if idx == coarsest:
            images[idx] = num_steps_noins(original_finest, ch, kernel_size, stride, n_scale, scope)
        else:
            images[idx] = one_step_noins(images[idx + 1], ch, kernel_size, 1, scope)
    return images
class ExplanationJSONEncoder(JSONEncoder):
    """JSON encoder that serializes numpy arrays and interpret's
    Explanation/Component/Obj/Alias/Dim objects as tagged dicts
    (via a '_type' discriminator)."""

    def default(self, o):
        # Imported lazily to avoid a circular import at module load time.
        from interpret.newapi.explanation import Explanation
        from interpret.newapi.component import Component
        if isinstance(o, np.ndarray):
            return {'_type': 'array', 'value': o.tolist()}
        if isinstance(o, Explanation):
            klass = o.__class__
            return {'_type': 'explanation', 'module': klass.__module__, 'class': klass.__name__, 'components': list(o.components.values())}
        if isinstance(o, Component):
            klass = o.__class__
            return {'_type': 'component', 'module': klass.__module__, 'class': klass.__name__, 'fields': o.fields}
        if isinstance(o, Obj):
            return {'_type': 'obj', 'value': o.o, 'dim': o.dim}
        if isinstance(o, Alias):
            return {'_type': 'alias', 'value': o.o, 'dim': o.dim}
        if isinstance(o, Dim):
            return {'_type': 'dim', 'value': o.o}
        # Fall back to the base class (raises TypeError for unknown types).
        return JSONEncoder.default(self, o)
class XLMTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for XLM.

    Pre-tokenizes with spacy + ftfy when available, falling back to BERT's
    lower-casing BasicTokenizer, then applies byte-pair encoding using the
    provided merges file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=None, **kwargs):
        """Load the vocabulary (JSON token->id) and BPE merges.

        Args:
            vocab_file: JSON file mapping token -> id.
            merges_file: text file with one BPE merge per line.
            additional_special_tokens: defaults to the ten '<specialN>'
                placeholders. (Was a shared mutable default list.)
        """
        if additional_special_tokens is None:
            # BUGFIX: avoid the mutable default argument of the original.
            additional_special_tokens = ['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']
        super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        try:
            import ftfy
            import spacy
            self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
            self.fix_text = ftfy.fix_text
        except ImportError:
            logger.warning('ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.')
            self.nlp = BasicTokenizer(do_lower_case=True)
            self.fix_text = None
        # BUGFIX: use context managers so the vocab/merges file handles are
        # closed (the original leaked both).
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def vocab_size(self):
        """Number of tokens in the vocabulary."""
        return len(self.encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to one token.

        Returns the merged sub-words joined by spaces, with '</w>' marking
        the word end; results are memoized in self.cache.
        """
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Merge the lowest-ranked known bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # BUGFIX: was a bare `except:`; word.index raises
                    # ValueError when `first` no longer occurs.
                    new_word.extend(word[i:])
                    break
                if (word[i] == first) and (i < (len(word) - 1)) and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == '\n </w>':
            # Keep an escaped newline token intact.
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Pre-tokenize (spacy+ftfy or BasicTokenizer) then BPE-split."""
        split_tokens = []
        if self.fix_text is None:
            text = self.nlp.tokenize(text)
            for token in text:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        else:
            text = self.nlp(text_standardize(self.fix_text(text)))
            for token in text:
                split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (unk id if unknown)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string (unk token if unknown)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens back into plain text (drop '</w>' markers)."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def add_special_tokens_single_sentence(self, token_ids):
        """Return [CLS] token_ids [SEP]."""
        return ([self._convert_token_to_id(self.cls_token)] + token_ids) + [self._convert_token_to_id(self.sep_token)]

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
        """Return [CLS] ids_0 [SEP] ids_1 [SEP]."""
        sep = [self._convert_token_to_id(self.sep_token)]
        cls = [self._convert_token_to_id(self.cls_token)]
        return (((cls + token_ids_0) + sep) + token_ids_1) + sep

    def save_vocabulary(self, save_directory):
        """Write the vocab JSON and merges file into `save_directory`;
        returns their paths (or None if the directory is invalid)."""
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if index != token_index:
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write(' '.join(bpe_tokens) + u'\n')
                index += 1
        return (vocab_file, merge_file)
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
    """Return the article for *word*.

    Mirrors the original `and/or` chain exactly: the definite article is
    used only when `function == DEFINITE` AND `definite_article()` returns a
    truthy value; in every other case the indefinite article is returned.
    """
    if function == DEFINITE:
        definite = definite_article(word, gender, role)
        if definite:
            return definite
    return indefinite_article(word, gender, role)
def define_G(opt):
    """Build the generator network described by opt['network_G'].

    Supports 'MSRResNet' and 'RRDBNet'; raises NotImplementedError for any
    other model name.
    """
    net_opt = opt['network_G']
    model_name = net_opt['which_model_G']
    if model_name == 'MSRResNet':
        return SRResNet_arch.MSRResNet(in_nc=net_opt['in_nc'], out_nc=net_opt['out_nc'], nf=net_opt['nf'], nb=net_opt['nb'], upscale=net_opt['scale'])
    if model_name == 'RRDBNet':
        return RRDBNet_arch.RRDBNet(in_nc=net_opt['in_nc'], out_nc=net_opt['out_nc'], nf=net_opt['nf'], nb=net_opt['nb'])
    raise NotImplementedError('Generator model [{:s}] not recognized'.format(model_name))
def AddLstmLayer(config_lines, name, input, cell_dim, recurrent_projection_dim=0, non_recurrent_projection_dim=0, clipping_threshold=30.0, zeroing_threshold=15.0, zeroing_interval=20, ng_per_element_scale_options='', ng_affine_options='', lstm_delay=(- 1), self_repair_scale_nonlinearity=None, max_change_per_component=0.75):
    """Append the components and component-nodes of one (projected) LSTM
    layer to a Kaldi nnet3 config.

    Mutates config_lines['components'] and config_lines['component-nodes']
    in place, appending the config-line strings for the gates, cell, and
    optional recurrent/non-recurrent projections.

    Args:
        config_lines: dict with 'components' and 'component-nodes' lists.
        name: layer name prefix (whitespace is stripped).
        input: dict with 'descriptor' and 'dimension' of the layer input.
        cell_dim: LSTM cell dimension.
        recurrent_projection_dim / non_recurrent_projection_dim: projection
            sizes; 0 disables the corresponding projection.
        lstm_delay: recurrence offset (negative = look back).

    Returns:
        dict with 'descriptor' and 'dimension' of this layer's output.
    """
    assert ((recurrent_projection_dim >= 0) and (non_recurrent_projection_dim >= 0))
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    input_descriptor = input['descriptor']
    input_dim = input['dimension']
    name = name.strip()
    # With no recurrent projection, the recurrence feeds back m_t directly
    # (and the "projection" dim equals the cell dim); otherwise r_t is used.
    if (recurrent_projection_dim == 0):
        add_recurrent_projection = False
        recurrent_projection_dim = cell_dim
        recurrent_connection = 'm_t'
    else:
        add_recurrent_projection = True
        recurrent_connection = 'r_t'
    if (non_recurrent_projection_dim == 0):
        add_non_recurrent_projection = False
    else:
        add_non_recurrent_projection = True
    self_repair_nonlinearity_string = ('self-repair-scale={0:.10f}'.format(self_repair_scale_nonlinearity) if (self_repair_scale_nonlinearity is not None) else '')
    ng_per_element_scale_options += ' param-mean=0.0 param-stddev=1.0 '
    max_change_options = ('max-change={0:.2f}'.format(max_change_per_component) if (max_change_per_component is not None) else '')
    # --- Gate parameter components (input/forget/output gates + cell input) ---
    components.append('# Input gate control : W_i* matrices')
    components.append('component name={0}_W_i-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, (input_dim + recurrent_projection_dim), cell_dim, ng_affine_options, max_change_options))
    components.append('# note : the cell outputs pass through a diagonal matrix')
    components.append('component name={0}_w_ic type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}'.format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append('# Forget gate control : W_f* matrices')
    components.append('component name={0}_W_f-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, (input_dim + recurrent_projection_dim), cell_dim, ng_affine_options, max_change_options))
    components.append('# note : the cell outputs pass through a diagonal matrix')
    components.append('component name={0}_w_fc type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}'.format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append('# Output gate control : W_o* matrices')
    components.append('component name={0}_W_o-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, (input_dim + recurrent_projection_dim), cell_dim, ng_affine_options, max_change_options))
    components.append('# note : the cell outputs pass through a diagonal matrix')
    components.append('component name={0}_w_oc type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}'.format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append('# Cell input matrices : W_c* matrices')
    components.append('component name={0}_W_c-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, (input_dim + recurrent_projection_dim), cell_dim, ng_affine_options, max_change_options))
    # --- Nonlinearities and cell arithmetic components ---
    components.append('# Defining the non-linearities')
    components.append('component name={0}_i type=SigmoidComponent dim={1} {2}'.format(name, cell_dim, self_repair_nonlinearity_string))
    components.append('component name={0}_f type=SigmoidComponent dim={1} {2}'.format(name, cell_dim, self_repair_nonlinearity_string))
    components.append('component name={0}_o type=SigmoidComponent dim={1} {2}'.format(name, cell_dim, self_repair_nonlinearity_string))
    components.append('component name={0}_g type=TanhComponent dim={1} {2}'.format(name, cell_dim, self_repair_nonlinearity_string))
    components.append('component name={0}_h type=TanhComponent dim={1} {2}'.format(name, cell_dim, self_repair_nonlinearity_string))
    components.append('# Defining the cell computations')
    components.append('component name={0}_c1 type=ElementwiseProductComponent input-dim={1} output-dim={2}'.format(name, (2 * cell_dim), cell_dim))
    components.append('component name={0}_c2 type=ElementwiseProductComponent input-dim={1} output-dim={2}'.format(name, (2 * cell_dim), cell_dim))
    components.append('component name={0}_m type=ElementwiseProductComponent input-dim={1} output-dim={2}'.format(name, (2 * cell_dim), cell_dim))
    components.append('component name={0}_c type=BackpropTruncationComponent dim={1} clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} recurrence-interval={5}'.format(name, cell_dim, clipping_threshold, zeroing_threshold, zeroing_interval, abs(lstm_delay)))
    # --- Wiring: component-nodes for gates, cell state and outputs ---
    component_nodes.append('component-node name={0}_c_t component={0}_c input=Sum({0}_c1_t, {0}_c2_t)'.format(name))
    c_tminus1_descriptor = 'IfDefined(Offset({0}_c_t, {1}))'.format(name, lstm_delay)
    component_nodes.append('# i_t')
    component_nodes.append('component-node name={0}_i1 component={0}_W_i-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))'.format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append('component-node name={0}_i2 component={0}_w_ic input={1}'.format(name, c_tminus1_descriptor))
    component_nodes.append('component-node name={0}_i_t component={0}_i input=Sum({0}_i1, {0}_i2)'.format(name))
    component_nodes.append('# f_t')
    component_nodes.append('component-node name={0}_f1 component={0}_W_f-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))'.format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append('component-node name={0}_f2 component={0}_w_fc input={1}'.format(name, c_tminus1_descriptor))
    component_nodes.append('component-node name={0}_f_t component={0}_f input=Sum({0}_f1,{0}_f2)'.format(name))
    component_nodes.append('# o_t')
    component_nodes.append('component-node name={0}_o1 component={0}_W_o-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))'.format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append('component-node name={0}_o2 component={0}_w_oc input={0}_c_t'.format(name))
    component_nodes.append('component-node name={0}_o_t component={0}_o input=Sum({0}_o1, {0}_o2)'.format(name))
    component_nodes.append('# h_t')
    component_nodes.append('component-node name={0}_h_t component={0}_h input={0}_c_t'.format(name))
    component_nodes.append('# g_t')
    component_nodes.append('component-node name={0}_g1 component={0}_W_c-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))'.format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append('component-node name={0}_g_t component={0}_g input={0}_g1'.format(name))
    component_nodes.append('# parts of c_t')
    component_nodes.append('component-node name={0}_c1_t component={0}_c1 input=Append({0}_f_t, {1})'.format(name, c_tminus1_descriptor))
    component_nodes.append('component-node name={0}_c2_t component={0}_c2 input=Append({0}_i_t, {0}_g_t)'.format(name))
    component_nodes.append('# m_t')
    component_nodes.append('component-node name={0}_m_t component={0}_m input=Append({0}_o_t, {0}_h_t)'.format(name))
    # --- Output / projection wiring (three variants) ---
    if (add_recurrent_projection and add_non_recurrent_projection):
        components.append('# projection matrices : Wrm and Wpm')
        components.append('component name={0}_W-m type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, cell_dim, (recurrent_projection_dim + non_recurrent_projection_dim), ng_affine_options, max_change_options))
        components.append('component name={0}_r type=BackpropTruncationComponent dim={1} clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} recurrence-interval={5}'.format(name, recurrent_projection_dim, clipping_threshold, zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append('# r_t and p_t')
        component_nodes.append('component-node name={0}_rp_t component={0}_W-m input={0}_m_t'.format(name))
        component_nodes.append('dim-range-node name={0}_r_t_preclip input-node={0}_rp_t dim-offset=0 dim={1}'.format(name, recurrent_projection_dim))
        component_nodes.append('component-node name={0}_r_t component={0}_r input={0}_r_t_preclip'.format(name))
        output_descriptor = '{0}_rp_t'.format(name)
        output_dim = (recurrent_projection_dim + non_recurrent_projection_dim)
    elif add_recurrent_projection:
        components.append('# projection matrices : Wrm')
        components.append('component name={0}_Wrm type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, cell_dim, recurrent_projection_dim, ng_affine_options, max_change_options))
        components.append('component name={0}_r type=BackpropTruncationComponent dim={1} clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} recurrence-interval={5}'.format(name, recurrent_projection_dim, clipping_threshold, zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append('# r_t')
        component_nodes.append('component-node name={0}_r_t_preclip component={0}_Wrm input={0}_m_t'.format(name))
        component_nodes.append('component-node name={0}_r_t component={0}_r input={0}_r_t_preclip'.format(name))
        output_descriptor = '{0}_r_t'.format(name)
        output_dim = recurrent_projection_dim
    else:
        components.append('component name={0}_r type=BackpropTruncationComponent dim={1} clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} recurrence-interval={5}'.format(name, cell_dim, clipping_threshold, zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append('component-node name={0}_r_t component={0}_r input={0}_m_t'.format(name))
        output_descriptor = '{0}_r_t'.format(name)
        output_dim = cell_dim
    return {'descriptor': output_descriptor, 'dimension': output_dim}
def HPathExists(filepath: str):
    """Check whether *filepath* exists.

    Local paths are checked with os.path.exists. HDFS paths
    ('hdfs://...') are currently unsupported and yield None (falsy),
    preserving the original implicit fall-through behavior.
    """
    if not filepath.startswith('hdfs://'):
        return os.path.exists(filepath)
    # TODO: implement a real HDFS existence check. Explicit `return None`
    # keeps the previous (implicit) value for callers that only truth-test.
    return None
def process_rot(rotation):
    """Reflect the X and Z Euler angles across +/-pi, then remap axes.

    Mutates `rotation` in place at indices 0 and 2 (as the original did),
    and returns a new array [-z', y, -x'].
    """
    for axis in (0, 2):
        if rotation[axis] > 0:
            rotation[axis] = np.pi - rotation[axis]
        else:
            rotation[axis] = -np.pi - rotation[axis]
    return np.array([-rotation[2], rotation[1], -rotation[0]])
def read_requirements(file: str) -> list[str]:
    """Read requirement lines from *file*, skipping comments and pip flags.

    Lines are returned verbatim (trailing newlines included), exactly as
    before; only lines starting with '#' or '--' are filtered out.
    """
    # Context manager closes the handle deterministically (the original
    # `open(file)` inside the comprehension leaked it until GC).
    with open(file) as fh:
        return [line for line in fh if not line.startswith(('#', '--'))]
# NOTE(review): this bare call reads like a stripped decorator
# (`@_HEADS_REGISTRY.register()` on the class below) -- as written it
# registers nothing; confirm against the original source.
_HEADS_REGISTRY.register()
class AttributeStandardROIHeads(AttributeROIHeads, StandardROIHeads):
    """StandardROIHeads extended with an attribute-prediction branch.

    Box/mask/keypoint heads are set up as in StandardROIHeads; when
    cfg.MODEL.ATTRIBUTE_ON is set, an AttributePredictor is attached and
    its loss is added to the box-branch losses during training.
    """

    def __init__(self, cfg, input_shape):
        # Deliberately starts the super() chain *after* StandardROIHeads so
        # StandardROIHeads.__init__ is skipped and the overridden
        # _init_box_head below is used for head construction.
        super(StandardROIHeads, self).__init__(cfg, input_shape)
        self._init_box_head(cfg, input_shape)
        self._init_mask_head(cfg, input_shape)
        self._init_keypoint_head(cfg, input_shape)

    def _init_box_head(self, cfg, input_shape):
        """Build box pooler/head/predictor and, optionally, the attribute head."""
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        # One pooling scale per selected feature map, from its stride.
        pooler_scales = tuple(((1.0 / input_shape[k].stride) for k in self.in_features))
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        self.attribute_on = cfg.MODEL.ATTRIBUTE_ON
        in_channels = [input_shape[f].channels for f in self.in_features]
        # All selected feature maps must share a channel count.
        assert (len(set(in_channels)) == 1), in_channels
        in_channels = in_channels[0]
        self.box_pooler = ROIPooler(output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type)
        self.box_head = build_box_head(cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution))
        self.box_predictor = FastRCNNOutputLayers(cfg, self.box_head.output_shape)
        if self.attribute_on:
            self.attribute_predictor = AttributePredictor(cfg, self.box_head.output_shape.channels)

    def _forward_box(self, features, proposals):
        """Run the box branch: losses in training mode, instances in eval mode."""
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        if self.training:
            if self.train_on_pred_boxes:
                # Replace proposals with the refined predicted boxes (no
                # gradient) so downstream heads train on them.
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(predictions, proposals)
                    for (proposals_per_image, pred_boxes_per_image) in zip(proposals, pred_boxes):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            losses = self.box_predictor.losses(predictions, proposals)
            if self.attribute_on:
                losses.update(self.forward_attribute_loss(proposals, box_features))
                del box_features
            return losses
        else:
            (pred_instances, _) = self.box_predictor.inference(predictions, proposals)
            return pred_instances

    def get_conv5_features(self, features):
        """Return the single configured backbone feature map (e.g. res5)."""
        assert (len(self.in_features) == 1)
        features = [features[f] for f in self.in_features]
        return features[0]
def test_for_loop_binding():
    """Loop-variable lineage: `i` should not be tainted by the later `a = 3`."""
    cells = (
        'a = 0',
        'b = 1',
        'c = 2',
        'lst = [a, b, c]',
        '\n for i in lst:\n pass\n ',
        'a = 3',
        'logging.info(i)',
    )
    for cell in cells:
        run_cell(cell)
    assert_false_positive('`i` should not depend on `a` at end of for loop but this is hard')
class IICTrainer(_FeatureExtractor, SemiTrainer):
    """Semi-supervised trainer with an IIC mutual-information regularizer."""

    def _init(self):
        # Build the MI estimator array from the IICRegParameters config:
        # encoder- and decoder-side estimators over the tracked features.
        super(IICTrainer, self)._init()
        config = deepcopy(self._config['IICRegParameters'])
        self._mi_estimator_array = IICEstimatorArray()
        self._mi_estimator_array.add_encoder_interface(feature_names=self.feature_positions, **config['EncoderParams'])
        self._mi_estimator_array.add_decoder_interface(feature_names=self.feature_positions, **config['DecoderParams'], **config['LossParams'])
        # Weight of the MI regularization term in the total loss.
        self._reg_weight = float(config['weight'])
        self._enforce_matching = config['enforce_matching']

    def epocher_class(self) -> Type[TrainEpocher]:
        # NOTE(review): used as a factory accessor; may originally have been
        # decorated (@property) -- confirm against the original source.
        return MITrainEpocher

    def _run_epoch(self, epocher: MITrainEpocher, *args, **kwargs) -> EpochResultDict:
        # Inject the MI-specific state into the epocher before running it.
        epocher.init(reg_weight=self._reg_weight, mi_estimator_array=self._mi_estimator_array, enforce_matching=self._enforce_matching)
        result = epocher.run()
        return result

    def _init_optimizer(self):
        # Also optimize the MI estimators' parameters, using the lr and
        # weight decay from the main optimizer config.
        super(IICTrainer, self)._init_optimizer()
        optim_config = self._config['Optim']
        self._optimizer.add_param_group({'params': self._mi_estimator_array.parameters(), 'lr': optim_config['lr'], 'weight_decay': optim_config['weight_decay']})
class PointNetCls(nn.Module):
    """PointNet classification head: global feature extractor + 3-layer MLP.

    Note: `sync_bn` is accepted but unused, matching the original signature.
    """

    def __init__(self, c=3, k=40, dropout=0.3, sync_bn=False):
        super(PointNetCls, self).__init__()
        # Global per-cloud feature (1024-d) from the PointNet backbone.
        self.feat = PointNetFeat(c, global_feat=True)
        # Classifier MLP: 1024 -> 512 -> 256 -> k, with BN after fc1/fc2.
        self.fc1 = nn.Linear(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.fc3 = nn.Linear(256, k)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        # The backbone wants channels on dim 1, hence the transpose
        # (input presumably arrives channel-last -- confirm with callers).
        features = self.feat(x.transpose(1, 2))
        hidden = F.relu(self.bn1(self.fc1(features)))
        hidden = F.relu(self.bn2(self.fc2(hidden)))
        logits = self.fc3(self.dropout(hidden))
        return logits
# NOTE(review): bare call reads like a stripped decorator
# (`@DETECTORS.register_module()`); as written it registers nothing --
# confirm against the original source.
_module()
class OBBTwoStageDetector(OBBBaseDetector, RotateAugRPNTestMixin):
    """Two-stage detector for oriented bounding boxes (RPN + RoI head)."""

    def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None):
        super(OBBTwoStageDetector, self).__init__()
        self.backbone = build_backbone(backbone)
        if (neck is not None):
            self.neck = build_neck(neck)
        if (rpn_head is not None):
            # Copy before mutating so the caller's rpn_head config is untouched.
            rpn_train_cfg = (train_cfg.rpn if (train_cfg is not None) else None)
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)
        if (roi_head is not None):
            # NOTE: roi_head is updated in place (no copy, unlike rpn_head).
            rcnn_train_cfg = (train_cfg.rcnn if (train_cfg is not None) else None)
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            self.roi_head = build_head(roi_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)

    def with_rpn(self):
        # NOTE(review): consumed below as `if self.with_rpn:` (no call); in
        # mmdet these are @property accessors -- the decorator appears to
        # have been stripped here. Confirm against the original source.
        return (hasattr(self, 'rpn_head') and (self.rpn_head is not None))

    def with_roi_head(self):
        # See the note on with_rpn.
        return (hasattr(self, 'roi_head') and (self.roi_head is not None))

    def init_weights(self, pretrained=None):
        """Initialize backbone/neck/rpn/roi-head weights (optionally pretrained)."""
        super(OBBTwoStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)

    def extract_feat(self, img):
        """Backbone (+ optional neck) feature extraction."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Forward with random proposals (used e.g. for FLOPs computation)."""
        outs = ()
        x = self.extract_feat(img)
        proposal_type = 'hbb'
        if self.with_rpn:
            proposal_type = getattr(self.rpn_head, 'bbox_type', 'hbb')
            rpn_outs = self.rpn_head(x)
            outs = (outs + (rpn_outs,))
        # Fake proposals: 4 coords for horizontal boxes, 5 for oriented ones.
        if (proposal_type == 'hbb'):
            proposals = torch.randn(1000, 4).to(img.device)
        elif (proposal_type == 'obb'):
            proposals = torch.randn(1000, 5).to(img.device)
        else:
            proposals = None
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = (outs + (roi_outs,))
        return outs

    def forward_train(self, img, img_metas, gt_bboxes, gt_obboxes, gt_labels, gt_bboxes_ignore=None, gt_obboxes_ignore=None, proposals=None, **kwargs):
        """Training forward: RPN (hbb or obb targets per its bbox_type) + RoI losses."""
        x = self.extract_feat(img)
        losses = dict()
        if self.with_rpn:
            # Pick horizontal or oriented GT depending on the RPN's bbox type.
            proposal_type = getattr(self.rpn_head, 'bbox_type', 'hbb')
            target_bboxes = (gt_bboxes if (proposal_type == 'hbb') else gt_obboxes)
            target_bboxes_ignore = (gt_bboxes_ignore if (proposal_type == 'hbb') else gt_obboxes_ignore)
            proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn)
            (rpn_losses, proposal_list) = self.rpn_head.forward_train(x, img_metas, target_bboxes, gt_labels=None, gt_bboxes_ignore=target_bboxes_ignore, proposal_cfg=proposal_cfg)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals
        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, gt_bboxes, gt_obboxes, gt_labels, gt_bboxes_ignore, gt_obboxes_ignore, **kwargs)
        losses.update(roi_losses)
        return losses

    async def async_simple_test(self, img, img_meta, proposals=None, rescale=False):
        """Async single-image test (no augmentation)."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = (await self.rpn_head.async_simple_test_rpn(x, img_meta))
        else:
            proposal_list = proposals
        return (await self.roi_head.async_simple_test(x, proposal_list, img_meta, rescale=rescale))

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Single-image test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if (proposals is None):
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with rotation augmentations; proposals come from the RPN mixin."""
        x = self.extract_feats(imgs)
        proposal_list = self.rotate_aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)
def cosine_similarity(x1, x2=None, eps=1e-08):
    """Pairwise cosine similarity between rows of x1 and rows of x2.

    When x2 is omitted, computes self-similarity of x1. `eps` floors the
    norm product to avoid division by zero.
    """
    if x2 is None:
        x2 = x1
    n1 = x1.norm(p=2, dim=1, keepdim=True)
    # Reuse x1's norms when computing self-similarity.
    n2 = n1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
    denom = (n1 * n2.t()).clamp(min=eps)
    return torch.mm(x1, x2.t()) / denom
class Results(list):
    """A list of search results carrying source/query metadata.

    Note: the `type` parameter shadows the builtin and defaults to the
    module-level SEARCH constant; both are kept for interface compatibility.
    """

    def __init__(self, source=None, query=None, type=SEARCH, total=0):
        # Start as an empty list; items are appended by callers.
        super().__init__()
        self.source = source
        self.query = query
        self.type = type
        self.total = total
class FancyUnbiasedRiskEstimatorCut1(RiskEstimator):
    """Risk estimator with unbiasing weights whose largest-weight tail is cut.

    Any weight exceeding 10% of the total weight mass is zeroed to reduce
    variance (at the cost of some bias).
    """

    def __init__(self, loss, dataset, *args):
        super().__init__(loss)
        # Size of the pool the acquired test points were drawn from.
        self.N = len(dataset.test_idxs)

    def estimate(self, predictions, observed, acq_weights):
        """Return the weighted mean loss over the M acquired points.

        acq_weights: per-sample acquisition probabilities, assumed aligned
        with `predictions` -- confirm with the caller.
        """
        l_i = self.loss(predictions, observed)
        N = self.N
        M = len(predictions)
        if (M < N):
            m = np.arange(1, (M + 1))
            # v_m = 1 + (N-M)/(N-m) * (1/((N-m+1)*q_m) - 1): unbiasing
            # weights for sequential acquisition (presumably the standard
            # active-testing correction -- confirm against the paper/sibling
            # estimators).
            v = (1 + (((N - M) / (N - m)) * ((1 / (((N - m) + 1) * acq_weights)) - 1)))
            v_sum = v.sum()
            # Cut rule: zero any weight above 10% of the total weight mass.
            cut = 0.1
            v[(v > (cut * v_sum))] = 0
        else:
            # Entire pool acquired: plain unweighted average.
            v = 1
        R = ((1 / M) * (v * l_i).sum())
        if DEBUG_WEIGHTS:
            # Append "<count>, w1, w2, ..." to a CSV for offline inspection.
            if isinstance(v, int):
                v = [v]
            with open('weights_cut10.csv', 'a') as f:
                data = str(list(v)).replace('[', '').replace(']', '')
                f.write(f'''{len(v)}, {data}
''')
        return self.return_and_save(R)
# NOTE(review): this bare call reads like a stripped decorator
# (`@register_criterion('speech_and_text_translation', ...)`) -- confirm
# against the original source.
_criterion('speech_and_text_translation', dataclass=SpeechAndTextTranslationCriterionConfig)
class SpeechAndTextTranslationCriterion(LabelSmoothedCrossEntropyCriterion):
    """Label-smoothed CE criterion over joint ST / MT / external-MT batches."""

    def __init__(self, task, sentence_avg, label_smoothing, ignore_prefix_size=0, report_accuracy=False, mt_finetune=False):
        super().__init__(task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy)
        # When True (and training), 'st' batches also add the MT loss.
        self.mt_finetune = mt_finetune

    def forward_st(self, model, sample, reduce):
        """Speech-translation loss: run the model on the audio inputs."""
        audio_input = {'src_tokens': sample['net_input']['audio'], 'src_lengths': sample['net_input']['audio_lengths'], 'mode': 'st', 'prev_output_tokens': sample['net_input']['prev_output_tokens']}
        audio_output = model(**audio_input)
        (loss, _) = self.compute_loss(model, audio_output, sample, reduce=reduce)
        return loss

    def forward_mt(self, model, sample, reduce):
        """Machine-translation loss: run the model on the source-text inputs."""
        text_input = {'src_tokens': sample['net_input']['source'], 'src_lengths': sample['net_input']['source_lengths'], 'mode': 'mt', 'prev_output_tokens': sample['net_input']['prev_output_tokens']}
        text_output = model(**text_input)
        (loss, _) = self.compute_loss(model, text_output, sample, reduce=reduce)
        return loss

    def forward_ext_mt(self, model, sample, reduce):
        """External-MT loss: the sample's net_input is passed through as-is."""
        text_output = model(**sample['net_input'])
        (loss, _) = self.compute_loss(model, text_output, sample, reduce=reduce)
        return loss

    def forward(self, model, sample, reduce=True):
        """Dispatch on sample mode; return (loss, sample_size, logging_output).

        NOTE(review): only modes 'st' and 'ext_mt' are handled -- any other
        mode leaves `loss`/`sample_size` unbound and raises NameError at the
        logging line. Confirm whether an 'mt' branch was dropped.
        """
        (st_loss, mt_loss, ext_mt_loss) = (torch.Tensor([0]).cuda(), torch.Tensor([0]).cuda(), torch.Tensor([0]).cuda())
        (st_size, mt_size, ext_mt_size) = (0, 0, 0)
        mode = sample['net_input']['mode']
        if (mode == 'st'):
            # Joint ST+MT during finetune training; pure ST otherwise.
            if (self.mt_finetune and self.training):
                st_loss = self.forward_st(model, sample, reduce)
                mt_loss = self.forward_mt(model, sample, reduce)
                loss = (st_loss + mt_loss)
                st_size = mt_size = sample_size = sample['ntokens']
            else:
                loss = st_loss = self.forward_st(model, sample, reduce)
                st_size = sample_size = sample['ntokens']
        elif (mode == 'ext_mt'):
            loss = ext_mt_loss = self.forward_ext_mt(model, sample, reduce)
            ext_mt_size = sample_size = sample['ntokens']
        logging_output = {'loss': loss.data, 'st_loss': st_loss.data, 'st_sample_size': st_size, 'mt_loss': mt_loss.data, 'mt_sample_size': mt_size, 'ext_mt_loss': ext_mt_loss.data, 'ext_mt_sample_size': ext_mt_size, 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def reduce_metrics(cls, logging_outputs) -> None:
        """Aggregate per-worker logging outputs into scalar metrics (base 2).

        NOTE(review): takes `cls` -- presumably decorated @classmethod in
        the original source; confirm.
        """
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        st_loss_sum = sum((log.get('st_loss', 0) for log in logging_outputs))
        mt_loss_sum = sum((log.get('mt_loss', 0) for log in logging_outputs))
        ext_mt_loss_sum = sum((log.get('ext_mt_loss', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        st_sample_size = sum((log.get('st_sample_size', 0) for log in logging_outputs))
        mt_sample_size = sum((log.get('mt_sample_size', 0) for log in logging_outputs))
        ext_mt_sample_size = sum((log.get('ext_mt_sample_size', 0) for log in logging_outputs))
        # Divide by log(2) to report losses in bits per token.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_scalar('st_loss', (((st_loss_sum / st_sample_size) / math.log(2)) if (st_sample_size != 0) else 0), st_sample_size, round=3)
        metrics.log_scalar('mt_loss', (((mt_loss_sum / mt_sample_size) / math.log(2)) if (mt_sample_size != 0) else 0), mt_sample_size, round=3)
        metrics.log_scalar('ext_mt_loss', (((ext_mt_loss_sum / ext_mt_sample_size) / math.log(2)) if (ext_mt_sample_size != 0) else 0), ext_mt_sample_size, round=3)

    def logging_outputs_can_be_summed() -> bool:
        # NOTE(review): no self/cls -- presumably @staticmethod originally.
        return True
def get_torch_version():
    """Return the installed torch version as a parsed Version object.

    The local build tag (anything after '+', e.g. '2.1.0+cu118') is
    stripped before parsing.

    Raises:
        AssertionError: if the version string cannot be parsed (matches the
        original `assert False` failure type).
    """
    torch_version = torch.__version__.split('+')[0]
    try:
        # The original try/except wrapped str.split, which cannot raise
        # ValueError -- the handler was dead. Version() is what can fail.
        version = Version(torch_version)
    except Exception as e:
        raise AssertionError('Got an unknown version of torch: {}'.format(e)) from e
    return version
def build_one_cycle_optimizer(model, optimizer_config):
    """Create a one-cycle Adam optimizer wrapped in OptimWrapper.

    With fixed_wd, Adam uses betas (0.9, 0.99); otherwise the default betas.
    The amsgrad flag and weight decay come from optimizer_config either way.
    """
    if optimizer_config.fixed_wd:
        optimizer_func = partial(torch.optim.Adam, betas=(0.9, 0.99), amsgrad=optimizer_config.amsgrad)
    else:
        # Bug fix: this branch referenced the undefined name `optimizer_cfg`
        # and raised NameError whenever fixed_wd was False.
        optimizer_func = partial(torch.optim.Adam, amsgrad=optimizer_config.amsgrad)
    optimizer = OptimWrapper.create(optimizer_func, 0.003, get_layer_groups(model), wd=optimizer_config.wd, true_wd=optimizer_config.fixed_wd, bn_wd=True)
    return optimizer
def init_multiprocessing(rank, sync_device):
    """Record this process' rank and sync device in module-level globals.

    Must be called before the first synchronization: asserts the
    module-level `_sync_called` flag (set elsewhere) is still False.
    """
    global _rank, _sync_device
    assert (not _sync_called)
    _rank = rank
    _sync_device = sync_device
def style_transfer(sess, dataloader):
    """Benchmark the style-transfer graph over up to 20 batches and print stats."""
    time_list = []
    # Resolve graph tensor names (prefixed with the import scope).
    output_name = add_import_to_name(sess, 'transformer/expand/conv3/conv/Sigmoid:0', 3)
    style_name = add_import_to_name(sess, 'style_input:0', 3)
    content_name = add_import_to_name(sess, 'content_input:0', 3)
    stylized_images = sess.graph.get_tensor_by_name(output_name)
    for (idx, ((content_img_np, style_img_np), _)) in enumerate(dataloader):
        start_time = time.time()
        stylized_image_res = sess.run(stylized_images, feed_dict={style_name: style_img_np, content_name: content_img_np})
        duration = (time.time() - start_time)
        time_list.append(duration)
        # Measure at most 20 batches.
        if ((idx + 1) == 20):
            break
    # Discard the first (warm-up) iteration from the statistics.
    warm_up = 1
    throughput = ((len(time_list) - warm_up) / np.array(time_list[warm_up:]).sum())
    print('Batch size = {}'.format(FLAGS.batch_size))
    print('Latency: {:.3f} ms'.format((np.array(time_list[warm_up:]).mean() * 1000)))
    print('Throughput: {:.3f} images/sec'.format(throughput))
def build_loaders(train_dataset, val_dataset):
    """Wrap both datasets in DataLoaders (batch size 5; only train shuffles)."""
    worker_count = multiprocessing.cpu_count()
    pin = torch.cuda.is_available()

    def _loader(dataset, shuffle):
        # Shared loader settings; pin memory only when CUDA is present.
        return DataLoader(dataset, batch_size=5, shuffle=shuffle, num_workers=worker_count, pin_memory=pin)

    return (_loader(train_dataset, True), _loader(val_dataset, False))
def replace_from_right(string: str, old: str, new: str, count: int = (-1)):
    """Replace occurrences of *old* with *new*, counting from the right.

    count=-1 (the default) replaces every occurrence.
    """
    # Same type guards as before (AssertionError on violation).
    for value, expected in ((string, str), (old, str), (new, str), (count, int)):
        assert isinstance(value, expected)
    return new.join(string.rsplit(old, count))
# NOTE(review): the leading `.parametrize(...)` is missing its receiver --
# this reads like a stripped `@pytest.mark.parametrize` decorator; as
# written the line is a syntax error. Confirm against the original test.
.parametrize('device_type', ['cpu', pytest.param('cuda:0', marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support'))])
def test_multiscale_deformable_attention(device_type):
    """Smoke-test MultiScaleDeformableAttention on CPU (and CUDA when available)."""
    # embed_dims must be divisible by num_heads: 256 % 7 != 0 -> ValueError.
    with pytest.raises(ValueError):
        MultiScaleDeformableAttention(embed_dims=256, num_heads=7)
    device = torch.device(device_type)
    msda = MultiScaleDeformableAttention(embed_dims=3, num_levels=2, num_heads=3)
    msda.init_weights()
    num_query = 5
    bs = 1
    embed_dims = 3
    query = torch.rand(num_query, bs, embed_dims).to(device)
    key = torch.rand(num_query, bs, embed_dims).to(device)
    # Two feature levels (2x2 and 1x1) -> flattened offsets 0 and 4.
    spatial_shapes = torch.Tensor([[2, 2], [1, 1]]).long().to(device)
    level_start_index = torch.Tensor([0, 4]).long().to(device)
    reference_points = torch.rand(bs, num_query, 2, 2).to(device)
    msda.to(device)
    # Forward pass should complete without raising.
    msda(query, key, key, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index)
def conv_bn_relu(in_channels, out_channels, kernel_size, stride, padding, groups, dilation=1):
    """conv_bn block with a trailing ReLU; padding=None means 'same' (k // 2)."""
    effective_padding = kernel_size // 2 if padding is None else padding
    block = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=effective_padding, groups=groups, dilation=dilation)
    block.add_module('nonlinear', nn.ReLU())
    return block
def _parse_args():
parser = ArgumentParser()
parser.add_argument('--cluster_mode', type=str, default='local', help='The cluster mode, such as local, yarn, standalone or spark-submit.')
parser.add_argument('--master', type=str, default=None, help='The master url, only used when cluster mode is standalone.')
parser.add_argument('--executor_cores', type=int, default=48, help='The executor core number.')
parser.add_argument('--executor_memory', type=str, default='160g', help='The executor memory.')
parser.add_argument('--num_executors', type=int, default=8, help='The number of executors.')
parser.add_argument('--driver_cores', type=int, default=4, help='The driver core number.')
parser.add_argument('--driver_memory', type=str, default='36g', help='The driver memory.')
parser.add_argument('--input_transaction', type=str, required=True, help='The path to the user transaction file.')
parser.add_argument('--input_meta', type=str, required=True, help='The path to the item metadata file.')
parser.add_argument('--output', type=str, default='./', help='The path to save the preprocessed data.')
args = parser.parse_args()
return args |
def download_model(url, model_name, retry_times=5):
    """Download *url* to *model_name*, retrying up to *retry_times* times.

    Returns:
        True when the file already exists or the download succeeded;
        False on user interrupt or when all retries were exhausted.
    """
    if os.path.isfile(model_name):
        print(f'{model_name} exists, skip download')
        return True
    print('download model...')
    retries = 0
    while (retries < retry_times):
        try:
            # `schedule` is the module-level progress-report hook.
            request.urlretrieve(url, model_name, schedule)
            break
        except KeyboardInterrupt:
            # Let the user abort cleanly instead of counting it as a retry.
            return False
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and other
            # BaseExceptions are no longer swallowed.
            retries += 1
            print(f"Download failed{(', Retry downloading...' if (retries < retry_times) else '!')}")
    return (retries < retry_times)
class RandomIntensityScale(object):
    """Multiply the image modalities by a random factor drawn from [min, max]."""

    def __init__(self, min: float = 0.9, max: float = 1.1):
        super().__init__()
        # `min`/`max` shadow builtins; kept for interface compatibility.
        self.min = min
        self.max = max

    def __call__(self, img_and_mask: Tuple[(np.ndarray, np.ndarray, np.ndarray)]) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
        """Scale only the first element; the other two pass through unchanged."""
        modalities, second, mask = img_and_mask
        factor = random.uniform(self.min, self.max)
        return (modalities * factor, second, mask)
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[(Tuple, bool)]:
    """Resolve a padding spec to (padding, is_dynamic).

    Numeric/tuple padding passes through; 'valid' is 0; 'same' is a static
    pad when possible, else (0, True) meaning pad at runtime; any other
    string falls back to the default symmetric padding.
    """
    if not isinstance(padding, str):
        # Explicit numeric/tuple padding: use as-is, never dynamic.
        return (padding, False)
    spec = padding.lower()
    if spec == 'valid':
        return (0, False)
    if spec == 'same':
        if is_static_pad(kernel_size, **kwargs):
            # TF-'same' expressible as a fixed pad amount.
            return (get_padding(kernel_size, **kwargs), False)
        # Must compute padding dynamically per input size.
        return (0, True)
    return (get_padding(kernel_size, **kwargs), False)
def pack_trackingnet_results(tracker_name, param_name, run_id=None, output_name=None):
    """Pack raw TrackingNet results into a zip for evaluation-server upload.

    Rewrites each sequence's results comma-separated with two decimals into
    a staging folder, zips that folder, then deletes the staging folder.

    Args:
        tracker_name: tracker directory name under results_path.
        param_name: parameter-file name within the tracker directory.
        run_id: optional run number (appended as _NNN).
        output_name: zip base name; defaults to tracker/param(/run) combo.
    """
    if (output_name is None):
        if (run_id is None):
            output_name = '{}_{}'.format(tracker_name, param_name)
        else:
            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name, run_id)
    output_path = os.path.join(env_settings().tn_packed_results_path, output_name)
    if (not os.path.exists(output_path)):
        os.makedirs(output_path)
    results_path = env_settings().results_path
    tn_dataset = get_dataset('trackingnet')
    for seq in tn_dataset:
        seq_name = seq.name
        if (run_id is None):
            seq_results_path = '{}/{}/{}/{}.txt'.format(results_path, tracker_name, param_name, seq_name)
        else:
            seq_results_path = '{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name)
        results = np.loadtxt(seq_results_path, dtype=np.float64)
        # Server format: comma-separated, two decimal places.
        np.savetxt('{}/{}.txt'.format(output_path, seq_name), results, delimiter=',', fmt='%.2f')
    # Zip the staging folder, then remove it.
    shutil.make_archive(output_path, 'zip', output_path)
    shutil.rmtree(output_path)
def main(_):
    """Entry point: quantize (--tune) and/or benchmark (--benchmark) the graph."""
    # Calibration set: label-balanced COCO records, one sample per class.
    calib_dataset = COCORecordDataset(root=args.dataset_location, filter=LabelBalanceCOCORecordFilter(size=1))
    calib_dataloader = DataLoader(framework='tensorflow', dataset=calib_dataset, batch_size=1)
    if args.tune:
        from neural_compressor import quantization
        from neural_compressor.config import PostTrainingQuantConfig
        config = PostTrainingQuantConfig(inputs=['image_tensor'], outputs=['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes'], calibration_sampling_size=[50])
        q_model = quantization.fit(model=args.input_graph, conf=config, calib_dataloader=calib_dataloader, eval_func=evaluate)
        q_model.save(args.output_model)
    if args.benchmark:
        from neural_compressor.benchmark import fit
        from neural_compressor.config import BenchmarkConfig
        if (args.mode == 'performance'):
            conf = BenchmarkConfig(inputs=['image_tensor'], outputs=['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes'], cores_per_instance=28, num_of_instance=1)
            fit(args.input_graph, conf, b_func=evaluate)
        else:
            # Accuracy mode: evaluate the input graph directly.
            accuracy = evaluate(args.input_graph)
            print(('Batch size = %d' % args.batch_size))
            print(('Accuracy: %.5f' % accuracy))
def retrieve_data(data, key):
    """From a dict, collect entries whose key contains '_<key>', stripping that marker.

    Returns None (implicitly matching the original) when *data* is not a dict.
    NOTE: the match is substring-based, so '_x' also hits keys like 'a_xy' --
    presumably intentional; confirm with callers.
    """
    if not isinstance(data, dict):
        return None
    marker = '_{}'.format(key)
    return {k.replace(marker, ''): v for k, v in data.items() if marker in k}
def mixed_spec(singly_nested_spec: specs.Spec, not_jumanji_type_spec: specs.Spec) -> specs.Spec:
    """Build a composite Spec mixing a nested spec with a non-Jumanji-typed one."""
    container = namedtuple('mixed_type', ['singly_nested', 'not_jumanji_type'])
    return specs.Spec(container, 'MixedSpec', singly_nested=singly_nested_spec, not_jumanji_type=not_jumanji_type_spec)
def evaluate_levircd(self, test_dataloader, config=None):
    """Evaluate the change-detection model on LEVIR-CD with pixel metrics."""
    self.model.eval()
    device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
    # Binary (change / no-change) pixel-metric accumulator.
    metric_op = er.metric.PixelMetric(2, self.model_dir, logger=self.logger)
    with torch.no_grad():
        for (img, ret_gt) in tqdm(test_dataloader):
            img = img.to(device)
            # Threshold the sigmoid output at 0.5 for a binary change map.
            # NOTE(review): calls self.model.module(...) directly, which
            # presumes a DataParallel/DDP-wrapped model -- confirm.
            change = (self.model.module(img).sigmoid() > 0.5)
            pr_change = change.cpu().numpy().astype(np.uint8)
            gt_change = ret_gt['change']
            gt_change = gt_change.numpy()
            y_true = gt_change.ravel()
            y_pred = pr_change.ravel()
            # Binarize ground truth: any positive label counts as change.
            y_true = np.where((y_true > 0), np.ones_like(y_true), np.zeros_like(y_true))
            metric_op.forward(y_true, y_pred)
    metric_op.summary_all()
    torch.cuda.empty_cache()
# NOTE(review): the bare calls here and inside the class body read like
# stripped decorators (`@add_start_docstrings(...)` etc. from transformers)
# -- confirm against the original source.
_start_docstrings('The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.', POOLFORMER_START_DOCSTRING)
class PoolFormerModel(PoolFormerPreTrainedModel):
    """Bare PoolFormer encoder producing raw hidden states (no task head)."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Base-class hook: final weight init / post-processing.
        self.post_init()

    def get_input_embeddings(self):
        # NOTE(review): `self.embeddings` is never assigned in this class as
        # shown, so this would raise AttributeError if called -- confirm
        # against the original implementation.
        return self.embeddings.patch_embeddings

    _start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    _code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, BaseModelOutputWithNoAttention)]:
        """Encode pixel_values; returns BaseModelOutputWithNoAttention or a tuple."""
        # Fall back to config defaults when flags are not given explicitly.
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        if (pixel_values is None):
            raise ValueError('You have to specify pixel_values')
        encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        if (not return_dict):
            # Tuple output: (last_hidden_state, None pooled slot, *rest).
            return ((sequence_output, None) + encoder_outputs[1:])
        return BaseModelOutputWithNoAttention(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states)
def recreate_local_parser_cache():
    """Rebuild iheartla's generated parsers and copy them into la_local_parsers.

    Destructive: wipes both the parser cache dir and the la_local_parsers
    dir before regenerating.
    """
    import iheartla.la_parser.parser
    PM = iheartla.la_parser.parser._parser_manager
    print('## Clearing the cache dir:', PM.cache_dir)
    shutil.rmtree(PM.cache_dir)
    Path(PM.cache_dir).mkdir()
    la_local_parsers = (PM.grammar_dir.parent / 'la_local_parsers')
    print('## Clearing the la_local_parsers dir:', la_local_parsers)
    shutil.rmtree(la_local_parsers)
    la_local_parsers.mkdir()
    print('## Reloading the ParserManager.')
    PM.reload()
    print('## Re-creating the parsers.')
    iheartla.la_parser.parser.create_parser()
    print('## Waiting for them to be saved.')
    # Parser files are written on background threads; wait for all of them.
    for thread in PM.parser_file_manager.save_threads:
        thread.join()
    print('## Copying the cache dir contents into the local dir.')
    for f in Path(PM.cache_dir).glob('*.py'):
        shutil.copy(f, la_local_parsers)
    print('## Modifying default parsers')
    PM.parser_file_manager.generate_new_parser_files()
    print('## Done.')
def test_construct_arguments_does_not_overwrite_args_and_kwargs():
    """Explicitly passed args/kwargs must win over the defaults mapping."""
    signature = Signature(bariza)
    bound_args, bound_kwargs = signature.construct_arguments([1, 2], {'c': 3}, {'a': 6, 'b': 6, 'c': 6})
    assert bound_args == [1, 2]
    assert bound_kwargs == {'c': 3}
class SaveWrapper(AbstractTrainerWrapper):
    """Trainer wrapper that periodically saves the model during training.

    Saves whenever `saving_period` time units have accumulated (from the
    wrapped trainer's process() result), plus once on normal completion and
    on KeyboardInterrupt.
    """

    def __init__(self, *args, model_root_directory=None, saving_period=10000, **kwargs):
        super().__init__(*args, **kwargs)
        if (model_root_directory is None):
            # Fall back to the configured models path.
            from ..configuration import configuration
            model_root_directory = configuration.get('models_path')
        self.model_root_directory = model_root_directory
        # Time accumulated since the last save.
        self._last_save = 0
        self.saving_period = saving_period
        self._enabled = True

    def process(self, **kwargs):
        """Delegate to the wrapped trainer; save when the period has elapsed."""
        res = self.trainer.process(**kwargs)
        # NOTE(review): assumes process() returns a (tdiff, _, _) triple --
        # confirm against the wrapped trainer's contract.
        (tdiff, _, _) = res
        self._last_save += tdiff
        if (self._last_save >= self.saving_period):
            self._save()
            self._last_save = 0
        return res

    def _save(self):
        # Persist under <root>/<unwrapped trainer name>, creating dirs as needed.
        if (not self._enabled):
            return
        print('Saving')
        path = os.path.join(self.model_root_directory, self.unwrapped.name)
        if (not os.path.exists(path)):
            os.makedirs(path)
        self.save(path)

    def run(self, *args, **kwargs):
        """Run the wrapped trainer; always save at the end or on Ctrl-C."""
        try:
            ret = super().run(*args, **kwargs)
        except KeyboardInterrupt:
            # Save what we have, then propagate the interrupt.
            self._save()
            raise
        self._save()
        return ret

    def __repr__(self):
        return ('<Save %s>' % repr(self.trainer))
class TFElectraForPreTraining(metaclass=DummyObject):
    """Placeholder for the real model when the TensorFlow backend is missing.

    Transformers-style auto-generated dummy; the availability check (and
    the resulting error) is delegated to requires_backends.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True):
    """Load weights from *checkpoint_path* into *model* in place.

    use_ema selects the EMA weights when the checkpoint contains them;
    strict is forwarded to load_state_dict.
    """
    model.load_state_dict(load_state_dict(checkpoint_path, use_ema), strict=strict)
def main():
    """Trim each wav in --samples_dir to the target length from --prompts_description."""
    args = get_args()
    tgt_dir = pathlib.Path(args.out_dir)
    tgt_dir.mkdir(exist_ok=True, parents=True)
    (total_files, sufficiently_long) = (0, 0)
    # The description JSON maps prompt name -> sequence whose first item is
    # the target length (only index [0] is used here).
    with open(args.prompts_description, 'r') as f:
        description = json.loads(f.read())
    for src_f in pathlib.Path(args.samples_dir).glob('*.wav'):
        # Prompt name is the filename part before the first '__'.
        name_prompt = src_f.with_suffix('').name.split('__')[0]
        assert (name_prompt in description), f'Cannot find {name_prompt}!'
        target_length = description[name_prompt][0]
        tgt_f = (tgt_dir / src_f.name)
        # cut() (defined elsewhere) writes the trimmed file; its return is
        # treated as truthy when the source was long enough -- confirm.
        is_long_enough = cut(src_f, tgt_f, target_length)
        sufficiently_long += is_long_enough
        if (not is_long_enough):
            print(f'{src_f} is not long enough')
        total_files += 1
    print(f'Total files: {total_files}; sufficiently long: {sufficiently_long}')
def get_scheduler(optimizer, n_iter_per_epoch, args):
    """Build the LR scheduler; only cosine-family schedules are supported."""
    if 'cosine' not in args.lr_scheduler:
        raise NotImplementedError('scheduler {} not supported'.format(args.lr_scheduler))
    # Durations are expressed in iterations, not epochs.
    warm_steps = args.warmup_epoch * n_iter_per_epoch
    cos_steps = (args.epochs - args.warmup_epoch) * n_iter_per_epoch
    return WarmUpCosineAnnealingLR(optimizer=optimizer, warm_multiplier=args.warmup_multiplier, warm_duration=warm_steps, cos_duration=cos_steps, eta_min=1e-06)
_registry.register_trainer(name='ddppo')
class DDPPOTrainer(PPOTrainer):
SHORT_ROLLOUT_THRESHOLD: float = 0.25
def __init__(self, config=None):
    """Prefer the config stored in an interrupted-run checkpoint, if any."""
    resumed = load_interrupted_state()
    if resumed is not None:
        config = resumed['config']
    super().__init__(config)
def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:
    """Build the policy network and wrap it in a DDPPO agent.

    Args:
        ppo_cfg: the RL.PPO sub-config (hyperparameters for the agent).
    """
    logger.add_filehandler(self.config.LOG_FILE)
    policy = baseline_registry.get_policy(self.config.RL.POLICY.name)
    # Bug fix: the keyword was garbled as `rsetup_actor_crinn_type`, which
    # would raise TypeError; the policy constructor expects `rnn_type`.
    self.actor_critic = policy(observation_space=self.envs.observation_spaces[0], action_space=self.envs.action_spaces[0], hidden_size=ppo_cfg.hidden_size, rnn_type=self.config.RL.DDPPO.rnn_type, num_recurrent_layers=self.config.RL.DDPPO.num_recurrent_layers, backbone=self.config.RL.DDPPO.backbone, force_blind_policy=self.config.FORCE_BLIND_POLICY)
    self.actor_critic.to(self.device)
    if (self.config.RL.DDPPO.pretrained_encoder or self.config.RL.DDPPO.pretrained):
        pretrained_state = torch.load(self.config.RL.DDPPO.pretrained_weights, map_location='cpu')
        if self.config.RL.DDPPO.pretrained:
            # Full checkpoint: strip the leading 'actor_critic.' prefix.
            self.actor_critic.load_state_dict({k[len('actor_critic.'):]: v for (k, v) in pretrained_state['state_dict'].items()})
        elif self.config.RL.DDPPO.pretrained_encoder:
            # Encoder-only checkpoint: load just the visual-encoder weights.
            prefix = 'actor_critic.net.visual_encoder.'
            self.actor_critic.net.visual_encoder.load_state_dict({k[len(prefix):]: v for (k, v) in pretrained_state['state_dict'].items() if k.startswith(prefix)})
    if (not self.config.RL.DDPPO.train_encoder):
        # Freeze the visual encoder.
        self._static_encoder = True
        for param in self.actor_critic.net.visual_encoder.parameters():
            param.requires_grad_(False)
    if self.config.RL.DDPPO.reset_critic:
        nn.init.orthogonal_(self.actor_critic.critic.fc.weight)
        nn.init.constant_(self.actor_critic.critic.fc.bias, 0)
    self.agent = DDPPO(actor_critic=self.actor_critic, clip_param=ppo_cfg.clip_param, ppo_epoch=ppo_cfg.ppo_epoch, num_mini_batch=ppo_cfg.num_mini_batch, value_loss_coef=ppo_cfg.value_loss_coef, entropy_coef=ppo_cfg.entropy_coef, lr=ppo_cfg.lr, eps=ppo_cfg.eps, max_grad_norm=ppo_cfg.max_grad_norm, use_normalized_advantage=ppo_cfg.use_normalized_advantage)
def train(self, ckpt_path='', ckpt=(- 1), start_updates=0) -> None:
    """Main DD-PPO training loop.

    Initializes torch.distributed via a SLURM-aware rendezvous, builds the
    vectorized environments and the agent, then alternates rollout
    collection with PPO updates. Logging, TensorBoard writing, and
    checkpointing happen on rank 0 only.

    Args:
        ckpt_path: checkpoint file to resume from (used when ``ckpt != -1``).
        ckpt: checkpoint index to resume at; -1 starts from scratch.
        start_updates: update count at the resumed checkpoint, used to
            reconstruct the environment-step counter.
    """
    # ---- distributed initialization -------------------------------------
    (self.local_rank, tcp_store) = init_distrib_slurm(self.config.RL.DDPPO.distrib_backend)
    add_signal_handlers()
    # Shared counter: workers that finish their rollout bump it so that
    # stragglers can cut their rollout short (see the inner step loop).
    num_rollouts_done_store = distrib.PrefixStore('rollout_tracker', tcp_store)
    num_rollouts_done_store.set('num_done', '0')
    self.world_rank = distrib.get_rank()
    self.world_size = distrib.get_world_size()
    self.config.defrost()
    self.config.TORCH_GPU_ID = self.local_rank
    self.config.SIMULATOR_GPU_ID = self.local_rank
    # Unique seed per worker so environments do not replay each other.
    self.config.TASK_CONFIG.SEED += (self.world_rank * self.config.NUM_PROCESSES)
    self.config.freeze()
    random.seed(self.config.TASK_CONFIG.SEED)
    np.random.seed(self.config.TASK_CONFIG.SEED)
    torch.manual_seed(self.config.TASK_CONFIG.SEED)
    if torch.cuda.is_available():
        self.device = torch.device('cuda', self.local_rank)
        torch.cuda.set_device(self.device)
    else:
        self.device = torch.device('cpu')
    self.envs = construct_envs(self.config, get_env_class(self.config.ENV_NAME), workers_ignore_signals=True)
    ppo_cfg = self.config.RL.PPO
    if ((not os.path.isdir(self.config.CHECKPOINT_FOLDER)) and (self.world_rank == 0)):
        os.makedirs(self.config.CHECKPOINT_FOLDER)
    self._setup_actor_critic_agent(ppo_cfg)
    self.agent.init_distributed(find_unused_params=True)
    if (self.world_rank == 0):
        logger.info('agent number of trainable parameters: {}'.format(sum((param.numel() for param in self.agent.parameters() if param.requires_grad))))
    # ---- rollout storage -------------------------------------------------
    observations = self.envs.reset()
    batch = batch_obs(observations, device=self.device)
    obs_space = self.envs.observation_spaces[0]
    if self._static_encoder:
        # Frozen visual encoder: store precomputed features in the rollout
        # buffer instead of re-encoding raw observations every update.
        self._encoder = self.actor_critic.net.visual_encoder
        obs_space = SpaceDict({'visual_features': spaces.Box(low=np.finfo(np.float32).min, high=np.finfo(np.float32).max, shape=self._encoder.output_shape, dtype=np.float32), **obs_space.spaces})
        with torch.no_grad():
            batch['visual_features'] = self._encoder(batch)
    rollouts = RolloutStorage(ppo_cfg.num_steps, self.envs.num_envs, obs_space, self.envs.action_spaces[0], ppo_cfg.hidden_size, num_recurrent_layers=self.actor_critic.net.num_recurrent_layers)
    rollouts.to(self.device)
    for sensor in rollouts.observations:
        rollouts.observations[sensor][0].copy_(batch[sensor])
    # Drop references; the data now lives in the rollout buffer.
    batch = None
    observations = None
    current_episode_reward = torch.zeros(self.envs.num_envs, 1, device=self.device)
    running_episode_stats = dict(count=torch.zeros(self.envs.num_envs, 1, device=self.device), reward=torch.zeros(self.envs.num_envs, 1, device=self.device))
    window_episode_stats = defaultdict((lambda : deque(maxlen=ppo_cfg.reward_window_size)))
    t_start = time.time()
    env_time = 0
    pth_time = 0
    count_steps = 0
    count_checkpoints = 0
    start_update = 0
    prev_time = 0
    lr_scheduler = LambdaLR(optimizer=self.agent.optimizer, lr_lambda=(lambda x: linear_decay(x, self.config.NUM_UPDATES)))
    # ---- resume path 1: interrupted (requeued) run restores full state ---
    interrupted_state = load_interrupted_state()
    if (interrupted_state is not None):
        self.agent.load_state_dict(interrupted_state['state_dict'])
        self.agent.optimizer.load_state_dict(interrupted_state['optim_state'])
        lr_scheduler.load_state_dict(interrupted_state['lr_sched_state'])
        requeue_stats = interrupted_state['requeue_stats']
        env_time = requeue_stats['env_time']
        pth_time = requeue_stats['pth_time']
        count_steps = requeue_stats['count_steps']
        count_checkpoints = requeue_stats['count_checkpoints']
        start_update = requeue_stats['start_update']
        prev_time = requeue_stats['prev_time']
    # ---- resume path 2: explicit checkpoint (timing stats approximate) ---
    if (ckpt != (- 1)):
        logger.info(f'Resuming runs at checkpoint {ckpt}. Timing statistics are not tracked properly.')
        assert ((ppo_cfg.use_linear_lr_decay is False) and (ppo_cfg.use_linear_clip_decay is False)), 'Resuming with decay not supported'
        count_checkpoints = (ckpt + 1)
        # Reconstruct the env-step counter from the update count.
        count_steps = ((start_updates * ppo_cfg.num_steps) * self.config.NUM_PROCESSES)
        ckpt_dict = self.load_checkpoint(ckpt_path, map_location='cpu')
        self.agent.load_state_dict(ckpt_dict['state_dict'])
        if ('optim_state' in ckpt_dict):
            self.agent.optimizer.load_state_dict(ckpt_dict['optim_state'])
        else:
            logger.warn('No optimizer state loaded, results may be funky')
        if (('extra_state' in ckpt_dict) and ('step' in ckpt_dict['extra_state'])):
            count_steps = ckpt_dict['extra_state']['step']
    # Only rank 0 writes TensorBoard; other ranks get a no-op context.
    with (TensorboardWriter(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs) if (self.world_rank == 0) else contextlib.suppress()) as writer:
        for update in range(start_update, self.config.NUM_UPDATES):
            if ppo_cfg.use_linear_lr_decay:
                lr_scheduler.step()
            if ppo_cfg.use_linear_clip_decay:
                self.agent.clip_param = (ppo_cfg.clip_param * linear_decay(update, self.config.NUM_UPDATES))
            # SIGTERM/requeue handling: save state (rank 0) and exit cleanly.
            if EXIT.is_set():
                self.envs.close()
                if (REQUEUE.is_set() and (self.world_rank == 0)):
                    requeue_stats = dict(env_time=env_time, pth_time=pth_time, count_steps=count_steps, count_checkpoints=count_checkpoints, start_update=update, prev_time=((time.time() - t_start) + prev_time))
                    save_interrupted_state(dict(state_dict=self.agent.state_dict(), optim_state=self.agent.optimizer.state_dict(), lr_sched_state=lr_scheduler.state_dict(), config=self.config, requeue_stats=requeue_stats))
                requeue_job()
                return
            # ---- rollout collection -------------------------------------
            count_steps_delta = 0
            self.agent.eval()
            for step in range(ppo_cfg.num_steps):
                (delta_pth_time, delta_env_time, delta_steps) = self._collect_rollout_step(rollouts, current_episode_reward, running_episode_stats)
                pth_time += delta_pth_time
                env_time += delta_env_time
                count_steps_delta += delta_steps
                # Preemption: once sync_frac of the workers have finished a
                # full rollout, stragglers past SHORT_ROLLOUT_THRESHOLD stop.
                if ((step >= (ppo_cfg.num_steps * self.SHORT_ROLLOUT_THRESHOLD)) and (int(num_rollouts_done_store.get('num_done')) > (self.config.RL.DDPPO.sync_frac * self.world_size))):
                    break
            num_rollouts_done_store.add('num_done', 1)
            # ---- PPO update ---------------------------------------------
            self.agent.train()
            if self._static_encoder:
                self._encoder.eval()
            (delta_pth_time, value_loss, action_loss, dist_entropy) = self._update_agent(ppo_cfg, rollouts)
            pth_time += delta_pth_time
            # ---- cross-worker stat aggregation --------------------------
            stats_ordering = list(sorted(running_episode_stats.keys()))
            stats = torch.stack([running_episode_stats[k] for k in stats_ordering], 0)
            distrib.all_reduce(stats)
            for (i, k) in enumerate(stats_ordering):
                window_episode_stats[k].append(stats[i].clone())
            stats = torch.tensor([value_loss, action_loss, count_steps_delta], device=self.device)
            distrib.all_reduce(stats)
            count_steps += stats[2].item()
            # ---- rank-0 logging / checkpointing -------------------------
            if (self.world_rank == 0):
                num_rollouts_done_store.set('num_done', '0')
                losses = [(stats[0].item() / self.world_size), (stats[1].item() / self.world_size)]
                # Per-window deltas of the windowed running stats.
                deltas = {k: ((v[(- 1)] - v[0]).sum().item() if (len(v) > 1) else v[0].sum().item()) for (k, v) in window_episode_stats.items()}
                deltas['count'] = max(deltas['count'], 1.0)
                writer.add_scalar('reward', (deltas['reward'] / deltas['count']), count_steps)
                writer.add_scalar('success', (deltas['success'] / deltas['count']), count_steps)
                metrics = {k: (v / deltas['count']) for (k, v) in deltas.items() if (k not in {'reward', 'count'})}
                if (len(metrics) > 0):
                    writer.add_scalars('metrics', metrics, count_steps)
                writer.add_scalars('losses', {k: l for (l, k) in zip(losses, ['value', 'policy'])}, count_steps)
                if ((update > 0) and ((update % self.config.LOG_INTERVAL) == 0)):
                    logger.info('update: {}\tfps: {:.3f}\t'.format(update, (count_steps / ((time.time() - t_start) + prev_time))))
                    logger.info('update: {}\tenv-time: {:.3f}s\tpth-time: {:.3f}s\tframes: {}'.format(update, env_time, pth_time, count_steps))
                    logger.info('Average window size: {} {}'.format(len(window_episode_stats['count']), ' '.join(('{}: {:.3f}'.format(k, (v / deltas['count'])) for (k, v) in deltas.items() if (k != 'count')))))
                if ((update % self.config.CHECKPOINT_INTERVAL) == 0):
                    self.save_checkpoint(f'ckpt.{count_checkpoints}.pth', dict(step=count_steps))
                    count_checkpoints += 1
    self.envs.close()
def test_item_based_cf():
    """Smoke test: build an ItemCF_IUF similarity model from the sample
    dataset and print the 4 items most similar to item '2'."""
    sim_model = ItemCFBasedSimilarity(
        data_file='../data/Sports_and_Outdoors_sample.txt',
        similarity_path='../data/item_cf_iuf_similarity.pkl',
        model_type='ItemCF_IUF',
    )
    neighbours = sim_model.most_similar('2', top_k=4)
    print(neighbours)
# BUG FIX: `register('hard')` returns a decorator; calling it as a bare
# statement discarded the result, so the class was never registered under
# the 'hard' key. Apply it as a decorator instead.
@_TFIntersection.register('hard')
class TFHardIntersection(_TFIntersection):
    """Hard box intersection: element-wise min/max of the two boxes'
    coordinates (delegates to ``tf_hard_intersection``)."""

    def __call__(self, left: TFTBoxTensor, right: TFTBoxTensor) -> TFTBoxTensor:
        """Return the hard intersection box of ``left`` and ``right``."""
        return tf_hard_intersection(left, right)
class Ui_Form(object):
    """Hand-written Qt layout for the face-manipulation editor window.

    Builds three image views (ground truth / edit / result), toolbar and
    segmentation-label buttons, a scrollable column of style-image
    thumbnails, checkboxes selecting which face components take the
    reference style, and sliders for brush size and blending alpha.

    ``Form`` is the top-level widget and also provides the slot methods the
    widgets connect to (``open``, ``save_img``, ``switch_labels``, ...).
    Relies on module-level layout constants (``Lb_x``, ``Lb_y``, ``Tb_x``,
    ...) and the ``number_object`` / ``number_color`` lookup tables.
    """

    # Button captions for the 19 segmentation labels, in label-index order.
    _LABEL_NAMES = ('background', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye',
                    'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip',
                    'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth')

    def setupUi(self, Form):
        """Create every child widget of ``Form`` and wire up the signals."""
        Form.setObjectName('Form')
        Form.resize(1920, 1080)
        self.add_brush_widgets(Form)
        self.add_top_buttons(Form)
        self.add_label_buttons(Form)
        self.add_tool_buttons(Form)
        self.add_checkbox_widgets(Form)
        self.add_input_img_button(Form)
        # Middle view: the editable input image.
        self.graphicsView = QtWidgets.QGraphicsView(Form)
        self.graphicsView.setGeometry(QtCore.QRect(652, 140, 518, 518))
        self.graphicsView.setObjectName('graphicsView')
        # Right view: the generated result.
        self.graphicsView_2 = QtWidgets.QGraphicsView(Form)
        self.graphicsView_2.setGeometry(QtCore.QRect(1204, 140, 518, 518))
        self.graphicsView_2.setObjectName('graphicsView_2')
        # Left view: the ground-truth image.
        self.graphicsView_GT = QtWidgets.QGraphicsView(Form)
        self.graphicsView_GT.setGeometry(QtCore.QRect(100, 140, 518, 518))
        self.graphicsView_GT.setObjectName('graphicsView_GT')
        # Pop-up dialogs showing the style / snapshot reference images.
        self.referDialog = ReferenceDialog(self)
        self.referDialog.setObjectName('Reference Dialog')
        self.referDialog.setWindowTitle('Style Image')
        self.referDialogImage = QtWidgets.QLabel(self.referDialog)
        self.referDialogImage.setFixedSize(512, 512)
        self.snapshotDialog = SnapshotDialog(self)
        self.snapshotDialog.setObjectName('Snapshot Dialog')
        self.snapshotDialog.setWindowTitle('Reference Image:')
        self.snapshotDialogImage = QtWidgets.QLabel(self.snapshotDialog)
        self.snapshotDialogImage.setFixedSize(512, 512)
        self.add_intermediate_results_button(Form)
        self.add_alpha_bar(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible texts (kept together for translation)."""
        Form.setWindowTitle(_translate('Form', "Let's Party Face Manipulation"))
        self.pushButton.setText(_translate('Form', 'Open Image'))
        self.pushButton_2.setText(_translate('Form', 'Mask'))
        self.pushButton_3.setText(_translate('Form', 'Sketches'))
        self.pushButton_4.setText(_translate('Form', 'Color'))
        self.saveImg.setText(_translate('Form', 'Save Img'))

    def add_alpha_bar(self, Form):
        """Label + slider controlling the style-blend alpha (0.0 .. 1.0,
        mapped to slider range 0..20)."""
        self.alphaLabel = QtWidgets.QLabel(Form)
        self.alphaLabel.setObjectName('alphaLabel')
        self.alphaLabel.setGeometry(QtCore.QRect((((Lb_x + (10 * Lb_row_shift)) + (10 * Lb_width)) + 40), Lb_y, 150, 20))
        self.alphaLabel.setText('Alpha: 1.0')
        font = self.brushsizeLabel.font()
        font.setPointSize(10)
        font.setBold(True)
        self.alphaLabel.setFont(font)
        self.alphaSlider = QtWidgets.QSlider(Form)
        self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.alphaSlider.setGeometry(QtCore.QRect((((Lb_x + (10 * Lb_row_shift)) + (10 * Lb_width)) + 150), Lb_y, 225, 10))
        self.alphaSlider.setObjectName('alphaSlider')
        self.alphaSlider.setMinimum(0)
        self.alphaSlider.setMaximum(20)
        self.alphaSlider.setValue(20)  # start fully blended (alpha = 1.0)
        self.alphaSlider.valueChanged.connect(Form.change_alpha_value)

    def add_intermediate_results_button(self, Form):
        """Horizontal scroll strip of 15 snapshot slots: a row of result
        thumbnails (clickable, opens the snapshot dialog) above a row of
        matching mask thumbnails."""
        self.snap_scrollArea = QtWidgets.QScrollArea(Form)
        self.snap_scrollArea.setGeometry(QtCore.QRect(100, ((((Lb_y + Lb_height) + Lb_col_shift) + Lb_height) + 30), 1622, 250))
        self.snap_scrollArea.setWidgetResizable(True)
        self.snap_scrollArea.setObjectName('snap_scrollArea')
        self.snap_scrollArea.setAlignment(Qt.AlignCenter)
        self.snap_scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.snap_scrollAreaWidgetContents = QtWidgets.QWidget()
        self.snap_scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1622, 250))
        self.snap_scrollAreaWidgetContents.setObjectName('snap_scrollAreaWidgetContents')
        self.snap_gridlLayout = QtWidgets.QGridLayout(self.snap_scrollAreaWidgetContents)
        self.snap_gridlLayout.setSpacing(20)
        self.snap_gridlLayout.setAlignment(Qt.AlignLeft)
        self.snap_style_button_list = []
        self.mask_snap_style_button_list = []
        for i in range(15):
            # Row 1: result snapshot (clickable).
            snap_style_button = QtWidgets.QPushButton()
            snap_style_button.setFixedSize(100, 100)
            snap_style_button.setStyleSheet('background-color: transparent')
            snap_style_button.setIcon(QIcon())  # icon filled in later
            snap_style_button.setIconSize(QSize(100, 100))
            snap_style_button.clicked.connect(partial(self.open_snapshot_dialog, i))
            self.snap_style_button_list.append(snap_style_button)
            self.snap_gridlLayout.addWidget(snap_style_button, 1, i)
            # Row 0: corresponding mask snapshot (display only).
            mask_snap_style_button = QtWidgets.QPushButton()
            mask_snap_style_button.setFixedSize(100, 100)
            mask_snap_style_button.setStyleSheet('background-color: transparent')
            mask_snap_style_button.setIcon(QIcon())
            mask_snap_style_button.setIconSize(QSize(100, 100))
            self.mask_snap_style_button_list.append(mask_snap_style_button)
            self.snap_gridlLayout.addWidget(mask_snap_style_button, 0, i)
        self.snap_scrollArea.setWidget(self.snap_scrollAreaWidgetContents)

    def add_input_img_button(self, Form):
        """Thumbnail button showing the current input image; clicking it
        restores the full feature of the original input (index 0)."""
        self.input_img_button = QtWidgets.QPushButton(Form)
        self.input_img_button.setGeometry(QtCore.QRect(1770, 15, 100, 100))
        self.input_img_button.setStyleSheet('background-color: transparent')
        self.input_img_button.setFixedSize(100, 100)
        self.input_img_button.setIcon(QIcon(None))
        self.input_img_button.setIconSize(QSize(100, 100))
        self.input_img_button.clicked.connect(partial(Form.update_entire_feature, 0))

    def add_checkbox_widgets(self, Form):
        """Checkbox grid choosing which face components take the reference
        style, plus an 'ALL' master checkbox."""
        self.checkBoxGroupBox = QtWidgets.QGroupBox('Replace Style of Components', Form)
        self.checkBoxGroupBox.setGeometry(QtCore.QRect(920, 10, 800, 100))
        layout = QtWidgets.QGridLayout()
        self.checkBoxGroup = QtWidgets.QButtonGroup(Form)
        self.checkBoxGroup.setExclusive(False)
        for (i, j) in enumerate(number_object):
            cb = QtWidgets.QCheckBox(number_object[j])
            self.checkBoxGroup.addButton(cb, i)
            layout.addWidget(cb, (i // 10), (i % 10))
        # `i` deliberately carries over from the loop: the ALL box goes in
        # the next grid cell after the last component checkbox.
        cb = QtWidgets.QCheckBox('ALL')
        self.checkBoxGroup.addButton(cb)
        layout.addWidget(cb, ((i + 1) // 10), ((i + 1) % 10))
        self.checkBoxGroupBox.setLayout(layout)
        # Everything starts selected.
        for i in range(19):
            self.checkBoxGroup.button(i).setChecked(True)
        # Cache only the 19 component boxes; the ALL box is excluded.
        self.checkbox_status = [cb.isChecked() for cb in self.checkBoxGroup.buttons()][:19]
        self.checkBoxGroup.buttonToggled.connect(self.cb_event)

    def add_brush_widgets(self, Form):
        """Logo, style-image picker column, and the brush-size label/slider."""
        # NOTE(review): requires `self` to be a QWidget — holds when Ui_Form
        # is mixed into the window class; confirm against the caller.
        KaustLogo = QtWidgets.QLabel(self)
        KaustLogo.setPixmap(QPixmap('icons/1999780_200.png').scaled(60, 60))
        KaustLogo.setGeometry(QtCore.QRect(int(((Lb_x - (1 * Lb_row_shift)) - 60)), 25, 80, 80))
        self.add_style_imgs_buttons(Form)
        self.brushsizeLabel = QtWidgets.QLabel(Form)
        self.brushsizeLabel.setObjectName('brushsizeLabel')
        self.brushsizeLabel.setGeometry(QtCore.QRect(Tb_x, 25, 150, 20))
        # NOTE(review): label says 6 but the slider starts at 8 — looks
        # inconsistent; confirm the intended initial brush size.
        self.brushsizeLabel.setText('Brush size: 6')
        font = self.brushsizeLabel.font()
        font.setPointSize(10)
        font.setBold(True)
        self.brushsizeLabel.setFont(font)
        self.brushSlider = QtWidgets.QSlider(Form)
        self.brushSlider.setOrientation(QtCore.Qt.Horizontal)
        self.brushSlider.setGeometry(QtCore.QRect((Tb_x + 150), 25, 600, 10))
        self.brushSlider.setObjectName('brushSlider')
        self.brushSlider.setMinimum(1)
        self.brushSlider.setMaximum(100)
        self.brushSlider.setValue(8)
        self.brushSlider.valueChanged.connect(Form.change_brush_size)

    def add_top_buttons(self, Form):
        """Row of main action buttons (Open / Mask / Sketches / Color / Save).
        Captions are assigned in retranslateUi."""
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(Tb_x, Tb_y, Tb_width, Tb_height))
        self.pushButton.setObjectName('pushButton')
        self.pushButton.clicked.connect(Form.open)
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(((Tb_x + (1 * Tb_row_shift)) + (1 * Tb_width)), Tb_y, Tb_width, Tb_height))
        self.pushButton_2.setObjectName('pushButton_2')
        self.pushButton_2.clicked.connect(Form.style_linear_interpolation)
        # pushButton_3 / pushButton_4 are created but not yet connected.
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(((Tb_x + (2 * Tb_row_shift)) + (2 * Tb_width)), Tb_y, Tb_width, Tb_height))
        self.pushButton_3.setObjectName('pushButton_3')
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(((Tb_x + (3 * Tb_row_shift)) + (3 * Tb_width)), Tb_y, Tb_width, Tb_height))
        self.pushButton_4.setObjectName('pushButton_4')
        self.saveImg = QtWidgets.QPushButton(Form)
        self.saveImg.setGeometry(QtCore.QRect(((Tb_x + (4 * Tb_row_shift)) + (4 * Tb_width)), Tb_y, Tb_width, Tb_height))
        self.saveImg.setObjectName('saveImg')
        self.saveImg.clicked.connect(Form.save_img)
        self.retranslateUi(Form)

    def add_tool_buttons(self, Form):
        """Vertical icon toolbar: new / open / fill / brush / rectangle /
        undo / save."""
        tool_x = int(((Lb_x - (1 * Lb_row_shift)) - 60))  # shared x column
        self.newButton = QtWidgets.QPushButton(Form)
        self.newButton.setGeometry(QtCore.QRect(tool_x, 140, 60, 60))
        self.newButton.setObjectName('openButton')
        self.newButton.setIcon(QIcon('icons/add_new_document.png'))
        self.newButton.setIconSize(QSize(60, 60))
        self.newButton.clicked.connect(Form.init_screen)
        self.openButton = QtWidgets.QPushButton(Form)
        self.openButton.setGeometry(QtCore.QRect(tool_x, ((140 + (60 * 1)) + (10 * 1)), 60, 60))
        self.openButton.setObjectName('openButton')
        self.openButton.setIcon(QIcon('icons/open.png'))
        self.openButton.setIconSize(QSize(60, 60))
        self.openButton.clicked.connect(Form.open)
        self.fillButton = QtWidgets.QPushButton(Form)
        self.fillButton.setGeometry(QtCore.QRect(tool_x, ((140 + (60 * 2)) + (10 * 2)), 60, 60))
        self.fillButton.setObjectName('fillButton')
        self.fillButton.setIcon(QIcon('icons/paint_can.png'))
        self.fillButton.setIconSize(QSize(60, 60))
        self.fillButton.clicked.connect(partial(Form.mode_select, 2))
        self.brushButton = QtWidgets.QPushButton(Form)
        self.brushButton.setGeometry(QtCore.QRect(tool_x, ((140 + (60 * 3)) + (10 * 3)), 60, 60))
        self.brushButton.setObjectName('brushButton')
        self.brushButton.setIcon(QIcon('icons/paint_brush.png'))
        self.brushButton.setIconSize(QSize(60, 60))
        # Highlighted: brush is the initially active mode.
        self.brushButton.setStyleSheet('background-color: #85adad')
        self.brushButton.clicked.connect(partial(Form.mode_select, 0))
        self.recButton = QtWidgets.QPushButton(Form)
        self.recButton.setGeometry(QtCore.QRect(tool_x, ((140 + (60 * 4)) + (10 * 4)), 60, 60))
        self.recButton.setObjectName('undolButton')
        self.recButton.setIcon(QIcon('icons/brush_square.png'))
        self.recButton.setIconSize(QSize(60, 60))
        self.recButton.clicked.connect(partial(Form.mode_select, 1))
        self.undoButton = QtWidgets.QPushButton(Form)
        self.undoButton.setGeometry(QtCore.QRect(tool_x, ((140 + (60 * 5)) + (10 * 5)), 60, 60))
        self.undoButton.setObjectName('undolButton')
        self.undoButton.setIcon(QIcon('icons/undo.png'))
        self.undoButton.setIconSize(QSize(60, 60))
        self.undoButton.clicked.connect(Form.undo)
        self.saveButton = QtWidgets.QPushButton(Form)
        self.saveButton.setGeometry(QtCore.QRect(tool_x, ((140 + (60 * 6)) + (10 * 6)), 60, 60))
        self.saveButton.setObjectName('saveButton')
        self.saveButton.setIcon(QIcon('icons/save.png'))
        self.saveButton.setIconSize(QSize(60, 60))
        self.saveButton.clicked.connect(Form.save_img)

    def add_style_imgs_buttons(self, Form):
        """Scrollable column of style-image thumbnails; the first entry
        loads the partial average feature, the rest load each image under
        imgs/style_imgs_test/."""
        self.scrollArea = QtWidgets.QScrollArea(Form)
        self.scrollArea.setGeometry(QtCore.QRect(1756, 140, 140, 512))
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName('scrollArea')
        self.scrollArea.setAlignment(Qt.AlignCenter)
        self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 140, 512))
        self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')
        verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
        verticalLayout.setContentsMargins(11, 11, 11, 11)
        verticalLayout.setSpacing(6)
        img_path_list = glob.glob('imgs/style_imgs_test/*.jpg')
        img_path_list.sort()
        # First button: average/random style.
        style_button = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
        style_button.setFixedSize(100, 100)
        style_button.setIcon(QIcon('icons/random.png'))
        style_button.setIconSize(QSize(100, 100))
        style_button.clicked.connect(Form.load_partial_average_feature)
        verticalLayout.addWidget(style_button)
        for img_path in img_path_list:
            style_button = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
            style_button.setFixedSize(100, 100)
            style_button.setIcon(QIcon(img_path))
            style_button.setIconSize(QSize(100, 100))
            style_button.clicked.connect(partial(Form.update_entire_feature, img_path))
            # BUG FIX: the button was previously added to the layout twice.
            verticalLayout.addWidget(style_button)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)

    def add_label_buttons(self, Form):
        """Create the colour swatch plus the 19 segmentation-label buttons
        (two rows of up to 10); clicking a button selects that label."""
        # Swatch showing the active label colour; starts on label 1 (skin).
        self.color_Button = QtWidgets.QPushButton(Form)
        self.color_Button.setGeometry(QtCore.QRect(int(((Lb_x - (1 * Lb_row_shift)) - 60)), Lb_y, 60, 60))
        # NOTE: objectName duplicates labelButton_0's (kept from original).
        self.color_Button.setObjectName('labelButton_0')
        self.color_Button.setStyleSheet(('background-color: %s;' % number_color[1]))
        # One button per label; attributes keep their historic names
        # (self.labelButton_0 ... self.labelButton_18) for external callers.
        for idx, label_name in enumerate(self._LABEL_NAMES):
            (row, col) = divmod(idx, 10)
            button = QtWidgets.QPushButton(Form)
            button.setGeometry(QtCore.QRect((Lb_x + (col * (Lb_row_shift + Lb_width))), (Lb_y + (row * (Lb_height + Lb_col_shift))), Lb_width, Lb_height))
            button.setObjectName('labelButton_%d' % idx)
            button.setText(_translate('Form', label_name))
            button.setStyleSheet(('background-color: %s;' % number_color[idx]) + ' color: black')
            button.clicked.connect(partial(Form.switch_labels, idx))
            setattr(self, 'labelButton_%d' % idx, button)

    def cb_event(self, id, ifchecked):
        """Slot for buttonToggled: the ALL box drives every other box, then
        the cached status list is refreshed."""
        if (id.text() == 'ALL'):
            for cb in self.checkBoxGroup.buttons():
                cb.setChecked(ifchecked)
        self.change_cb_state()

    def change_cb_state(self):
        """Cache the checked state of the 19 component checkboxes (the ALL
        box, added last, is excluded by the slice)."""
        checkbox_status = [cb.isChecked() for cb in self.checkBoxGroup.buttons()]
        self.checkbox_status = checkbox_status[:19]
def merge_label(js1, js2, output_path):
    """Merge the label sets of two JSON annotation files sample-by-sample.

    Both files must hold a top-level ``data`` list of equal length whose
    entries carry a comma-separated ``labels`` string. Sample i of the
    output gets the union of both files' labels for sample i; the merged
    file (based on file 1's records) is written to ``output_path`` and the
    per-file / merged label counts are printed.

    Args:
        js1: path of the first (primary) JSON file; its records are kept.
        js2: path of the second JSON file; only its labels are merged in.
        output_path: where to write the merged JSON.
    """
    def _load_data(path):
        # Both inputs are utf8-encoded JSON with a top-level 'data' list.
        with open(path, 'r', encoding='utf8') as fp:
            return json.load(fp)['data']

    data1 = _load_data(js1)
    data2 = _load_data(js2)
    total_label_cnt = 0
    for (i, sample) in enumerate(data1):
        labels1 = sample['labels'].split(',')
        labels2 = data2[i]['labels'].split(',')
        # Union of both label sets, deduplicated exactly once (the original
        # re-applied set() on an already-unique list, twice). Sorted so the
        # output is deterministic; the previous set order was unspecified.
        merged = sorted(set(labels1 + labels2))
        data1[i]['labels'] = ','.join(merged)
        total_label_cnt += len(merged)
    # Write with an explicit encoding to match the inputs.
    with open(output_path, 'w', encoding='utf8') as f:
        json.dump({'data': data1}, f, indent=1)
    print('Input Json file 1 has {:d} labels'.format(count_label(js1)))
    print('Input Json file 2 has {:d} labels'.format(count_label(js2)))
    print('Merged Json file has {:d} labels'.format(total_label_cnt))
def main():
    """Run text-detection inference over a list of images.

    For every image listed in ``img_list`` (paths relative to ``img_root``),
    runs the detector, writes a per-image bbox text file to
    ``<out-dir>/out_txt_dir`` and a visualization to
    ``<out-dir>/out_vis_dir``.

    Raises:
        ValueError: if ``--score-thr`` is not strictly between 0 and 1.
        FileNotFoundError: if a listed image does not exist.
    """
    parser = ArgumentParser()
    parser.add_argument('img_root', type=str, help='Image root path')
    parser.add_argument('img_list', type=str, help='Image path list file')
    parser.add_argument('config', type=str, help='Config file')
    parser.add_argument('checkpoint', type=str, help='Checkpoint file')
    parser.add_argument('--score-thr', type=float, default=0.5, help='Bbox score threshold')
    parser.add_argument('--out-dir', type=str, default='./results', help='Dir to save visualize images and bbox')
    parser.add_argument('--device', default='cuda:0', help='Device used for inference.')
    args = parser.parse_args()
    # Validate explicitly instead of `assert`: asserts vanish under `python -O`.
    if not (0 < args.score_thr < 1):
        raise ValueError('--score-thr must be strictly between 0 and 1, got {}'.format(args.score_thr))
    model = init_detector(args.config, args.checkpoint, device=args.device)
    if hasattr(model, 'module'):
        # Unwrap (Distributed)DataParallel so we call the bare model.
        model = model.module
    out_vis_dir = osp.join(args.out_dir, 'out_vis_dir')
    mmcv.mkdir_or_exist(out_vis_dir)
    out_txt_dir = osp.join(args.out_dir, 'out_txt_dir')
    mmcv.mkdir_or_exist(out_txt_dir)
    lines = list_from_file(args.img_list)
    progressbar = ProgressBar(task_num=len(lines))
    for line in lines:
        progressbar.update()
        img_path = osp.join(args.img_root, line.strip())
        if not osp.exists(img_path):
            raise FileNotFoundError(img_path)
        result = model_inference(model, img_path)
        img_name = osp.basename(img_path)
        # Text results (filtered by the same threshold as the visualization).
        save_results(result, out_txt_dir, img_name, score_thr=args.score_thr)
        out_file = osp.join(out_vis_dir, img_name)
        kwargs_dict = {'score_thr': args.score_thr, 'show': False, 'out_file': out_file}
        model.show_result(img_path, result, **kwargs_dict)
    print(f'''
Inference done, and results saved in {args.out_dir}
''')
class LifeCycle(metaclass=ABCMeta):
    """Mixin describing the setup/teardown life cycle of a distributed
    torch training worker.

    Subclasses customize behavior via the ``setup_components`` /
    ``setup_ddp_components`` / ``shutdown`` hooks; the mixin handles the
    torch.distributed rendezvous and DataLoader sharding.
    """

    def setup(self, cores_per_node):
        """Limit torch intra-op threading to the cores allotted per node."""
        import torch
        torch.set_num_threads(cores_per_node)

    def setup_torch_distribute(self, tcp_store_host, tcp_store_port, world_rank, world_size):
        """Join the torch DDP process group, then run the DDP setup hook."""
        self._init_torch_ddp(tcp_store_host, tcp_store_port, world_rank, world_size)
        self.setup_ddp_components()

    def setup_torch_estimator(self, world_rank, world_size):
        """Record this worker's rank/size and run the non-DDP setup hook."""
        self.rank = world_rank
        self.size = world_size
        self.setup_components()

    def setup_components(self):
        """Hook: build model/optimizer/etc. Default: no-op."""
        pass

    def setup_ddp_components(self):
        """Hook: build DDP-wrapped components. Default: no-op."""
        pass

    def shutdown(self):
        """Hook: release resources at the end of training. Default: no-op."""
        pass

    def _init_torch_ddp(self, tcp_store_host, tcp_store_port, world_rank, world_size):
        """Initialize the gloo process group via a TCPStore rendezvous."""
        import torch.distributed as dist
        # Client-side store (is_master=False); -1 means world size unknown.
        client_store = dist.TCPStore(tcp_store_host, tcp_store_port, (- 1), False, timeout=dist.constants.default_pg_timeout)
        dist.init_process_group(backend='gloo', store=client_store, rank=world_rank, world_size=world_size)
        self.backend = 'torch-distributed'

    def with_sampler(self, loader, shuffle=True):
        """Re-create ``loader`` with a DistributedSampler so each worker
        sees a disjoint shard of the dataset."""
        self.logger.debug('Wrapping DistributedSampler on DataLoader')
        data_loader_args = {
            'dataset': loader.dataset,
            'batch_size': loader.batch_size,
            'shuffle': False,  # the sampler performs the (optional) shuffling
            'num_workers': loader.num_workers,
            'collate_fn': loader.collate_fn,
            'pin_memory': loader.pin_memory,
            'drop_last': loader.drop_last,
            'timeout': loader.timeout,
            'worker_init_fn': loader.worker_init_fn,
            'sampler': DistributedSampler(loader.dataset, num_replicas=self.size, rank=self.rank, shuffle=shuffle),
        }
        return DataLoader(**data_loader_args)

    # Declared @staticmethod: the function takes no self (the original plain
    # def was only callable via the class); class-level calls keep working.
    @staticmethod
    def should_wrap_dataloader(loader):
        """Return True iff ``loader`` is a map-style DataLoader — an
        IterableDataset cannot be wrapped with a DistributedSampler."""
        from torch.utils.data import DataLoader
        try:
            from torch.utils.data import IterableDataset
            not_iterable = (not isinstance(loader.dataset, IterableDataset))
        except Exception:
            # Old torch without IterableDataset: every Dataset is map-style.
            # BUG FIX: previously assigned the class object `LifeCycle`
            # (truthy but nonsensical) instead of True.
            not_iterable = True
        return (isinstance(loader, DataLoader) and not_iterable)
class TIMM(Backbone):
    """Detectron2-style backbone wrapper around timm feature extractors.

    Exposes the requested pyramid levels as ``layer{l}`` feature maps and
    records their channel counts / strides for downstream heads.
    """

    def __init__(self, base_name, out_levels, freeze_at=0, norm='FrozenBN', pretrained=False):
        super().__init__()
        # timm's out_indices are 0-based while out_levels are 1-based.
        out_indices = [(x - 1) for x in out_levels]
        if (base_name in model_params):
            # NOTE(review): pretrained is hard-coded to False on this branch,
            # ignoring the constructor argument — confirm intentional.
            self.base = create_timm_resnet(base_name, out_indices=out_indices, pretrained=False)
        elif (('eff' in base_name) or ('resnet' in base_name) or ('regnet' in base_name)):
            self.base = create_model(base_name, features_only=True, out_indices=out_indices, pretrained=pretrained)
        elif ('convnext' in base_name):
            # Larger ConvNeXt variants get a stronger stochastic-depth rate.
            drop_path_rate = (0.2 if (('tiny' in base_name) or ('small' in base_name)) else 0.3)
            self.base = create_model(base_name, features_only=True, out_indices=out_indices, pretrained=pretrained, drop_path_rate=drop_path_rate)
        else:
            assert 0, base_name
        feature_info = [dict(num_chs=f['num_chs'], reduction=f['reduction']) for (i, f) in enumerate(self.base.feature_info)]
        self._out_features = ['layer{}'.format(x) for x in out_levels]
        self._out_feature_channels = {'layer{}'.format(l): feature_info[(l - 1)]['num_chs'] for l in out_levels}
        self._out_feature_strides = {'layer{}'.format(l): feature_info[(l - 1)]['reduction'] for l in out_levels}
        self._size_divisibility = max(self._out_feature_strides.values())
        if ('resnet' in base_name):
            self.freeze(freeze_at)
        if (norm == 'FrozenBN'):
            # Replace all BN layers in-place with frozen (inference-stats) BN.
            self = FrozenBatchNorm2d.convert_frozen_batchnorm(self)

    def freeze(self, freeze_at=0):
        """Freeze the stem (freeze_at >= 1) and the first stage (freeze_at >= 2)."""
        if (freeze_at >= 1):
            print('Freezing', self.base.conv1)  # fixed typo: was 'Frezing'
            self.base.conv1 = freeze_module(self.base.conv1)
        if (freeze_at >= 2):
            print('Freezing', self.base.layer1)  # fixed typo: was 'Frezing'
            self.base.layer1 = freeze_module(self.base.layer1)

    def forward(self, x):
        """Run the backbone; return {'layer{l}': feature_map} for requested levels."""
        features = self.base(x)
        ret = {k: v for (k, v) in zip(self._out_features, features)}
        return ret

    def size_divisibility(self):
        # NOTE(review): Backbone subclasses usually declare this as a @property;
        # kept as a plain method since callers here may invoke it as one — confirm.
        return self._size_divisibility
class Parameters():
    """Bag of hyper-parameters for model construction and training.

    Pure data holder; ``to_string`` encodes the architecture-relevant subset
    as a filename-safe tag.
    """

    def __init__(self, input_shape, batch_size=64, num_epochs=400, num_classes=10, alpha=1.0, num_blocks=2, max_num_training_samples=None, weight_regularizer=0.0001, dropout=0, use_bias=False, pretrained_model_path=None, max_value=None, activity_regularizer=None, signed_input=False, working_dir=None, train_val_ratio=0.2, train_val_rand=1234, device=None):
        self.input_shape = input_shape
        self.alpha = alpha
        self.num_blocks = num_blocks
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.num_classes = num_classes
        self.max_num_training_samples = max_num_training_samples
        self.weight_regularizer = weight_regularizer
        self.activity_regularizer = activity_regularizer
        self.dropout = dropout
        self.use_bias = use_bias
        self.pretrained_model_path = pretrained_model_path
        self.max_value = max_value
        self.signed_input = signed_input
        self.working_dir = working_dir
        self.train_val_ratio = train_val_ratio
        self.train_val_rand = train_val_rand
        self.device = device

    def to_string(self):
        """Return a filename-safe tag encoding the hyper-parameters.

        Paths, train/val split settings and device are deliberately excluded.
        (The previous version also passed working_dir, train_val_ratio and
        train_val_rand to str.format(); the template has only 12 placeholders,
        so those extra arguments were silently ignored — they are dropped.)
        """
        return 'inputShape{}_alpha{}_numBlocks{}_batchSize{}_numEpochs{}_maxNumTrainingSamples{}_weightReg{}_dropout{}_useBias{}_maxValue{}_activityReg{}_signedInput{}'.format('-'.join([str(i) for i in self.input_shape]), self.alpha, self.num_blocks, self.batch_size, self.num_epochs, self.max_num_training_samples, self.weight_regularizer, self.dropout, self.use_bias, self.max_value, self.activity_regularizer, self.signed_input)
def construct_placeholders(num_classes):
    """Build the TF1 feed placeholders used by the training graph.

    Returns a dict with 'labels' (float one-hot targets), 'batch' (node ids),
    'dropout' (defaults to 0.0 when not fed) and 'batch_size'.
    """
    labels_ph = tf.placeholder(tf.float32, shape=(None, num_classes), name='labels')
    batch_ph = tf.placeholder(tf.int32, shape=None, name='batch1')
    dropout_ph = tf.placeholder_with_default(0.0, shape=(), name='dropout')
    batch_size_ph = tf.placeholder(tf.int32, name='batch_size')
    return {'labels': labels_ph, 'batch': batch_ph, 'dropout': dropout_ph, 'batch_size': batch_size_ph}
class StopwatchMeter(object):
    """Accumulates elapsed wall-clock time over repeated start/stop cycles.

    ``n`` counts the events covered by the timed intervals, so ``avg()`` is
    seconds per event.
    """

    def __init__(self):
        self.reset()

    def start(self):
        """Begin (or restart) timing an interval."""
        self.start_time = time.time()

    def stop(self, n=1):
        """Stop timing and accumulate; ``n`` is the number of events covered.

        A stop without a preceding start is a no-op.
        """
        if (self.start_time is not None):
            delta = (time.time() - self.start_time)
            self.sum += delta
            self.n += n
            self.start_time = None

    def reset(self):
        """Clear accumulated time and counts."""
        self.sum = 0  # cumulative seconds
        self.n = 0  # cumulative event count
        self.start_time = None

    def avg(self):
        """Average seconds per event; 0.0 before anything has been recorded.

        (Previously raised ZeroDivisionError when n == 0.)
        """
        return (self.sum / self.n) if self.n > 0 else 0.0
class SpeakerDiarization(base.Pipeline):
    """Streaming speaker diarization pipeline.

    Per chunk: local segmentation + overlap-aware embeddings feed an online
    clustering step; delayed aggregation over a buffer of chunks trades
    latency (within [step, duration]) for accuracy.
    """

    def __init__(self, config: (SpeakerDiarizationConfig | None)=None):
        self._config = (SpeakerDiarizationConfig() if (config is None) else config)
        msg = f'Latency should be in the range [{self._config.step}, {self._config.duration}]'
        assert (self._config.step <= self._config.latency <= self._config.duration), msg
        self.segmentation = SpeakerSegmentation(self._config.segmentation, self._config.device)
        self.embedding = OverlapAwareSpeakerEmbedding(self._config.embedding, self._config.gamma, self._config.beta, norm=1, normalize_weights=self._config.normalize_embedding_weights, device=self._config.device)
        # Predictions are blended across overlapping chunks; audio just takes
        # the earliest chunk covering each instant.
        self.pred_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='hamming', cropping_mode='loose')
        self.audio_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='first', cropping_mode='center')
        self.binarize = Binarize(self._config.tau_active)
        self.timestamp_shift = 0
        self.clustering = None
        (self.chunk_buffer, self.pred_buffer) = ([], [])
        self.reset()

    @staticmethod
    def get_config_class() -> type:
        # BUG FIX: was a plain instance method without `self`; calling it on
        # an instance raised TypeError.
        return SpeakerDiarizationConfig

    @staticmethod
    def suggest_metric() -> BaseMetric:
        # BUG FIX: missing @staticmethod (see get_config_class).
        return DiarizationErrorRate(collar=0, skip_overlap=False)

    @staticmethod
    def hyper_parameters() -> Sequence[base.HyperParameter]:
        # BUG FIX: missing @staticmethod (see get_config_class).
        return [base.TauActive, base.RhoUpdate, base.DeltaNew]

    @property
    def config(self) -> SpeakerDiarizationConfig:
        # BUG FIX: must be a property — reset() and __call__ read
        # `self.config.tau_active`, `self.config.duration`, etc. as attributes,
        # which previously resolved to the bound method and failed.
        return self._config

    def set_timestamp_shift(self, shift: float):
        """Offset applied to all emitted segment boundaries (stream start time)."""
        self.timestamp_shift = shift

    def reset(self):
        """Drop all state: shift, clustering centroids and chunk buffers."""
        self.set_timestamp_shift(0)
        self.clustering = OnlineSpeakerClustering(self.config.tau_active, self.config.rho_update, self.config.delta_new, 'cosine', self.config.max_speakers)
        (self.chunk_buffer, self.pred_buffer) = ([], [])

    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[tuple[(Annotation, SlidingWindowFeature)]]:
        """Diarize a batch of consecutive audio chunks.

        Returns one (annotation, aggregated waveform) pair per input chunk.
        """
        batch_size = len(waveforms)
        msg = 'Pipeline expected at least 1 input'
        assert (batch_size >= 1), msg
        batch = torch.stack([torch.from_numpy(w.data) for w in waveforms])
        expected_num_samples = int(np.rint((self.config.duration * self.config.sample_rate)))
        msg = f'Expected {expected_num_samples} samples per chunk, but got {batch.shape[1]}'
        assert (batch.shape[1] == expected_num_samples), msg
        segmentations = self.segmentation(batch)
        embeddings = self.embedding(batch, segmentations)
        seg_resolution = (waveforms[0].extent.duration / segmentations.shape[1])
        outputs = []
        for (wav, seg, emb) in zip(waveforms, segmentations, embeddings):
            sw = SlidingWindow(start=wav.extent.start, duration=seg_resolution, step=seg_resolution)
            seg = SlidingWindowFeature(seg.cpu().numpy(), sw)
            # Clustering permutes local speaker channels into global identities.
            permuted_seg = self.clustering(seg, emb)
            self.chunk_buffer.append(wav)
            self.pred_buffer.append(permuted_seg)
            agg_waveform = self.audio_aggregation(self.chunk_buffer)
            agg_prediction = self.pred_aggregation(self.pred_buffer)
            agg_prediction = self.binarize(agg_prediction)
            if (self.timestamp_shift != 0):
                # Re-anchor segment times to the absolute stream timeline.
                shifted_agg_prediction = Annotation(agg_prediction.uri)
                for (segment, track, speaker) in agg_prediction.itertracks(yield_label=True):
                    new_segment = Segment((segment.start + self.timestamp_shift), (segment.end + self.timestamp_shift))
                    shifted_agg_prediction[(new_segment, track)] = speaker
                agg_prediction = shifted_agg_prediction
            outputs.append((agg_prediction, agg_waveform))
            # Slide the buffers once they reach the aggregation window size.
            if (len(self.chunk_buffer) == self.pred_aggregation.num_overlapping_windows):
                self.chunk_buffer = self.chunk_buffer[1:]
                self.pred_buffer = self.pred_buffer[1:]
        return outputs
class Scale(object):
    """Callable transform that resizes a PIL image to a fixed (width, height)."""

    def __init__(self, size):
        # size is a (width, height) pair consumed by changeScale.
        self.size = size

    def __call__(self, image):
        return self.changeScale(image, self.size)

    def changeScale(self, img, size, interpolation=Image.BILINEAR):
        """Resize ``img`` to ``size`` using the given PIL interpolation mode."""
        (target_w, target_h) = size
        return img.resize((target_w, target_h), interpolation)
def main():
    """Measure FLOPs and parameter count of the model named by --model.

    Prints the counts and, when --train_url is set, also writes them to
    ``measure_model.txt`` in that directory.
    """
    args = parser.parse_args()
    assert (args.dataset == 'imagenet')
    args.num_classes = 1000
    args.IMAGE_SIZE = 224
    # NOTE(review): eval() on a CLI argument executes arbitrary code; this is
    # acceptable only for trusted local use — consider a name->class mapping.
    model = eval(args.model)(args)
    (n_flops, n_params) = measure_model(model, args.IMAGE_SIZE, args.IMAGE_SIZE)
    print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
    if args.train_url:
        # BUG FIX: os.path.join was called with a single pre-concatenated
        # argument (a no-op join), producing a wrong path when train_url
        # lacked a trailing separator.
        log_file = os.path.join(args.train_url, 'measure_model.txt')
        with open(log_file, 'w') as f:
            # Write one value per line; previously the two numbers were
            # concatenated with no separator (and f.close() inside the
            # `with` block was redundant).
            f.write(str(n_flops / 1e6) + '\n')
            f.write(str(n_params / 1e6) + '\n')
    return
class BasicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs) with dilation support."""
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, multi_grid=1):
        super(BasicBlock, self).__init__()
        # Multi-grid scales the dilation (DeepLab-style); padding tracks it so
        # spatial size is preserved (apart from stride).
        dilation = (dilation * multi_grid)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.bn1 = BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)
        self.bn2 = BatchNorm2d(planes)
        self.relu_inplace = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # BUG FIX: `if self.downsample:` skipped an *empty* nn.Sequential
        # (Sequential is falsy via __len__ when empty); compare with None.
        if self.downsample is not None:
            residual = self.downsample(x)
        out = (out + residual)
        out = self.relu_inplace(out)
        return out
def euclidean_dist(x, y):
    """Pairwise Euclidean distances between rows of x (m, d) and y (n, d).

    Uses the ||a-b||^2 = ||a||^2 + ||b||^2 - 2*a.b expansion; the result is
    clamped to 1e-12 before the sqrt for numerical stability.
    Returns an (m, n) tensor.
    """
    num_x = x.size(0)
    num_y = y.size(0)
    sq_x = x.pow(2).sum(1, keepdim=True).expand(num_x, num_y)
    sq_y = y.pow(2).sum(1, keepdim=True).expand(num_y, num_x).t()
    squared = sq_x + sq_y
    # In-place: squared = 1 * squared + (-2) * (x @ y.T)
    squared.addmm_(x, y.t(), beta=1, alpha=(- 2))
    return squared.clamp(min=1e-12).sqrt()
def clip_to_window(keypoints, window, scope=None):
    """Clip keypoint coordinates into a window (TF1 graph op).

    Args:
        keypoints: [batch, num_keypoints, 2] tensor of (y, x) coordinates.
        window: 1-D tensor [y_min, x_min, y_max, x_max].
        scope: optional name scope.

    Returns:
        Tensor of the same shape with coordinates clamped to the window.
    """
    with tf.name_scope(scope, 'ClipToWindow'):
        (y_coords, x_coords) = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
        (win_y_min, win_x_min, win_y_max, win_x_max) = tf.unstack(window)
        clipped_y = tf.maximum(tf.minimum(y_coords, win_y_max), win_y_min)
        clipped_x = tf.maximum(tf.minimum(x_coords, win_x_max), win_x_min)
        return tf.concat([clipped_y, clipped_x], 2)
def graph_resnet101_scheduled(min_num_epochs=0, max_num_epochs=400):
    """Plot the seven ResNet101 training curves stored in the scheduled CSV.

    Rows (min_num_epochs, max_num_epochs] of the CSV are used, skipping the
    header row; column 0 is the epoch index, columns 1-7 are the curves.
    """
    labels = ['Standard Trained Model 1', 'Standard Trained Model 2', 'Standard Trained Model 3', 'Standard Trained Model 4', 'Standard Trained Model 5', 'ASWT Model 1', 'ASWT Model 2']
    xaxis = list(range(min_num_epochs, max_num_epochs))
    curves = [[] for _ in range(7)]
    with open('graph_sources/resnet101_scheduled.csv', 'r') as g_source:
        for (row_idx, raw_line) in enumerate(g_source):
            # Skip the header (row 0) and rows outside the requested epoch span.
            if (row_idx != 0) and (min_num_epochs < row_idx <= max_num_epochs):
                cells = raw_line.rstrip().split(',')
                for col in range(7):
                    curves[col].append(cells[col + 1])
    curves = np.array(curves).astype(float)
    graph_time_series(xaxis, curves, labels, 'resnet101scheduled', title='ResNet101 Training on CIFAR10')
def check_regression_targets(y):
    """Validate that ``y`` is a 1-D target array and cast it to ``dtype_t``.

    Returns ``y`` unchanged when it already has the expected dtype.
    """
    assert (y.ndim == 1)
    if y.dtype == dtype_t:
        return y
    return y.astype(dtype_t)
def test_quad_double_hyperbola(vrblvl=0):
    """Track the two branches of the hyperbola x^2 = (t - 0.5)^2 + 0.01
    in quad double precision, starting from the analytic roots at t = par.
    Returns 0 on completion."""
    par = 0.1
    xtp = ['x^2 - (t - 0.5)^2 - 0.01;']
    # Analytic start value: x = sqrt(4*par^2 + 1) / 2.
    root = (sqrt(((4 * (par ** 2)) + 1)) / 2)
    print('\nvalue of the first start solution :', root)
    first = make_solution(['x'], [root])
    print('the first start solution :\n', first)
    second = make_solution(['x'], [(- root)])
    print('the second start solution :\n', second)
    print('tracking in quad double precision ...')
    next_quad_double_loop(xtp, 2, [first, second], vrblvl)
    return 0
def main():
    """Train/evaluate/predict a transformer-based knowledge-graph-completion
    (triplet prediction) model.

    Phases, each gated by a TrainingArguments flag:
      * do_train   -- fine-tune and save the model and tokenizer
      * do_eval    -- report the evaluation loss
      * do_predict -- link-prediction ranking: corrupt the head and the tail
        of each test triple against the entity set and report MR / MRR /
        Hits@{1,3,10}.
    """
    # Parse the three dataclass argument groups from the command line.
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # Only the main process logs at INFO; other ranks log warnings and above.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (training_args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool((training_args.local_rank != (- 1))), training_args.fp16)
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.model_cache_dir)
    is_world_process_zero = ((training_args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))
    processor = KGProcessor(data_args, tokenizer, is_world_process_zero)
    (train_data, dev_data, test_data) = processor.get_dataset(training_args)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.model_cache_dir)
    # Remember the original vocab size before any resizing elsewhere.
    if (not hasattr(config, 'real_vocab_size')):
        config.real_vocab_size = config.vocab_size
    if (model_args.pos_weight is not None):
        # Positive-class weight for the loss must live on the training device.
        model_args.pos_weight = torch.tensor([model_args.pos_weight]).to(training_args.device)
    if model_args.pooling_model:
        print('using pooling model!')
        # Pick the model class matching the tokenizer family (Roberta vs Bert).
        # NOTE(review): `tokenizer_cls` actually holds a *model* class here.
        if tokenizer.__class__.__name__.startswith('Roberta'):
            tokenizer_cls = RobertaPoolingForTripletPrediction
        elif tokenizer.__class__.__name__.startswith('Bert'):
            tokenizer_cls = BertPoolingForTripletPrediction
        else:
            raise NotImplementedError()
        model = tokenizer_cls.from_pretrained(model_args.model_name_or_path, margin=data_args.margin, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.model_cache_dir, pos_weight=model_args.pos_weight, text_loss_weight=model_args.text_loss_weight)
        data_collator = PoolingCollator(tokenizer)
    else:
        raise NotImplementedError()
    trainer = KGCTrainer(model=model, args=training_args, data_collator=data_collator, train_dataset=train_data, eval_dataset=dev_data, prediction_loss_only=True)
    if data_args.group_shuffle:
        print('using group shuffle')
        trainer.use_group_shuffle(data_args.num_neg)
    if training_args.do_train:
        # Resume from the checkpoint dir only when it exists on disk.
        model_path = (model_args.model_name_or_path if ((model_args.model_name_or_path is not None) and os.path.isdir(model_args.model_name_or_path)) else None)
        trainer.train(model_path=model_path)
        trainer.save_model()
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    results = {}
    if training_args.do_eval:
        eval_output = trainer.evaluate()
        result = {'eval_loss': eval_output['eval_loss']}
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write(('%s = %s\n' % (key, str(result[key]))))
        results.update(result)
    if training_args.do_predict:
        # Link-prediction evaluation: rank the true triple against all
        # head-corrupted and tail-corrupted candidates.
        prediction_begin_time = time.time()
        trainer.model.set_predict_mode()
        trainer.prediction_loss_only = False
        trainer.data_collator.set_predict_mode()
        train_triples = processor.get_train_triples(data_args.train_file)
        dev_triples = processor.get_dev_triples()
        test_triples = processor.get_test_triples()
        all_triples = ((train_triples + dev_triples) + test_triples)
        # Known triples are excluded from the corruption candidates
        # (filtered setting).
        all_triples_str_set = set()
        for triple in all_triples:
            triple_str = '\t'.join(triple)
            all_triples_str_set.add(triple_str)
        ranks = []
        ranks_left = []
        ranks_right = []
        hits_left = []
        hits_right = []
        hits = []
        top_ten_hit_count = 0
        # hits[k] collects 1.0/0.0 flags for Hits@(k+1), k = 0..9.
        for i in range(10):
            hits_left.append([])
            hits_right.append([])
            hits.append([])
        total_test = len(test_triples)
        for (test_id, test_triple) in enumerate(test_triples):
            # Optionally subsample the test set for a faster estimate.
            if (np.random.random() > data_args.test_ratio):
                continue
            head = test_triple[0]
            relation = test_triple[1]
            tail = test_triple[2]
            # ----- head corruption: candidate list starts with the true triple
            # at index 0, so its rank is where argsort places index 0. -----
            head_corrupt_list = [test_triple]
            if data_args.type_constrain:
                tmp_entity_list = processor.rel2valid_head[relation]
            else:
                tmp_entity_list = processor.get_entities()
            for corrupt_ent in tmp_entity_list:
                if (corrupt_ent != head):
                    tmp_triple = [corrupt_ent, relation, tail]
                    tmp_triple_str = '\t'.join(tmp_triple)
                    if (tmp_triple_str not in all_triples_str_set):
                        head_corrupt_list.append(tmp_triple)
            (_, tmp_features) = processor._create_examples_and_features(head_corrupt_list)
            data_len = len(tmp_features)
            all_input_ids = torch.tensor([f.input_ids for f in tmp_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in tmp_features], dtype=torch.long)
            all_pos_indicator = torch.tensor([f.pos_indicator for f in tmp_features], dtype=torch.long)
            eval_data = DictDataset(input_ids=all_input_ids, labels=all_label_ids, pos_indicator=all_pos_indicator)
            # Mask part 0 = head position during scoring.
            trainer.data_collator.predict_mask_part = 0
            preds = trainer.predict(eval_data).predictions
            if trainer.is_world_master():
                argsort1 = np.argsort((- preds))
                rank1 = np.where((argsort1 == 0))[0][0]
                print('left: ', rank1, data_len)
                ranks.append((rank1 + 1))
                ranks_left.append((rank1 + 1))
                if (rank1 < 10):
                    top_ten_hit_count += 1
            # ----- tail corruption: same procedure on the tail position. -----
            tail_corrupt_list = [test_triple]
            if data_args.type_constrain:
                tmp_entity_list = processor.rel2valid_tail[relation]
            else:
                tmp_entity_list = processor.get_entities()
            for corrupt_ent in tmp_entity_list:
                if (corrupt_ent != tail):
                    tmp_triple = [head, relation, corrupt_ent]
                    tmp_triple_str = '\t'.join(tmp_triple)
                    if (tmp_triple_str not in all_triples_str_set):
                        tail_corrupt_list.append(tmp_triple)
            (_, tmp_features) = processor._create_examples_and_features(tail_corrupt_list)
            data_len = len(tmp_features)
            all_input_ids = torch.tensor([f.input_ids for f in tmp_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in tmp_features], dtype=torch.long)
            all_pos_indicator = torch.tensor([f.pos_indicator for f in tmp_features], dtype=torch.long)
            eval_data = DictDataset(input_ids=all_input_ids, labels=all_label_ids, pos_indicator=all_pos_indicator)
            # Mask part 2 = tail position during scoring.
            trainer.data_collator.predict_mask_part = 2
            preds = trainer.predict(eval_data).predictions
            if trainer.is_world_master():
                argsort1 = np.argsort((- preds))
                rank2 = np.where((argsort1 == 0))[0][0]
                ranks.append((rank2 + 1))
                ranks_right.append((rank2 + 1))
                print('right: ', rank2, data_len)
                print('mean rank until now: ', np.mean(ranks))
                if (rank2 < 10):
                    top_ten_hit_count += 1
                print(' until now: ', ((top_ten_hit_count * 1.0) / len(ranks)))
                print('time used for prediction now: ', (time.time() - prediction_begin_time))
                print('num of tested triples: {} / {}'.format((test_id + 1), total_test))
                # Accumulate Hits@(k+1) flags for both directions.
                for hits_level in range(10):
                    if (rank1 <= hits_level):
                        hits[hits_level].append(1.0)
                        hits_left[hits_level].append(1.0)
                    else:
                        hits[hits_level].append(0.0)
                        hits_left[hits_level].append(0.0)
                    if (rank2 <= hits_level):
                        hits[hits_level].append(1.0)
                        hits_right[hits_level].append(1.0)
                    else:
                        hits[hits_level].append(0.0)
                        hits_right[hits_level].append(0.0)
        # Final metrics: Hits@1/3/10, mean rank and mean reciprocal rank.
        if trainer.is_world_master():
            for i in [0, 2, 9]:
                logger.info('Hits left {0}: {1}'.format((i + 1), np.mean(hits_left[i])))
                logger.info('Hits right {0}: {1}'.format((i + 1), np.mean(hits_right[i])))
                logger.info('Hits {0}: {1}'.format((i + 1), np.mean(hits[i])))
            logger.info('Mean rank left: {0}'.format(np.mean(ranks_left)))
            logger.info('Mean rank right: {0}'.format(np.mean(ranks_right)))
            logger.info('Mean rank: {0}'.format(np.mean(ranks)))
            logger.info('Mean reciprocal rank left: {0}'.format(np.mean((1.0 / np.array(ranks_left)))))
            logger.info('Mean reciprocal rank right: {0}'.format(np.mean((1.0 / np.array(ranks_right)))))
            logger.info('Mean reciprocal rank: {0}'.format(np.mean((1.0 / np.array(ranks)))))
# NOTE(review): looks like a registration decorator whose '@' was lost in
# extraction — confirm against the original file.
_registry(pattern_type='EinsumwithArange')
class EinsumwithArange(Pattern):
    """Graph rewrite: fuse Shape/(Add)/Arange/Einsum chains into
    Range -> Reshape -> Matmul nodes (torch-exported models only)."""

    def __call__(self, model):
        # Two in-patterns are handled: a plain Shape->Arange->Einsum chain and
        # a variant with an Add of two Shape outputs feeding the Arange.
        pattern_mapping_config = {'EinsumwithArange': [{'patterns': {'in': [[(0, 'Shape'), (1, 'Arange'), (2, 'Einsum')]], 'out': [[(0, 'Range'), (1, 'Reshape'), (2, 'Matmul')]]}, 'search_mode': 'op_type', 'node_names': {0: 1, 1: 0, 2: 2}, 'input_tensors': {0: [[{'input_data': [0]}], [[0], 1]], 1: [[], [[], 1]], 2: [[{2: [0]}], [[1], 2]]}, 'output_tensors': {0: [[], [[], 1]], 1: [[], [[], 1]], 2: [[{2: [0]}], [[0], 1]]}, 'returns': [0, 1, 2]}, {'patterns': {'in': [[(0, 'Shape'), (2, 'Add'), (3, 'Arange'), (4, 'Einsum')], [(), (1, 'Shape'), (2, 'Add')]], 'out': [[(0, 'Range'), (1, 'Reshape'), (2, 'Matmul')]]}, 'search_mode': 'op_type', 'node_names': {0: 3, 1: 0, 2: 4}, 'input_tensors': {0: [[{'input_data': [0]}, {'input_data': [2]}], [[0, 1], 2]], 1: [[], [[], 1]], 2: [[{4: [1]}], [[1], 2]]}, 'output_tensors': {0: [[], [[], 1]], 1: [[], [[], 1]], 2: [[{4: [0]}], [[0], 1]]}, 'returns': [0]}]}
        # This fusion only applies to torch-exported graphs.
        if (model.framework_modeling_config['framework'] != 'torch'):
            return model

        def _set_attr(new_node_names, ret_old_nodes, model):
            # Configure the Range node from the old Arange's 'end' attribute
            # and insert an extra Reshape before the Matmul's second input so
            # the two range vectors multiply as a column x row outer product.
            for i in range(len(new_node_names)):
                range_node_idx = model.get_node_id(new_node_names[i][0])
                attr = OrderedDict()
                if ('end' in ret_old_nodes[i][0].attr.keys()):
                    attr['end_with_shape'] = ret_old_nodes[i][0].attr['end']
                model.nodes[range_node_idx].attr = attr
                matmul_node = model.get_node_by_name(new_node_names[i][2])
                reshape_node = model.get_node_by_name(new_node_names[i][1])
                reshape_node.attr = OrderedDict({'dst_shape': '-1, 1'})
                # New tensor carrying the row-vector reshape of the second operand.
                reshape_output = Tensor(name=(reshape_node.input_tensors[0].name + '_reshape'), source_op=[(matmul_node.name + '_reshape')], dest_op=[matmul_node.name], dtype=matmul_node.input_tensors[0].dtype)
                matmul_node.input_tensors[1].dest_op = [(matmul_node.name + '_reshape')]
                range_2 = model.get_node_by_name(matmul_node.input_tensors[1].source_op[0])
                range_2.output_tensors[0].dest_op = [(matmul_node.name + '_reshape')]
                reshape_op = util.construct_node(node_name=(matmul_node.name + '_reshape'), op_type='Reshape', input_tensors=[matmul_node.input_tensors[1]], output_tensors=[reshape_output], attr=OrderedDict({'dst_shape': '1, -1'}))
                matmul_node.input_tensors[1] = reshape_output
                insert_idx = model.get_node_id(new_node_names[i][2])
                model.insert_nodes(insert_idx, [reshape_op])
        pattern_dict = pattern_mapping_config['EinsumwithArange'][0]
        (model, new_node_names, ret_old_nodes) = util.pattern_mapping('EinsumwithArange', pattern_dict, model)
        if (len(new_node_names) != 0):
            _set_attr(new_node_names, ret_old_nodes, model)

        def _set_attr1(new_node_names, ret_old_nodes, model):
            # Variant of _set_attr for the Add+Shape pattern: the Range sums
            # its two shape inputs ('algorithm': 'add') with a fixed end.
            # NOTE(review): near-duplicate of _set_attr — candidate for merging.
            for i in range(len(new_node_names)):
                range_node_idx = model.get_node_id(new_node_names[i][0])
                attr = OrderedDict()
                attr['algorithm'] = 'add'
                attr['end_with_shape'] = 1
                model.nodes[range_node_idx].attr = attr
                matmul_node = model.get_node_by_name(new_node_names[i][2])
                reshape_node = model.get_node_by_name(new_node_names[i][1])
                reshape_node.attr = OrderedDict({'dst_shape': '-1, 1'})
                reshape_output = Tensor(name=(reshape_node.input_tensors[0].name + '_reshape'), source_op=[(matmul_node.name + '_reshape')], dest_op=[matmul_node.name], dtype=matmul_node.input_tensors[0].dtype)
                matmul_node.input_tensors[1].dest_op = [(matmul_node.name + '_reshape')]
                range_2 = model.get_node_by_name(matmul_node.input_tensors[1].source_op[0])
                range_2.output_tensors[0].dest_op = [(matmul_node.name + '_reshape')]
                reshape_op = util.construct_node(node_name=(matmul_node.name + '_reshape'), op_type='Reshape', input_tensors=[matmul_node.input_tensors[1]], output_tensors=[reshape_output], attr=OrderedDict({'dst_shape': '1, -1'}))
                matmul_node.input_tensors[1] = reshape_output
                insert_idx = model.get_node_id(new_node_names[i][2])
                model.insert_nodes(insert_idx, [reshape_op])
        pattern_dict = pattern_mapping_config['EinsumwithArange'][1]
        (model, new_node_names, ret_old_nodes) = util.pattern_mapping('EinsumwithArange', pattern_dict, model)
        if (len(new_node_names) != 0):
            _set_attr1(new_node_names, ret_old_nodes, model)
        return model
        # NOTE(review): unreachable duplicate return below — dead code.
        return model
def select_relevant_portion(text):
    """Collect whitespace-joined tokens from ``text``, paragraph by paragraph,
    stopping once ``args.max_num_tokens`` tokens have been gathered.

    A newline token is appended after every fully consumed paragraph; the
    final string is whitespace-stripped.
    """
    tokens = []
    for paragraph in text.split('\n'):
        for sentence in sent_tokenize.tokenize(paragraph):
            for token in nltk.word_tokenize(sentence):
                tokens.append(token)
                if len(tokens) >= args.max_num_tokens:
                    # Budget exhausted mid-paragraph: stop immediately
                    # (replaces the original triple-break `done` flag).
                    return ' '.join(tokens).strip()
        tokens.append('\n')
    return ' '.join(tokens).strip()
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset, labels, chosen_ids, cluster_member_ids, cluster_idx, margin_multiplier, margin_type):
    """Update the medoid of one cluster (TF1 graph mode).

    Scores every cluster member as a candidate medoid: a facility-location-style
    term (negative distance mass of the cluster) plus a margin term
    (1 - clustering score with that candidate swapped in), then commits the
    argmax candidate into ``chosen_ids``.

    Args:
        pairwise_distances: full pairwise distance matrix.
        pairwise_distances_subset: distances restricted to this cluster.
        labels: ground-truth labels used by the clustering score.
        chosen_ids: current medoid indices, one per cluster.
        cluster_member_ids: indices of the examples in this cluster.
        cluster_idx: which cluster's medoid is being updated.
        margin_multiplier: weight of the margin term.
        margin_type: metric name forwarded to compute_clustering_score.

    Returns:
        ``chosen_ids`` with this cluster's medoid replaced by the best candidate.
    """
    def func_cond(iteration, scores_margin):
        # Loop while untried candidate medoids remain.
        del scores_margin
        return (iteration < num_candidates)
    def func_body(iteration, scores_margin):
        # Try cluster member `iteration` as the medoid and score the resulting
        # cluster assignment against the ground-truth labels.
        candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
        tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
        predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
        metric_score = compute_clustering_score(labels, predictions, margin_type)
        # Scatter (1 - score) into position `iteration` of the margin vector
        # by padding with zeros on both sides.
        pad_before = array_ops.zeros([iteration])
        pad_after = array_ops.zeros([((num_candidates - 1) - iteration)])
        return ((iteration + 1), (scores_margin + array_ops.concat([pad_before, [(1.0 - metric_score)], pad_after], 0)))
    # Facility term: negative total distance mass of this cluster.
    scores_fac = ((- 1.0) * math_ops.reduce_sum(array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0))
    iteration = array_ops.constant(0)
    num_candidates = array_ops.size(cluster_member_ids)
    scores_margin = array_ops.zeros([num_candidates])
    (_, scores_margin) = control_flow_ops.while_loop(func_cond, func_body, [iteration, scores_margin])
    candidate_scores = math_ops.add(scores_fac, (margin_multiplier * scores_margin))
    argmax_index = math_ops.to_int32(math_ops.argmax(candidate_scores, dimension=0))
    best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
    chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
    return chosen_ids
def populate_defaults():
    """Build a settings dict for every name in ``names``.

    Each value is the first non-None entry found in, in order:
    model defaults, training defaults, feature defaults (None when absent
    from all three).
    """
    def first_non_none(name):
        for source in (default_model_settings, default_training_settings, default_feature_settings):
            value = source.get(name)
            if value is not None:
                return value
        return None
    return {name: first_non_none(name) for name in names}
class GwcDispProcessor(nn.Module):
    """GwcNet-style disparity processor.

    Aggregates a (group-wise correlation [+ concat]) cost volume with 3D
    convolutions and three stacked hourglasses, then regresses disparity by
    soft-argmin. Training returns four intermediate predictions for deep
    supervision; inference returns only the final one.
    """

    def __init__(self, maxdisp=192, downsample=4, num_groups=40, use_concat_volume=True, concat_channels=12, *args, **kwargs):
        super().__init__()
        self.maxdisp = maxdisp  # number of disparity hypotheses regressed over
        self.downsample = downsample
        self.num_groups = num_groups  # group-wise correlation groups
        self.use_concat_volume = use_concat_volume
        self.concat_channels = (concat_channels if use_concat_volume else 0)
        # Cost volume channels: num_groups plus left+right concat features.
        self.dres0 = nn.Sequential(convbn_3d((self.num_groups + (self.concat_channels * 2)), 32, 3, 1, 1), nn.ReLU(inplace=True), convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True))
        self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True), convbn_3d(32, 32, 3, 1, 1))
        self.dres2 = Hourglass(32)
        self.dres3 = Hourglass(32)
        self.dres4 = Hourglass(32)
        # One classification head per supervision stage.
        self.classif0 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))

    def _regress(self, cost, h, w):
        """Upsample a (B, 1, D, H', W') cost to full resolution and soft-argmin
        it into a disparity map. Shared tail of every supervision stage
        (previously duplicated four times in forward).
        align_corners is now passed explicitly everywhere; the inference path
        previously relied on the default, which is numerically identical."""
        cost = F.interpolate(cost, [self.maxdisp, h, w], mode='trilinear', align_corners=False)
        cost = torch.squeeze(cost, 1)
        prob = F.softmax(cost, dim=1)
        return disparity_regression(prob, self.maxdisp)

    def forward(self, inputs):
        volume = inputs['cost_volume']
        (h, w) = inputs['ref_img'].shape[2:]
        cost0 = self.dres0(volume)
        cost0 = (self.dres1(cost0) + cost0)  # residual pre-hourglass block
        out1 = self.dres2(cost0)
        out2 = self.dres3(out1)
        out3 = self.dres4(out2)
        if self.training:
            # Deep supervision: regress a disparity map from every stage.
            pred0 = self._regress(self.classif0(cost0), h, w)
            pred1 = self._regress(self.classif1(out1), h, w)
            pred2 = self._regress(self.classif2(out2), h, w)
            pred3 = self._regress(self.classif3(out3), h, w)
            output = {'training_disp': {'disp': {'disp_ests': [pred0, pred1, pred2, pred3], 'disp_gt': inputs['disp_gt'], 'mask': inputs['mask']}}, 'visual_summary': {'image/train/image_c': torch.cat([inputs['ref_img'][0], inputs['tgt_img'][0]], dim=1), 'image/train/disp_c': torch.cat([inputs['disp_gt'][0], pred3[0]], dim=0)}}
            return output
        else:
            # Inference: only the final hourglass prediction is needed.
            pred3 = self._regress(self.classif3(out3), h, w)
            output = {'inference_disp': {'disp_est': pred3}, 'visual_summary': {'image/test/image_c': torch.cat([inputs['ref_img'][0], inputs['tgt_img'][0]], dim=1), 'image/test/disp_c': pred3[0]}}
            if ('disp_gt' in inputs):
                output['visual_summary'] = {'image/val/image_c': torch.cat([inputs['ref_img'][0], inputs['tgt_img'][0]], dim=1), 'image/val/disp_c': torch.cat([inputs['disp_gt'][0], pred3[0]], dim=0)}
            return output

    def input_output(self):
        # NOTE(review): 'disp_shape' is declared but forward() actually reads
        # 'ref_img'/'tgt_img'/'disp_gt'/'mask'; confirm the declared contract.
        return {'inputs': ['cost_volume', 'disp_shape'], 'outputs': ['training_disp', 'inference_disp', 'visual_summary']}
def _write_memory_from_list(shm: SharedMemory, files: List[Tuple[(str, WriteItem)]], planner: SavePlanner):
    """Serialize every planned write item into the shared-memory buffer.

    Items are written back-to-back starting at offset 0. Non-SHARD items are
    additionally collected into a fqn -> data mapping.

    Returns:
        (write_results, no_shard_data): per-item write results and the
        collected non-sharded payloads.
    """
    write_results = []
    cursor = 0
    no_shard_data: Dict[(str, STORAGE_TYPES)] = {}
    for (storage_key, write_item) in files:
        payload = planner.resolve_data(write_item)
        # Detach tensors so no autograd graph is serialized.
        if torch.is_tensor(payload):
            payload = payload.detach()
        if (write_item.type != WriteItemType.SHARD):
            no_shard_data[write_item.index.fqn] = payload
        (cursor, result) = _write_item(shm, cursor, payload, write_item, storage_key)
        write_results.append(result)
    return (write_results, no_shard_data)
# NOTE(review): likely a registration decorator whose '@' was lost in
# extraction — confirm against the original file.
_SCHEDULERS.register('CosineAnnealingLR')
def build_cosine_annealing_lr(cfg, optimizer):
    """Create a CosineAnnealingLR scheduler from the config.

    When warmup is enabled the cosine horizon is shortened by the warmup
    length, since warmup is handled by a separate scheduler phase.
    """
    assert isinstance(optimizer, Optimizer)
    t_max = cfg.TRAIN.MAX_EPOCH
    if cfg.LR_SCHEDULER.IS_WARMUP:
        t_max = t_max - cfg.LR_SCHEDULER.WARMUP.ITERATION
    eta_min = cfg.LR_SCHEDULER.COSINE_ANNEALING_LR.MINIMAL_LR
    return optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=t_max, eta_min=eta_min)
def compute_transformation_matrix(src_points, image_width, image_height):
    """Perspective transform mapping a source quadrilateral onto the full image.

    ``src_points`` must be four float32 (x, y) corners ordered
    top-left, top-right, bottom-right, bottom-left to match the destination.
    """
    dst_corners = np.array([[0, 0], [image_width, 0], [image_width, image_height], [0, image_height]], dtype=np.float32)
    return cv.getPerspectiveTransform(src_points, dst_corners)
def get_bn_layer(bn_type: str):
    """Resolve a normalization-layer factory dict keyed by '1d'/'2d'.

    'bn' -> plain BatchNorm; 'none' -> mock (identity-style) layers;
    a leading 'd' wraps the resolved classes in DualNormLayer (recursive,
    so e.g. 'dbn' is a dual BatchNorm).

    Raises:
        ValueError: for an unrecognized ``bn_type``.
    """
    if bn_type.startswith('d'):
        inner = get_bn_layer(bn_type[1:])
        def _dual_factory(dim):
            # Late-bind the wrapped class per dimensionality.
            def make(num_features, **kwargs):
                return DualNormLayer(num_features, bn_class=inner[dim], **kwargs)
            return make
        return {'1d': _dual_factory('1d'), '2d': _dual_factory('2d')}
    if bn_type == 'bn':
        return {'1d': nn.BatchNorm1d, '2d': nn.BatchNorm2d}
    if bn_type == 'none':
        return {'1d': MockBatchNorm1d, '2d': MockBatchNorm2d}
    raise ValueError(f'Invalid bn_type: {bn_type}')
# NOTE(review): likely a registration decorator whose '@' was lost in
# extraction — confirm against the original file.
_BOX_PREDICTOR.register('FPNPredictor')
class FPNPredictor(nn.Module):
def __init__(self, cfg):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.cls_score = nn.Linear(representation_size, num_classes)
num_bbox_reg_classes = (2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes)
self.bbox_pred = nn.Linear(representation_size, (num_bbox_reg_classes * 4))
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, x):
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return (scores, bbox_deltas) |
class TrainingVAE(TrainingInterface):
    """Training wrapper adapting raw dataloader batches for the VAE."""

    def _batch_to_inputs(self, batch):
        """Unpack a 6-tuple batch and move the model inputs to ``self.device``.

        The first two batch entries are discarded; the rest are cast to
        the dtypes the model expects (long for ``x`` token ids, float for
        everything else).

        Returns:
            ``(x, c, pr_mat, dt_x)`` on ``self.device``.
        """
        _, _, pr_mat, x, c, dt_x = batch
        return (
            x.to(self.device).long(),
            c.to(self.device).float(),
            pr_mat.to(self.device).float(),
            dt_x.to(self.device).float(),
        )
def convert(lst):
    """Decode a sequence of token ids into a space-joined sentence.

    Ids 0-3 map to the special tokens UNK/PAD/BOS/EOS; every id >= 4
    indexes into a fixed demo vocabulary (a TED-talk sentence).
    """
    vocab = "what I 've come to realize about Afghanistan , and this is something that is often dismissed in the West".split()
    id2word = {0: 'UNK', 1: 'PAD', 2: 'BOS', 3: 'EOS'}
    for offset, word in enumerate(vocab):
        id2word[offset + 4] = word
    return ' '.join(id2word[token] for token in lst)
def set_global_seeds(i):
    """Seed every RNG in use (torch if installed, numpy, stdlib random).

    torch is imported lazily so the function works on torch-less setups,
    and CUDA generators are seeded only when a CUDA device is available.
    """
    try:
        import torch as _torch
    except ImportError:
        _torch = None
    if _torch is not None:
        _torch.manual_seed(i)
        if _torch.cuda.is_available():
            _torch.cuda.manual_seed_all(i)
    np.random.seed(i)
    random.seed(i)
def bbox2Coords(box):
    """Expand an axis-aligned box ``[x1, y1, x2, y2]`` into 4 corners.

    Coordinates are rounded to the nearest int (Python banker's
    rounding) and the corners are returned flattened, clockwise from the
    top-left: ``[x1, y1, x2, y1, x2, y2, x1, y2]``.
    """
    assert len(box) == 4
    x1, y1, x2, y2 = (int(round(v)) for v in box)
    return [x1, y1, x2, y1, x2, y2, x1, y2]
def main():
    """Compute decoding-speed statistics (RTF, latency) from decode logs.

    Scans ``decode.*.log`` files under ``args.log_dir``, pairing each
    utterance's "input lengths" line (decode start) with its "prediction"
    line (decode end), then prints total audio duration, total decoding
    time, the real-time factor, and per-sentence latency.
    """
    args = get_parser().parse_args()
    audio_sec = 0
    decode_sec = 0
    n_utt = 0
    audio_durations = []
    start_times = []
    end_times = []
    for x in glob.glob(os.path.join(args.log_dir, 'decode.*.log')):
        with codecs.open(x, 'r', 'utf-8') as f:
            for line in f:
                x = line.strip()
                if ('INFO: input lengths' in x):
                    # Presumably a frame count at 100 frames/sec (it is
                    # divided by 100 below) -- confirm the log's units.
                    audio_durations += [int(x.split('input lengths: ')[1])]
                    # The timestamp precedes the first '(' on the line;
                    # `parser` here is the dateutil parser module.
                    start_times += [parser.parse(x.split('(')[0])]
                elif ('INFO: prediction' in x):
                    end_times += [parser.parse(x.split('(')[0])]
    # Every started utterance must have a matching end line.
    assert (len(audio_durations) == len(end_times)), (len(audio_durations), len(end_times))
    assert (len(start_times) == len(end_times)), (len(start_times), len(end_times))
    # Frames -> seconds.
    audio_sec += (sum(audio_durations) / 100)
    decode_sec += sum([(end - start).total_seconds() for (start, end) in zip(start_times, end_times)])
    n_utt += len(audio_durations)
    print(('Total audio duration: %.3f [sec]' % audio_sec))
    print(('Total decoding time: %.3f [sec]' % decode_sec))
    # Real-time factor: decoding time over audio time (0 when no audio).
    rtf = ((decode_sec / audio_sec) if (audio_sec > 0) else 0)
    print(('RTF: %.3f' % rtf))
    latency = (((decode_sec * 1000) / n_utt) if (n_utt > 0) else 0)
    print(('Latency: %.3f [ms/sentence]' % latency))
def SENet(model_params, input_tensor=None, input_shape=None, include_top=False, classes=1000, weights='imagenet', stride_size=2, init_filters=64, repetitions=None, **kwargs):
    """Build a 3D SENet-family model from ``model_params``.

    Args:
        model_params: parameter container supplying the residual block
            factory plus architecture hyperparameters (``reduction``,
            ``groups``, ``input_3x3``, ``dropout``, ``model_name``).
        input_tensor: optional existing Keras tensor to build on.
        input_shape: input shape used when no tensor is given.
        include_top: append global pooling + softmax classification head.
        classes: number of output classes for the top head.
        weights: 'imagenet', a local weights file path, or falsy to skip
            weight loading.
        stride_size: single int, or a per-stage sequence of ints/triples;
            must have exactly one more entry than ``repetitions``.
        init_filters: stem width.
        repetitions: number of residual blocks per stage.

    Returns:
        A Keras ``Model``, or ``None`` when the stride/repetitions
        configuration is invalid (errors are printed, matching the
        original contract).
    """
    global backend, layers, models, keras_utils
    (backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
    residual_block = model_params.residual_block
    bn_params = get_bn_params()
    # Normalize stride_size into a list of per-stage (d, h, w) triples.
    if (type(stride_size) not in (tuple, list)):
        stride_size = [(stride_size, stride_size, stride_size), (stride_size, stride_size, stride_size), (stride_size, stride_size, stride_size), (stride_size, stride_size, stride_size), (stride_size, stride_size, stride_size)]
    else:
        stride_size = list(stride_size)
    if (len(stride_size) < 3):
        print('Error: stride_size length must be 3 or more')
        return None
    if ((len(stride_size) - 1) != len(repetitions)):
        # BUG FIX: the old message said "repetitions length - 1" although
        # the check requires len(stride_size) == len(repetitions) + 1.
        print('Error: stride_size length must be equal to repetitions length + 1')
        return None
    for i in range(len(stride_size)):
        if (type(stride_size[i]) not in (tuple, list)):
            stride_size[i] = (stride_size[i], stride_size[i], stride_size[i])
    # Resolve the model input tensor.
    if (input_tensor is None):
        input = layers.Input(shape=input_shape, name='input')
    elif (not backend.is_keras_tensor(input_tensor)):
        input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        input = input_tensor
    x = input
    # Stem: either three stacked 3x3x3 convs or one 7x7x7 conv.
    if model_params.input_3x3:
        x = layers.ZeroPadding3D(1)(x)
        x = layers.Conv3D(init_filters, (3, 3, 3), strides=stride_size[0], use_bias=False, kernel_initializer='he_uniform')(x)
        x = layers.BatchNormalization(**bn_params)(x)
        x = layers.Activation('relu')(x)
        x = layers.ZeroPadding3D(1)(x)
        x = layers.Conv3D(init_filters, (3, 3, 3), use_bias=False, kernel_initializer='he_uniform')(x)
        x = layers.BatchNormalization(**bn_params)(x)
        x = layers.Activation('relu')(x)
        x = layers.ZeroPadding3D(1)(x)
        x = layers.Conv3D((init_filters * 2), (3, 3, 3), use_bias=False, kernel_initializer='he_uniform')(x)
        x = layers.BatchNormalization(**bn_params)(x)
        x = layers.Activation('relu')(x)
    else:
        x = layers.ZeroPadding3D(3)(x)
        x = layers.Conv3D(init_filters, (7, 7, 7), strides=stride_size[0], use_bias=False, kernel_initializer='he_uniform')(x)
        x = layers.BatchNormalization(**bn_params)(x)
        x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D(1)(x)
    # Pool window is stride+1 per axis so strided windows overlap by one.
    pool = ((stride_size[1][0] + 1), (stride_size[1][1] + 1), (stride_size[1][2] + 1))
    x = layers.MaxPooling3D(pool, strides=stride_size[1])(x)
    # Residual stages: width doubles each stage; the first block of every
    # stage after stage 0 downsamples with the next stride entry.
    filters = (init_filters * 2)
    stride_count = 2
    for (i, stage) in enumerate(repetitions):
        filters *= 2
        for j in range(stage):
            if ((i == 0) and (j == 0)):
                x = residual_block(filters, reduction=model_params.reduction, strides=1, groups=model_params.groups, is_first=True, **kwargs)(x)
            elif ((i != 0) and (j == 0)):
                x = residual_block(filters, reduction=model_params.reduction, strides=stride_size[stride_count], groups=model_params.groups, **kwargs)(x)
                stride_count += 1
            else:
                x = residual_block(filters, reduction=model_params.reduction, strides=1, groups=model_params.groups, **kwargs)(x)
    # Optional classification head.
    if include_top:
        x = layers.GlobalAveragePooling3D()(x)
        if (model_params.dropout is not None):
            x = layers.Dropout(model_params.dropout)(x)
        x = layers.Dense(classes)(x)
        x = layers.Activation('softmax', name='output')(x)
    # Trace back to the true source inputs when a tensor was passed in.
    if (input_tensor is not None):
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = input
    model = models.Model(inputs, x)
    # Load weights from a local file when the path exists, otherwise via
    # the pretrained-weights helper.
    if weights:
        if ((type(weights) == str) and os.path.exists(weights)):
            model.load_weights(weights)
        else:
            load_model_weights(model, model_params.model_name, weights, classes, include_top, **kwargs)
    return model
class DeepModel(nn.Module):
    """Backbone selector wrapping one of several classification nets.

    Supported ``config`` values: 'resnet18', 'resnet32grasp',
    'wideresnet', 'vgg19'.

    Args:
        input_size: unused here; kept for interface compatibility.
        num_classes: number of output classes for the chosen backbone.
        config: backbone identifier string.

    Raises:
        TypeError: if ``config`` names no known backbone.
    """

    def __init__(self, input_size, num_classes, config):
        super().__init__()
        if (config == 'resnet18'):
            self.model = resnet18(num_classes=num_classes)
        elif (config == 'resnet32grasp'):
            self.model = resnet32_grasp(num_classes=num_classes)
        elif (config == 'wideresnet'):
            dropout_rate = 0.0
            self.model = Wide_ResNet(40, 10, dropout_rate, num_classes)
        elif (config == 'vgg19'):
            # VGG-19 layer plan; final pool/classifier are replaced to
            # match the 2x2 adaptive pooling used here.
            cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512]
            self.model = vgg.VGG(vgg.make_layers(cfg, batch_norm=True), num_classes=num_classes)
            self.model.avgpool = nn.AdaptiveAvgPool2d((2, 2))
            self.model.classifier = nn.Sequential(nn.Linear(((512 * 2) * 2), 1024), nn.ReLU(True), nn.Linear(1024, 512), nn.ReLU(True), nn.Linear(512, num_classes))
        else:
            # BUG FIX: the bare `raise TypeError` gave no hint which
            # config was rejected; same exception type, now with context.
            raise TypeError(f'Unknown model config: {config!r}')

    def forward(self, x):
        """Run the selected backbone on *x*."""
        return self.model(x)
def gen_CustomizedNet():
    """Return a small MLP forecaster class (torch imported lazily).

    The generated ``CustomizedNet`` flattens (batch, input_size,
    input_feature_num) inputs, applies Linear -> Dropout -> ReLU ->
    Linear, and re-inserts a length-1 horizon axis so the output shape is
    (batch, 1, output_size).
    """
    import torch
    import torch.nn as nn

    class CustomizedNet(nn.Module):
        def __init__(self, dropout, input_size, input_feature_num, hidden_dim, output_size):
            super().__init__()
            self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
            self.dropout = nn.Dropout(dropout)
            self.relu1 = nn.ReLU()
            self.fc2 = nn.Linear(hidden_dim, output_size)

        def forward(self, x):
            # Flatten (time, feature) dims into one vector per sample.
            flat = x.view(-1, x.shape[1] * x.shape[2])
            hidden = self.relu1(self.dropout(self.fc1(flat)))
            # Restore a singleton horizon dimension.
            return torch.unsqueeze(self.fc2(hidden), 1)

    return CustomizedNet
def test_constaninit():
    """ConstantInit must fill only targeted layers with the given constants.

    Covers layer-type filtering ('Conv2d', 'Linear', the shared parent
    '_ConvNd'), bias initialization from a probability, and type
    validation of the `val`/`bias`/`bias_prob`/`layer` arguments.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    # Only the Conv2d layer (model[0]) should be touched; the Linear
    # layer (model[2]) must keep its default initialization.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0)))
    assert (not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0)))
    # bias_prob routes through bias_init_with_prob to derive the bias.
    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)
    # Conv2d values from the first pass remain; Linear is now filled.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # '_ConvNd' matches both Conv2d and Conv1d via the shared base class.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4.0, bias=5.0, layer='_ConvNd')
    func(model)
    assert torch.all((model[0].weight == 4.0))
    assert torch.all((model[2].weight == 4.0))
    assert torch.all((model[0].bias == 5.0))
    assert torch.all((model[2].bias == 5.0))
    # Invalid argument types must raise TypeError at construction time.
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias='1')
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias_prob='1')
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, layer=1)
def test_sparsify():
    """sparsify()/densify() must not change LogisticRegression outputs.

    Decision-function values are compared across all four combinations of
    (dense | sparse coef_) x (dense | sparse input data) on iris.
    """
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    # Baseline: dense coefficients, dense data.
    pred_d_d = clf.decision_function(iris.data)
    clf.sparsify()
    assert sp.issparse(clf.coef_)
    # Sparse coefficients, dense data.
    pred_s_d = clf.decision_function(iris.data)
    sp_data = sp.coo_matrix(iris.data)
    # Sparse coefficients, sparse data.
    pred_s_s = clf.decision_function(sp_data)
    clf.densify()
    # Dense coefficients, sparse data.
    pred_d_s = clf.decision_function(sp_data)
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
_model_architecture('linformer_roberta', 'linformer_roberta_base')
def linformer_roberta_base_architecture(args):
    """Populate *args* with the shared base-architecture defaults.

    The 'base' preset adds no overrides of its own.

    NOTE(review): the preceding `_model_architecture(...)` call looks
    like a registration decorator whose '@' was lost -- confirm against
    the original source.
    """
    base_architecture(args)
class SemanticStableDiffusionPipeline(metaclass=DummyObject):
    """Import-time placeholder used when torch/transformers are missing.

    Any attempt to construct or load the real pipeline raises an
    informative "install these backends" error via ``requires_backends``.
    """

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    # BUG FIX: both loaders take `cls` but lacked @classmethod, so they
    # would receive an instance as `cls` (and the real API exposes them
    # as classmethods, per diffusers' generated dummy objects).
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
class DefaultDataset(data.Dataset):
    """Flat unlabeled image dataset: every file under *root*, name-sorted.

    Yields RGB PIL images (optionally transformed); no labels exist, so
    ``targets`` stays ``None``.
    """

    def __init__(self, root, transform=None):
        self.samples = sorted(listdir(root))
        self.transform = transform
        self.targets = None

    def __getitem__(self, index):
        path = self.samples[index]
        image = Image.open(path).convert('RGB')
        if self.transform is None:
            return image
        return self.transform(image)

    def __len__(self):
        return len(self.samples)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.