code stringlengths 101 5.91M |
|---|
def run_inference_for_single_image(image, graph):
    """Run one object-detection pass over a single image with a frozen TF1 graph.

    Args:
        image: HWC image array; only shape[0]/shape[1] (height, width) are read
            here, the array itself is fed as a 1-image batch.
        graph: a tf.Graph containing an 'image_tensor:0' input and the standard
            detection output tensors.

    Returns:
        dict with 'num_detections' (int), 'detection_boxes', 'detection_scores',
        'detection_classes' (uint8), and optionally 'detection_masks', all with
        the batch dimension stripped.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Collect every tensor name so we only request outputs that exist.
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks']:
                tensor_name = (key + ':0')
                if (tensor_name in all_tensor_names):
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            if ('detection_masks' in tensor_dict):
                # Masks come back in box coordinates; re-project them onto the
                # full image, keeping only the real (non-padded) detections.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, (- 1)])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, (- 1), (- 1)])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(detection_masks, detection_boxes, image.shape[0], image.shape[1])
                # Binarize at 0.5 to get hard uint8 masks.
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Single inference call; image is wrapped into a batch of one.
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
            # Strip the batch dimension and convert to convenient host types.
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if ('detection_masks' in output_dict):
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
class RLAv1_Bottleneck(nn.Module):
    """ResNet bottleneck block extended with a recurrent layer aggregation (RLA)
    hidden state ``h`` that is concatenated onto the input, plus optional SE and
    ECA attention modules.

    forward(x, h) returns (out, y, h, identity):
        out      - post-residual, post-ReLU block output
        y        - pre-residual branch output (feeds the RLA recurrence)
        h        - hidden state, average-pooled when the block downsamples
        identity - the (possibly downsampled) shortcut tensor
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # conv1 consumes the input concatenated with the rla_channel hidden state.
        self.conv1 = conv1x1(inplanes + rla_channel, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When the block strides, the hidden state must be pooled to match.
        self.averagePooling = None
        if downsample is not None and stride != 1:
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        # Optional channel-attention modules (use `is not None` identity tests).
        self.se = None
        if SE:
            self.se = SELayer(planes * self.expansion, reduction)
        self.eca = None
        if ECA_size is not None:
            self.eca = eca_layer(planes * self.expansion, int(ECA_size))

    def forward(self, x, h):
        identity = x
        # Fuse the RLA hidden state into the block input along channels.
        x = torch.cat((x, h), dim=1)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se is not None:
            out = self.se(out)
        if self.eca is not None:
            out = self.eca(out)
        # y is the branch output before the residual add (RLA recurrence input).
        y = out
        if self.downsample is not None:
            identity = self.downsample(identity)
        if self.averagePooling is not None:
            h = self.averagePooling(h)
        out += identity
        out = self.relu(out)
        return (out, y, h, identity)
class ONNXModel(DefaultModel):
    """DefaultModel variant whose text detector is loaded via the ONNX runtime."""

    def _load_detector(self, detector_cfg, detector_model):
        # Delegates to the CRAFT-ONNX loader instead of the default backend.
        return load_craft_onnx(detector_cfg, detector_model)
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy fractions for classification logits.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) integer class labels.
        topk: tuple of k values to evaluate.

    Returns:
        list of 1-element tensors, one per k, each the fraction (in [0, 1])
        of samples whose label appears in the top-k predictions.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the maxk highest scores per sample, shape (batch, maxk).
        (_, pred) = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape (not view): the sliced mask may be non-contiguous on
            # recent PyTorch versions, where .view(-1) raises a RuntimeError.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(1.0 / batch_size))
        return res
def test_graph_coloring_get_action_mask(graph_coloring: GraphColoring) -> None:
    """The jitted valid-action mask is boolean with one entry per graph node."""
    rng_key = jax.random.PRNGKey(0)
    state, _ = graph_coloring.reset(rng_key)
    jitted_mask_fn = jax.jit(graph_coloring._get_valid_actions)
    mask = jitted_mask_fn(state.current_node_index, state.adj_matrix, state.colors)
    expected_nodes = graph_coloring.generator.num_nodes
    assert mask.dtype == jnp.bool_
    assert mask.shape == (expected_nodes,)
def test_load_points_from_multi_sweeps():
    """Exercise LoadPointsFromMultiSweeps with 0, 1, and 10 sweeps and check
    the resulting point-cloud shapes (counts depend on the fixture .pcd.bin
    file and on remove_close filtering)."""
    np.random.seed(0)
    file_client_args = dict(backend='disk')
    # Three configurations: plain, with padding+close-point removal, and the
    # same in test mode (deterministic sweep selection).
    load_points_from_multi_sweeps_1 = LoadPointsFromMultiSweeps(sweeps_num=9, use_dim=[0, 1, 2, 3, 4], file_client_args=file_client_args)
    load_points_from_multi_sweeps_2 = LoadPointsFromMultiSweeps(sweeps_num=9, use_dim=[0, 1, 2, 3, 4], file_client_args=file_client_args, pad_empty_sweeps=True, remove_close=True)
    load_points_from_multi_sweeps_3 = LoadPointsFromMultiSweeps(sweeps_num=9, use_dim=[0, 1, 2, 3, 4], file_client_args=file_client_args, pad_empty_sweeps=True, remove_close=True, test_mode=True)
    points = (np.random.random([100, 5]) * 2)
    points = LiDARPoints(points, points_dim=5)
    # No sweeps: loader 1 returns the key frame unchanged.
    input_results = dict(points=points, sweeps=[], timestamp=None)
    results = load_points_from_multi_sweeps_1(input_results)
    assert (results['points'].tensor.numpy().shape == (100, 5))
    # No sweeps with padding: key frame is repeated, close points removed.
    input_results = dict(points=points, sweeps=[], timestamp=None)
    results = load_points_from_multi_sweeps_2(input_results)
    assert (results['points'].tensor.numpy().shape == (775, 5))
    # Near-identity extrinsics for the synthetic sweep below.
    sensor2lidar_rotation = np.array([[0., 1.e-05, 0.], [(- 1.e-05), 0., (- 0.)], [(- 0.), 0., 0.]])
    sensor2lidar_translation = np.array([(- 0.0009198), (- 0.), (- 0.)])
    sweep = dict(data_path='tests/data/nuscenes/sweeps/LIDAR_TOP/n008-2018-09-18-12-07-26-0400__LIDAR_TOP__.pcd.bin', sensor2lidar_rotation=sensor2lidar_rotation, sensor2lidar_translation=sensor2lidar_translation, timestamp=0)
    # One real sweep appended to the key frame.
    input_results = dict(points=points, sweeps=[sweep], timestamp=1.0)
    results = load_points_from_multi_sweeps_1(input_results)
    assert (results['points'].tensor.numpy().shape == (500, 5))
    input_results = dict(points=points, sweeps=[sweep], timestamp=1.0)
    results = load_points_from_multi_sweeps_2(input_results)
    assert (results['points'].tensor.numpy().shape == (451, 5))
    # Ten sweeps: loaders 2 and 3 should agree on the total point count.
    input_results = dict(points=points, sweeps=([sweep] * 10), timestamp=1.0)
    results = load_points_from_multi_sweeps_2(input_results)
    assert (results['points'].tensor.numpy().shape == (3259, 5))
    input_results = dict(points=points, sweeps=([sweep] * 10), timestamp=1.0)
    results = load_points_from_multi_sweeps_3(input_results)
    assert (results['points'].tensor.numpy().shape == (3259, 5))
class Viewer():
    """Network-state visualizer: renders hosts/subnets as a networkx graph
    (matplotlib) and prints readable observation/state tables (PrettyTable)."""

    def __init__(self, network):
        # network: the scenario network object; positions are laid out once.
        self.network = network
        self.subnets = self._get_subnets(network)
        self.positions = self._get_host_positions(network)

    def render_graph(self, state, ax=None, show=False, width=5, height=6):
        """Draw the current state as a colored host graph on `ax` (or a new figure)."""
        G = self._construct_graph(state)
        colors = []
        labels = {}
        for n in list(G.nodes):
            colors.append(G.nodes[n]['color'])
            labels[n] = G.nodes[n]['label']
        if (ax is None):
            fig = plt.figure(figsize=(width, height))
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        nx.draw_networkx_nodes(G, self.positions, node_size=1000, node_color=colors, ax=ax)
        nx.draw_networkx_labels(G, self.positions, labels, font_size=10, font_weight='bold')
        nx.draw_networkx_edges(G, self.positions)
        ax.axis('off')
        # Positions are computed on a 0..100 scale (see _get_host_positions).
        ax.set_xlim(left=0.0, right=100.0)
        legend_entries = EpisodeViewer.legend(compromised=False)
        ax.legend(handles=legend_entries, fontsize=12, loc=2)
        if show:
            fig.tight_layout()
            plt.show()
            plt.close(fig)

    def render_episode(self, episode, width=7, height=5):
        """Replay an episode interactively via EpisodeViewer (graph from the first state)."""
        init_ep_state = episode[0][0]
        G = self._construct_graph(init_ep_state)
        EpisodeViewer(episode, G, self.network.sensitive_hosts, width, height)

    def render_readable(self, obs):
        """Print an observation as auxiliary + per-host tables."""
        (host_obs, aux_obs) = obs.get_readable()
        aux_table = self._construct_table_from_dict(aux_obs)
        host_table = self._construct_table_from_list_of_dicts(host_obs)
        print('Observation:')
        print(aux_table)
        print(host_table)

    def render_readable_state(self, state):
        """Print the full state as a per-host table."""
        host_obs = state.get_readable()
        host_table = self._construct_table_from_list_of_dicts(host_obs)
        print('State:')
        print(host_table)

    def _construct_table_from_dict(self, d):
        # Single-row table: keys become headers.
        headers = list(d.keys())
        table = PrettyTable(headers)
        row = [str(d[k]) for k in headers]
        table.add_row(row)
        return table

    def _construct_table_from_list_of_dicts(self, l):
        # One row per dict; headers taken from the first entry.
        headers = list(l[0].keys())
        table = PrettyTable(headers)
        for d in l:
            row = [str(d[k]) for k in headers]
            table.add_row(row)
        return table

    def _construct_graph(self, state):
        """Build a networkx graph: fully-connected within each subnet, plus
        edges between the first hosts of connected subnets."""
        G = nx.Graph()
        sensitive_hosts = self.network.sensitive_hosts
        for subnet in self.subnets:
            for m in subnet:
                node_color = get_host_representation(state, sensitive_hosts, m, COLORS)
                node_pos = self.positions[m]
                G.add_node(m, color=node_color, pos=node_pos, label=str(m))
            # Clique within the subnet.
            for x in subnet:
                for y in subnet:
                    if (x == y):
                        continue
                    G.add_edge(x, y)
        # Inter-subnet edges go through each subnet's first ("prime") node.
        subnet_prime_nodes = []
        for subnet in self.subnets:
            subnet_prime_nodes.append(subnet[0])
        for x in subnet_prime_nodes:
            for y in subnet_prime_nodes:
                if (x == y):
                    continue
                if self.network.subnets_connected(x[0], y[0]):
                    G.add_edge(x, y)
        return G

    def _get_host_positions(self, network):
        """Lay hosts out on a 0..100 canvas: rows by subnet depth, columns by
        subnet index within the depth; the agent node sits near the first host."""
        address_space = network.address_space
        depths = network.get_subnet_depths()
        max_depth = max(depths)
        subnets_by_depth = [[] for i in range((max_depth + 1))]
        for (subnet_id, subnet_depth) in enumerate(depths):
            # subnet 0 is the internet/agent subnet; it gets no row slot.
            if (subnet_id == 0):
                continue
            subnets_by_depth[subnet_depth].append(subnet_id)
        max_pos = 100
        margin = 10
        row_height = (max_pos / (max_depth + 1))
        positions = {}
        for m in address_space:
            m_subnet = m[0]
            m_depth = depths[m_subnet]
            # Row band for this depth, column band for this subnet.
            row_max = (max_pos - (m_depth * row_height))
            row_min = (max_pos - ((m_depth + 1) * row_height))
            num_cols = len(subnets_by_depth[m_depth])
            col_width = (max_pos / num_cols)
            m_col = subnets_by_depth[m_depth].index(m_subnet)
            col_min = (m_col * col_width)
            col_max = ((m_col + 1) * col_width)
            (col_pos, row_pos) = self._get_host_position(m, positions, address_space, row_min, row_max, col_min, col_max, margin)
            positions[m] = (col_pos, row_pos)
        # Place the agent just to the right of the first host, clamped on-canvas.
        first_m_pos = positions[address_space[0]]
        agent_row = first_m_pos[1]
        agent_col = min((first_m_pos[0] + (margin * 4)), (max_pos - margin))
        positions[AGENT] = (agent_col, agent_row)
        return positions

    def _get_host_position(self, m, positions, address_space, row_min, row_max, col_min, col_max, margin):
        """Randomly sample a position inside the band, retrying (up to 100x)
        until it is at least `threshold` away from same-subnet hosts."""
        subnet_hosts = []
        for other_m in address_space:
            if (other_m == m):
                continue
            if (other_m[0] == m[0]):
                subnet_hosts.append(other_m)
        threshold = 8
        col_margin = ((col_max - col_min) / 4)
        col_mid = (col_max - ((col_max - col_min) / 2))
        m_y = random.uniform((row_min + margin), (row_max - margin))
        m_x = random.uniform((col_mid - col_margin), (col_mid + col_margin))
        good = False
        n = 0
        while ((n < 100) and (not good)):
            good = True
            m_x = random.uniform((col_mid - col_margin), (col_mid + col_margin))
            m_y = random.uniform((row_min + margin), (row_max - margin))
            for other_m in subnet_hosts:
                if (other_m not in positions):
                    continue
                (other_x, other_y) = positions[other_m]
                dist = math.hypot((m_x - other_x), (m_y - other_y))
                if (dist < threshold):
                    good = False
                    break
            n += 1
        # May return an overlapping position if no good one was found in 100 tries.
        return (m_x, m_y)

    def _get_subnets(self, network):
        # Group host addresses by subnet id; the agent lives in subnet 0.
        subnets = [[] for i in range(network.get_number_of_subnets())]
        for m in network.address_space:
            subnets[m[0]].append(m)
        subnets[0].append(AGENT)
        return subnets
def show_result_pyplot(model, img, result, palette=None, fig_size=(15, 10), opacity=0.5, title='', block=True, out_file=None):
    """Visualize a segmentation result with matplotlib and optionally save it.

    The model draws the result onto the image (BGR), which is displayed in RGB;
    if out_file is given, the drawn BGR image is also written to disk.
    """
    # Unwrap DataParallel-style containers to reach the underlying model.
    if hasattr(model, 'module'):
        model = model.module
    drawn = model.show_result(img, result, palette=palette, show=False, opacity=opacity)
    plt.figure(figsize=fig_size)
    plt.imshow(mmcv.bgr2rgb(drawn))
    plt.title(title)
    plt.tight_layout()
    plt.show(block=block)
    if out_file is not None:
        mmcv.imwrite(drawn, out_file)
class LLaVATrainer(Trainer):
    """HuggingFace Trainer subclass for LLaVA: optional modality-grouped length
    sampling, and adapter-only checkpointing when only the multimodal projector
    is being tuned."""

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        # No sampler for missing or unsized (iterable-style) datasets.
        if self.train_dataset is None or not has_length(self.train_dataset):
            return None
        if self.args.group_by_modality_length:
            # Group samples of similar length per modality to reduce padding waste.
            lengths = self.train_dataset.modality_lengths
            return LengthGroupedSampler(self.args.train_batch_size, world_size=self.args.world_size, lengths=lengths, group_by_modality=True)
        return super()._get_train_sampler()

    def _save_checkpoint(self, model, trial, metrics=None):
        if getattr(self.args, 'tune_mm_mlp_adapter', False):
            # Only the multimodal adapter is trained: save just those weights.
            from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
            checkpoint_folder = f'{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}'
            run_dir = self._get_output_dir(trial=trial)
            output_dir = os.path.join(run_dir, checkpoint_folder)
            keys_to_match = ['mm_projector', 'vision_resampler']
            if getattr(self.args, 'use_im_start_end', False):
                keys_to_match.extend(['embed_tokens', 'embed_in'])
            weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)
            # Write only on the main process (rank 0, or -1 for single-process runs).
            if self.args.local_rank in (0, -1):
                self.model.config.save_pretrained(output_dir)
                torch.save(weight_to_save, os.path.join(output_dir, 'mm_projector.bin'))
        else:
            super()._save_checkpoint(model, trial, metrics)

    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        # Adapter-only runs skip the default full-model save; weights are
        # written by _save_checkpoint instead.
        if getattr(self.args, 'tune_mm_mlp_adapter', False):
            pass
        else:
            super()._save(output_dir, state_dict)
def simxGetIntegerSignal(clientID, signalName, operationMode):
    """Read an integer signal from the remote API server (ctypes wrapper).

    Returns a (returnCode, signalValue) tuple from c_GetIntegerSignal.
    """
    signalValue = ct.c_int()
    # On Python 3 the C API needs bytes, so encode str signal names.
    if ((sys.version_info[0] == 3) and (type(signalName) is str)):
        signalName = signalName.encode('utf-8')
    return (c_GetIntegerSignal(clientID, signalName, ct.byref(signalValue), operationMode), signalValue.value)
class TFCvtSelfAttentionProjection(tf.keras.layers.Layer):
    """Convolutional projection applied to the token map inside CvT self-attention.

    NOTE(review): only projection_method == 'dw_bn' assigns
    self.convolution_projection; any other value leaves the attribute unset and
    call() would raise AttributeError — confirm 'dw_bn' is the only method
    callers pass.
    """

    def __init__(self, config: CvtConfig, embed_dim: int, kernel_size: int, stride: int, padding: int, projection_method: str='dw_bn', **kwargs):
        super().__init__(**kwargs)
        if (projection_method == 'dw_bn'):
            # Depthwise-conv + batch-norm projection.
            self.convolution_projection = TFCvtSelfAttentionConvProjection(config, embed_dim, kernel_size, stride, padding, name='convolution_projection')
        self.linear_projection = TFCvtSelfAttentionLinearProjection()

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        # Project spatially, then flatten to the linear token layout.
        hidden_state = self.convolution_projection(hidden_state, training=training)
        hidden_state = self.linear_projection(hidden_state)
        return hidden_state
class NoRescalingComponent(Rescaling, AutotabularPreprocessingAlgorithm):
    """Identity rescaling component: passes data through unchanged."""

    def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None) -> 'AutotabularPreprocessingAlgorithm':
        """No-op fit; mark the preprocessor as a passthrough."""
        self.preprocessor = 'passthrough'
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        """Return X unchanged."""
        return X

    # Declared static: the method takes no self. Without the decorator,
    # instance calls would misbind the instance to dataset_properties.
    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        """Describe the component's capabilities for the pipeline search."""
        return {'shortname': 'NoRescaling', 'name': 'NoRescaling', 'handles_missing_values': False, 'handles_nominal_values': False, 'handles_numerical_features': True, 'prefers_data_scaled': False, 'prefers_data_normalized': False, 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'is_deterministic': True, 'handles_sparse': True, 'handles_dense': True, 'input': (SPARSE, DENSE, UNSIGNED_DATA), 'output': (INPUT,), 'preferred_dtype': None}
class MultiControlNetModel(ModelMixin):
    """Wrapper running several ControlNets and summing their residuals.

    Each controlnet gets its own conditioning image and scale; down-block and
    mid-block residuals are accumulated across nets.
    """

    def __init__(self, controlnets: Union[(List[ControlNetModel], Tuple[ControlNetModel])]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[(torch.Tensor, float, int)], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, guess_mode: bool=False, return_dict: bool=True) -> Union[(ControlNetOutput, Tuple)]:
        """Run every controlnet and return summed (down_block_res_samples, mid_block_res_sample)."""
        for (i, (image, scale, controlnet)) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            (down_samples, mid_sample) = controlnet(sample=sample, timestep=timestep, encoder_hidden_states=encoder_hidden_states, controlnet_cond=image, conditioning_scale=scale, class_labels=class_labels, timestep_cond=timestep_cond, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, guess_mode=guess_mode, return_dict=return_dict)
            # First net initializes the accumulators; later nets add into them.
            if (i == 0):
                (down_block_res_samples, mid_block_res_sample) = (down_samples, mid_sample)
            else:
                down_block_res_samples = [(samples_prev + samples_curr) for (samples_prev, samples_curr) in zip(down_block_res_samples, down_samples)]
                mid_block_res_sample += mid_sample
        return (down_block_res_samples, mid_block_res_sample)

    def save_pretrained(self, save_directory: Union[(str, os.PathLike)], is_main_process: bool=True, save_function: Callable=None, safe_serialization: bool=False, variant: Optional[str]=None):
        """Save each controlnet to save_directory, save_directory_1, _2, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant)
            idx += 1
            # Each subsequent net goes to "<save_directory>_<idx>".
            model_path_to_save = (model_path_to_save + f'_{idx}')

    # @classmethod restored: the method receives `cls` and returns cls(...);
    # without the decorator it is unusable as MultiControlNetModel.from_pretrained(path).
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[(str, os.PathLike)]], **kwargs):
        """Load controlnets from path, path_1, path_2, ... until a directory is missing."""
        idx = 0
        controlnets = []
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = (pretrained_model_path + f'_{idx}')
        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')
        if (len(controlnets) == 0):
            raise ValueError(f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {(pretrained_model_path + '_0')}.")
        return cls(controlnets)
def match_answer(answer, page, nlp=None, MAX_PARAGRAPH_CANDIDATE=3, debug=False, index_mapping=None, normalize_text=True, fast=False, approximate_search=False):
    """Locate `answer` inside page['text'] paragraphs.

    Tries, in order: exact substring match, exact token-sequence match over the
    most-overlapping candidate paragraphs, then a BLEU-scored best span.

    Returns (paragraph_id, start_character, end_character, score); a tuple of
    -1s signals failure. NOTE(review): source indentation was lost upstream and
    has been reconstructed here; in particular the candidate-collection break
    placement and end_token semantics (exclusive for token match, inclusive for
    BLEU) should be confirmed against the original.
    """
    original_answer = answer
    if normalize_text:
        answer = normalize_answer(answer)
    # Tokenize the answer: whitespace split without an nlp (or in approximate
    # mode), otherwise spaCy-style tokens.
    try:
        if ((nlp == None) or approximate_search):
            answer_tokens = [token for token in answer.split()]
        else:
            answer_tokens = [token.text for token in nlp(answer)]
    except Exception as e:
        print('Exception {}'.format(e))
        return ((- 1), (- 1), (- 1), (- 1))
    if normalize_text:
        # Strip multi-byte (>2-byte UTF-8) characters and lowercase.
        answer_tokens = [''.join((char for char in x if (len(char.encode('utf-8')) < 3))).lower() for x in answer_tokens]
    found = False
    max_bleu = None
    start_token = None
    end_token = None
    paragraph_id = None
    candidate_dict = {}
    tokenized_paragraphs = []
    tokenized_paragraphs_offset = []
    for (idx, paragraph) in enumerate(page['text']):
        # Fast path: exact substring hit on the (possibly normalized) answer.
        index = paragraph.find(answer)
        if (index >= 0):
            assert (paragraph[index:(index + len(answer))] == answer)
            return (idx, index, (index + len(original_answer)), 1.0)
        index = paragraph.find(original_answer)
        if (index >= 0):
            assert (paragraph[index:(index + len(original_answer))] == original_answer)
            return (idx, index, (index + len(original_answer)), 1.0)
        paragraph_tokens = []
        paragraph_offsets = []
        if ((nlp == None) or approximate_search):
            # Whitespace tokenization; offsets are unknown (all zero).
            seen = ''
            for token in paragraph.split():
                paragraph_tokens.append(token)
                paragraph_offsets.append(0)
                seen += (str(token) + ' ')
        else:
            for token in nlp(paragraph):
                paragraph_tokens.append(token.text)
                paragraph_offsets.append(token.idx)
        if normalize_text:
            paragraph_tokens = [normalize_answer(''.join((char for char in x if (len(char.encode('utf-8')) < 3)))) for x in paragraph_tokens]
        tokenized_paragraphs.append(paragraph_tokens)
        tokenized_paragraphs_offset.append(paragraph_offsets)
        intersection = len(set(paragraph_tokens).intersection(set(answer_tokens)))
        if (intersection == len(answer_tokens)):
            # All answer tokens occur here: look for a contiguous token span
            # whose non-empty-token join equals the answer's join.
            ax = ' '.join([x.strip() for x in answer_tokens if (len(x.strip()) > 0)])
            for w_start in range(len(paragraph_tokens)):
                token = paragraph_tokens[w_start]
                if (token == answer_tokens[0]):
                    bx = ' '.join([x.strip() for x in paragraph_tokens[w_start:] if (len(x.strip()) > 0)])
                    if bx.startswith(ax):
                        for w_end in range(w_start, len(paragraph_tokens)):
                            token = paragraph_tokens[w_end]
                            if (token == answer_tokens[(- 1)]):
                                cx = ' '.join([x.strip() for x in paragraph_tokens[w_start:(w_end + 1)] if (len(x.strip()) > 0)])
                                if (ax == cx):
                                    start_character = paragraph_offsets[w_start]
                                    end_character = (paragraph_offsets[w_end] + len(paragraph_tokens[w_end]))
                                    return (idx, start_character, end_character, 1.0)
        # Bucket the paragraph by its token-overlap count for candidate ranking.
        if (intersection not in candidate_dict):
            candidate_dict[intersection] = []
        candidate_dict[intersection].append(idx)
    # Take paragraphs from the highest-overlap buckets until we have enough
    # candidates. NOTE(review): break placement reconstructed — confirm.
    candidate_idx = []
    for key in sorted(candidate_dict.keys(), reverse=True):
        for idx in candidate_dict[key]:
            candidate_idx.append(idx)
        if (len(candidate_idx) >= MAX_PARAGRAPH_CANDIDATE):
            break
    assert (len(candidate_idx) > 0)
    if index_mapping:
        # Drop candidates already claimed in index_mapping.
        new_candidate_idx = []
        for idx in candidate_idx:
            if (idx not in index_mapping):
                new_candidate_idx.append(idx)
        candidate_idx = new_candidate_idx
        if (len(candidate_idx) == 0):
            return ((- 1), (- 1), (- 1), (- 1))
    if fast:
        # Fast mode: return only the best paragraph id, no span.
        return (candidate_idx[0], (- 1), (- 1), (- 1))
    if ((nlp != None) and approximate_search):
        # Approximate mode tokenized with whitespace above; re-tokenize the
        # surviving candidates with nlp so offsets are real.
        answer_tokens = [token.text for token in nlp(answer)]
        for idx in candidate_idx:
            paragraph_tokens = []
            paragraph_offsets = []
            for token in nlp(page['text'][idx]):
                paragraph_tokens.append(token.text)
                paragraph_offsets.append(token.idx)
            tokenized_paragraphs[idx] = paragraph_tokens
            tokenized_paragraphs_offset[idx] = paragraph_offsets
    for idx in candidate_idx:
        paragraph_tokens = tokenized_paragraphs[idx]
        # Exact token-sequence match (end_token exclusive here).
        for i in range(((len(paragraph_tokens) - len(answer_tokens)) + 1)):
            if (paragraph_tokens[i:(i + len(answer_tokens))] == answer_tokens):
                found = True
                max_bleu = 1.0
                paragraph_id = idx
                start_token = i
                end_token = (i + len(answer_tokens))
                break
        if (not found):
            # BLEU search over all spans; prefer higher BLEU, then shorter spans.
            for init in range(len(paragraph_tokens)):
                for end in range(init, len(paragraph_tokens)):
                    candidate = paragraph_tokens[init:(end + 1)]
                    BLEU = get_bleu(candidate, answer_tokens)
                    if ((not max_bleu) or (BLEU > max_bleu) or ((BLEU == max_bleu) and end_token and start_token and (((end + 1) - init) < (end_token - start_token)))):
                        max_bleu = BLEU
                        paragraph_id = idx
                        start_token = init
                        end_token = end
                    if (max_bleu == 1):
                        break
                if (max_bleu == 1):
                    break
        if (max_bleu == 1):
            break
    if debug:
        print('wikipedia_tile:', page['wikipedia_title'])
        print('bleu: {0:.2f}'.format(max_bleu))
        print('paragraph_id:', paragraph_id)
        print('start_token_id:', start_token)
        print('end_token_id:', end_token)
        print('start_token:', tokenized_paragraphs[paragraph_id][start_token])
        print('end_token:', tokenized_paragraphs[paragraph_id][end_token])
        print('TOKENIZED MATCH', tokenized_paragraphs[paragraph_id][start_token:end_token])
        print('len(tokenized_paragraphs):', len(tokenized_paragraphs))
        print('len(tokenized_paragraphs_offset):', len(tokenized_paragraphs_offset))
        print('paragraph_tokens:', tokenized_paragraphs[paragraph_id])
        print('paragraph_offsets:', tokenized_paragraphs_offset[paragraph_id])
        print('start_character:', tokenized_paragraphs_offset[paragraph_id][start_token])
        print('end_character:', tokenized_paragraphs_offset[paragraph_id][end_token])
    paragraph_tokens = tokenized_paragraphs[paragraph_id]
    paragraph_offsets = tokenized_paragraphs_offset[paragraph_id]
    if (nlp == None):
        # Whitespace offsets are fake (zeros), so character positions are unknown.
        start_character = (- 1)
        end_character = (- 1)
    else:
        start_character = paragraph_offsets[start_token]
        end_character = (paragraph_offsets[end_token] + len(paragraph_tokens[end_token]))
    return (paragraph_id, start_character, end_character, max_bleu)
class ImageFeatureExtractionMixin(metaclass=DummyObject):
    """Placeholder that raises a helpful error when the 'vision' extra is missing."""
    # Backends required for the real implementation of this class.
    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        # Raises ImportError-style guidance if the vision backend is absent.
        requires_backends(self, ['vision'])
class FastSmoothSeNormConv3d(nn.Module):
    """3D convolution followed by in-place ReLU and fast smooth SE normalization."""

    def __init__(self, in_channels, out_channels, reduction=2, **kwargs):
        super().__init__()
        # Extra Conv3d options (kernel_size, stride, padding, ...) pass through **kwargs.
        self.conv = nn.Conv3d(in_channels, out_channels, bias=True, **kwargs)
        self.norm = FastSmoothSENorm(out_channels, reduction)

    def forward(self, x):
        activated = F.relu(self.conv(x), inplace=True)
        return self.norm(activated)
def eval_one_episode(params, rng):
    """Greedily roll out one episode with the given network params.

    Relies on module-level `env` and `network`. Returns (params, total_reward).
    """
    (state, timestep) = env.reset(rng)

    def step(val):
        # Carry: (params, state, timestep, total_reward, rng, done).
        (params, state, timestep, tot_r, rng, done) = val
        (rng, key_step) = jax.random.split(rng)
        obs = timestep.observation.grid
        # Add a leading batch axis for the network, then act greedily.
        q_values = network.apply(params, obs[(jnp.newaxis,)])
        a_t = jnp.argmax(q_values, axis=(- 1))[0]
        (state, timestep) = env.step(state, a_t)
        tot_r += timestep.reward
        return (params, state, timestep, tot_r, rng, timestep.last())

    # Loop until the timestep reports the episode is over (val[5] is done).
    (params, state, timestep, tot_r, rng, done) = jax.lax.while_loop((lambda val: (val[5] == False)), step, (params, state, timestep, 0, rng, False))
    return (params, tot_r)
def quick_test(model_in_memory: RumorTweetsClassifer):
    """Run a single smoke-test prediction through a trained rumor classifier.

    Builds an ELMo-indexed dataset reader and predictor, predicts on one
    hard-coded tweet, and prints the output; failures are caught and logged.
    """
    timestamped_print('prediction test on trained rumor classifier: ')
    try:
        elmo_token_indexer = ELMoTokenCharactersIndexer()
        rumor_train_set_reader = RumorTweetsDataReader(token_indexers={'elmo': elmo_token_indexer})
        predictor = RumorTweetTaggerPredictor(model_in_memory, dataset_reader=rumor_train_set_reader)
        tweet_id = ''
        # FIX: the literal below was truncated mid-string upstream (unterminated
        # quote, a syntax error); the closing quote is restored here.
        test_tweet_sentence = 'Single-aisle Airbus A320s are the workhorses of the global fleet & proliferate across European skies'
        outputs = predictor.predict(tweet_id, test_tweet_sentence)
        print('predictor output: ', outputs)
        print('print vocab: ')
        model_in_memory.vocab.print_statistics()
        timestamped_print(('prediction label on (%s): %s' % (test_tweet_sentence, (outputs['label'] if ('label' in outputs) else 'label is unknown'))))
    except Exception as e:
        timestamped_print('errors in quick model test ')
        print(e)
def parse_domain_task(env_name, universe):
    """Split an env name like '<universe>-<domain>-<task>' into (domain, task).

    The universe and domain substrings are stripped from the name; the task is
    matched exactly first, then by substring, falling back to DEFAULT_TASK.

    Raises:
        ValueError: if multiple task names match the remaining string.
    """
    env_name = env_name.replace(universe, '').strip('-')
    domains = DOMAINS_BY_UNIVERSE[universe]
    domain = next((domain for domain in domains if (domain in env_name)))
    env_name = env_name.replace(domain, '').strip('-')
    tasks = TASKS_BY_DOMAIN_BY_UNIVERSE[universe][domain]
    # Prefer an exact task-name match before substring matching.
    task = next((task for task in tasks if (task == env_name)), None)
    if task is None:
        matching_tasks = [task for task in tasks if (task in env_name)]
        if len(matching_tasks) > 1:
            # Fixed typo in user-facing message: "unmbiguously" -> "unambiguously".
            raise ValueError('Task name cannot be unambiguously determined: {}. Following task names match: {}'.format(env_name, matching_tasks))
        elif len(matching_tasks) == 1:
            task = matching_tasks[0]
        else:
            task = DEFAULT_TASK
    return (domain, task)
def CreateSrcDataLoader(args):
    """Build the source-domain DataLoader for domain adaptation training.

    Supports 'gta5' and 'synthia' sources; both are cropped to the cityscapes
    target size. Raises ValueError for any other args.source.
    """
    if args.source == 'gta5':
        source_dataset = GTA5DataSet(args.data_dir, args.data_list, crop_size=image_sizes['cityscapes'], resize=image_sizes['gta5'], mean=IMG_MEAN, max_iters=(args.num_steps * args.batch_size))
    elif args.source == 'synthia':
        source_dataset = SYNDataSet(args.data_dir, args.data_list, crop_size=image_sizes['cityscapes'], resize=image_sizes['synthia'], mean=IMG_MEAN, max_iters=(args.num_steps * args.batch_size))
    else:
        # Fixed typo in user-facing message: "mush" -> "must".
        raise ValueError('The source dataset must be either gta5 or synthia')
    source_dataloader = data.DataLoader(source_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    return source_dataloader
def main():
    """Build the example chip: register tile types, assign the tile grid,
    create the routing nodes and timings, then write the BBA to argv[1]."""
    ch = Chip('example', 'EX1', X, Y)
    ch.strs.read_constids(path.join(path.dirname(__file__), 'constids.inc'))
    # Registering the tile types has side effects on the chip object.
    logic = create_logic_tiletype(ch)
    io = create_io_tiletype(ch)
    bram = create_bram_tiletype(ch)
    null = create_corner_tiletype(ch)
    for x in range(X):
        for y in range(Y):
            on_x_edge = x in (0, X - 1)
            on_y_edge = y in (0, Y - 1)
            if on_x_edge and on_y_edge:
                tile = 'NULL'        # corners are empty
            elif on_x_edge or on_y_edge:
                tile = 'IO'          # remaining perimeter is IO
            elif y % 15 == 7:
                tile = 'BRAM'        # a BRAM row every 15 rows
            else:
                tile = 'LOGIC'
            ch.set_tile_type(x, y, tile)
    create_nodes(ch)
    set_timings(ch)
    ch.write_bba(sys.argv[1])
import functools


# NOTE(review): the original line read "_cache(maxsize=1)" — a caching decorator
# whose "@...lru" prefix was evidently stripped by extraction (sibling blocks show
# the same stripping of "@pytest.mark"). Restored as a proper lru_cache decorator.
@functools.lru_cache(maxsize=1)
def get_spacy_nlp():
    """Load the spaCy en_core_web_lg pipeline once and memoize it."""
    import en_core_web_lg
    nlp = en_core_web_lg.load()
    return nlp
def imagenet_convnext_small_pretrained(output_dim):
    """Create an ImageNet-pretrained ConvNeXt-Small with its classifier head
    replaced to emit `output_dim` outputs."""
    model = timm.create_model('convnext_small', pretrained=True)
    return _convnext_replace_fc(model, output_dim)
import pytest


# NOTE(review): the two leading ".parametrize(...)" lines were stripped
# "@pytest.mark.parametrize" decorators (a syntax error as extracted); restored.
@pytest.mark.parametrize('old_shape', [(100,), (1, 23), (2, 4, 5)])
@pytest.mark.parametrize('new_shape', [(100,), (1, 23), (2, 4, 5)])
def test_reshape_head(old_shape, new_shape, depth=1000):
    """Reshaping the message head and reshaping back must round-trip exactly."""
    np.random.seed(0)
    p = 8
    bits = np.random.randint((1 << p), size=((depth,) + old_shape), dtype=np.uint64)
    message = cs.base_message(old_shape)
    # Push random p-bit symbols so the head carries non-trivial state.
    (other_bits_push, _) = cs.repeat(cs.Uniform(p), depth)
    (message,) = other_bits_push(message, bits)
    resized = cs.reshape_head(message, new_shape)
    reconstructed = cs.reshape_head(resized, old_shape)
    assert_message_equal(message, reconstructed)
def create_event(name, value):
    """Build an event record mapping 'name' and 'value' to the given arguments."""
    return {'name': name, 'value': value}
class DataHolder():
    """Holds train/validation splits plus normalized copies and NN-shaped labels.

    NOTE(review): X_*_norm uses MinMaxScaler while the NORMALIZE_ALL branch
    overwrites X_train/X_validate with StandardScaler output, and each split is
    fit independently (validation is not transformed with the training fit) —
    confirm this is intentional.
    """

    def __init__(self, training, validation):
        # training / validation: (X, y) pairs.
        self.X_train = training[0]
        self.X_validate = validation[0]
        self.y_train = training[1]
        self.y_validate = validation[1]
        # Min-max scaled copies (kept alongside the raw features).
        self.X_train_norm = MinMaxScaler().fit_transform(self.X_train)
        self.X_validate_norm = MinMaxScaler().fit_transform(self.X_validate)
        if NORMALIZE_ALL:
            # Optionally standardize the primary feature matrices in place.
            self.X_train = StandardScaler().fit_transform(self.X_train)
            self.X_validate = StandardScaler().fit_transform(self.X_validate)
        # Column-vector float32 labels for neural-network training.
        self.y_train_nn = []
        self.y_validate_nn = []
        for y in self.y_train:
            self.y_train_nn.append([y])
        for y in self.y_validate:
            self.y_validate_nn.append([y])
        self.y_train_nn = np.array(self.y_train_nn, dtype=np.float32)
        self.y_validate_nn = np.array(self.y_validate_nn, dtype=np.float32)

    def dump(self, base):
        """Write train/validate (raw and min-max normalized) CSVs as <base>_*.csv."""
        header = ','.join([('x' + str(x)) for x in range(1, (1 + self.X_train.shape[1]))])
        header += ','
        header += ','.join([('y' + str(x)) for x in range(1, (1 + self.y_train_nn.shape[1]))])
        np.savetxt((base + '_train.csv'), np.hstack((self.X_train, self.y_train_nn)), fmt='%10.5f', delimiter=',', header=header, comments='')
        np.savetxt((base + '_validate.csv'), np.hstack((self.X_validate, self.y_validate_nn)), fmt='%10.5f', delimiter=',', header=header, comments='')
        np.savetxt((base + '_train_norm.csv'), np.hstack((self.X_train_norm, self.y_train_nn)), fmt='%10.5f', delimiter=',', header=header, comments='')
        np.savetxt((base + '_validate_norm.csv'), np.hstack((self.X_validate_norm, self.y_validate_nn)), fmt='%10.5f', delimiter=',', header=header, comments='')
class ResNetBackbone(nn.Module):
def __init__(self, layers, dcn_layers=[0, 0, 0, 0], dcn_interval=1, atrous_layers=[], block=Bottleneck, norm_layer=nn.BatchNorm2d, in_channels=3):
super().__init__()
self.num_base_layers = len(layers)
self.layers = nn.ModuleList()
self.channels = []
self.norm_layer = norm_layer
self.dilation = 1
self.atrous_layers = atrous_layers
self.inplanes = 64
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self._make_layer(block, 64, layers[0], dcn_layers=dcn_layers[0], dcn_interval=dcn_interval)
self._make_layer(block, 128, layers[1], stride=2, dcn_layers=dcn_layers[1], dcn_interval=dcn_interval)
self._make_layer(block, 256, layers[2], stride=2, dcn_layers=dcn_layers[2], dcn_interval=dcn_interval)
self._make_layer(block, 512, layers[3], stride=2, dcn_layers=dcn_layers[3], dcn_interval=dcn_interval)
self.backbone_modules = [m for m in self.modules() if isinstance(m, nn.Conv2d)]
def _make_layer(self, block, planes, blocks, stride=1, dcn_layers=0, dcn_interval=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
if (len(self.layers) in self.atrous_layers):
self.dilation += 1
stride = 1
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False, dilation=self.dilation), self.norm_layer((planes * block.expansion)))
layers = []
use_dcn = (dcn_layers >= blocks)
layers.append(block(self.inplanes, planes, stride, downsample, self.norm_layer, self.dilation, use_dcn=use_dcn))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
use_dcn = (((i + dcn_layers) >= blocks) and ((i % dcn_interval) == 0))
layers.append(block(self.inplanes, planes, norm_layer=self.norm_layer, use_dcn=use_dcn))
layer = nn.Sequential(*layers)
self.channels.append((planes * block.expansion))
self.layers.append(layer)
return layer
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for layer in self.layers:
x = layer(x)
outs.append(x)
return tuple(outs)
def init_backbone(self, path):
    """Load pretrained weights, remapping torchvision-style 'layerN.*' keys to 'layers.(N-1).*'."""
    state_dict = torch.load(path)
    for key in list(state_dict):
        if not key.startswith('layer'):
            continue
        stage_index = int(key[5])  # 'layer1...' -> 1
        remapped = 'layers.' + str(stage_index - 1) + key[6:]
        state_dict[remapped] = state_dict.pop(key)
    # strict=False: checkpoint may lack heads added on top of the backbone.
    self.load_state_dict(state_dict, strict=False)
def add_layer(self, conv_channels=1024, downsample=2, depth=1, block=Bottleneck):
    """Append one extra stage to the backbone (e.g. for additional FPN levels)."""
    planes = conv_channels // block.expansion
    self._make_layer(block, planes, blocks=depth, stride=downsample)
def main(test_file, out_file):
    """End-to-end evaluation: blend the two prediction sets, score them, write the CSV."""
    ft, fz = read_predictions()
    test_df, Y_test = read_ground_truth(test_file)
    blended = blend_concat(ft, fz, Y_test)
    blended, results = compute_metrics(blended)
    results.to_csv(out_file)
    print(results)
def _copy_input(input):
if torch.is_tensor(input):
return input.detach().clone()
else:
return input.copy() |
def area(masks):
    """Per-mask pixel area for a (N, H, W) uint8 mask stack, as float32.

    Raises ValueError for any dtype other than np.uint8.
    """
    if masks.dtype != np.uint8:
        raise ValueError('Masks type should be np.uint8')
    return masks.sum(axis=(1, 2), dtype=np.float32)
def rx_pv(host, port, chunk=hl2ss.ChunkSize.PERSONAL_VIDEO, mode=hl2ss.StreamMode.MODE_1, width=1920, height=1080, framerate=30, divisor=1, profile=hl2ss.VideoProfile.H265_MAIN, level=hl2ss.H26xLevel.DEFAULT, bitrate=None, options=None, decoded_format='bgr24'):
    """Create a PV (front camera) receiver; decodes frames when decoded_format is set."""
    if bitrate is None:
        bitrate = get_video_codec_default_bitrate(width, height, framerate, divisor, profile)
    if options is None:
        options = get_video_codec_default_options(width, height, framerate, divisor, profile)
    common = (host, port, chunk, mode, width, height, framerate, divisor, profile, level, bitrate, options)
    if decoded_format:
        return hl2ss.rx_decoded_pv(*common, decoded_format)
    return hl2ss.rx_pv(*common)
# Residue line '.timeout(10)' in the source is a stripped decorator; restored.
@pytest.mark.timeout(10)
def test_obtain_exact_trajectories():
    """Each worker must contribute exactly n_traj_per_worker trajectories, each replaying its fixed action."""
    max_path_length = 15
    n_workers = 8
    env = GarageEnv(PointEnv())
    # One distinct constant action per worker so trajectories are attributable.
    per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
    policies = [FixedPolicy(env.spec, ([action] * max_path_length)) for action in per_worker_actions]
    workers = WorkerFactory(seed=100, max_path_length=max_path_length, n_workers=n_workers)
    sampler = MultiprocessingSampler.from_worker_factory(workers, policies, envs=env)
    n_traj_per_worker = 3
    rollouts = sampler.obtain_exact_trajectories(n_traj_per_worker, agent_update=policies)
    assert (sum(rollouts.lengths) >= (n_workers * n_traj_per_worker))
    assert (len(rollouts.lengths) == (n_workers * n_traj_per_worker))
    worker = (- 1)
    for (count, rollout) in enumerate(rollouts.split()):
        # Trajectories arrive grouped per worker, n_traj_per_worker at a time.
        if ((count % n_traj_per_worker) == 0):
            worker += 1
        assert (rollout.actions == per_worker_actions[worker]).all()
    sampler.shutdown_worker()
    env.close()
def calc_gdp_noise_multi(total_queries, target_epsilon, delta):
    """GDP noise calibration following Algorithm 2 of the Gaussian Differential
    Privacy analysis: find mu whose (eps, delta) matches the target, then scale
    by sqrt(total_queries) by composition.

    (The original docstring was an unterminated string literal — a syntax
    error; it has been reconstructed here.)

    Returns the per-query Gaussian noise multiplier sigma.
    """
    def f(mu, eps, delta):
        # Root of this function is the mu achieving exactly `delta` at `eps`.
        return delta_eps_mu(eps, mu) - delta
    final_mu = brentq(lambda x: f(x, target_epsilon, delta), 1e-05, 1000)
    sigma = np.sqrt(total_queries) / final_mu
    return sigma
def compare_graphs_specificity(x):
    """Specificity-style ratio x[2] / (x[2] + x[1]); 0 when the denominator is 0.

    NOTE(review): x[1]/x[2] are presumably false-positive / true-negative
    counts — confirm against the caller.
    """
    denominator = x[2] + x[1]
    if denominator == 0:
        return 0
    return float(x[2]) / float(denominator)
class Aggressive(object):
    """Parameter preset for an aggressive driving behavior profile."""
    # NOTE(review): units (speed, distance, time) are presumed from typical
    # behavior-agent presets — confirm against the consumer of this class.
    max_speed = 70               # cruise speed cap
    speed_lim_dist = 1           # margin relative to the posted speed limit
    speed_decrease = 8           # slow-down applied when a vehicle is ahead
    safety_time = 3              # desired time buffer to the vehicle ahead
    min_proximity_threshold = 8  # distance below which avoidance logic engages
    braking_distance = 4         # distance at which braking is forced
    tailgate_counter = (- 1)     # -1 appears to disable the tailgating cooldown
def qop_registry(op_types):
    """Class decorator factory that registers a QOperator subclass under each
    eligible ONNX op type in the comma-separated `op_types` string."""
    _DIRECT_TYPES = ['QGemm', 'QAttention', 'QEmbedLayerNormalization', 'ArgMax', 'Reshape', 'Transpose', 'Squeeze', 'Unsqueeze', 'Gather', 'MaxPool', 'Pad', 'Resize', 'Split']

    def decorator_op(cls):
        assert cls.__name__.endswith('Operator'), "The name of subclass of QOperator should end with 'Operator' substring."
        base_name = cls.__name__[:-len('Operator')]
        if base_name in QOPERATORS:
            raise ValueError('Cannot have two operators with the same name.')
        for raw_type in op_types.split(','):
            op_type = raw_type.strip()
            # Register QLinear* ops plus a fixed allow-list of other op types.
            if op_type.startswith('QLinear') or op_type in _DIRECT_TYPES:
                QOPERATORS[op_type] = cls
        return cls
    return decorator_op
def quasirandom(n_samples, dimension, sampler):
    """Draw `n_samples` quasi-random points in [0, 1]^dimension.

    `sampler` is either one of the names 'LHS', 'Halton', 'Hammersley',
    'Sobol', or an already-constructed skopt sampler object. Some sequences
    start with degenerate points, so a few leading samples are skipped.
    """
    engine = sampler  # pass-through when a sampler object is given directly
    n_skip = 0
    if sampler == 'LHS':
        engine = skopt.sampler.Lhs()
    elif sampler == 'Halton':
        engine = skopt.sampler.Halton(min_skip=1, max_skip=1)
    elif sampler == 'Hammersley':
        # 'Hammersly' is skopt's (misspelled) class name.
        if dimension == 1:
            engine = skopt.sampler.Hammersly(min_skip=1, max_skip=1)
        else:
            engine = skopt.sampler.Hammersly()
        n_skip = 1
    elif sampler == 'Sobol':
        engine = skopt.sampler.Sobol(randomize=False)
        n_skip = 1 if dimension < 3 else 2
    space = [(0.0, 1.0)] * dimension
    points = engine.generate(space, n_samples + n_skip)[n_skip:]
    return np.asarray(points, dtype=config.real(np))
class ExcludeRowsMissingTarget(object):
    """Drop the rows whose target value is missing from X, y and sample_weight.

    Fix: `transform` was declared without `self` and without @staticmethod
    (evidently a stripped decorator) — calling it on an instance would have
    bound the instance to `X`. Restored as a static method; class-level calls
    (`ExcludeRowsMissingTarget.transform(...)`) behave exactly as before.
    """

    @staticmethod
    def transform(X=None, y=None, sample_weight=None, warn=False):
        """Return (X, y, sample_weight) with rows of missing `y` removed.

        Inputs are pandas objects; indexes are reset after dropping.
        When `y` is None or has no missing values the inputs are returned
        untouched. `warn=True` additionally emits a UserWarning.
        """
        if y is None:
            return (X, y, sample_weight)
        y_missing = pd.isnull(y)
        if np.sum(np.array(y_missing)) == 0:
            # Nothing to drop — fast path.
            return (X, y, sample_weight)
        logger.debug('Exclude rows with missing target values')
        if warn:
            warnings.warn('There are samples with missing target values in the data which will be excluded for further analysis')
        y = y.drop(y.index[y_missing])
        y.reset_index(drop=True, inplace=True)
        if X is not None:
            X = X.drop(X.index[y_missing])
            X.reset_index(drop=True, inplace=True)
        if sample_weight is not None:
            sample_weight = sample_weight.drop(sample_weight.index[y_missing])
            sample_weight.reset_index(drop=True, inplace=True)
        return (X, y, sample_weight)
def _select_links(link_ids, true_parents):
if (link_ids is None):
return None
return {par: {true_parents[par][link]: '-->'} for par in true_parents for link in link_ids} |
class SubprocVecEnv(VecEnv):
    """Vectorized env that runs each sub-environment in its own subprocess,
    communicating over pipes with ('command', payload) tuples."""

    def __init__(self, env_fns, spaces=None):
        """env_fns: list of zero-arg callables, each creating one environment."""
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        # One pipe per env: parent keeps `remotes`, workers get `work_remotes`.
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        # env_fn is cloudpickle-wrapped so lambdas/closures survive pickling.
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # Daemonize so workers die with the parent instead of hanging.
            p.daemon = True
            p.start()
        for remote in self.work_remotes:
            # Parent closes its copy of the worker-side pipe ends.
            remote.close()
        # Query one worker for the (shared) observation/action spaces.
        self.remotes[0].send(('get_spaces', None))
        (observation_space, action_space) = self.remotes[0].recv()
        self.viewer = None
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def step_async(self, actions):
        """Dispatch one action per env without waiting for results."""
        self._assert_not_closed()
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Collect the results of a previous step_async as stacked arrays."""
        self._assert_not_closed()
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        # infos stays a tuple of dicts; the rest are stacked along axis 0.
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)

    def reset(self):
        """Reset every env and return the stacked initial observations."""
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close_extras(self):
        """Tell workers to shut down and join them (called from VecEnv.close)."""
        self.closed = True
        if self.waiting:
            # Drain pending step results so the 'close' send cannot deadlock.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

    def get_images(self):
        """Ask every worker for a rendered frame and return the list."""
        self._assert_not_closed()
        for pipe in self.remotes:
            pipe.send(('render', None))
        imgs = [pipe.recv() for pipe in self.remotes]
        return imgs

    def _assert_not_closed(self):
        assert (not self.closed), 'Trying to operate on a SubprocVecEnv after calling close()'
class ParallelGroupTest(unittest.TestCase):
    """Tests for process-group rank generation under various parallel slicings.

    Fix: the two GPU tests had their @unittest.skipIf decorators stripped to
    bare no-op tuple expressions in the source; restored below.
    """

    def test_ranks_generation(self):
        """Check pg ranks for several (dim, size) slicings, for one and two instances."""
        dmp_sizes = [[('model', 2), ('pipeline', 2), ('data', 2)], [('tensor', 4), ('data', 2)], [('model', 2), ('sequence', 2), ('pipeline', 2), ('data', 2)]]
        correct_ranks = [{'model': [[0, 1], [2, 3], [4, 5], [6, 7]], 'pipeline': [[0, 2], [1, 3], [4, 6], [5, 7]], 'data': [[0, 4], [1, 5], [2, 6], [3, 7]]}, {'tensor': [[0, 1, 2, 3], [4, 5, 6, 7]], 'data': [[0, 4], [1, 5], [2, 6], [3, 7]]}, {'model': [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], 'sequence': [[0, 2], [1, 3], [4, 6], [5, 7], [8, 10], [9, 11], [12, 14], [13, 15]], 'pipeline': [[0, 4], [1, 5], [2, 6], [3, 7], [8, 12], [9, 13], [10, 14], [11, 15]], 'data': [[0, 8], [1, 9], [2, 10], [3, 11], [4, 12], [5, 13], [6, 14], [7, 15]]}]
        correct_same_group_ranks_10 = [{'model': [10, 11], 'pipeline': [8, 10], 'data': [10, 14]}, {'tensor': [8, 9, 10, 11], 'data': [10, 14]}, {'model': [10, 11], 'sequence': [8, 10], 'pipeline': [10, 14], 'data': [2, 10]}]
        correct_same_group_ranks_0 = [{'model': [0, 1], 'pipeline': [0, 2], 'data': [0, 4]}, {'tensor': [0, 1, 2, 3], 'data': [0, 4]}, {'model': [0, 1], 'sequence': [0, 2], 'pipeline': [0, 4], 'data': [0, 8]}]
        for (index, slicing_dim) in enumerate(dmp_sizes):
            size = np.prod([p[1] for p in slicing_dim])
            _DistributedContext.WORLD_SIZE = size
            rank_order = list(range(size))
            pg_ranks = get_pg_ranks(slicing_dim, rank_order)
            for name in pg_ranks[0]:
                self.assertEqual(pg_ranks[0][name], correct_ranks[index][name])
            ranks_in_same_group = get_ranks_in_same_group((slicing_dim, rank_order), 0)
            self.assertEqual(ranks_in_same_group, correct_same_group_ranks_0[index])
            # Doubling the world size should yield a shifted second instance.
            _DistributedContext.WORLD_SIZE = (size * 2)
            rank_order = list(range((size * 2)))
            pg_ranks = get_pg_ranks(slicing_dim, rank_order)
            for name in pg_ranks[0]:
                self.assertEqual(pg_ranks[0][name], correct_ranks[index][name])
                second_instance_correct_ranks = copy.deepcopy(correct_ranks[index][name])
                for d in second_instance_correct_ranks:
                    for i in range(len(d)):
                        d[i] += size
                self.assertEqual(pg_ranks[1][name], second_instance_correct_ranks)
            ranks_in_same_group = get_ranks_in_same_group((slicing_dim, rank_order), 10)
            self.assertEqual(ranks_in_same_group, correct_same_group_ranks_10[index])
            reset_distributed()

    def test_2_process_create_groups(self):
        run_dist_code('test_pg_dist_with_2node', nproc=2)

    @unittest.skipIf((torch.cuda.is_available() and (torch.cuda.device_count() < 4)), 'Requires 4 gpus')
    def test_4_process_create_groups(self):
        run_dist_code('test_pg_dist_with_4node', nproc=4)

    @unittest.skipIf((torch.cuda.is_available() and (torch.cuda.device_count() < 4)), 'Requires 4 gpus')
    def test_4_process_create_groups_without_atorch_init(self):
        run_dist_code('test_pg_dist_with_4node_without_atorch_init', nproc=4)
# Residue line '(config_path=..., config_name=...)' in the source was a
# stripped decorator (and a syntax error as written); restored as @hydra.main.
@hydra.main(config_path=os.path.join('..', 'fairseq', 'config'), config_name='config')
def hydra_main(cfg: FairseqConfig) -> float:
    """Hydra entry point: resolve the config, run training, and return the
    best validation metric (inf when unavailable, for sweepers to minimize)."""
    add_defaults(cfg)
    if cfg.common.reset_logging:
        reset_logging()
    else:
        with open_dict(cfg):
            # Keep hydra's job logging config available to workers.
            cfg.job_logging_cfg = OmegaConf.to_container(HydraConfig.get().job_logging, resolve=True)
    # Re-create the config fully resolved so interpolations are frozen.
    cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True))
    OmegaConf.set_struct(cfg, True)
    try:
        if cfg.common.profile:
            with torch.cuda.profiler.profile():
                with torch.autograd.profiler.emit_nvtx():
                    distributed_utils.call_main(cfg, pre_main)
        else:
            distributed_utils.call_main(cfg, pre_main)
    except BaseException as e:
        if not cfg.common.suppress_crashes:
            raise
        else:
            logger.error('Crashed! ' + str(e))
    try:
        best_val = metrics.get_smoothed_value('valid', cfg.checkpoint.best_checkpoint_metric)
    except Exception:
        # Was a bare `except:` — narrowed so Ctrl-C is not swallowed.
        best_val = None
    if best_val is None:
        best_val = float('inf')
    return best_val
def create_annotations_from_bbox_ids(sorted_node_ids, bbox_by_id, parent_id, max_id):
    """Generate the (annotation, bbox-annotation) pair for every bbox node,
    threading the running max_id through; returns (annotations, max_id)."""
    annotations = []
    for node_id in sorted_node_ids:
        ann, bbox_ann, max_id = generate_new_anns_from_bbox_object(bbox_by_id[node_id], parent_id, max_id)
        annotations.extend((ann, bbox_ann))
    return (annotations, max_id)
class TestInitAutoAccelerate(unittest.TestCase):
    """Smoke test spawning auto_accelerate across two ranks.

    Fix: the @unittest.skipIf decorator had been stripped to a bare no-op
    tuple expression in the source; restored below.
    """

    @unittest.skipIf(((torch.cuda.device_count() < 2) or (torch_version() < (2, 0, 0))), 'run with gpu_num >=2 torch.version > 2.0')
    def test_init_auto_accelerate(self):
        world_size = 2
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(find_free_port())
        mp.spawn(_test_init_auto_accelerate, args=(world_size,), nprocs=world_size, join=True)
def train(data, X, Y, model, criterion, optim, batch_size):
    """Run one training epoch and return total loss normalized by element count."""
    model.train()
    total_loss = 0
    n_samples = 0
    for inputs, targets in data.get_batches(X, Y, batch_size, True):
        model.zero_grad()
        preds = model(inputs)
        # De-normalize predictions/targets before computing the loss.
        scale = data.scale.expand(preds.size(0), data.m)
        loss = criterion(preds * scale, targets * scale)
        loss.backward()
        optim.step()
        total_loss += loss.item()
        n_samples += preds.size(0) * data.m
    return total_loss / n_samples
class CallbackContainer(object):
    """Holds a list of callbacks and broadcasts trainer lifecycle events to all of them."""

    def __init__(self, callbacks: Optional[List]=None, queue_length: int=10):
        # Accept both callback classes (instantiated here) and ready instances.
        self.callbacks = []
        if callbacks is not None:
            for cb in callbacks:
                self.callbacks.append(cb() if isinstance(cb, type) else cb)
        self.queue_length = queue_length

    def set_params(self, params):
        for cb in self.callbacks:
            cb.set_params(params)

    def set_model(self, model: Any):
        self.model = model
        for cb in self.callbacks:
            cb.set_model(model)

    def set_trainer(self, trainer: Any):
        self.trainer = trainer
        for cb in self.callbacks:
            cb.set_trainer(trainer)

    def on_epoch_begin(self, epoch: int, logs: Optional[Dict]=None):
        logs = (logs or {})
        for cb in self.callbacks:
            cb.on_epoch_begin(epoch, logs)

    def on_epoch_end(self, epoch: int, logs: Optional[Dict]=None, metric: Optional[float]=None):
        logs = (logs or {})
        for cb in self.callbacks:
            cb.on_epoch_end(epoch, logs, metric)

    def on_batch_begin(self, batch: int, logs: Optional[Dict]=None):
        logs = (logs or {})
        for cb in self.callbacks:
            cb.on_batch_begin(batch, logs)

    def on_batch_end(self, batch: int, logs: Optional[Dict]=None):
        logs = (logs or {})
        for cb in self.callbacks:
            cb.on_batch_end(batch, logs)

    def on_train_begin(self, logs: Optional[Dict]=None):
        logs = (logs or {})
        # Record when training started so callbacks can compute durations.
        logs['start_time'] = _get_current_time()
        for cb in self.callbacks:
            cb.on_train_begin(logs)

    def on_train_end(self, logs: Optional[Dict]=None):
        logs = (logs or {})
        for cb in self.callbacks:
            cb.on_train_end(logs)

    def on_eval_begin(self, logs: Optional[Dict]=None):
        logs = (logs or {})
        for cb in self.callbacks:
            cb.on_eval_begin(logs)
# Residue line ".parametrize(...)" in the source is a stripped decorator
# (a syntax error as written); restored as @pytest.mark.parametrize.
@pytest.mark.parametrize('as_frame', [True, False])
def test_load_ecoli(as_frame):
    """load_ecoli must yield a (336, 9) DataFrame or ndarray per as_frame."""
    df = load_ecoli(as_frame=as_frame)
    if as_frame:
        assert ((df.shape, type(df)) == ((336, 9), pd.DataFrame))
    else:
        assert ((df.shape, type(df)) == ((336, 9), np.ndarray))
def read_beam_files(file_base_path, ref_only=True):
    """Read beam-search output files that share a common path prefix.

    Always reads <base>corr and <base>src; reads <base>beam only when
    ref_only is False (otherwise the first list is empty). Files are closed
    via context managers (the original leaked file handles on CPython
    alternatives and triplicated the same read loop).

    Returns (vit_lst, corr_lst, src_lst), each a list of stripped lines.
    """
    def _read_lines(path):
        # One stripped string per line of the file.
        with open(path, 'r') as handle:
            return [line.strip() for line in handle]

    vit_lst = [] if ref_only else _read_lines(file_base_path + 'beam')
    corr_lst = _read_lines(file_base_path + 'corr')
    src_lst = _read_lines(file_base_path + 'src')
    return (vit_lst, corr_lst, src_lst)
def vis_clouds(model_id, pre_sampled=True, n_samples=1024, edge_length_threshold=0.1, shuffle=False):
    """Interactively visualize inferred point clouds over ground-truth meshes.

    For each eval example of the model's category, shows the rendered input
    image (matplotlib) plus the inferred cloud and GT mesh (mayavi), blocking
    on each mayavi window until the user closes it.
    """
    # Heavy GUI deps are imported lazily so the module stays importable headless.
    import random
    import numpy as np
    from mayavi import mlab
    import matplotlib.pyplot as plt
    from dids import Dataset
    from shapenet.core.blender_renderings.config import RenderConfig
    from shapenet.core.meshes import get_mesh_dataset
    from util3d.mayavi_vis import vis_point_cloud
    from util3d.mayavi_vis import vis_mesh
    from template_ffd.data.ids import get_example_ids
    from template_ffd.inference.clouds import get_inferred_cloud_dataset
    from template_ffd.model import get_builder
    builder = get_builder(model_id)
    cat_id = builder.cat_id
    kwargs = dict(model_id=model_id, n_samples=n_samples)
    if (not pre_sampled):
        # Threshold only applies when sampling from the mesh on the fly.
        kwargs['edge_length_threshold'] = edge_length_threshold
    cloud_dataset = get_inferred_cloud_dataset(pre_sampled=pre_sampled, **kwargs)
    image_dataset = RenderConfig().get_dataset(cat_id, builder.view_index)
    example_ids = get_example_ids(cat_id, 'eval')
    if shuffle:
        example_ids = list(example_ids)
        random.shuffle(example_ids)
    mesh_dataset = get_mesh_dataset(cat_id)
    zipped_dataset = Dataset.zip(image_dataset, cloud_dataset, mesh_dataset)
    with zipped_dataset:
        for example_id in example_ids:
            (image, cloud, mesh) = zipped_dataset[example_id]
            plt.imshow(image)
            vis_point_cloud(np.array(cloud), color=(0, 1, 0), scale_factor=0.01)
            (v, f) = (np.array(mesh[k]) for k in ('vertices', 'faces'))
            # Semi-transparent GT mesh under the inferred (green) cloud.
            vis_mesh(v, f, color=(0, 0, 1), opacity=0.1, include_wireframe=False)
            plt.show(block=False)
            mlab.show()  # blocks until the mayavi window is closed
            plt.close()
# Residue line "('mmocr.utils.ocr.KIEDataset')" in the source was a stripped
# decorator (the test takes a mock_kiedataset argument); restored.
@mock.patch('mmocr.utils.ocr.KIEDataset')
def test_readtext(mock_kiedataset):
    """End-to-end MMOCR.readtext coverage: input forms, batching, export,
    output, imshow, print_result and merge behaviors."""
    torch.manual_seed(4)
    random.seed(4)
    mmocr = MMOCR_testobj()
    mmocr_det = MMOCR_testobj(kie='', recog='')
    mmocr_recog = MMOCR_testobj(kie='', det='', recog='CRNN_TPS')
    mmocr_det_recog = MMOCR_testobj(kie='')

    def readtext(imgs, ocr_obj=mmocr, **kwargs):
        # Filenames differ per input form; drop them before comparing results.
        e2e_res = ocr_obj.readtext(imgs, **kwargs)
        for res in e2e_res:
            res.pop('filename')
        return e2e_res

    def kiedataset_with_test_dict(**kwargs):
        kwargs['dict_file'] = 'tests/data/kie_toy_dataset/dict.txt'
        return KIEDataset(**kwargs)

    mock_kiedataset.side_effect = kiedataset_with_test_dict
    toy_dir = 'tests/data/toy_dataset/imgs/test/'
    toy_img1_path = (toy_dir + 'img_1.jpg')
    # Path input and ndarray input must give identical results.
    str_e2e_res = readtext(toy_img1_path)
    toy_img1 = mmcv.imread(toy_img1_path)
    np_e2e_res = readtext(toy_img1)
    assert (str_e2e_res == np_e2e_res)
    toy_img2_path = (toy_dir + 'img_2.jpg')
    toy_img2 = mmcv.imread(toy_img2_path)
    toy_imgs = [toy_img1, toy_img2]
    toy_img_paths = [toy_img1_path, toy_img2_path]
    np_e2e_results = readtext(toy_imgs)
    str_e2e_results = readtext(toy_img_paths)
    str_tuple_e2e_results = readtext(tuple(toy_img_paths))
    assert (np_e2e_results == str_e2e_results)
    assert (str_e2e_results == str_tuple_e2e_results)
    # Batch mode must match single mode, regardless of batch sizes.
    toy_imgs.append((toy_dir + 'img_3.jpg'))
    e2e_res = readtext(toy_imgs)
    full_batch_e2e_res = readtext(toy_imgs, batch_mode=True)
    assert (full_batch_e2e_res == e2e_res)
    batch_e2e_res = readtext(toy_imgs, batch_mode=True, recog_batch_size=2, det_batch_size=2)
    assert (batch_e2e_res == full_batch_e2e_res)
    full_batch_det_res = mmocr_det.readtext(toy_imgs, batch_mode=True)
    det_res = mmocr_det.readtext(toy_imgs)
    batch_det_res = mmocr_det.readtext(toy_imgs, batch_mode=True, single_batch_size=2)
    assert (len(full_batch_det_res) == len(det_res))
    assert (len(batch_det_res) == len(det_res))
    assert all([np.allclose(full_batch_det_res[i]['boundary_result'], det_res[i]['boundary_result']) for i in range(len(full_batch_det_res))])
    assert all([np.allclose(batch_det_res[i]['boundary_result'], det_res[i]['boundary_result']) for i in range(len(batch_det_res))])
    full_batch_recog_res = mmocr_recog.readtext(toy_imgs, batch_mode=True)
    recog_res = mmocr_recog.readtext(toy_imgs)
    batch_recog_res = mmocr_recog.readtext(toy_imgs, batch_mode=True, single_batch_size=2)
    # Recognition output ordering is not guaranteed; sort before comparing.
    full_batch_recog_res.sort(key=(lambda x: x['text']))
    batch_recog_res.sort(key=(lambda x: x['text']))
    recog_res.sort(key=(lambda x: x['text']))
    assert np.all([np.allclose(full_batch_recog_res[i]['score'], recog_res[i]['score']) for i in range(len(full_batch_recog_res))])
    assert np.all([np.allclose(batch_recog_res[i]['score'], recog_res[i]['score']) for i in range(len(full_batch_recog_res))])
    # export= must write one file per input image, for every pipeline flavor.
    with tempfile.TemporaryDirectory() as tmpdirname:
        mmocr.readtext(toy_imgs, export=tmpdirname)
        assert (len(os.listdir(tmpdirname)) == len(toy_imgs))
    with tempfile.TemporaryDirectory() as tmpdirname:
        mmocr_det.readtext(toy_imgs, export=tmpdirname)
        assert (len(os.listdir(tmpdirname)) == len(toy_imgs))
    with tempfile.TemporaryDirectory() as tmpdirname:
        mmocr_recog.readtext(toy_imgs, export=tmpdirname)
        assert (len(os.listdir(tmpdirname)) == len(toy_imgs))
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmp_output = os.path.join(tmpdirname, '1.jpg')
        mmocr.readtext(toy_imgs[0], output=tmp_output)
        assert os.path.exists(tmp_output)
    with tempfile.TemporaryDirectory() as tmpdirname:
        mmocr.readtext(toy_imgs, output=tmpdirname)
        assert (len(os.listdir(tmpdirname)) == len(toy_imgs))
    with mock.patch('mmocr.utils.ocr.mmcv.imshow') as mock_imshow:
        mmocr.readtext(toy_img1_path, imshow=True)
        mock_imshow.assert_called_once()
        mock_imshow.reset_mock()
        mmocr.readtext(toy_imgs, imshow=True)
        assert (mock_imshow.call_count == len(toy_imgs))
    # print_result output should round-trip to the returned result list.
    with io.StringIO() as capturedOutput:
        sys.stdout = capturedOutput
        res = mmocr.readtext(toy_imgs, print_result=True)
        assert (json.loads(('[%s]' % capturedOutput.getvalue().strip().replace('\n\n', ',').replace("'", '"'))) == res)
        sys.stdout = sys.__stdout__
    with io.StringIO() as capturedOutput:
        sys.stdout = capturedOutput
        res = mmocr.readtext(toy_imgs, details=True, print_result=True)
        assert (json.loads(('[%s]' % capturedOutput.getvalue().strip().replace('\n\n', ',').replace("'", '"'))) == res)
        sys.stdout = sys.__stdout__
    with mock.patch('mmocr.utils.ocr.stitch_boxes_into_lines') as mock_merge:
        mmocr_det_recog.readtext(toy_imgs, merge=True)
        assert (mock_merge.call_count == len(toy_imgs))
class calc_metrics():
    """Callable computing corpus BLEU plus per-sentence BLEU, skipping pairs with an empty reference."""

    def __init__(self):
        pass

    def __call__(self, refs, preds):
        """Return ({'bleu': corpus_bleu}, (sentence_scores, kept_preds, kept_refs))."""
        kept_refs = []
        kept_preds = []
        corpus_metric = BLEU()
        sentence_scores = []
        sentence_metric = BLEU(effective_order=True)
        for ref, pred in zip(refs, preds):
            if len(ref) > 0:
                # Only score pairs that actually have a reference.
                kept_preds.append(pred)
                kept_refs.append(ref)
                sentence_scores.append(sentence_metric.sentence_score(pred, [ref]).score)
        bleu = corpus_metric.corpus_score(kept_preds, [kept_refs])
        return ({'bleu': bleu}, (sentence_scores, kept_preds, kept_refs))
def get_train_dataset(options):
    """Construct and initialize the training ReconstructDataset from the options namespace."""
    dataset = ReconstructDataset()
    return dataset.initialize(options, text_path=options.train_path, embeddings_path=options.embeddings_path, filter_length=options.train_filter_length, data_type=options.train_data_type)
class CIFAR100(DatasetBase):
    """Continual-learning wrapper around torchvision CIFAR-100 supporting the
    'ni' (new instances / non-stationary) and 'nc' (new classes) scenarios."""

    def __init__(self, scenario, params):
        dataset = 'cifar100'
        if (scenario == 'ni'):
            # In 'ni' the task count is implied by the noise-factor schedule.
            num_tasks = len(params.ns_factor)
        else:
            num_tasks = params.num_tasks
        super(CIFAR100, self).__init__(dataset, scenario, num_tasks, params.num_runs, params)

    def download_load(self):
        """Fetch CIFAR-100 (train + test) and cache arrays/labels on self."""
        dataset_train = datasets.CIFAR100(root=self.root, train=True, download=True)
        self.train_data = dataset_train.data
        self.train_label = np.array(dataset_train.targets)
        dataset_test = datasets.CIFAR100(root=self.root, train=False, download=True)
        self.test_data = dataset_test.data
        self.test_label = np.array(dataset_test.targets)

    def setup(self):
        """Build per-task splits for the configured scenario.

        Raises Exception for unknown scenarios.
        """
        if (self.scenario == 'ni'):
            (self.train_set, self.val_set, self.test_set) = construct_ns_multiple_wrapper(self.train_data, self.train_label, self.test_data, self.test_label, self.task_nums, 32, self.params.val_size, self.params.ns_type, self.params.ns_factor, plot=self.params.plot_sample)
        elif (self.scenario == 'nc'):
            # Partition the 100 classes across tasks (optionally fixed order).
            self.task_labels = create_task_composition(class_nums=100, num_tasks=self.task_nums, fixed_order=self.params.fix_order)
            self.test_set = []
            for labels in self.task_labels:
                (x_test, y_test) = load_task_with_labels(self.test_data, self.test_label, labels)
                self.test_set.append((x_test, y_test))
        else:
            raise Exception('wrong scenario')

    def new_task(self, cur_task, **kwargs):
        """Return (x_train, y_train, labels) for the given task index."""
        if (self.scenario == 'ni'):
            (x_train, y_train) = self.train_set[cur_task]
            labels = set(y_train)
        elif (self.scenario == 'nc'):
            labels = self.task_labels[cur_task]
            (x_train, y_train) = load_task_with_labels(self.train_data, self.train_label, labels)
        return (x_train, y_train, labels)

    def new_run(self, **kwargs):
        """Re-generate the task splits for a fresh run and return the test sets."""
        self.setup()
        return self.test_set

    def test_plot(self):
        # Visual sanity check of the non-stationarity transform on a few samples.
        test_ns(self.train_data[:10], self.train_label[:10], self.params.ns_type, self.params.ns_factor)
class PFNLayer(nn.Module):
    """Pillar Feature Net layer: linear + BN + ReLU over per-point features,
    then a max/avg pool across the points of each voxel.

    Fix: the source contained a bare residue line
    `_fp16(apply_to='inputs', out_fp32=True)` before forward() — a stripped
    decorator that raised NameError at class creation. Restored as mmcv's
    @auto_fp16. NOTE(review): confirm the exact decorator name against the
    original repository.
    """

    def __init__(self, in_channels, out_channels, norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01), last_layer=False, mode='max'):
        super().__init__()
        self.fp16_enabled = False
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        if not self.last_vfe:
            # Intermediate layers output half the channels; the other half is
            # recovered by concatenating the pooled max in forward().
            out_channels = out_channels // 2
        self.units = out_channels
        self.norm = build_norm_layer(norm_cfg, self.units)[1]
        self.linear = nn.Linear(in_channels, self.units, bias=False)
        assert (mode in ['max', 'avg'])
        self.mode = mode

    @auto_fp16(apply_to='inputs', out_fp32=True)
    def forward(self, inputs, num_voxels=None, aligned_distance=None):
        """inputs: (V, P, C) per-voxel point features; returns pooled (and,
        for non-final layers, point-concatenated) features."""
        x = self.linear(inputs)
        # BatchNorm1d runs over the channel dim, hence the permute round-trip.
        x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
        x = F.relu(x)
        if self.mode == 'max':
            if aligned_distance is not None:
                x = x.mul(aligned_distance.unsqueeze((- 1)))
            x_max = torch.max(x, dim=1, keepdim=True)[0]
        elif self.mode == 'avg':
            if aligned_distance is not None:
                x = x.mul(aligned_distance.unsqueeze((- 1)))
            # Average over the actual number of points in each voxel.
            x_max = (x.sum(dim=1, keepdim=True) / num_voxels.type_as(inputs).view((- 1), 1, 1))
        if self.last_vfe:
            return x_max
        else:
            # Broadcast the pooled feature back to every point and concat.
            x_repeat = x_max.repeat(1, inputs.shape[1], 1)
            x_concatenated = torch.cat([x, x_repeat], dim=2)
            return x_concatenated
def merge_gradients(*gradient_list):
    """Sum gradient dicts key-wise into one OrderedDict.

    Values for keys that appear in several dicts are added together; key
    order follows first appearance. Fix: `merged.has_key(key)` was Python 2
    only (removed in Python 3) and raised AttributeError — replaced with the
    `in` operator.
    """
    merged = OrderedDict()
    for gradients in gradient_list:
        assert isinstance(gradients, dict)  # OrderedDict is a dict subclass
        for key, val in gradients.items():
            if key in merged:
                merged[key] = merged[key] + val
            else:
                merged[key] = val
    return merged
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for vision encoder-decoder models; the encoder and
    decoder are exported via separate sub-configs."""

    def inputs(self) -> None:
        # NOTE(review): the OnnxConfig base class presumably expects a mapping
        # of input axes here; this combined config intentionally provides
        # none because export goes through the sub-configs — confirm.
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """Wrap the encoder sub-config for standalone ONNX export."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str='default') -> OnnxConfig:
        """Wrap the decoder sub-config; the decoder needs the encoder's hidden
        size to shape its cross-attention inputs."""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
class ConditionalDomainAdversarialLoss(nn.Module):
    """CDAN loss: a domain discriminator on the (feature x prediction) multilinear
    map, trained adversarially through a gradient-reversal layer, optionally
    entropy-conditioned (samples weighted by prediction certainty)."""

    def __init__(self, domain_discriminator: nn.Module, entropy_conditioning: Optional[bool]=False, randomized: Optional[bool]=False, num_classes: Optional[int]=(- 1), features_dim: Optional[int]=(- 1), randomized_dim: Optional[int]=1024, reduction: Optional[str]='mean'):
        super(ConditionalDomainAdversarialLoss, self).__init__()
        self.domain_discriminator = domain_discriminator
        # Gradient reversal with a warm-started coefficient (auto-stepped per forward).
        self.grl = WarmStartGradientReverseLayer(alpha=1.0, lo=0.0, hi=1.0, max_iters=1000, auto_step=True)
        self.entropy_conditioning = entropy_conditioning
        if randomized:
            # Randomized map keeps dimensionality at randomized_dim; requires
            # explicit feature/class dims.
            assert ((num_classes > 0) and (features_dim > 0) and (randomized_dim > 0))
            self.map = RandomizedMultiLinearMap(features_dim, num_classes, randomized_dim)
        else:
            self.map = MultiLinearMap()
        # Weighted BCE only when entropy conditioning is enabled.
        self.bce = (lambda input, target, weight: (F.binary_cross_entropy(input, target, weight, reduction=reduction) if self.entropy_conditioning else F.binary_cross_entropy(input, target, reduction=reduction)))
        self.domain_discriminator_accuracy = None

    def forward(self, g_s: torch.Tensor, f_s: torch.Tensor, g_t: torch.Tensor, f_t: torch.Tensor) -> torch.Tensor:
        """g_s/g_t: source/target classifier outputs; f_s/f_t: features.

        Returns the domain-adversarial BCE loss; also records discriminator
        accuracy on self.domain_discriminator_accuracy.
        """
        f = torch.cat((f_s, f_t), dim=0)
        g = torch.cat((g_s, g_t), dim=0)
        # Detach predictions: the classifier is not trained through this path.
        g = F.softmax(g, dim=1).detach()
        h = self.grl(self.map(f, g))
        d = self.domain_discriminator(h)
        # Domain labels: 1 for source samples, 0 for target samples.
        d_label = torch.cat((torch.ones((g_s.size(0), 1)).to(g_s.device), torch.zeros((g_t.size(0), 1)).to(g_t.device)))
        # Entropy-based certainty weight, normalized to sum to batch_size.
        weight = (1.0 + torch.exp((- entropy(g))))
        batch_size = f.size(0)
        weight = ((weight / torch.sum(weight)) * batch_size)
        self.domain_discriminator_accuracy = binary_accuracy(d, d_label)
        return self.bce(d, d_label, weight.view_as(d))
def adjust_brightness(img, brightness_factor):
    """Return a brightness-adjusted copy of a PIL image.

    brightness_factor: 0 gives a black image, 1 the original, >1 brighter.
    Raises TypeError when img is not a PIL Image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Brightness(img).enhance(brightness_factor)
def add_user_id_to_windowed_dataset(user_datasets_windowed, encode_user_id=True, as_feature=False, as_label=True, verbose=0):
    """Attach each user's id to their windows, as an extra trailing feature
    column and/or label column.

    user_datasets_windowed: {user: (data, labels)} with numpy arrays.
    encode_user_id: map users to dense integer ids (sorted order); otherwise
    the raw user key is used.
    Returns (modified_datasets, user_id_encoder) — the input dict itself when
    neither as_feature nor as_label is set.
    """
    if encode_user_id:
        user_id_encoder = {u: i for i, u in enumerate(sorted(user_datasets_windowed))}
    else:
        user_id_encoder = None
    if not (as_feature or as_label):
        return (user_datasets_windowed, user_id_encoder)
    augmented = {}
    for user, (data, labels) in user_datasets_windowed.items():
        user_id = user_id_encoder[user] if encode_user_id else user
        data_modified = data
        if as_feature:
            # Constant id column broadcast over all but the last axis.
            id_column = np.expand_dims(np.full(data.shape[:(- 1)], user_id), axis=(- 1))
            data_modified = np.append(data, id_column, axis=(- 1))
        labels_modified = labels
        if as_label:
            id_column = np.expand_dims(np.full(labels.shape[:(- 1)], user_id), axis=(- 1))
            labels_modified = np.append(labels, id_column, axis=(- 1))
        if verbose > 0:
            print(f'User {user}: id {repr(user)} -> {repr(user_id)}, data shape {data.shape} -> {data_modified.shape}, labels shape {labels.shape} -> {labels_modified.shape}')
        augmented[user] = (data_modified, labels_modified)
    return (augmented, user_id_encoder)
def get_possible_iteration_indexes(logdir, population_i):
    """Return (indexes, iterations) where indexes is a range over the iterations found for this population."""
    iterations = get_possible_iterations(logdir, population_i)
    return (range(len(iterations)), iterations)
class FinetuneRequest(RequestBaseModel):
    """Request payload describing a fine-tuning job."""
    # Base model to fine-tune (hub id or local path).
    model_name_or_path: str = 'Intel/neural-chat-7b-v3-1'
    # Local training file; NOTE(review): presumably either this or
    # dataset_name must be set — confirm with the handler.
    train_file: str = None
    # Dataset name used when train_file is not provided.
    dataset_name: str = None
    # Directory where outputs/checkpoints are written.
    output_dir: str = './tmp'
    # Small default so a smoke run completes quickly.
    max_steps: int = 3
    overwrite_output_dir: bool = True
    # Whether samples are concatenated into fixed-length blocks.
    dataset_concatenation: bool = False
    # Parameter-efficient fine-tuning method.
    peft: str = 'lora'
def test_double_pade(vrblvl=0):
    """Exercise double-precision Pade approximants on a toy homotopy.

    Returns False (0) on success: exactly two approximants are expected,
    one per start solution.
    """
    pol = ['(x^2 - 1)*(1-s) + (3*x^2 - 3/2)*s;']
    variables = ['x', 's']
    start_sols = [make_solution(variables, [1, 0]), make_solution(variables, [(- 1), 0])]
    print('The solutions at the start :')
    for solution in start_sols:
        print(solution)
    series = double_newton_at_point(pol, start_sols, idx=2, vrblvl=vrblvl)
    print('The series :')
    for item in series:
        print(item)
    approximants = double_pade_approximants(pol, start_sols, idx=2, vrblvl=vrblvl)
    print('the Pade approximants :')
    for item in approximants:
        print(item)
    return len(approximants) != 2
def str2bool(v):
    """Parse a command-line boolean for argparse.

    Accepts real bools unchanged and, case-insensitively, the common
    spellings 'true'/'t'/'yes'/'y'/'1' and 'false'/'f'/'no'/'n'/'0'
    (generalized from the original, which only accepted 'true'/'false').
    Raises argparse.ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    value = v.lower()
    if value in ('true', 't', 'yes', 'y', '1'):
        return True
    if value in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def download(malware2href: Dict[(str, str)], save_dir: str) -> None:
    """Fetch each sample archive and extract it into save_dir.

    malware2href maps file hashes to download URLs; archives use the
    conventional 'infected' password. Corrupt archives are skipped with a
    debug log entry.
    """
    os.makedirs(save_dir, exist_ok=True)
    for file_hash, href in malware2href.items():
        response = requests.get(href, allow_redirects=True)
        try:
            with ZipFile(BytesIO(response.content)) as archive:
                archive.extractall(path=save_dir, pwd=b'infected')
        except BadZipFile:
            logger.debug(f'Skipping {file_hash}')
            continue
def _canonicalise_and_remove_am(molecules_in):
    """Return the canonical SMILES for each molecule, with atom-map numbers removed."""
    return [
        rdkit_general_ops.canconicalize(molecule, remove_am=True)
        for molecule in molecules_in
    ]
class TestMain(unittest.TestCase):
    """Regression test for nuScenes detection evaluation on random-ish mock results.

    Fix: `_mock_submission` was defined without `self` yet called via
    `self._mock_submission(...)` — a stripped @staticmethod (restored below);
    without it the instance would have been bound to `nusc`.
    """
    res_mockup = 'nusc_eval.json'
    res_eval_folder = 'tmp'

    def tearDown(self):
        # Remove artifacts written by test_delta.
        if os.path.exists(self.res_mockup):
            os.remove(self.res_mockup)
        if os.path.exists(self.res_eval_folder):
            shutil.rmtree(self.res_eval_folder)

    @staticmethod
    def _mock_submission(nusc: NuScenes, split: str) -> Dict[str, dict]:
        """Build a submission dict with perturbed GT boxes and noisy class/attr labels."""
        def random_class(category_name: str) -> str:
            # Keep the true class 90% of the time, otherwise pick another.
            class_names = sorted(DETECTION_NAMES)
            tmp = category_to_detection_name(category_name)
            if ((tmp is not None) and (np.random.rand() < 0.9)):
                return tmp
            else:
                return class_names[np.random.randint(0, (len(class_names) - 1))]

        def random_attr(name: str) -> str:
            # Pick any attribute that is valid for the given detection class.
            rel_attributes = detection_name_to_rel_attributes(name)
            if (len(rel_attributes) == 0):
                return ''
            else:
                return rel_attributes[np.random.randint(0, len(rel_attributes))]

        mock_meta = {'use_camera': False, 'use_lidar': True, 'use_radar': False, 'use_map': False, 'use_external': False}
        mock_results = {}
        splits = create_splits_scenes()
        val_samples = []
        for sample in nusc.sample:
            if (nusc.get('scene', sample['scene_token'])['name'] in splits[split]):
                val_samples.append(sample)
        for sample in tqdm(val_samples, leave=False):
            sample_res = []
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                detection_name = random_class(ann['category_name'])
                sample_res.append({'sample_token': sample['token'], 'translation': list((np.array(ann['translation']) + (5 * (np.random.rand(3) - 0.5)))), 'size': list(((np.array(ann['size']) * 2) * (np.random.rand(3) + 0.5))), 'rotation': list((np.array(ann['rotation']) + ((np.random.rand(4) - 0.5) * 0.1))), 'velocity': list((nusc.box_velocity(ann_token)[:2] * (np.random.rand(3)[:2] + 0.5))), 'detection_name': detection_name, 'detection_score': random.random(), 'attribute_name': random_attr(detection_name)})
            mock_results[sample['token']] = sample_res
        mock_submission = {'meta': mock_meta, 'results': mock_results}
        return mock_submission

    def test_delta(self):
        """Evaluate the mocked submission; the ND score is pinned by the fixed seeds."""
        random.seed(42)
        np.random.seed(42)
        assert ('NUSCENES' in os.environ), 'Set NUSCENES env. variable to enable tests.'
        nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
        with open(self.res_mockup, 'w') as f:
            json.dump(self._mock_submission(nusc, 'mini_val'), f, indent=2)
        cfg = config_factory('detection_cvpr_2019')
        nusc_eval = DetectionEval(nusc, cfg, self.res_mockup, eval_set='mini_val', output_dir=self.res_eval_folder, verbose=False)
        (metrics, md_list) = nusc_eval.evaluate()
        self.assertAlmostEqual(metrics.nd_score, 0.)
def unlabel_rgb(colors):
    """Convert an ``'rgb(r, g, b)'`` style string into an ``(r, g, b)`` float tuple.

    Characters that parse as numbers (plus ',' and '.') are kept; everything
    else -- the ``rgb(`` prefix, spaces, the closing paren -- is discarded.
    The surviving comma-separated fields are parsed as floats and the first
    three are returned.
    """
    kept = []
    for ch in colors:
        try:
            float(ch)
        except ValueError:
            # Non-numeric characters survive only if they separate (',')
            # or belong to (.) a number.
            if ch in (',', '.'):
                kept.append(ch)
        else:
            kept.append(ch)
    # The trailing ',' mirrors the original sentinel so the last number is
    # flushed; the final empty segment produced by split() is dropped.
    segments = (''.join(kept) + ',').split(',')
    values = [float(segment) for segment in segments[:-1]]
    return (values[0], values[1], values[2])
def making(x):
    """Map a free-form handset description to a canonical phone-brand label.

    Matching is case-insensitive and first-match-wins; unrecognised
    descriptions fall through to 'others'.

    Fix: the original conditions contained ``('' in x)`` terms, which are
    always True in Python, so every call returned 'iphone' and every later
    branch was unreachable. Those empty literals look like non-ASCII
    (likely Chinese) brand keywords lost to an encoding problem; they have
    been removed, and for branches that had ONLY a lost keyword the plain
    ASCII brand name is used instead. Restore the native-language keywords
    if the source data contains them.
    """
    x = x.lower()
    if 'iphone' in x or 'apple' in x:
        return 'iphone'
    elif 'huawei' in x or 'honor' in x:
        return 'huawei'
    elif 'xiaomi' in x or 'redmi' in x:
        return 'xiaomi'
    elif 'meizu' in x:
        return 'meizu'
    elif 'gionee' in x:
        return 'gionee'
    elif 'samsung' in x:
        return 'samsung'
    elif 'vivo' in x:
        return 'vivo'
    elif 'oppo' in x:
        return 'oppo'
    elif 'lenovo' in x:
        return 'lenovo'
    elif 'nubia' in x:
        return 'nubia'
    elif 'oneplus' in x:
        return 'oneplus'
    elif 'smartisan' in x:
        return 'smartisan'
    elif '360' in x:
        return '360'
    elif 'zte' in x:
        return 'zte'
    else:
        return 'others'
def find_dataset_using_name(dataset_name):
    """Import ``data/<dataset_name>_dataset.py`` and return its dataset class.

    The module must define a subclass of BaseDataset whose name equals
    ``<dataset_name>dataset`` (underscores removed, case-insensitive).
    Raises NotImplementedError when no such class is found. If several
    attributes match, the last one wins (dict iteration order).
    """
    module_name = 'data.' + dataset_name + '_dataset'
    datasetlib = importlib.import_module(module_name)
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    found = None
    for attr_name, attr in datasetlib.__dict__.items():
        name_matches = attr_name.lower() == target_dataset_name.lower()
        if name_matches and issubclass(attr, BaseDataset):
            found = attr
    if found is None:
        raise NotImplementedError('In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.' % (module_name, target_dataset_name))
    return found
def mkdir_or_exist(dir_name, mode=511):
    """Create ``dir_name`` (with parents) if needed; existing dirs are fine.

    An empty path is a no-op. '~' is expanded first. ``mode`` defaults to
    511 (0o777) and is subject to the process umask.
    """
    if dir_name == '':
        return
    expanded = osp.expanduser(dir_name)
    os.makedirs(expanded, mode=mode, exist_ok=True)
def get_list_from_gridsearch(config, enable_config=True, default=None):
    """Normalize a possibly grid-searched config entry into a list of values.

    Returns the 'grid_search' list when the (deep-copied) config is a grid
    search spec, a one-element list wrapping the config otherwise, and
    ``[default]`` when the config is disabled entirely.
    """
    # Deep-copy first (as the original did) so callers never share state
    # with the returned values.
    cfg = dcopy(config)
    if not enable_config:
        return [default]
    if is_grid_search(cfg):
        return cfg['grid_search']
    return [cfg]
class TFDistilBertPreTrainedModel(metaclass=DummyObject):
    # Auto-generated dummy object: stands in for the real class when the
    # TensorFlow backend is not installed (transformers dummy-object template).
    _backends = ['tf']  # backends required for the real implementation
    def __init__(self, *args, **kwargs):
        # Presumably raises an informative error when the 'tf' backend is
        # missing -- requires_backends is defined elsewhere; confirm there.
        requires_backends(self, ['tf'])
def test_digits_cosine_two_stage():
    """FacilityLocationSelection (cosine, two-stage optimizer) must reproduce
    the precomputed ranking/gains fixtures on the digits data, and the
    selected subset must equal the corresponding rows of X_digits."""
    selector = FacilityLocationSelection(100, 'cosine', optimizer='two-stage')
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def load_omniglot():
    """Load Omniglot from 'data/omniglot/chardata.mat'.

    Returns a (train_pairs, test_pairs) tuple where each element is a list
    of (image_tensor, label_tensor) pairs; images are float32 tensors of
    shape (1, 28, 28), labels are int64 scalars.
    """
    def _to_images(flat):
        # assumes the .mat arrays are (pixels, samples) column-major --
        # transpose to sample-major before reshaping; TODO confirm.
        return flat.T.reshape((-1, 1, 28, 28))

    raw = scipy.io.loadmat('data/omniglot/chardata.mat')
    train_x = torch.from_numpy(_to_images(raw['data']).astype(np.float32)).float()
    train_y = torch.from_numpy(raw['target'].argmax(axis=0)).long()
    test_x = torch.from_numpy(_to_images(raw['testdata']).astype(np.float32)).float()
    test_y = torch.from_numpy(raw['testtarget'].argmax(axis=0)).long()
    # Iterating a tensor yields its rows, so zip pairs sample i with label i.
    return (list(zip(train_x, train_y)), list(zip(test_x, test_y)))
class LMDBGenerator():
    """Index frame files into an LMDB database.

    For every sub-directory of a frames directory, stores one record
    mapping the directory name to a '|'-joined, sorted list of its frame
    file basenames.
    """

    def __init__(self, ext='.jpg', gen_type='seq'):
        # ext: frame-file extension to index; gen_type: suffix of the lmdb dir.
        self.ext = ext
        self.gen_type = gen_type

    def generate_lmdb_file(self, root_dir, frames_dir):
        """Write the index for each directory under ``frames_dir`` into
        ``<root_dir>/lmdb_<gen_type>``, one transaction per directory.
        """
        env = lmdb.open(os.path.join(root_dir, 'lmdb_' + self.gen_type))
        try:
            for d in os.listdir(frames_dir):
                folder_dir = os.path.join(frames_dir, d)
                frame_names = sorted([f for f in os.listdir(folder_dir) if f.endswith(self.ext)])
                files_str_vec = '|'.join(frame_names)
                print('Generating lmdb for: ' + folder_dir)
                with env.begin(write=True) as txn:
                    txn.put(d.encode('ascii'), files_str_vec.encode())
        finally:
            # Fix: the environment was previously never closed, leaking the
            # handle (and the LMDB file lock) until interpreter exit.
            env.close()
class personal_video_upload(StreamUpload):
    """Streams the HoloLens 2 personal-video (PV) camera via hl2ss."""
    # Stream identity and H.264 encoding defaults.
    port = hl2ss.StreamPort.PERSONAL_VIDEO
    mode = hl2ss.StreamMode.MODE_1
    profile = hl2ss.VideoProfile.H264_MAIN
    def __init__(self, *a, width=760, height=428, fps=15, **kw):
        # Capture geometry / frame rate for the PV stream.
        self.width = width
        self.height = height
        self.fps = fps
        # Heuristic bitrate: pixels * fps * 12/50 (~0.24 bits per pixel per frame).
        self.bitrate = int(((((width * height) * fps) * 12) / 50))
        self.gop_size = hl2ss.get_gop_size(self.profile, self.fps)
        super().__init__(*a, **kw)
    def create_client(self):
        # The PV subsystem must be started on the device before connecting.
        # NOTE(review): self.host is presumably set by StreamUpload -- confirm.
        hl2ss.start_subsystem_pv(self.host, self.port)
        return hl2ss.rx_pv(self.host, self.port, hl2ss.ChunkSize.PERSONAL_VIDEO, self.mode, self.width, self.height, self.fps, self.profile, self.bitrate)
def read_yaml(yaml_path):
    """Parse the YAML file at ``yaml_path`` and return the loaded object.

    NOTE(security): ``yaml.Loader`` can construct arbitrary Python objects;
    prefer ``yaml.safe_load`` / ``SafeLoader`` if the file can ever come
    from an untrusted source.
    """
    with open(yaml_path, 'r') as f:
        yaml_file = yaml.load(f, Loader=yaml.Loader)
    return yaml_file
def accuracy(y_true: torch.LongTensor, y_pred: torch.Tensor):
    """Return the fraction of positions where prediction equals target.

    Inputs are first normalized by the project's ``_format_inputs`` helper;
    the result is a plain Python float.
    """
    true_t, pred_t = _format_inputs(y_true, y_pred)
    matches = (true_t == pred_t).float()
    return matches.mean().item()
def main(version: str, data_root: str, split_name: str, output_dir: str, config_name: str='predict_2020_icra.json') -> None:
    """Run the constant-velocity-heading and physics-oracle prediction
    baselines over a nuScenes challenge split and dump their serialized
    predictions to ``cv_preds.json`` / ``oracle_preds.json`` in ``output_dir``.

    :param version: nuScenes dataset version (e.g. 'v1.0-trainval').
    :param data_root: Path to the nuScenes dataroot.
    :param split_name: Prediction-challenge split to evaluate.
    :param output_dir: Directory that receives the two JSON files.
    :param config_name: Prediction config file name.
    """
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name)
    config = load_prediction_config(helper, config_name)
    oracle = PhysicsOracle(config.seconds, helper)
    cv_heading = ConstantVelocityHeading(config.seconds, helper)
    cv_preds = []
    oracle_preds = []
    for token in dataset:
        cv_preds.append(cv_heading(token).serialize())
        oracle_preds.append(oracle(token).serialize())
    # Fix: the original passed bare open(...) handles to json.dump, leaking
    # the file objects and relying on GC to flush/close them; use context
    # managers so the files are deterministically closed.
    with open(os.path.join(output_dir, 'cv_preds.json'), 'w') as f:
        json.dump(cv_preds, f)
    with open(os.path.join(output_dir, 'oracle_preds.json'), 'w') as f:
        json.dump(oracle_preds, f)
def get_test_data_iter(config, kv):
    """Build a shuffled MXNet NDArrayIter over random multi-input test data.

    ``kv`` is accepted for API symmetry with sibling data-iter factories
    but is unused here.
    """
    # Two input arrays of 40 samples each (40x30 and 40x20).
    test_data = [np.random.rand(40, 30), np.random.rand(40, 20)]
    # NOTE(review): 80 labels vs 40 data samples looks inconsistent --
    # confirm whether the label count should be 40.
    test_label = np.random.randint(0, 10, (80,))
    test = mx.io.NDArrayIter(test_data, test_label, batch_size=config['batch_size'], shuffle=True)
    return test
class UniformMutationTestCases(unittest.TestCase):
    """Unit tests for the UniformMutation operator: constructor validation,
    probability-0/1 behavior, and bound respect of mutated variables."""
    def test_should_constructor_raises_an_exception_is_probability_is_negative(self) -> None:
        with self.assertRaises(Exception):
            UniformMutation((- 1))
    def test_should_constructor_raises_an_exception_is_probability_is_higher_than_one(self) -> None:
        with self.assertRaises(Exception):
            UniformMutation(1.01)
    def test_should_constructor_create_a_non_null_object(self):
        # The optional second argument is the perturbation width.
        operator = UniformMutation(0.3)
        operator2 = UniformMutation(0.3, 0.7)
        self.assertIsNotNone(operator)
        self.assertIsNotNone(operator2)
    def test_should_constructor_create_a_valid_operator(self):
        operator = UniformMutation(0.5, 20)
        self.assertEqual(0.5, operator.probability)
        self.assertEqual(20, operator.perturbation)
    def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
        with self.assertRaises(Exception):
            UniformMutation(2)
    def test_should_constructor_raise_an_exception_if_the_probability_is_lower_than_zero(self):
        with self.assertRaises(Exception):
            UniformMutation((- 12))
    def test_should_the_solution_remain_unchanged_if_the_probability_is_zero(self):
        # probability 0.0 => no variable may change.
        operator = UniformMutation(0.0, 3.0)
        solution = FloatSolution([(- 5), (- 5), (- 5)], [5, 5, 5], 1)
        solution.variables = [1.0, 2.0, 3.0]
        mutated_solution = operator.execute(solution)
        self.assertEqual([1.0, 2.0, 3.0], mutated_solution.variables)
    def test_should_the_solution_change_if_the_probability_is_one(self):
        # probability 1.0 => every variable is perturbed.
        operator = UniformMutation(1.0, 3.0)
        solution = FloatSolution([(- 5), (- 5), (- 5)], [5, 5, 5], 1)
        solution.variables = [1.0, 2.0, 3.0]
        mutated_solution = operator.execute(solution)
        self.assertNotEqual([1.0, 2.0, 3.0], mutated_solution.variables)
    def test_should_the_solution_change_between_max_and_min_value(self):
        # Mutated values must be clamped into [lower_bound, upper_bound],
        # even though the starting variables lie outside the bounds.
        operator = UniformMutation(1.0, 5)
        solution = FloatSolution([(- 1), 12, (- 3), (- 5)], [1, 17, 3, (- 2)], 1)
        solution.variables = [(- 7.0), 3.0, 12.0, 13.4]
        mutated_solution = operator.execute(solution)
        for i in range(solution.number_of_variables):
            self.assertGreaterEqual(mutated_solution.variables[i], solution.lower_bound[i])
            self.assertLessEqual(mutated_solution.variables[i], solution.upper_bound[i])
def parse_gin_bindings(gin_bindings: Dict=None) -> None:
    """Bind extra gin parameters from a mapping, logging each outcome.

    A ``None`` mapping is a no-op. Bindings that gin rejects are logged as
    IGNORED rather than raised, so one bad key cannot abort the rest.
    """
    if gin_bindings is None:
        log.info('No additional gin bindings to parse')
        return
    log.info(f'Parsing additional bindings: {pformat(gin_bindings)}')
    with gin.unlock_config():
        for key, value in replace_human_redable_kwargs(gin_bindings):
            try:
                gin.bind_parameter(key, value)
            except Exception:
                status = 'IGNORED'
            else:
                status = 'BOUND '
            log.info(f'{status} - {key} : {value}')
class SCEModel(ShuffleMomentumQueueBaseModel):
    """Soft Contrastive Estimation (SCE) self-supervised model.

    Extends the shuffle-BN / momentum-queue base model with SCE's blended
    contrastive-relational loss: ``coeff`` mixes the two terms and separate
    temperatures are used for the online branch (``temp``) and the momentum
    branch (``temp_m``). Both ``temp_m`` and ``coeff`` support per-step
    warmup and (for ``coeff``) a post-warmup decay schedule.
    """
    def __init__(self, trunk: DictConfig, optimizer: DictConfig, projector: Optional[DictConfig]=None, predictor: Optional[DictConfig]=None, train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, normalize_outputs: bool=True, num_global_crops: int=2, num_local_crops: int=0, num_splits: int=0, num_splits_per_combination: int=2, mutual_pass: bool=False, initial_momentum: float=0.999, scheduler_momentum: str='constant', shuffle_bn: bool=True, num_devices: int=1, simulate_n_devices: int=8, queue: Optional[DictConfig]=None, sym: bool=False, use_keys: bool=False, temp: float=0.1, temp_m: float=0.05, start_warmup_temp_m: float=0.05, warmup_epoch_temp_m: int=0, warmup_scheduler_temp_m: Optional[str]='cosine', coeff: float=0.5, warmup_scheduler_coeff: Optional[str]='linear', warmup_epoch_coeff: int=0, start_warmup_coeff: float=1.0, scheduler_coeff: Optional[str]=None, final_scheduler_coeff: float=0.0) -> None:
        # All base-model arguments are forwarded untouched; only the SCE
        # loss hyper-parameters are handled here.
        super().__init__(trunk=trunk, optimizer=optimizer, projector=projector, predictor=predictor, train_transform=train_transform, val_transform=val_transform, test_transform=test_transform, normalize_outputs=normalize_outputs, num_global_crops=num_global_crops, num_local_crops=num_local_crops, num_splits=num_splits, num_splits_per_combination=num_splits_per_combination, mutual_pass=mutual_pass, initial_momentum=initial_momentum, scheduler_momentum=scheduler_momentum, shuffle_bn=shuffle_bn, num_devices=num_devices, simulate_n_devices=simulate_n_devices, queue=queue, sym=sym, use_keys=use_keys)
        self.save_hyperparameters()
        # Online- and momentum-branch temperatures.
        self.temp = temp
        self.temp_m = temp_m
        # temp_m warmup: ramps from start_warmup_temp_m to final_temp_m
        # over warmup_epoch_temp_m epochs.
        self.start_warmup_temp_m = start_warmup_temp_m
        self.final_temp_m = temp_m
        self.warmup_scheduler_temp_m = warmup_scheduler_temp_m
        self.warmup_epoch_temp_m = warmup_epoch_temp_m
        # coeff warmup (start_warmup_coeff -> initial_coeff) and optional
        # post-warmup decay (initial_coeff -> final_scheduler_coeff).
        self.coeff = coeff
        self.initial_coeff = coeff
        self.warmup_scheduler_coeff = warmup_scheduler_coeff
        self.warmup_epoch_coeff = warmup_epoch_coeff
        self.start_warmup_coeff = start_warmup_coeff
        self.scheduler_coeff = scheduler_coeff
        self.final_scheduler_coeff = final_scheduler_coeff
    def _precompute_mask(self) -> None:
        # Build the positive/negative mask once per fit; it depends on the
        # local batch size, the queue length, and this rank's position.
        batch_size = self.trainer.datamodule.train_local_batch_size
        self.mask = compute_sce_mask(batch_size=batch_size, num_negatives=(self.queue.shape[1] if (self.queue is not None) else 0), use_keys=self.use_keys, rank=self.global_rank, world_size=self.trainer.world_size, device=self.device)
    def on_fit_start(self) -> None:
        super().on_fit_start()
        self._precompute_mask()
    def compute_loss(self, q: Tensor, k: Tensor, k_global: Tensor, queue: (Tensor | None)) -> Tensor:
        """Return the SCE loss for queries ``q`` against keys and the queue."""
        # With use_keys, all-gathered keys also serve as negatives.
        k_loss = (k_global if self.use_keys else k)
        loss = compute_sce_loss(q=q, k=k, k_global=k_loss, use_keys=self.use_keys, queue=queue, mask=self.mask, coeff=self.coeff, temp=self.temp, temp_m=self.temp_m, LARGE_NUM=LARGE_NUM)
        return loss
    def on_train_batch_start(self, batch: Any, batch_idx: int) -> None:
        # Per-step scheduling of temp_m and coeff; order matters: warmup
        # first, then the optional post-warmup coeff decay overrides.
        if (self.warmup_epoch_temp_m > 0):
            if (self.current_epoch >= self.warmup_epoch_temp_m):
                self.temp_m = self.final_temp_m
            else:
                self.temp_m = scheduler_value(self.warmup_scheduler_temp_m, self.start_warmup_temp_m, self.final_temp_m, self.global_step, ((self.warmup_epoch_temp_m * self.training_steps_per_epoch) - 1))
        if (self.warmup_epoch_coeff > 0):
            if (self.current_epoch >= self.warmup_epoch_coeff):
                self.coeff = self.initial_coeff
            else:
                self.coeff = scheduler_value(self.warmup_scheduler_coeff, self.start_warmup_coeff, self.initial_coeff, self.global_step, ((self.warmup_epoch_coeff * self.training_steps_per_epoch) - 1))
        if (self.scheduler_coeff is not None):
            if (self.warmup_epoch_coeff > 0):
                if (self.current_epoch >= self.warmup_epoch_coeff):
                    # Decay measured from the end of the warmup phase.
                    self.coeff = scheduler_value(self.scheduler_coeff, self.initial_coeff, self.final_scheduler_coeff, (self.global_step - (self.warmup_epoch_coeff * self.training_steps_per_epoch)), (((self.trainer.max_epochs - self.warmup_epoch_coeff) * self.training_steps_per_epoch) - 1))
            else:
                self.coeff = scheduler_value(self.scheduler_coeff, self.initial_coeff, self.final_scheduler_coeff, self.global_step, ((self.trainer.max_epochs * self.training_steps_per_epoch) - 1))
        self.log('pretrain/temp', self.temp, on_step=True, on_epoch=True)
        self.log('pretrain/temp_m', self.temp_m, on_step=True, on_epoch=True)
        self.log('pretrain/coeff', self.coeff, on_step=True, on_epoch=True)
        return
def test_cqd_score_full_output():
    """cqd_score must echo its inputs and compute the expected score for a
    single archived solution with objective 1.0 at measure (0, 0)."""
    archive = GridArchive(solution_dim=2, dims=[10, 10], ranges=[((- 1), 1), ((- 1), 1)])
    archive.add_single([4.0, 4.0], 1.0, [0.0, 0.0])
    # One target point per iteration; penalties=2 expands to [0, 1].
    result = archive.cqd_score(iterations=5, target_points=np.array([[[1.0, 1.0]], [[1.0, 1.0]], [[1.0, 1.0]], [[(- 1.0), (- 1.0)]], [[(- 1.0), (- 1.0)]]]), penalties=2, obj_min=0.0, obj_max=1.0)
    assert (result.iterations == 5)
    # Score per iteration: penalty 0 contributes 1.0, penalty 1 contributes 0.5.
    assert np.isclose(result.mean, (1.0 + 0.5))
    assert np.all(np.isclose(result.scores, (1.0 + 0.5)))
    assert np.all(np.isclose(result.target_points, np.array([[[1.0, 1.0]], [[1.0, 1.0]], [[1.0, 1.0]], [[(- 1.0), (- 1.0)]], [[(- 1.0), (- 1.0)]]])))
    assert np.all(np.isclose(result.penalties, [0.0, 1.0]))
    assert np.isclose(result.obj_min, 0.0)
    assert np.isclose(result.obj_max, 1.0)
    # Max distance is the measure-space diagonal of [-1, 1]^2.
    assert np.isclose(result.dist_max, (2 * np.sqrt(2)))
    assert (result.dist_ord is None)
def parse_with_unit(s):
    """Convert a ``'<number> <unit>'`` string (e.g. ``'1.25 MBytes'``) to bytes.

    Generalized: 'Bytes' and 'GBytes' are accepted in addition to the
    original 'KBytes'/'MBytes' (decimal, SI-style multipliers).

    :raises ValueError: if the count is not numeric or the string does not
        split into exactly two fields.
    :raises KeyError: for an unknown unit.
    """
    number, unit = s.split()
    multipliers = {'Bytes': 1.0, 'KBytes': 1000.0, 'MBytes': 1000000.0, 'GBytes': 1000000000.0}
    return float(number) * multipliers[unit]
class ModelArguments():
    """Arguments selecting which pretrained model/config/tokenizer to use.

    NOTE(review): the fields use dataclasses.field(), so an ``@dataclass``
    decorator presumably precedes this class outside the visible chunk --
    confirm; without it the field() objects stay plain class attributes.
    """
    model_name_or_path: str = field(default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    language: str = field(default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'})
    train_language: Optional[str] = field(default=None, metadata={'help': 'Train language if it is different from the evaluation language.'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    do_lower_case: Optional[bool] = field(default=False, metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
def validate_qobj_against_schema(qobj):
    """Validate a Qobj against the 'qobj' JSON schema.

    Serializes the object to a dict first; validation failures surface via
    the shared JSON-schema validator with a Qiskit-specific error message.
    """
    payload = qobj.as_dict()
    validate_json_against_schema(payload, 'qobj', err_msg='Qobj failed validation. Set Qiskit log level to DEBUG for further information.')
def gan_train(config):
    """Adversarial (SeqGAN-style) fine-tuning loop for an NMT generator.

    Builds a TF1 session/graph holding both the transformer generator and a
    CNN discriminator, then alternates per epoch between (a) policy-gradient
    generator updates rewarded by the discriminator via rollouts, (b) an
    optional teacher-forcing step on ground-truth targets, and (c)
    discriminator fine-tuning on fresh positive/negative data.
    """
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    default_graph = tf.Graph()
    with default_graph.as_default():
        sess = tf.Session(config=sess_config, graph=default_graph)
        logger = logging.getLogger('')  # NOTE(review): unused; module-level logging.* is used below
        du = DataUtil(config=config)
        du.load_vocab(src_vocab=config.generator.src_vocab, dst_vocab=config.generator.dst_vocab, src_vocab_size=config.src_vocab_size, dst_vocab_size=config.dst_vocab_size)
        # --- Generator: training, sampling, and rollout graphs. ---
        generator = Model(config=config, graph=default_graph, sess=sess)
        generator.build_train_model()
        generator.build_generate(max_len=config.generator.max_length, generate_devices=config.generator.devices, optimizer=config.generator.optimizer)
        generator.build_rollout_generate(max_len=config.generator.max_length, roll_generate_devices=config.generator.devices)
        generator.init_and_restore(modelFile=config.generator.modelFile)
        # --- Discriminator: CNN with filter sizes 1, 5, 9, ... < dis_max_len. ---
        dis_filter_sizes = [i for i in range(1, config.discriminator.dis_max_len, 4)]
        dis_num_filters = [(100 + (i * 10)) for i in range(1, config.discriminator.dis_max_len, 4)]
        discriminator = DisCNN(sess=sess, max_len=config.discriminator.dis_max_len, num_classes=2, vocab_size=config.dst_vocab_size, vocab_size_s=config.src_vocab_size, batch_size=config.discriminator.dis_batch_size, dim_word=config.discriminator.dis_dim_word, filter_sizes=dis_filter_sizes, num_filters=dis_num_filters, source_dict=config.discriminator.dis_src_vocab, target_dict=config.discriminator.dis_dst_vocab, gpu_device=config.discriminator.dis_gpu_devices, positive_data=config.discriminator.dis_positive_data, negative_data=config.discriminator.dis_negative_data, source_data=config.discriminator.dis_source_data, dev_positive_data=config.discriminator.dis_dev_positive_data, dev_negative_data=config.discriminator.dis_dev_negative_data, dev_source_data=config.discriminator.dis_dev_source_data, max_epoches=config.discriminator.dis_max_epoches, dispFreq=config.discriminator.dis_dispFreq, saveFreq=config.discriminator.dis_saveFreq, saveto=config.discriminator.dis_saveto, reload=config.discriminator.dis_reload, clip_c=config.discriminator.dis_clip_c, optimizer=config.discriminator.dis_optimizer, reshuffle=config.discriminator.dis_reshuffle, scope=config.discriminator.dis_scope)
        batch_iter = du.get_training_batches(set_train_src_path=config.generator.src_path, set_train_dst_path=config.generator.dst_path, set_batch_size=config.generator.batch_size, set_max_length=config.generator.max_length)
        for epoch in range(1, (config.gan_iter_num + 1)):
            # (a) Policy-gradient generator updates.
            for gen_iter in range(config.gan_gen_iter_num):
                batch = next(batch_iter)
                (x, y_ground) = (batch[0], batch[1])
                y_sample = generator.generate_step(x)
                logging.info('generate the samples')
                (y_sample_dealed, y_sample_mask) = deal_generated_samples(y_sample, du.dst2idx)
                x_to_maxlen = extend_sentence_to_maxlen(x, config.generator.max_length)
                logging.info('calculate the reward')
                # Monte-Carlo rollouts scored by the discriminator.
                rewards = generator.get_reward(x=x, x_to_maxlen=x_to_maxlen, y_sample=y_sample_dealed, y_sample_mask=y_sample_mask, rollnum=config.rollnum, disc=discriminator, max_len=config.discriminator.dis_max_len, bias_num=config.bias_num, data_util=du)
                loss = generator.generate_step_and_update(x, y_sample_dealed, rewards)
                print('the reward is ', rewards)
                print('the loss is ', loss)
            logging.info(('save the model into %s' % config.generator.modelFile))
            generator.saver.save(generator.sess, config.generator.modelFile)
            # (b) Optional teacher forcing on the last batch's ground truth,
            # with all-ones rewards (masked to real tokens).
            if config.generator.teacher_forcing:
                logging.info('doiong the teacher forcing begin!')
                (y_ground, y_ground_mask) = deal_generated_samples_to_maxlen(y_sample=y_ground, dicts=du.dst2idx, maxlen=config.discriminator.dis_max_len)
                rewards_ground = np.ones_like(y_ground)
                rewards_ground = (rewards_ground * y_ground_mask)
                loss = generator.generate_step_and_update(x, y_ground, rewards_ground)
                print('the teacher forcing reward is ', rewards_ground)
                print('the teacher forcing loss is ', loss)
            generator.saver.save(generator.sess, config.generator.modelFile)
            # (c) Regenerate discriminator training data and fine-tune it.
            logging.info('prepare the gan_dis_data begin')
            data_num = prepare_gan_dis_data(train_data_source=config.generator.src_path, train_data_target=config.generator.dst_path, gan_dis_source_data=config.discriminator.dis_source_data, gan_dis_positive_data=config.discriminator.dis_positive_data, num=config.generate_num, reshuf=True)
            logging.info(('generate and the save in to %s.' % config.discriminator.dis_negative_data))
            generator.generate_and_save(data_util=du, infile=config.discriminator.dis_source_data, generate_batch=config.discriminator.dis_batch_size, outfile=config.discriminator.dis_negative_data)
            logging.info(('prepare %d gan_dis_data done!' % data_num))
            logging.info('finetuen the discriminator begin')
            discriminator.train(max_epoch=config.gan_dis_iter_num, positive_data=config.discriminator.dis_positive_data, negative_data=config.discriminator.dis_negative_data, source_data=config.discriminator.dis_source_data)
            discriminator.saver.save(discriminator.sess, discriminator.saveto)
            logging.info('finetune the discrimiantor done!')
        logging.info('reinforcement training done!')
def train_all_epochs(opt, model, optimizer, train_sampler, train_loader, criterion, val_loader, num_train_samples=None, no_acc_eval=False, save_all_ranks=False, training_status_info=None, save_params=True):
    """Full training driver: runs epochs [opt.start_epoch, opt.epochs), with
    optional per-epoch validation, best-accuracy tracking, progress/ETA
    logging, and periodic/best checkpointing (full model and, when present,
    model.student_model). Returns the (possibly resumed) status dict.
    """
    timer_start = time.time()
    # Fresh run: initialize the status bookkeeping. A caller-supplied dict
    # (resume) is used as-is and must already contain these keys.
    if (training_status_info is None):
        training_status_info = {}
        training_status_info['best_acc1'] = 0
        training_status_info['best_acc5'] = 0
        training_status_info['best_acc1_at_epoch'] = 0
        training_status_info['best_acc5_at_epoch'] = 0
        training_status_info['training_elasped_time'] = 0
        training_status_info['validation_elasped_time'] = 0
    if (num_train_samples is None):
        num_train_samples = len(train_loader)
    for epoch in range(opt.start_epoch, opt.epochs):
        logging.info('--- Start training epoch {}'.format(epoch))
        # Distributed samplers need the epoch for deterministic reshuffling.
        if (train_sampler is not None):
            train_sampler.set_epoch(epoch)
        training_timer_start = time.time()
        train_one_epoch_info = train_one_epoch(train_loader, model, criterion, optimizer, epoch, opt, num_train_samples, no_acc_eval=no_acc_eval)
        training_status_info['training_elasped_time'] += (time.time() - training_timer_start)
        if (val_loader is not None):
            validation_timer_start = time.time()
            validate_info = validate(val_loader, model, criterion, opt, epoch=epoch)
            training_status_info['validation_elasped_time'] += (time.time() - validation_timer_start)
            acc1 = validate_info['top1_acc']
            acc5 = validate_info['top5_acc']
        else:
            # No validation set: accuracies stay zero and "best" never updates.
            acc1 = 0
            acc5 = 0
        is_best_acc1 = (acc1 > training_status_info['best_acc1'])
        is_best_acc5 = (acc5 > training_status_info['best_acc5'])
        training_status_info['best_acc1'] = max(acc1, training_status_info['best_acc1'])
        training_status_info['best_acc5'] = max(acc5, training_status_info['best_acc5'])
        if is_best_acc1:
            training_status_info['best_acc1_at_epoch'] = epoch
        if is_best_acc5:
            training_status_info['best_acc5_at_epoch'] = epoch
        elasped_hour = ((time.time() - timer_start) / 3600)
        # ETA extrapolated from average wall time per completed epoch.
        remaining_hour = ((((time.time() - timer_start) / float(((epoch - opt.start_epoch) + 1))) * (opt.epochs - epoch)) / 3600)
        logging.info('--- Epoch={}, Elasped hour={:8.4g}, Remaining hour={:8.4g}, Training Speed={:4g}, best_acc1={:4g}, best_acc1_at_epoch={}, best_acc5={}, best_acc5_at_epoch={}'.format(epoch, elasped_hour, remaining_hour, ((num_train_samples * (epoch + 1)) / float((training_status_info['training_elasped_time'] + 1e-08))), training_status_info['best_acc1'], training_status_info['best_acc1_at_epoch'], training_status_info['best_acc5'], training_status_info['best_acc5_at_epoch']))
        # Periodic "latest" checkpoint (every save_freq epochs and at the end).
        if (save_params and ((opt.rank == 0) or save_all_ranks) and ((((epoch + 1) % opt.save_freq) == 0) or ((epoch + 1) == opt.epochs))):
            checkpoint_filename = os.path.join(opt.save_dir, 'latest-params_rank{}.pth'.format(opt.rank))
            save_checkpoint(checkpoint_filename, {'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'top1_acc': acc1, 'top5_acc': acc5, 'training_status_info': training_status_info})
        # Best-accuracy checkpoint of the full model ...
        if (save_params and is_best_acc1 and ((opt.rank == 0) or save_all_ranks)):
            checkpoint_filename = os.path.join(opt.save_dir, 'best-params_rank{}.pth'.format(opt.rank))
            save_checkpoint(checkpoint_filename, {'epoch': epoch, 'state_dict': model.state_dict(), 'top1_acc': acc1, 'top5_acc': acc5, 'training_status_info': training_status_info})
        # ... and of the distilled student sub-model.
        # NOTE(review): assumes model.student_model exists -- confirm callers.
        if (save_params and is_best_acc1 and ((opt.rank == 0) or save_all_ranks)):
            checkpoint_filename = os.path.join(opt.save_dir, 'student_best-params_rank{}.pth'.format(opt.rank))
            save_checkpoint(checkpoint_filename, {'epoch': epoch, 'state_dict': model.student_model.state_dict(), 'top1_acc': acc1, 'top5_acc': acc5, 'training_status_info': training_status_info})
        pass
    return training_status_info
def analyze_prediction_entropy(logit_list, ent_list, input_doc: numpy.ndarray, eos_tokens=(50256,), pred_dist: numpy.ndarray=None, nucleus_filter: bool=True, top_p: float=0.95):
    """Split a generated token stream at EOS tokens and analyze each sentence.

    Fix: ``eos_tokens`` default changed from a shared mutable list
    ``[50256]`` to the immutable tuple ``(50256,)`` -- membership tests
    behave identically, but the mutable-default-argument hazard is gone.

    :param logit_list: generated token ids.
    :param ent_list: per-step entropies, parallel to logit_list.
    :param input_doc: source document token ids (converted to a list here).
    :param eos_tokens: token ids treated as sentence terminators.
    :param pred_dist: per-step predicted distributions; must be normalized
        (asserted on the first step) and must not be None despite the default.
    :returns: (outputs, outputs_pos) accumulated from analyze_sentence.
    """
    # Sanity check: the first step's distribution sums to ~1.
    assert sum(pred_dist[0]) > 0.99
    assert sum(pred_dist[0]) < 1.01
    input_doc = input_doc.tolist()
    input_bigram = get_bigram(input_doc)
    input_bigram = [f'{big[0][2]}_{big[1][2]}' for big in input_bigram]
    print(f'Bigram like {input_bigram[0]}')
    # Sentence boundaries: indices of EOS tokens in the generated sequence.
    indices = [i for (i, x) in enumerate(logit_list) if (x in eos_tokens)]
    outputs = []
    outputs_pos = []
    last_indi = 0
    print(f'Decode: {bpe_tokenizer.decode(logit_list)}')
    for indi in indices:
        indi = indi + 1  # include the EOS token in the sentence slice
        if (indi - last_indi) < 3:
            # NOTE(review): this *stops* at the first too-short sentence;
            # `continue` may have been intended -- behavior kept as-is.
            break
        (output, output_pos) = analyze_sentence(logit_list[last_indi:indi], ent_list[last_indi:indi], pred_dist[last_indi:indi], input_doc, input_bigram, nucleus_filter=nucleus_filter, top_p=top_p)
        outputs += output
        outputs_pos += output_pos
        last_indi = indi
    return (outputs, outputs_pos)
class ORCWidget(ORCLayout):
    """Leaf widget in the ORC constraint layout.

    Holds a six-tuple of size limits (min/pref/max width, then min/pref/max
    height) and contributes hard min/max size constraints plus soft
    preferred-size objectives to the enclosing layout's CVXPY program.
    """

    def __init__(self, name, width_and_height, parent=None, optional=False):
        super().__init__(name, parent)
        self.copied_tree = False
        self.optional = optional
        self.width_and_height = width_and_height
        # Unpack (w_min, w_pref, w_max, h_min, h_pref, h_max) by index so
        # any sequence with at least six items is accepted.
        self.width_min = width_and_height[0]
        self.width_pref = width_and_height[1]
        self.width_max = width_and_height[2]
        self.height_min = width_and_height[3]
        self.height_pref = width_and_height[4]
        self.height_max = width_and_height[5]

    # --- Per-dimension mutators (names are part of the public API). ---
    def modify_width_min(self, width_min):
        self.width_min = width_min

    def modify_width_pref(self, width_pref):
        self.width_pref = width_pref

    def modify_width_max(self, width_max):
        self.width_max = width_max

    def modify_height_min(self, height_min):
        self.height_min = height_min

    def modify_height_pref(self, height_pref):
        self.height_pref = height_pref

    def modify_height_max(self, height_max):
        self.height_max = height_max

    def set_optional(self):
        self.optional = True

    def constraint_spec(self):
        """Append this widget's size constraints and soft-preference
        objectives to the layout's shared constraint/objective lists."""
        self.update_from_upper_tree()
        left = self.variables[self.name + '_l']
        right = self.variables[self.name + '_r']
        top = self.variables[self.name + '_t']
        bottom = self.variables[self.name + '_b']
        width = right - left
        height = bottom - top
        # Hard bounds on the solved extent.
        self.constraints += [width >= self.width_min, width <= self.width_max, height >= self.height_min, height <= self.height_max]
        # Slack variables measure deviation from the preferred size.
        width_delta = cvx.Variable()
        height_delta = cvx.Variable()
        self.variables[self.name + '_w_delta'] = width_delta
        self.variables[self.name + '_h_delta'] = height_delta
        self.constraints += [width + width_delta == self.width_pref, height + height_delta == self.height_pref]
        # Quadratic penalty on the slack, scaled by the widget weight.
        self.objectives += [self.weight * cvx.square(width_delta), self.weight * cvx.square(height_delta)]
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval):
    """Run one validation pass (rank 0 only): accumulate MLE and duration
    losses over the whole val set, log progress, and write per-epoch loss
    scalars to TensorBoard at the current global step."""
    if (rank == 0):
        global global_step  # set by the training loop elsewhere in this file
        generator.eval()
        losses_tot = []
        with torch.no_grad():
            for (batch_idx, (x, x_lengths, y, y_lengths)) in enumerate(val_loader):
                (x, x_lengths) = (x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True))
                (y, y_lengths) = (y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True))
                # Forward in training mode (gen=False) to obtain latents and alignments.
                ((z, z_m, z_logs, logdet, z_mask), (x_m, x_logs, x_mask), (attn, logw, logw_)) = generator(x, x_lengths, y, y_lengths, gen=False)
                l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)
                l_length = commons.duration_loss(logw, logw_, x_lengths)
                loss_gs = [l_mle, l_length]
                loss_g = sum(loss_gs)
                # Element-wise running sum of the per-loss terms.
                if (batch_idx == 0):
                    losses_tot = loss_gs
                else:
                    losses_tot = [(x + y) for (x, y) in zip(losses_tot, loss_gs)]
                if ((batch_idx % hps.train.log_interval) == 0):
                    logger.info('Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, (batch_idx * len(x)), len(val_loader.dataset), ((100.0 * batch_idx) / len(val_loader)), loss_g.item()))
                    logger.info([x.item() for x in loss_gs])
        # Average over the number of batches, then summarize.
        losses_tot = [(x / len(val_loader)) for x in losses_tot]
        loss_tot = sum(losses_tot)
        scalar_dict = {'loss/g/total': loss_tot}
        scalar_dict.update({'loss/g/{}'.format(i): v for (i, v) in enumerate(losses_tot)})
        utils.summarize(writer=writer_eval, global_step=global_step, scalars=scalar_dict)
        logger.info('====> Epoch: {}'.format(epoch))
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """Run a Ray Tune hyper-parameter search over ``trainer``.

    :param trainer: the Trainer whose hp_space/objective are tuned.
    :param n_trials: number of Ray Tune samples.
    :param direction: 'minimize' or 'maximize' (only the first 3 chars are
        passed to Ray as the mode).
    :param kwargs: forwarded to ``ray.tune.run`` (resources, scheduler, ...).
    :returns: the BestRun (trial id, objective, config).
    """
    def _objective(trial, checkpoint_dir=None):
        # Resume from the newest checkpoint subdir if Ray handed us one.
        model_path = None
        if checkpoint_dir:
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    model_path = os.path.join(checkpoint_dir, subdir)
        trainer.objective = None
        trainer.train(model_path=model_path, trial=trial)
        # If training itself never reported an objective (no in-training
        # eval), evaluate once at the end and report it.
        if getattr(trainer, 'objective', None) is None:
            metrics = trainer.evaluate()
            trainer.objective = trainer.compute_objective(metrics)
            trainer._tune_save_checkpoint()
            ray.tune.report(objective=trainer.objective)
        return trainer.objective

    # The TB writer and model are not picklable; detach before Ray forks.
    _tb_writer = trainer.tb_writer
    trainer.tb_writer = None
    trainer.model = None
    # Default GPU allocation: split the visible GPUs across parallel jobs.
    if ('resources_per_trial' not in kwargs) and (trainer.args.n_gpu > 0):
        n_jobs = int(kwargs.pop('n_jobs', 1))
        num_gpus_per_trial = trainer.args.n_gpu
        if (num_gpus_per_trial / n_jobs) >= 1:
            num_gpus_per_trial = int(math.ceil(num_gpus_per_trial / n_jobs))
        kwargs['resources_per_trial'] = {'gpu': num_gpus_per_trial}
    if 'reporter' not in kwargs:
        from ray.tune import CLIReporter
        kwargs['progress_reporter'] = CLIReporter(metric_columns=['objective'])
    if ('keep_checkpoints_num' in kwargs) and (kwargs['keep_checkpoints_num'] > 0):
        trainer.use_tune_checkpoints = True
        if kwargs['keep_checkpoints_num'] > 1:
            # Fix: the original message contained a bare '{}' placeholder that
            # was never interpolated (missing .format); use lazy %-style
            # logging args with the actual count.
            logger.warning('Currently keeping %s checkpoints for each trial. Checkpoints are usually huge, consider setting `keep_checkpoints_num=1`.', kwargs['keep_checkpoints_num'])
    if 'scheduler' in kwargs:
        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
        # PBT without checkpointing retrains exploited configs from scratch.
        if isinstance(kwargs['scheduler'], PopulationBasedTraining):
            if not trainer.use_tune_checkpoints:
                logger.warning("You are using PopulationBasedTraining but you haven't enabled checkpointing. This means your trials will train from scratch everytime they are exploiting new configurations. Consider enabling checkpointing by passing `keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`.")
        # Early-stopping schedulers need intermediate reports from training.
        if isinstance(kwargs['scheduler'], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)) and ((not trainer.args.do_eval) or (not trainer.args.evaluate_during_training)):
            raise RuntimeError("You are using {cls} as a scheduler but you haven't enabled evaluation during training. This means your trials will not report intermediate results to Ray Tune, and can thus not be stopped early or used to exploit other trials parameters. If this is what you want, do not use {cls}. If you would like to use {cls}, make sure you pass `do_eval=True` and `evaluate_during_training=True` in the Trainer `args`.".format(cls=type(kwargs['scheduler']).__name__))
    analysis = ray.tune.run(_objective, config=trainer.hp_space(None), num_samples=n_trials, **kwargs)
    best_trial = analysis.get_best_trial(metric='objective', mode=direction[:3])
    best_run = BestRun(best_trial.trial_id, best_trial.last_result['objective'], best_trial.config)
    # Restore the detached writer before returning.
    trainer.tb_writer = _tb_writer
    return best_run
def main(argv):
    """Fine-tune a pre-trained cascade encoder and report test MSLE.

    Builds a two-layer bidirectional-GRU encoder, re-attaches the projection
    head that was used during pre-training (selected by
    ``FLAGS.projection_head[2]``) so the saved weights line up, loads the
    pre-trained encoder weights, adds a small regression MLP, and trains with
    early stopping on validation loss.  All configuration comes from ``FLAGS``.

    Bug fix vs. the previous version: an invalid projection-head argument now
    returns early instead of falling through and crashing with a NameError
    when the model is assembled.
    """
    start_time = time.time()
    batch_size = FLAGS.batch_size
    emb_dim = FLAGS.emb_dim
    gru_units = FLAGS.model_size * FLAGS.model_size_scale
    epochs = FLAGS.epochs
    freeze = FLAGS.freeze
    l2 = FLAGS.l2
    label_fraction = FLAGS.label_fraction
    lr = FLAGS.lr
    max_seq = FLAGS.max_seq
    patience = FLAGS.patience
    projection_head = FLAGS.projection_head

    # Backbone: two stacked bidirectional GRUs over zero-padded sequences.
    inputs = tf.keras.layers.Input(shape=(max_seq, emb_dim))
    gru_1 = tf.keras.layers.Bidirectional(
        tf.keras.layers.GRU(gru_units, return_sequences=True,
                            kernel_regularizer=tf.keras.regularizers.l2(l2)))(inputs)
    gru_2 = tf.keras.layers.Bidirectional(
        tf.keras.layers.GRU(gru_units,
                            kernel_regularizer=tf.keras.regularizers.l2(l2)))(gru_1)

    # Re-create the projection head used during pre-training: '0' means no
    # head, '1' one linear layer, '2'-'4' that many relu layers of width
    # 2 * gru_units.  The encoder (backbone + head) must match the layout of
    # the checkpoint being loaded.
    head_depth = projection_head[2]
    if head_depth == '0':
        representation = gru_2
    elif head_depth == '1':
        representation = tf.keras.layers.Dense(gru_units * 2, activation='linear')(gru_2)
    elif head_depth in ('2', '3', '4'):
        representation = gru_2
        for _ in range(int(head_depth)):
            representation = tf.keras.layers.Dense(gru_units * 2, activation='relu')(representation)
    else:
        # Previously this fell through and crashed with a NameError below.
        print('Wrong projection head argument, should be [0-4]-[0-4].')
        return
    encoder = tf.keras.models.Model(inputs, representation, name='encoder')
    encoder.load_weights(FLAGS.weight_path + FLAGS.name + '.h5')

    # Regression MLP on top of the (optionally frozen) encoder.
    mlp_1 = tf.keras.layers.Dense(gru_units, activation='relu')(representation)
    mlp_2 = tf.keras.layers.Dense(gru_units // 2, activation='relu')(mlp_1)
    prediction = tf.keras.layers.Dense(1)(mlp_2)
    if freeze:
        encoder.trainable = False
    fine_tuning_model = tf.keras.models.Model(inputs, prediction, name='fine_tuning_model')
    fine_tuning_model.summary()

    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    loss_object = tf.keras.losses.MeanSquaredLogarithmicError()
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    val_loss = tf.keras.metrics.Mean(name='val_loss')
    test_loss = tf.keras.metrics.Mean(name='test_loss')

    def train_step(data, labels):
        # One optimizer step on a single batch.
        with tf.GradientTape() as tape:
            predictions = fine_tuning_model(data, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, fine_tuning_model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, fine_tuning_model.trainable_variables))
        train_loss(loss)

    def val_step(data, labels):
        predictions = fine_tuning_model(data, training=False)
        val_loss(loss_object(labels, predictions))

    def test_step(data, labels):
        predictions = fine_tuning_model(data, training=False)
        test_loss(loss_object(labels, predictions))
        return predictions

    def batches(data, labels):
        # Yield zero-padded (batch, labels) ndarray pairs.  The deepcopy keeps
        # the padding from leaking back into the underlying dataset; the
        # `+ 1` mirrors the original slicing (a trailing partial batch).
        for i in range(len(data) // batch_size + 1):
            batch = copy.deepcopy(data[batch_size * i:batch_size * i + batch_size])
            batch_labels = labels[batch_size * i:batch_size * i + batch_size]
            for cascade in batch:
                while len(cascade) < max_seq:
                    cascade.append(np.zeros(emb_dim))
            yield np.array(batch), np.array(batch_labels)

    def log2_msle(predictions, truths):
        # Reporting metric: MSLE in log2 space, clamping values below 1.
        p = np.log2(np.array([pre if pre >= 1 else 1 for pre in predictions]))
        t = np.log2(np.array([tru if tru >= 1 else 1 for tru in truths]))
        return np.mean(np.square(p - t))

    with open(FLAGS.input + 'train.pkl', 'rb') as f:
        train, train_y = pickle.load(f)
    with open(FLAGS.input + 'val.pkl', 'rb') as f:
        val, val_y = pickle.load(f)
    with open(FLAGS.input + 'test.pkl', 'rb') as f:
        test, test_y = pickle.load(f)
    # Keep only a fraction of the labeled training data (semi-supervised setup).
    train, train_y = divide_dataset(train, label_fraction), divide_dataset(train_y, label_fraction)
    dataset_info = '# fine-tuning samples {}\n' + '# validation samples {}\n' + '# test samples {}'
    print(dataset_info.format(len(train), len(val), len(test)))

    best_val_loss = 1000
    save_predictions = list()
    for epoch in range(epochs):
        time_start = time.time()
        train_loss.reset_states()
        val_loss.reset_states()
        test_loss.reset_states()
        train, train_y = shuffle_two(train, train_y)
        for batch, batch_labels in batches(train, train_y):
            train_step(batch, batch_labels)
        for batch, batch_labels in batches(val, val_y):
            val_step(batch, batch_labels)
        pred = list()
        for batch, batch_labels in batches(test, test_y):
            pred.extend(test_step(batch, batch_labels))
        pred = [float(pre) for pre in pred]
        report_loss = log2_msle(pred, list(test_y))
        template = '{}: Fine-tuning Epoch {:3}, Time: {:.3f}s, Train Loss: {:.3f}, Val Loss: {:.3f}, Test Loss: {:.3f}, LOG2 MSLE: {:.3f}'
        print(template.format(FLAGS.name, epoch + 1, time.time() - time_start,
                              train_loss.result(), val_loss.result(), test_loss.result(), report_loss))
        # Early stopping on validation loss: on improvement, keep this epoch's
        # test predictions, reset patience, and checkpoint the model.  Note the
        # second `if` is deliberately NOT an elif — the original decrements
        # patience even on improving epochs, and that behavior is preserved.
        if val_loss.result() < best_val_loss:
            best_val_loss = val_loss.result()
            save_predictions = pred
            patience = FLAGS.patience
            fine_tuning_model.save_weights(FLAGS.teacher_path + FLAGS.name + '-' + FLAGS.num + '.h5')
            print('Model saved!')
        if patience == 0:
            report_loss = log2_msle(save_predictions, list(test_y))
            print('Predictions saved! Best Test MSLE: {}'.format(report_loss))
            break
        else:
            patience -= 1
    print('Finished! Time used: {:.3f}min'.format((time.time() - start_time) / 60))
def calculate_spectrum_mesh(e_0, theta, mesh, phi=0.0, epsrel=0.2, monitor=console_monitor, z=74):
    """Compute an x-ray spectrum on a fixed energy mesh.

    Builds a Spectrum with tube voltage ``e_0``, takeoff angle ``theta`` and
    abscissa ``mesh``, integrates the bremsstrahlung source at every mesh
    energy, and — for tungsten (z == 74) — adds the characteristic radiation.
    ``monitor`` (if not None) is called as monitor(done, total) after each
    point; ``phi``, ``epsrel`` and ``z`` are forwarded to the integrator.
    """
    spectrum = Spectrum()
    spectrum.kvp = e_0
    spectrum.th = theta
    spectrum.x = mesh
    total_points = len(mesh)
    # Energy-independent quantities are fetched once, outside the loop.
    fluence = get_fluence(e_0)
    cs = get_cs(e_0, z=z)
    mu = get_mu_csda(e_0, z=z)
    warnings.simplefilter('ignore')  # silence integration warnings
    for done, energy in enumerate(spectrum.x, start=1):
        value = integrate_source(fluence, cs, mu, theta, energy, e_0,
                                 phi=phi, epsrel=epsrel, z=z)
        spectrum.y.append(value)
        if monitor is not None:
            monitor(done, total_points)
    if z == 74:
        add_char_radiation(spectrum)
    return spectrum
class NoSuperInitModel(PreTrainedModel):
    """Minimal PreTrainedModel whose custom weight init is a deliberate no-op."""

    config_class = NoSuperInitConfig

    def __init__(self, config):
        super().__init__(config)
        # Square linear layer sized by the config's `attribute` field.
        self.linear = torch.nn.Linear(config.attribute, config.attribute)

    def forward(self, x):
        out = self.linear(x)
        return out

    def _init_weights(self, module):
        # Intentionally empty: weights stay at their framework defaults.
        pass
def s_load_file(file_path, process, document_store, max_length=378):
    """Load a PDF or DOCX file, chunk it into paragraphs and index them.

    The text is split into sentences after ;!.? punctuation, sentences are
    merged in pairs, and consecutive pieces are greedily packed into
    paragraphs of at most ``max_length`` characters.  Deduplicated paragraphs
    are wrapped in SDocument objects (with the source path as metadata) and
    written to ``document_store``, which is returned.

    Args:
        file_path: path ending in 'pdf' or 'docx'.
        process: unused here; kept for interface compatibility.
        document_store: object exposing write_documents(list).
        max_length: maximum paragraph length in characters.

    Raises:
        ValueError: if ``file_path`` is neither a PDF nor a DOCX file.
    """
    if file_path.endswith('pdf'):
        text = load_pdf(file_path)
    elif file_path.endswith('docx'):
        text = read_docx(file_path)
    else:
        # Bug fix: an unsupported extension previously left `text` unbound
        # and crashed with a NameError below.
        raise ValueError('Unsupported file type: {}'.format(file_path))
    # Collapse newlines and runs of whitespace into single spaces.
    # (The old second replace('\n\n', '') was dead code: no '\n' remained.)
    text = text.replace('\n', '')
    text = re.sub('\\s+', ' ', text)
    # Zero-width split after sentence-ending punctuation.
    sentences = re.split('(?<=[;!.?])', text)
    # Merge sentences in pairs; a trailing odd sentence is kept as-is.
    new_sents = []
    for i in range(len(sentences) // 2):
        merged = sentences[2 * i] + sentences[2 * i + 1]
        new_sents.append(merged.strip())
    if len(sentences) % 2 == 1:
        new_sents.append(sentences[-1])
    # Greedily pack sentence pairs into paragraphs up to max_length chars.
    paragraphs = []
    current_length = 0
    current_paragraph = ''
    for sentence in new_sents:
        sentence_length = len(sentence)
        if current_length + sentence_length <= max_length:
            current_paragraph += sentence
            current_length += sentence_length
        else:
            paragraphs.append(current_paragraph.strip())
            current_paragraph = sentence
            current_length = sentence_length
    print('length for origin', len(paragraphs))
    paragraphs.append(current_paragraph.strip())
    # Bug fix: deduplicate while keeping first-seen order; the original used
    # list(set(...)), which scrambled paragraph order nondeterministically.
    paragraphs = list(dict.fromkeys(paragraphs))
    print('length for processed', len(paragraphs))
    metadata = {'source': file_path}
    documents = [SDocument(content=paragraph, metadata=metadata) for paragraph in paragraphs]
    document_store.write_documents(documents)
    return document_store
class Logger(object):
    """Tee-style logger: mirrors everything written to the console stream
    into an optional log file.

    Usable as a context manager; the log file is closed on exit.  The
    console stream (sys.stdout at construction time) is never closed.
    """

    def __init__(self, fpath=None, mode='w'):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            self.file = open(fpath, mode)

    def __del__(self):
        self.close()

    def __enter__(self):
        # Bug fix: must return self so `with Logger(p) as log:` binds the
        # logger (previously returned None).
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            # fsync pushes the log to disk so it survives a hard crash.
            os.fsync(self.file.fileno())

    def close(self):
        # Bug fix: do NOT close the console — it is sys.stdout, and closing
        # it would break all subsequent printing in the process.  Resetting
        # self.file makes close() idempotent (__del__ may run after __exit__).
        if self.file is not None:
            self.file.close()
            self.file = None
# NOTE(review): this looks like a class decorator that lost its leading '@'
# during extraction — as written it is a bare call whose return value is
# discarded, so the class below is never registered. TODO confirm against
# the upstream source before changing it.
_registry(dataset_type='VOCRecord', framework='tensorflow, tensorflow_itex', dataset_format='')
class TensorflowVOCRecord(IterableDataset):
    """Streams the 'val' split of a VOC TFRecord dataset found under `root`.

    `__new__` returns a tf.data pipeline (not an instance of this class):
    shard files are discovered with a glob, interleaved in parallel, mapped
    through `transform` when given, and prefetched.  `filter` is accepted
    for interface compatibility but unused.
    """

    def __new__(cls, root, transform=None, filter=None):
        from tensorflow.python.platform import gfile
        # Match validation shards, e.g. val-00000-of-00010.
        glob_pattern = os.path.join(root, '%s-*' % 'val')
        shard_files = gfile.Glob(glob_pattern)
        if not shard_files:
            raise ValueError('Found no files in --root matching: {}'.format(glob_pattern))
        from tensorflow.python.data.experimental import parallel_interleave
        dataset = tf.data.TFRecordDataset.list_files(shard_files, shuffle=False)
        # Read every shard concurrently: one interleave cycle per shard file.
        dataset = dataset.apply(parallel_interleave(tf.data.TFRecordDataset,
                                                    cycle_length=len(shard_files)))
        if transform is not None:
            dataset = dataset.map(transform, num_parallel_calls=None)
        return dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
def get_handle(fsdp_unit):
    """Return the single FlatParamHandle of an FSDP unit.

    PyTorch changed the private attribute across versions: older releases
    keep a one-element list in ``_handles``, newer ones a single ``_handle``.
    This helper papers over both layouts.

    Raises:
        ValueError: if the unit matches neither known layout, or if
            ``_handles`` does not hold exactly one handle.
    """
    if hasattr(fsdp_unit, '_handles'):
        if len(fsdp_unit._handles) != 1:
            # Bug fix: the old message was garbled; report what was found.
            raise ValueError(
                'Expected exactly one handle in FSDP._handles but found {}; '
                'check your PyTorch version.'.format(len(fsdp_unit._handles)))
        return fsdp_unit._handles[0]
    if hasattr(fsdp_unit, '_handle'):
        return fsdp_unit._handle
    raise ValueError(
        'FSDP unit has neither _handles nor _handle; unsupported PyTorch version.')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.