import torch
import torch.nn as nn
import torch.nn.functional as F


class SoftCrossEntropy(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, inputs, target, target_lens):
        assert inputs.shape == target.shape
        assert inputs.shape[0] == target_lens.shape[0]
        # Zero out target positions beyond each sample's valid length.
        mask = torch.arange(target.shape[1], device=target.device).view(1, -1) < target_lens.view(-1, 1)
        target = target * mask.unsqueeze(2)
        return torch.mean(torch.sum(torch.sum(-target * F.log_softmax(inputs, -1), -1), -1) / target_lens.float() ** 2)
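
# Minimal usage sketch for SoftCrossEntropy above; the tensor shapes are
# illustrative assumptions, not from the original source.
_logits = torch.randn(2, 5, 7)                                # (batch, seq, classes)
_soft_targets = torch.softmax(torch.randn(2, 5, 7), dim=-1)   # soft labels
_lens = torch.tensor([5, 3])                                  # valid length per sample
_loss = SoftCrossEntropy()(_logits, _soft_targets, _lens)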
class NumpyForm(NumpyMeta, Form):
def __init__(self, primitive, inner_shape=(), *, parameters=None, form_key=None):
primitive = ak.types.numpytype.dtype_to_primitive(ak.types.numpytype.primitive_to_dtype(primitive))
if (not isinstance(inner_shape, Iterable)):
raise TypeError("{} 'inner_shape' must be iterable, not {}".format(type(self).__name__, repr(inner_shape)))
self._primitive = primitive
self._inner_shape = tuple(inner_shape)
self._init(parameters=parameters, form_key=form_key)
    @property
    def primitive(self):
        return self._primitive

    @property
    def inner_shape(self):
        return self._inner_shape
    def copy(self, primitive=UNSET, inner_shape=UNSET, *, parameters=UNSET, form_key=UNSET):
        return NumpyForm(
            self._primitive if primitive is UNSET else primitive,
            self._inner_shape if inner_shape is UNSET else inner_shape,
            parameters=self._parameters if parameters is UNSET else parameters,
            form_key=self._form_key if form_key is UNSET else form_key,
        )

    @classmethod
    def simplified(cls, primitive, inner_shape=(), *, parameters=None, form_key=None):
        return cls(primitive, inner_shape, parameters=parameters, form_key=form_key)
    @property
    def itemsize(self):
        return ak.types.numpytype.primitive_to_dtype(self._primitive).itemsize
def __repr__(self):
args = [repr(self._primitive)]
if (len(self._inner_shape) > 0):
args.append(('inner_shape=' + repr(self._inner_shape)))
args += self._repr_args()
return '{}({})'.format(type(self).__name__, ', '.join(args))
def _to_dict_part(self, verbose, toplevel):
if ((not verbose) and (not toplevel) and (len(self._inner_shape) == 0) and ((self._parameters is None) or (len(self._parameters) == 0)) and (self._form_key is None)):
return self._primitive
else:
out = {'class': 'NumpyArray', 'primitive': self._primitive}
if (verbose or (len(self._inner_shape) > 0)):
out['inner_shape'] = [(None if (item is unknown_length) else item) for item in self._inner_shape]
return self._to_dict_extra(out, verbose)
    @property
    def type(self):
out = ak.types.NumpyType(self._primitive, parameters=None)
for x in self._inner_shape[::(- 1)]:
out = ak.types.RegularType(out, x)
out._parameters = self._parameters
return out
def to_RegularForm(self) -> (RegularForm | NumpyForm):
out: (RegularForm | NumpyForm) = NumpyForm(self._primitive, (), parameters=None, form_key=None)
for x in self._inner_shape[::(- 1)]:
out = ak.forms.RegularForm(out, x, parameters=None, form_key=None)
out._parameters = self._parameters
return out
def _columns(self, path, output, list_indicator):
output.append('.'.join(path))
def _select_columns(self, match_specifier: _SpecifierMatcher) -> Self:
return self
def _prune_columns(self, is_inside_record_or_union: bool) -> Self:
return self
def _column_types(self):
return (ak.types.numpytype.primitive_to_dtype(self._primitive),)
def __setstate__(self, state):
if isinstance(state, dict):
self.__dict__.update(state)
else:
(has_identities, parameters, form_key, inner_shape, itemsize, format) = state
format = format.lstrip('<').lstrip('>').lstrip('=')
if (format == '?'):
dtype = np.dtype(np.bool_)
elif (format in ('b', 'h', 'i', 'l', 'q')):
if (itemsize == 1):
dtype = np.dtype(np.int8)
elif (itemsize == 2):
dtype = np.dtype(np.int16)
elif (itemsize == 4):
dtype = np.dtype(np.int32)
elif (itemsize == 8):
dtype = np.dtype(np.int64)
else:
raise AssertionError(format)
elif (format in ('c', 'B', 'H', 'I', 'L', 'Q')):
if (itemsize == 1):
dtype = np.dtype(np.uint8)
elif (itemsize == 2):
dtype = np.dtype(np.uint16)
elif (itemsize == 4):
dtype = np.dtype(np.uint32)
elif (itemsize == 8):
dtype = np.dtype(np.uint64)
else:
raise AssertionError(format)
elif (format == 'e'):
dtype = np.dtype(np.float16)
elif (format == 'f'):
dtype = np.dtype(np.float32)
elif (format == 'd'):
dtype = np.dtype(np.float64)
elif (format == 'g'):
dtype = np.dtype(np.float128)
elif (format == 'Zf'):
dtype = np.dtype(np.complex64)
elif (format == 'Zd'):
dtype = np.dtype(np.complex128)
elif (format == 'Zg'):
dtype = np.dtype(np.complex256)
else:
dtype = np.dtype(format)
primitive = ak.types.numpytype.dtype_to_primitive(dtype)
if (form_key is not None):
form_key = ('part0-' + form_key)
self.__init__(primitive, inner_shape, parameters=parameters, form_key=form_key)
    def _expected_from_buffers(self, getkey: Callable[[Form, str], str], recursive: bool) -> Iterator[tuple[str, DType]]:
        from awkward.types.numpytype import primitive_to_dtype

        yield getkey(self, 'data'), primitive_to_dtype(self.primitive)
    def _is_equal_to(self, other: Any, all_parameters: bool, form_key: bool) -> bool:
        return self._is_equal_to_generic(other, all_parameters, form_key) and self._primitive == other._primitive
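
# Hedged usage sketch for NumpyForm, assuming it mirrors ak.forms.NumpyForm
# from awkward-array (with `import awkward as ak` in scope):
# form = NumpyForm("float64", inner_shape=(3,))
# form.type              # RegularType of size 3 wrapping NumpyType("float64")
# form.to_RegularForm()  # nested RegularForm around a scalar NumpyForm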
class RandomSelectPolicy(SelectPolicy):
    def __init__(self, fuzzer: GPTFuzzer = None):
super().__init__(fuzzer)
def select(self) -> PromptNode:
seed = random.choice(self.fuzzer.prompt_nodes)
seed.visited_num += 1
        return seed
class VideoFolderDataset(Dataset):
def __init__(self, root, train, resolution, path=None, n_frames=16, skip=1, fold=1, max_size=None, use_labels=False, return_vid=False, time_saliency=False, sub=False, seed=42, **super_kwargs):
        video_root = osp.join(root)
if (not (1 <= fold <= 3)):
raise ValueError('fold should be between 1 and 3, got {}'.format(fold))
self.path = video_root
name = video_root.split('/')[(- 1)]
self.name = name
self.train = train
self.fold = fold
self.resolution = resolution
self.nframes = n_frames
self.annotation_path = os.path.join(video_root, 'ucfTrainTestlist')
self.classes = list(natsorted((p for p in os.listdir(video_root) if osp.isdir(osp.join(video_root, p)))))
self.classes.remove('ucfTrainTestlist')
class_to_idx = {self.classes[i]: i for i in range(len(self.classes))}
self.samples = make_dataset(video_root, class_to_idx, ('avi',), is_valid_file=None)
video_list = [x[0] for x in self.samples]
self.video_list = video_list
self._use_labels = use_labels
self._label_shape = None
self._raw_labels = None
self._raw_shape = ([len(self.video_list)] + [3, resolution, resolution])
self.num_channels = 3
self.return_vid = return_vid
frames_between_clips = skip
print(root, frames_between_clips, n_frames)
self.indices = self._select_fold(self.video_list, self.annotation_path, fold, train)
self.size = len(self.indices)
print(self.size)
random.seed(seed)
self.shuffle_indices = [i for i in range(self.size)]
random.shuffle(self.shuffle_indices)
self._need_init = True
def _select_fold(self, video_list, annotation_path, fold, train):
name = ('train' if train else 'test')
name = '{}list{:02d}.txt'.format(name, fold)
f = os.path.join(annotation_path, name)
selected_files = []
with open(f, 'r') as fid:
data = fid.readlines()
data = [x.strip().split(' ') for x in data]
data = [os.path.join(self.path, x[0]) for x in data]
selected_files.extend(data)
selected_files = set(selected_files)
indices = [i for i in range(len(video_list)) if (video_list[i] in selected_files)]
return indices
def __len__(self):
return self.size
def _preprocess(self, video):
video = resize_crop(video, self.resolution)
return video
def __getitem__(self, idx):
idx = self.shuffle_indices[idx]
idx = self.indices[idx]
video = read_video(self.video_list[idx])[0]
        # Sample a random temporal window of n_frames frames, then reorder to (C, T, H, W).
        prefix = np.random.randint(len(video) - self.nframes + 1)
        video = video[prefix:prefix + self.nframes].float().permute(3, 0, 1, 2)
        return (self._preprocess(video), idx)
def save(model):
if issubclass(type(model), torch.jit.ScriptModule):
return model.save_to_buffer()
elif issubclass(type(model), torch.nn.Module):
return deepcopy(model)
else:
        raise RuntimeError(f'Cannot save type {type(model)}')
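
# Hedged usage sketch for save(): scripted modules serialize to an in-memory
# buffer, eager modules are deep-copied (module names here are illustrative).
# scripted = torch.jit.script(my_module)
# blob = save(scripted)        # serialized buffer
# clone = save(my_module)      # independent deepcopy of the eager module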
def make_estimator(name, categorical_columns=None, iforest_kw=None, lof_kw=None):
if (name == 'LOF'):
outlier_detector = LocalOutlierFactor(**(lof_kw or {}))
if (categorical_columns is None):
preprocessor = RobustScaler()
else:
preprocessor = ColumnTransformer(transformers=[('categorical', OneHotEncoder(), categorical_columns)], remainder=RobustScaler())
else:
outlier_detector = IsolationForest(**(iforest_kw or {}))
if (categorical_columns is None):
preprocessor = None
else:
ordinal_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=(- 1))
preprocessor = ColumnTransformer(transformers=[('categorical', ordinal_encoder, categorical_columns)], remainder='passthrough')
    return make_pipeline(preprocessor, outlier_detector)
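
# Hedged usage sketch for make_estimator, assuming the scikit-learn imports it
# relies on (LocalOutlierFactor, IsolationForest, RobustScaler, OneHotEncoder,
# OrdinalEncoder, ColumnTransformer, make_pipeline) are in scope:
# est = make_estimator('LOF', categorical_columns=[0], lof_kw={'n_neighbors': 20})
# est.fit(X)   # X: 2-D array whose column 0 is categorical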
class StandardScaler:
def __init__(self):
self.mean = 0.0
self.std = 1.0
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = (torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean)
std = (torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std)
return ((data - mean) / std)
    def inverse_transform(self, data):
        mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
        std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
        return data * std + mean
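
# Minimal usage sketch for StandardScaler above, assuming numpy (np) and torch
# are imported; fit() expects a numpy array so transform() can promote the
# stored statistics to tensors on demand.
_x = np.random.randn(100, 4)
_sc = StandardScaler()
_sc.fit(_x)
_z = _sc.transform(torch.from_numpy(_x))    # tensor in, tensor out
_back = _sc.inverse_transform(_z)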
def test_ArrayBuilder_real():
def f1(x, z):
x.real(1)
x.real(2.2)
x.real(z)
return x
a = ak.highlevel.ArrayBuilder()
b = f1(a, np.array([3.5], dtype=np.float32)[0])
assert (ak.operations.to_list(a.snapshot()) == [1, 2.2, 3.5])
    assert (ak.operations.to_list(b.snapshot()) == [1, 2.2, 3.5])
def pose_publisher():
model_state_pub = rospy.Publisher('/gazebo/set_model_states', ModelStates, queue_size=1)
relative_pose_pub = rospy.Publisher('/gazebo/relative_pose', Pose, queue_size=1)
poses_msg = ModelStates()
poses_msg.name = ([None] * 3)
poses_msg.pose = [Pose() for i in range(3)]
poses_msg.twist = [Twist() for i in range(3)]
poses_msg.name[0] = 'target_red'
poses_msg.name[1] = 'target_blue'
poses_msg.name[2] = 'target_green'
poses_msg.pose[0].position.x = (- 8)
poses_msg.pose[0].position.y = (- 8.5)
poses_msg.pose[0].position.z = 1.5
poses_msg.pose[1].position.x = 11
poses_msg.pose[1].position.y = 2.5
poses_msg.pose[1].position.z = 1.5
poses_msg.pose[2].position.x = 13.5
poses_msg.pose[2].position.y = (- 8)
poses_msg.pose[2].position.z = 1.5
poses_msg.pose[2].orientation.z = 0.707106
poses_msg.pose[2].orientation.w = 0.707106
f = 10.0
v = 0.0
a = 0.5
i = 0
period_1 = 2
period_2 = (period_1 + 4)
period_3 = ((period_2 + 2) + 2)
period_4 = (period_3 + 4)
period_total = (period_4 + 2)
rate = rospy.Rate(f)
while (not rospy.is_shutdown()):
if ((i % (period_total * f)) < (period_1 * f)):
v = (v + (a / f))
elif ((i % (period_total * f)) < (period_2 * f)):
v = 1.0
elif ((i % (period_total * f)) < (period_3 * f)):
v = (v - (a / f))
elif ((i % (period_total * f)) < (period_4 * f)):
v = (- 1.0)
else:
v = (v + (a / f))
poses_msg.pose[0].position.y = (poses_msg.pose[0].position.y + (v / f))
poses_msg.pose[1].position.y = (poses_msg.pose[1].position.y + (v / f))
        if (id == 3):  # 'id' is the Python builtin in this snippet, so this branch never fires
poses_msg.pose[2].position.y = (poses_msg.pose[2].position.y + (v / f))
else:
poses_msg.pose[2].position.x = (poses_msg.pose[2].position.x + (v / f))
model_state_pub.publish(poses_msg)
i = (i + 1)
        try:
            response = get_link_state('iris_0::realsense_camera::link', 'target_green::link')
            relative_pose = response.link_state.pose
            relative_pose_pub.publish(relative_pose)
        except rospy.ServiceException:
            # keep honoring the loop rate even when the service call fails
            rate.sleep()
            continue
        rate.sleep()
class SelfTrainingClassifier(_RoutingNotSupportedMixin, MetaEstimatorMixin, BaseEstimator):
_estimator_type = 'classifier'
_parameter_constraints: dict = {'base_estimator': [HasMethods(['fit'])], 'threshold': [Interval(Real, 0.0, 1.0, closed='left')], 'criterion': [StrOptions({'threshold', 'k_best'})], 'k_best': [Interval(Integral, 1, None, closed='left')], 'max_iter': [Interval(Integral, 0, None, closed='left'), None], 'verbose': ['verbose']}
def __init__(self, base_estimator, threshold=0.75, criterion='threshold', k_best=10, max_iter=10, verbose=False):
self.base_estimator = base_estimator
self.threshold = threshold
self.criterion = criterion
self.k_best = k_best
self.max_iter = max_iter
self.verbose = verbose
    @_fit_context(prefer_skip_nested_validation=False)
def fit(self, X, y):
(X, y) = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'lil', 'dok'], force_all_finite=False)
self.base_estimator_ = clone(self.base_estimator)
if (y.dtype.kind in ['U', 'S']):
raise ValueError('y has dtype string. If you wish to predict on string targets, use dtype object, and use -1 as the label for unlabeled samples.')
has_label = (y != (- 1))
if np.all(has_label):
warnings.warn('y contains no unlabeled samples', UserWarning)
if ((self.criterion == 'k_best') and (self.k_best > (X.shape[0] - np.sum(has_label)))):
warnings.warn('k_best is larger than the amount of unlabeled samples. All unlabeled samples will be labeled in the first iteration', UserWarning)
self.transduction_ = np.copy(y)
self.labeled_iter_ = np.full_like(y, (- 1))
self.labeled_iter_[has_label] = 0
self.n_iter_ = 0
while ((not np.all(has_label)) and ((self.max_iter is None) or (self.n_iter_ < self.max_iter))):
self.n_iter_ += 1
self.base_estimator_.fit(X[safe_mask(X, has_label)], self.transduction_[has_label])
prob = self.base_estimator_.predict_proba(X[safe_mask(X, (~ has_label))])
pred = self.base_estimator_.classes_[np.argmax(prob, axis=1)]
max_proba = np.max(prob, axis=1)
if (self.criterion == 'threshold'):
selected = (max_proba > self.threshold)
else:
n_to_select = min(self.k_best, max_proba.shape[0])
if (n_to_select == max_proba.shape[0]):
selected = np.ones_like(max_proba, dtype=bool)
else:
selected = np.argpartition((- max_proba), n_to_select)[:n_to_select]
selected_full = np.nonzero((~ has_label))[0][selected]
self.transduction_[selected_full] = pred[selected]
has_label[selected_full] = True
self.labeled_iter_[selected_full] = self.n_iter_
if (selected_full.shape[0] == 0):
self.termination_condition_ = 'no_change'
break
if self.verbose:
print(f'End of iteration {self.n_iter_}, added {selected_full.shape[0]} new labels.')
if (self.n_iter_ == self.max_iter):
self.termination_condition_ = 'max_iter'
if np.all(has_label):
self.termination_condition_ = 'all_labeled'
self.base_estimator_.fit(X[safe_mask(X, has_label)], self.transduction_[has_label])
self.classes_ = self.base_estimator_.classes_
return self
    @available_if(_estimator_has('predict'))
def predict(self, X):
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=True, force_all_finite=False, reset=False)
return self.base_estimator_.predict(X)
    @available_if(_estimator_has('predict_proba'))
def predict_proba(self, X):
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=True, force_all_finite=False, reset=False)
return self.base_estimator_.predict_proba(X)
    @available_if(_estimator_has('decision_function'))
def decision_function(self, X):
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=True, force_all_finite=False, reset=False)
return self.base_estimator_.decision_function(X)
    @available_if(_estimator_has('predict_log_proba'))
def predict_log_proba(self, X):
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=True, force_all_finite=False, reset=False)
return self.base_estimator_.predict_log_proba(X)
    @available_if(_estimator_has('score'))
def score(self, X, y):
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=True, force_all_finite=False, reset=False)
        return self.base_estimator_.score(X, y)
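
# Hedged usage sketch matching scikit-learn's semi-supervised convention,
# where unlabeled rows carry the label -1:
# from sklearn.svm import SVC
# self_training = SelfTrainingClassifier(SVC(probability=True, gamma='auto'))
# self_training.fit(X, y)          # y uses -1 for unlabeled samples
# self_training.predict(X_new)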
def test_anndataloader_distributed_sampler_init():
adata = scvi.data.synthetic_iid()
manager = generic_setup_adata_manager(adata)
with pytest.raises(ValueError):
        _ = scvi.dataloaders.AnnDataLoader(manager, sampler='a sampler', distributed_sampler=True)
def cardinality_bsgs(self, verbose=False):
E1 = self
k = self.base_field()
q = k.order()
if (q < 50):
if verbose:
print('q=', q, '< 50 so using exhaustive count')
return cardinality_exhaustive(self)
E2 = E1.quadratic_twist()
if verbose:
print('Quadratic twist is ', E2.ainvs())
bounds = Hasse_bounds(q)
(lower, upper) = bounds
B = ((upper - q) - 1)
a = ZZ(0)
N1 = N2 = M = ZZ(1)
kmin = (- B)
kmax = B
q1 = (q + 1)
if (q > (2 ** 10)):
N1 = (ZZ(2) ** sum([e for (P, e) in E1._p_primary_torsion_basis(2)]))
N2 = (ZZ(2) ** sum([e for (P, e) in E2._p_primary_torsion_basis(2)]))
if (q > (2 ** 20)):
N1 *= (ZZ(3) ** sum([e for (P, e) in E1._p_primary_torsion_basis(3)]))
N2 *= (ZZ(3) ** sum([e for (P, e) in E2._p_primary_torsion_basis(3)]))
if (q > (2 ** 40)):
N1 *= (ZZ(5) ** sum([e for (P, e) in E1._p_primary_torsion_basis(5)]))
N2 *= (ZZ(5) ** sum([e for (P, e) in E2._p_primary_torsion_basis(5)]))
a = q1
M = N1
(g, u, v) = M.xgcd(N2)
if (N2 > g):
a = ((((a * v) * N2) - ((q1 * u) * M)) // g)
M *= (N2 // g)
a = (a % M)
if verbose:
print('(a,M)=', (a, M))
kmin = (((- B) - a) / M).ceil()
kmax = ((B - a) / M).floor()
if (kmin == kmax):
self._order = ((q1 - a) - (kmin * M))
if verbose:
print('no random points were needed')
return self._order
if verbose:
print('(2,3,5)-torsion subgroup gives M=', M)
while (kmax != kmin):
n = order_from_bounds(E1.random_point(), bounds, N1, operation='+')
if verbose:
print('New point on E has order ', n)
N1 = N1.lcm(n)
(g, u, v) = M.xgcd(n)
if (n > g):
a = ((((a * v) * n) + ((q1 * u) * M)) // g)
M *= (n // g)
a = (a % M)
if verbose:
print('(a,M)=', (a, M))
kmin = (((- B) - a) / M).ceil()
kmax = ((B - a) / M).floor()
if (kmin == kmax):
self._order = ((q1 - a) - (kmin * M))
return self._order
if verbose:
print('number of possibilities is now ', ((kmax - kmin) + 1))
n = order_from_bounds(E2.random_point(), bounds, N2, operation='+')
if verbose:
print("New point on E' has order ", n)
N2 = N2.lcm(n)
(g, u, v) = M.xgcd(n)
if (n > g):
a = ((((a * v) * n) - ((q1 * u) * M)) // g)
M *= (n // g)
a = (a % M)
if verbose:
print('(a,M)=', (a, M))
kmin = (((- B) - a) / M).ceil()
kmax = ((B - a) / M).floor()
if (kmin == kmax):
self._order = ((q1 - a) - (kmin * M))
return self._order
if verbose:
            print('number of possibilities is now ', ((kmax - kmin) + 1))
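
# Usage sketch (Sage): for E over GF(q) this returns #E(GF(q)), narrowing the
# Hasse interval |#E - (q + 1)| <= 2*sqrt(q) with orders of random points on E
# and its quadratic twist until a single candidate order remains.
# E = EllipticCurve(GF(10007), [1, 2])
# E.cardinality_bsgs()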
def dropout_vnet(input_shape=(280, 280, 280, 1), kernel_size=3, activation='relu', padding='SAME', **kwargs):
inputs = Input(input_shape)
(conv1, pool1) = down_stage(inputs, 16, kernel_size=kernel_size, activation=activation, padding=padding)
(conv2, pool2) = down_stage(pool1, 32, kernel_size=kernel_size, activation=activation, padding=padding)
(conv3, pool3) = down_stage(pool2, 64, kernel_size=kernel_size, activation=activation, padding=padding)
(conv4, _) = down_stage(pool3, 128, kernel_size=kernel_size, activation=activation, padding=padding)
conv4 = SpatialDropout3D(0.5)(conv4, training=True)
conv5 = up_stage(conv4, conv3, 64, kernel_size=kernel_size, activation=activation, padding=padding)
conv6 = up_stage(conv5, conv2, 32, kernel_size=kernel_size, activation=activation, padding=padding)
conv7 = up_stage(conv6, conv1, 16, kernel_size=kernel_size, activation=activation, padding=padding)
conv8 = end_stage(conv7, kernel_size=kernel_size, activation=activation, padding=padding)
    return Model(inputs=inputs, outputs=conv8)
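
# Note on the architecture above: SpatialDropout3D is called with
# training=True, so dropout stays active even at inference time; repeated
# forward passes therefore yield Monte-Carlo dropout samples.
# model = dropout_vnet(input_shape=(64, 64, 64, 1))
# mc_preds = [model.predict(x) for _ in range(10)]   # still stochastic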
class ManifoldPoint(Element):
def __init__(self, parent, coords=None, chart=None, name=None, latex_name=None, check_coords=True):
if parent.is_empty():
raise TypeError(f'cannot define a point on the {parent} because it has been declared empty')
Element.__init__(self, parent)
parent._has_defined_points = True
self._manifold = parent.manifold()
self._coordinates = {}
if (coords is not None):
if (len(coords) != parent.manifold().dimension()):
raise ValueError(('the number of coordinates must be equal ' + "to the manifold's dimension"))
from sage.manifolds.manifold import TopologicalManifold
if (chart is None):
chart = parent._def_chart
elif isinstance(parent, TopologicalManifold):
if (chart not in parent._atlas):
                    raise ValueError('the {} has not been '.format(chart) + 'defined on the {}'.format(parent))
if check_coords:
if (not chart.valid_coordinates(*coords)):
raise ValueError(('the coordinates {}'.format(coords) + ' are not valid on the {}'.format(chart)))
for schart in chart._supercharts:
self._coordinates[schart] = tuple(coords)
for schart in chart._subcharts:
if (schart != chart):
if schart.valid_coordinates(*coords):
self._coordinates[schart] = tuple(coords)
self._name = name
if (latex_name is None):
self._latex_name = self._name
else:
self._latex_name = latex_name
def _repr_(self):
description = 'Point'
if (self._name is not None):
description += (' ' + self._name)
description += ' on the {}'.format(self._manifold)
return description
def _latex_(self):
if (self._latex_name is None):
return (('\\text{' + str(self)) + '}')
return self._latex_name
def coordinates(self, chart=None, old_chart=None):
if (chart is None):
dom = self.parent()
chart = dom._def_chart
def_chart = chart
else:
dom = chart.domain()
def_chart = dom._def_chart
if (self not in dom):
raise ValueError(('the point does not belong to the domain ' + 'of {}'.format(chart)))
if (chart not in self._coordinates):
for ochart in self._coordinates:
if ((chart in ochart._supercharts) or (chart in ochart._subcharts)):
self._coordinates[chart] = self._coordinates[ochart]
return self._coordinates[chart]
if (old_chart is not None):
s_old_chart = old_chart
s_chart = chart
else:
if ((def_chart in self._coordinates) and ((def_chart, chart) in dom._coord_changes)):
old_chart = def_chart
s_old_chart = def_chart
s_chart = chart
else:
for ochart in self._coordinates:
for subchart in ochart._subcharts:
if ((subchart, chart) in dom._coord_changes):
old_chart = ochart
s_old_chart = subchart
s_chart = chart
break
if (old_chart is not None):
break
if (old_chart is None):
for schart in chart._subcharts:
for ochart in self._coordinates:
for subchart in ochart._subcharts:
if ((subchart, schart) in dom._coord_changes):
old_chart = ochart
s_old_chart = subchart
s_chart = schart
break
if (old_chart is not None):
break
if (old_chart is not None):
break
if (old_chart is None):
raise ValueError(((('the coordinates of {}'.format(self) + ' in the {}'.format(chart)) + ' cannot be computed ') + 'by means of known changes of charts.'))
else:
chcoord = dom._coord_changes[(s_old_chart, s_chart)]
self._coordinates[chart] = chcoord(*self._coordinates[old_chart])
return self._coordinates[chart]
coord = coordinates
def set_coordinates(self, coords, chart=None):
self._coordinates.clear()
self.add_coord(coords, chart)
set_coord = set_coordinates
def add_coordinates(self, coords, chart=None):
if (len(coords) != self.parent().manifold()._dim):
raise ValueError(('the number of coordinates must be equal to ' + "the manifold's dimension."))
if (chart is None):
chart = self.parent()._def_chart
elif (chart not in self.parent()._atlas):
raise ValueError((('the {}'.format(chart) + ' has not been ') + 'defined on the {}'.format(self.parent())))
self._coordinates[chart] = coords
add_coord = add_coordinates
def __eq__(self, other):
if (other is self):
return True
if (not isinstance(other, ManifoldPoint)):
return False
if (other.parent().manifold() != self.parent().manifold()):
return False
common_chart = None
if hasattr(self.parent(), '_def_chart'):
def_chart = self.parent()._def_chart
else:
def_chart = self.parent().manifold()._def_chart
if ((def_chart in self._coordinates) and (def_chart in other._coordinates)):
common_chart = def_chart
else:
for chart in self._coordinates:
if (chart in other._coordinates):
common_chart = chart
break
if (common_chart is None):
if (def_chart in self._coordinates):
try:
other.coordinates(def_chart)
common_chart = def_chart
except ValueError:
pass
if (common_chart is None):
if (def_chart in other._coordinates):
try:
self.coordinates(def_chart)
common_chart = def_chart
except ValueError:
pass
if (common_chart is None):
for chart in self._coordinates:
try:
other.coordinates(chart)
common_chart = chart
break
except ValueError:
pass
else:
for chart in other._coordinates:
try:
self.coordinates(chart)
common_chart = chart
break
except ValueError:
pass
if (common_chart is None):
return False
periods = common_chart.periods()
for (ind, (xs, xo)) in enumerate(zip(self._coordinates[common_chart], other._coordinates[common_chart])):
diff = (xs - xo)
period = periods[ind]
if (period is not None):
if (not ((diff / period) in ZZ)):
return False
elif (isinstance(diff, Expression) and (not diff.is_trivial_zero())):
return False
elif (not (diff == 0)):
return False
return True
def __ne__(self, other):
return (not (self == other))
def __hash__(self):
return hash(self.parent().manifold())
    @options(size=10, color='black', label_color=None, fontsize=10, label_offset=0.1)
def plot(self, chart=None, ambient_coords=None, mapping=None, label=None, parameters=None, **kwds):
from sage.plot.point import point2d
from sage.plot.text import text
from sage.plot.graphics import Graphics
from sage.plot.plot3d.shapes2 import point3d, text3d
from sage.manifolds.chart import Chart
if (self._manifold.base_field_type() != 'real'):
raise NotImplementedError('plot of points on manifolds over fields different from the real field is not implemented')
if (chart is None):
chart = self.parent().default_chart()
elif (not isinstance(chart, Chart)):
raise TypeError("the argument 'chart' must be a coordinate chart")
if (mapping is None):
eff_point = self
else:
eff_point = mapping(self)
if (ambient_coords is None):
ambient_coords = chart[:]
elif (not isinstance(ambient_coords, tuple)):
ambient_coords = tuple(ambient_coords)
nca = len(ambient_coords)
if ((nca != 2) and (nca != 3)):
raise TypeError('invalid number of ambient coordinates: {}'.format(nca))
size = kwds['size']
color = kwds['color']
label_color = kwds['label_color']
fontsize = kwds['fontsize']
label_offset = kwds['label_offset']
coords = eff_point.coord(chart)
xx = chart[:]
xp = [coords[xx.index(c)] for c in ambient_coords]
if (parameters is not None):
xps = [coord.substitute(parameters) for coord in xp]
xp = xps
xlab = [(coord + label_offset) for coord in xp]
if (label_color is None):
label_color = color
resu = Graphics()
if (nca == 2):
if (label is None):
label = (('$' + self._latex_name) + '$')
resu += (point2d(xp, color=color, size=size) + text(label, xlab, fontsize=fontsize, color=label_color))
else:
if (label is None):
label = self._name
resu += (point3d(xp, color=color, size=size) + text3d(label, xlab, fontsize=fontsize, color=label_color))
        return resu
def test_f1_macro_2d_list():
y_true = [[1, 2, 3, 4], [1, 2, 5, 6]]
y_pred = [[1, 5, 6], [1, 2, 3]]
    assert (0.4285714 == approx(f1(y_true, y_pred, 'macro')))
def _check_bn(model):
flag = [False]
model.apply((lambda module: _check_bn_apply(module, flag)))
    return flag[0]
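
# Hedged sketch of the _check_bn_apply helper assumed above (the one-element
# list lets the closure report back out of model.apply):
# def _check_bn_apply(module, flag):
#     if issubclass(type(module), torch.nn.modules.batchnorm._BatchNorm):
#         flag[0] = True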
def register_types(module):
root_module = module.get_root()
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('AttributeConstructionList', import_from_module='ns.core')
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
module.add_class('Buffer', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
module.add_class('ByteTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
module.add_class('ByteTagList', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
module.add_class('CallbackBase', import_from_module='ns.core')
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
module.add_class('EventId', import_from_module='ns.core')
module.add_class('Hasher', import_from_module='ns.core')
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('InetSocketAddress', import_from_module='ns.network')
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4Address', import_from_module='ns.network')
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
module.add_class('Ipv4Mask', import_from_module='ns.network')
module.add_class('Ipv6Address', import_from_module='ns.network')
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv6Prefix', import_from_module='ns.network')
module.add_class('Mac48Address', import_from_module='ns.network')
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
module.add_class('ObjectDeleter', import_from_module='ns.core')
module.add_class('ObjectFactory', import_from_module='ns.core')
module.add_class('PacketMetadata', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_class('PacketTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
module.add_class('PacketTagList', import_from_module='ns.network')
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
module.add_class('PyViz')
module.add_enum('PacketCaptureMode', ['PACKET_CAPTURE_DISABLED', 'PACKET_CAPTURE_FILTER_HEADERS_OR', 'PACKET_CAPTURE_FILTER_HEADERS_AND'], outer_class=root_module['ns3::PyViz'])
module.add_class('LastPacketsSample', outer_class=root_module['ns3::PyViz'])
module.add_class('NetDeviceStatistics', outer_class=root_module['ns3::PyViz'])
module.add_class('NodeStatistics', outer_class=root_module['ns3::PyViz'])
module.add_class('PacketCaptureOptions', outer_class=root_module['ns3::PyViz'])
module.add_class('PacketDropSample', outer_class=root_module['ns3::PyViz'])
module.add_class('PacketSample', outer_class=root_module['ns3::PyViz'])
module.add_class('RxPacketSample', parent=root_module['ns3::PyViz::PacketSample'], outer_class=root_module['ns3::PyViz'])
module.add_class('TransmissionSample', outer_class=root_module['ns3::PyViz'])
module.add_class('TxPacketSample', parent=root_module['ns3::PyViz::PacketSample'], outer_class=root_module['ns3::PyViz'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('TagBuffer', import_from_module='ns.network')
module.add_class('TimeWithUnit', import_from_module='ns.core')
module.add_class('TypeId', import_from_module='ns.core')
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('empty', import_from_module='ns.core')
module.add_class('int64x64_t', import_from_module='ns.core')
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('Time', import_from_module='ns.core')
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ipv4L3Protocol::DropReason', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
module.add_container('std::vector< ns3::PyViz::TransmissionSample >', 'ns3::PyViz::TransmissionSample', container_type=u'vector')
module.add_container('ns3::PyViz::TransmissionSampleList', 'ns3::PyViz::TransmissionSample', container_type=u'vector')
module.add_container('std::vector< ns3::PyViz::PacketDropSample >', 'ns3::PyViz::PacketDropSample', container_type=u'vector')
module.add_container('ns3::PyViz::PacketDropSampleList', 'ns3::PyViz::PacketDropSample', container_type=u'vector')
module.add_container('std::vector< ns3::PyViz::RxPacketSample >', 'ns3::PyViz::RxPacketSample', container_type=u'vector')
module.add_container('std::vector< ns3::PyViz::TxPacketSample >', 'ns3::PyViz::TxPacketSample', container_type=u'vector')
module.add_container('std::vector< ns3::PyViz::PacketSample >', 'ns3::PyViz::PacketSample', container_type=u'vector')
module.add_container('std::set< unsigned int >', 'unsigned int', container_type=u'set')
module.add_container('std::vector< ns3::PyViz::NetDeviceStatistics >', 'ns3::PyViz::NetDeviceStatistics', container_type=u'vector')
module.add_container('std::vector< ns3::PyViz::NodeStatistics >', 'ns3::PyViz::NodeStatistics', container_type=u'vector')
module.add_container('std::set< ns3::TypeId >', 'ns3::TypeId', container_type=u'set')
module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
@pytest.mark.parametrize('hint, expected', [(A, UNSUPPORTED), (list[A], Instance(TypeInfo(list), (UNSUPPORTED,)))])
def test_convert_type_hint_unsupported(hint, expected):
ts = TypeSystem()
    assert ts.convert_type_hint(hint, unsupported=UNSUPPORTED) == expected
def _SyncAllParams(devices, model, init_net, net, rendezvous, unique_param_names, max_concurrent_distributed_ops=4):
if ((rendezvous is None) or (rendezvous['num_shards'] <= 1)):
_SyncAllParamsSingleHost(devices, model, net, unique_param_names)
else:
        _SyncAllParamsDistributed(devices, model, init_net, net, rendezvous, unique_param_names, max_concurrent_distributed_ops)
class TestMinimumPhase:
def test_bad_args(self):
assert_raises(ValueError, minimum_phase, [1.0])
assert_raises(ValueError, minimum_phase, [1.0, 1.0])
assert_raises(ValueError, minimum_phase, np.full(10, 1j))
assert_raises(ValueError, minimum_phase, 'foo')
assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
def test_homomorphic(self):
h = [1, (- 1)]
h_new = minimum_phase(np.convolve(h, h[::(- 1)]))
assert_allclose(h_new, h, rtol=0.05)
rng = np.random.RandomState(0)
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
h = rng.randn(n)
h_new = minimum_phase(np.convolve(h, h[::(- 1)]))
assert_allclose(np.abs(fft(h_new)), np.abs(fft(h)), rtol=0.0001)
def test_hilbert(self):
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.0)
k = [0., 0., 0., 0., (- 0.), (- 0.)]
m = minimum_phase(h, 'hilbert')
assert_allclose(m, k, rtol=0.005)
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.0)
k = [0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), (- 0.)]
m = minimum_phase(h, 'hilbert', n_fft=(2 ** 19))
        assert_allclose(m, k, rtol=0.002)
def load(file, file_format=None, **kwargs):
if isinstance(file, Path):
file = str(file)
if ((file_format is None) and is_str(file)):
file_format = file.split('.')[(- 1)]
if (file_format not in file_handlers):
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if is_str(file):
obj = handler.load_from_path(file, **kwargs)
elif hasattr(file, 'read'):
obj = handler.load_from_fileobj(file, **kwargs)
else:
raise TypeError('"file" must be a filepath str or a file-object')
    return obj
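
# Hedged usage sketch for load(); file_handlers is assumed to map suffixes
# such as 'json'/'yaml'/'pkl' to handler objects, as in mmcv-style IO helpers:
# cfg = load('config.json')                            # format from suffix
# cfg = load(io.StringIO('{}'), file_format='json')    # explicit format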
def test_named_record_int32_float64_parameters():
t = RecordType([NumpyType('int32'), NumpyType('float64')], None, parameters={'__record__': 'Name', 'p': [123]})
    assert (str(ak.types.from_datashape(str(t), highlevel=False)) == str(t))
class Lambda(LambdaBase):
def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))
def read_json(fname):
    # fname is expected to be a pathlib.Path; OrderedDict preserves key order.
    with fname.open('rt') as handle:
        return json.load(handle, object_hook=OrderedDict)
class _UnilinearModel(Model):
def __init__(self):
        super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, estimate=_unilin_est, meta={'name': 'Univariate Linear', 'equ': 'y = B_0 * x + B_1', 'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
def parse_args():
parser = argparse.ArgumentParser(description='Convert the conll03 format data into conllu format.')
parser.add_argument('input', help='Input conll03 format data filename.')
parser.add_argument('output', help='Output json filename.')
args = parser.parse_args()
    return args
def generate_pureSetting(inter_prob, intra_prob, alpha):
cps = [15, 30, 60, 75, 90, 105, 135]
    fname = f'pure_{inter_prob}_{intra_prob}_{alpha}.txt'
cps_sizes = []
cps_probs = []
sizes_1 = [250, 250]
probs_1 = construct_SBM_block(sizes_1, inter_prob, intra_prob)
sizes_2 = [125, 125, 125, 125]
probs_2 = construct_SBM_block(sizes_2, inter_prob, intra_prob)
sizes_3 = ([50] * 10)
probs_3 = construct_SBM_block(sizes_3, inter_prob, intra_prob)
list_sizes = []
list_sizes.append(sizes_1)
list_sizes.append(sizes_2)
list_sizes.append(sizes_3)
list_probs = []
list_probs.append(probs_1)
list_probs.append(probs_2)
list_probs.append(probs_3)
list_idx = 1
sizes = sizes_2
probs = probs_2
maxt = 150
G_0 = nx.stochastic_block_model(sizes, probs)
G_0 = nx.Graph(G_0)
G_t = G_0
G_times = []
G_times.append(G_t)
for t in range(maxt):
if (t in cps):
if ((list_idx + 1) > (len(list_sizes) - 1)):
list_idx = 0
else:
list_idx = (list_idx + 1)
sizes = list_sizes[list_idx]
probs = list_probs[list_idx]
G_t = SBM_snapshot(G_t, 1.0, sizes, probs)
G_times.append(G_t)
print(('generating ' + str(t)), end='\r')
else:
G_t = SBM_snapshot(G_t, alpha, sizes, probs)
G_times.append(G_t)
print(('generating ' + str(t)), end='\r')
    to_edgelist(G_times, fname)
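
# Hedged sketch of the construct_SBM_block helper assumed above: a block
# probability matrix with intra_prob within communities and inter_prob across
# them (the real helper may differ):
# def construct_SBM_block(sizes, inter_prob, intra_prob):
#     k = len(sizes)
#     return [[intra_prob if i == j else inter_prob for j in range(k)] for i in range(k)]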
@pytest.mark.parametrize('method', ['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
def test_gh18171(method):
def f(x):
f._count += 1
return np.nan
f._count = 0
res = root_scalar(f, bracket=(0, 1), method=method)
assert (res.converged is False)
assert res.flag.startswith('The function value at x')
assert (res.function_calls == f._count)
assert (str(res.root) in res.flag) |
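# Added context sketch: gh-18171 is about bracketing solvers reporting failure
# gracefully when f returns NaN instead of raising. For contrast, a well-posed
# bracket converges to machine precision:
# from scipy.optimize import root_scalar
# sol = root_scalar(lambda x: (x ** 2) - 2, bracket=(0, 2), method='brentq')
# assert sol.converged and (abs(sol.root - (2 ** 0.5)) < 1e-12) |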
def main():
parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
if (args.output_dir is None):
args.output_dir = os.path.join('./results', f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}")
os.makedirs(args.output_dir)
model = GLUETransformer(args)
trainer = generic_train(model, args)
if args.do_predict:
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True)))
model = model.load_from_checkpoint(checkpoints[(- 1)])
return trainer.test(model) |
class Mixed_5c(nn.Module):
def __init__(self):
super(Mixed_5c, self).__init__()
self.branch0 = nn.Sequential(BasicConv3d(832, 384, kernel_size=1, stride=1))
self.branch1 = nn.Sequential(BasicConv3d(832, 192, kernel_size=1, stride=1), SepConv3d(192, 384, kernel_size=3, stride=1, padding=1))
self.branch2 = nn.Sequential(BasicConv3d(832, 48, kernel_size=1, stride=1), SepConv3d(48, 128, kernel_size=3, stride=1, padding=1))
self.branch3 = nn.Sequential(nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1), BasicConv3d(832, 128, kernel_size=1, stride=1))
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out |
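# Added shape note (sketch): assuming BasicConv3d/SepConv3d preserve the
# spatio-temporal dims at stride 1, Mixed_5c concatenates
# 384 + 384 + 128 + 128 = 1024 output channels from 832 input channels:
# x = torch.randn(1, 832, 4, 7, 7)
# assert Mixed_5c()(x).shape == (1, 1024, 4, 7, 7) |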
@dace.library.expansion
class ExpandReduceCUDABlockAll(pm.ExpandTransformation):
environments = [CUDA]
    @staticmethod
    def redirect_edge(graph, edge, new_src=None, new_src_conn=None, new_dst=None, new_dst_conn=None, new_data=None):
data = (new_data if new_data else edge.data)
if (new_src and new_dst):
ret = graph.add_edge(new_src, new_src_conn, new_dst, new_dst_conn, data)
graph.remove_edge(edge)
elif new_src:
ret = graph.add_edge(new_src, new_src_conn, edge.dst, edge.dst_conn, data)
graph.remove_edge(edge)
elif new_dst:
ret = graph.add_edge(edge.src, edge.src_conn, new_dst, new_dst_conn, data)
graph.remove_edge(edge)
        else:
            ret = None  # no redirection requested; edge left untouched
        return ret
    @staticmethod
    def expansion(node: 'Reduce', state: SDFGState, sdfg: SDFG):
graph = state
reduce_node = node
in_edge = graph.in_edges(reduce_node)[0]
out_edge = graph.out_edges(reduce_node)[0]
axes = reduce_node.axes
(new_entry, new_exit) = graph.add_map(name='inner_reduce_block', ndrange={('i' + str(i)): f'{rng[0]}:{(rng[1] + 1)}:{rng[2]}' for (i, rng) in enumerate(in_edge.data.subset) if (i in axes)}, schedule=dtypes.ScheduleType.Default)
map = new_entry.map
ExpandReduceCUDABlockAll.redirect_edge(graph, in_edge, new_dst=new_entry)
ExpandReduceCUDABlockAll.redirect_edge(graph, out_edge, new_src=new_exit)
subset_in = subsets.Range([(in_edge.data.subset[i] if (i not in axes) else (new_entry.map.params[0], new_entry.map.params[0], 1)) for i in range(len(in_edge.data.subset))])
memlet_in = dace.Memlet(data=in_edge.data.data, volume=1, subset=subset_in)
memlet_out = dcpy(out_edge.data)
graph.add_edge(u=new_entry, u_connector=None, v=reduce_node, v_connector=None, memlet=memlet_in)
graph.add_edge(u=reduce_node, u_connector=None, v=new_exit, v_connector=None, memlet=memlet_out)
from dace.transformation.dataflow.local_storage import LocalStorage, InLocalStorage, OutLocalStorage
in_local_storage_subgraph = {LocalStorage.node_a: graph.nodes().index(new_entry), LocalStorage.node_b: graph.nodes().index(reduce_node)}
out_local_storage_subgraph = {LocalStorage.node_a: graph.nodes().index(reduce_node), LocalStorage.node_b: graph.nodes().index(new_exit)}
local_storage = InLocalStorage()
local_storage.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(state), in_local_storage_subgraph, 0)
local_storage.array = in_edge.data.data
local_storage.apply(graph, sdfg)
in_transient = local_storage._data_node
sdfg.data(in_transient.data).storage = dtypes.StorageType.Register
local_storage = OutLocalStorage()
local_storage.setup_match(sdfg, sdfg.sdfg_id, sdfg.nodes().index(state), out_local_storage_subgraph, 0)
local_storage.array = out_edge.data.data
local_storage.apply(graph, sdfg)
out_transient = local_storage._data_node
sdfg.data(out_transient.data).storage = dtypes.StorageType.Register
e1 = graph.in_edges(out_transient)[0]
e2 = graph.out_edges(out_transient)[0]
e1.data.data = dcpy(e2.data.data)
e1.data.subset = dcpy(e2.data.subset)
code = 'if '
for (i, param) in enumerate(new_entry.map.params):
code += (param + '== 0')
if (i < (len(axes) - 1)):
code += ' and '
code += ':\n'
code += '\tout=inp'
tasklet_node = graph.add_tasklet(name='block_reduce_write', inputs=['inp'], outputs=['out'], code=code)
edge_out_outtrans = graph.out_edges(out_transient)[0]
edge_out_innerexit = graph.out_edges(new_exit)[0]
ExpandReduceCUDABlockAll.redirect_edge(graph, edge_out_outtrans, new_dst=tasklet_node, new_dst_conn='inp')
e = graph.add_edge(u=tasklet_node, u_connector='out', v=new_exit, v_connector=None, memlet=dcpy(edge_out_innerexit.data))
e.data.volume = 0
e.data.dynamic = True
reduce_node.axes = None
sdfg.fill_scope_connectors()
reduce_node.implementation = 'CUDA (block)'
sub_expansion = ExpandReduceCUDABlock()
sub_expansion.setup_match(sdfg, sdfg.sdfg_id, sdfg.node_id(state), {}, 0)
return sub_expansion.expansion(node=node, state=state, sdfg=sdfg) |
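# Added note: for a two-axis reduction, the `code` string assembled above
# yields the tasklet body below, i.e. only the (0, ..., 0) thread of the inner
# map writes the block-reduced value out:
#     if i0== 0 and i1== 0:
#         out=inp |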
def convert_pkl(old_pkl, new_pkl):
import dill
import pickle
dill._dill._reverse_typemap['ObjectType'] = object
with open(old_pkl, 'rb') as f:
loaded = pickle.load(f, encoding='latin1')
with open(new_pkl, 'wb') as outfile:
pickle.dump(loaded, outfile) |
def _random_operator(data_type: str) -> str:
if (data_type == 'categorical'):
ops = ['==', '!=']
elif (data_type == 'boolean'):
ops = ['', 'not ']
elif (data_type == 'numerical'):
ops = ['==', '!=', '>', '<', '>=', '<=']
else:
raise ValueError(f'Unknown `data_type`: {data_type}')
return rng.choice(ops) |
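# Added usage note: `rng` is assumed to be a module-level NumPy generator,
# e.g. `rng = np.random.default_rng(0)`, so `_random_operator('numerical')`
# draws uniformly from the six comparison operators. |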
def pnn_model_fn(features, labels, mode, params):
fields_embeddings = []
for cat_feature_column in params['category_feature_columns']:
embed_input = fc.input_layer(features, [cat_feature_column])
fields_embeddings.append(embed_input)
fields_embeddings = tf.concat(fields_embeddings, axis=(- 1))
with tf.variable_scope('linear_part'):
linear_w = tf.get_variable(name='linear_w', shape=((len(params['category_feature_columns']) * FLAGS.embedding_dim), params['output_dimension']), dtype=tf.float32, regularizer=tf.contrib.layers.l2_regularizer(scale=params['weight_regularizer']))
lz = tf.matmul(fields_embeddings, linear_w)
with tf.variable_scope('product_part'):
fields_embeddings = tf.reshape(fields_embeddings, shape=((- 1), len(params['category_feature_columns']), FLAGS.embedding_dim))
product_output = []
if (params['product_method'] == 'IPNN'):
inner_product_w = tf.get_variable(name='inner_product_w', shape=(params['output_dimension'], len(params['category_feature_columns'])), dtype=tf.float32, regularizer=tf.contrib.layers.l2_regularizer(scale=params['weight_regularizer']))
for i in range(params['output_dimension']):
theta = tf.expand_dims(inner_product_w[i], axis=1)
delta = tf.multiply(fields_embeddings, theta)
delta = tf.reduce_sum(delta, axis=1)
lp_i = tf.reduce_sum(tf.square(delta), axis=1, keepdims=True)
product_output.append(lp_i)
else:
outer_product_w = tf.get_variable(name='outer_product_w', shape=(params['output_dimension'], FLAGS.embedding_dim, FLAGS.embedding_dim), dtype=tf.float32, regularizer=tf.contrib.layers.l2_regularizer(scale=params['weight_regularizer']))
fields_embeddings_sum = tf.reduce_sum(fields_embeddings, axis=1)
p = tf.matmul(tf.expand_dims(fields_embeddings_sum, axis=2), tf.expand_dims(fields_embeddings_sum, axis=1))
for i in range(params['output_dimension']):
wi = outer_product_w[i]
upper = tf.matrix_band_part(wi, 0, (- 1))
wi = ((upper + tf.transpose(upper)) - tf.matrix_band_part(wi, 0, 0))
lp_i = tf.multiply(p, wi)
lp_i = tf.expand_dims(tf.reduce_sum(lp_i, axis=[1, 2]), axis=1)
product_output.append(lp_i)
lp = tf.concat(product_output, axis=1)
bias = tf.get_variable(name='bias', shape=params['output_dimension'], dtype=tf.float32)
product_final = tf.nn.relu(((lz + lp) + bias))
with tf.variable_scope('fcn'):
net = product_final
for unit in params['hidden_units']:
net = tf.layers.dense(net, unit, activation=tf.nn.relu)
if (('dropout_rate' in params) and (0.0 < params['dropout_rate'] < 1.0)):
net = tf.layers.dropout(net, params['dropout_rate'], training=(mode == tf.estimator.ModeKeys.TRAIN))
if params['batch_norm']:
net = tf.layers.batch_normalization(net, training=(mode == tf.estimator.ModeKeys.TRAIN))
logit = tf.layers.dense(net, 1)
prediction = tf.sigmoid(logit, name='prediction')
if (mode == tf.estimator.ModeKeys.PREDICT):
predictions = {'probabilities': prediction}
export_outputs = {'prediction': tf.estimator.export.PredictOutput(predictions)}
return tf.estimator.EstimatorSpec(mode, predictions=predictions, export_outputs=export_outputs)
y = labels['read_comment']
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logit), name='loss')
reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if (len(reg_variables) > 0):
loss += tf.add_n(reg_variables)
accuracy = tf.metrics.accuracy(labels=y, predictions=tf.to_float(tf.greater_equal(prediction, 0.5)))
auc = tf.metrics.auc(labels=y, predictions=prediction)
metrics = {'eval_accuracy': accuracy, 'eval_auc': auc}
if (mode == tf.estimator.ModeKeys.EVAL):
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'], beta1=0.9, beta2=0.999, epsilon=1e-08)
update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
assert (mode == tf.estimator.ModeKeys.TRAIN)
tf.summary.scalar('train_accuracy', accuracy[1])
tf.summary.scalar('train_auc', auc[1])
log_hook = tf.train.LoggingTensorHook({'train_loss': loss, 'train_auc': auc[1], 'lz': lz, 'lp': lp}, every_n_iter=100)
profiler_hook = tf.train.ProfilerHook(save_steps=1000, output_dir='./profiler', show_memory=True)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[log_hook, profiler_hook]) |
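# Added wiring sketch (hypothetical feature columns and values): how
# pnn_model_fn plugs into tf.estimator, per the params it reads above.
# estimator = tf.estimator.Estimator(
#     model_fn=pnn_model_fn,
#     params={'category_feature_columns': cat_cols, 'output_dimension': 64,
#             'product_method': 'IPNN', 'hidden_units': [128, 64],
#             'weight_regularizer': 0.0001, 'learning_rate': 0.001,
#             'batch_norm': True, 'dropout_rate': 0.1}) |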
def get_list_of_files(directory_list):
files = []
for directory in directory_list:
dir_name = directory['directory_name']
schema_dir = directory['schema_directory']
with open(os.path.join(schema_dir, 'schema.json'), 'r') as json_data:
schema = json.load(json_data)
schema_attributes = schema['attributes']
schema_meta_data = schema['meta_data']
file_type = schema_meta_data['file_type']
text_type = schema_meta_data['text_type']
max_sentences = directory['max_sentences']
if (max_sentences == (- 1)):
max_sentences = schema_meta_data['length']
max_idx = max(filter((lambda x: (type(x) == int)), schema_attributes.values()))
        if (not directory['file_names']):
            fnames = os.listdir(dir_name)
        else:
            fnames = directory['file_names']
        for fname in fnames:
            if (fname == 'schema.json'):
                continue
            files.append({'file_name': os.path.join(dir_name, fname), 'file_type': file_type, 'tags': schema_attributes, 'text_type': text_type, 'max_sentences': max_sentences, 'max_index': max_idx})
return files |
class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if (output_folder is None):
output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if (evaluator_type == 'lvis'):
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if (evaluator_type == 'coco'):
return COCOEvaluator(dataset_name, cfg, True, output_folder)
if (evaluator_type == 'sem_seg'):
return SemSegEvaluator(dataset_name, distributed=True, num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, output_dir=output_folder)
if (evaluator_type == 'cityscapes_instance'):
            assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently does not work with multiple machines.'
return CityscapesInstanceEvaluator(dataset_name)
if (evaluator_type == 'cityscapes_sem_seg'):
            assert (torch.cuda.device_count() >= comm.get_rank()), 'CityscapesEvaluator currently does not work with multiple machines.'
return CityscapesSemSegEvaluator(dataset_name)
if (len(evaluator_list) == 0):
raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
if (len(evaluator_list) == 1):
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
    @classmethod
    def build_train_loader(cls, cfg):
if ('SemanticSegmentor' in cfg.MODEL.META_ARCHITECTURE):
mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg))
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper) |
class Trainer(BaseTrainer):
def __init__(self, cfg: (DictConfig | ExperimentConfig), build_networks: bool=True, ckpt_dir: Optional[os.PathLike]=None, keep: Optional[(str | Sequence[str])]=None, skip: Optional[(str | Sequence[str])]=None) -> None:
super().__init__(cfg=cfg, keep=keep, skip=skip)
        group = str(self.config.dynamics.group).upper()
        assert (group in ['U1', 'SU3'])
        if (group == 'U1'):
            self.g = U1Phase()
        elif (group == 'SU3'):
            self.g = SU3()
        else:
            raise ValueError(f'Unexpected value for dynamics group: {group}')
self.config: ExperimentConfig = instantiate(cfg)
self.clip_norm = self.config.learning_rate.clip_norm
self._lr_warmup = torch.linspace(self.config.learning_rate.min_lr, self.config.learning_rate.lr_init, (2 * self.steps.nepoch))
self.dtype = PT_DTYPES.get(self.config.precision, None)
assert (self.dtype is not None)
dsetup: dict = setup_torch_distributed(self.config.backend)
self.size: int = dsetup['size']
self.rank: int = dsetup['rank']
self.local_rank: int = dsetup['local_rank']
self._is_orchestrator: bool = ((self.local_rank == 0) and (self.rank == 0))
self._with_cuda: bool = torch.cuda.is_available()
self._dtype = self.dtype
self.device: str = ('cuda' if torch.cuda.is_available() else 'cpu')
self.warning(f'Using {self.dtype} on {self.device}!')
self.lattice = self.build_lattice()
self.loss_fn = self.build_loss_fn()
self.dynamics: Dynamics = self.build_dynamics(build_networks=build_networks)
self.ckpt_dir: Path = (Path(CHECKPOINTS_DIR).joinpath('checkpoints') if (ckpt_dir is None) else Path(ckpt_dir).resolve())
self.ckpt_dir.mkdir(exist_ok=True, parents=True)
self._fstep = 0
self._bstep = 0
self._gstep = 0
self._estep = 0
self._hstep = 0
if self.config.restore:
output: dict = self.load_ckpt()
self.dynamics: Dynamics = output['dynamics']
ckpt: dict = output['ckpt']
self._gstep: int = ckpt.get('gstep', ckpt.get('step', 0))
if self._is_orchestrator:
self.warning(f'Restoring global step from ckpt! self._gstep: {self._gstep}')
else:
self._gstep: int = 0
self.warning('Using `torch.optim.Adam` optimizer')
self._optimizer = torch.optim.Adam(self.dynamics.parameters(), lr=self.config.learning_rate.lr_init)
self.num_params = self.count_parameters(self.dynamics)
self.autocast_context_train = torch.autocast(dtype=self._dtype, device_type=self.device, enabled=((self._dtype != torch.float64) and (self.device != 'cpu')))
self.ds_config = {}
self.grad_scaler = None
self.dynamics_engine = None
if (self.config.backend == 'DDP'):
from torch.nn.parallel import DistributedDataParallel as DDP
self.optimizer = self._optimizer
find_unused_parameters = (str(self.config.dynamics.group).lower() == 'su3')
self.dynamics_engine = DDP(self.dynamics, find_unused_parameters=find_unused_parameters)
if (self._dtype != torch.float64):
self.grad_scaler = GradScaler()
elif (self.config.backend.lower() in ['ds', 'deepspeed']):
self._setup_deepspeed()
elif (self.config.backend.lower() in ['hvd', 'horovod']):
self._setup_horovod()
else:
self.optimizer = self._optimizer
logfreq = self.config.steps.log
log.warning(f'logging with freq {logfreq} for wandb.watch')
if (self.config.use_wandb and (wandb.run is not None)):
wandb.run.watch(self.dynamics, log='all', log_freq=logfreq)
assert (isinstance(self.dynamics, Dynamics) and isinstance(self.dynamics, nn.Module) and (str(self.config.dynamics.group).upper() in {'U1', 'SU3'}))
def count_parameters(self, model: Optional[nn.Module]=None) -> int:
model = (self.dynamics if (model is None) else model)
num_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
log.info(f'num_params in model: {num_params}')
if (self.config.init_wandb and (wandb.run is not None)):
wandb.run.config['NUM_PARAMS'] = num_params
return num_params
def _setup_deepspeed(self) -> None:
self.ds_config = self.prepare_ds_config()
if (self.dtype == torch.bfloat16):
log.warning('Using `bf16` in DeepSpeed config...')
self.ds_config |= {'bf16': {'enabled': True}}
self.dynamics = self.dynamics.to(torch.bfloat16)
if (self.dtype == torch.float16):
log.warning('Using `fp16` in DeepSpeed config...')
self.ds_config |= {'fp16': {'enabled': True}}
self.dynamics = self.dynamics.to(torch.float16)
if (self.rank == 0):
print_json(json.dumps(self.ds_config, indent=4))
        if ('optimizer' in self.ds_config):
(engine, optimizer, *_) = deepspeed.initialize(model=self.dynamics, config=self.ds_config, model_parameters=self.dynamics.parameters())
else:
(engine, optimizer, *_) = deepspeed.initialize(model=self.dynamics, config=self.ds_config, optimizer=self._optimizer, model_parameters=self.dynamics.parameters())
assert (engine is not None)
assert (optimizer is not None)
self.dynamics_engine = engine
self.optimizer = optimizer
self.device = self.dynamics_engine.local_rank
def _setup_horovod(self) -> None:
import horovod.torch as hvd
compression = (hvd.Compression.fp16 if (self.dtype in {*BF16_SYNONYMS, *FP16_SYNONYMS}) else hvd.Compression.none)
self.optimizer = hvd.DistributedOptimizer(self._optimizer, named_parameters=self.dynamics.named_parameters(), compression=compression)
hvd.broadcast_parameters(self.dynamics.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.optimizer, root_rank=0)
def prepare_ds_config(self) -> dict:
if (self.config.backend.lower() not in ['ds', 'deepspeed']):
return {}
ds_config = {}
assert (self.config.ds_config_path is not None)
ds_config = load_ds_config(self.config.ds_config_path)
self.info(f'Loaded DeepSpeed config from: {self.config.ds_config_path}')
pname = 'l2hmc-qcd'
if self.config.debug_mode:
pname += '-debug'
if self.config.init_wandb:
ds_config['wandb'].update({'enabled': True, 'project': pname, 'group': f'{self.config.framework}/{self.config.backend}'})
else:
ds_config['wandb'] = {}
opath = Path(os.getcwd()).joinpath('ds_outputs').resolve()
ds_config['csv_monitor'] = {'enabled': True, 'output_path': opath.joinpath('ds_csv_monitor').as_posix()}
ds_config.update({'gradient_accumulation_steps': 1, 'train_micro_batch_size_per_gpu': 1})
ds_config['train_batch_size'] = ((self.size * ds_config['gradient_accumulation_steps']) * ds_config['train_micro_batch_size_per_gpu'])
scheduler = ds_config.get('scheduler', None)
if (scheduler is not None):
sparams = scheduler.get('params', None)
if (sparams is not None):
ds_config['scheduler']['params'].update({'warmup_num_steps': self.config.steps.nepoch, 'total_num_steps': (self.config.steps.nera * self.config.steps.nepoch)})
zero_opt_config = ds_config.get('zero_optimization', None)
if (zero_opt_config is not None):
hostname = str(socket.gethostbyaddr(socket.gethostname())[0]).lower()
if hostname.startswith('thetagpu'):
nvme_path = Path('/raid/scratch/').resolve()
else:
nvme_path = Path('/local/scratch').resolve()
if nvme_path.exists():
nvme_path = nvme_path.as_posix()
self.info(f'[{hostname}] Setting NVMe path to: {nvme_path}')
zero_opt_config['offload_param']['nvme_path'] = nvme_path
zero_opt_config['offload_optimizer']['nvme_path'] = nvme_path
ds_config['zero_optimization'] = zero_opt_config
self.config.set_ds_config(ds_config)
self.ds_config = ds_config
return ds_config
def warning(self, s: str) -> None:
if self._is_orchestrator:
log.warning(s)
def info(self, s: str) -> None:
if self._is_orchestrator:
log.info(s)
def draw_x(self):
return self.g.random(list(self.config.dynamics.xshape)).flatten(1)
def draw_v(self):
return self.g.random_momentum(list(self.config.dynamics.xshape))
def reset_optimizer(self):
if self._is_orchestrator:
import horovod.torch as hvd
self.warning('Resetting optimizer state!')
self.optimizer.state = defaultdict(dict)
hvd.broadcast_optimizer_state(self.optimizer, root_rank=0)
def build_lattice(self):
group = str(self.config.dynamics.group).upper()
kwargs = {'nchains': self.config.dynamics.nchains, 'shape': list(self.config.dynamics.latvolume)}
if (group == 'U1'):
return LatticeU1(**kwargs)
if (group == 'SU3'):
c1 = (self.config.c1 if (self.config.c1 is not None) else 0.0)
return LatticeSU3(c1=c1, **kwargs)
raise ValueError('Unexpected value in `config.dynamics.group`')
def build_loss_fn(self) -> Callable:
assert isinstance(self.lattice, (LatticeU1, LatticeSU3))
return LatticeLoss(lattice=self.lattice, loss_config=self.config.loss)
def build_optimizer(self, dynamics: Optional[Dynamics]=None, build_networks: bool=True) -> torch.optim.Optimizer:
if (dynamics is None):
dynamics = self.build_dynamics(build_networks=build_networks)
assert (dynamics is not None)
return (torch.optim.Adam(dynamics.parameters(), lr=self.config.learning_rate.lr_init) if (self.config.dynamics.group == 'U1') else torch.optim.SGD(dynamics.parameters(), lr=self.config.learning_rate.lr_init))
def build_dynamics(self, build_networks: bool=True) -> Dynamics:
input_spec = self.get_input_spec()
net_factory = None
if build_networks:
net_factory = NetworkFactory(input_spec=input_spec, conv_config=self.config.conv, network_config=self.config.network, net_weights=self.config.net_weights)
dynamics = Dynamics(config=self.config.dynamics, potential_fn=self.lattice.action, network_factory=net_factory)
if torch.cuda.is_available():
dynamics.cuda()
return dynamics
def get_lr(self, step: int) -> float:
return self.config.learning_rate.lr_init
def build_lr_schedule(self):
return LambdaLR(optimizer=self.optimizer, lr_lambda=(lambda step: self.get_lr(step)))
def save_ckpt(self, era: int, epoch: int, metrics: Optional[dict]=None, run: Optional[Any]=None) -> None:
if ((self.rank != 0) or (not self.config.save)):
return
tstamp = get_timestamp('%Y-%m-%d-%H%M%S')
step = self._gstep
ckpt_file = self.ckpt_dir.joinpath(f'ckpt-{era}-{epoch}-{step}-{tstamp}.tar')
assert isinstance(self.dynamics.xeps, nn.ParameterList)
assert isinstance(self.dynamics.veps, nn.ParameterList)
xeps = [e.detach().cpu().numpy() for e in self.dynamics.xeps]
veps = [e.detach().cpu().numpy() for e in self.dynamics.veps]
ckpt = {'era': era, 'epoch': epoch, 'xeps': xeps, 'veps': veps, 'gstep': self._gstep, 'model_state_dict': self.dynamics.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict()}
if (metrics is not None):
ckpt.update(metrics)
torch.save(ckpt, ckpt_file)
modelfile = self.ckpt_dir.joinpath(f'model-{era}-{epoch}-{step}-{tstamp}.pth')
torch.save(self.dynamics.state_dict(), modelfile)
self.info(f'Saving checkpoint to: {ckpt_file.as_posix()}')
self.info(f'Saving modelfile to: {modelfile.as_posix()}')
if ((wandb.run is not None) and self.config.init_wandb):
artifact = wandb.Artifact('model', type='model')
artifact.add_file(modelfile.as_posix())
wandb.run.log_artifact(artifact)
def load_ckpt(self, dynamics: Optional[Dynamics]=None, optimizer: Optional[torch.optim.Optimizer]=None, build_networks: bool=True, era: Optional[int]=None, epoch: Optional[int]=None) -> dict:
if (dynamics is None):
dynamics = (self.dynamics if (self.dynamics is not None) else self.build_dynamics(build_networks=build_networks))
if (optimizer is None):
optimizer = (self.optimizer if (self.optimizer is not None) else self.build_optimizer())
output = {'dynamics': dynamics, 'optimizer': optimizer, 'ckpt': {}}
ckpts = [Path(self.ckpt_dir).joinpath(i) for i in os.listdir(self.ckpt_dir) if i.endswith('.tar')]
modelfiles = [Path(self.ckpt_dir).joinpath(i) for i in os.listdir(self.ckpt_dir) if i.endswith('.pth')]
self.info(f'''Looking for checkpoints in:
{self.ckpt_dir}''')
if (not ckpts):
self.warning('No checkpoints found to load from')
return output
ckpt_file = None
modelfile = None
if (era is not None):
cmatch = f'ckpt-{era}'
mmatch = f'model-{era}'
if (epoch is not None):
cmatch += f'-{epoch}'
mmatch += f'-{epoch}'
for ckpt in ckpts:
if (cmatch in ckpt.as_posix()):
ckpt_file = ckpt
for mfile in modelfiles:
if (mmatch in mfile.as_posix()):
modelfile = mfile
else:
ckpts = sorted(ckpts, key=(lambda t: os.stat(t).st_mtime))
mfiles = sorted(modelfiles, key=(lambda t: os.stat(t).st_mtime))
ckpt_file = ckpts[(- 1)]
modelfile = mfiles[(- 1)]
if (modelfile is not None):
self.info(f'Loading model from: {modelfile}')
dynamics.load_state_dict(torch.load(modelfile))
output['modelfile'] = modelfile
if (ckpt_file is not None):
ckpt_file = Path(self.ckpt_dir).joinpath(ckpt_file)
self.info(f'Loading checkpoint from: {ckpt_file}')
ckpt = torch.load(ckpt_file)
output['ckpt'] = ckpt
output['ckpt_file'] = ckpt_file
return output
def should_log(self, epoch):
return (((epoch % self.steps.log) == 0) and self._is_orchestrator)
def should_print(self, epoch):
return (((epoch % self.steps.print) == 0) and self._is_orchestrator)
def should_emit(self, epoch: int, nepoch: int) -> bool:
nprint = min(getattr(self.steps, 'print', int((nepoch // 10))), int((nepoch // 5)))
nlog = min(getattr(self.steps, 'log', int((nepoch // 4))), int((nepoch // 4)))
emit = (((epoch % nprint) == 0) or ((epoch % nlog) == 0))
return (self._is_orchestrator and emit)
def record_metrics(self, metrics: dict, job_type: str, step: Optional[int]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, model: Optional[(nn.Module | Dynamics)]=None, optimizer: Optional[Any]=None) -> tuple[(dict[(str, ScalarLike)], str)]:
assert (job_type in {'train', 'eval', 'hmc'})
if (step is None):
timer = self.timers.get(job_type, None)
if isinstance(timer, StepTimer):
step = timer.iterations
if (step is not None):
metrics[f'{job_type[0]}step'] = step
if ((job_type == 'train') and (step is not None)):
metrics['lr'] = self.get_lr(step)
if ((job_type == 'eval') and ('eps' in metrics)):
_ = metrics.pop('eps', None)
metrics.update(self.metrics_to_numpy(metrics))
avgs = self.histories[job_type].update(metrics)
summary = summarize_dict(avgs)
metrics |= {'xeps': torch.tensor(self.dynamics.xeps), 'veps': torch.tensor(self.dynamics.veps)}
metrics |= {f'{k}/avg': v for (k, v) in avgs.items()}
if ((step is not None) and (writer is not None)):
assert (step is not None)
update_summaries(step=step, writer=writer, metrics=metrics, prefix=job_type, optimizer=optimizer, use_tb=self.config.use_tb, use_wandb=(self.config.use_wandb and self.config.init_wandb))
writer.flush()
if (self.config.init_aim or self.config.init_wandb):
self.track_metrics(record=metrics, avgs=avgs, job_type=job_type, step=step, run=run, arun=arun)
return (avgs, summary)
def track_metrics(self, record: dict[(str, (((torch.Tensor | np.ndarray) | list) | ScalarLike))], avgs: dict[(str, ScalarLike)], job_type: str, step: Optional[int], run: Optional[Any]=None, arun: Optional[Any]=None) -> None:
if (self.local_rank != 0):
return
dQdict = None
dQint = record.get('dQint', None)
if (dQint is not None):
dQdict = {f'dQint/{job_type}': {'val': dQint, 'step': step, 'avg': dQint.mean()}}
if ((wandb.run is not None) and self.config.init_wandb):
wandb.run.log(dQdict, commit=False)
if (arun is not None):
from aim import Distribution
kwargs = {'step': step, 'job_type': job_type, 'arun': arun}
try:
self.aim_track(avgs, prefix='avgs', **kwargs)
self.aim_track(record, prefix='record', **kwargs)
if (dQdict is not None):
self.aim_track({'dQint': dQint}, prefix='dQ', **kwargs)
except ValueError:
self.warning('Unable to track record with aim, skipping!')
def profile_step(self, inputs: tuple[(Tensor, Tensor)]) -> tuple[(Tensor, dict)]:
(xinit, beta) = inputs
assert isinstance(self.dynamics, Dynamics)
assert isinstance(self.config, ExperimentConfig)
try:
self.optimizer.zero_grad()
except Exception:
pass
xinit = self.g.compat_proj(xinit)
if (self.dynamics_engine is not None):
(xout, metrics) = self.dynamics_engine((xinit, beta))
else:
(xout, metrics) = self.dynamics((xinit, beta))
xout = self.g.compat_proj(xout)
xprop = self.g.compat_proj(metrics.pop('mc_states').proposed.x)
beta = beta
loss = self.loss_fn(xinit, x_prop=xprop, acc=metrics['acc'])
loss.backward()
if (self.config.learning_rate.clip_norm > 0.0):
            torch.nn.utils.clip_grad.clip_grad_norm_(self.dynamics.parameters(), self.config.learning_rate.clip_norm)
self.optimizer.step()
return (xout.detach(), metrics)
def profile(self, nsteps: int=5) -> dict:
assert isinstance(self.dynamics, Dynamics)
self.dynamics.train()
x = self.draw_x()
beta = torch.tensor(1.0)
metrics = {}
for _ in range(nsteps):
(x, metrics) = self.profile_step((x, beta))
return metrics
def hmc_step(self, inputs: tuple[(Tensor, (float | Tensor))], eps: Optional[float]=None, nleapfrog: Optional[int]=None) -> tuple[(Tensor, dict)]:
self.dynamics.eval()
(xi, beta) = inputs
beta = (torch.tensor(beta) if isinstance(beta, float) else beta)
assert isinstance(beta, Tensor)
beta = beta.to(self.device)
xi = self.g.compat_proj(self.dynamics.unflatten(xi.to(self.device)))
(xo, metrics) = self.dynamics.apply_transition_hmc((xi, beta), eps=eps, nleapfrog=nleapfrog)
xp = metrics.pop('mc_states').proposed.x
loss = self.loss_fn(x_init=xi, x_prop=xp, acc=metrics['acc'])
if self.config.dynamics.verbose:
lmetrics = self.loss_fn.lattice_metrics(xinit=xi, xout=xo)
metrics.update(lmetrics)
metrics.update({'loss': loss.item()})
self.dynamics.train()
self._hstep += 1
return (xo.detach(), metrics)
def eval_step(self, inputs: tuple[(Tensor, float)]) -> tuple[(Tensor, dict)]:
self.dynamics.eval()
(xinit, beta) = inputs
beta = torch.tensor(beta).to(self.device)
xinit = self.g.compat_proj(self.dynamics.unflatten(xinit.to(self.device)))
(xout, metrics) = self.dynamics((xinit, beta))
xprop = metrics.pop('mc_states').proposed.x
loss = self.loss_fn(x_init=xinit, x_prop=xprop, acc=metrics['acc'])
if self.config.dynamics.verbose:
lmetrics = self.loss_fn.lattice_metrics(xinit=xinit, xout=xout)
metrics.update(lmetrics)
metrics.update({'loss': loss.item()})
self.dynamics.train()
self._estep += 1
return (xout.detach(), metrics)
def get_context_manager(self, renderable: ConsoleRenderable) -> (Live | nullcontext):
return nullcontext()
def get_printer(self, job_type: str) -> None:
return None
def _setup_eval(self, beta: Optional[float]=None, eval_steps: Optional[int]=None, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, run: Optional[Any]=None, job_type: Optional[str]='eval', nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None, nprint: Optional[int]=None) -> dict:
assert (job_type in ['eval', 'hmc'])
if isinstance(skip, str):
skip = [skip]
if (beta is None):
beta = self.config.annealing_schedule.beta_final
if ((nleapfrog is None) and (str(job_type).lower() == 'hmc')):
nleapfrog = int(self.config.dynamics.nleapfrog)
if self.config.dynamics.merge_directions:
nleapfrog *= 2
if ((eps is None) and (str(job_type).lower() == 'hmc')):
eps = self.config.dynamics.eps_hmc
self.warning(f'Step size `eps` not specified for HMC! Using default: {eps:.4f} for generic HMC')
if (x is None):
x = self.lattice.random()
self.warning(f'x.shape (original): {x.shape}')
if (nchains is not None):
if (isinstance(nchains, int) and (nchains > 0)):
x = x[:nchains]
assert isinstance(x, Tensor)
if (nchains is not None):
self.warning(f'x[:nchains].shape: {x.shape}')
table = Table(row_styles=['dim', 'none'], expand=True)
eval_steps = (self.steps.test if (eval_steps is None) else eval_steps)
assert isinstance(eval_steps, int)
nprint = (max(1, min(50, (eval_steps // 50))) if (nprint is None) else nprint)
nlog = max((1, min((10, eval_steps))))
if (nlog <= eval_steps):
nlog = min(10, max(1, (eval_steps // 100)))
if (run is not None):
run.config.update({job_type: {'beta': beta, 'xshape': x.shape}})
assert isinstance(x, Tensor)
assert isinstance(beta, float)
assert isinstance(nlog, int)
assert isinstance(nprint, int)
assert isinstance(eval_steps, int)
output = {'x': x, 'eps': eps, 'beta': beta, 'nlog': nlog, 'table': table, 'nprint': nprint, 'eval_steps': eval_steps, 'nleapfrog': nleapfrog}
log.info('\n'.join([f'{k}={v}' for (k, v) in output.items() if (k != 'x')]))
return output
def eval(self, beta: Optional[float]=None, eval_steps: Optional[int]=None, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, job_type: Optional[str]='eval', nchains: Optional[int]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None, dynamic_step_size: Optional[bool]=None, nprint: Optional[int]=None, make_plots: bool=True) -> dict:
assert (job_type in ['eval', 'hmc'])
tables = {}
summaries = []
patience = 5
stuck_counter = 0
setup = self._setup_eval(x=x, run=run, beta=beta, eps=eps, nleapfrog=nleapfrog, skip=skip, nchains=nchains, job_type=job_type, eval_steps=eval_steps, nprint=nprint)
x = setup['x']
eps = setup['eps']
beta = setup['beta']
table = setup['table']
panel = Panel(table)
ctx = self.get_context_manager(panel)
nleapfrog = setup['nleapfrog']
eval_steps = setup['eval_steps']
assert ((x is not None) and (beta is not None))
nlog = setup.get('nlog', self.config.steps.log)
nprint = setup.get('nprint', self.config.steps.print)
timer = self.timers[job_type]
history = self.histories[job_type]
assert ((eval_steps is not None) and (timer is not None) and (history is not None) and (nlog is not None) and (nprint is not None))
device_type = ('cuda' if WITH_CUDA else 'cpu')
if (device_type == 'cuda'):
fpctx = torch.autocast(device_type=device_type)
else:
fpctx = nullcontext()
def eval_fn(z):
with fpctx:
if (job_type == 'hmc'):
assert (eps is not None)
return self.hmc_step(z, eps=eps, nleapfrog=nleapfrog)
return self.eval_step(z)
def refresh_view():
if isinstance(ctx, Live):
ctx.refresh()
def _should_emit(step):
return (((step % nlog) == 0) or ((step % nprint) == 0))
plots = None
if (is_interactive() and make_plots):
plots = plotter.init_plots()
self.dynamics.eval()
with ctx:
x = self.warmup(beta=beta, x=x)
for step in range(eval_steps):
timer.start()
(x, metrics) = eval_fn((x, beta))
dt = timer.stop()
if _should_emit(step):
record = {f'{job_type[0]}step': step, 'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None)}
record.update(metrics)
(avgs, summary) = self.record_metrics(run=run, arun=arun, step=step, writer=writer, metrics=record, job_type=job_type)
summaries.append(summary)
table = self.update_table(table=table, step=step, avgs=avgs)
if ((step % nprint) == 0):
log.info(summary)
refresh_view()
if (avgs.get('acc', 1.0) < 1e-05):
if (stuck_counter < patience):
stuck_counter += 1
else:
self.warning('Chains are stuck! Redrawing x')
x = self.lattice.random()
stuck_counter = 0
if ((job_type == 'hmc') and dynamic_step_size):
acc = record.get('acc_mask', None)
record['eps'] = eps
if ((acc is not None) and (eps is not None)):
acc_avg = acc.mean()
if (acc_avg < 0.66):
eps -= (eps / 10.0)
else:
eps += (eps / 10.0)
if (is_interactive() and self._is_orchestrator and (plots is not None)):
if (len(self.histories[job_type].history.keys()) == 0):
plotter.update_plots(history=metrics, plots=plots, logging_steps=nlog)
else:
plotter.update_plots(history=self.histories[job_type].history, plots=plots, logging_steps=nlog)
if isinstance(ctx, Live):
ctx.console.clear_live()
tables[str(0)] = setup['table']
self.dynamics.train()
return {'timer': timer, 'history': history, 'summaries': summaries, 'tables': tables}
def calc_loss(self, xinit: torch.Tensor, xprop: torch.Tensor, acc: torch.Tensor) -> torch.Tensor:
return self.loss_fn(xinit, xprop, acc)
def forward_step(self, x: torch.Tensor, beta: torch.Tensor) -> tuple[(torch.Tensor, dict)]:
x.requires_grad_(True)
try:
self.optimizer.zero_grad()
except Exception:
pass
with self.autocast_context_train:
if (self.dynamics_engine is not None):
(xout, metrics) = self.dynamics_engine((x, beta))
else:
(xout, metrics) = self.dynamics((x, beta))
self._fstep += 1
return (xout, metrics)
def backward_step(self, loss: torch.Tensor) -> torch.Tensor:
if ((self.config.backend.lower() in ['ds', 'deepspeed']) and (self.dynamics_engine is not None)):
self.dynamics_engine.backward(loss)
self.dynamics_engine.step()
elif (self.grad_scaler is None):
loss.backward()
if (self.config.learning_rate.clip_norm > 0.0):
                torch.nn.utils.clip_grad.clip_grad_norm_(parameters=self.dynamics.parameters(), max_norm=self.clip_norm)
self.optimizer.step()
else:
self.grad_scaler.scale(loss).backward()
self.grad_scaler.unscale_(self.optimizer)
if (self.config.learning_rate.clip_norm > 0):
                torch.nn.utils.clip_grad.clip_grad_norm_(parameters=self.dynamics.parameters(), max_norm=self.config.learning_rate.clip_norm)
self.grad_scaler.step(self.optimizer)
self.grad_scaler.update()
self._bstep += 1
return loss
def train_step(self, inputs: tuple[(Tensor, (Tensor | float))]) -> tuple[(Tensor, dict)]:
(xinit, beta) = inputs
xinit = self.g.compat_proj(xinit.reshape(self.xshape))
beta = (torch.tensor(beta) if isinstance(beta, float) else beta)
assert isinstance(beta, Tensor)
(xout, metrics) = self.forward_step(x=xinit, beta=beta)
xprop = metrics.pop('mc_states').proposed.x
loss = self.calc_loss(xinit=xinit, xprop=xprop, acc=metrics['acc'])
aux_loss = 0.0
if ((aw := self.config.loss.aux_weight) > 0):
yinit = self.dynamics.unflatten(self.g.random(xinit.shape).to(self.device))
(_, metrics_) = self.forward_step(x=yinit, beta=beta)
yprop = metrics_.pop('mc_states').proposed.x
            aux_loss = (aw * self.calc_loss(xinit=yinit, xprop=yprop, acc=metrics_['acc']))  # weight the auxiliary term once by aux_weight
loss_tot = (loss + aux_loss)
loss = self.backward_step(loss_tot)
if isinstance(loss_tot, Tensor):
loss_tot = loss_tot.item()
metrics['loss'] = loss_tot
if self.config.dynamics.verbose:
with torch.no_grad():
lmetrics = self.loss_fn.lattice_metrics(xinit=xinit, xout=xout)
metrics.update(lmetrics)
self._gstep += 1
return (xout.detach(), metrics)
def train_step_detailed(self, x: Optional[Tensor]=None, beta: Optional[(Tensor | float)]=None, era: int=0, epoch: int=0, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, rows: Optional[dict]=None, summaries: Optional[list]=None, verbose: bool=True) -> tuple[(Tensor, dict)]:
if (x is None):
x = self.dynamics.lattice.random()
if (beta is None):
beta = self.config.annealing_schedule.beta_init
if isinstance(beta, float):
beta = torch.tensor(beta).to(self.device)
self.timers['train'].start()
(xout, metrics) = self.train_step((x, beta))
dt = self.timers['train'].stop()
record = {'era': era, 'epoch': epoch, 'tstep': self._gstep, 'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None), **metrics}
(avgs, summary) = self.record_metrics(run=run, arun=arun, step=self._gstep, writer=writer, metrics=record, job_type='train', model=self.dynamics, optimizer=self.optimizer)
if (rows is not None):
rows[self._gstep] = avgs
if (summaries is not None):
summaries.append(summary)
if verbose:
log.info(summary)
self._gstep += 1
return (xout.detach(), record)
def eval_step_detailed(self, job_type: str, x: Optional[Tensor]=None, beta: Optional[float]=None, verbose: bool=True) -> tuple[(Tensor, dict)]:
if (x is None):
x = self.dynamics.lattice.random()
if (beta is None):
beta = self.config.annealing_schedule.beta_init
self.timers[job_type].start()
if (job_type == 'eval'):
(xout, metrics) = self.eval_step((x, beta))
elif (job_type == 'hmc'):
(xout, metrics) = self.hmc_step((x, beta))
else:
raise ValueError(f'Job type should be eval or hmc, got: {job_type}')
dt = self.timers[job_type].stop()
record = {'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None), **metrics}
(_, summary) = self.record_metrics(step=self._gstep, metrics=record, job_type=job_type)
if verbose:
log.info(summary)
self._estep += 1
return (xout, record)
def train_epoch(self, x: Tensor, beta: (float | Tensor), era: Optional[int]=None, run: Optional[Any]=None, arun: Optional[Any]=None, nepoch: Optional[int]=None, writer: Optional[Any]=None, extend: int=1, nprint: Optional[int]=None, nlog: Optional[int]=None, warmup: bool=True, plots: Optional[Any]=None) -> tuple[(Tensor, dict)]:
self.dynamics.train()
rows = {}
summaries = []
extend = (1 if (extend is None) else extend)
table = Table(expand=True, box=box.HORIZONTALS, row_styles=['dim', 'none'])
panel = Panel(table)
nepoch = (self.steps.nepoch if (nepoch is None) else nepoch)
assert isinstance(nepoch, int)
nepoch *= extend
losses = []
ctx = self.get_context_manager(panel)
log_freq = (self.steps.log if (nlog is None) else nlog)
print_freq = (self.steps.print if (nprint is None) else nprint)
assert ((log_freq is not None) and isinstance(log_freq, int))
assert ((print_freq is not None) and isinstance(print_freq, int))
def should_print(epoch):
return (self._is_orchestrator and ((epoch % print_freq) == 0))
def should_log(epoch):
return (self._is_orchestrator and ((epoch % log_freq) == 0))
def refresh_view():
if isinstance(ctx, Live):
ctx.refresh()
patience = 10
stuck_iters = 0
with ctx:
if warmup:
wt0 = time.perf_counter()
x = self.warmup(beta=beta, x=x)
self.info(f'Thermalizing configs {beta:.2f} took {(time.perf_counter() - wt0):.4f} s')
summary = ''
for epoch in range(nepoch):
self.timers['train'].start()
(x, metrics) = self.train_step((x, beta))
dt = self.timers['train'].stop()
losses.append(metrics['loss'])
if should_log(epoch):
record = {'era': era, 'epoch': epoch, 'tstep': self._gstep, 'dt': dt, 'beta': beta, 'loss': metrics.pop('loss', None), 'dQsin': metrics.pop('dQsin', None), 'dQint': metrics.pop('dQint', None)}
record.update(metrics)
(avgs, summary) = self.record_metrics(run=run, arun=arun, step=self._gstep, writer=writer, metrics=record, job_type='train', model=self.dynamics, optimizer=self.optimizer)
rows[self._gstep] = avgs
summaries.append(summary)
table = self.update_table(table=table, avgs=avgs, step=epoch)
if (avgs.get('acc', 1.0) < 1e-05):
if (stuck_iters < patience):
stuck_iters += 1
else:
self.warning('Chains are stuck! Redrawing x')
x = self.lattice.random()
stuck_iters = 0
refresh_view()
if (is_interactive() and self._is_orchestrator and (plots is not None)):
if (len(self.histories['train'].history.keys()) == 0):
plotter.update_plots(metrics, plots, logging_steps=log_freq)
else:
plotter.update_plots(self.histories['train'].history, plots=plots, logging_steps=log_freq)
if should_print(epoch):
refresh_view()
log.info(summary)
if isinstance(ctx, Live):
ctx.console.clear_live()
data = {'rows': rows, 'table': table, 'losses': losses, 'summaries': summaries}
return (x, data)
def _setup_training(self, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, train_dir: Optional[os.PathLike]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None) -> dict:
skip = ([skip] if isinstance(skip, str) else skip)
train_dir = (Path(os.getcwd()).joinpath(self._created, 'train') if (train_dir is None) else Path(train_dir))
train_dir.mkdir(exist_ok=True, parents=True)
if (x is None):
x = self.g.random(list(self.xshape)).flatten(1)
nera = (self.config.steps.nera if (nera is None) else nera)
nepoch = (self.config.steps.nepoch if (nepoch is None) else nepoch)
assert ((nera is not None) and isinstance(nera, int))
assert ((nepoch is not None) and isinstance(nepoch, int))
if (beta is None):
betas = self.config.annealing_schedule.setup(nera=nera, nepoch=nepoch)
elif isinstance(beta, (list, np.ndarray)):
nera = len(beta)
betas = {f'{i}': b for (i, b) in zip(range(nera), beta)}
elif isinstance(beta, (int, float)):
betas = {f'{i}': b for (i, b) in zip(range(nera), (nera * [beta]))}
elif isinstance(beta, dict):
nera = len(list(beta.keys()))
betas = {f'{i}': b for (i, b) in beta.items()}
else:
raise TypeError(f'Expected `beta` to be one of: `float, list, dict`, received: {type(beta)}')
beta_final = list(betas.values())[(- 1)]
assert ((beta_final is not None) and isinstance(beta_final, float))
return {'x': x, 'nera': nera, 'nepoch': nepoch, 'betas': betas, 'train_dir': train_dir, 'beta_final': beta_final}
def warmup(self, beta: (float | torch.Tensor), nsteps: int=100, tol: float=1e-05, x: Optional[Tensor]=None, nchains: Optional[int]=None) -> Tensor:
self.dynamics.eval()
if (x is None):
x = self.dynamics.lattice.random().to(self.device)
if (nchains is not None):
x = x[:nchains]
if isinstance(beta, float):
beta = torch.tensor(beta).to(self.device)
pexact = (plaq_exact(beta).to(self.device).to(self._dtype) if (self.config.dynamics.group == 'U1') else None)
for step in range(nsteps):
(x, metrics) = self.hmc_step((x, beta))
plaqs = metrics.get('plaqs', None)
assert ((x is not None) and isinstance(x, Tensor))
if ((plaqs is not None) and (pexact is not None)):
pdiff = (plaqs - pexact).abs().sum()
if (pdiff < tol):
log.warning(f'Chains thermalized! step: {step}, plaq_diff: {pdiff:.4f}')
return x
if ((nsteps > 100) and ((step % 10) == 0) and self._is_orchestrator):
log.info(f'(warm-up) step: {step}, plaqs: {plaqs.mean():.4f}')
self.dynamics.train()
return x
def train(self, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, train_dir: Optional[os.PathLike]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, nprint: Optional[int]=None, nlog: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None, warmup: bool=True, make_plots: bool=True) -> dict:
self.dynamics.train()
setup = self._setup_training(x=x, skip=skip, train_dir=train_dir, nera=nera, nepoch=nepoch, beta=beta)
era = 0
epoch = 0
extend = 1
x = setup['x']
nera = setup['nera']
betas = setup['betas']
nepoch = setup['nepoch']
train_dir = setup['train_dir']
beta_final: float = setup['beta_final']
assert ((x is not None) and isinstance(x, Tensor))
assert (nera is not None)
assert (train_dir is not None)
plots = (plotter.init_plots() if (is_interactive() and make_plots) else None)
for era in range(nera):
b = torch.tensor(betas.get(str(era), beta_final))
if ((era == (nera - 1)) and (self.steps.extend_last_era is not None)):
extend = int(self.steps.extend_last_era)
if self._is_orchestrator:
if ((era > 1) and (str((era - 1)) in self.summaries['train'])):
esummary = self.histories['train'].era_summary(f'{(era - 1)}')
log.info(f'''Avgs over last era:
{esummary}
''')
box_header(f'ERA: {era} / {nera}, BETA: {b:.3f}')
epoch_start = time.time()
(x, edata) = self.train_epoch(x=x, beta=b, era=era, run=run, arun=arun, writer=writer, extend=extend, nepoch=nepoch, nprint=nprint, nlog=nlog, warmup=warmup, plots=plots)
self.rows['train'][str(era)] = edata['rows']
self.tables['train'][str(era)] = edata['table']
self.summaries['train'][str(era)] = edata['summaries']
losses = torch.Tensor(list(edata['losses'][1:]))
if self.config.annealing_schedule.dynamic:
dy_avg = (losses[1:] - losses[:(- 1)]).mean().item()
if (dy_avg > 0):
b -= (b / 10.0)
else:
b += (b / 10.0)
if (self._is_orchestrator and self.config.save):
st0 = time.time()
self.save_ckpt(era, epoch, run=run)
log.info(f'Saving took: {(time.time() - st0):<5g}s')
log.info(f'Era {era} took: {(time.time() - epoch_start):<5g}s')
return {'timer': self.timers['train'], 'rows': self.rows['train'], 'summaries': self.summaries['train'], 'history': self.histories['train'], 'tables': self.tables['train']}
def train_dynamic(self, x: Optional[Tensor]=None, skip: Optional[(str | Sequence[str])]=None, train_dir: Optional[os.PathLike]=None, run: Optional[Any]=None, arun: Optional[Any]=None, writer: Optional[Any]=None, nera: Optional[int]=None, nepoch: Optional[int]=None, beta: Optional[((float | Sequence[float]) | dict[(str, float)])]=None) -> dict:
self.dynamics.train()
setup = self._setup_training(x=x, skip=skip, train_dir=train_dir, nera=nera, nepoch=nepoch, beta=beta)
era = 0
epoch = 0
extend = 1
x = setup['x']
nera = setup['nera']
betas = setup['betas']
nepoch = setup['nepoch']
train_dir = setup['train_dir']
beta_final = setup['beta_final']
b = torch.tensor(betas.get(str(era), beta_final))
assert (x is not None)
assert (nera is not None)
assert (train_dir is not None)
while (b < beta_final):
if ((era == (nera - 1)) and (self.steps.extend_last_era is not None)):
extend = int(self.steps.extend_last_era)
if self._is_orchestrator:
if ((era > 1) and (str((era - 1)) in self.summaries['train'])):
esummary = self.histories['train'].era_summary(f'{(era - 1)}')
log.info(f'''Avgs over last era:
{esummary}
''')
box_header(f'ERA: {era} / {nera}, BETA: {b:.3f}')
epoch_start = time.time()
(x, edata) = self.train_epoch(x=x, beta=b, era=era, run=run, arun=arun, writer=writer, extend=extend, nepoch=nepoch)
st0 = time.time()
            losses = torch.Tensor(list(edata['losses'][1:]))
if self.config.annealing_schedule.dynamic:
dy_avg = (losses[1:] - losses[:(- 1)]).mean().item()
if (dy_avg > 0):
b -= (b / 10.0)
else:
b += (b / 10.0)
self.rows['train'][str(era)] = edata['rows']
self.tables['train'][str(era)] = edata['table']
self.summaries['train'][str(era)] = edata['summaries']
if (((era + 1) == nera) or ((((era + 1) % 5) == 0) and self.config.save)):
self.save_ckpt(era, epoch, run=run)
if self._is_orchestrator:
log.info(f'Saving took: {(time.time() - st0):<5g}s')
log.info(f'Era {era} took: {(time.time() - epoch_start):<5g}s')
era += 1
return {'timer': self.timers['train'], 'rows': self.rows['train'], 'summaries': self.summaries['train'], 'history': self.histories['train'], 'tables': self.tables['train']}
def metric_to_numpy(self, metric: ((((Tensor | list) | np.ndarray) | float) | None)) -> np.ndarray:
if isinstance(metric, float):
return np.array(metric)
if isinstance(metric, list):
if isinstance(metric[0], Tensor):
metric = torch.stack(metric)
elif isinstance(metric[0], np.ndarray):
metric = np.stack(metric)
else:
raise ValueError(f'Unexpected value encountered: {type(metric)}')
if (not isinstance(metric, Tensor)):
try:
metric = torch.Tensor(metric)
except TypeError:
metric = torch.tensor(0.0)
return metric.to(torch.float32).detach().cpu().numpy()
def aim_track(self, metrics: dict, step: int, job_type: str, arun: aim.Run, prefix: Optional[str]=None) -> None:
context = {'subset': job_type}
for (key, val) in metrics.items():
name = (f'{prefix}/{key}' if (prefix is not None) else f'{key}')
if isinstance(val, dict):
for (k, v) in val.items():
self.aim_track(v, step=step, arun=arun, job_type=job_type, prefix=f'{name}/{k}')
if isinstance(val, (Tensor, np.ndarray)):
if (len(val.shape) > 1):
dist = Distribution(val)
arun.track(dist, step=step, name=name, context=context)
arun.track(val.mean(), step=step, name=f'{name}/avg', context=context)
else:
arun.track(val, name=name, step=step, context=context)
def print_weights(self, grab: bool=True):
_ = print_dict(dict(self.dynamics.named_parameters()), grab=grab)
def print_grads(self, grab: bool=True):
_ = print_dict({k: v.grad for (k, v) in self.dynamics.named_parameters()}, grab=grab)
def print_grads_and_weights(self, grab: bool=True):
log.info((80 * '-'))
log.info('GRADS:')
self.print_grads(grab=grab)
log.info((80 * '-'))
log.info('WEIGHTS:')
self.print_weights(grab=grab)
log.info((80 * '-')) |
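# Added driver sketch (hypothetical config): the intended call sequence for
# the Trainer above -- annealed training, then evaluation and an HMC baseline.
# trainer = Trainer(cfg)                  # cfg: DictConfig | ExperimentConfig
# out = trainer.train()                   # anneals beta per the schedule
# trainer.eval(job_type='eval')           # evaluate the trained sampler
# trainer.eval(job_type='hmc', eps=0.1)   # generic HMC for comparison |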
def add_vehicle(traci, veh_id, route_id, route_edge_ids: List[str], type_id, depart_pos, depart_lane, depart_speed):
traci.route.add(route_id, route_edge_ids)
logging.debug(f'Added route {route_id} with edge_ids {route_edge_ids}...')
traci.vehicle.add(veh_id, route_id, type_id, departPos=depart_pos, departLane=depart_lane, departSpeed=depart_speed)
logging.debug(f'Added vehicle {veh_id} of type {type_id} with route {route_id} with params ({depart_pos}, {depart_lane}, {depart_speed})...') |
def test_get_visual_block_pipeline():
pipe = Pipeline([('imputer', SimpleImputer()), ('do_nothing', 'passthrough'), ('do_nothing_more', None), ('classifier', LogisticRegression())])
est_html_info = _get_visual_block(pipe)
assert (est_html_info.kind == 'serial')
assert (est_html_info.estimators == tuple((step[1] for step in pipe.steps)))
assert (est_html_info.names == ['imputer: SimpleImputer', 'do_nothing: passthrough', 'do_nothing_more: passthrough', 'classifier: LogisticRegression'])
assert (est_html_info.name_details == [str(est) for (_, est) in pipe.steps]) |
def s_to_speaker(span, speakers):
if (speakers[span.i1] == speakers[span.i2]):
return speakers[span.i1]
return None |
def get_tgt_model(args, root, sample_shape, num_classes, loss, add_loss=False, use_determined=False, context=None, opid=0):
(src_train_loader, _, _, _, _, _, _) = get_data(root, args.embedder_dataset, args.batch_size, False, maxsize=5000)
if (len(sample_shape) == 4):
IMG_SIZE = (224 if ((args.weight == 'tiny') or (args.weight == 'base')) else 196)
src_model = wrapper2D(sample_shape, num_classes, use_embedder=False, weight=args.weight, train_epoch=args.embedder_epochs, activation=args.activation, drop_out=args.drop_out)
src_model = src_model.to(args.device).eval()
src_feats = []
src_ys = []
for (i, data) in enumerate(src_train_loader):
(x_, y_) = data
x_ = x_.to(args.device)
x_ = transforms.Resize((IMG_SIZE, IMG_SIZE))(x_)
out = src_model(x_)
if (len(out.shape) > 2):
out = out.mean(1)
src_ys.append(y_.detach().cpu())
src_feats.append(out.detach().cpu())
src_feats = torch.cat(src_feats, 0)
src_ys = torch.cat(src_ys, 0).long()
src_train_dataset = torch.utils.data.TensorDataset(src_feats, src_ys)
del src_model
else:
(src_feats, src_ys) = (src_train_loader.dataset.tensors[0].mean(1), src_train_loader.dataset.tensors[1])
src_train_dataset = torch.utils.data.TensorDataset(src_feats, src_ys)
(tgt_train_loader, _, _, n_train, _, _, data_kwargs) = get_data(root, args.dataset, args.batch_size, False, get_shape=True)
transform = (data_kwargs['transform'] if ((data_kwargs is not None) and ('transform' in data_kwargs)) else None)
if args.infer_label:
(tgt_train_loader, num_classes_new) = infer_labels(tgt_train_loader)
else:
num_classes_new = num_classes
print('src feat shape', src_feats.shape, src_ys.shape, 'num classes', num_classes_new)
(tgt_train_loaders, tgt_class_weights) = load_by_class(tgt_train_loader, num_classes_new)
wrapper_func = (wrapper1D if (len(sample_shape) == 3) else wrapper2D)
tgt_model = wrapper_func(sample_shape, num_classes, weight=args.weight, train_epoch=args.embedder_epochs, activation=args.activation, target_seq_len=args.target_seq_len, drop_out=args.drop_out)
tgt_model = tgt_model.to(args.device).train()
(args, tgt_model, tgt_model_optimizer, tgt_model_scheduler) = get_optimizer_scheduler(args, tgt_model, module='embedder')
tgt_model_optimizer.zero_grad()
if (args.objective == 'otdd-exact'):
score_func = partial(otdd, src_train_dataset=src_train_dataset, exact=True)
elif (args.objective == 'otdd-gaussian'):
score_func = partial(otdd, src_train_dataset=src_train_dataset, exact=False)
elif (args.objective == 'l2'):
score_func = partial(l2, src_train_dataset=src_train_dataset)
else:
score_func = MMD_loss(src_data=src_feats, maxsamples=args.maxsamples)
score = 0
(total_losses, times, embedder_stats) = ([], [], [])
for ep in range(args.embedder_epochs):
total_loss = 0
time_start = default_timer()
for i in np.random.permutation(num_classes_new):
feats = []
datanum = 0
for (j, data) in enumerate(tgt_train_loaders[i]):
if (transform is not None):
(x, y, z) = data
else:
(x, y) = data
x = x.to(args.device)
out = tgt_model(x)
feats.append(out)
datanum += x.shape[0]
if (datanum > args.maxsamples):
break
feats = torch.cat(feats, 0).mean(1)
if (feats.shape[0] > 1):
loss = (tgt_class_weights[i] * score_func(feats))
loss.backward()
total_loss += loss.item()
time_end = default_timer()
times.append((time_end - time_start))
total_losses.append(total_loss)
embedder_stats.append([total_losses[(- 1)], times[(- 1)]])
print('[train embedder', ep, ('%.6f' % tgt_model_optimizer.param_groups[0]['lr']), '] time elapsed:', ('%.4f' % times[(- 1)]), '\totdd loss:', ('%.4f' % total_losses[(- 1)]))
tgt_model_optimizer.step()
tgt_model_scheduler.step()
tgt_model_optimizer.zero_grad()
del tgt_train_loader, tgt_train_loaders
torch.cuda.empty_cache()
tgt_model.output_raw = False
return (tgt_model, embedder_stats) |
def dict_val(metric_dict):
out = {}
for (k, v) in metric_dict.items():
out[k] = v.val
return out |
def PrimitiveGroups(d=None):
if (d is None):
return PrimitiveGroupsAll()
else:
d = Integer(d)
if (d < 0):
raise ValueError('a primitive group acts on a non negative integer number of positions')
return PrimitiveGroupsOfDegree(d) |
class OntologyLabelingFunction(LabelingFunction):
def __init__(self, name: str, ontology: Dict[(str, np.array)], case_sensitive: bool=False, max_ngrams: int=8, stopwords=None) -> None:
super().__init__(name, None)
self.max_ngrams = max_ngrams
self.case_sensitive = case_sensitive
self.stopwords = ({} if (not stopwords) else stopwords)
self._labels = {}
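        # Terms with a uniform class distribution carry no signal, so map them to None (abstain); otherwise label = argmax + 1.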
for (term, proba) in ontology.items():
self._labels[term] = (None if np.all((proba == (1.0 / len(proba)))) else int((np.argmax(proba) + 1)))
self.ontology = frozenset(ontology)
def _get_term_label(self, term):
for key in [term, term.lower(), term.rstrip('s'), (term + 's')]:
if (key in self.stopwords):
return self.stopwords[key]
if (key in self._labels):
return self._labels[key]
return None
def __call__(self, sentence: Sentence) -> Dict[(int, int)]:
matches = apply_matcher(sentence.words, sentence.char_offsets, self.ontology, max_ngrams=self.max_ngrams, longest_match_only=True, case_sensitive=self.case_sensitive)
        matches = sorted(matches, key=(lambda x: x[0]), reverse=False)
L = {}
for ((char_start, char_end), term) in matches:
label = self._get_term_label(term)
if (not label):
continue
(start, end) = get_word_index_span((char_start, (char_end - 1)), sentence)
for i in range(start, (end + 1)):
L[i] = label
return L |
class TDAmeritradeGetBalance(VirtualFunctionTool):
name = 'TDAmeritradeGetBalance'
summary = 'Retrieve the balance of an account that belongs to the User.'
parameters: List[ArgParameter] = [{'name': 'account', 'type': 'string', 'description': "The account type, one of ['self-directed TFSA', 'self-directed non-registered'].", 'required': True}]
returns: List[ArgReturn] = [{'name': 'balance', 'type': 'number', 'description': 'The balance of the account in USD.'}]
exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'account' parameter is not in the correct format."}] |
class PointMagneticFluxDensity(BaseRx):
def __init__(self, locations, orientation='x', component='real', **kwargs):
self.projField = 'b'
super().__init__(locations, orientation, component, **kwargs) |
def pad_batched_data(batched_data):
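    # Pad every post/response to the batch maximum length plus one extra slot; the recorded lengths include that extra position as well.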
batched_post_tokens = [item['post'].split() for item in batched_data]
batched_res_tokens = [item['response'].split() for item in batched_data]
encoder_len = (max([len(p) for p in batched_post_tokens]) + 1)
decoder_len = (max([len(r) for r in batched_res_tokens]) + 1)
(posts, responses, posts_length, responses_length) = ([], [], [], [])
for token_list in batched_post_tokens:
posts.append(_padding(token_list, encoder_len))
posts_length.append((len(token_list) + 1))
for token_list in batched_res_tokens:
responses.append(_padding(token_list, decoder_len))
responses_length.append((len(token_list) + 1))
batched_corrs = [item['corr_responses'] for item in batched_data]
corr_responses = []
for corrs in batched_corrs:
response_k = []
for res in corrs:
tokens = res.split()
token_pad = _pad_corr_res(tokens, decoder_len)
response_k.append(token_pad)
corr_responses.append(response_k)
paded_data = {'posts': np.array(posts), 'responses': np.array(responses), 'posts_length': posts_length, 'responses_length': responses_length, 'corr_responses': np.array(corr_responses)}
return paded_data |
def clear_no_need_grad_tester(rng, func, inputs, func_args=[], func_kwargs={}, backward=None, atol_f=1e-06, ctx=None, func_name=None, insert_identity=[], auto_forward=False):
if (ctx is None):
ctx = nn.Context()
if (backward is None):
backward = [True for _ in inputs]
if (not (True in backward)):
return
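    # Save the incoming RNG state so it can be restored at the end, keeping the tester side-effect free.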
state_rng = None
if (rng is not None):
state_rng = rng.get_state()
else:
        rng = np.random.RandomState(313)
def create_variables(inputs, backward):
vinputs = []
for (i, b) in zip(inputs, backward):
if (i is None):
vinputs += [None]
continue
vinputs += [nn.Variable(i.shape, need_grad=b)]
vinputs[(- 1)].data.cast(i.dtype)[...] = i
return vinputs
vinputs = create_variables(inputs, backward)
vinputs_clear_buffer = create_variables(inputs, backward)
vinputs_identity_clear_buffer = []
if (not insert_identity):
insert_identity = ([True] * len(vinputs))
with nn.context_scope(ctx), nn.auto_forward(auto_forward):
for (idx, i) in enumerate(vinputs_clear_buffer):
if (i is None):
vinputs_identity_clear_buffer += [None]
elif insert_identity[idx]:
vinputs_identity_clear_buffer += [F.identity(i)]
else:
vinputs_identity_clear_buffer += [i]
with nn.context_scope(ctx), nn.auto_forward(auto_forward):
o = func(*(vinputs + func_args), **func_kwargs)
o = force_tuple(o)
F.sink(*o).forward(clear_no_need_grad=False)
o_clear_buffer = func(*(vinputs_identity_clear_buffer + func_args), **func_kwargs)
o_clear_buffer = force_tuple(o_clear_buffer)
o_identity_clear_buffer = list(map((lambda x: (F.identity(x) if (x is not None) else None)), o_clear_buffer))
o_identity_clear_buffer = list(filter((lambda x: (x is not None)), o_identity_clear_buffer))
F.sink(*o_identity_clear_buffer).forward(clear_no_need_grad=True)
for i in range(len(o)):
if (o[i] is None):
continue
ref = o[i].d
res = o_identity_clear_buffer[i].d
assert_allclose(ref, res, atol=atol_f, err_msg='{} forward(clear_no_need_grad=True) test fails'.format(func_name))
vinputs = list(filter((lambda x: (x is not None)), vinputs))
vinputs_clear_buffer = list(filter((lambda x: (x is not None)), vinputs_clear_buffer))
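    # Zero all gradients, backprop the same upstream grads through both graphs, and check that clear_buffer=True does not change them.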
for i in range(len(vinputs)):
vinputs[i].grad.zero()
vinputs_clear_buffer[i].grad.zero()
for i in range(len(o)):
if (o[i] is None):
continue
o[i].g = randn(rng, *o[i].shape)
o_identity_clear_buffer[i].g = o[i].g
F.sink(*o).backward()
F.sink(*o_identity_clear_buffer).backward(clear_buffer=True)
for i in range(len(vinputs)):
ref = vinputs[i].g
res = vinputs_clear_buffer[i].g
assert_allclose(ref, res, atol=atol_f, err_msg='{} forward(clear_no_need_grad=True) and backward test fails'.format(func_name))
if state_rng:
rng.set_state(state_rng) |
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
super(NLayerDiscriminator, self).__init__()
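        # BatchNorm2d already has affine bias terms, so conv bias is only needed when using InstanceNorm2d.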
if (type(norm_layer) == functools.partial):
use_bias = (norm_layer.func == nn.InstanceNorm2d)
else:
use_bias = (norm_layer == nn.InstanceNorm2d)
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min((2 ** n), 8)
sequence += [nn.Conv2d((ndf * nf_mult_prev), (ndf * nf_mult), kernel_size=kw, stride=2, padding=padw, bias=use_bias), norm_layer((ndf * nf_mult)), nn.LeakyReLU(0.2, True)]
nf_mult_prev = nf_mult
nf_mult = min((2 ** n_layers), 8)
sequence += [nn.Conv2d((ndf * nf_mult_prev), (ndf * nf_mult), kernel_size=kw, stride=1, padding=padw, bias=use_bias), norm_layer((ndf * nf_mult)), nn.LeakyReLU(0.2, True)]
sequence += [nn.Conv2d((ndf * nf_mult), 1, kernel_size=kw, stride=1, padding=padw)]
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input) |
def ref_binary_error(x, l):
y = []
for (x_, l_) in zip(x, l):
y.append(((x_ >= 0.5) != (l_ >= 0.5)))
return np.array(y).reshape(x.shape) |
def _replace_ref_nodes_with_names(model: models.Model, model_list: List[optplan.ProblemGraphNodeSchema]) -> None:
def process_field(model: models.Model, child_model: models.Model) -> str:
if isinstance(child_model, str):
return child_model
ind = model_list.index(child_model)
return model_list[ind].name
_iter_optplan_fields(model, set(), process_field) |
class ShiftedPrimedTableaux_weight_shape(ShiftedPrimedTableaux):
def __init__(self, weight, shape, skew=None, primed_diagonal=False):
ShiftedPrimedTableaux.__init__(self, skew=skew, primed_diagonal=primed_diagonal)
if (skew is None):
Parent.__init__(self, category=FiniteEnumeratedSets())
else:
Parent.__init__(self, category=Sets().Finite())
self._weight = weight
if (skew is None):
self._shape = _Partitions(shape)
else:
self._shape = SkewPartition((shape, skew))
def _repr_(self):
return 'Shifted Primed Tableaux of weight {} and shape {}'.format(self._weight, self._shape)
def _contains_tableau(self, T):
if (not super()._contains_tableau(T)):
return False
flat = [item.integer() for sublist in T for item in sublist]
if (not flat):
return (not self._weight)
max_ind = max(flat)
weight = tuple([flat.count((i + 1)) for i in range(max_ind)])
if (self._weight != weight):
return False
shape = [len(row) for row in T]
skew = [row.count(None) for row in T]
if (sum(skew) == 0):
shape = _Partitions(shape)
else:
shape = SkewPartition((shape, skew))
return (self._shape == shape)
def __iter__(self):
if (self._skew is not None):
raise NotImplementedError('skew tableau must be empty')
if (not self._shape.dominates(sorted(self._weight, reverse=True))):
return
full_shape = self._shape
sub_tab = []
tab_list_new = [[]]
half = (~ QQ(2))
for (i, w) in enumerate(self._weight):
tab_list_old = tab_list_new
tab_list_new = []
for sub_tab in tab_list_old:
sub_shape = [len(row) for row in sub_tab]
for strip in _add_strip(sub_shape, full_shape, w):
l = (len(strip) // 2)
new_tab = []
new_tab1 = None
if (len(sub_shape) < len(full_shape)):
new_tab = [((sub_tab[r] + ([(i + half)] * strip[r])) + ([(i + 1)] * strip[((- r) - 1)])) for r in range((l - 1))]
if (strip[l] != 0):
if self._primed_diagonal:
new_tab1 = new_tab[:]
new_tab1.append(([(i + half)] + ([(i + 1)] * (strip[l] - 1))))
new_tab.append(([(i + 1)] * strip[l]))
else:
new_tab = [((sub_tab[r] + ([(i + half)] * strip[r])) + ([(i + 1)] * strip[((- r) - 1)])) for r in range(l)]
tab_list_new.append(new_tab)
if new_tab1:
tab_list_new.append(new_tab1)
for tab in tab_list_new:
(yield self.element_class(self, tab)) |
def test_map_param():
    @dace.program
    def map_uses_param(A: dace.float32[10], B: dace.float32[10], C: dace.float32[10]):
for i in dace.map[0:10]:
a = (i - A[i])
b = (B[i] * i)
C[i] = (a + b)
sdfg = map_uses_param.to_sdfg(simplify=True)
num_tasklet_fusions = sdfg.apply_transformations_repeated(TaskletFusion)
assert (num_tasklet_fusions == 3)
A = np.zeros([10], dtype=np.float32)
B = np.ones([10], dtype=np.float32)
C = np.empty([10], dtype=np.float32)
sdfg(A=A, B=B, C=C)
ref = (np.array(range(0, 10, 1)) * 2.0)
assert (C == ref).all() |
def resize_worker(img_file, size, use_rgb, format, resample):
(i, file) = img_file
img = Image.open(file)
if use_rgb:
img = img.convert('RGB')
img = resize_and_convert(img, size, format, resample)
return (i, img) |
@RUNNERS.register_module()
class EpochBasedRunnerAmp(EpochBasedRunner):
def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True):
if (meta is None):
meta = dict(epoch=(self.epoch + 1), iter=self.iter)
elif isinstance(meta, dict):
meta.update(epoch=(self.epoch + 1), iter=self.iter)
else:
raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
if (self.meta is not None):
meta.update(self.meta)
filename = filename_tmpl.format((self.epoch + 1))
filepath = osp.join(out_dir, filename)
optimizer = (self.optimizer if save_optimizer else None)
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
if create_symlink:
dst_file = osp.join(out_dir, 'latest.pth')
if (platform.system() != 'Windows'):
mmcv.symlink(filename, dst_file)
else:
shutil.copy(filepath, dst_file)
def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
if (map_location == 'default'):
if torch.cuda.is_available():
device_id = torch.cuda.current_device()
checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
else:
checkpoint = self.load_checkpoint(checkpoint)
else:
checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
self._epoch = checkpoint['meta']['epoch']
self._iter = checkpoint['meta']['iter']
if (('optimizer' in checkpoint) and resume_optimizer):
if isinstance(self.optimizer, Optimizer):
self.optimizer.load_state_dict(checkpoint['optimizer'])
elif isinstance(self.optimizer, dict):
for k in self.optimizer.keys():
self.optimizer[k].load_state_dict(checkpoint['optimizer'][k])
else:
raise TypeError(f'Optimizer should be dict or torch.optim.Optimizer but got {type(self.optimizer)}')
if ('amp' in checkpoint):
apex.amp.load_state_dict(checkpoint['amp'])
self.logger.info('load amp state dict')
self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter) |
def Affine(name_scope, input_tensor, out_channels, relu=True):
input_shape = input_tensor.get_shape().as_list()
input_channels = input_shape[(- 1)]
with tf.name_scope(name_scope):
weights = tf.Variable(tf.truncated_normal([input_channels, out_channels], stddev=(1.0 / math.sqrt(float(input_channels)))), name='weights')
biases = tf.Variable(tf.zeros([out_channels]), name='biases')
h = (tf.matmul(input_tensor, weights) + biases)
if relu:
return tf.nn.relu(h)
else:
return h |
def require_apex(test_case):
return unittest.skipUnless(is_apex_available(), 'test requires apex')(test_case) |
def filter_lines(lines):
lines = [line for line in map(str.strip, lines) if (line and (not line.startswith('#')))]
func_sigs = [split_signature(line) for line in lines if (line.split(' ')[0] != 'void')]
sub_sigs = [split_signature(line) for line in lines if (line.split(' ')[0] == 'void')]
all_sigs = list(sorted((func_sigs + sub_sigs), key=itemgetter(0)))
return (func_sigs, sub_sigs, all_sigs) |
class STL10(CIFAR10):
base_folder = 'stl10_binary'
    url = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
filename = 'stl10_binary.tar.gz'
tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
class_names_file = 'class_names.txt'
train_list = [['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], ['train_y.bin', '5a34089d4802c674881badbb'], ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']]
test_list = [['test_X.bin', '7f263ba9f9e0b06bf721ac82'], ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']]
splits = ('train', 'train+unlabeled', 'unlabeled', 'test')
def __init__(self, root, split='train', transform=None, target_transform=None, download=True, ratio=1, remain_size=True):
if (split not in self.splits):
raise ValueError('Split "{}" not found. Valid splits are: {}'.format(split, ', '.join(self.splits)))
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.split = split
self.ratio = ratio
self.remain_size = remain_size
if download:
self.download()
if (not self._check_integrity()):
raise RuntimeError('Dataset not found or corrupted. You can use download=True to download it')
if (self.split == 'train'):
(self.data, self.labels) = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
elif (self.split == 'train+unlabeled'):
(self.data, self.labels) = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
(unlabeled_data, _) = self.__loadfile(self.train_list[2][0])
self.data = np.concatenate((self.data, unlabeled_data))
self.labels = np.concatenate((self.labels, np.asarray(([(- 1)] * unlabeled_data.shape[0]))))
elif (self.split == 'unlabeled'):
(self.data, _) = self.__loadfile(self.train_list[2][0])
self.labels = np.asarray(([(- 1)] * self.data.shape[0]))
else:
(self.data, self.labels) = self.__loadfile(self.test_list[0][0], self.test_list[1][0])
class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
if os.path.isfile(class_file):
with open(class_file) as f:
self.classes = f.read().splitlines()
def __getitem__(self, index):
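        # Indices wrap modulo ratio * len(data), so only the first ratio-fraction of samples is ever returned.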
index = (index % int((self.ratio * len(self.data))))
if (self.labels is not None):
(img, target) = (self.data[index], int(self.labels[index]))
else:
(img, target) = (self.data[index], None)
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if (self.transform is not None):
img = self.transform(img)
if (self.target_transform is not None):
target = self.target_transform(target)
return (img, target)
def __len__(self):
if self.remain_size:
return self.data.shape[0]
else:
return int((self.data.shape[0] * self.ratio))
def __loadfile(self, data_file, labels_file=None):
labels = None
if labels_file:
path_to_labels = os.path.join(self.root, self.base_folder, labels_file)
with open(path_to_labels, 'rb') as f:
labels = (np.fromfile(f, dtype=np.uint8) - 1)
path_to_data = os.path.join(self.root, self.base_folder, data_file)
with open(path_to_data, 'rb') as f:
everything = np.fromfile(f, dtype=np.uint8)
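            # STL-10 binaries store each image as 3x96x96 in column-major order; swap the last two axes to get row-major images.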
images = np.reshape(everything, ((- 1), 3, 96, 96))
images = np.transpose(images, (0, 1, 3, 2))
return (images, labels)
def extra_repr(self):
return 'Split: {split}'.format(**self.__dict__) |
def tree_map(fn: Any, pytree: PyTree) -> PyTree:
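    # Flatten the pytree to leaves, map fn over them, then rebuild the original structure.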
(flat_args, spec) = tree_flatten(pytree)
return tree_unflatten([fn(i) for i in flat_args], spec) |
class SimplicialComplexes(Category_singleton):
    @cached_method
def super_categories(self):
return [Sets()]
class Finite(CategoryWithAxiom):
class ParentMethods():
            @cached_method
def dimension(self):
return max((c.dimension() for c in self.facets()))
class ParentMethods():
        @abstract_method
        def facets(self):
            pass
        @abstract_method
        def faces(self):
            pass
class SubcategoryMethods():
        @cached_method
def Connected(self):
return self._with_axiom('Connected')
    class Connected(CategoryWithAxiom):
        pass |
class build_py(old_build_py):
def run(self):
build_src = self.get_finalized_command('build_src')
if (build_src.py_modules_dict and (self.packages is None)):
self.packages = list(build_src.py_modules_dict.keys())
old_build_py.run(self)
def find_package_modules(self, package, package_dir):
modules = old_build_py.find_package_modules(self, package, package_dir)
build_src = self.get_finalized_command('build_src')
modules += build_src.py_modules_dict.get(package, [])
return modules
def find_modules(self):
old_py_modules = self.py_modules[:]
new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
self.py_modules[:] = new_py_modules
modules = old_build_py.find_modules(self)
self.py_modules[:] = old_py_modules
return modules |
def run_task(task):
log.info(f'Task name: {task.name}')
task_args = (task.args if ('args' in task) else '')
task_args = task_args.replace('$\\', '\\$')
command = f'CUDA_VISIBLE_DEVICES={utils.WORKER_CUDA_DEVICE} HYDRA_CONFIG_PATH={task.config_path} {task.environ} python {task.command} repeat={task.repeat} {task_args}'
log.info(f'Command: {command}')
ret = os.system(command)
ret = str(ret)
log.info(f'Task "{task.name}" finished with return code: {ret}.')
return ret |
class WoE():
def __init__(self, f_type: str, split: List[float], woe_diff_th: float=0.0, target_type: TaskType=TaskType.BIN):
self.f_type = f_type
self.split = split
self.woe_diff = woe_diff_th
self.target_type = target_type
self.iv = None
self.cod_dict = None
def __codding(self, x: pd.Series):
if (self.f_type == 'cat'):
x_cod = x.map(self.split)
elif (self.f_type == 'real'):
x_cod = np.searchsorted(self.split, x.values, side='left')
x_cod = pd.Series(data=x_cod, index=x.index)
else:
raise ValueError('_f_type is cat or real')
return x_cod
    @staticmethod
    def _bucket_woe(x, total_good: int, total_bad: int):
t_bad = x['bad']
t_good = x['count_nonzero']
t_bad = (0.5 if (t_bad == 0) else t_bad)
t_good = (0.5 if (t_good == 0) else t_good)
return np.log(((t_bad / total_bad) / (t_good / total_good)))
def __woe(self, df: pd.DataFrame) -> Tuple[(Dict, DataFrame, Tuple[(float, ...)])]:
df.columns = [0, 'target']
stat = df.groupby(0)['target'].agg([np.mean, np.count_nonzero, np.size])
if (self.target_type == TaskType.BIN):
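            # Binary target: per-bucket WoE = log((bad_i / total_bad) / (good_i / total_good)); IV sums the weighted bucket differences.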
stat['bad'] = (stat['size'] - stat['count_nonzero'])
t_good = np.maximum(stat['count_nonzero'].sum(), 0.5)
t_bad = np.maximum(stat['bad'].sum(), 0.5)
stat['woe'] = stat.apply((lambda x: self._bucket_woe(x, t_good, t_bad)), axis=1)
iv_stat = (((stat['bad'] / t_bad) - (stat['count_nonzero'] / t_good)) * stat['woe'])
self.iv = iv_stat.sum()
return (stat['woe'].to_dict(), stat, (t_good, t_bad))
elif (self.target_type == TaskType.REG):
stat['woe'] = stat['mean']
iv_stat = ((stat['woe'].abs() * stat['size']) / stat['size'].sum())
self.iv = iv_stat.sum()
return (stat['woe'].to_dict(), stat, None)
def __df_cod_transform(self, x: pd.Series, spec_values):
x_ = deepcopy(x)
if isinstance(spec_values, list):
spec_values_ = spec_values.copy()
elif isinstance(spec_values, dict):
spec_values_ = spec_values.keys()
else:
spec_values_ = []
x_.loc[x_.isin(spec_values_)] = (- np.inf)
df_cod = self.__codding(x_)
df_cod.loc[x.isin(spec_values_)] = x.loc[x.isin(spec_values_)]
return df_cod
def fit(self, x, y, spec_values):
df_cod = self.__df_cod_transform(x, spec_values)
df_cod = pd.concat([df_cod, y], axis=1)
(stat, total, t_stat) = self.__woe(df_cod)
if (self.target_type == TaskType.BIN):
(t_good, t_bad) = t_stat
good_stats = total.loc[[x for x in total.index if ((type(x) in [int, float]) or (x in ('__Small__', '__NaN__')) or is_mark_prefix(x))]]
nsm_values = (([x for x in spec_values if ('NaN' in x)] + [x for x in spec_values if ('Small' in x)]) + [x for x in spec_values if ('Mark' in x)])
for key in nsm_values:
if (((key in ('__Small__', '__NaN__')) or is_mark_prefix(key)) and (key in good_stats.index)):
check_row = good_stats.loc[key]
diff = (good_stats['woe'] - check_row['woe']).abs()
min_diff = diff[(diff > 0)].min()
if (min_diff < self.woe_diff):
idx = (diff <= min_diff)
if (self.target_type == TaskType.BIN):
good_stats.loc[(idx, 'woe')] = self._bucket_woe(good_stats.loc[(idx, ['bad', 'count_nonzero'])].sum(axis=0), t_good, t_bad)
good_stats.loc[(idx, 'size')] = good_stats.loc[(idx, 'size')].sum()
good_stats.loc[(idx, 'mean')] = (good_stats.loc[(idx, 'count_nonzero')].sum() / good_stats['size'])
elif (self.target_type == TaskType.REG):
gs = good_stats.loc[(idx, ['woe', 'size'])].copy()
t_gs_size = gs['size'].sum()
good_stats.loc[(idx, 'woe')] = ((gs['woe'] * gs['size']).sum() / t_gs_size)
good_stats.loc[(idx, 'size')] = t_gs_size
good_stats.loc[(idx, 'mean')] = good_stats.loc[(idx, 'woe')]
for key in good_stats.index.values:
stat[key] = good_stats.loc[(key, 'woe')]
for key in nsm_values:
woe_val = None
if (key in ('__Mark_0__', '__Small_0__', '__NaN_0__')):
woe_val = 0
elif (key in ('__Mark_maxfreq__', '__Small_maxfreq__', '__NaN_maxfreq__')):
idx = good_stats['size'].values.argmax()
woe_val = good_stats.iloc[idx]['woe']
elif (key in ('__Mark_maxp__', '__Small_maxp__', '__NaN_maxp__')):
idx = good_stats['mean'].values.argmax()
woe_val = good_stats.iloc[idx]['woe']
elif (key in ('__Mark_minp__', '__Small_minp__', '__NaN_minp__')):
idx = good_stats['mean'].values.argmin()
woe_val = good_stats.iloc[idx]['woe']
elif ((key in ('__Small__', '__NaN__')) or is_mark_prefix(key)):
continue
stat[key] = woe_val
self.cod_dict = stat
return df_cod
def fit_transform(self, x: pd.Series, y: pd.Series, spec_values):
df_cod = self.fit(x, y, spec_values)
df_cod = df_cod[0].map(self.cod_dict).copy()
return df_cod
def transform(self, x: pd.Series, spec_values):
df_cod = self.__df_cod_transform(x, spec_values)
df_cod = df_cod.map(self.cod_dict)
return df_cod
def split_feature(self, x: pd.Series, spec_values):
df_cod = self.__df_cod_transform(x, spec_values)
return df_cod
def fit_transform_cv(self, x: pd.Series, y: pd.Series, spec_values, cv_index_split: Dict[(int, List[int])]):
x_ = deepcopy(x)
for key in cv_index_split:
(train_index, test_index) = cv_index_split[key]
self.fit(x.iloc[train_index], y.iloc[train_index], spec_values)
x_.iloc[test_index] = self.transform(x.iloc[test_index], spec_values)
return x_.astype(float) |
class BankManagerPayBill(VirtualFunctionTool):
name = 'BankManagerPayBill'
    summary = 'Pay a bill to a specified payee with your service account number.'
parameters: List[ArgParameter] = [{'name': 'from_account_number', 'type': 'string', 'description': "The user's bank account number used for paying the bill in the format 'XXX-XXXX-XXXX'.", 'required': True}, {'name': 'payee_id', 'type': 'string', 'description': "The unique identifier of the payee in the format 'P-XXXXXX'.", 'required': True}, {'name': 'service_account_number', 'type': 'string', 'description': 'Your account number assigned by the service provider, which helps the provider identify transactions related to that specific customer.', 'required': True}, {'name': 'payment_date', 'type': 'string', 'description': "The date the payment should be paid, in the format 'YYYY-MM-DD'.", 'required': True}, {'name': 'amount', 'type': 'number', 'description': 'The amount to pay, must be positive.', 'required': True}]
returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the bill payment was successful.'}]
exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'from_account_number' parameter is not in the correct format, the 'from_account_number' is not owned by the user, the 'payee_id' is invalid, or the 'amount' parameter is not positive."}] |
def evaluate_tfidf_distance(ref_texts, hypo_texts):
print('Evaluating TF-IDF Distance...')
vocab = get_vocab(ref_texts)
results = {'n_ref': len(ref_texts), 'n_hypo': len(hypo_texts)}
ref_feature = get_feature(ref_texts, vocab)
hypo_feature = get_feature(hypo_texts, vocab)
    results['tfidf_distance'] = np.linalg.norm((ref_feature - hypo_feature))
return results |
def __curve_validation__(curve, actual_vector, probs):
for item in [actual_vector, probs]:
if (not isinstance(item, (list, numpy.ndarray))):
raise pycmCurveError(VECTOR_TYPE_ERROR)
if (len(actual_vector) != len(probs)):
raise pycmCurveError(VECTOR_SIZE_ERROR)
for item in probs:
if (not all(map(isfloat, item))):
raise pycmCurveError(PROBABILITY_TYPE_ERROR)
if (abs((sum(item) - 1)) > 0.001):
raise pycmCurveError(PROBABILITY_SUM_ERROR)
curve.actual_vector = actual_vector
curve.probs = probs |
def extract_mep_S1_models(layout):
wandb_dir = RESULT_PATH.format(layout=layout)
runs = glob.glob(f'{wandb_dir}/run*')
run_ids = [x.split('-')[(- 1)] for x in runs]
print(runs)
print(run_ids)
api = wandb.Api()
for (i, run_id) in enumerate(run_ids):
run = api.run(f'{WANDB_NAME}/Overcooked/{run_id}')
if (run.state == 'finished'):
for policy_id in range(1, (12 + 1)):
policy_name = f'mep{policy_id}'
final_ep_sparse_r = run.summary[f'{policy_name}-{policy_name}-ep_sparse_r']
history = run.history()
history = history[['_step', f'{policy_name}-{policy_name}-ep_sparse_r']]
steps = history['_step'].to_numpy()
ep_sparse_r = history[f'{policy_name}-{policy_name}-ep_sparse_r'].to_numpy()
files = run.files()
actor_pts = [f for f in files if f.name.startswith(f'{policy_name}/actor_periodic')]
                actor_versions = [int(f.name.split('_')[(- 1)].split('.pt')[0]) for f in actor_pts]
actor_pts = {v: p for (v, p) in zip(actor_versions, actor_pts)}
actor_versions = sorted(actor_versions)
max_actor_versions = (max(actor_versions) + 1)
max_steps = max(steps)
ep_sparse_r = [(0 if np.isnan(x) else x) for x in ep_sparse_r]
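                # Densify the curve: linearly interpolate scores onto a step grid (spacing 100) so the mid-training checkpoint can be located by score.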
new_steps = [steps[0]]
new_ep_sparse_r = [ep_sparse_r[0]]
for (s, er) in zip(steps[1:], ep_sparse_r[1:]):
l_s = new_steps[(- 1)]
l_er = new_ep_sparse_r[(- 1)]
for w in range((l_s + 1), s, 100):
new_steps.append(w)
new_ep_sparse_r.append((l_er + (((er - l_er) * (w - l_s)) / (s - l_s))))
steps = new_steps
ep_sparse_r = new_ep_sparse_r
selected_pts = dict(init=0, mid=(- 1), final=max_steps)
mid_ep_sparse_r = (final_ep_sparse_r / 2)
                min_delta = float('inf')
for (s, score) in zip(steps, ep_sparse_r):
if (min_delta > abs((mid_ep_sparse_r - score))):
min_delta = abs((mid_ep_sparse_r - score))
selected_pts['mid'] = s
selected_pts = {k: int(((v / max_steps) * max_actor_versions)) for (k, v) in selected_pts.items()}
for (tag, exp_version) in selected_pts.items():
version = actor_versions[0]
for actor_version in actor_versions:
if (abs((exp_version - version)) > abs((exp_version - actor_version))):
version = actor_version
print(policy_name, tag, 'Expected', exp_version, 'Found', version)
ckpt = actor_pts[version]
ckpt.download('tmp', replace=True)
mep_s1_dir = f'{POLICY_POOL_PATH}/{layout}/mep/s1'
os.system(f'mv tmp/{policy_name}/actor_periodic_{version}.pt {mep_s1_dir}/{policy_name}_{tag}_actor.pt') |
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(should_flush=True)
if (args.num_gpus > 1):
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if (os.name == 'nt'):
init_method = ('file:///' + init_file.replace('\\', '/'))
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
sync_device = (torch.device('cuda', rank) if (args.num_gpus > 1) else None)
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if ((rank != 0) or (not args.verbose)):
custom_ops.verbosity = 'none'
device = torch.device('cuda', rank)
torch.backends.cudnn.benchmark = True
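    # Disable TF32 so the metrics are computed in full FP32 precision.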
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
with torch.no_grad():
from training.networks import Generator
from training.stylenerf import Discriminator
G2 = Generator(*G.init_args, **G.init_kwargs).to(device)
misc.copy_params_and_buffers(G, G2, require_all=False)
G = G2
D_init_kwargs = EasyDict(**args.D.init_kwargs)
D_init_kwargs.step = 2
D = Discriminator(*args.D.init_args, **D_init_kwargs).to(device)
misc.copy_params_and_buffers(args.D, D, require_all=False)
if ('Adapted_net' in args):
from training.adaptednet import AdaptedNet
Adapted_net = AdaptedNet(*args.Adapted_net.init_args, **args.Adapted_net.init_kwargs).to(device)
misc.copy_params_and_buffers(args.Adapted_net, Adapted_net, require_all=False)
if ((rank == 0) and args.verbose):
z = torch.empty([1, G.z_dim], device=device)
c = torch.empty([1, G.c_dim], device=device)
misc.print_module_summary(G, [z, c])
for metric in args.metrics:
if ((rank == 0) and args.verbose):
print(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=args.verbose)
if ('Adapted_net' in args):
result_dict = metric_main.calc_metric_trans(metric=metric, G=G, D=D, Adapted_net=Adapted_net, dataset_kwargs=args.dataset_kwargs, num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
else:
result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs, num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
if (rank == 0):
metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
if ((rank == 0) and args.verbose):
print()
if ((rank == 0) and args.verbose):
print('Exiting...') |
class PoolAggregator(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.tran = nn.Linear(in_features, out_features, True)
def forward(self, x, neighbor):
f = [self.tran(torch.cat([x[i:(i + 1)], n])) for (i, n) in enumerate(neighbor)]
x = torch.cat([x.max(dim=0, keepdim=True)[0] for x in f])
neighbor = [self.tran(n).max(dim=0, keepdim=True)[0] for n in neighbor]
return (x, neighbor) |
def getConfigFromDict(obj, inputDict, defaultConfig):
if (not inputDict):
for (member, value) in vars(defaultConfig).items():
setattr(obj, member, value)
else:
for (member, value) in vars(defaultConfig).items():
setattr(obj, member, inputDict.get(member, value)) |
class CoercionPDtoKM(HyperbolicModelCoercion):
def image_coordinates(self, x):
return (((2 * real(x)) / ((Integer(1) + (real(x) ** 2)) + (imag(x) ** 2))), ((2 * imag(x)) / ((Integer(1) + (real(x) ** 2)) + (imag(x) ** 2))))
def image_isometry_matrix(self, x):
return SL2R_to_SO21((((matrix(2, [1, I, I, 1]) * x) * matrix(2, [1, (- I), (- I), 1])) / Integer(2))) |
def tree_serialize_leaves_tensorstore(checkpoint_dir, pytree):
leaf_key_paths = jax_utils.leaf_key_paths(pytree, is_leaf=is_named_array)
specs = jtu.tree_map(partial(_tensorstore_spec_for, checkpoint_dir), leaf_key_paths, is_leaf=is_named_array)
async def _do_serialize():
futures = jtu.tree_map(_serialize_one_leaf, pytree, specs, is_leaf=is_named_array)
return (await asyncio.gather(*jtu.tree_leaves(futures)))
asyncio.run(_do_serialize()) |
def test_disagreement(example_diversity):
(y_pred_classifier1, y_pred_classifier2, y_real, y_ex1) = example_diversity
disagreement = disagreement_measure(y_real, y_pred_classifier1, y_pred_classifier2)
assert np.isclose(disagreement, 0.5).all() |
def extract_roi(opts, cls_ids, label, label_edit):
pixel_num = {'before_edit': 0, 'after_edit': 0, 'whole': 0}
roi = np.zeros((256, 256), np.uint8)
label = label.view(256, 256).numpy()
label_edit = label_edit.view(256, 256).numpy()
for ids in cls_ids:
roi += (label == int(ids)).astype(np.uint8)
pixel_num['before_edit'] += int(sum((label == int(ids)).reshape((- 1))))
for ids in cls_ids:
roi += (label_edit == int(ids)).astype(np.uint8)
pixel_num['after_edit'] += int(sum((label_edit == int(ids)).reshape((- 1))))
roi = (roi > 0)
pixel_num['whole'] = int(sum((roi.reshape((- 1)) > 0)))
if opts['dilate']:
kernel = np.ones((opts['dilate'], opts['dilate']), np.uint8)
dilate_roi = cv2.dilate(np.float32(roi), kernel, iterations=3).astype(np.uint8)
return (dilate_roi, pixel_num)
else:
roi = np.float32(roi).astype(np.uint8)
return (roi, pixel_num) |
class MLP(BaseModel):
name = 'mlp'
def __init__(self, task, embedding_size=768, n_classes=3, hidden_size=5, nlayers=1, dropout=0.1, representation=None, n_words=None):
super().__init__()
self.dropout_p = dropout
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.nlayers = nlayers
self.n_classes = n_classes
self.representation = representation
self.n_words = n_words
self.task = task
if (self.representation in ['onehot', 'random']):
self.build_embeddings(n_words, embedding_size)
self.mlp = self.build_mlp()
self.out = nn.Linear(self.final_hidden_size, n_classes)
self.dropout = nn.Dropout(dropout)
self.criterion = nn.CrossEntropyLoss()
def build_embeddings(self, n_words, embedding_size):
if (self.task == 'dep_label'):
self.embedding_size = (int((embedding_size / 2)) * 2)
self.embedding = nn.Embedding(n_words, int((embedding_size / 2)))
else:
self.embedding = nn.Embedding(n_words, embedding_size)
if (self.representation == 'random'):
self.embedding.weight.requires_grad = False
def build_mlp(self):
src_size = self.embedding_size
tgt_size = self.hidden_size
mlp = []
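        # Stack nlayers of Linear -> ReLU -> Dropout, halving the hidden width after each layer.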
for layer in range(self.nlayers):
mlp += [nn.Linear(src_size, tgt_size)]
mlp += [nn.ReLU()]
mlp += [nn.Dropout(self.dropout_p)]
(src_size, tgt_size) = (tgt_size, int((tgt_size / 2)))
self.final_hidden_size = src_size
return nn.Sequential(*mlp)
def forward(self, x):
if (self.representation in ['onehot', 'random']):
x = self.get_embeddings(x)
x_emb = self.dropout(x)
x = self.mlp(x_emb)
logits = self.out(x)
return logits
def get_embeddings(self, x):
x_emb = self.embedding(x)
if (len(x.shape) > 1):
x_emb = x_emb.reshape(x.shape[0], (- 1))
return x_emb
def train_batch(self, data, target, optimizer, criterion):
optimizer.zero_grad()
mlp_out = self(data)
loss = self.criterion(mlp_out, target)
loss.backward()
optimizer.step()
return (loss.item() / math.log(2))
def eval_batch(self, data, target):
mlp_out = self(data)
loss = (self.criterion(mlp_out, target) / math.log(2))
accuracy = (mlp_out.argmax(dim=(- 1)) == target).float().detach().sum()
loss = (loss.item() * data.shape[0])
return (loss, accuracy)
def get_args(self):
return {'nlayers': self.nlayers, 'hidden_size': self.hidden_size, 'embedding_size': self.embedding_size, 'dropout': self.dropout_p, 'n_classes': self.n_classes, 'representation': self.representation, 'n_words': self.n_words, 'task': self.task} |
def collate_fn(batch):
if (not isinstance(batch, Sequence)):
raise TypeError(f'{batch.dtype} is not supported.')
if isinstance(batch[0], DataContainer):
stacked = []
if batch[0].cpu_only:
stacked.append([sample.data for sample in batch])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
pad_dims = batch[0].pad_dims
assert isinstance(batch[0].data, torch.Tensor)
if (pad_dims is not None):
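                # Only the trailing pad_dims dimensions may differ; compute their per-batch maxima and pad each sample up to them.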
ndim = batch[0].dim()
assert (ndim > pad_dims)
max_shape = [0 for _ in range(pad_dims)]
for dim in range(1, (pad_dims + 1)):
max_shape[(dim - 1)] = batch[0].size((- dim))
for sample in batch:
for dim in range(0, (ndim - pad_dims)):
assert (batch[0].size(dim) == sample.size(dim))
for dim in range(1, (pad_dims + 1)):
max_shape[(dim - 1)] = max(max_shape[(dim - 1)], sample.size((- dim)))
pad_seqs = []
for sample in batch:
pad_seq = [0 for _ in range((pad_dims * 2))]
for dim in range(1, (pad_dims + 1)):
pad_seq[((2 * dim) - 1)] = (max_shape[(dim - 1)] - sample.size((- dim)))
pad_seqs.append(pad_seq)
padded_samples = list(map((lambda sample, pad_seq: f.pad(sample.data, pad_seq, value=sample.padding_value)), batch, pad_seqs))
stacked.append(default_collate(padded_samples))
elif (pad_dims is None):
stacked.append(default_collate([sample.data for sample in batch]))
else:
raise ValueError('pad_dims should be either None or integers (1-3)')
else:
stacked.append([sample.data for sample in batch])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], Mapping):
return {key: collate_fn([d[key] for d in batch]) for key in batch[0]}
else:
return default_collate(batch) |
def T_sequences_smallcases(t, existence=False, check=True):
db = {47: [((([1, (- 1), (- 1), 0, 0, (- 1), 1, (- 1)] + ([0] * 8)) + [1, (- 1), (- 1), 0, 0, (- 1), (- 1)]) + ([0] * 24)), ([0, 0, 0, (- 1), 1, 0, 0, 0, (- 1), (- 1), (- 1), 1, 1, 1, 1, 1, 0, 0, 0, 1, (- 1), 0, 0, 1] + ([0] * 23)), (([0] * 26) + [(- 1), 0, 1, 0, 0, 0, 0, 1, (- 1), 1, 1, 1, 0, 0, 0, 0, 1, 0, (- 1), 0, 0]), (([0] * 24) + [1, 1, 0, (- 1), 0, (- 1), 1, 1, (- 1), 0, 0, 0, 0, 0, (- 1), 1, (- 1), (- 1), 0, (- 1), 0, (- 1), 1])], 65: [(([0] * 33) + [1, 1, 1, 1, 1, (- 1), (- 1), 1, 1, (- 1), 1, (- 1), 1, 1, (- 1), (- 1), 1, 1, 1, 1, 1, (- 1), (- 1), 1, (- 1), 1, (- 1), 1, (- 1), (- 1), 1, 1]), ((([0] * 32) + [1]) + ([0] * 32)), ((((([1] * 5) + [(- 1), (- 1), 1, 1, (- 1), 1, (- 1), 1, 1]) + ([(- 1)] * 7)) + [1, 1, (- 1), 1, (- 1), 1, (- 1), 1, 1, (- 1), (- 1)]) + ([0] * 33)), ([0] * 65)], 93: [((([0, (- 1), 0, 0, (- 1), 1, 0, (- 1), 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, (- 1), 0, (- 1), 1, 1, 1, (- 1), 0, 1, 0, 0, 1] + ([0] * 33)) + [1, 1, 0, 0, 1, 0, 0, (- 1), 0, 0, (- 1), 1, 0, 1]) + ([0] * 15)), ((((([(- 1), 0, (- 1), 1, 0, 0, 1, 0, 0, (- 1), 0, 0, (- 1), (- 1), 0, 0, 0, (- 1), 1, 0, 1] + ([0] * 5)) + [(- 1), 0, 1, 1]) + ([0] * 32)) + [1, 1, 0, 0, 1, 1, 0, 1, (- 1), 0, 1, (- 1), 0, 0, (- 1)]) + ([0] * 16)), (((((([0] * 32) + [1, 0, 0, 1, (- 1), 0, 1, (- 1), 0, (- 1), (- 1), 0, 0, (- 1), (- 1), 1, 0, 0, (- 1), 0, (- 1), 1, 1, 1, (- 1), 0, 1, 0, 0, 1]) + ([0] * 17)) + [1, 1, 0, (- 1)]) + ([0] * 5)) + [1, 0, 1, (- 1), 0]), (((((([0] * 31) + [1, 0, 1, (- 1), 0, 0, (- 1), 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, (- 1), 1, 0, 1]) + ([0] * 5)) + [(- 1), 0, 1, 1]) + ([0] * 17)) + [(- 1), 0, 0, (- 1), 0, 1, (- 1), (- 1), (- 1), 1, 0, 1, 0, 0, (- 1)])]}
if (t in db):
if existence:
return True
sequences = list(map(Sequence, db[t]))
if check:
assert is_T_sequences_set(sequences)
return sequences
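    # Otherwise build the T-sequences: from Turyn sequences of length (t+1)/2 or (t+1)/4, or from base sequences with n = (t-p)/2.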
if ((((t + 1) % 2) == 0) and turyn_sequences_smallcases(((t + 1) // 2), existence=True)):
if existence:
return True
turyn_seqs = turyn_sequences_smallcases(((t + 1) // 2))
return T_sequences_construction_from_base_sequences(turyn_seqs, check=check)
if ((((t + 1) % 4) == 0) and turyn_sequences_smallcases(((t + 1) // 4), existence=True)):
if existence:
return True
turyn_seqs = turyn_sequences_smallcases(((t + 1) // 4))
return T_sequences_construction_from_turyn_sequences(turyn_seqs, check=check)
for p in range(1, t):
n = ((t - p) // 2)
if ((((t - p) % 2) == 0) and base_sequences_smallcases(n, p, existence=True)):
if existence:
return True
base_seqs = base_sequences_smallcases(n, p, check=False)
return T_sequences_construction_from_base_sequences(base_seqs, check=check)
if existence:
return False
raise ValueError(f'T Sequences of length {t} not yet implemented.') |
def get_parser():
parser = argparse.ArgumentParser(description='apply clusters')
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='split to process', required=True)
parser.add_argument('--labels', help='split to process', default='phn')
parser.add_argument('--path', help='path to pca and centroids', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14)
return parser |
class docRowTypeSub(supermod.docRowType):
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry) |
class BasicMachine(object):
def __init__(self, datasets=(None, None), models=None, args=None, **kwargs):
super(BasicMachine, self).__init__()
self.args = args
print('==> creating model ')
self.model = archs.__dict__[self.args.arch]()
print('==> creating model [Finish]')
(self.train_loader, self.val_loader) = datasets
self.loss = torch.nn.MSELoss()
self.title = ((((('_' + args.machine) + '_') + args.data) + '_') + args.arch)
self.args.checkpoint = (args.checkpoint + self.title)
self.device = torch.device('cuda')
if (not is_dir(self.args.checkpoint)):
mkdir_p(self.args.checkpoint)
self.optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), self.model.parameters()), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)
if (not self.args.evaluate):
self.writer = SummaryWriter(((self.args.checkpoint + '/') + 'ckpt'))
self.best_acc = 0
self.is_best = False
self.current_epoch = 0
self.metric = (- 100000)
self.hl = (6 if self.args.hl else 1)
        self.count_gpu = torch.cuda.device_count()
if (self.count_gpu > 1):
self.model = nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
self.model.to(self.device)
self.loss.to(self.device)
print(('==> Total params: %.2fM' % (sum((p.numel() for p in self.model.parameters())) / 1000000.0)))
print(('==> Total devices: %d' % torch.cuda.device_count()))
print(('==> Current Checkpoint: %s' % self.args.checkpoint))
if (self.args.resume != ''):
self.resume(self.args.resume)
def train(self, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
self.model.train()
end = time.time()
bar = Bar('Processing', max=(len(self.train_loader) * self.hl))
for _ in range(self.hl):
for (i, batches) in enumerate(self.train_loader):
inputs = batches['constructed_images']
fake_target = batches['fake_region']
real_target = batches['real_region']
target = batches['real_images'].to(self.device)
mask = batches['mask'].to(self.device)
current_index = ((len(self.train_loader) * epoch) + i)
if self.args.hl:
feeded = torch.cat([inputs, mask], dim=1)
else:
feeded = inputs
feeded = feeded.to(self.device)
output = self.model(feeded)
L2_loss = self.loss(output, target)
total_loss = L2_loss
self.optimizer.zero_grad()
total_loss.backward()
self.optimizer.step()
losses.update(L2_loss.item(), inputs.size(0))
batch_time.update((time.time() - end))
end = time.time()
suffix = '({batch}/{size}) Data: {data:.2f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss L2: {loss_label:.4f} '.format(batch=(i + 1), size=len(self.train_loader), data=data_time.val, bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td, loss_label=losses.avg)
if ((current_index % 1000) == 0):
print(suffix)
if ((self.args.freq > 0) and ((current_index % self.args.freq) == 0)):
self.validate(current_index)
self.flush()
self.save_checkpoint()
self.record('train/loss_L2', losses.avg, current_index)
def test(self):
self.model.eval()
ssimes = AverageMeter()
psnres = AverageMeter()
with torch.no_grad():
for (i, batches) in enumerate(self.val_loader):
inputs = batches['image'].to(self.device)
target = batches['target'].to(self.device)
mask = batches['mask'].to(self.device)
outputs = self.model(inputs)
if (type(outputs) == type(inputs)):
output = outputs
elif (type(outputs[0]) == type([])):
output = outputs[0][0]
else:
output = outputs[0]
output = im_to_numpy(torch.clamp((output[0] * 255), min=0.0, max=255.0)).astype(np.uint8)
target = im_to_numpy(torch.clamp((target[0] * 255), min=0.0, max=255.0)).astype(np.uint8)
skimage.io.imsave(('%s/%s' % (self.args.checkpoint, batches['name'][0])), output)
psnr = compare_psnr(target, output)
ssim = compare_ssim(target, output, multichannel=True)
psnres.update(psnr, inputs.size(0))
ssimes.update(ssim, inputs.size(0))
print(('%s:PSNR:%s,SSIM:%s' % (self.args.checkpoint, psnres.avg, ssimes.avg)))
print('DONE.\n')
def validate(self, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
ssimes = AverageMeter()
psnres = AverageMeter()
self.model.eval()
end = time.time()
with torch.no_grad():
for (i, batches) in enumerate(self.val_loader):
inputs = batches['image'].to(self.device)
target = batches['target'].to(self.device)
mask = batches['mask'].to(self.device)
if self.args.hl:
feeded = torch.cat([inputs, torch.zeros((1, 4, self.args.input_size, self.args.input_size)).to(self.device)], dim=1)
else:
feeded = inputs
output = self.model(feeded)
L2_loss = self.loss(output, target)
psnr = (10 * log10((1 / L2_loss.item())))
ssim = pytorch_ssim.ssim(output, target)
losses.update(L2_loss.item(), inputs.size(0))
psnres.update(psnr, inputs.size(0))
ssimes.update(ssim.item(), inputs.size(0))
batch_time.update((time.time() - end))
end = time.time()
print(('Epoches:%s,Losses:%.3f,PSNR:%.3f,SSIM:%.3f' % ((epoch + 1), losses.avg, psnres.avg, ssimes.avg)))
self.record('val/loss_L2', losses.avg, epoch)
self.record('val/PSNR', psnres.avg, epoch)
self.record('val/SSIM', ssimes.avg, epoch)
self.metric = psnres.avg
def resume(self, resume_path):
if is_file(resume_path):
print("=> loading checkpoint '{}'".format(resume_path))
current_checkpoint = torch.load(resume_path)
if isinstance(current_checkpoint['state_dict'], torch.nn.DataParallel):
current_checkpoint['state_dict'] = current_checkpoint['state_dict'].module
if isinstance(current_checkpoint['optimizer'], torch.nn.DataParallel):
current_checkpoint['optimizer'] = current_checkpoint['optimizer'].module
self.args.start_epoch = current_checkpoint['epoch']
self.metric = current_checkpoint['best_acc']
self.model.load_state_dict(current_checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(resume_path, current_checkpoint['epoch']))
else:
raise Exception("=> no checkpoint found at '{}'".format(resume_path))
def save_checkpoint(self, filename='checkpoint.pth.tar', snapshot=None):
        is_best = (self.best_acc < self.metric)
if is_best:
self.best_acc = self.metric
state = {'epoch': (self.current_epoch + 1), 'arch': self.args.arch, 'state_dict': self.model.state_dict(), 'best_acc': self.best_acc, 'optimizer': (self.optimizer.state_dict() if self.optimizer else None)}
filepath = os.path.join(self.args.checkpoint, filename)
torch.save(state, filepath)
if (snapshot and ((state['epoch'] % snapshot) == 0)):
            shutil.copyfile(filepath, os.path.join(self.args.checkpoint, 'checkpoint_{}.pth.tar'.format(state['epoch'])))
if is_best:
self.best_acc = self.metric
print(('Saving Best Metric with PSNR:%s' % self.best_acc))
shutil.copyfile(filepath, os.path.join(self.args.checkpoint, 'model_best.pth.tar'))
def clean(self):
self.writer.close()
def record(self, k, v, epoch):
self.writer.add_scalar(k, v, epoch)
def flush(self):
self.writer.flush()
sys.stdout.flush()
def norm(self, x):
if self.args.gan_norm:
return ((x * 2.0) - 1.0)
else:
return x
def denorm(self, x):
if self.args.gan_norm:
return ((x + 1.0) / 2.0)
else:
return x |
def faiss_search_knn(feat, k, nprobe=128, num_process=4, is_precise=True, sort=True, verbose=False):
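    # Approximate IVF k-NN search first; optionally re-rank the k candidates with exact distances.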
(dists, nbrs) = faiss_search_approx_knn(query=feat, target=feat, k=k, nprobe=nprobe, verbose=verbose)
if is_precise:
print('compute precise dist among k={} nearest neighbors'.format(k))
(dists, nbrs) = precise_dist(feat, nbrs, num_process=num_process, sort=sort, verbose=verbose)
return (dists, nbrs) |
def train(epochs=10, batch_size=32, alpha=0.6, w=0.4, num_workers=2, lr=0.0001, save_epoch=10, train_path=(ROOT / 'dataset/KITTI/training'), model_path=(ROOT / 'weights/'), select_model='resnet18', api_key=''):
train_path = str(train_path)
model_path = str(model_path)
print('[INFO] Loading dataset...')
dataset = Dataset(train_path)
hyper_params = {'epochs': epochs, 'batch_size': batch_size, 'w': w, 'num_workers': num_workers, 'lr': lr, 'shuffle': True}
experiment = Experiment(api_key, project_name='YOLO3D')
experiment.log_parameters(hyper_params)
data_gen = data.DataLoader(dataset, batch_size=hyper_params['batch_size'], shuffle=hyper_params['shuffle'], num_workers=hyper_params['num_workers'])
base_model = model_factory[select_model]
model = regressor_factory[select_model](model=base_model).cuda()
opt_SGD = torch.optim.SGD(model.parameters(), lr=hyper_params['lr'], momentum=0.9)
conf_loss_func = nn.CrossEntropyLoss().cuda()
dim_loss_func = nn.MSELoss().cuda()
orient_loss_func = OrientationLoss
latest_model = None
first_epoch = 1
if (not os.path.isdir(model_path)):
os.mkdir(model_path)
else:
try:
latest_model = [x for x in sorted(os.listdir(model_path)) if x.endswith('.pkl')][(- 1)]
except:
pass
if (latest_model is not None):
checkpoint = torch.load((model_path + latest_model))
model.load_state_dict(checkpoint['model_state_dict'])
opt_SGD.load_state_dict(checkpoint['optimizer_state_dict'])
first_epoch = checkpoint['epoch']
loss = checkpoint['loss']
print(f'[INFO] Using previous model {latest_model} at {first_epoch} epochs')
print('[INFO] Resuming training...')
total_num_batches = int((len(dataset) / hyper_params['batch_size']))
with experiment.train():
for epoch in range(first_epoch, (int(hyper_params['epochs']) + 1)):
curr_batch = 0
passes = 0
with tqdm(data_gen, unit='batch') as tepoch:
for (local_batch, local_labels) in tepoch:
tepoch.set_description(f'Epoch {epoch}')
truth_orient = local_labels['Orientation'].float().cuda()
truth_conf = local_labels['Confidence'].float().cuda()
truth_dim = local_labels['Dimensions'].float().cuda()
local_batch = local_batch.float().cuda()
[orient, conf, dim] = model(local_batch)
orient_loss = orient_loss_func(orient, truth_orient, truth_conf)
dim_loss = dim_loss_func(dim, truth_dim)
truth_conf = torch.max(truth_conf, dim=1)[1]
conf_loss = conf_loss_func(conf, truth_conf)
loss_theta = (conf_loss + (w * orient_loss))
loss = ((alpha * dim_loss) + loss_theta)
writer.add_scalar('Loss/train', loss, epoch)
experiment.log_metric('Loss/train', loss, epoch=epoch)
opt_SGD.zero_grad()
loss.backward()
opt_SGD.step()
tepoch.set_postfix(loss=loss.item())
if ((epoch % save_epoch) == 0):
model_name = os.path.join(model_path, f'{select_model}_epoch_{epoch}.pkl')
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': opt_SGD.state_dict(), 'loss': loss}, model_name)
print(f'[INFO] Saving weights as {model_name}')
writer.flush()
writer.close() |
def create_model(args, logger, model_name):
if (model_name == 'adapter_mlp'):
if (hasattr(args, 'adapter_inference') and args.adapter_inference):
from models.adapter_inference import Adapter
else:
from models.adapter import Adapter
model = Adapter(args, logger)
elif ('downstream' in model_name):
if (args.downstream_task_name == 'task_cls'):
from models.task_head_task_cls import Task_Head
elif (args.downstream_task_name == 'step_cls'):
from models.task_head_step_cls import Task_Head
elif (args.downstream_task_name == 'step_forecasting'):
from models.task_head_step_forecasting import Task_Head
model = Task_Head(args, logger)
else:
raise ValueError('Model {} not recognized.'.format(args.adapter_name))
if need_logging(args):
logger.info(model)
logger.info('--> model {} was created'.format(model_name))
return model |
class Encoder_CNNtime_SAfreq(nn.Module):
def __init__(self, n_margin, n_frame, n_bin, cnn_channel, cnn_kernel, hid_dim, n_layers, n_heads, pf_dim, dropout, device):
super().__init__()
self.device = device
self.n_frame = n_frame
self.n_bin = n_bin
self.cnn_channel = cnn_channel
self.cnn_kernel = cnn_kernel
self.hid_dim = hid_dim
self.conv = nn.Conv2d(1, self.cnn_channel, kernel_size=(1, self.cnn_kernel))
self.n_proc = ((n_margin * 2) + 1)
self.cnn_dim = (self.cnn_channel * (self.n_proc - (self.cnn_kernel - 1)))
self.tok_embedding_freq = nn.Linear(self.cnn_dim, hid_dim)
self.pos_embedding_freq = nn.Embedding(n_bin, hid_dim)
self.layers_freq = nn.ModuleList([EncoderLayer(hid_dim, n_heads, pf_dim, dropout, device) for _ in range(n_layers)])
self.dropout = nn.Dropout(dropout)
self.scale_freq = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)
def forward(self, spec_in):
batch_size = spec_in.shape[0]
spec_cnn = self.conv(spec_in.unsqueeze(1))
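        # Slide a size-61 window (n_proc - cnn_kernel + 1) along the time axis and flatten channels x window into a cnn_dim feature per (frame, bin).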
spec_cnn = spec_cnn.unfold(3, 61, 1).permute(0, 3, 2, 1, 4).contiguous().reshape([(batch_size * self.n_frame), self.n_bin, self.cnn_dim])
spec_emb_freq = self.tok_embedding_freq(spec_cnn)
pos_freq = torch.arange(0, self.n_bin).unsqueeze(0).repeat((batch_size * self.n_frame), 1).to(self.device)
spec_freq = self.dropout(((spec_emb_freq * self.scale_freq) + self.pos_embedding_freq(pos_freq)))
for layer_freq in self.layers_freq:
spec_freq = layer_freq(spec_freq)
spec_freq = spec_freq.reshape([batch_size, self.n_frame, self.n_bin, self.hid_dim])
return spec_freq |
def get_representative_dataset(data_loader, n_iters, data_loader_key=0, transforms=None):
class RepresentativeDataset(object):
def __init__(self, in_data_loader):
self.dl = in_data_loader
self.iter = iter(self.dl)
def __call__(self):
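            # Yield n_iters batches, restarting the dataloader iterator whenever it is exhausted.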
for _ in range(n_iters):
try:
x = next(self.iter)[data_loader_key]
except StopIteration:
self.iter = iter(self.dl)
x = next(self.iter)[data_loader_key]
if (transforms is not None):
x = transforms(x.float())
(yield [x.cpu().numpy()])
return RepresentativeDataset(data_loader) |
def binarize_scores(threshold: float, scores: list[float]) -> list[int]:
return (np.array(scores) > threshold).astype(int).tolist() |
def unfold(g, input, dimension, size, step):
return g.op('ATen', input, operator_s='unfold', dimension_i=dimension, size_i=size, step_i=step) |
def extract_data_for_mask_loss_from_matches(proposals_targets: Iterable[Instances], estimated_segm: torch.Tensor) -> DataForMaskLoss:
data = DataForMaskLoss()
masks_gt = []
offset = 0
assert (estimated_segm.shape[2] == estimated_segm.shape[3]), f'Expected estimated segmentation to have a square shape, but the actual shape is {estimated_segm.shape[2:]}'
mask_size = estimated_segm.shape[2]
num_proposals = sum((inst.proposal_boxes.tensor.size(0) for inst in proposals_targets))
num_estimated = estimated_segm.shape[0]
assert (num_proposals == num_estimated), 'The number of proposals {} must be equal to the number of estimates {}'.format(num_proposals, num_estimated)
for proposals_targets_per_image in proposals_targets:
n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0)
if (not n_i):
continue
gt_masks_per_image = proposals_targets_per_image.gt_masks.crop_and_resize(proposals_targets_per_image.proposal_boxes.tensor, mask_size).to(device=estimated_segm.device)
masks_gt.append(gt_masks_per_image)
offset += n_i
if masks_gt:
data.masks_est = estimated_segm
data.masks_gt = torch.cat(masks_gt, dim=0)
return data |
class BaseEliminationOrder():
def __init__(self, model):
if (not isinstance(model, BayesianModel)):
raise ValueError('Model should be a BayesianModel instance')
self.bayesian_model = model.copy()
self.moralized_model = self.bayesian_model.moralize()
def cost(self, node):
return 0
def get_elimination_order(self, nodes=None, show_progress=True):
if (nodes is None):
nodes = self.bayesian_model.nodes()
nodes = set(nodes)
ordering = []
if (show_progress and SHOW_PROGRESS):
pbar = tqdm(total=len(nodes))
pbar.set_description('Finding Elimination Order: ')
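        # Greedy elimination: repeatedly remove the node with minimal cost, updating both the Bayesian and moralized graphs.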
while nodes:
scores = {node: self.cost(node) for node in nodes}
min_score_node = min(scores, key=scores.get)
ordering.append(min_score_node)
nodes.remove(min_score_node)
self.bayesian_model.remove_node(min_score_node)
self.moralized_model.remove_node(min_score_node)
if (show_progress and SHOW_PROGRESS):
pbar.update(1)
return ordering
def fill_in_edges(self, node):
return combinations(self.bayesian_model.neighbors(node), 2) |
@app.route('/get_accounts')
def get_accounts():
accounts = []
for address in app.eth_accounts:
item = app.eth_accounts[address]
accounts.append({'address': address, 'name': item['name'], 'type': 'emulator'})
Account.enable_unaudited_hdwallet_features()
local_account_names = app.configure['local_account_names']
for index in range(len(local_account_names)):
account = Account.from_mnemonic(app.configure['mnemonic_phrase'], account_path=app.configure['key_derivation_path'].format(index))
accounts.append({'address': account.address, 'name': local_account_names[index], 'type': 'local'})
return accounts |
def print_scores(scores):
print(f'mean: {np.mean(scores):.4f}, std: {np.std(scores):.4f}')
counter = collections.Counter(scores)
for (k, v) in sorted(counter.items()):
print(f'score {k}: count {v}')
print('total count:', len(scores)) |
def get_overview_paragraphs(overview, specific_summary_dir):
    overview_paragraphs = []
    try:
        soup = BeautifulSoup(urllib.request.urlopen(overview), 'html.parser')
    except Exception as e:
        print(e)
        # One retry after a short back-off before giving up
        time.sleep(4)
        try:
            soup = BeautifulSoup(urllib.request.urlopen(overview), 'html.parser')
        except Exception as e:
            print('Overview not found: ', e, overview)
            f_errors.write(f'{overview}\tOverview\t{specific_summary_dir}\n')
            return overview_paragraphs
    pat = r'(.*\(synopsis\))'
    paragraphs = soup.findAll(['p', 'h3'])
    iframe_text = 'Your browser does not support the IFRAME tag.'
    overview_text = ''
    # Scan paragraphs until one matches the "(synopsis)" heading pattern
    for (ix, paragraph) in enumerate(paragraphs):
        overview_text = paragraph.text.strip().replace(iframe_text, '').replace('\r\n', ' ').replace('\n', ' ')
        if re.match(pat, overview_text, re.IGNORECASE):
            break
    # Strip the matched "(synopsis)" prefix from the retained paragraph
    if re.match(pat, overview_text, re.IGNORECASE):
        to_replace = re.match(pat, overview_text, re.IGNORECASE).group(1)
        overview_text = overview_text.replace(to_replace, '')
    overview_text = remove_toc(overview_text)
    overview_text = unidecode(overview_text)
    overview_text = '. '.join([line.strip().rstrip() for line in overview_text.split('. ')])
    return overview_text |
@app.callback(Output('national-post-graph', 'figure'), Input('stored-df-data', 'data'), prevent_initial_call=True)
def update_fig_5(jsonified_cleaned_data):
    # Rebuild the DataFrame from the JSON kept in the dcc.Store component
    df = pd.read_json(jsonified_cleaned_data, orient='split')
    return plot_lines(df, 'National Post') |
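# Counterpart sketch (hypothetical): whichever callback fills 'stored-df-data'
# must serialize with the same orientation that pd.read_json expects here:
# df.to_json(orient='split')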
class SetVocab(BaseVocab):
    _base_special_tokens = [u'pad', u'root', u'unk']

    def __init__(self, *args, **kwargs):
        super(SetVocab, self).__init__(*args, **kwargs)
        # Apply the configured casing (and optional <...> wrapping) to the special tokens
        special_tokens = [getattr(base_special_token, self._config.getstr(self, 'special_token_case'))() for base_special_token in self._base_special_tokens]
        if self._config.getboolean(self, 'special_token_html'):
            special_tokens = [u'<%s>' % special_token for special_token in special_tokens]
        # Expose PAD_IDX/PAD_STR, ROOT_IDX/ROOT_STR, UNK_IDX/UNK_STR as attributes
        for (i, base_special_token) in enumerate(self._base_special_tokens):
            self.__dict__[base_special_token.upper() + '_IDX'] = i
            self.__dict__[base_special_token.upper() + '_STR'] = special_tokens[i]
        self._str2idx = dict(zip(special_tokens, range(len(special_tokens))))
        self._idx2str = dict(zip(range(len(special_tokens)), special_tokens))
        self._special_tokens = set(special_tokens)

    def add(self, token):
        return self.index(token)

    def token(self, index):
        assert isinstance(index, six.integer_types + (np.int32, np.int64))
        return self[index]

    def index(self, token):
        assert isinstance(token, six.string_types)
        return self[token]

    def get_root(self):
        return self.ROOT_STR

    @property
    def cased(self):
        return self._config.getboolean(self, 'cased')

    @property
    def base_special_tokens(self):
        return self._base_special_tokens

    @property
    def special_tokens(self):
        return self._special_tokens

    def __getitem__(self, key):
        if isinstance(key, six.string_types):
            if not self.cased and key not in self.special_tokens:
                key = key.lower()
            return self._str2idx.get(key, self.UNK_IDX)
        elif isinstance(key, six.integer_types + (np.int32, np.int64)):
            return self._idx2str.get(key, self.UNK_STR)
        elif hasattr(key, '__iter__'):
            return [self[k] for k in key]
        else:
            raise ValueError('key to SetVocab.__getitem__ must be (iterable of) string or integer')

    def __setitem__(self, key, value):
        if isinstance(key, six.string_types):
            if not self.cased and key not in self.special_tokens:
                key = key.lower()
            self._str2idx[key] = value
            self._idx2str[value] = key
        elif isinstance(key, six.integer_types + (np.int32, np.int64)):
            if not self.cased and value not in self.special_tokens:
                value = value.lower()
            self._idx2str[key] = value
            self._str2idx[value] = key
        elif hasattr(key, '__iter__') and hasattr(value, '__iter__'):
            for (k, v) in zip(key, value):
                self[k] = v
        else:
            raise ValueError('keys and values to SetVocab.__setitem__ must be (iterables of) string or integer')

    def __contains__(self, key):
        if isinstance(key, six.string_types):
            if not self.cased and key not in self.special_tokens:
                key = key.lower()
            return key in self._str2idx
        elif isinstance(key, six.integer_types + (np.int32, np.int64)):
            return key in self._idx2str
        else:
            raise ValueError('key to SetVocab.__contains__ must be string or integer')

    def __len__(self):
        return len(self._str2idx)

    def __iter__(self):
        # Iterate tokens in index order
        return (key for key in sorted(self._str2idx, key=self._str2idx.get)) |
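# Behavior sketch (assuming an uncased configuration): unknown strings fall
# back to UNK_IDX and unknown indices to UNK_STR:
# vocab['Hello'] == vocab['hello']   # non-special keys are lowercased first
# vocab[10 ** 9] == vocab.UNK_STR    # missing index -> UNK string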
def random_blur(image, height, width, p=1.0):
    del width  # only the height is needed to size the blur kernel

    def _transform(image):
        # Sample the blur strength uniformly, SimCLR-style
        sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
        return gaussian_blur(image, kernel_size=(height // 10), sigma=sigma, padding='SAME')

    return random_apply(_transform, p=p, x=image) |
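# Usage sketch (assumes the gaussian_blur/random_apply helpers above are in
# scope):
# image = tf.random.uniform([224, 224, 3])
# maybe_blurred = random_blur(image, height=224, width=224, p=0.5)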
def record_video_of_policy(task, world_params, policy_fn, file_name, number_of_resets, max_time_steps=100, env_wrappers=np.array([]), env_wrappers_args=np.array([])):
    actual_skip_frame = world_params['skip_frame']
    env = get_world(task.get_task_name(), task.get_task_params(), world_params, enable_visualization=False, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args)
    recorder = VideoRecorder(env, '{}.mp4'.format(file_name))
    for reset_idx in range(number_of_resets):
        obs = env.reset()
        recorder.capture_frame()
        for i in range(max_time_steps):
            desired_action = policy_fn(obs)
            # Repeat the chosen action for every skipped frame, recording each one
            for _ in range(actual_skip_frame):
                (obs, reward, done, info) = env.step(action=desired_action)
                recorder.capture_frame()
    recorder.close()
    env.close() |
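# Usage sketch (hypothetical task and agent; assumes the get_world and
# VideoRecorder helpers above are importable):
# record_video_of_policy(task=my_task, world_params={'skip_frame': 3},
#                        policy_fn=lambda obs: my_agent.predict(obs),
#                        file_name='rollout', number_of_resets=2)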
def pair_to_graph(sp1, sp2):
    g = Graph()
    # First partition: vertices tagged 1, chained within each part; a negative
    # entry also links to its absolute value in the second partition
    for part in sp1:
        part_list = list(part)
        if part_list:
            g.add_vertex((part_list[0], 1))
            if part_list[0] < 0:
                g.add_edge((part_list[0], 1), (abs(part_list[0]), 2))
            for i in range(1, len(part_list)):
                g.add_vertex((part_list[i], 1))
                if part_list[i] < 0:
                    g.add_edge((part_list[i], 1), (abs(part_list[i]), 2))
                g.add_edge((part_list[i - 1], 1), (part_list[i], 1))
    # Second partition: vertices tagged 2, chained within each part
    for part in sp2:
        part_list = list(part)
        if part_list:
            g.add_vertex((part_list[0], 2))
            for i in range(1, len(part_list)):
                g.add_vertex((part_list[i], 2))
                g.add_edge((part_list[i - 1], 2), (part_list[i], 2))
    return g |
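# Usage sketch with small hand-made partitions (plain lists work since the
# code only calls list(part)); the negative entry -2 in sp1 also gains an
# edge to its positive counterpart (2, 2) in the second partition:
# g = pair_to_graph([[1, -2]], [[1, 2]])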
def create_ret_var(ctx: LeanGenContext, offset: int, cast: str, ret_var_base: str, is_explicit: bool, is_tail_call: bool, pc_offset: int) -> str:
    ret_var = inc_name_sub(ret_var_base, ctx.name_sub)
    # Generalize the memory cell holding the return value to a fresh variable
    ctx.add_main("generalize' hr_rev_{}: {}mem (ap{} - {}) = {},".format(ret_var, (cast + ' ') if cast else '', pc_offset, -offset, ret_var))
    if is_explicit:
        ctx.add_main(f'simp only [hr_rev_{ret_var}] at h_call{pc_offset},')
    ctx.add_main(f'have htv_{ret_var} := hr_rev_{ret_var}.symm, clear hr_rev_{ret_var},')
    if ctx.rc_steps is not None:
        ctx.rc_steps.add_rc_ret_var(ret_var, f'htv_{ret_var}')
    if not is_tail_call:
        ctx.add_final(f'use_only [{ret_var}],')
    return f'htv_{ret_var}' |
def add_filehandler(logger, filepath, level=logging.DEBUG):
    # Attach a file handler that reuses the module-level `formatter`
    fh = logging.FileHandler(filepath)
    fh.setLevel(level)
    fh.setFormatter(formatter)
    logger.addHandler(fh) |
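# Usage sketch (assumes a module-level `formatter`, e.g. the one below):
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# logger = logging.getLogger(__name__)
# add_filehandler(logger, 'run.log')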