code stringlengths 281 23.7M |
|---|
def copy_config(config, config_path):
    """Copy the experiment config file into the newest run directory.

    Pops 'checkpoint_dir' from config['trainer'] (mutates *config*), looks
    under '<checkpoint_dir>/logs' for run subfolders and copies the config
    file into the lexicographically greatest one, if any exists.
    """

    def _newest_subfolder(folder):
        # Lexicographic max picks the latest run for timestamp-named folders;
        # None when there are no subfolders at all.
        candidates = [entry.path for entry in os.scandir(folder) if entry.is_dir()]
        return max(candidates, default=None)

    logs_dir = os.path.join(config['trainer'].pop('checkpoint_dir'), 'logs')
    target_dir = _newest_subfolder(logs_dir)
    if target_dir:
        shutil.copy2(config_path, os.path.join(target_dir, os.path.basename(config_path)))
class KiteQuadtree(KiteView):
    """Spool view presenting the scene quadtree plot plus its parameter tree."""

    title = 'Scene.quadtree'

    def __init__(self, spool):
        model = spool.model
        self.model = model
        self.main_widget = KiteQuadtreePlot(model)
        self.tools = {}
        self.param_quadtree = KiteParamQuadtree(model, self.main_widget, expanded=True)
        self.parameters = [self.param_quadtree]
        # Keep the view in sync when the underlying scene model is replaced.
        model.sigSceneModelChanged.connect(self.modelChanged)
        KiteView.__init__(self)

    def modelChanged(self):
        """Refresh plot and parameter widgets after the scene model changed."""
        self.main_widget.update()
        self.main_widget.transFromFrame()
        self.main_widget.updateFocalPoints()
        self.param_quadtree.updateValues()
        self.param_quadtree.onConfigUpdate()
        self.param_quadtree.updateEpsilonLimits()

    def activateView(self):
        """Ensure the quadtree exists, then enable plotting for this view."""
        # Removed stray no-op `()` expression statements that were left in the
        # class body (presumably residue of stripped decorators).
        self.model.scene.quadtree.ensureTree()
        self.main_widget.activatePlot()

    def deactivateView(self):
        """Disable plotting when the view is hidden."""
        self.main_widget.deactivatePlot()
class FlexibleReplayPool(ReplayPool):
    """Fixed-capacity replay pool storing samples in per-field numpy ring buffers.

    Fields are declared via *fields_attrs* (shape/dtype/optional initializer
    per field); writes wrap around once ``max_size`` samples have been added.
    """

    def __init__(self, max_size, fields_attrs, obs_filter=False, modify_rew=False):
        super(FlexibleReplayPool, self).__init__()
        max_size = int(max_size)
        self._max_size = max_size
        self.fields = {}
        self.fields_attrs = {}
        self.add_fields(fields_attrs)
        self.obs_filter = obs_filter
        self.modify_rew = modify_rew
        self._pointer = 0
        self._size = 0
        self._samples_since_save = 0

    @property
    def size(self):
        """Number of valid samples currently stored (<= _max_size)."""
        # Restored as a property: last_n_batch, batch_by_indices,
        # return_all_samples and __getstate__ all use `self.size` as an int;
        # without @property they would operate on the bound method itself.
        return self._size

    @property
    def field_names(self):
        """List of declared field names."""
        # Restored as a property: add_samples iterates `self.field_names`.
        return list(self.fields.keys())

    def add_fields(self, fields_attrs):
        """Allocate backing arrays for newly declared fields."""
        self.fields_attrs.update(fields_attrs)
        for (field_name, field_attrs) in fields_attrs.items():
            field_shape = (self._max_size, *field_attrs['shape'])
            initializer = field_attrs.get('initializer', np.zeros)
            self.fields[field_name] = initializer(field_shape, dtype=field_attrs['dtype'])

    def _advance(self, count=1):
        """Move the write pointer forward by *count*, wrapping at capacity."""
        self._pointer = (self._pointer + count) % self._max_size
        self._size = min(self._size + count, self._max_size)
        self._samples_since_save += count

    def add_sample(self, sample):
        """Add a single sample (dict of field -> value) to the pool."""
        samples = {key: value[None, ...] for (key, value) in sample.items()}
        self.add_samples(samples)

    def add_samples(self, samples):
        """Write a batch of samples at the current pointer, wrapping around."""
        field_names = list(samples.keys())
        num_samples = samples[field_names[0]].shape[0]
        index = np.arange(self._pointer, self._pointer + num_samples) % self._max_size
        for field_name in self.field_names:
            default_value = self.fields_attrs[field_name].get('default_value', 0.0)
            values = samples.get(field_name, default_value)
            # Fields not passed directly may be recovered from per-sample
            # 'infos' dicts, falling back to the field's default value.
            if ((field_name not in samples.keys()) and ('infos' in samples) and (field_name in samples['infos'][0].keys())):
                values = np.expand_dims(np.array([samples['infos'][i].get(field_name, default_value) for i in range(num_samples)]), axis=1)
            try:
                assert values.shape[0] == num_samples, f'value shape: {values.shape[0]}, expected: {num_samples}'
                if isinstance(values[0], dict):
                    # Flatten dict-valued entries by concatenating their values.
                    values = np.stack([np.concatenate([value[key] for key in value.keys()], axis=-1) for value in values])
                self.fields[field_name][index] = values
            except Exception as e:
                # NOTE(review): debugging aid left in place — prints the
                # traceback and drops into pdb on any write failure; consider
                # re-raising instead for non-interactive runs.
                import traceback
                traceback.print_exc(limit=10)
                print('[ DEBUG ] errors occurs: {}'.format(e))
                import pdb
                pdb.set_trace()
        self._advance(num_samples)

    def restore_samples(self, samples):
        """Overwrite the first N slots of each field with the given samples."""
        num_samples = samples[list(samples.keys())[0]].shape[0]
        index = np.arange(0, num_samples) % self._max_size
        for (key, values) in samples.items():
            assert key in self.field_names
            self.fields[key][index] = values

    def random_indices(self, batch_size):
        """Uniform random indices over the valid region (empty if pool empty)."""
        if self._size == 0:
            return np.arange(0, 0)
        return np.random.randint(0, self._size, batch_size)

    def random_batch(self, batch_size, field_name_filter=None, **kwargs):
        """Sample a random batch of *batch_size* stored samples."""
        random_indices = self.random_indices(batch_size)
        return self.batch_by_indices(random_indices, field_name_filter=field_name_filter, **kwargs)

    def last_n_batch(self, last_n, field_name_filter=None, **kwargs):
        """Return the most recent min(size, last_n) samples."""
        last_n_indices = np.arange(self._pointer - min(self.size, last_n), self._pointer) % self._max_size
        return self.batch_by_indices(last_n_indices, field_name_filter=field_name_filter, **kwargs)

    def filter_fields(self, field_names, field_name_filter):
        """Filter *field_names* by a name, a list of names, or a predicate."""
        if isinstance(field_name_filter, str):
            field_name_filter = [field_name_filter]
        if isinstance(field_name_filter, (list, tuple)):
            field_name_list = field_name_filter

            def filter_fn(field_name):
                return field_name in field_name_list
        else:
            filter_fn = field_name_filter
        filtered_field_names = [field_name for field_name in field_names if filter_fn(field_name)]
        return filtered_field_names

    def batch_by_indices(self, indices, field_name_filter=None):
        """Gather a batch at *indices*; raise if any index exceeds current size."""
        if np.any((indices % self._max_size) > self.size):
            raise ValueError('Tried to retrieve batch with indices greater than current size')
        field_names = self.field_names
        if field_name_filter is not None:
            field_names = self.filter_fields(field_names, field_name_filter)
        return {field_name: self.fields[field_name][indices] for field_name in field_names}

    def save_latest_experience(self, pickle_path):
        """Gzip-pickle all samples added since the last save."""
        latest_samples = self.last_n_batch(self._samples_since_save)
        with gzip.open(pickle_path, 'wb') as f:
            pickle.dump(latest_samples, f)
        self._samples_since_save = 0

    def load_experience(self, experience_path):
        """Load a gzip-pickled batch of samples and append it to the pool."""
        with gzip.open(experience_path, 'rb') as f:
            latest_samples = pickle.load(f)
        key = list(latest_samples.keys())[0]
        num_samples = latest_samples[key].shape[0]
        for (field_name, data) in latest_samples.items():
            assert data.shape[0] == num_samples, data.shape
        self.add_samples(latest_samples)
        self._samples_since_save = 0

    def return_all_samples(self):
        """Return every valid stored sample for every field."""
        return {field_name: self.fields[field_name][:self.size] for field_name in self.field_names}

    def __getstate__(self):
        # Only pickle the valid prefix of each field array to keep the
        # serialized pool small.
        state = self.__dict__.copy()
        state['fields'] = {field_name: self.fields[field_name][:self.size] for field_name in self.field_names}
        return state

    def __setstate__(self, state):
        # Re-pad truncated field arrays back to full capacity with zeros.
        if state['_size'] < state['_max_size']:
            pad_size = state['_max_size'] - state['_size']
            for field_name in state['fields'].keys():
                field_shape = state['fields_attrs'][field_name]['shape']
                state['fields'][field_name] = np.concatenate((state['fields'][field_name], np.zeros((pad_size, *field_shape))), axis=0)
        self.__dict__ = state
class TestSpectralClusterer(unittest.TestCase):
    """End-to-end tests for SpectralClusterer.predict on synthetic embeddings.

    Each test builds a small embedding matrix with known cluster structure,
    configures refinement / laplacian / autotune / fallback options, and
    checks the predicted labels (after enforcing a canonical ordering).
    """

    def setUp(self):
        super().setUp()
        pass

    def test_6by2_matrix(self):
        """Two clear clusters in a 6x2 matrix with default eigengap."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        refinement_options = refinement.RefinementOptions(gaussian_blur_sigma=0, p_percentile=0.95, refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
        clusterer = spectral_clusterer.SpectralClusterer(refinement_options=refinement_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 0, 1])
        np.testing.assert_equal(expected, labels)

    def test_1000by6_matrix(self):
        """Four noisy clusters of sizes 400/300/200/100 in a 1000x6 matrix."""
        matrix = np.array((((([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400) + ([[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300)) + ([[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200)) + ([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)))
        noisy = ((np.random.rand(1000, 6) * 2) - 1)
        matrix = (matrix + (noisy * 0.1))
        refinement_options = refinement.RefinementOptions(gaussian_blur_sigma=0, p_percentile=0.2, refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
        clusterer = spectral_clusterer.SpectralClusterer(refinement_options=refinement_options, stop_eigenvalue=0.01)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array((((([0] * 400) + ([1] * 300)) + ([2] * 200)) + ([3] * 100)))
        np.testing.assert_equal(expected, labels)

    def test_1000by6_matrix_reduce_dimension(self):
        """Same as test_1000by6_matrix but with spectral size capped at 100."""
        matrix = np.array((((([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400) + ([[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300)) + ([[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200)) + ([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)))
        noisy = ((np.random.rand(1000, 6) * 2) - 1)
        matrix = (matrix + (noisy * 0.1))
        refinement_options = refinement.RefinementOptions(gaussian_blur_sigma=0, p_percentile=0.2, refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
        clusterer = spectral_clusterer.SpectralClusterer(refinement_options=refinement_options, stop_eigenvalue=0.01, max_spectral_size=100)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array((((([0] * 400) + ([1] * 300)) + ([2] * 200)) + ([3] * 100)))
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_eigengap_normalizeddiff(self):
        """Two clusters using the NormalizedDiff eigengap criterion."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        refinement_options = refinement.RefinementOptions(gaussian_blur_sigma=0, p_percentile=0.95, refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
        clusterer = spectral_clusterer.SpectralClusterer(refinement_options=refinement_options, eigengap_type=EigenGapType.NormalizedDiff)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 0, 1])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_normalized_laplacian(self):
        """Two clusters with GraphCut laplacian and row-wise renormalization."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        refinement_sequence = []
        refinement_options = refinement.RefinementOptions(p_percentile=0.95, refinement_sequence=refinement_sequence)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=2, refinement_options=refinement_options, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 0, 1])
        np.testing.assert_equal(expected, labels)

    def test_1000by6_matrix_normalized_laplacian(self):
        """Four noisy clusters with GraphCut laplacian and row-wise renorm."""
        matrix = np.array((((([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400) + ([[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300)) + ([[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200)) + ([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)))
        noisy = ((np.random.rand(1000, 6) * 2) - 1)
        matrix = (matrix + (noisy * 0.1))
        refinement_sequence = []
        refinement_options = refinement.RefinementOptions(p_percentile=0.95, refinement_sequence=refinement_sequence)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=4, refinement_options=refinement_options, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array((((([0] * 400) + ([1] * 300)) + ([2] * 200)) + ([3] * 100)))
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_auto_tune(self):
        """Two clusters with auto-tuned p_percentile thresholding."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        refinement_sequence = [RefinementName.RowWiseThreshold]
        refinement_options = refinement.RefinementOptions(thresholding_type=ThresholdType.Percentile, refinement_sequence=refinement_sequence)
        auto_tune = autotune.AutoTune(p_percentile_min=0.6, p_percentile_max=0.95, init_search_step=0.05, search_level=1)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=2, refinement_options=refinement_options, autotune=auto_tune, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 0, 1])
        np.testing.assert_equal(expected, labels)

    def test_2by2_matrix_auto_tune(self):
        """Tiny 2x2 input triggers the spectral_min_embeddings fallback path."""
        matrix = np.array([[1.0, 0.0], [0.0, 1.0]])
        refinement_sequence = [RefinementName.RowWiseThreshold]
        refinement_options = refinement.RefinementOptions(thresholding_type=ThresholdType.Percentile, refinement_sequence=refinement_sequence)
        auto_tune = autotune.AutoTune(p_percentile_min=0.6, p_percentile_max=0.95, init_search_step=0.05, search_level=1, proxy=AutoTuneProxy.PercentileOverNME)
        fallback_options = fallback_clusterer.FallbackOptions(spectral_min_embeddings=3)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=2, refinement_options=refinement_options, autotune=auto_tune, fallback_options=fallback_options, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 1])
        np.testing.assert_equal(expected, labels)

    def test_1000by6_matrix_auto_tune(self):
        """Four noisy clusters recovered with auto-tuned thresholding."""
        matrix = np.array((((([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 400) + ([[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]] * 300)) + ([[0.0, 0.0, 2.0, 0.0, 0.0, 0.0]] * 200)) + ([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]] * 100)))
        noisy = ((np.random.rand(1000, 6) * 2) - 1)
        matrix = (matrix + (noisy * 0.1))
        refinement_sequence = [RefinementName.RowWiseThreshold]
        refinement_options = refinement.RefinementOptions(thresholding_type=ThresholdType.Percentile, refinement_sequence=refinement_sequence)
        auto_tune = autotune.AutoTune(p_percentile_min=0.9, p_percentile_max=0.95, init_search_step=0.03, search_level=1)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=4, refinement_options=refinement_options, autotune=auto_tune, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array((((([0] * 400) + ([1] * 300)) + ([2] * 200)) + ([3] * 100)))
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_affinity_integration(self):
        """Constraint matrix merged into affinity via Max integration."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        constraint_matrix = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]])
        refinement_sequence = [RefinementName.RowWiseThreshold, RefinementName.Symmetrize]
        refinement_options = refinement.RefinementOptions(p_percentile=0.95, thresholding_type=ThresholdType.Percentile, thresholding_with_binarization=True, thresholding_preserve_diagonal=True, symmetrize_type=SymmetrizeType.Average, refinement_sequence=refinement_sequence)
        constraint_options = constraint.ConstraintOptions(constraint_name=ConstraintName.AffinityIntegration, apply_before_refinement=False, integration_type=IntegrationType.Max)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=2, refinement_options=refinement_options, constraint_options=constraint_options, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix, constraint_matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 1, 1])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_constraint_propagation(self):
        """Constraint matrix applied via constraint propagation (alpha=0.6)."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        constraint_matrix = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, (- 1)], [0, 0, 0, 0, (- 1), 1]])
        refinement_sequence = [RefinementName.RowWiseThreshold, RefinementName.Symmetrize]
        refinement_options = refinement.RefinementOptions(p_percentile=0.95, thresholding_type=ThresholdType.Percentile, thresholding_with_binarization=True, thresholding_preserve_diagonal=True, symmetrize_type=SymmetrizeType.Average, refinement_sequence=refinement_sequence)
        constraint_options = constraint.ConstraintOptions(constraint_name=ConstraintName.ConstraintPropagation, apply_before_refinement=True, constraint_propagation_alpha=0.6)
        clusterer = spectral_clusterer.SpectralClusterer(max_clusters=2, refinement_options=refinement_options, constraint_options=constraint_options, laplacian_type=LaplacianType.GraphCut, row_wise_renorm=True)
        labels = clusterer.predict(matrix, constraint_matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 0, 1])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_single_cluster(self):
        """All points in one cluster when min_clusters=1 allows it."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [1.0, 0.0], [1.1, 0.0], [0.9, (- 0.1)], [1.0, 0.2]])
        refinement_options = refinement.RefinementOptions(gaussian_blur_sigma=0, p_percentile=0.95, refinement_sequence=ICASSP2018_REFINEMENT_SEQUENCE)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, refinement_options=refinement_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 0, 0, 0])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_single_cluster_all_affinity(self):
        """AllAffinity single-cluster condition: threshold flips the outcome."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [1.0, 0.0], [1.1, 0.0], [0.9, (- 0.1)], [1.0, 0.5]])
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.AllAffinity, single_cluster_affinity_threshold=0.93)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 0, 0, 1])
        np.testing.assert_equal(expected, labels)
        # Lower threshold -> the outlier is absorbed into a single cluster.
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.AllAffinity, single_cluster_affinity_threshold=0.91)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 0, 0, 0])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_single_cluster_neighbor_affinity(self):
        """NeighborAffinity single-cluster condition with two thresholds."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [1.0, 0.0], [1.0, 0.5], [1.1, 0.0], [0.9, (- 0.1)]])
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.NeighborAffinity, single_cluster_affinity_threshold=0.96)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 1, 0, 0])
        np.testing.assert_equal(expected, labels)
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.NeighborAffinity, single_cluster_affinity_threshold=0.94)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 0, 0, 0])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_single_cluster_affinity_std(self):
        """AffinityStd single-cluster condition with two thresholds."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [1.0, 0.0], [1.0, 0.5], [1.1, 0.0], [0.9, (- 0.1)]])
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.AffinityStd, single_cluster_affinity_threshold=0.02)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 1, 0, 0])
        np.testing.assert_equal(expected, labels)
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.AffinityStd, single_cluster_affinity_threshold=0.03)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 0, 0, 0])
        np.testing.assert_equal(expected, labels)

    def test_6by2_matrix_single_cluster_fallback_naive(self):
        """Naive fallback clusterer decides single vs. multi cluster."""
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [1.0, 0.0], [1.0, 0.5], [1.1, 0.0], [0.9, (- 0.1)]])
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.FallbackClusterer, fallback_clusterer_type=FallbackClustererType.Naive, naive_threshold=0.95)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 1, 0, 0])
        np.testing.assert_equal(expected, labels)
        fallback_options = FallbackOptions(single_cluster_condition=SingleClusterCondition.FallbackClusterer, fallback_clusterer_type=FallbackClustererType.Naive, naive_threshold=0.9)
        clusterer = spectral_clusterer.SpectralClusterer(min_clusters=1, laplacian_type=LaplacianType.GraphCut, refinement_options=None, fallback_options=fallback_options)
        labels = clusterer.predict(matrix)
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 0, 0, 0, 0])
        np.testing.assert_equal(expected, labels)
def mirror_files(*path_patterns: str, exclude_name_patterns: Sequence[str]=('.*', '_*'), include_directories: bool=True, cwd: Optional[Union[Path, str]]=None, verbose: Optional[bool]=None) -> List[Path]:
    """Mirror paths matching *path_patterns* into *cwd* as relative symlinks.

    Deprecated. Existing symlinks in *cwd* are removed first (directories only
    when *include_directories* is true), then a relative symlink is created
    for each path found by ``util.find_paths``, skipping parents of *cwd* and
    names that already exist in *cwd*.

    Returns the list of created link paths.

    Note: the default for *exclude_name_patterns* is a tuple (not a list) to
    avoid the shared-mutable-default pitfall; callers may still pass any
    sequence.
    """
    log.bad('workflow: mirror_files() has been deprecated')
    if cwd is None:
        cwd = Path.cwd()
    elif isinstance(cwd, str):
        cwd = Path(cwd)
    # Clear out symlinks left over from a previous mirror run.
    for path in cwd.iterdir():
        if path.is_symlink():
            if (not include_directories) and path.is_dir():
                continue
            path.unlink()
    create = {}
    paths = util.find_paths(path_patterns=path_patterns, exclude_name_patterns=exclude_name_patterns, cwd=cwd)
    for real_path in paths:
        # Never link a directory that contains cwd itself (would self-link).
        try:
            cwd.relative_to(os.path.normpath(real_path))
        except ValueError:
            is_parent_directory = False
        else:
            is_parent_directory = True
        if is_parent_directory:
            continue
        if (not include_directories) and real_path.is_dir():
            continue
        link_path = cwd / real_path.name
        if link_path.exists():
            continue
        relative_path = os.path.relpath(real_path, cwd)
        create[link_path] = relative_path
    if create and is_verbose(verbose):
        names = [path.name for path in create.keys()]
        log.ok(f"mirror: {' '.join(sorted(names))}")
    created = []
    for (link_path, relative_path) in create.items():
        link_path.symlink_to(relative_path)
        created.append(link_path)
    return created
class SmtLibCommand(namedtuple('SmtLibCommand', ['name', 'args'])):
    """An SMT-LIB command as an immutable (name, args) pair.

    Serialization dispatches on *name*; each branch emits the command in
    SMT-LIB concrete syntax.
    """

    def serialize(self, outstream=None, printer=None, daggify=True):
        """Write this command in SMT-LIB syntax to *outstream*.

        Exactly one of *outstream* / *printer* is expected: given only a
        printer, its stream is used; given only an outstream, a printer is
        built for it (dag-shaped expression output when *daggify* is True).
        """
        if ((outstream is None) and (printer is not None)):
            outstream = printer.stream
        elif ((outstream is not None) and (printer is None)):
            if daggify:
                printer = SmtDagPrinter(outstream)
            else:
                printer = SmtPrinter(outstream)
        else:
            # NOTE(review): in this branch both args are set or both are None,
            # so this assertion is always true; the both-None case falls
            # through and later fails on outstream.write — confirm intended.
            assert (((outstream is not None) and (printer is not None)) or ((outstream is None) and (printer is None))), 'Exactly one of outstream and printer must be set.'
        if (self.name == smtcmd.SET_OPTION):
            outstream.write(('(%s %s %s)' % (self.name, self.args[0], self.args[1])))
        elif (self.name == smtcmd.SET_INFO):
            # Info values are quoted; option values are not.
            outstream.write(('(%s %s %s)' % (self.name, self.args[0], quote(self.args[1]))))
        elif (self.name == smtcmd.ASSERT):
            outstream.write(('(%s ' % self.name))
            printer.printer(self.args[0])
            outstream.write(')')
        elif (self.name == smtcmd.GET_VALUE):
            outstream.write(('(%s (' % self.name))
            for a in self.args:
                printer.printer(a)
                outstream.write(' ')
            outstream.write('))')
        elif (self.name in [smtcmd.CHECK_SAT, smtcmd.EXIT, smtcmd.RESET_ASSERTIONS, smtcmd.GET_UNSAT_CORE, smtcmd.GET_ASSIGNMENT, smtcmd.GET_MODEL]):
            # Zero-argument commands.
            outstream.write(('(%s)' % self.name))
        elif (self.name == smtcmd.SET_LOGIC):
            outstream.write(('(%s %s)' % (self.name, self.args[0])))
        elif (self.name in [smtcmd.DECLARE_FUN, smtcmd.DECLARE_CONST]):
            symbol = self.args[0]
            type_str = symbol.symbol_type().as_smtlib()
            outstream.write(('(%s %s %s)' % (self.name, quote(symbol.symbol_name()), type_str)))
        elif (self.name == smtcmd.DEFINE_FUN):
            # args: (name, parameter list, return type, body expression)
            name = self.args[0]
            params_list = self.args[1]
            params = ' '.join([('(%s %s)' % (v, v.symbol_type().as_smtlib(funstyle=False))) for v in params_list])
            rtype = self.args[2]
            expr = self.args[3]
            outstream.write(('(%s %s (%s) %s ' % (self.name, name, params, rtype.as_smtlib(funstyle=False))))
            printer.printer(expr)
            outstream.write(')')
        elif (self.name in [smtcmd.PUSH, smtcmd.POP]):
            outstream.write(('(%s %d)' % (self.name, self.args[0])))
        elif (self.name == smtcmd.DEFINE_SORT):
            # args: (name, parameter sorts, definition sort)
            name = self.args[0]
            params_list = self.args[1]
            params = ' '.join((x.as_smtlib(funstyle=False) for x in params_list))
            rtype = self.args[2]
            outstream.write(('(%s %s (%s) %s)' % (self.name, name, params, rtype.as_smtlib(funstyle=False))))
        elif (self.name == smtcmd.DECLARE_SORT):
            type_decl = self.args[0]
            outstream.write(('(%s %s %d)' % (self.name, type_decl.name, type_decl.arity)))
        elif (self.name in smtcmd.ALL_COMMANDS):
            raise NotImplementedError(("'%s' is a valid SMT-LIB command but it is currently not supported. Please open a bug-report." % self.name))
        else:
            raise UnknownSmtLibCommandError(self.name)

    def serialize_to_string(self, daggify=True):
        """Return this command serialized to an SMT-LIB string."""
        buf = StringIO()
        self.serialize(buf, daggify=daggify)
        return buf.getvalue()
class SocketTests(unittest.TestCase):
    """Tests for Proxy socket setup with socket.socket replaced by MockSocket."""

    def setUp(self):
        # Swap the real socket class for the mock; restored in tearDown.
        self.orgsocket = socket.socket
        socket.socket = MockSocket
        self.proxy = Proxy()
        self.proxy._fdmap = {}

    def tearDown(self):
        socket.socket = self.orgsocket

    def testProxyFd(self):
        """_PrepareSockets creates the proxy fd and registers it with the poll."""
        self.proxy._poll = MockPoll()
        self.proxy._PrepareSockets()
        self.assertTrue(isinstance(self.proxy._proxyfd, MockSocket))
        self.assertEqual(list(self.proxy._fdmap.keys()), [1])
        # fd 1 should be registered for input, priority-input and error events.
        self.assertEqual(self.proxy._poll.registry, {1: ((select.POLLIN | select.POLLPRI) | select.POLLERR)})
def upgrade(op, tables, tester):
    """Alembic migration: create namespace auto-prune policy/status tables.

    Creates 'namespaceautoprunepolicy' (one policy per namespace, keyed to
    user.id) and 'autoprunetaskstatus' (per-namespace worker bookkeeping),
    each with unique namespace_id indexes.
    """
    op.create_table('namespaceautoprunepolicy', sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('namespace_id', sa.Integer(), nullable=False), sa.Column('policy', sa.Text(), nullable=False), sa.ForeignKeyConstraint(['namespace_id'], ['user.id'], name=op.f('fk_namespaceautoprunepolicy_namespace_id_user')), sa.PrimaryKeyConstraint('id', name=op.f('pk_namespaceautoprunepolicyid')))
    # Unique: at most one auto-prune policy per namespace.
    op.create_index('namespaceautoprunepolicy_namespace_id', 'namespaceautoprunepolicy', ['namespace_id'], unique=True)
    op.create_index('namespaceautoprunepolicy_uuid', 'namespaceautoprunepolicy', ['uuid'], unique=True)
    op.create_table('autoprunetaskstatus', sa.Column('id', sa.Integer(), nullable=False), sa.Column('namespace_id', sa.Integer(), nullable=False), sa.Column('last_ran_ms', sa.BigInteger(), nullable=True), sa.Column('status', sa.Text(), nullable=True), sa.ForeignKeyConstraint(['namespace_id'], ['user.id'], name=op.f('fk_autoprunetaskstatus_namespace_id_user')), sa.PrimaryKeyConstraint('id', name=op.f('pk_autoprunetaskstatusid')))
    op.create_index('autoprunetaskstatus_namespace_id', 'autoprunetaskstatus', ['namespace_id'], unique=True)
    # Non-unique: workers query by last_ran_ms to pick the stalest task.
    op.create_index('autoprunetaskstatus_last_ran_ms', 'autoprunetaskstatus', ['last_ran_ms'], unique=False)
class ReportSlaveIdResponse(ModbusResponse):
    """Modbus 'Report Slave ID' (function 17) response PDU.

    Carries an opaque identifier blob plus a run-status indicator byte.
    """

    function_code = 17
    _rtu_byte_count_pos = 2

    def __init__(self, identifier=b'\x00', status=True, **kwargs):
        """Initialize the response with an identifier blob and run status."""
        ModbusResponse.__init__(self, **kwargs)
        self.identifier = identifier
        self.status = status
        self.byte_count = None

    def encode(self):
        """Serialize to wire format: length byte, identifier, status byte."""
        run_indicator = ModbusStatus.SLAVE_ON if self.status else ModbusStatus.SLAVE_OFF
        frame = struct.pack('>B', len(self.identifier) + 1)
        frame += self.identifier
        frame += struct.pack('>B', run_indicator)
        return frame

    def decode(self, data):
        """Populate byte_count, identifier and status from wire bytes."""
        self.byte_count = int(data[0])
        self.identifier = data[1:self.byte_count + 1]
        self.status = int(data[-1]) == ModbusStatus.SLAVE_ON

    def __str__(self) -> str:
        """Human-readable summary of the response."""
        return f'ReportSlaveIdResponse({self.function_code}, {self.identifier}, {self.status})'
def call_cmd(command, *args, **kwargs):
    """Run *command* via exec_cmd and return (returncode, stdout, stderr).

    With ignore_errors=True (popped from kwargs), launch failures are turned
    into a (1, '', stripped-error-message) result instead of raising.
    """
    ignore_errors = kwargs.pop('ignore_errors', False)
    try:
        proc = exec_cmd(command, *args, **kwargs)
    except Exception as exc:
        if not ignore_errors:
            raise
        return (1, '', to_str(exc).strip())
    out, err = proc.communicate()
    return (proc.returncode, out.strip(), err.strip())
def get_cifar100c(ctype, intensity, mean=(0.5071, 0.4867, 0.4408), std=(0.2675, 0.2565, 0.2761), root='./data', download=True, **kwargs):
    """Build a CIFAR-100-C dataset for a given corruption type and intensity.

    Applies tensor conversion plus channel-wise normalization with the given
    *mean* and *std* before returning the dataset.
    """
    preprocess = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
    return cifarc.CIFAR100C(root, ctype, intensity, transform=preprocess, download=download)
class PSPModule(nn.Module):
    """Pyramid pooling module: parallel PSPBlocks concatenated with the input."""

    def __init__(self, in_channels, sizes=(1, 2, 3, 6), use_bathcnorm=True):
        super().__init__()
        # Each branch keeps an equal share of the input channels.
        branch_channels = in_channels // len(sizes)
        self.blocks = nn.ModuleList(
            [PSPBlock(in_channels, branch_channels, size, use_bathcnorm=use_bathcnorm) for size in sizes]
        )

    def forward(self, x):
        """Run every pyramid branch on *x* and concatenate with *x* itself."""
        branch_outputs = [block(x) for block in self.blocks]
        branch_outputs.append(x)
        return torch.cat(branch_outputs, dim=1)
# Restored the '@pytest.mark' prefix: the source line began with a bare
# '.parametrize(', which is a syntax error and is clearly a truncated
# parametrize decorator.
@pytest.mark.parametrize('source, result', [
    ('\nfrom __future__ import print_function,\n division, with_statement,\n unicode_literals\n',
     _FF.print_function | _FF.division | _FF.with_statement | _FF.unicode_literals),
    ("\nfrom __future__ import print_function, division\nprint('hello')\n",
     _FF.print_function | _FF.division),
    # Unknown names and stray commas must be tolerated and ignored.
    ("\nfrom __future__ import print_function, division, unknown,,,,,\nprint 'hello'\n",
     _FF.print_function | _FF.division),
    # Parenthesized import form.
    ('\nfrom __future__ import (\n print_function,\n division)\n',
     _FF.print_function | _FF.division),
    # Backslash-continued import form.
    ('\nfrom __future__ import \\\n print_function, \\\n division\n',
     _FF.print_function | _FF.division),
])
def test_parse_future(source, result):
    """parse_future_flags extracts the correct __future__ feature bitmask."""
    fp = BytesIO(source.encode('latin-1'))
    flags = parse_future_flags(fp)
    assert flags == result
def test_H_opt():
    """'-H' prints help for formatters, lexers and filters; unknown names fail."""
    for kind, name, expected in (
        ('formatter', 'html', 'HTML'),
        ('lexer', 'python', 'Python'),
        ('filter', 'raiseonerror', 'raiseonerror'),
    ):
        output = check_success('-H', kind, name)
        assert expected in output
    error = check_failure('-H', 'lexer', 'foobar')
    assert 'not found' in error
class GraphDisplay(DisplayOptionalPage):
def __init__(self, parent, tabname, helptext, waittime, command=None):
self.trace_var = None
super().__init__(parent, tabname, helptext, waittime, command)
def add_options(self):
self.add_option_refresh()
super().add_options()
self.add_option_smoothing()
def add_option_refresh(self):
logger.debug('Adding refresh option')
tk_var = get_config().tk_vars['refreshgraph']
btnrefresh = ttk.Button(self.optsframe, image=get_images().icons['reset'], command=(lambda : tk_var.set(True)))
btnrefresh.pack(padx=2, side=tk.RIGHT)
Tooltip(btnrefresh, text='Graph updates at every model save. Click to refresh now.', wraplength=200)
logger.debug('Added refresh option')
def add_option_smoothing(self):
logger.debug('Adding Smoothing Slider')
tk_var = get_config().tk_vars['smoothgraph']
min_max = (0, 0.99)
hlp = 'Set the smoothing amount. 0 is no smoothing, 0.99 is maximum smoothing.'
ctl_frame = ttk.Frame(self.optsframe)
ctl_frame.pack(padx=2, side=tk.RIGHT)
lbl = ttk.Label(ctl_frame, text='Smoothing Amount:', anchor=tk.W)
lbl.pack(pady=5, side=tk.LEFT, anchor=tk.N, expand=True)
tbox = ttk.Entry(ctl_frame, width=6, textvariable=tk_var, justify=tk.RIGHT)
tbox.pack(padx=(0, 5), side=tk.RIGHT)
ctl = ttk.Scale(ctl_frame, variable=tk_var, command=(lambda val, var=tk_var, dt=float, rn=2, mm=(0, 0.99): set_slider_rounding(val, var, dt, rn, mm)))
ctl['from_'] = min_max[0]
ctl['to'] = min_max[1]
ctl.pack(padx=5, pady=5, fill=tk.X, expand=True)
for item in (tbox, ctl):
Tooltip(item, text=hlp, wraplength=200)
logger.debug('Added Smoothing Slider')
def display_item_set(self):
session = get_config().session
smooth_amount_var = get_config().tk_vars['smoothgraph']
if (session.initialized and session.logging_disabled):
logger.trace('Logs disabled. Hiding graph')
self.set_info("Graph is disabled as 'no-logs' or 'pingpong' has been selected")
self.display_item = None
if (self.trace_var is not None):
smooth_amount_var.trace_vdelete('w', self.trace_var)
self.trace_var = None
elif session.initialized:
logger.trace('Loading graph')
self.display_item = session
if (self.trace_var is None):
self.trace_var = smooth_amount_var.trace('w', self.smooth_amount_callback)
else:
self.display_item = None
if (self.trace_var is not None):
smooth_amount_var.trace_vdelete('w', self.trace_var)
self.trace_var = None
def display_item_process(self):
    """Create a graph tab per loss key, keeping any 'total' key first."""
    logger.trace('Adding graph')
    existing = list(self.subnotebook_get_titles_ids().keys())
    display_tabs = sorted(self.display_item.loss_keys)
    # move the first 'total*' key (if any) to the front of the tab order
    for idx, key in enumerate(display_tabs):
        if key.startswith('total'):
            display_tabs.insert(0, display_tabs.pop(idx))
            break
    for loss_key in display_tabs:
        tabname = loss_key.replace('_', ' ').title()
        if tabname in existing:
            continue
        data = Calculations(session=get_config().session,
                            display='loss',
                            loss_keys=[loss_key],
                            selections=['raw', 'smoothed'],
                            smooth_amount=get_config().tk_vars['smoothgraph'].get())
        self.add_child(tabname, data)
def smooth_amount_callback(self, *args):
    """Push the smoothing tk variable's new value to every graph's calculations."""
    new_amount = get_config().tk_vars['smoothgraph'].get()
    logger.debug('Updating graph smooth_amount: (new_value: %s, args: %s)', new_amount, args)
    for child in self.subnotebook.children.values():
        child.calcs.args['smooth_amount'] = new_amount
def add_child(self, name, data):
    """Build a TrainingGraph for `data` and add it as a notebook page called `name`."""
    logger.debug('Adding child: %s', name)
    new_graph = TrainingGraph(self.subnotebook, data, 'Loss')
    new_graph.build()
    page = self.subnotebook_add_page(name, widget=new_graph)
    Tooltip(page, text=self.helptext, wraplength=200)
def save_items(self):
    """Prompt for an output folder and save every graph's figure into it."""
    location = FileHandler('dir', None).retfile
    if not location:
        # user cancelled the folder dialog
        return
    for graph in self.subnotebook.children.values():
        graph.save_fig(location)
def close(self):
    """Tear down the graphs tab: detach the smoothing trace, clear each graph."""
    if (self.trace_var is not None):
        # remove the tk variable trace first so callbacks stop firing mid-teardown
        get_config().tk_vars['smoothgraph'].trace_vdelete('w', self.trace_var)
        self.trace_var = None
    if (self.subnotebook is None):
        logger.debug('No graphs to clear. Returning')
        return
    for (name, graph) in self.subnotebook.children.items():
        logger.debug('Clearing: %s', name)
        graph.clear()
    super().close()
class Effect11942(BaseEffect):
    """Passive hull effect: boosts kinetic damage of missile charges.

    Fix: `handler` takes no `self`/`cls` but was declared as a plain method;
    `@staticmethod` is restored so it can be called through the class without
    binding an instance.
    """
    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        # Apply the hull's 'shipBonusGD1' attribute as a kinetic-damage boost to
        # charges requiring 'Missile Launcher Operation', scaled by the
        # 'Gallente Destroyer' skill.
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'kineticDamage', ship.getModifiedItemAttr('shipBonusGD1'), skill='Gallente Destroyer', **kwargs)
class ImageNetEvaluator():
    """Evaluates top-1/top-5 accuracy of an ImageNet model inside a TF session."""

    def __init__(self, tfrecord_dir: str, training_inputs: List[str], data_inputs: List[str], validation_inputs: List[str], image_size: int=224, batch_size: int=128, format_bgr: bool=False, model_type: str='resnet'):
        """
        :param tfrecord_dir: directory containing the ImageNet TFRecords
        :param training_inputs: names of the graph's training-phase tensors
        :param data_inputs: names of the graph's image input tensors
        :param validation_inputs: names of the graph's label input tensors
        :param image_size: input image side length in pixels
        :param batch_size: evaluation batch size
        :param format_bgr: if True, the loader delivers channel-reversed images
        :param model_type: model family passed through to the data loader
        :raises ValueError: if any of the three input-name lists is empty
        """
        if (not data_inputs):
            raise ValueError('data_inputs list cannot be empty for imagenet')
        self._data_inputs = data_inputs
        if (not validation_inputs):
            raise ValueError('validation_inputs list cannot be empty for imagenet')
        self._validation_inputs = validation_inputs
        if (not training_inputs):
            raise ValueError('training_inputs list cannot be empty for imagenet')
        self._training_inputs = training_inputs
        # validation loader: single pass (num_epochs=1), no training augmentation
        self._val_data_loaders = ImageNetDataLoader(tfrecord_dir=tfrecord_dir, image_size=image_size, batch_size=batch_size, num_epochs=1, format_bgr=format_bgr, is_training=False, model_type=model_type)
        self._batch_size = batch_size

    def evaluate(self, session: tf.Session, iterations: int=None) -> float:
        """Run the graph's 'top1-acc'/'top5-acc' ops over the validation set.

        :param session: TF session whose graph contains the eval ops
        :param iterations: number of batches to run; defaults to a full epoch
        :return: sample-weighted average top-1 accuracy
        """
        if (iterations is None):
            iterations = (image_net_config.dataset['val_images_len'] // self._batch_size)
        # feed tensors: images + labels, plus training-phase flags pinned to False
        input_label_tensors = [session.graph.get_tensor_by_name(input_label) for input_label in (tuple(self._data_inputs) + tuple(self._validation_inputs))]
        train_tensors = [session.graph.get_tensor_by_name(training_input) for training_input in self._training_inputs]
        train_tensors_dict = dict.fromkeys(train_tensors, False)
        eval_names = ['top1-acc', 'top5-acc']
        eval_outputs = [session.graph.get_operation_by_name(name).outputs[0] for name in eval_names]
        acc_top1 = 0
        acc_top5 = 0
        logger.info('Evaluating graph for %d iterations with batch_size %d', iterations, self._batch_size)
        curr_iter = 0
        total_samples = 0
        with progressbar.ProgressBar(max_value=iterations) as progress_bar:
            for input_label in self._val_data_loaders:
                input_label_tensors_dict = dict(zip(input_label_tensors, input_label))
                feed_dict = {**input_label_tensors_dict, **train_tensors_dict}
                with session.graph.as_default():
                    output_data = session.run(eval_outputs, feed_dict=feed_dict)
                # weight per-batch accuracies by the (possibly short) batch size
                curr_samples = input_label[0].shape[0]
                acc_top1 += (curr_samples * output_data[0])
                acc_top5 += (curr_samples * output_data[1])
                total_samples += curr_samples
                curr_iter += 1
                progress_bar.update(curr_iter)
                if (curr_iter >= iterations):
                    break
        logger.info('Avg accuracy Top 1: %f Avg accuracy Top 5: %f on validation Dataset', (acc_top1 / total_samples), (acc_top5 / total_samples))
        return (acc_top1 / total_samples)
def test_set_observation_field(requests_mock):
    """set_observation_field POSTs a value and returns the created record.

    Fix: the observation-id literal was lost from this file
    (`observation_id=,` was a syntax error); a named constant restores valid
    syntax in both the call and the assertion.
    """
    requests_mock.post(f'{API_V1}/observation_field_values', json=SAMPLE_DATA['post_put_observation_field_value'], status_code=200)
    # NOTE(review): placeholder id — confirm it matches the 'observation_id'
    # inside SAMPLE_DATA['post_put_observation_field_value'].
    observation_id = 18880281
    response = set_observation_field(observation_id=observation_id, observation_field_id=31, value='fouraging', access_token='token')
    assert (response['id'] == 31)
    assert (response['observation_field_id'] == 31)
    assert (response['observation_id'] == observation_id)
    assert (response['value'] == 'fouraging')
class PublisherPaidImpression(BasePublisherImpression):
    """Per-publisher, per-day paid-impression totals."""
    # PROTECT: a publisher with recorded impressions cannot be deleted.
    publisher = models.ForeignKey(Publisher, related_name='publisher_paid_impressions', on_delete=models.PROTECT, null=True)

    class Meta():
        # newest day first; at most one row per publisher per day
        ordering = ('-date',)
        unique_together = ('publisher', 'date')
        verbose_name_plural = _('Publisher paid impressions')
def train(epoch):
    """Run one training epoch of the Bayesian network and print its accuracy.

    Uses the module-level `net`, `trainloader`, `trainset`, `optimizer`,
    `elbo` and `get_beta`. Prints the epoch header and the final training
    accuracy.

    Fixes: removed the dead `train_loss` accumulator (initialized but never
    updated or read) and the unused `batch_idx` from the loop.
    """
    print(f'Epoch: {epoch}')
    net.train()
    correct = 0
    total = 0
    for inputs, targets in trainloader:
        inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs, kl = net(inputs)
        # ELBO loss: data term + KL term annealed by the per-epoch beta schedule
        loss = elbo(outputs, targets, kl, get_beta(epoch, len(trainset)))
        loss.backward()
        optimizer.step()
        pred = torch.max(outputs, dim=1)[1]
        correct += torch.sum(pred.eq(targets)).item()
        total += targets.numel()
    print(f'[TRAIN] Acc: {((100.0 * correct) / total):.3f}')
class BinPacking(OptimizationApplication):
    """Bin-packing optimization application.

    Builds a docplex model packing items of given weights into the minimum
    number of capacity-limited bins, converts it to a QuadraticProgram, and
    interprets/plots results.

    Fix: the ``_optionals.HAS_MATPLOTLIB.require_in_call`` line above
    ``get_figure`` had lost its ``@`` and was a no-op expression; the
    decorator is restored.
    """

    def __init__(self, weights: List[int], max_weight: int, max_number_of_bins: Optional[int]=None) -> None:
        """
        Args:
            weights: weight of each item.
            max_weight: capacity of a single bin.
            max_number_of_bins: upper bound on usable bins; defaults to
                ``len(weights)``, which always admits a feasible solution.
        """
        self._weights = weights
        self._max_weight = max_weight
        self._max_number_of_bins = len(weights) if max_number_of_bins is None else max_number_of_bins

    def to_quadratic_program(self) -> QuadraticProgram:
        """Return the bin-packing problem as a QuadraticProgram."""
        mdl = Model(name='BinPacking')
        num_bins = self._max_number_of_bins
        num_items = len(self._weights)
        # y[j] == 1 iff bin j is used; objective minimizes the number of used bins
        y = mdl.binary_var_list(num_bins, name='y')
        mdl.minimize(mdl.sum(y))
        # x[i, j] == 1 iff item i is placed in bin j
        x = mdl.binary_var_matrix(num_items, num_bins, name='x')
        for i in range(num_items):
            # every item goes into exactly one bin
            mdl.add_constraint(mdl.sum(x[i, j] for j in range(num_bins)) == 1)
        for j in range(num_bins):
            # bin capacity constraint, active only when the bin is used
            mdl.add_constraint(mdl.sum(self._weights[i] * x[i, j] for i in range(num_items)) <= self._max_weight * y[j])
        return from_docplex_mp(mdl)

    def interpret(self, result: Union[OptimizationResult, np.ndarray]) -> List[List[int]]:
        """Translate a solution vector into item-index lists, one per bin."""
        x = self._result_to_x(result)
        num_items = len(self._weights)
        num_bins = self._max_number_of_bins
        # layout of x: first num_bins entries are the y variables, the rest the
        # flattened (num_items x num_bins) assignment matrix
        bins = x[:num_bins]
        items = np.array(x[num_bins:]).reshape((num_items, num_bins))
        return [[i for i in range(num_items) if (bins[j] and items[i, j])] for j in range(num_bins)]

    @_optionals.HAS_MATPLOTLIB.require_in_call  # '@' restored on the stripped decorator
    def get_figure(self, result: Union[OptimizationResult, np.ndarray]) -> Figure:
        """Plot each bin as a stacked bar of item weights with the capacity line."""
        import matplotlib.pyplot as plt
        colors = plt.colormaps['jet'].resampled(len(self._weights))
        items_in_bins = self.interpret(result)
        num_bins = len(items_in_bins)
        fig, axes = plt.subplots()
        for bin_idx, bin_items in enumerate(items_in_bins):
            stacked = 0
            for item in bin_items:
                axes.bar(bin_idx, self._weights[item], bottom=stacked, label=f'Item {item}', color=colors(item))
                stacked += self._weights[item]
        axes.hlines(self._max_weight, -0.5, num_bins - 0.5, linestyle='--', color='tab:red', label='Max Weight')
        axes.set_xticks(np.arange(num_bins))
        axes.set_xlabel('Bin')
        axes.set_ylabel('Weight')
        axes.legend()
        return fig
class F28_TestCase(CommandTest):
    """Parsing tests for the Fedora 28 'authselect' kickstart command."""
    command = 'authselect'

    def runTest(self):
        # bare command parses; with arguments the full line round-trips verbatim
        self.assert_parse('authselect')
        self.assert_parse('authselect select winbind', 'authselect select winbind\n')
        self.assert_parse('authselect select sssd with-mkhomedir', 'authselect select sssd with-mkhomedir\n')
class CSSHeaderFile(object):
    """Reader for CSS wfdisc-style header files referencing waveform data files.

    Fix: ``superdir`` is accessed as an attribute (``os.path.join(self.superdir,
    ...)`` in ``read()`` and ``iter_pyrocko_traces()``) but was declared as a
    plain method, which would raise a TypeError at runtime; the ``@property``
    decorator is restored.
    """

    def __init__(self, filename):
        self.fn = filename
        self.data = []  # parsed header rows whose referenced data file exists
        self.read()

    def read_wf_file(self, fn, nbytes, dtype, foff=0):
        """Unpack `nbytes` 4-byte samples from `fn` starting at byte offset `foff`.

        `dtype` is a struct format template filled in with the sample count.
        Returns an int32 array, or None if unpacking fails (error is logged).
        """
        with open(fn, 'rb') as f:
            fmt = (dtype % nbytes)
            f.seek(foff)
            try:
                data = num.array(unpack(fmt, f.read((nbytes * 4))), dtype=num.int32)
            except Exception:
                logger.exception(('Error while unpacking %s' % fn))
                return
        return data

    def read(self):
        """Parse the header file line by line using the module-level `template`.

        Keeps only rows whose referenced waveform file exists on disk; raises
        CSSWfError with full context when a field cannot be converted.
        """
        with open(self.fn, 'rb') as f:
            lines = f.readlines()
        for (iline, line) in enumerate(lines):
            line = str(line.decode('ascii'))
            d = {}
            for (ident, convert, (istart, istop), desc) in template:
                try:
                    d[ident] = convert(line[istart:istop].strip())
                except Exception:
                    # report 1-based line/column positions to the user
                    raise CSSWfError(iline=(iline + 1), data=line, ident=ident, convert=convert, istart=(istart + 1), istop=(istop + 1), desc=desc, d=d)
            fn = os.path.join(self.superdir, d['dir'], d['dfile'])
            if os.path.isfile(fn):
                self.data.append(d)
            else:
                logger.error(('no such file: %s (see header file: %s, line %s)' % (fn, self.fn, (iline + 1))))

    @property
    def superdir(self):
        """Directory containing the header file (base for relative data paths)."""
        return os.path.dirname(self.fn)

    def iter_pyrocko_traces(self, load_data=True):
        """Yield a pyrocko Trace per parsed header row.

        With load_data=False only metadata is filled. Missing data files
        (ENOENT) are skipped with a debug log; other IOErrors propagate.
        """
        for (idata, d) in enumerate(self.data):
            fn = os.path.join(d['dir'], d['dfile'])
            logger.debug('converting %s', d['dfile'])
            try:
                if load_data:
                    ydata = self.read_wf_file(os.path.join(self.superdir, fn), d['nsamp'], storage_types[d['datatype']], d['foff'])
                else:
                    ydata = None
            except IOError as e:
                if (e.errno == 2):
                    logger.debug(e)
                    continue
                else:
                    raise e
            dt = (1.0 / d['samprate'])
            (yield trace.Trace(station=d['sta'], channel=d['chan'], deltat=dt, tmin=d['time'], tmax=(d['time'] + (d['nsamp'] / d['samprate'])), ydata=ydata))
class Host(ni_abc.CLAHost):
    """CLA status checking backed by the bugs.python.org clacheck template."""

    def __init__(self, server: ni_abc.ServerHost) -> None:
        self.server = server

    async def problems(self, aio_client: 'aiohttp.ClientSession', usernames: AbstractSet[str]) -> Mapping[ni_abc.Status, AbstractSet[str]]:
        """Return CLA problems (not signed / unknown user) keyed by status.

        NOTE(review): the parameter annotation and the base URL below were
        garbled/truncated in this file and are reconstructed — confirm
        against the upstream source.

        Raises:
            client.HTTPException: on a non-2xx response.
            ValueError: if the result count does not match the username count.
            TypeError: if a status value is not True/False/None.
        """
        base_url = 'https://bugs.python.org/user?@template=clacheck&github_names='
        url = (base_url + ','.join(usernames))
        self.server.log(('Checking CLA status: ' + url))
        async with aio_client.get(url) as response:
            if (response.status >= 300):
                msg = f'unexpected response for {response.url!r}: {response.status}'
                raise client.HTTPException(msg)
            results = json.loads((await response.text()))
        self.server.log(('Raw CLA status: ' + str(results)))
        # keep only the entries that correspond to the requested usernames
        status_results = [results[k] for k in results.keys() if (k in usernames)]
        self.server.log(('Filtered CLA status: ' + str(status_results)))
        if (len(status_results) != len(usernames)):
            raise ValueError("# of usernames don't match # of results ({} != {})".format(len(usernames), len(status_results)))
        elif any(((x not in (True, False, None)) for x in status_results)):
            raise TypeError(('unexpected value in ' + str(status_results)))
        # None -> unknown username, False -> CLA not signed; True is not a problem
        failures = {None: ni_abc.Status.username_not_found, False: ni_abc.Status.not_signed}
        problems: MutableMapping[ni_abc.Status, Set[str]] = {}
        for (username, result) in results.items():
            if (result in failures):
                problems.setdefault(failures[result], set()).add(username)
        return problems
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size, target_sequence_length, max_target_sequence_length, encoder_state, decoder_input):
    """Build the training and inference decoders of a seq2seq model (TF1 contrib).

    Returns (training_decoder_output, predicting_decoder_output), the decoded
    outputs of the teacher-forced training decoder and the greedy inference
    decoder, which share weights via variable-scope reuse.

    NOTE(review): relies on a module-level 'batch_size' when tiling the
    inference start tokens — confirm it matches the feed batch size.
    """
    target_vocab_size = len(target_letter_to_int)
    # embedding matrix for target-side tokens
    decoder_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    decoder_embed_input = tf.nn.embedding_lookup(decoder_embeddings, decoder_input)

    def get_decoder_cell(rnn_size):
        # one LSTM cell with small uniform init (fixed seed for reproducibility)
        decoder_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer((- 0.1), 0.1, seed=2))
        return decoder_cell
    cell = tf.contrib.rnn.MultiRNNCell([get_decoder_cell(rnn_size) for _ in range(num_layers)])
    # projection from RNN output to vocabulary logits
    output_layer = Dense(target_vocab_size, kernel_initializer=tf.truncated_normal_initializer(mean=0.1, stddev=0.1))
    with tf.variable_scope('decode'):
        # training decoder: teacher forcing with the ground-truth inputs
        training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=decoder_embed_input, sequence_length=target_sequence_length, time_major=False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(cell, training_helper, encoder_state, output_layer)
        (training_decoder_output, _, _) = tf.contrib.seq2seq.dynamic_decode(training_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length)
    with tf.variable_scope('decode', reuse=True):
        # inference decoder: greedy decoding from '<GO>' to '<EOS>', reusing weights
        start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_token')
        predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(decoder_embeddings, start_tokens, target_letter_to_int['<EOS>'])
        predicting_decoder = tf.contrib.seq2seq.BasicDecoder(cell, predicting_helper, encoder_state, output_layer)
        (predicting_decoder_output, _, _) = tf.contrib.seq2seq.dynamic_decode(predicting_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length)
    return (training_decoder_output, predicting_decoder_output)
def visualize_data():
    """Build trimesh scenes visualizing one frame of reconstruction data.

    Returns a dict of four scenes sharing one camera transform: the raw
    point cloud ('pcd'), target voxel grids ('grid_target'), non-target /
    empty voxel points ('grid_nontarget_empty') and posed CAD models ('cad').
    """
    data = contrib.get_data()
    models = morefusion.datasets.YCBVideoModels()
    colormap = imgviz.label_colormap()
    scenes = {'pcd': trimesh.Scene(), 'grid_target': trimesh.Scene(), 'grid_nontarget_empty': trimesh.Scene(), 'cad': trimesh.Scene()}
    rgb = data['rgb']
    depth = data['depth']
    K = data['intrinsic_matrix']
    # back-project the depth map to a camera-frame point cloud
    pcd = morefusion.geometry.pointcloud_from_depth(depth, fx=K[(0, 0)], fy=K[(1, 1)], cx=K[(0, 2)], cy=K[(1, 2)])
    nonnan = (~ np.isnan(depth))
    geom = trimesh.PointCloud(vertices=pcd[nonnan], colors=rgb[nonnan])
    scenes['pcd'].add_geometry(geom)
    T_world2cam = None
    for instance in data['instances']:
        if (T_world2cam is None):
            # taken from the first instance; NOTE(review): not used below — confirm
            T_world2cam = np.linalg.inv(instance['T_cam2world'])
        class_id = instance['class_id']
        transform = instance['transform_init']
        grid_target = instance['grid_target']
        grid_nontarget_empty = instance['grid_nontarget_empty_noground']
        cad = models.get_cad(class_id=class_id)
        scenes['pcd'].add_geometry(cad, node_name=str(instance['id']), geom_name=str(instance['id']), transform=transform)
        scenes['cad'].add_geometry(cad, node_name=str(instance['id']), geom_name=str(instance['id']), transform=transform)
        # voxel grid -> world: scale by voxel pitch, translate to grid origin
        transform_vg = ttf.scale_and_translate(scale=instance['pitch'], translate=instance['origin'])
        # fixed per-instance colors; only instance ids 0-2 are expected here
        if (instance['id'] == 0):
            color_id = 3
        elif (instance['id'] == 1):
            color_id = 1
        elif (instance['id'] == 2):
            color_id = 4
        else:
            raise ValueError
        geom = trimesh.voxel.VoxelGrid(grid_target, transform=transform_vg).as_boxes(colors=colormap[color_id])
        scenes['grid_target'].add_geometry(geom)
        # occupied non-target/empty voxels rendered as world-space points
        points = np.argwhere(grid_nontarget_empty)
        points = ((points * instance['pitch']) + instance['origin'])
        geom = trimesh.PointCloud(vertices=points, colors=colormap[color_id])
        scenes['grid_nontarget_empty'].add_geometry(geom)
    camera_transform = morefusion.extra.trimesh.to_opengl_transform()
    for scene in scenes.values():
        scene.camera_transform = camera_transform
    return scenes
@pytest.mark.parametrize('proc_name', ['s1', 's2', 's3'])
def test_clean_shutdown(tcp_port, proc_name, xprocess):
    """Terminating an xprocess-managed server also stops its child processes.

    Fix: the leading '.parametrize(...)' line was a stripped decorator (a bare
    attribute access is a syntax error); '@pytest.mark.parametrize' is
    restored.
    """

    class Starter(ProcessStarter):
        # xprocess waits for this pattern on stdout before considering startup done
        pattern = 'started'
        args = [sys.executable, server_path, tcp_port]

    xprocess.ensure(proc_name, Starter)
    info = xprocess.getinfo(proc_name)
    assert info.isrunning()
    # snapshot children before terminating the parent
    children = psutil.Process(info.pid).children()
    assert (info.terminate() == 1)
    for child in children:
        # each child must be gone, or at worst a zombie awaiting reaping
        assert ((not child.is_running()) or (child.status() == psutil.STATUS_ZOMBIE))
# NOTE(review): the bare '_exception' below looks like the tail of a stripped
# decorator line (e.g. '@catch_exception') — restore the full decorator name.
_exception
def get_seatmap(html_seatmap, return_empty_seat=False) -> dict:
    """Parse a seat-map HTML fragment into {seat number: grid coordinate}.

    With return_empty_seat=True only cells whose class matches
    'grid_cell grid_1' (one or two spaces) are scanned; otherwise all
    'grid_cell*' cells are. Returns {} on any parsing error (logged).
    """
    seat_map = {}
    try:
        soup = BeautifulSoup(html_seatmap, 'html.parser')
        layout_grid = soup.find(name='div', attrs={'class': 'layout_grid', 'id': 'content-container'})
        if return_empty_seat:
            # the markup may use one or two spaces between the two classes
            reg_expression = 'grid_cell[ ]{1,2}grid_1'
            div_grid_cell_s = layout_grid.find_all(name='div', attrs={'class': re.compile(reg_expression)})
        else:
            reg_expression = 'grid_cell.*?'
            div_grid_cell_s = layout_grid.find_all(name='div', attrs={'class': re.compile(reg_expression)})
        for grid_cell in div_grid_cell_s:
            # 'data-key' carries the grid coordinate; the cell text is the seat number
            coordinate = grid_cell['data-key'].strip()
            seat_num = grid_cell.get_text().strip()
            if seat_num:
                seat_map[str(seat_num)] = coordinate
        return seat_map
    except Exception as e:
        debug_p('[get_seatmap] [E]:', traceback.format_exc())
        return {}
def decode_dxt3(data, width, height):
    """Decode DXT3 (BC2) texture data into a width*height 4-bytes-per-pixel buffer.

    Each 16-byte block holds 8 bytes of 4-bit per-pixel alpha followed by two
    16-bit endpoint colors and 16 two-bit color codes for a 4x4 pixel tile.
    Returns a PackedImageData tagged GL_RGBA/GL_UNSIGNED_BYTE.

    NOTE(review): the 565 endpoints are split low-bits-first into r/g/b and the
    bytes are stored in b, g, r, a order despite the GL_RGBA tag — presumably
    the two reversals are intentional for this data source; confirm.
    """
    out = (ctypes.c_ubyte * ((width * height) * 4))()
    pitch = (width << 2)  # bytes per image row (4 bytes per pixel)
    image_offset = 0  # byte offset of the current tile's top-left pixel
    for (a0, a1, a2, a3, a4, a5, a6, a7, c0_lo, c0_hi, c1_lo, c1_hi, b0, b1, b2, b3) in split_16byte.findall(data):
        # reassemble the block's little-endian fields from raw bytes
        color0 = (ord(c0_lo) | (ord(c0_hi) << 8))
        color1 = (ord(c1_lo) | (ord(c1_hi) << 8))
        bits = (((ord(b0) | (ord(b1) << 8)) | (ord(b2) << 16)) | (ord(b3) << 24))
        alpha = (((((((ord(a0) | (ord(a1) << 8)) | (ord(a2) << 16)) | (ord(a3) << 24)) | (ord(a4) << 32)) | (ord(a5) << 40)) | (ord(a6) << 48)) | (ord(a7) << 56))
        # split both endpoint colors into their 5/6/5 bit-fields
        r0 = (color0 & 31)
        g0 = ((color0 & 2016) >> 5)
        b0 = ((color0 & 63488) >> 11)
        r1 = (color1 & 31)
        g1 = ((color1 & 2016) >> 5)
        b1 = ((color1 & 63488) >> 11)
        i = image_offset
        for y in range(4):
            for x in range(4):
                code = (bits & 3)  # 2-bit color selector for this pixel
                a = (alpha & 15)  # 4-bit alpha for this pixel
                if (code == 0):
                    (r, g, b) = (r0, g0, b0)
                elif (code == 1):
                    (r, g, b) = (r1, g1, b1)
                elif ((code == 3) and (color0 <= color1)):
                    r = g = b = 0
                elif ((code == 2) and (color0 > color1)):
                    # two-thirds endpoint 0, one-third endpoint 1
                    r = (((2 * r0) + r1) // 3)
                    g = (((2 * g0) + g1) // 3)
                    b = (((2 * b0) + b1) // 3)
                elif ((code == 3) and (color0 > color1)):
                    # one-third endpoint 0, two-thirds endpoint 1
                    r = ((r0 + (2 * r1)) // 3)
                    g = ((g0 + (2 * g1)) // 3)
                    b = ((b0 + (2 * b1)) // 3)
                else:
                    # remaining case: midpoint of the two endpoints
                    assert ((code == 2) and (color0 <= color1))
                    r = ((r0 + r1) // 2)
                    g = ((g0 + g1) // 2)
                    b = ((b0 + b1) // 2)
                # widen 5/6/5/4-bit fields to 8 bits by shifting into the high bits
                out[i] = (b << 3)
                out[(i + 1)] = (g << 2)
                out[(i + 2)] = (r << 3)
                out[(i + 3)] = (a << 4)
                bits >>= 2
                alpha >>= 4
                i += 4
            # jump from the end of a tile row to the start of the next image row
            i += (pitch - 16)
        # advance to the next tile; skip 3 extra rows when a tile row completes
        advance_row = (((image_offset + 16) % pitch) == 0)
        image_offset += (((pitch * 3) * advance_row) + 16)
    return PackedImageData(width, height, GL_RGBA, GL_UNSIGNED_BYTE, out)
class DoubleConv(nn.Sequential):
    """Two consecutive SingleConv modules.

    In the encoder the first conv widens to at most half the output channels
    (never narrower than the input); in the decoder both convs target the
    output channel count directly.
    """

    def __init__(self, in_channels, out_channels, encoder, kernel_size=3, order='crg', num_groups=8):
        super(DoubleConv, self).__init__()
        if encoder:
            # first conv: in -> max(in, out // 2); second conv: -> out
            mid_channels = max(in_channels, out_channels // 2)
            first = (in_channels, mid_channels)
            second = (mid_channels, out_channels)
        else:
            first = (in_channels, out_channels)
            second = (out_channels, out_channels)
        self.add_module('SingleConv1', SingleConv(first[0], first[1], kernel_size, order, num_groups))
        self.add_module('SingleConv2', SingleConv(second[0], second[1], kernel_size, order, num_groups))
class CmdHelp(Command):
    """
    View help or a list of topics

    Usage:
      help <topic or command>
      help list
      help all
    """
    key = 'help'
    aliases = ['?']
    locks = 'cmd:all()'
    arg_regex = '\\s|$'
    # this command needs access to the full current cmdset
    return_cmdset = True
    # use the 'more' pager for long help output
    help_more = HELP_MORE
    # fuzzy-match cutoff and maximum number of suggestions to offer
    suggestion_cutoff = 0.6
    suggestion_maxnum = 5

    def msg_help(self, text):
        """Send `text` to the caller, paginated unless the webclient help popup is on."""
        if type(self).help_more:
            usemore = True
            if (self.session and (self.session.protocol_key in ('websocket', 'ajax/comet'))):
                try:
                    options = self.account.db._saved_webclient_options
                    if (options and options['helppopup']):
                        usemore = False
                except KeyError:
                    pass
            if usemore:
                evmore.msg(self.caller, text, session=self.session)
                return
        self.msg((text, {'type': 'help'}))

    def format_help_entry(self, title, help_text, aliases=None, suggested=None):
        """Format a single help entry with title, aliases, body and suggestions.

        Fix: `self` restored — this is called as a bound method
        (self.format_help_entry(...)) in func(), so the missing first
        parameter made the instance land in `title`.
        """
        string = (_SEP + '\n')
        if title:
            string += ('|CHelp for |w%s|n' % title)
        if aliases:
            string += (' |C(aliases: %s|C)|n' % '|C,|n '.join((('|w%s|n' % ali) for ali in aliases)))
        if help_text:
            string += ('\n%s' % dedent(help_text.rstrip()))
        if suggested:
            string += '\n\n|CSuggested:|n '
            string += ('%s' % fill('|C,|n '.join((('|w%s|n' % sug) for sug in suggested))))
        # NOTE(review): str.strip() returns a new string, so this line is a
        # no-op; kept as-is to preserve the existing output exactly.
        string.strip()
        string += ('\n' + _SEP)
        return string

    def format_help_list(self, hdict_cmds, hdict_db):
        """Format the category-grouped index of command and database help entries.

        Fix: `self` restored — called as self.format_help_list(...) in func().
        """
        string = ''
        if (hdict_cmds and any(hdict_cmds.values())):
            string += ((('\n' + _SEP) + '\n |CCommand help entries|n\n') + _SEP)
            for category in sorted(hdict_cmds.keys()):
                # 'channel names' is listed elsewhere, so skip it here
                if (str(category) != 'channel names'):
                    string += ('\n |w%s|n:\n' % str(category).title())
                    string += (('|G' + fill('|C, |G'.join(sorted(hdict_cmds[category])))) + '|n')
        if (hdict_db and any(hdict_db.values())):
            string += ((('\n\n' + _SEP) + '\n\r |COther help entries|n\n') + _SEP)
            for category in sorted(hdict_db.keys()):
                string += ('\n\r |w%s|n:\n' % str(category).title())
                string += (('|G' + fill(', '.join(sorted([str(topic) for topic in hdict_db[category]])))) + '|n')
        return string

    def check_show_help(self, cmd, caller):
        """Return True if `cmd` has auto-help enabled and `caller` may access it."""
        return (cmd.auto_help and cmd.access(caller))

    def should_list_cmd(self, cmd, caller):
        """Hook: return False to hide `cmd` from the 'help all' index."""
        return True

    def parse(self):
        """Keep the raw arguments and work with a lowercased query."""
        self.original_args = self.args.strip()
        self.args = self.args.strip().lower()

    def func(self):
        """Run help: full index, exact command/topic match, category, or suggestions."""
        (query, cmdset) = (self.args, self.cmdset)
        caller = self.caller
        suggestion_cutoff = self.suggestion_cutoff
        suggestion_maxnum = self.suggestion_maxnum
        if (not query):
            query = 'all'
        # remove duplicate commands before gathering help candidates
        cmdset.make_unique(caller)
        all_cmds = [cmd for cmd in cmdset if self.check_show_help(cmd, caller)]
        all_topics = [topic for topic in HelpEntry.objects.all() if topic.access(caller, 'view', default=True)]
        all_categories = list(set(([cmd.help_category.lower() for cmd in all_cmds] + [topic.help_category.lower() for topic in all_topics])))
        if (query in ('list', 'all')):
            # full index grouped by help category
            hdict_cmd = defaultdict(list)
            hdict_topic = defaultdict(list)
            for cmd in all_cmds:
                if self.should_list_cmd(cmd, caller):
                    hdict_cmd[cmd.help_category].append(cmd.key)
            [hdict_topic[topic.help_category].append(topic.key) for topic in all_topics]
            self.msg_help(self.format_help_list(hdict_cmd, hdict_topic))
            return
        suggestions = None
        if (suggestion_maxnum > 0):
            vocabulary = (([cmd.key for cmd in all_cmds if cmd] + [topic.key for topic in all_topics]) + all_categories)
            [vocabulary.extend(cmd.aliases) for cmd in all_cmds]
            suggestions = [sugg for sugg in string_suggestions(query, set(vocabulary), cutoff=suggestion_cutoff, maxnum=suggestion_maxnum) if (sugg != query)]
            if (not suggestions):
                # fall back to prefix matches when fuzzy matching finds nothing
                suggestions = [sugg for sugg in vocabulary if ((sugg != query) and sugg.startswith(query))]
        match = [cmd for cmd in all_cmds if (cmd == query)]
        if (not match):
            # retry while ignoring command-prefix characters on either side
            _query = (query[1:] if (query[0] in CMD_IGNORE_PREFIXES) else query)
            match = [cmd for cmd in all_cmds for m in cmd._matchset if ((m == _query) or ((m[0] in CMD_IGNORE_PREFIXES) and (m[1:] == _query)))]
        if (len(match) == 1):
            formatted = self.format_help_entry(match[0].key, match[0].get_help(caller, cmdset), aliases=match[0].aliases, suggested=suggestions)
            self.msg_help(formatted)
            return
        match = list(HelpEntry.objects.find_topicmatch(query, exact=True))
        if (len(match) == 1):
            formatted = self.format_help_entry(match[0].key, match[0].entrytext, aliases=match[0].aliases.all(), suggested=suggestions)
            self.msg_help(formatted)
            return
        if (query in all_categories):
            # show everything filed under the requested category
            self.msg_help(self.format_help_list({query: [cmd.key for cmd in all_cmds if (cmd.help_category == query)]}, {query: [topic.key for topic in all_topics if (topic.help_category == query)]}))
            return
        self.msg(self.format_help_entry('', f"No help entry found for '{query}'", None, suggested=suggestions), options={'type': 'help'})
class GuiChangeLocalModuleMutationCommand(wx.Command):
    """Undoable GUI command that changes a local module's mutation on a fit.

    Improvement: Do() and Undo() shared an identical six-line tail
    (flush / recalc / fill / commit / notify); it is extracted into
    `_recalcAndNotify` to remove the duplication.
    """

    def __init__(self, fitID, position, mutation, oldMutation=None):
        wx.Command.__init__(self, True, 'Change Local Module Mutation')
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.position = position
        self.mutation = mutation
        self.oldMutation = oldMutation

    def Do(self):
        """Apply the mutation via the calc command; returns submit success."""
        cmd = CalcChangeLocalModuleMutationCommand(fitID=self.fitID, position=self.position, mutation=self.mutation, oldMutation=self.oldMutation)
        success = self.internalHistory.submit(cmd)
        self._recalcAndNotify()
        return success

    def Undo(self):
        """Revert everything submitted through the internal history."""
        success = self.internalHistory.undoAll()
        self._recalcAndNotify()
        return success

    def _recalcAndNotify(self):
        # Shared post-processing: persist, recalculate the fit and broadcast
        # the change to the GUI.
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
class RS485(serial.Serial):
    """Serial port that toggles RTS around writes for RS485 direction control.

    Fix: the bare `rs485_mode` getter and the orphaned `_mode.setter` line show
    the property decorators were stripped; `@property` and
    `@rs485_mode.setter` are restored. Without them the setter definition is a
    NameError and the getter shadows the setter.
    """

    def __init__(self, *args, **kwargs):
        super(RS485, self).__init__(*args, **kwargs)
        self._alternate_rs485_settings = None

    def write(self, b):
        """Write `b`, toggling RTS to TX level around the transmission when
        rs485_mode is configured (with optional pre/post delays)."""
        if (self._alternate_rs485_settings is not None):
            # switch to transmit level, optionally waiting before sending
            self.setRTS(self._alternate_rs485_settings.rts_level_for_tx)
            if (self._alternate_rs485_settings.delay_before_tx is not None):
                time.sleep(self._alternate_rs485_settings.delay_before_tx)
            super(RS485, self).write(b)
            # ensure the data has left the buffer before flipping RTS back
            super(RS485, self).flush()
            if (self._alternate_rs485_settings.delay_before_rx is not None):
                time.sleep(self._alternate_rs485_settings.delay_before_rx)
            self.setRTS(self._alternate_rs485_settings.rts_level_for_rx)
        else:
            super(RS485, self).write(b)

    @property
    def rs485_mode(self):
        """Current RS485 settings object, or None when direction control is off."""
        return self._alternate_rs485_settings

    @rs485_mode.setter
    def rs485_mode(self, rs485_settings):
        self._alternate_rs485_settings = rs485_settings
def get_cfg(blocks: list[BasicBlock]) -> CFG:
    """Build the control-flow graph: successor/predecessor maps and exit blocks.

    A block's successors are its terminator targets plus the error handler of
    the block itself and of each target. Blocks without targets are exits.
    """
    succ_map = {}
    pred_map: dict[BasicBlock, list[BasicBlock]] = {}
    exits = set()
    for block in blocks:
        assert not any(isinstance(op, ControlOp) for op in block.ops[:-1]), \
            'Control-flow ops must be at the end of blocks'
        successors = list(block.terminator.targets())
        if not successors:
            exits.add(block)
        # error handlers are implicit successors of the block and its targets
        for candidate in [block] + successors:
            if candidate.error_handler:
                successors.append(candidate.error_handler)
        succ_map[block] = successors
        pred_map[block] = []
    # invert the successor map to fill in predecessors
    for source, targets in succ_map.items():
        for target in targets:
            pred_map[target].append(source)
    return CFG(succ_map, pred_map, exits)
def sample_data(opt):
    """Load the full CECT dataset into shuffled (X, Y) tensors.

    X has shape (n, 1, 28, 28, 28); Y holds the integer labels in the same
    shuffled order.
    """
    dataset = CECT_dataset(path=opt['src_data'])
    count = len(dataset)
    samples = torch.Tensor(count, 1, 28, 28, 28)
    labels = torch.LongTensor(count)
    # copy every item in a random order
    for out_idx, src_idx in enumerate(torch.randperm(count)):
        sample, label = dataset[src_idx]
        samples[out_idx] = sample
        labels[out_idx] = label
    return (samples, labels)
class RBitfield(BitfieldBase):
    """Bit reader that accumulates bytes MSB-first and serves the top bits."""

    def _more(self):
        # pull one more byte from the stream into the low end of the accumulator
        byte = self._read(1)
        self.bitfield = (self.bitfield << 8) + ord(byte)
        self.bits += 8

    def snoopbits(self, n=8):
        """Return the next n bits without consuming them."""
        if n > self.bits:
            self.needbits(n)
        return (self.bitfield >> (self.bits - n)) & self._mask(n)

    def readbits(self, n=8):
        """Return and consume the next n bits."""
        if n > self.bits:
            self.needbits(n)
        value = (self.bitfield >> (self.bits - n)) & self._mask(n)
        # drop the consumed bits from the accumulator
        self.bits -= n
        self.bitfield &= ~(self._mask(n) << self.bits)
        return value
@require_torch
@require_vision
class CLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests CLIPImageProcessor on 4-channel inputs (encoded down to 3-channel RGB).

    Fixes: the bare '_torch'/'_vision' remnant lines were stripped
    '@require_torch'/'@require_vision' decorators (restored — confirm against
    upstream), and `image_processor_dict` is accessed as an attribute
    (`**self.image_processor_dict`) so its '@property' decorator is restored.
    """
    image_processing_class = (CLIPImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_processor_tester = CLIPImageProcessingTester(self, num_channels=4)
        # 4-channel inputs must come out as 3-channel RGB
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))

    def test_batch_feature(self):
        # covered elsewhere; intentionally skipped for the 4-channel variant
        pass

    def test_call_pil_four_channels(self):
        """4-channel PIL inputs are encoded to 3-channel pixel tensors."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # single image -> batch of one
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        # full batch
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
class MyghtyJavascriptLexer(DelegatingLexer):
    """Lexer highlighting JavaScript embedded in Myghty templates.

    Fix: the `url` string literal was truncated (`url = '`) — restored to the
    Myghty project homepage.
    """
    name = 'JavaScript+Myghty'
    aliases = ['javascript+myghty', 'js+myghty']
    # NOTE(review): 'text/javascript+mygthy' preserves an upstream typo
    mimetypes = ['application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy']
    url = 'http://www.myghty.org/'
    version_added = '0.6'

    def __init__(self, **options):
        # delegate: JavaScript for content, Myghty for template constructs
        super().__init__(JavascriptLexer, MyghtyLexer, **options)
def load_modules(location):
    """Recursively import every *.py file found under `location`.

    Expands '$PWD' (on Windows), '~' and environment variables first; raises
    OSError if the resulting path does not exist.
    """
    if os.name == 'nt':
        location = location.replace('$PWD', os.getcwd())
    location = os.path.expanduser(os.path.expandvars(location))
    if not os.path.exists(location):
        raise OSError("Location '{0}' to load modules does not exist".format(location))
    for dirpath, _, filenames in os.walk(location):
        for name in fnmatch.filter(filenames, '*.py'):
            load_module(os.path.join(dirpath, name))
class LockedDropout(nn.Module):
    """Variational ('locked') dropout: one mask per sequence, shared over the
    middle (time) dimension, scaled to keep the expected activation."""

    def __init__(self, dropout):
        super().__init__()
        self.dropout = dropout

    def forward(self, x):
        """Apply the same dropout mask at every step of dim 1; no-op in eval mode."""
        if not self.training:
            return x
        keep = 1 - self.dropout
        # one Bernoulli mask per (batch, feature), broadcast across dim 1
        noise = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(keep)
        mask = Variable(noise.div_(keep), requires_grad=False)
        return mask.expand_as(x) * x
class OffensiveMessageValidatorsTests(TestCase):
    """Tests for future_date_validator."""

    def test_accepts_future_date(self):
        # a date far in the future must validate without raising
        future_date_validator(datetime(3000, 1, 1, tzinfo=UTC))

    def test_rejects_non_future_date(self):
        # a past date must be rejected with ValidationError
        with self.assertRaises(ValidationError):
            future_date_validator(datetime(1000, 1, 1, tzinfo=UTC))
def randomNetworkOnly(qnnArch):
    """Create random per-layer unitaries for a QNN architecture.

    For each layer l and output qubit j, builds a random unitary on the
    layer's input qubits plus one output qubit, padded with identity over the
    remaining output qubits, and swapped into position j.

    Fix: the original always generated a random unitary and then, whenever the
    layer has more than one output qubit, discarded it and generated a fresh
    one; the redundant draw is removed (the output distribution is unchanged).
    """
    networkUnitaries = [[]]
    for l in range(1, len(qnnArch)):
        numInputQubits = qnnArch[(l - 1)]
        numOutputQubits = qnnArch[l]
        layerUnitaries = []
        for j in range(numOutputQubits):
            if ((numOutputQubits - 1) != 0):
                # pad the fresh unitary with identity over the other output qubits
                unitary = qt.tensor(randomQubitUnitary((numInputQubits + 1)), tensoredId((numOutputQubits - 1)))
            else:
                unitary = randomQubitUnitary((numInputQubits + 1))
            layerUnitaries.append(swappedOp(unitary, numInputQubits, (numInputQubits + j)))
        networkUnitaries.append(layerUnitaries)
    return networkUnitaries
class VacationDirector():
    """Director demonstrating the vacation builder pattern with two builders."""

    def main(*args) -> None:
        # NOTE(review): declared without an explicit 'self'; '*args' absorbs
        # the instance (or CLI arguments) — confirm the intended calling
        # convention.
        outdoorsyVacationBuilder: VacationBuilder = OutdoorsVacationBuilder()
        # fluent chain: dated accommodation, events, extra accommodation
        outdoorsyVacation: Vacation = outdoorsyVacationBuilder.addAccommodation_3('Two person tent', 2020, 7, 1, 5, 34).addEvent('Beach').addAccommodation_2('Two person tent').addEvent('Mountains').getVacation()
        print(outdoorsyVacation)
        cityVacationBuilder: VacationBuilder = CityVacationBuilder()
        cityVacation: Vacation = cityVacationBuilder.addAccommodation_3('Grand Facadian', 2020, 8, 1, 5, 0).addAccommodation_3('Hotel Commander', 2020, 8, 6, 2, 0).addEvent('Cirque du Soleil').getVacation()
        print(cityVacation)
def test_fileformatjson_pass_no_substitutions(fs):
    """fileformat writes the JSON payload through unchanged when it contains
    no substitution tokens.

    `fs` is the pyfakefs fixture, so no real files are touched.
    """
    payload = '{\n "key1": "value1",\n "key2": "value2",\n "key3": "value3"\n}\n'
    in_path = './tests/testfiles/test.json'
    fs.create_file(in_path, contents=payload)
    context = Context({'ok1': 'ov1', 'fileFormatJson': {'in': in_path, 'out': './tests/testfiles/out/out.json'}})
    fileformat.run_step(context)
    # the step must leave the context untouched
    assert context, "context shouldn't be None"
    assert (len(context) == 2), 'context should have 2 items'
    assert (context['ok1'] == 'ov1')
    assert (context['fileFormatJson'] == {'in': in_path, 'out': './tests/testfiles/out/out.json'})
    # the written file must contain exactly the three original keys
    with open('./tests/testfiles/out/out.json') as outfile:
        outcontents = json.load(outfile)
    assert (len(outcontents) == 3)
    assert (outcontents['key1'] == 'value1')
    assert (outcontents['key2'] == 'value2')
    assert (outcontents['key3'] == 'value3')
@pytest.mark.parametrize('rank', range(_WORLD_SIZE * 2))
def test_replicated_entries_only_on_rank_0(rank: int) -> None:
    """get_manifest_for_rank is deterministic for every rank, including ranks
    beyond the snapshot's world size.

    Fix: the leading '.parametrize(...)' line was a stripped
    '@pytest.mark.parametrize' decorator (restored).
    NOTE(review): both calls use _MANIFEST_0 — confirm the second call was not
    meant to use a different manifest constant.
    """
    local_manifest_0 = get_manifest_for_rank(metadata=SnapshotMetadata(version='0.0.0', world_size=_WORLD_SIZE, manifest=_MANIFEST_0), rank=rank)
    local_manifest_1 = get_manifest_for_rank(metadata=SnapshotMetadata(version='0.0.0', world_size=_WORLD_SIZE, manifest=_MANIFEST_0), rank=rank)
    assert (local_manifest_0 == local_manifest_1)
def freeze_training_mode(model):
    """Temporarily annotate 'training' as torch.jit.Final[bool] on the model's
    module classes.

    Generator with a single yield: before yielding, every module class that
    lacks '__constants__' gets the Final annotation (so TorchScript treats
    'training' as a constant); after resumption the annotation is restored to
    plain bool rather than deleted.

    NOTE(review): written as a context-manager body — presumably wrapped with
    contextlib.contextmanager at a (stripped) decoration site; confirm.
    """
    classes = {type(x) for x in model.modules()}
    # classes that declare __constants__ manage their own scripting constants
    classes = {x for x in classes if (not hasattr(x, '__constants__'))}
    for cls in classes:
        cls.__annotations__['training'] = torch.jit.Final[bool]
    (yield)
    for cls in classes:
        cls.__annotations__['training'] = bool
class HttpPostHandler(Handler):
    """Diamond handler that buffers metrics and POSTs them to an HTTP endpoint.

    Fix: the default 'url' string literal in get_default_config() was
    truncated in this file; a placeholder is restored (see NOTE below).
    """

    def __init__(self, config=None):
        Handler.__init__(self, config)
        self.metrics = []  # buffered metrics awaiting the next POST
        self.batch_size = int(self.config['batch'])
        self.format = self.config['format']
        self.url = self.config['url']

    def get_default_config_help(self):
        """Describe the handler's configuration options."""
        config = super(HttpPostHandler, self).get_default_config_help()
        config.update({'url': 'Fully qualified url to send metrics to', 'format': 'Format to send metrics (PLAIN or JSON)', 'batch': 'How many to store before sending to the graphite server'})
        return config

    def get_default_config(self):
        """Return the default configuration values."""
        config = super(HttpPostHandler, self).get_default_config()
        # NOTE(review): the original default URL was lost from this file;
        # 'http://localhost:8888/' is a placeholder — confirm upstream.
        config.update({'url': 'http://localhost:8888/', 'format': 'PLAIN', 'batch': 100})
        return config

    def process(self, metric):
        """Buffer a metric, flushing automatically when the batch is full."""
        self.metrics.append(metric)
        if (len(self.metrics) >= self.batch_size):
            self.post()

    def flush(self):
        """Post any buffered metrics immediately."""
        self.post()

    def post(self):
        """POST buffered metrics (JSON or newline-joined plain text), then clear the buffer."""
        if (self.format == 'JSON'):
            header = {'Content-type': 'application/json', 'Accept': 'application/json'}
            json_fmt = dict(metric=[])
            for metric in self.metrics:
                json_fmt['metric'].append(dict(path=metric.path, value=metric.value, timestamp=metric.timestamp, precision=metric.precision, host=metric.host, ttl=metric.ttl))
            req = urllib2.Request(self.url, json.dumps(json_fmt), headers=header)
        else:
            req = urllib2.Request(self.url, '\n'.join([str(m) for m in self.metrics]))
        urllib2.urlopen(req)
        self.metrics = []
class SliderCardsSection(blocks.StructBlock):
    """Wagtail StreamField section rendering a horizontal slider of cards."""

    # Optional heading shown above the slider.
    title = blocks.CharBlock(required=False)
    # Vertical spacing preset for the section.
    spacing = blocks.ChoiceBlock(default='xl', choices=[('xl', 'Extra Large'), ('3xl', '3 Extra Large')])
    # Toggles the decorative "snake" background treatment.
    snake_background = blocks.BooleanBlock(required=False, default=False)
    # Slider contents: any mix of simple text cards and price cards.
    cards = blocks.StreamBlock([('simple_text_card', SimpleTextCard()), ('price_card', PriceCard())])

    class Meta():
        label = 'Slider Cards Section'
        icon = 'crosshairs'
def create_user_access_token(user_obj, client_id, scope, access_token=None, expires_in=9000):
    """Persist a new OAuth access token for *user_obj* under the application
    registered for *client_id*.

    The full token string is split into a short clear-text lookup prefix
    (``token_name``) and the remainder (``token_code``, stored hashed via
    ``Credential``); only the tuple returned here carries the full secret.

    Returns:
        (created OAuthAccessToken row, full access-token string)
    """
    # Generate a fresh 40-char token unless the caller supplied one
    # (`or` also regenerates on an empty string, matching prior behavior).
    access_token = access_token or random_string_generator(length=40)()
    prefix = access_token[:ACCESS_TOKEN_PREFIX_LENGTH]
    code = access_token[ACCESS_TOKEN_PREFIX_LENGTH:]
    assert len(prefix) == ACCESS_TOKEN_PREFIX_LENGTH
    assert len(code) >= ACCESS_TOKEN_MINIMUM_CODE_LENGTH
    expiration = datetime.utcnow() + timedelta(seconds=expires_in)
    application = get_application_for_client_id(client_id)
    created = OAuthAccessToken.create(
        application=application,
        authorized_user=user_obj,
        scope=scope,
        token_type='token',
        access_token='',
        token_code=Credential.from_string(code),
        token_name=prefix,
        expires_at=expiration,
        data='',
    )
    return (created, access_token)
def find_candidate_tile_locations(num_rings, base_tile: Tile, align_tiles: list, integer_align=True):
    """Breadth-first expansion of candidate tile placements around *base_tile*.

    Expands `num_rings` rings outward: each ring is the set of not-yet-seen
    tiles produced by ``get_all_tiles`` from every tile of the previous ring
    against every alignment tile.  Returns all unique tiles found, including
    the base tile itself.
    """
    discovered = [base_tile]
    frontier = [base_tile]
    for ring in range(num_rings):
        print(f'computing ring_{ring}')
        # Snapshot the frontier size: new tiles appended below belong to
        # the NEXT ring and must not be expanded in this pass.
        for member_idx in range(len(frontier)):
            print(f'last ring_{member_idx}')
            current = frontier.pop(0)
            for align_tile in align_tiles:
                neighbours, _ = get_all_tiles(current, align_tile, integer_align=integer_align)
                for candidate in neighbours:
                    if candidate not in discovered:
                        discovered.append(candidate)
                        frontier.append(candidate)
    return discovered
# NOTE(review): the bare call below is presumably a mangled registration
# decorator (e.g. `@register_task('self_supervision_zero_task')`) — confirm
# against the original module.
_task('self_supervision_zero_task')
class SelfSupervisionZeroTask(SelfSupervisionTask):
    """Self-supervision task that wraps the model in fairscale's
    ShardedDataParallel, which is required when the optimizer is ZeRO."""

    def __init__(self, config: AttrDict):
        super().__init__(config)

    def init_distributed_data_parallel_model(self):
        """Build the ShardedDataParallel wrapper around the base model.

        Raises:
            NotImplementedError: if the optimizer is not a ZeRO instance.
        """
        super().init_distributed_data_parallel_model()
        # Broadcast buffers on forward only when configured to do so.
        broadcast_buffers = (self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS)
        # Default gradient-reduction bucket size: 2**23 bytes (8 MiB);
        # a non-negative config value overrides it.
        reduce_buffer_size = (2 ** 23)
        if (self.config.MODEL.SHARDED_DDP_SETUP.reduce_buffer_size >= 0):
            reduce_buffer_size = self.config.MODEL.SHARDED_DDP_SETUP.reduce_buffer_size
        logging.info(f'Setting reduce_buffer_size: {reduce_buffer_size}')
        if isinstance(self.optimizer, ZeRO):
            logging.info('Using ShardedDDP')
            self.distributed_model = ShardedDataParallel(module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers, reduce_buffer_size=reduce_buffer_size)
        else:
            raise NotImplementedError('This DataParallel engine should only be used in conjunction with ZeRO')
class Effect1472(BaseEffect):
    """Passive effect: boosts missile explosion cloud size (aoeCloudSize)
    on charges requiring Missile Launcher Operation, scaled by skill level
    when the source is a skill."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Skill sources scale the bonus by trained level; everything else
        # applies it once.
        if 'skill' in context:
            level = container.level
        else:
            level = 1
        # Stacking penalties never apply to skills, implants or boosters.
        penalized = ('skill' not in context) and ('implant' not in context) and ('booster' not in context)
        fit.modules.filteredChargeBoost(
            lambda mod: mod.charge.requiresSkill('Missile Launcher Operation'),
            'aoeCloudSize',
            container.getModifiedItemAttr('aoeCloudSizeBonus') * level,
            stackingPenalties=penalized,
            **kwargs)
def repass_backward(model, subnet, model_checkpoints, opt_checkpoints, outer_grads_w, loader, theta):
    """Replay the checkpointed update over *loader* and accumulate
    hypergradients of the mapped weights w.r.t. the subnet scores and theta.

    NOTE(review): relies on module-level ``args`` and CUDA tensors, and only
    the earliest checkpoint (index 0) is replayed — confirm intentional.
    """
    score_grads = 0
    theta_accum = 0
    initial_params = model_checkpoints[0]
    initial_opt_state = opt_checkpoints[0]
    for train_x, train_y, _, _ in loader:
        train_x = train_x.cuda()
        train_y = train_y.cuda().float()
        _, w_mapped = pseudo_updated_params(model, initial_params, initial_opt_state, train_x, train_y, subnet, theta)
        if args.score_update:
            # retain_graph so the theta gradient below can reuse the graph.
            score_grads += torch.autograd.grad(w_mapped, subnet, grad_outputs=outer_grads_w, retain_graph=True)[0]
        if args.theta_update:
            theta_accum += torch.autograd.grad(w_mapped, theta, grad_outputs=outer_grads_w)[0]
    return (score_grads, theta_accum)
class FullSyncPeriodicTimerTest(unittest.TestCase):
    """Multi-process tests for FullSyncPeriodicTimer with a 5-second interval.

    NOTE(review): the worker methods take ``cls`` yet carry no
    ``@classmethod`` decorator — presumably decorators were lost in
    extraction; confirm against the original source.
    """

    def _full_sync_worker_without_timeout(cls) -> bool:
        # check() is called immediately, well inside the 5s threshold.
        process_group = dist.group.WORLD
        interval_threshold = timedelta(seconds=5)
        fsp_timer = FullSyncPeriodicTimer(interval_threshold, none_throws(process_group))
        return fsp_timer.check()

    def _full_sync_worker_with_timeout(cls, timeout: int) -> bool:
        process_group = dist.group.WORLD
        interval_threshold = timedelta(seconds=5)
        fsp_timer = FullSyncPeriodicTimer(interval_threshold, none_throws(process_group))
        time.sleep(timeout)
        # First check is discarded; the second one's result is reported.
        # NOTE(review): presumably the first check resets the timer — confirm.
        fsp_timer.check()
        return fsp_timer.check()

    def test_full_sync_pt_multi_process_check_false(self) -> None:
        """Without any wait, no rank reports the interval as elapsed."""
        mp_dict = spawn_multi_process(2, 'gloo', self._full_sync_worker_without_timeout)
        self.assertFalse(mp_dict[0])
        self.assertFalse(mp_dict[1])

    def test_full_sync_pt_multi_process_check_true(self) -> None:
        """Sleeping well past the 5s threshold makes every rank report True."""
        mp_dict = spawn_multi_process(2, 'gloo', self._full_sync_worker_with_timeout, 8)
        self.assertTrue(mp_dict[0])
        self.assertTrue(mp_dict[1])

    def test_full_sync_pt_multi_process_edgecase(self) -> None:
        """Exactly at the threshold counts as elapsed; just under does not."""
        mp_dict = spawn_multi_process(2, 'gloo', self._full_sync_worker_with_timeout, 5)
        self.assertTrue(mp_dict[0])
        self.assertTrue(mp_dict[1])
        mp_dict = spawn_multi_process(2, 'gloo', self._full_sync_worker_with_timeout, 4)
        self.assertFalse(mp_dict[0])
        self.assertFalse(mp_dict[1])
class BreakHamiltonianIntoPotentialKineticArraysTest(unittest.TestCase):
    """Tests for diagonal_coulomb_potential_and_kinetic_terms_as_arrays,
    which splits a Hamiltonian into arrays of diagonal-Coulomb (potential)
    terms and one-body off-diagonal (kinetic) terms."""

    def test_simple_hamiltonian(self):
        """Number/interaction terms land in potential; hopping in kinetic."""
        hamiltonian = (((FermionOperator('3^ 1^ 3 1') + FermionOperator('1^ 1')) - FermionOperator('1^ 2')) - FermionOperator('2^ 1'))
        (potential_terms, kinetic_terms) = diagonal_coulomb_potential_and_kinetic_terms_as_arrays(hamiltonian)
        # Re-sum each array into a single operator for comparison.
        potential = sum(potential_terms, FermionOperator.zero())
        kinetic = sum(kinetic_terms, FermionOperator.zero())
        self.assertEqual(potential, (FermionOperator('1^ 1') + FermionOperator('3^ 1^ 3 1')))
        self.assertEqual(kinetic, ((- FermionOperator('1^ 2')) - FermionOperator('2^ 1')))

    def test_jellium_hamiltonian_correctly_broken_up(self):
        """Splitting a dual-basis jellium Hamiltonian matches building the
        potential-only and kinetic-only reference models directly."""
        grid = Grid(2, 3, 1.0)
        hamiltonian = jellium_model(grid, spinless=True, plane_wave=False)
        (potential_terms, kinetic_terms) = diagonal_coulomb_potential_and_kinetic_terms_as_arrays(hamiltonian)
        potential = sum(potential_terms, FermionOperator.zero())
        kinetic = sum(kinetic_terms, FermionOperator.zero())
        true_potential = dual_basis_jellium_model(grid, spinless=True, kinetic=False)
        true_kinetic = dual_basis_jellium_model(grid, spinless=True, potential=False)
        # Diagonal one-body terms (i^ i) are classified as potential, so move
        # them out of the reference kinetic operator before comparing.
        for i in range(count_qubits(true_kinetic)):
            coeff = true_kinetic.terms.get(((i, 1), (i, 0)))
            if coeff:
                true_kinetic -= FermionOperator(((i, 1), (i, 0)), coeff)
                true_potential += FermionOperator(((i, 1), (i, 0)), coeff)
        self.assertEqual(potential, true_potential)
        self.assertEqual(kinetic, true_kinetic)

    def test_identity_recognized_as_potential_term(self):
        """The identity (a constant) counts as a potential term."""
        (potential_terms, kinetic_terms) = diagonal_coulomb_potential_and_kinetic_terms_as_arrays(FermionOperator.identity())
        self.assertListEqual(list(potential_terms), [FermionOperator.identity()])
        self.assertListEqual(list(kinetic_terms), [])

    def test_zero_hamiltonian(self):
        """The zero operator yields empty arrays for both pieces."""
        (potential_terms, kinetic_terms) = diagonal_coulomb_potential_and_kinetic_terms_as_arrays(FermionOperator.zero())
        self.assertListEqual(list(potential_terms), [])
        self.assertListEqual(list(kinetic_terms), [])

    def test_diagonal_coulomb_hamiltonian_class(self):
        """DiagonalCoulombHamiltonian inputs are accepted and split the same way."""
        hamiltonian = DiagonalCoulombHamiltonian(numpy.array([[1, 1], [1, 1]], dtype=float), numpy.array([[0, 1], [1, 0]], dtype=float), constant=2.3)
        (potential_terms, kinetic_terms) = diagonal_coulomb_potential_and_kinetic_terms_as_arrays(hamiltonian)
        potential = sum(potential_terms, FermionOperator.zero())
        kinetic = sum(kinetic_terms, FermionOperator.zero())
        expected_potential = ((((2.3 * FermionOperator.identity()) + FermionOperator('0^ 0')) + FermionOperator('1^ 1')) - FermionOperator('1^ 0^ 1 0', 2.0))
        expected_kinetic = (FermionOperator('0^ 1') + FermionOperator('1^ 0'))
        self.assertEqual(potential, expected_potential)
        self.assertEqual(kinetic, expected_kinetic)

    def test_type_error_on_bad_input_hamiltonian(self):
        """Non-operator input raises TypeError."""
        with self.assertRaises(TypeError):
            diagonal_coulomb_potential_and_kinetic_terms_as_arrays('oops')
def test_audit_dry_run(monkeypatch, vuln_service, dep_source):
    """In dry-run mode the auditor logs each collected dependency but never
    queries the vulnerability service."""
    real_service = vuln_service()
    source = dep_source()
    auditor = Auditor(real_service, options=AuditOptions(dry_run=True))
    # Recording stubs so we can assert on call counts afterwards.
    recording_service = pretend.stub(query_all=pretend.call_recorder(lambda s: None))
    recording_logger = pretend.stub(info=pretend.call_recorder(lambda s: None))
    monkeypatch.setattr(auditor, '_service', recording_service)
    monkeypatch.setattr(audit, 'logger', recording_logger)
    _ = dict(auditor.audit(source))
    assert recording_service.query_all.calls == []
    assert len(recording_logger.info.calls) == len(list(source.collect()))
def get_translated_function(corefunc, language, stage=False):
    """Map a core-API function name to its translated alias in a pystage
    language module.

    Scans the bytecode of every function on the language's stage or sprite
    class and returns the first method whose body loads *corefunc* as a
    method/attribute; returns *corefunc* itself for 'core', or None when no
    translation exists.
    """
    if language == 'core':
        return corefunc
    lang_module = importlib.import_module(f'pystage.{language}')
    target_cls = lang_module.stage_class if stage else lang_module.sprite_class
    for name, member in inspect.getmembers(target_cls, predicate=inspect.isfunction):
        # A translated wrapper calls through to the core function, which
        # shows up as a LOAD_METHOD/LOAD_ATTR of its name in the bytecode.
        if any(
            instr.opname in ('LOAD_METHOD', 'LOAD_ATTR') and instr.argval == corefunc
            for instr in dis.Bytecode(member)
        ):
            return name
    return None
class WeekdayCalendarTestCase(ExchangeCalendarTestBase, TestCase):
    """Exercises WeekdayCalendar: a 24-hour, Monday-Friday calendar with no
    holidays or early closes."""

    answer_key_filename = '24-5'
    calendar_class = WeekdayCalendar
    start_date = pd.Timestamp('2018-01-01', tz=UTC)
    end_date = pd.Timestamp('2018-12-31', tz=UTC)
    # A session spans the full day on this calendar.
    MAX_SESSION_HOURS = 24
    GAPS_BETWEEN_SESSIONS = False
    HAVE_EARLY_CLOSES = False
    MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp('2018-01-01', tz=UTC)
    MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp('2018-04-04', tz=UTC)
    # Dates the base class probes around DST transitions.
    # NOTE(review): values are project-specific — confirm against answer key.
    DAYLIGHT_SAVINGS_DATES = ['2018-04-05', '2018-11-01']

    def get_session_block(self):
        """Return a small block of consecutive sessions for base-class tests."""
        return self.calendar.all_sessions[1:4]

    def test_open_every_weekday(self):
        """Every Monday-Friday between start and end is a session."""
        calendar = self.calendar
        dates = pd.date_range(self.start_date, self.end_date, tz=UTC)
        assert_index_equal(calendar.sessions_in_range(dates[0], dates[(- 1)]), dates[(dates.weekday <= 4)])

    def test_open_every_weekday_minute(self):
        """Every minute of every Monday-Friday falls inside a session."""
        calendar = self.calendar
        minutes = pd.date_range(self.start_date, ((self.end_date + pd.Timedelta('1 Day')) - pd.Timedelta('1 Minute')), freq='min', tz=UTC)
        assert_index_equal(calendar.minutes_for_sessions_in_range(self.start_date, self.end_date), minutes[(minutes.weekday <= 4)])
def handler(ql: Qiling):
    """INT 13h (BIOS disk services) dispatcher: route by the AH register
    to the matching leaf implementation; unimplemented leaves raise."""
    ah = ql.arch.regs.ah
    # AH -> leaf function table (hex keys mirror the BIOS service numbers).
    dispatch = {
        0x00: __leaf_00,
        0x02: __leaf_02,
        0x08: __leaf_08,
        0x41: __leaf_41,
        0x42: __leaf_42,
        0x43: __leaf_43,
    }
    leaf = dispatch.get(ah)
    if leaf is None:
        ql.log.exception(f'leaf {ah:02x}h of INT 13h is not implemented')
        raise NotImplementedError()
    leaf(ql)
def configure_views(app):
    """Register the key/value REST views on *app*.

    NOTE(review): the bare ``('/...')`` expressions below are presumably
    mangled route decorators (e.g. ``@app.route(...)``) lost in extraction —
    confirm against the original source.  Handlers receive their
    ``SQLAlchemy``/``Request`` arguments by injection (flask-injector style).
    """
    ('/<key>')
    def get(key, db: SQLAlchemy):
        # Return the value stored under *key*, or a 404 JSON payload.
        try:
            kv = db.session.query(KeyValue).filter((KeyValue.key == key)).one()
        except NoResultFound:
            response = jsonify(status='No such key', context=key)
            response.status = '404 Not Found'
            return response
        return jsonify(key=kv.key, value=kv.value)

    ('/')
    def list(db: SQLAlchemy):
        # List all stored keys in sorted order.
        data = [i.key for i in db.session.query(KeyValue).order_by(KeyValue.key)]
        return jsonify(keys=data)

    ('/', methods=['POST'])
    def create(request: Request, db: SQLAlchemy):
        # Create a key/value pair from form data; respond 201.
        kv = KeyValue(request.form['key'], request.form['value'])
        db.session.add(kv)
        db.session.commit()
        response = jsonify(status='OK')
        response.status = '201 CREATED'
        return response

    ('/<key>', methods=['DELETE'])
    def delete(db: SQLAlchemy, key):
        # Delete *key* if present (no error when absent); respond 200.
        db.session.query(KeyValue).filter((KeyValue.key == key)).delete()
        db.session.commit()
        response = jsonify(status='OK')
        response.status = '200 OK'
        return response
class GetImage(rq.ReplyRequest):
    """X11 core protocol GetImage request (opcode 73): fetch the contents of
    a drawable region as XY- or Z-format pixmap data."""

    # Request wire layout: format selector, drawable, region, plane mask.
    _request = rq.Struct(rq.Opcode(73), rq.Set('format', 1, (X.XYPixmap, X.ZPixmap)), rq.RequestLength(), rq.Drawable('drawable'), rq.Int16('x'), rq.Int16('y'), rq.Card16('width'), rq.Card16('height'), rq.Card32('plane_mask'))
    # Reply wire layout: depth, visual, then the raw image bytes.
    _reply = rq.Struct(rq.ReplyCode(), rq.Card8('depth'), rq.Card16('sequence_number'), rq.ReplyLength(), rq.Card32('visual'), rq.Pad(20), rq.Binary('data'))
class BCPPCompiler(CCompiler):
    """Concrete CCompiler subclass for the Borland C++ toolchain (bcc32).

    Tool paths are hard-coded in __init__ and assumed to be on PATH.
    """

    compiler_type = 'bcpp'
    # No executables introspection; commands are fixed below.
    executables = {}
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']
    src_extensions = (_c_extensions + _cpp_extensions)
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__(self, verbose=0, dry_run=0, force=0):
        super().__init__(verbose, dry_run, force)
        # Borland tool names: compiler, incremental linker, librarian.
        self.cc = 'bcc32.exe'
        self.linker = 'ilink32.exe'
        self.lib = 'tlib.exe'
        self.preprocess_options = None
        self.compile_options = ['/tWM', '/O2', '/q', '/g0']
        self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
        self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
        self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
        self.ldflags_static = []
        self.ldflags_exe = ['/Gn', '/q', '/x']
        self.ldflags_exe_debug = ['/Gn', '/q', '/x', '/r']

    def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        """Compile *sources* to objects; .rc files go through brcc32,
        .res files are passed on untouched.  Returns the object list."""
        (macros, objects, extra_postargs, pp_opts, build) = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
        compile_opts = (extra_preargs or [])
        compile_opts.append('-c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)
        for obj in objects:
            try:
                (src, ext) = build[obj]
            except KeyError:
                # Object not scheduled for (re)build.
                continue
            src = os.path.normpath(src)
            obj = os.path.normpath(obj)
            self.mkpath(os.path.dirname(obj))
            if (ext == '.res'):
                # Already-compiled resource: nothing to do.
                continue
            if (ext == '.rc'):
                # Compile the resource script with Borland's brcc32.
                try:
                    self.spawn(['brcc32', '-fo', obj, src])
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            # -P forces C++ compilation for C++ extensions.
            if (ext in self._c_extensions):
                input_opt = ''
            elif (ext in self._cpp_extensions):
                input_opt = '-P'
            else:
                input_opt = ''
            output_opt = ('-o' + obj)
            try:
                self.spawn(((((([self.cc] + compile_opts) + pp_opts) + [input_opt, output_opt]) + extra_postargs) + [src]))
            except DistutilsExecError as msg:
                raise CompileError(msg)
        return objects

    def create_static_lib(self, objects, output_libname, output_dir=None, debug=0, target_lang=None):
        """Archive *objects* into a static .lib with tlib (/u = update)."""
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        output_filename = self.library_filename(output_libname, output_dir=output_dir)
        if self._need_link(objects, output_filename):
            lib_args = ([output_filename, '/u'] + objects)
            if debug:
                # No librarian-level debug option is applied.
                pass
            try:
                self.spawn(([self.lib] + lib_args))
            except DistutilsExecError as msg:
                raise LibError(msg)
        else:
            log.debug('skipping %s (up-to-date)', output_filename)

    def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
        """Link objects into an executable or DLL with ilink32, generating a
        .def file for exported symbols and splitting out .res resources."""
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        (libraries, library_dirs, runtime_library_dirs) = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
        if runtime_library_dirs:
            log.warning("I don't know what to do with 'runtime_library_dirs': %s", str(runtime_library_dirs))
        if (output_dir is not None):
            output_filename = os.path.join(output_dir, output_filename)
        if self._need_link(objects, output_filename):
            # Pick the Borland startup object and flags for exe vs. dll.
            if (target_desc == CCompiler.EXECUTABLE):
                startup_obj = 'c0w32'
                if debug:
                    ld_args = self.ldflags_exe_debug[:]
                else:
                    ld_args = self.ldflags_exe[:]
            else:
                startup_obj = 'c0d32'
                if debug:
                    ld_args = self.ldflags_shared_debug[:]
                else:
                    ld_args = self.ldflags_shared[:]
            # Write an EXPORTS .def file when symbols are exported.
            if (export_symbols is None):
                def_file = ''
            else:
                (head, tail) = os.path.split(output_filename)
                (modname, ext) = os.path.splitext(tail)
                temp_dir = os.path.dirname(objects[0])
                def_file = os.path.join(temp_dir, ('%s.def' % modname))
                contents = ['EXPORTS']
                for sym in (export_symbols or []):
                    contents.append(' {}=_{}'.format(sym, sym))
                self.execute(write_file, (def_file, contents), ('writing %s' % def_file))
            # Separate .res resources from real objects; the startup object
            # must come first on the ilink32 command line.
            objects2 = map(os.path.normpath, objects)
            objects = [startup_obj]
            resources = []
            for file in objects2:
                (base, ext) = os.path.splitext(os.path.normcase(file))
                if (ext == '.res'):
                    resources.append(file)
                else:
                    objects.append(file)
            for ell in library_dirs:
                ld_args.append(('/L%s' % os.path.normpath(ell)))
            # Always search the current directory as well.
            ld_args.append('/L.')
            ld_args.extend(objects)
            # ilink32 positional sections: objects, output, map, libs, def, res.
            ld_args.extend([',', output_filename])
            ld_args.append(',,')
            for lib in libraries:
                # Pass plain names through and let the linker search for them.
                libfile = self.find_library_file(library_dirs, lib, debug)
                if (libfile is None):
                    ld_args.append(lib)
                else:
                    ld_args.append(libfile)
            # Borland import and multithreaded runtime libraries.
            ld_args.extend(('import32', 'cw32mt'))
            ld_args.extend([',', def_file])
            ld_args.append(',')
            ld_args.extend(resources)
            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)
            self.mkpath(os.path.dirname(output_filename))
            try:
                self.spawn(([self.linker] + ld_args))
            except DistutilsExecError as msg:
                raise LinkError(msg)
        else:
            log.debug('skipping %s (up-to-date)', output_filename)

    def find_library_file(self, dirs, lib, debug=0):
        """Search *dirs* for *lib*, preferring Borland-suffixed (_bcpp) and,
        when debugging, _d-suffixed variants; None if nothing is found."""
        if debug:
            dlib = (lib + '_d')
            try_names = ((dlib + '_bcpp'), (lib + '_bcpp'), dlib, lib)
        else:
            try_names = ((lib + '_bcpp'), lib)
        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename(name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # for/else: no early return happened, so nothing was found.
            return None

    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Map source file names to object file names (.rc -> .res)."""
        if (output_dir is None):
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            (base, ext) = os.path.splitext(os.path.normcase(src_name))
            if (ext not in (self.src_extensions + ['.rc', '.res'])):
                raise UnknownFileError("unknown file type '{}' (from '{}')".format(ext, src_name))
            if strip_dir:
                base = os.path.basename(base)
            if (ext == '.res'):
                # Already-binary resource keeps its extension.
                obj_names.append(os.path.join(output_dir, (base + ext)))
            elif (ext == '.rc'):
                # Resource scripts compile to .res.
                obj_names.append(os.path.join(output_dir, (base + '.res')))
            else:
                obj_names.append(os.path.join(output_dir, (base + self.obj_extension)))
        return obj_names

    def preprocess(self, source, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None):
        """Run Borland's cpp32.exe preprocessor over *source*."""
        (_, macros, include_dirs) = self._fix_compile_args(None, macros, include_dirs)
        pp_opts = gen_preprocess_options(macros, include_dirs)
        pp_args = (['cpp32.exe'] + pp_opts)
        if (output_file is not None):
            pp_args.append(('-o' + output_file))
        if extra_preargs:
            pp_args[:0] = extra_preargs
        if extra_postargs:
            pp_args.extend(extra_postargs)
        pp_args.append(source)
        # Skip the run when the output is newer than the source (unless forced).
        if (self.force or (output_file is None) or newer(source, output_file)):
            if output_file:
                self.mkpath(os.path.dirname(output_file))
            try:
                self.spawn(pp_args)
            except DistutilsExecError as msg:
                print(msg)
                raise CompileError(msg)
class Migration(migrations.Migration):
    """Rename sponsorshipbenefit.value -> internal_value, add the capacity
    and internal_description fields, and refresh help_text/verbose_name
    metadata on several existing SponsorshipBenefit fields."""

    dependencies = [('sponsors', '0004_auto__1622')]

    operations = [migrations.RenameField(model_name='sponsorshipbenefit', old_name='value', new_name='internal_value'), migrations.AddField(model_name='sponsorshipbenefit', name='capacity', field=models.PositiveIntegerField(blank=True, help_text='For benefits with limited capacity, set it here.', null=True, verbose_name='Capacity')), migrations.AddField(model_name='sponsorshipbenefit', name='internal_description', field=models.TextField(blank=True, help_text='Any description or notes for internal use.', null=True, verbose_name='Internal Description or Notes')), migrations.AlterField(model_name='sponsorshipbenefit', name='internal_value', field=models.PositiveIntegerField(blank=True, help_text='Value used internally to calculate sponsorship level when applicants construct their own sponsorship packages.', null=True, verbose_name='Internal Value')), migrations.AlterField(model_name='sponsorshipbenefit', name='conflicts', field=models.ManyToManyField(blank=True, help_text='For benefits that conflict with one another,', related_name='_sponsorshipbenefit_conflicts_+', to='sponsors.SponsorshipBenefit', verbose_name='Conflicts')), migrations.AlterField(model_name='sponsorshipbenefit', name='description', field=models.TextField(blank=True, help_text='For display on generated prospectuses and the website.', null=True, verbose_name='Benefit Description')), migrations.AlterField(model_name='sponsorshipbenefit', name='levels', field=models.ManyToManyField(help_text='What sponsorship levels this benefit is included in.', related_name='benefits', to='sponsors.SponsorshipLevel', verbose_name='Sponsorship Levels')), migrations.AlterField(model_name='sponsorshipbenefit', name='minimum_level', field=models.ForeignKey(blank=True, help_text='The minimum sponsorship level required to receive this benefit.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='sponsors.SponsorshipLevel', verbose_name='Minimum Sponsorship Level')), 
    migrations.AlterField(model_name='sponsorshipbenefit', name='name', field=models.CharField(help_text='For display in the application form, statement of work, and sponsor dashboard.', max_length=1024, verbose_name='Benefit Name')), migrations.AlterField(model_name='sponsorshipbenefit', name='program', field=models.ForeignKey(help_text='Which sponsorship program the benefit is associated with.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='sponsors.SponsorshipProgram', verbose_name='Sponsorship Program'))]
def main():
    """Fine-tune / evaluate / predict a sequence-classification model on a
    GLUE task or user-supplied csv/json files (HuggingFace run_glue-style
    entry point driven by HfArgumentParser dataclasses)."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # A single .json argv is treated as a config file; otherwise parse CLI flags.
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Detect an existing checkpoint so training can resume instead of failing.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # Logging setup.  NOTE(review): basicConfig runs after the logger calls
    # above — presumably reordered during extraction; confirm.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if training_args.should_log else logging.WARN))
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    # Load data: either a named GLUE task or local csv/json files.
    if (data_args.task_name is not None):
        datasets = load_dataset('glue', data_args.task_name)
    else:
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        if training_args.do_predict:
            if (data_args.test_file is not None):
                train_extension = data_args.train_file.split('.')[(- 1)]
                test_extension = data_args.test_file.split('.')[(- 1)]
                assert (test_extension == train_extension), '`test_file` should have the same extension (csv or json) as `train_file`.'
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}')
        if data_args.train_file.endswith('.csv'):
            datasets = load_dataset('csv', data_files=data_files)
        else:
            datasets = load_dataset('json', data_files=data_files)
    # Determine the label space (regression vs. classification).
    if (data_args.task_name is not None):
        is_regression = (data_args.task_name == 'stsb')
        if (not is_regression):
            label_list = datasets['train'].features['label'].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Float labels imply a regression problem.
        is_regression = (datasets['train'].features['label'].dtype in ['float32', 'float64'])
        if is_regression:
            num_labels = 1
        else:
            label_list = datasets['train'].unique('label')
            label_list.sort()
            num_labels = len(label_list)
    # Load pretrained config / tokenizer / model.
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    # Work out which dataset columns hold the sentence(s) to classify.
    if (data_args.task_name is not None):
        (sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
    else:
        non_label_column_names = [name for name in datasets['train'].column_names if (name != 'label')]
        if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
            (sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
        elif (len(non_label_column_names) >= 2):
            (sentence1_key, sentence2_key) = non_label_column_names[:2]
        else:
            (sentence1_key, sentence2_key) = (non_label_column_names[0], None)
    # Pad in the tokenizer (max_length) or dynamically in the collator.
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        padding = False
    # Try to reuse the model's own label mapping when it matches the dataset.
    label_to_id = None
    if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (data_args.task_name is not None) and (not is_regression)):
        label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
        if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
    elif ((data_args.task_name is None) and (not is_regression)):
        label_to_id = {v: i for (i, v) in enumerate(label_list)}
    if (data_args.max_seq_length > tokenizer.model_max_length):
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        """Tokenize one batch and remap labels to model ids when needed."""
        args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
        if ((label_to_id is not None) and ('label' in examples)):
            # -1 marks an unlabeled example and is preserved as-is.
            result['label'] = [(label_to_id[l] if (l != (- 1)) else (- 1)) for l in examples['label']]
        return result
    datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache))
    # Select (and optionally subsample) the train / eval / test splits.
    if training_args.do_train:
        if ('train' not in datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if (('validation' not in datasets) and ('validation_matched' not in datasets)):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = datasets[('validation_matched' if (data_args.task_name == 'mnli') else 'validation')]
        if (data_args.max_val_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    if (training_args.do_predict or (data_args.task_name is not None) or (data_args.test_file is not None)):
        if (('test' not in datasets) and ('test_matched' not in datasets)):
            raise ValueError('--do_predict requires a test dataset')
        test_dataset = datasets[('test_matched' if (data_args.task_name == 'mnli') else 'test')]
        if (data_args.max_test_samples is not None):
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
    # Log a few random training samples for sanity checking.
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    if (data_args.task_name is not None):
        metric = load_metric('glue', data_args.task_name)

    def compute_metrics(p: EvalPrediction):
        """GLUE metric when a task is set; otherwise MSE or accuracy."""
        preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
        preds = (np.squeeze(preds) if is_regression else np.argmax(preds, axis=1))
        if (data_args.task_name is not None):
            result = metric.compute(predictions=preds, references=p.label_ids)
            if (len(result) > 1):
                result['combined_score'] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {'mse': ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}
    # Collator: static padding, fp16-friendly dynamic padding, or default.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
    # Training (optionally resuming from a detected or given checkpoint).
    if training_args.do_train:
        checkpoint = None
        if (last_checkpoint is not None):
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            # Only resume from the model dir if its label count matches.
            if (AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels):
                checkpoint = model_args.model_name_or_path
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # Evaluation (MNLI also evaluates the mismatched validation split).
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        tasks = [data_args.task_name]
        eval_datasets = [eval_dataset]
        if (data_args.task_name == 'mnli'):
            tasks.append('mnli-mm')
            eval_datasets.append(datasets['validation_mismatched'])
        for (eval_dataset, task) in zip(eval_datasets, tasks):
            metrics = trainer.evaluate(eval_dataset=eval_dataset)
            max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset))
            metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
            trainer.log_metrics('eval', metrics)
            trainer.save_metrics('eval', metrics)
    # Prediction: write one tab-separated results file per task.
    if training_args.do_predict:
        logger.info('*** Test ***')
        tasks = [data_args.task_name]
        test_datasets = [test_dataset]
        if (data_args.task_name == 'mnli'):
            tasks.append('mnli-mm')
            test_datasets.append(datasets['test_mismatched'])
        for (test_dataset, task) in zip(test_datasets, tasks):
            # Drop labels: GLUE test splits are unlabeled.
            test_dataset = test_dataset.remove_columns('label')
            predictions = trainer.predict(test_dataset=test_dataset).predictions
            predictions = (np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1))
            output_test_file = os.path.join(training_args.output_dir, f'test_results_{task}.txt')
            if trainer.is_world_process_zero():
                with open(output_test_file, 'w') as writer:
                    logger.info(f'***** Test results {task} *****')
                    writer.write('index\tprediction\n')
                    for (index, item) in enumerate(predictions):
                        if is_regression:
                            writer.write(f'''{index}	{item:3.3f}
''')
                        else:
                            item = label_list[item]
                            writer.write(f'''{index}	{item}
''')
class QuotedString(Token):
    """Token for matching strings delimited by quote characters.

    A single regular expression is assembled in ``__init__`` from the opening
    quote, the (possibly different) closing quote, and the optional escape
    character / escaped-quote sequence; ``parseImpl`` then matches with it
    and optionally unquotes and unescapes the matched text.
    """

    def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super().__init__()
        # Surrounding whitespace in the quote characters is never meaningful.
        quoteChar = quoteChar.strip()
        if (not quoteChar):
            warnings.warn('quoteChar cannot be the empty string', SyntaxWarning, stacklevel=2)
            raise SyntaxError()
        if (endQuoteChar is None):
            # Closing quote defaults to the opening quote.
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if (not endQuoteChar):
                warnings.warn('endQuoteChar cannot be the empty string', SyntaxWarning, stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes
        # Body character class: anything except the closing quote's first char
        # and the escape char; non-multiline mode also excludes newlines.
        if multiline:
            self.flags = (re.MULTILINE | re.DOTALL)
            self.pattern = ('%s(?:[^%s%s]' % (re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (((escChar is not None) and _escapeRegexRangeChars(escChar)) or '')))
        else:
            self.flags = 0
            self.pattern = ('%s(?:[^%s\\n\\r%s]' % (re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (((escChar is not None) and _escapeRegexRangeChars(escChar)) or '')))
        if (len(self.endQuoteChar) > 1):
            # Multi-char closing quote: also allow any proper prefix of it as
            # long as the character after the prefix breaks the full match.
            self.pattern += (('|(?:' + ')|(?:'.join((('%s[^%s]' % (re.escape(self.endQuoteChar[:i]), _escapeRegexRangeChars(self.endQuoteChar[i]))) for i in range((len(self.endQuoteChar) - 1), 0, (- 1))))) + ')')
        if escQuote:
            # An embedded, escaped quote sequence is part of the string body.
            self.pattern += ('|(?:%s)' % re.escape(escQuote))
        if escChar:
            # The escape char may precede any single character.
            self.pattern += ('|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = (re.escape(self.escChar) + '(.)')
        self.pattern += (')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
            # Cache the bound match method for speed in parseImpl.
            self.re_match = self.re.match
        except sre_constants.error:
            warnings.warn(('invalid pattern (%s) passed to Regex' % self.pattern), SyntaxWarning, stacklevel=2)
            raise
        self.name = str(self)
        self.errmsg = ('Expected ' + self.name)
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        """Match a quoted string at ``loc``; return (new_loc, text) or raise
        ParseException."""
        # Cheap first-character check before running the full regex.
        result = (((instring[loc] == self.firstQuoteChar) and self.re_match(instring, loc)) or None)
        if (not result):
            raise ParseException(instring, loc, self.errmsg, self)
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # Strip the surrounding quote characters.
            ret = ret[self.quoteCharLen:(- self.endQuoteCharLen)]
            if isinstance(ret, str_type):
                if (('\\' in ret) and self.convertWhitespaceEscapes):
                    # Convert literal escape sequences to real whitespace chars.
                    ws_map = {'\\t': '\t', '\\n': '\n', '\\f': '\x0c', '\\r': '\r'}
                    for (wslit, wschar) in ws_map.items():
                        ret = ret.replace(wslit, wschar)
                if self.escChar:
                    # Drop the escape char, keep the escaped character.
                    ret = re.sub(self.escCharReplacePattern, '\\g<1>', ret)
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return (loc, ret)

    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        # Lazily build and cache the human-readable description.
        if (self.strRepr is None):
            self.strRepr = ('quoted string, starting with %s ending with %s' % (self.quoteChar, self.endQuoteChar))
        return self.strRepr
(frozen=True, order=True, slots=True)
class AreaIdentifier():
region_name: str
area_name: str
def __post_init__(self) -> None:
assert isinstance(self.region_name, str)
assert isinstance(self.area_name, str)
def as_json(self) -> dict:
return {'region': self.region_name, 'area': self.area_name}
def from_json(cls, value: dict) -> Self:
try:
return cls(value['region'], value['area'])
except Exception:
raise
def as_tuple(self) -> tuple[(str, str)]:
return (self.region_name, self.area_name)
def as_string(self) -> str:
return f'{self.region_name}/{self.area_name}'
def from_string(cls, value: str) -> Self:
return cls(*value.split('/', 1))
def __repr__(self) -> str:
return f'region {self.region_name}/area {self.area_name}' |
# NOTE(review): the next line looks like a ``@pytest.mark.parametrize``
# decorator whose ``@pytest.mark`` prefix was stripped, and several string
# literals in this test appear truncated (URLs removed) — restore them from
# the upstream source before running.
.parametrize('url, rev', [('git+ None), ('git+ 'master')])
def test_add_with_git_constraint_with_subdirectory(url: str, rev: (str | None), tester: CommandTester, repo: TestRepository) -> None:
    """``poetry add`` of a git URL with a subdirectory installs the plugin and
    records a git/subdirectory (and optional ``rev``) constraint.
    """
    repo.add_package(Package('pendulum', '2.0.5'))
    tester.execute(url)
    # Expected command output, byte-for-byte.
    expected = '\nUpdating dependencies\nResolving dependencies...\n\nPackage operations: 2 installs, 0 updates, 0 removals\n\n - Installing pendulum (2.0.5)\n - Installing poetry-plugin (0.1.2 9cf87a2)\n\nWriting lock file\n'
    constraint = {'git': ' 'subdirectory': 'subdir'}
    if rev:
        constraint['rev'] = rev
    assert_plugin_add_result(tester, expected, constraint)
def setup_logging(args):
    """Configure per-process logging plus W&B/TensorBoard tracking.

    Every process logs to its own file under ``<save_dir>/log`` as well as to
    the console; only the accelerate main process initializes wandb and
    TensorBoard and keeps verbose logging. Returns
    ``(logger, tb_writer, run_name)`` — ``tb_writer`` is ``None`` and
    ``run_name`` is ``''`` on non-main processes.
    """
    project_name = args.model_ckpt.split('/')[-1]
    logger = logging.getLogger(__name__)
    log_dir = Path(args.save_dir) / 'log/'
    log_dir.mkdir(exist_ok=True)
    log_file = f'debug_{accelerator.process_index}.log'
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
        handlers=[logging.FileHandler(log_dir / log_file), logging.StreamHandler()],
    )
    if accelerator.is_main_process:
        # Main process: experiment trackers plus verbose logging.
        wandb.init(project=project_name, config=args)
        run_name = wandb.run.name
        tb_writer = SummaryWriter()
        tb_writer.add_hparams(vars(args), {'0': 0})
        logger.setLevel(logging.INFO)
        datasets.utils.logging.set_verbosity_info()
        transformers.utils.logging.set_verbosity_info()
    else:
        # Worker processes: no trackers, errors only.
        tb_writer = None
        run_name = ''
        logger.setLevel(logging.ERROR)
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    return logger, tb_writer, run_name
class NoMPLineBufferedPipeEnd(object):
    """In-process stand-in for one end of a line-buffered pipe.

    Each end buffers received items in ``lines``; ``send``/``write`` push
    into the peer's buffer (the peer must be assigned to ``self.remote``
    after construction). ``readline``/``recv`` pop in FIFO order and return
    ``False`` when the buffer is empty. ``fileno``/``flush``/``close`` are
    no-op placeholders for file-object compatibility.
    """

    def __init__(self, name):
        self.name = name
        self.lines = []

    def fileno(self):
        # No real OS handle backs this object.
        return 0

    def flush(self):
        pass

    def close(self):
        pass

    def write(self, data, udp=False):
        # ``udp`` is accepted for interface compatibility and ignored.
        self.send(data)

    def recv(self, timeout=0):
        # ``timeout`` is accepted for interface compatibility and ignored.
        return self.readline()

    def readline(self):
        """Pop and return the oldest buffered item, or False when empty."""
        if not self.lines:
            return False
        return self.lines.pop(0)

    def send(self, value):
        """Append ``value`` to the peer's buffer; False when it is full."""
        if len(self.remote.lines) >= 1000:
            return False
        self.remote.lines.append(value)
        return True
class Migration(migrations.Migration):
    """Add an optional ``maintainer`` foreign key to the Plugin model, then
    run ``populate_maintainer`` (presumably to backfill existing rows —
    confirm against that function's definition).
    """

    # Requires the (swappable) user model and the previous plugins migration.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('plugins', '0004_merge__0223')]
    # 1) add the nullable FK, 2) run the data migration.
    operations = [migrations.AddField(model_name='plugin', name='maintainer', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='plugins_maintainer', to=settings.AUTH_USER_MODEL, verbose_name='Maintainer')), migrations.RunPython(populate_maintainer)]
class MetricLogger():
    """Collects named SmoothedValue meters and formats them for display.

    ``update`` feeds scalar values (tensors are converted with ``.item()``)
    into per-name meters; ``__getattr__`` exposes each meter as an attribute;
    ``__str__`` joins ``name: median (global_avg)`` entries with the
    configured delimiter.
    """

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar per keyword; values must be float/int/0-d tensor."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Only invoked for names normal lookup misses: meters win over
        # instance __dict__ entries, anything else is an error.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError('{} object has no attribute {}'.format(type(self).__name__, attr))

    def __str__(self):
        entries = [
            '{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(entries)
def main(argv):
    """Demonstrate XFIXES cursor control: hide the pointer on the root window
    for five seconds, then show it again. Returns 1 when the server lacks
    the XFIXES extension.
    """
    display = Display()
    # Give up only when the extension is neither cached nor reported by the
    # server query.
    if (not display.has_extension('XFIXES')) and (display.query_extension('XFIXES') is None):
        print('XFIXES extension not supported', file=sys.stderr)
        return 1
    version = display.xfixes_query_version()
    print(f'Found XFIXES version {version.major_version}.{version.minor_version}', file=sys.stderr)
    root = display.screen().root
    print('Hiding cursor ...', file=sys.stderr)
    root.xfixes_hide_cursor()
    display.sync()
    time.sleep(5)
    print('Showing cursor ...', file=sys.stderr)
    root.xfixes_show_cursor()
    display.sync()
def load_dataset_files(vol_files=None, load_n=None, mode='train', load_segs=True, load_contours=False, do_mask_vols=False, use_labels=None):
    """Load up to ``load_n`` volumes (plus optional segmentations/contours)
    into preallocated arrays of fixed shape (160, 192, 224).

    Returns ``(vols, Y_segs, Y_contours, ids)``; ``Y_segs``/``Y_contours``
    are ``None`` when not requested.
    """
    if (vol_files is None):
        vol_files = get_dataset_files_list(mode=mode)
    if (load_n is None):
        load_n = len(vol_files)
    # Fixed target volume size; every loaded file is expected to match it.
    vol_size = (160, 192, 224)
    vols = np.zeros((((load_n,) + vol_size) + (1,)), dtype=np.float32)
    Y_segs = None
    Y_contours = None
    if load_segs:
        Y_segs = np.zeros(((load_n,) + vol_size), dtype=int)
    if load_contours:
        Y_contours = np.zeros((((load_n,) + vol_size) + (1,)), dtype=int)
    ids = []
    for i in range(load_n):
        if ((i % 50) == 0):
            # Progress every 50 files.
            print('Loaded {} of {} files'.format(i, load_n))
        data = load_vol_and_seg(vol_files[i], load_seg=load_segs, load_contours=load_contours, do_mask_vol=do_mask_vols, keep_labels=use_labels)
        if (data is None):
            # NOTE(review): skipping leaves row i of vols/Y_segs all-zero and
            # appends nothing to ids, so ids no longer lines up with the
            # array rows — confirm callers tolerate this.
            continue
        (vols[i], curr_segs, curr_contours) = data
        if load_segs:
            Y_segs[i] = curr_segs
        if load_contours:
            Y_contours[i] = curr_contours
        # ID is the file basename up to the first '_vol'.
        vol_base_name = os.path.basename(vol_files[i]).split('_vol')[0]
        ids.append(vol_base_name)
    return (vols, Y_segs, Y_contours, ids)
class TypeOfAny():
    """Namespace of integer tags recording why an ``Any`` type exists."""

    __slots__ = ()

    # Missing annotation.
    unannotated: Final = 1
    # Explicitly written ``Any``.
    explicit: Final = 2
    # Came from a type that could not be imported.
    from_unimported_type: Final = 3
    # Came from omitted generic parameters.
    from_omitted_generics: Final = 4
    # Produced while recovering from an error.
    from_error: Final = 5
    # Special form that is treated as Any.
    special_form: Final = 6
    # Propagated from another Any.
    from_another_any: Final = 7
    # Artifact of the implementation rather than user code.
    implementation_artifact: Final = 8
    # Produced by the suggestion engine.
    suggestion_engine: Final = 9
class ImageNetDataPipeline:
    """Convenience wrappers around the ImageNet data loader, evaluator and
    trainer used by this example.

    NOTE(review): the original methods had no ``self``/``cls`` parameter, so
    their ``@staticmethod`` decorators were evidently stripped; they are
    restored here. Calling them on the class keeps working exactly as
    before, and calling them on an instance no longer breaks.
    """

    @staticmethod
    def get_val_dataloader() -> torch.utils.data.DataLoader:
        """Return a DataLoader over the ImageNet validation set."""
        data_loader = ImageNetDataLoader(DATASET_DIR, image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.evaluation['batch_size'], is_training=False, num_workers=image_net_config.evaluation['num_workers']).data_loader
        return data_loader

    @staticmethod
    def evaluate(model: torch.nn.Module, iterations: int, use_cuda: bool) -> float:
        """Run the ImageNet evaluator on ``model`` for ``iterations`` and
        return its result."""
        evaluator = ImageNetEvaluator(DATASET_DIR, image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.evaluation['batch_size'], num_workers=image_net_config.evaluation['num_workers'])
        return evaluator.evaluate(model, iterations=iterations, use_cuda=use_cuda)

    @staticmethod
    def finetune(model: torch.nn.Module, epochs: int, learning_rate: float, learning_rate_schedule: List, use_cuda: bool):
        """Fine-tune ``model`` in place for ``epochs`` epochs with the given
        learning-rate schedule."""
        trainer = ImageNetTrainer(DATASET_DIR, image_size=image_net_config.dataset['image_size'], batch_size=image_net_config.train['batch_size'], num_workers=image_net_config.train['num_workers'])
        trainer.train(model, max_epochs=epochs, learning_rate=learning_rate, learning_rate_schedule=learning_rate_schedule, use_cuda=use_cuda)
class FrozenBot():
    """Immutable runtime view of a configured botogram bot.

    An instance is built from a fully configured bot; once ``__init__``
    finishes (``_frozen`` is set) attribute assignment raises
    FrozenBotError and every hook/command registration method raises
    unconditionally. Picklable via ``__reduce__`` so worker processes can
    receive a copy.
    """

    def __init__(self, api, about, owner, hide_commands, before_help, after_help, link_preview_in_help, validate_callback_signatures, process_backlog, lang, itself, commands_re, commands, chains, scheduler, main_component_id, bot_id, shared_memory, update_processors, override_i18n):
        # Start unfrozen so the assignments below pass our __setattr__ guard;
        # object.__setattr__ bypasses the override entirely.
        object.__setattr__(self, '_frozen', False)
        self.api = api
        self.about = about
        self.owner = owner
        self._hide_commands = hide_commands
        self.before_help = before_help
        self.after_help = after_help
        self.link_preview_in_help = link_preview_in_help
        self.validate_callback_signatures = validate_callback_signatures
        self.process_backlog = process_backlog
        self.lang = lang
        self._commands_re = commands_re
        self._main_component_id = main_component_id
        self._bot_id = bot_id
        self._shared_memory = shared_memory
        self._scheduler = scheduler
        self._chains = chains
        self._update_processors = update_processors
        # Re-bind each command object to this (frozen) bot instance.
        self._commands = {name: command.for_bot(self) for (name, command) in commands.items()}
        self.override_i18n = override_i18n
        self.logger = logbook.Logger('botogram bot')
        self._lang_inst = utils.get_language(lang)
        self.itself = itself
        self.itself.set_api(api)
        self._inline_paginate = {}
        # From here on __setattr__ rejects any further mutation.
        self._frozen = True

    def __reduce__(self):
        # Pickle support: rebuild through the module-level ``restore`` helper
        # with the full constructor argument list.
        args = (self.api, self.about, self.owner, self._hide_commands, self.before_help, self.after_help, self.link_preview_in_help, self.validate_callback_signatures, self.process_backlog, self.lang, self.itself, self._commands_re, self._commands, self._chains, self._scheduler, self._main_component_id, self._bot_id, self._shared_memory, self._update_processors, self.override_i18n)
        return (restore, args)

    def __setattr__(self, name, value):
        # Reject mutation once the instance has been frozen in __init__.
        if self._frozen:
            raise FrozenBotError("Can't alter a frozen bot")
        return object.__setattr__(self, name, value)

    def __eq__(self, other):
        # Two frozen bots are "the same bot" when they share the bot id.
        return (self._bot_id == other._bot_id)

    # --- Registration APIs: all disabled on a frozen bot ------------------

    def before_processing(self, func):
        raise FrozenBotError("Can't add hooks to a bot at runtime")

    def process_message(self, func):
        raise FrozenBotError("Can't add hooks to a bot at runtime")

    def poll_update(self, func):
        raise FrozenBotError("Can't add hooks to a bot at runtime")

    def message_equals(self, string, ignore_case=True):
        raise FrozenBotError("Can't add hooks to a bot at runtime")

    def message_contains(self, string, ignore_case=True, multiple=False):
        raise FrozenBotError("Can't add hooks to a bot at runtime")

    def message_matches(self, regex, flags=0, multiple=False):
        raise FrozenBotError("Can't add hooks to a bot at runtime")

    def command(self, name, hidden=False):
        raise FrozenBotError("Can't add commands to a bot at runtime")

    def callback(self, name, hidden=False):
        raise FrozenBotError("Can't add callbacks to a bot at runtime")

    def timer(self, interval):
        raise FrozenBotError("Can't add timers to a bot at runtime")

    def prepare_memory(self, func):
        raise FrozenBotError("Can't register a shared memory preparer to a bot at runtime")

    # NOTE(review): the bare tuple below looks like a decorator whose
    # ``@``-prefix was lost (a deprecation marker) — confirm against the
    # upstream source.
    ('_shared_memory', '1.0', 'Rename the decorator to _memory')
    def init_shared_memory(self, func):
        # Backwards-compatible alias for prepare_memory.
        return self.prepare_memory(func)

    def chat(self, id):
        """Fetch a Chat object by id through the getChat API method."""
        return self.api.call('getChat', {'chat_id': id}, expect=objects.Chat)

    def _edit_create_fake_message_object(self, chat, message):
        # Accept either rich objects or raw ids for both chat and message.
        # NOTE(review): the check is for 'message_id' but the attribute read
        # is '.id' — presumably Message exposes both; confirm.
        if hasattr(message, 'message_id'):
            message = message.id
        if hasattr(chat, 'id'):
            chat = chat.id
        # Minimal stand-in Message so the edit helpers below can be reused.
        return objects.Message({'message_id': message, 'from': {'id': self.itself.id, 'first_name': ''}, 'date': 0, 'chat': {'id': chat, 'type': ''}}, self.api)

    def edit_message(self, chat, message, text, syntax=None, preview=True, extra=None, attach=None):
        """Edit the text of an already-sent message."""
        msg = self._edit_create_fake_message_object(chat, message)
        msg.edit(text, syntax, preview, extra, attach)

    def edit_caption(self, chat, message, caption, extra=None, attach=None):
        """Edit the caption of an already-sent media message."""
        msg = self._edit_create_fake_message_object(chat, message)
        msg.edit_caption(caption, extra, attach)

    def process(self, update):
        """Dispatch one Update to the first matching update processor."""
        if (not isinstance(update, objects.Update)):
            raise ValueError('Only Update objects are allowed')
        update.set_api(self.api)
        try:
            # Each processor handles one update kind; the first whose kind is
            # present on the update wins.
            for (kind, processor) in self._update_processors.items():
                if (getattr(update, kind) is None):
                    continue
                processor(self, self._chains, update)
                break
        except api_module.ChatUnavailableError as e:
            self.logger.warning(('Chat %s is not available to your bot:' % e.chat_id))
            self.logger.warning(str(e))
            self.logger.warning(('Update #%s processing aborted!' % update.update_id))
            # NOTE(review): the chain key is spelled 'chat_unavalable_hooks'
            # (sic); it must match the producer side, so do not "fix" the
            # spelling here without checking where the chain is populated.
            for hook in self._chains['chat_unavalable_hooks']:
                self.logger.debug(('Executing %s for chat %s...' % (hook.name, e.chat_id)))
                hook.call(self, e.chat_id, e.reason)

    def scheduled_tasks(self, current_time=None, wrap=True):
        """Return scheduler jobs due now, optionally wrapped as no-arg
        callables that process against this bot."""
        def wrapper(task):
            def process():
                return task.process(self)
            return process
        tasks = self._scheduler.now(current=current_time)
        if wrap:
            return [wrapper(job) for job in tasks]
        return list(tasks)

    def register_update_processor(self, kind, processor):
        raise FrozenBotError("Can't register new update processors at runtime")

    def _(self, message, **args):
        """Translate ``message`` (per-bot overrides first, then the bundled
        language catalog); ``args`` are %-interpolated into the result."""
        if (message in self.override_i18n):
            return (self.override_i18n[message] % args)
        else:
            return (self._lang_inst.gettext(message) % args)

    def available_commands(self, all=False):
        """Yield commands sorted by (order, name); hidden ones only when
        ``all`` is true."""
        # Two stable sorts: name first, then order, so order is the primary key.
        s = sorted(self._commands.values(), key=(lambda command: command.name))
        c = sorted(s, key=(lambda command: command.order))
        for command in c:
            is_hidden = (command.hidden or (command.name in self._hide_commands))
            if (all or (not is_hidden)):
                (yield command)

    def _call(self, func, component=None, **available):
        """Invoke ``func`` with dependency-injected arguments; ``shared``
        memory is resolved lazily and only when a component is given."""
        available.setdefault('bot', self)
        if (component is not None):
            def lazy_shared():
                return self._shared_memory.of(self._bot_id, component)
            available.setdefault('shared', utils.CallLazyArgument(lazy_shared))
        return utils.call(func, **available)

    # NOTE(review): another apparently stripped decorator (deprecation of
    # bot.hide_commands) — it may also have lost a ``@property``; confirm
    # against the upstream source.
    ('bot.hide_commands', '1.0', 'Use ("name", hidden=True) instead')
    def hide_commands(self):
        return self._hide_commands
def main(args: argparse.Namespace):
    """Run attention-visualization inference of a PIXEL NLI classifier over a
    sample of a dataset and log (image, label, prediction, attention map)
    rows to Weights & Biases.
    """
    log_level = logging.INFO
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], level=log_level)
    logger.setLevel(log_level)
    set_seed(args.seed)
    wandb.init()
    wandb.run.name = args.revision
    # Load either a hub dataset (validation split) or a local csv/tsv/json file.
    if (args.dataset_name is not None):
        nli_dataset = load_dataset(args.dataset_name, args.dataset_config_name, split='validation', use_auth_token=(args.auth_token if args.auth_token else None))
    else:
        logger.info(f'Loading local dataset file {args.dataset_file}')
        if (args.dataset_file.endswith('.csv') or args.dataset_file.endswith('.txt')):
            nli_dataset = load_dataset('csv', data_files=args.dataset_file, delimiter='\t', split='train')
        else:
            nli_dataset = load_dataset('json', data_files=args.dataset_file, delimiter='\t', split='train')
    # Ensure labels are class-encoded and build the id<->name lookup tables.
    if (not isinstance(nli_dataset.features['label'], ClassLabel)):
        nli_dataset = nli_dataset.class_encode_column('label')
    label_list = nli_dataset.features['label'].names
    num_labels = len(label_list)
    label_name_to_id = {v: i for (i, v) in enumerate(label_list)}
    label_id_to_name = {v: k for (k, v) in label_name_to_id.items()}
    (sentence1_key, sentence2_key) = ('premise', 'hypothesis')
    config_kwargs = {'use_auth_token': (args.auth_token if args.auth_token else None), 'revision': args.revision}
    config = ViTConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, **config_kwargs)
    model = InterpretablePIXELForSequenceClassification.from_pretrained(args.model_name_or_path, config=config, pooling_mode=PoolingMode.CLS, **config_kwargs)
    # Renderer defaults to the model path unless one is given explicitly.
    text_renderer = PangoCairoTextRenderer.from_pretrained((args.text_renderer_name_or_path if args.text_renderer_name_or_path else args.model_name_or_path), fallback_fonts_dir=args.fallback_fonts_dir, **config_kwargs)
    if args.max_seq_length:
        text_renderer.max_seq_length = args.max_seq_length
    # Keep the model's position embeddings in sync with the renderer length.
    resize_model_embeddings(model, text_renderer.max_seq_length)
    transforms = get_transforms(do_resize=True, size=(text_renderer.pixels_per_patch, (text_renderer.pixels_per_patch * text_renderer.max_seq_length)))
    formatting_fn = glue_strip_spaces

    def preprocess_function(example: dict):
        # Render the sentence pair (or single sentence) to pixels and build
        # the matching attention mask, both with a leading batch dim of 1.
        result = {}
        if sentence2_key:
            encoding = text_renderer(text=(formatting_fn(example[sentence1_key]), formatting_fn(example[sentence2_key])))
        else:
            encoding = text_renderer(text=formatting_fn(example[sentence1_key]))
        result['pixel_values'] = transforms(Image.fromarray(encoding.pixel_values)).unsqueeze(0)
        result['attention_mask'] = get_attention_mask(encoding.num_text_patches, seq_length=text_renderer.max_seq_length).unsqueeze(0)
        return result

    def print_top_classes(predictions: torch.Tensor, top_k: int, **kwargs):
        # Log the top-k classes with logit value and softmax probability,
        # padded so the columns line up.
        prob = torch.softmax(predictions, dim=1)
        class_indices = predictions.data.topk(top_k, dim=1)[1][0].tolist()
        max_str_len = 0
        class_names = []
        for cls_idx in class_indices:
            class_names.append(label_id_to_name[cls_idx])
            if (len(label_id_to_name[cls_idx]) > max_str_len):
                max_str_len = len(label_id_to_name[cls_idx])
        logger.info(f'Top {top_k} classes:')
        for cls_idx in class_indices:
            output_string = f' {cls_idx} : {label_id_to_name[cls_idx]}'
            output_string += ((' ' * (max_str_len - len(label_id_to_name[cls_idx]))) + '\t\t')
            output_string += 'value = {:.3f}\t prob = {:.1f}%'.format(predictions[(0, cls_idx)], (100 * prob[(0, cls_idx)]))
            logger.info(output_string)
        logger.info('\n')

    def process_example(ex: Dict[(str, Union[(str, int)])]):
        # Run one example through the model and build the
        # (image, label, prediction, attention visualization) table row.
        if sentence2_key:
            logger.info(f'Sentence 1: {ex[sentence1_key]}')
            logger.info(f'Sentence 2: {ex[sentence2_key]}')
        else:
            logger.info(f'Sentence: {ex[sentence1_key]}')
        inputs = preprocess_function(ex)
        model.eval()
        output = model(**inputs)
        print_top_classes(output['logits'], min(num_labels, 5))
        attn_vis = generate_visualization(model, inputs, image_hw=int((math.sqrt(text_renderer.max_seq_length) * text_renderer.pixels_per_patch)), patch_size=text_renderer.pixels_per_patch)
        img = wandb.Image(format_img(inputs['pixel_values']))
        label = label_id_to_name[ex['label']]
        prediction = label_id_to_name[torch.argmax(output['logits']).detach().item()]
        vis = wandb.Image(attn_vis)
        return (img, label, prediction, vis)

    # Visualize at most 50 randomly selected samples, one at a time.
    nli_dataset = nli_dataset.shuffle().select(range(min(args.num_samples, 50)))
    dataloader = DataLoader(nli_dataset, collate_fn=(lambda x: x), shuffle=False, batch_size=1)
    data = []
    for (i, example) in enumerate(dataloader):
        data.append(process_example(example[0]))
    vis_table = wandb.Table(columns=['image', 'label', 'prediction', 'attention_vis'], data=data)
    wandb.log({f'Data': vis_table})
class UrlFieldTest(StringTestMixin, BaseFieldTestMixin, FieldTestCase):
    """Tests for ``fields.Url``: schema, explicit/implicit endpoint
    resolution, absolute URLs with scheme override, and blueprint prefixes.

    NOTE(review): many string literals in this class appear truncated (URLs
    stripped during extraction, leaving unterminated quotes) — restore them
    from the upstream source before running.
    """

    field_class = partial(fields.Url, 'endpoint')

    def test_defaults(self):
        # Not required by default; serialized schema is a plain string.
        field = fields.Url('endpoint')
        assert (not field.required)
        assert (field.__schema__ == {'type': 'string'})

    def test_invalid_object(self, app):
        # Outputting from a None object must raise MarshallingError.
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url('foobar')
        with app.test_request_context('/'):
            with pytest.raises(fields.MarshallingError):
                field.output('foo', None)

    def test_simple(self, app, mocker):
        # Explicit endpoint: attribute value fills the URL parameter.
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url('foobar')
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/'):
            assert ('/42' == field.output('foo', obj))

    def test_absolute(self, app, mocker):
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url('foobar', absolute=True)
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/'):
            assert (' == field.output('foo', obj))

    def test_absolute_scheme(self, app, mocker):
        # Absolute URL with an explicit scheme override.
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url('foobar', absolute=True, scheme='
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/', base_url='
            assert (' == field.output('foo', obj))

    def test_without_endpoint_invalid_object(self, app):
        # No endpoint given: resolved from the current request.
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url()
        with app.test_request_context('/foo'):
            with pytest.raises(fields.MarshallingError):
                field.output('foo', None)

    def test_without_endpoint(self, app, mocker):
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url()
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/foo'):
            assert ('/42' == field.output('foo', obj))

    def test_without_endpoint_absolute(self, app, mocker):
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url(absolute=True)
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/foo'):
            assert (' == field.output('foo', obj))

    def test_without_endpoint_absolute_scheme(self, app, mocker):
        app.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        field = fields.Url(absolute=True, scheme='
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/foo', base_url='
            assert (' == field.output('foo', obj))

    def test_with_blueprint_invalid_object(self, app):
        # Endpoint resolved from a blueprint-registered route.
        bp = Blueprint('foo', __name__, url_prefix='/foo')
        bp.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        app.register_blueprint(bp)
        field = fields.Url()
        with app.test_request_context('/foo/foo'):
            with pytest.raises(fields.MarshallingError):
                field.output('foo', None)

    def test_with_blueprint(self, app, mocker):
        bp = Blueprint('foo', __name__, url_prefix='/foo')
        bp.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        app.register_blueprint(bp)
        field = fields.Url()
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/foo/foo'):
            assert ('/foo/42' == field.output('foo', obj))

    def test_with_blueprint_absolute(self, app, mocker):
        bp = Blueprint('foo', __name__, url_prefix='/foo')
        bp.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        app.register_blueprint(bp)
        field = fields.Url(absolute=True)
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/foo/foo'):
            assert (' == field.output('foo', obj))

    def test_with_blueprint_absolute_scheme(self, app, mocker):
        bp = Blueprint('foo', __name__, url_prefix='/foo')
        bp.add_url_rule('/<foo>', 'foobar', view_func=(lambda x: x))
        app.register_blueprint(bp)
        field = fields.Url(absolute=True, scheme='
        obj = mocker.Mock(foo=42)
        with app.test_request_context('/foo/foo', base_url='
            assert (' == field.output('foo', obj))
def generate_bin_op_both_wrappers(cl: ClassIR, fn: FuncIR, fn_rev: FuncIR, emitter: Emitter, gen: WrapperGenerator) -> None:
    """Emit the C wrapper for a binary-op dunder when the class defines both
    the forward and the reverse method (e.g. __add__ and __radd__).

    Layout of the generated C code:
      * forward path, when the left operand is an instance of ``cl``;
      * 'typefail' label: reverse path, when the right operand is one;
      * 'typefail2' label: give up and return NotImplemented.
    """
    # Forward path: left operand has the expected type.
    emitter.emit_line('if (PyObject_IsInstance(obj_left, (PyObject *){})) {{'.format(emitter.type_struct_name(cl)))
    gen.emit_arg_processing(error=GotoHandler('typefail'), raise_exception=False)
    handle_third_pow_argument(fn, emitter, gen, if_unsupported=['goto typefail2;'])
    # Ternary pow has no reverse-dunder fallback, so NotImplemented from the
    # forward call must jump straight to the final failure label.
    if ((fn.name == '__pow__') and (len(fn.args) == 3)):
        fwd_not_implemented_handler = 'goto typefail2;'
    else:
        fwd_not_implemented_handler = 'goto typefail;'
    gen.emit_call(not_implemented_handler=fwd_not_implemented_handler)
    gen.emit_error_handling()
    emitter.emit_line('}')
    emitter.emit_label('typefail')
    # Reverse path: right operand has the expected type; call the reverse
    # dunder with swapped argument names.
    emitter.emit_line('if (PyObject_IsInstance(obj_right, (PyObject *){})) {{'.format(emitter.type_struct_name(cl)))
    gen.set_target(fn_rev)
    gen.arg_names = ['right', 'left']
    gen.emit_arg_processing(error=GotoHandler('typefail2'), raise_exception=False)
    handle_third_pow_argument(fn_rev, emitter, gen, if_unsupported=['goto typefail2;'])
    gen.emit_call()
    gen.emit_error_handling()
    emitter.emit_line('} else {')
    # Right operand is some other type: defer to its reverse dunder.
    generate_bin_op_reverse_dunder_call(fn, emitter, fn_rev.name)
    emitter.emit_line('}')
    emitter.emit_label('typefail2')
    emitter.emit_line('Py_INCREF(Py_NotImplemented);')
    emitter.emit_line('return Py_NotImplemented;')
    gen.finish()
def Transpose2D_block(filters, stage, kernel_size=(3, 3), upsample_rate=(2, 2), transpose_kernel_size=(4, 4), use_batchnorm=False, skip=None):
    """Return a decoder-block builder: transposed-conv upsampling, optional
    batch-norm, ReLU, optional skip-connection concatenation, then a
    ConvRelu block. The returned callable maps a tensor to a tensor.
    """
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        # Learned upsampling; the bias is redundant when BatchNorm follows.
        tensor = Conv2DTranspose(
            filters,
            transpose_kernel_size,
            strides=upsample_rate,
            padding='same',
            name=up_name,
            use_bias=(not use_batchnorm),
        )(input_tensor)
        if use_batchnorm:
            tensor = BatchNormalization(name=(bn_name + '1'))(tensor)
        tensor = Activation('relu', name=(relu_name + '1'))(tensor)
        if skip is not None:
            # Merge the encoder feature map from the same resolution.
            tensor = Concatenate()([tensor, skip])
        return ConvRelu(
            filters,
            kernel_size,
            use_batchnorm=use_batchnorm,
            conv_name=(conv_name + '2'),
            bn_name=(bn_name + '2'),
            relu_name=(relu_name + '2'),
        )(tensor)
    return layer
def define_test_input(args):
    """Build the numpy test array selected by ``args.test_case``.

    Cases: 1 -> fixed 2x2 ints; 2 -> seeded random 2-D float array with a
    random shape; 3 -> 2x3x4 arange; 4 -> 2x5x5x2 arange.

    Raises:
        ValueError: for an unrecognized ``args.test_case`` (previously an
        unknown case fell through and crashed with UnboundLocalError).
    """
    if (args.test_case == 1):
        a = np.array([[1, 2], [3, 4]])
    elif (args.test_case == 2):
        # Seed first so the random shape and contents are reproducible.
        np.random.seed(42)
        (dim1, dim2) = np.random.randint(1, 100, (2,))
        a = np.random.rand(dim1, dim2)
    elif (args.test_case == 3):
        a = np.arange(24).reshape(2, 3, 4)
    elif (args.test_case == 4):
        a = np.arange(100).reshape(2, 5, 5, 2)
    else:
        raise ValueError(f'unknown test_case: {args.test_case}')
    return a
def hyphenate_each_word(language: str, transcribed_data: list[TranscribedData]) -> (list[list[str]] | None):
    """Hyphenate the word of every transcribed segment.

    Returns one hyphenation result per entry in ``transcribed_data``, or
    ``None`` when the language is unsupported or the hyphenator fails.
    """
    lang_region = language_check(language)
    if (lang_region is None):
        print(f"{ULTRASINGER_HEAD} {red_highlighted('Error in hyphenation for language ')} {blue_highlighted(language)}{red_highlighted(', maybe you want to disable it?')}")
        return None
    hyphenated_word = []
    try:
        hyphenator = create_hyphenator(lang_region)
        # Iterate the entries directly (tqdm still shows progress) instead of
        # the original tqdm(enumerate(...)) + positional-index indirection.
        for data_entry in tqdm(transcribed_data):
            hyphenated_word.append(hyphenation(data_entry.word, hyphenator))
    except Exception:
        # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed while keeping the best-effort fallback.
        print(f"{ULTRASINGER_HEAD} {red_highlighted('Error in hyphenation for language ')} {blue_highlighted(language)}{red_highlighted(', maybe you want to disable it?')}")
        return None
    return hyphenated_word
def test_create_lane_links_junction3():
    """Check lane-link creation when a connecting road (road 2, road_type=1)
    joins two junction-attached roads at their start contact points.

    After ``adjust_roads_and_lanes`` only the connecting road should carry
    lane predecessor/successor ids; roads 1 and 3 must have none.
    """
    planview = []
    lanec = []
    lanel = []
    laner = []
    lanesec = []
    lanes = []
    rm = pyodrx.RoadMark(pyodrx.RoadMarkType.solid, 0.2, rule=pyodrx.MarkRule.no_passing)
    # Three geometries: straight, quarter-turn arc, straight.
    geom = []
    geom.append(pyodrx.Line(50))
    geom.append(pyodrx.Arc(0.01, angle=(np.pi / 2)))
    geom.append(pyodrx.Line(50))
    # One road per geometry: a center lane plus one lane on each side, all
    # 3 m wide with the same road mark.
    for i in range(len(geom)):
        planview.append(pyodrx.PlanView())
        planview[i].add_geometry(geom[i])
        lanec.append(pyodrx.Lane(a=3))
        lanel.append(pyodrx.Lane(a=3))
        laner.append(pyodrx.Lane(a=3))
        lanec[i].add_roadmark(rm)
        lanel[i].add_roadmark(rm)
        laner[i].add_roadmark(rm)
        lanesec.append(pyodrx.LaneSection(0, lanec[i]))
        lanesec[i].add_right_lane(lanel[i])
        lanesec[i].add_left_lane(laner[i])
        lanes.append(pyodrx.Lanes())
        lanes[i].add_lanesection(lanesec[i])
    road1 = pyodrx.Road(1, planview[0], lanes[0])
    road1.add_predecessor(pyodrx.ElementType.junction, 1)
    # Road 2 is the junction's connecting road (road_type=1), linked
    # start-to-start with both neighbors.
    road2 = pyodrx.Road(2, planview[1], lanes[1], road_type=1)
    road2.add_predecessor(pyodrx.ElementType.road, 1, pyodrx.ContactPoint.start)
    road2.add_successor(pyodrx.ElementType.road, 3, pyodrx.ContactPoint.start)
    road3 = pyodrx.Road(3, planview[2], lanes[2])
    road3.add_predecessor(pyodrx.ElementType.junction, 1)
    odr = pyodrx.OpenDrive('myroad')
    odr.add_road(road1)
    odr.add_road(road2)
    odr.add_road(road3)
    odr.adjust_roads_and_lanes()
    # Only the connecting road gets lane links.
    assert (int(road2.lanes.lanesections[0].rightlanes[0].links.get_predecessor_id()) == 1)
    assert (int(road2.lanes.lanesections[0].rightlanes[0].links.get_successor_id()) == (- 1))
    assert (int(road2.lanes.lanesections[0].leftlanes[0].links.get_predecessor_id()) == (- 1))
    assert (int(road2.lanes.lanesections[0].leftlanes[0].links.get_successor_id()) == 1)
    assert (road1.lanes.lanesections[0].rightlanes[0].links.get_predecessor_id() == None)
    assert (road1.lanes.lanesections[0].rightlanes[0].links.get_successor_id() == None)
    assert (road1.lanes.lanesections[0].leftlanes[0].links.get_predecessor_id() == None)
    assert (road1.lanes.lanesections[0].leftlanes[0].links.get_successor_id() == None)
    assert (road3.lanes.lanesections[0].rightlanes[0].links.get_predecessor_id() == None)
    assert (road3.lanes.lanesections[0].rightlanes[0].links.get_successor_id() == None)
    assert (road3.lanes.lanesections[0].leftlanes[0].links.get_predecessor_id() == None)
    assert (road3.lanes.lanesections[0].leftlanes[0].links.get_successor_id() == None)
    # The produced OpenDRIVE document must validate against the xodr schema.
    assert (version_validation(None, odr, wanted_schema='xodr') == ValidationResponse.OK)
def get_inference_utils(opt):
    """Build the inference DataLoader and the dataset's class-name list.

    Assembles the spatial transform (resize, optional crop, tensor
    conversion, value scaling, normalization) and the temporal transform
    (optional stride subsampling plus a sliding window), then wraps the
    inference dataset in a DataLoader.
    """
    assert (opt.inference_crop in ['center', 'nocrop'])
    normalize = get_normalize_method(opt.mean, opt.std, opt.no_mean_norm, opt.no_std_norm)
    if (opt.train_crop == 'other'):
        # 'other' crop mode: explicit (scale_h, scale_w) resize + random crop.
        spatial_transform = [Resize((opt.scale_h, opt.scale_w)), RandomCrop(opt.sample_size), ToTensor()]
    else:
        spatial_transform = [Resize(opt.sample_size)]
        if (opt.inference_crop == 'center'):
            spatial_transform.append(CenterCrop(opt.sample_size))
        spatial_transform.append(ToTensor())
    if (opt.input_type == 'flow'):
        # Optical-flow input only uses the first two channels.
        spatial_transform.append(PickFirstChannels(n=2))
    spatial_transform.extend([ScaleValue(opt.value_scale), normalize])
    spatial_transform = Compose(spatial_transform)
    temporal_transform = []
    if (opt.sample_t_stride > 1):
        temporal_transform.append(TemporalSubsampling(opt.sample_t_stride))
    # Sliding window over the clip for dense inference.
    temporal_transform.append(SlidingWindow(opt.sample_duration, opt.inference_stride))
    temporal_transform = TemporalCompose(temporal_transform)
    (inference_data, collate_fn) = get_inference_data(opt.video_path, opt.annotation_path, opt.dataset, opt.input_type, opt.file_type, opt.inference_subset, spatial_transform, temporal_transform)
    inference_loader = torch.utils.data.DataLoader(inference_data, batch_size=opt.inference_batch_size, shuffle=False, num_workers=opt.n_threads, pin_memory=True, worker_init_fn=worker_init_fn, collate_fn=collate_fn)
    return (inference_loader, inference_data.class_names)
class MiscTests(unittest.TestCase):
    """Protocol logger selection: each side gets a sensible default logger,
    and an explicit ``logger`` argument overrides it.
    """

    def test_client_default_logger(self):
        # Client protocols default to the 'websockets.client' logger.
        client = Protocol(CLIENT)
        logger = logging.getLogger('websockets.client')
        self.assertIs(client.logger, logger)

    def test_server_default_logger(self):
        # Server protocols default to the 'websockets.server' logger.
        server = Protocol(SERVER)
        logger = logging.getLogger('websockets.server')
        self.assertIs(server.logger, logger)

    def test_client_custom_logger(self):
        # An explicit logger is used as-is on the client side.
        logger = logging.getLogger('test')
        client = Protocol(CLIENT, logger=logger)
        self.assertIs(client.logger, logger)

    def test_server_custom_logger(self):
        # An explicit logger is used as-is on the server side.
        logger = logging.getLogger('test')
        server = Protocol(SERVER, logger=logger)
        self.assertIs(server.logger, logger)
class TableDataModel(DataModel):
    """DataModel specialization for tabular data held as a DataFrame
    (``raw_data``, with ``raw_data_name``/``raw_data_path`` metadata).
    """

    # Optional database-backed view of this table (set via set_db_view).
    db_view: DataModel = None

    def set_db_view(self, db_data_model: DataModel) -> None:
        """Attach the database-backed view of this table."""
        self.db_view = db_data_model

    def get_llm_side_data(self, serialize_method: str = 'tsv', num_visible_rows: int = 3) -> Any:
        """Serialize the table (name, path, first rows) for LLM consumption."""
        table_data = self.raw_data
        table_name = self.raw_data_name
        table_path = self.raw_data_path
        formatted_table = serialize_df(table_data, table_name, table_path, serialize_method, num_visible_rows)
        return formatted_table

    def get_human_side_data(self, mode: str = 'HEAD') -> Any:
        """Return the DataFrame head ('HEAD') or the full frame ('FULL').

        Raises:
            ValueError: for any other ``mode``.
        """
        if (mode == 'HEAD'):
            return self.raw_data.head()
        elif (mode == 'FULL'):
            return self.raw_data
        else:
            raise ValueError(f'Unsupported mode: {mode}')

    @staticmethod
    def to_react_table(table: DataFrame) -> str:
        """Convert a DataFrame to the JSON ``{columns, data}`` shape used by
        react-table; NaNs become empty strings.

        NOTE(review): this takes no ``self``, so its ``@staticmethod``
        decorator was evidently stripped in the original; restored here
        (class-level calls keep working, instance calls no longer break).
        """
        columns = list(map((lambda item: {'accessorKey': item, 'header': item}), table.columns.tolist()))
        data = table.fillna('').to_dict(orient='records')
        table = json.dumps({'columns': columns, 'data': data})
        return table
# NOTE(review): ".end_to_end()" looks like a truncated decorator
# (presumably "@pytest.mark.end_to_end()") — confirm against the repo.
.end_to_end()
def test_errors_during_loading_nodes_have_info(runner, tmp_path):
    # End-to-end check: when a custom node's load() raises (here: unpickling
    # an empty file), the failure report must name the task and the node
    # without leaking pytask-internal frames into the output.
    source = '\n    from __future__ import annotations\n    from pathlib import Path\n    from typing import Any\n    import attrs\n    import pickle\n\n    \n    class PickleNode:\n        name: str\n        path: Path\n        signature: str = "id"\n\n        def state(self) -> str | None:\n            if self.path.exists():\n                return str(self.path.stat().st_mtime)\n            return None\n\n        def load(self) -> Any:\n            return pickle.loads(self.path.read_bytes())\n\n        def save(self, value: Any) -> None:\n            self.path.write_bytes(pickle.dumps(value))\n\n    def task_example(\n        value=PickleNode(name="node", path=Path(__file__).parent / "file.txt")\n    ): pass\n    '
    tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
    # Empty file: PickleNode.load() calls pickle.loads(b""), which raises
    # and triggers the error path under test.
    tmp_path.joinpath('file.txt').touch()
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.FAILED)
    assert ('task_example.py::task_example' in result.output)
    assert ('Exception while loading node' in result.output)
    # Internal pytask frames must be hidden from the displayed traceback.
    assert ('_pytask/execute.py' not in result.output)
def positive_region(region):
    """Normalize a geographic region so that east >= west.

    ``region`` is (west, east, south, north); values are coerced to float.
    A region crossing the antimeridian has east shifted up by 360, and a
    west below -180 shifts the whole longitude span up by 360.
    Returns the normalized (west, east, south, north) tuple.
    """
    west, east, south, north = map(float, region)
    # Input sanity checks (same bounds as the original implementation).
    assert -540.0 <= west < 180.0
    assert -180.0 < east <= 540.0
    assert -90.0 <= south < 90.0
    assert -90.0 < north <= 90.0
    if east < west:
        # Region crosses the antimeridian: move east into the next cycle.
        east += 360.0
    if west < -180.0:
        # Keep west within [-180, 180) by shifting the whole span.
        west += 360.0
        east += 360.0
    return west, east, south, north
def test_unset_setting(tester: CommandTester, config: Config, config_cache_dir: Path) -> None:
    # Set a custom value, unset it, then list: the listing must show the
    # default value again, and the config source must never be rewritten.
    tester.execute('virtualenvs.path /some/path')
    tester.execute('virtualenvs.path --unset')
    tester.execute('--list')
    # Defaults, JSON-encoded the way `config --list` renders them.
    cache_dir = json.dumps(str(config_cache_dir))
    venv_path = json.dumps(os.path.join('{cache-dir}', 'virtualenvs'))
    expected = f'''cache-dir = {cache_dir}
experimental.system-git-client = false
installer.max-workers = null
installer.modern-installation = true
installer.no-binary = null
installer.parallel = true
virtualenvs.create = true
virtualenvs.in-project = null
virtualenvs.options.always-copy = false
virtualenvs.options.no-pip = false
virtualenvs.options.no-setuptools = false
virtualenvs.options.system-site-packages = false
virtualenvs.path = {venv_path} # {(config_cache_dir / 'virtualenvs')}
virtualenvs.prefer-active-python = false
virtualenvs.prompt = "{{project_name}}-py{{python_version}}"
warnings.export = true
'''
    # --unset must not have persisted anything to a config source.
    assert (config.set_config_source.call_count == 0)
    assert (tester.io.fetch_output() == expected)
# NOTE(review): the bare string below is presumably a truncated BDD step
# decorator (e.g. @bdd.then('I wait until the editor has started')) — confirm.
('I wait until the editor has started')
def wait_editor(qtbot, editor_pid_watcher):
    # Block until the spawned editor signals readiness via its pidfile.
    if (not editor_pid_watcher.has_pidfile):
        # Wait (without raising on timeout) for the watcher's "appeared"
        # signal; the body is empty on purpose — waiting is the point.
        with qtbot.wait_signal(editor_pid_watcher.appeared, raising=False):
            pass
    # Final manual re-check in case the signal wait timed out.
    if (not editor_pid_watcher.manual_check()):
        pytest.fail('Editor pidfile failed to appear!')
_scoring('wer')
class WerScorer(object):
    """Accumulates word error rate (WER) over reference/prediction pairs."""

    def __init__(self, *unused):
        # Extra positional arguments are accepted and ignored.
        self.reset()

    def reset(self):
        """Clear the accumulated edit distance and reference length."""
        self.distance = 0
        self.ref_length = 0

    def add_string(self, ref, pred):
        """Accumulate one whitespace-tokenized reference/prediction pair."""
        import editdistance
        ref_tokens = ref.split()
        pred_tokens = pred.split()
        self.distance = self.distance + editdistance.eval(ref_tokens, pred_tokens)
        self.ref_length = self.ref_length + len(ref_tokens)

    def result_string(self):
        """Human-readable summary of the current score."""
        return f'WER: {self.score()}'

    def score(self):
        """WER as a percentage; 0 when nothing has been accumulated."""
        if self.ref_length > 0:
            return 100.0 * self.distance / self.ref_length
        return 0
def set_max_weight(ctx, param, value):
    """Click callback for --max-weight.

    Records in ctx.meta[SET_LIMIT] that --max-weight set the limit, and
    rejects the combination with --num-files (which writes the same slot).
    Returns ``value`` unchanged.
    """
    if value is None:
        return value
    previous = ctx.meta.get(SET_LIMIT)
    if previous is None:
        ctx.meta[SET_LIMIT] = 'max-weight'
    elif previous != 'max-weight':
        raise click.UsageError('Cannot specify both --num-files and --max-weight')
    return value
def find_enums(tree):
    """Yield (module, class, target_name) for assignments whose type
    comment names a Qt enum, e.g. ``x = ...  # type: QtCore.Qt.Key``.

    Only plain assignments with a dotted, Q-prefixed type comment are
    considered; the single assignment target must be a simple name.
    The tree must come from ``ast.parse(..., type_comments=True)``.
    """
    for node in ast.walk(tree):
        if not isinstance(node, ast.Assign):
            continue
        comment = node.type_comment
        # Require a dotted, Qt-style ("Q...") type comment.
        if comment is None or '.' not in comment or not comment.startswith('Q'):
            continue
        comment = comment.strip("'")
        mod, cls = comment.rsplit('.', maxsplit=1)
        assert len(node.targets) == 1
        yield mod, cls, node.targets[0].id
# NOTE(review): ".parametrize(...)" looks like a truncated
# "@pytest.mark.parametrize" decorator — confirm against the repo.
.parametrize('package', invalid_files)
def test_upload_badFilename(package, root, testapp):
    # Uploading a package with an invalid filename must be rejected with
    # HTTP 400 and an error message naming the bad filename.
    resp = testapp.post('/', params={':action': 'file_upload'}, upload_files=[('content', package, b'')], expect_errors=1)
    assert (resp.status == '400 Bad Request')
    assert (f'Bad filename: {package}' in resp.text)
# NOTE(review): ".requires_user_action" is presumably a truncated decorator
# (e.g. @pytest.mark.requires_user_action) — confirm against the repo.
.requires_user_action
class TextMotionSelectWindowEventsTest(WindowEventsTestCase):
    # Interactive test: asks the user to hold Shift and press a random
    # motion key, and verifies on_text_motion_select reports that motion.
    number_of_checks = 10
    motion_keys = (key.MOTION_UP, key.MOTION_RIGHT, key.MOTION_DOWN, key.MOTION_LEFT, key.MOTION_NEXT_PAGE, key.MOTION_PREVIOUS_PAGE, key.MOTION_BACKSPACE, key.MOTION_DELETE)

    def setUp(self):
        super(TextMotionSelectWindowEventsTest, self).setUp()
        self.chosen_key = None
        self.checks_passed = 0

    def on_key_press(self, symbol, modifiers):
        # X means "my keyboard lacks this key": pick another motion key.
        if (symbol == key.X):
            self._select_next_key()

    def on_text_motion_select(self, motion):
        # Wrong motion fails the test immediately; a correct one counts
        # toward number_of_checks before the test passes.
        if (motion != self.chosen_key):
            self.fail_test('Expected "{}", received "{}"'.format(key.motion_string(self.chosen_key), key.motion_string(motion)))
        else:
            self.checks_passed += 1
            if (self.checks_passed >= self.number_of_checks):
                self.pass_test()
            else:
                self._select_next_key()

    def _select_next_key(self):
        # Pick a new random motion key and refresh the on-screen prompt.
        self.chosen_key = random.choice(self.motion_keys)
        self._update_question()

    def _update_question(self):
        self.question = 'Please hold <Shift> and press:\n\n{} ({})\n\n\nPress the X key if you do not have this motion key.\nPress Esc if test does not pass.'.format(key.motion_string(self.chosen_key), key.symbol_string(self.chosen_key))
        self._render_question()

    def test_key_text_motion_select(self):
        # Entry point: choose the first key, then run the event loop
        # provided by the base class.
        self._select_next_key()
        self._test_main()
def test_preferred_colorscheme_unsupported(request, quteproc_new):
    """With a backend lacking prefers-color-scheme support, the test page
    must report that the preference is missing."""
    if request.config.webengine:
        pytest.skip('preferred-color-scheme is supported')
    cmdline = [*_base_args(request.config), '--temp-basedir']
    quteproc_new.start(cmdline)
    quteproc_new.open_path('data/darkmode/prefers-color-scheme.html')
    assert quteproc_new.get_content() == 'Preference support missing.'
class D_GET_LOGITS(nn.Module):
def __init__(self, ndf):
super(D_GET_LOGITS, self).__init__()
self.df_dim = ndf
self.joint_conv = nn.Sequential(nn.Conv2d(((ndf * 16) + 256), (ndf * 2), 3, 1, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((ndf * 2), 1, 4, 1, 0, bias=False))
def forward(self, out, y):
y = y.view((- 1), 256, 1, 1)
y = y.repeat(1, 1, 4, 4)
h_c_code = torch.cat((out, y), 1)
out = self.joint_conv(h_c_code)
return out |
def construct_attr(attr_base, attr):
    """Build the attribute-expression node matching attr_base's type.

    Resolution order matters: the current component is checked before the
    generic component case, and struct members are only reachable through
    signals. Raises AssertionError when nothing matches.
    """
    rtype = attr_base.get_rtype()
    # Attribute on the component currently being elaborated.
    if isinstance(attr_base, CurComp) and rtype.has_property(attr):
        return CurCompAttr(attr_base, attr)
    # Attribute on a sub-component.
    if isinstance(rtype, rt.Component) and rtype.has_property(attr):
        return SubCompAttr(attr_base, attr)
    # Attribute on an interface view.
    if isinstance(rtype, rt.InterfaceView) and rtype.has_property(attr):
        return InterfaceAttr(attr_base, attr)
    # Member access on a struct-typed signal.
    if isinstance(rtype, rt.Signal):
        dtype = rtype.get_dtype()
        if isinstance(dtype, rdt.Struct) and dtype.has_property(attr):
            return StructAttr(attr_base, attr)
    raise AssertionError(f'internal error: no available expression nodes for {attr_base}!')
def model_loader(model_name, dataset_name, device, num_channels, num_classes, img_size):
    """Instantiate the global model for a (model_name, dataset_name) pair.

    Returns the constructed model, or None when the combination is not
    recognized.
    """
    logger.info(('Load model [%s] for dataset [%s]. Train using device [%s].' % (model_name, dataset_name, device)))
    net_glob = None
    if model_name == 'cnn':
        if dataset_name == 'cifar':
            net_glob = CNNCifar(num_classes).to(device)
        elif dataset_name == 'mnist':
            net_glob = CNNMnist(num_channels, num_classes).to(device)
        elif dataset_name == 'fashion_mnist':
            net_glob = CNNFashion(num_channels, num_classes).to(device)
        elif dataset_name == 'uci':
            net_glob = UCI_CNN(n_class=6).to(device)
        elif dataset_name == 'realworld':
            net_glob = UCI_CNN(n_class=8).to(device)
    elif model_name == 'lstm' and dataset_name == 'loop':
        # NOTE(review): unlike the other branches this LSTM is not moved to
        # `device` — preserved as-is; confirm whether that is intentional.
        net_glob = LSTM(img_size[1], img_size[1], img_size[1], output_last=True)
    elif model_name == 'mlp':
        # Flatten the input dimensions to size the MLP input layer.
        flat_input = 1
        for dim in img_size:
            flat_input *= dim
        net_glob = MLP(dim_in=flat_input, dim_hidden=64, dim_out=num_classes).to(device)
    return net_glob
class DeletionTests(AuthenticatedAPITestCase):
    """DELETE behaviour of the off-topic channel name endpoint."""

    @classmethod
    def setUpTestData(cls):
        # Fix: setUpTestData must be a classmethod — Django calls it as
        # cls.setUpTestData(), which fails on a plain function (the
        # decorator was likely lost in extraction).
        cls.test_name = OffTopicChannelName.objects.create(name='lemons-lemonade-stand')
        cls.test_name_2 = OffTopicChannelName.objects.create(name='bbq-with-bisk')

    def test_deleting_unknown_name_returns_404(self):
        """Deleting a name that does not exist yields 404."""
        url = reverse('api:bot:offtopicchannelname-detail', args=('unknown-name',))
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 404)

    def test_deleting_known_name_returns_204(self):
        """Deleting an existing name yields 204 No Content."""
        url = reverse('api:bot:offtopicchannelname-detail', args=(self.test_name.name,))
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204)

    def test_name_gets_deleted(self):
        """A deleted name no longer appears in the list endpoint."""
        url = reverse('api:bot:offtopicchannelname-detail', args=(self.test_name_2.name,))
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204)
        url = reverse('api:bot:offtopicchannelname-list')
        response = self.client.get(url)
        self.assertNotIn(self.test_name_2.name, response.json())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.