code stringlengths 281 23.7M |
|---|
class BarOptions(Options):
    """Configuration options for bar charts.

    NOTE(review): in the extracted source every getter was immediately
    shadowed by a same-named setter (the decorators were stripped), making
    the getters unreachable.  Restored the @property / @<name>.setter pairs.
    """

    @property
    def spaceRatio(self):
        """Fraction of the bar slot left empty between bars (0..1)."""
        return self._config_get()

    @spaceRatio.setter
    def spaceRatio(self, num: float):
        if (num < 0) or (num > 1):
            raise ValueError('Ratio must be between 0 and 1')
        self._config(num)

    @property
    def stacked(self):
        """Whether the bar series are stacked."""
        return self._config_get()

    @stacked.setter
    def stacked(self, num):
        self._config(num)

    @property
    def height(self):
        """Chart height."""
        return self._config_get()

    @height.setter
    def height(self, num):
        self._config(num)

    @property
    def depth(self):
        """Chart depth (3D rendering)."""
        return self._config_get()

    @depth.setter
    def depth(self, num):
        self._config(num)
# NOTE(review): the '@pytest.mark' prefix of the decorator was lost in
# extraction; restored here.
@pytest.mark.parametrize('vm_class, code, expect_exception, expect_gas_used', ((ConstantinopleVM, assemble(opcode_values.PUSH20, CANONICAL_ADDRESS_B, opcode_values.BALANCE), None, (3 + 400)), (ConstantinopleVM, assemble(opcode_values.SELFBALANCE), InvalidInstruction, 1000000), (IstanbulVM, assemble(opcode_values.PUSH20, CANONICAL_ADDRESS_B, opcode_values.BALANCE), None, (3 + 700)), (IstanbulVM, assemble(opcode_values.SELFBALANCE), None, 5), (MuirGlacierVM, assemble(opcode_values.PUSH20, CANONICAL_ADDRESS_B, opcode_values.BALANCE), None, (3 + 700)), (MuirGlacierVM, assemble(opcode_values.SELFBALANCE), None, 5), (BerlinVM, assemble(opcode_values.PUSH20, CANONICAL_ADDRESS_B, opcode_values.BALANCE), None, (3 + 100)), (BerlinVM, assemble(opcode_values.SELFBALANCE), None, 5)))
def test_balance(vm_class, code, expect_exception, expect_gas_used):
    """BALANCE/SELFBALANCE push the account balance and charge fork-specific gas."""
    # NOTE(review): the literal balance value was lost in extraction; any
    # positive integer works since the test only round-trips it through
    # the stack — confirm against the upstream fixture.
    sender_balance = 987654321
    vm = setup_vm(vm_class)
    vm.state.set_balance(CANONICAL_ADDRESS_B, sender_balance)
    vm.state.persist()
    comp = run_computation(vm, CANONICAL_ADDRESS_B, code)
    assert (comp.msg.sender == CANONICAL_ADDRESS_B)
    if expect_exception:
        # Pre-SELFBALANCE forks treat the opcode as invalid.
        assert isinstance(comp.error, expect_exception)
    else:
        assert comp.is_success
        assert (comp.stack_pop1_int() == sender_balance)
        assert (len(comp._stack) == 0)
    assert (comp.get_gas_used() == expect_gas_used)
class DummyGeneratorTest(unittest.TestCase):
    """Tests for the simulated fNIRS signal / noise generators."""

    def setUp(self):
        # Fixed seed so the noise-based tests below are deterministic.
        np.random.seed(42)

    def test_time_index(self):
        """_make_time_index rejects bad args and spans [0, duration)."""
        with self.assertRaises(RuntimeError):
            _make_time_index(0.001, 100)
        time_index = _make_time_index(1, 1000)
        self.assertTrue(np.allclose(time_index, np.arange(0, 1, 0.001)))
        time_index = _make_time_index(5, 50)
        # duration * sample_rate samples are expected.
        self.assertEqual(time_index.shape[0], (50 * 5))

    def test_consistent_behavior(self):
        """All generators should produce arrays of matching shape."""
        sample_rate = 50
        duration = 1
        n_samples = (duration * sample_rate)
        sim_Hb = simulated_hemodynamics(amplitude=1, sample_rate=sample_rate, duration=duration)
        physio_noise = dummy_physiological_noise(amplitude=4e-07, sample_rate=sample_rate, interest_freq=1, phase=(np.pi / 4), duration=duration)
        measurement_noise = powerlaw_psd_gaussian(exponent=1.0, size=n_samples)
        self.assertEqual(sim_Hb.shape, physio_noise.shape)
        self.assertEqual(sim_Hb.shape, measurement_noise.shape)

    def test_sim_hemodynamics(self):
        """Hemodynamic response should peak at t=5s regardless of sample rate."""
        duration = 30
        sample_rate = 50
        sim_Hb1 = simulated_hemodynamics(amplitude=1, sample_rate=sample_rate, duration=duration)
        sim_Hb2 = simulated_hemodynamics(amplitude=1, sample_rate=(sample_rate * 2), duration=duration)
        ts1 = _make_time_index(duration, sample_rate)
        ts2 = _make_time_index(duration, (sample_rate * 2))
        # Peak time is 5.0 s in both resolutions.
        self.assertAlmostEqual(np.abs((ts1[np.argmax(sim_Hb1)] - 5.0)), 0)
        self.assertAlmostEqual(np.abs((ts2[np.argmax(sim_Hb2)] - 5.0)), 0)

    def test_physiological_noise(self):
        """Physiological noise is a sinusoid peaked at interest_freq."""
        duration = 1
        sample_rate = 1000
        interest_freq = 1
        sim_Hb = simulated_hemodynamics(amplitude=1, sample_rate=sample_rate, duration=duration)
        cardiac_wave = dummy_physiological_noise(amplitude=4e-07, sample_rate=sample_rate, interest_freq=interest_freq, phase=(np.pi / 4), duration=duration)
        self.assertEqual(sim_Hb.shape, cardiac_wave.shape)
        (psds, freqs) = psd_array_multitaper(cardiac_wave, sample_rate, n_jobs=12)
        power_log10 = np.log10(psds)
        # Spectral peak must land on the requested frequency.
        ind_of_peak = np.unravel_index(np.argmax(power_log10, axis=None), power_log10.shape)[0]
        self.assertAlmostEqual(freqs[ind_of_peak], interest_freq)
        # Doubling the sample rate should just oversample the same wave,
        # so every other sample matches the original.
        cardiac_wave_double_freq = dummy_physiological_noise(amplitude=4e-07, sample_rate=(sample_rate * 2), interest_freq=interest_freq, phase=(np.pi / 4), duration=duration)
        self.assertTrue(np.allclose(cardiac_wave, cardiac_wave_double_freq[::2]))

    def test_pink_noise(self):
        """1/f noise: power should decrease monotonically across frequency bands."""
        beta = 1
        sample_rate = 500
        duration = 2
        n_samples = (duration * sample_rate)
        y = powerlaw_psd_gaussian(exponent=beta, size=n_samples)
        (psds, freqs) = psd_array_multitaper(y, sample_rate, n_jobs=12)
        # Smooth the PSD so the band medians are stable.
        psds_smooth = savgol_filter(psds, 101, 3)
        low_freq_power = np.median(psds_smooth[:10])
        med_freq_power = np.median(psds_smooth[80:110])
        high_freq_power = np.median(psds_smooth[400:])
        self.assertGreaterEqual(low_freq_power, med_freq_power)
        self.assertGreaterEqual(med_freq_power, high_freq_power)
        # Same property must hold for a longer sample.
        y_double_freq = powerlaw_psd_gaussian(exponent=beta, size=(2 * n_samples))
        (psds, freqs) = psd_array_multitaper(y_double_freq, sample_rate, n_jobs=12)
        psds_smooth = savgol_filter(psds, 101, 3)
        low_freq_power = np.median(psds_smooth[:10])
        med_freq_power = np.median(psds_smooth[80:110])
        high_freq_power = np.median(psds_smooth[400:])
        self.assertGreaterEqual(low_freq_power, med_freq_power)
        self.assertGreaterEqual(med_freq_power, high_freq_power)

    def test_motion_noise(self):
        """Motion artifacts ramp at motion_amplitude units per second."""
        duration = 20
        sample_rate = 5000
        noise = motion_noise(motion_amplitude=3, motion_duration_mean=0.5, sample_rate=sample_rate, sample_duration=duration)
        nonzero = noise[(noise > 0)]
        # Slope of the nonzero segment, converted back to per-second units.
        slope = (((nonzero[(- 1)] - nonzero[0]) / len(nonzero)) * sample_rate)
        self.assertAlmostEqual(slope, 3.0, places=2)
        sample_rate = 150
        noise = motion_noise(motion_amplitude=1.75, motion_duration_mean=0.5, sample_rate=sample_rate, sample_duration=duration)
        nonzero = noise[(noise > 0)]
        slope = (((nonzero[(- 1)] - nonzero[0]) / len(nonzero)) * sample_rate)
        self.assertAlmostEqual(slope, 1.75, places=2)
def move(src, dest):
    """Move *src* to *dest*, expanding '~' in both paths.

    A '*' in *src* is treated as a glob pattern: every match is moved into
    the *dest* directory (created if needed).  A plain missing *src* is a
    silent no-op.  Uses os.rename, so src and dest must share a filesystem.
    """
    src = path.expanduser(src)
    dest = path.expanduser(dest)
    if '*' not in src:
        # Single-file move; missing source is tolerated.
        if not exists(src):
            return
        prepare_folder(path.dirname(dest))
        os.rename(src, dest)
        return
    # Glob move: dest is a directory that receives every match.
    prepare_folder(dest)
    for entry in glob.iglob(src):
        os.rename(entry, path.join(dest, path.basename(entry)))
class OptionSeriesTreegraphDatalabelsTextpath(Options):
    """Treegraph data-label textPath options.

    NOTE(review): getters were shadowed by same-named setters in the
    extracted source (decorators stripped); restored @property pairs.
    """

    @property
    def attributes(self):
        """SVG attributes applied to the text path."""
        return self._config_get(None)

    @attributes.setter
    def attributes(self, value: Any):
        self._config(value, js_type=False)

    @property
    def enabled(self):
        """Whether the text path is enabled (default False)."""
        return self._config_get(False)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
def test_used_variables():
    """The def/use map should report every SSA variable read in the fixture CFG."""
    (_, use_map) = define_use_map()
    assert (use_map.used_variables == {Variable('u', Integer.int32_t()), Variable('v', Integer.int32_t(), 3), Variable('v', Integer.int32_t(), 4), Variable('v', Integer.int32_t(), 1), Variable('v', Integer.int32_t(), 2), Variable('w', Integer.int32_t(), 1)})
# NOTE(review): the line below looks like the remains of a stripped class
# decorator (e.g. '@provides(IToggleField)'); as written it is a bare
# expression statement — confirm against the original source.
(IToggleField)
class ToggleField(MToggleField, EditableField):
    """Qt implementation of a toggle (checkbox-like) field."""

    def _get_control_value(self):
        # Checked state of the underlying Qt widget.
        return self.control.isChecked()

    def _get_control_text(self):
        return self.control.text()

    def _set_control_value(self, value):
        return self.control.setChecked(value)

    def _set_control_text(self, text):
        return self.control.setText(text)

    def _set_control_icon(self, icon):
        # A None icon clears the widget icon with an empty QIcon.
        if (icon is not None):
            self.control.setIcon(icon.create_icon())
        else:
            self.control.setIcon(QIcon())

    def _observe_control_value(self, remove=False):
        # Connect/disconnect the Qt 'toggled' signal to the trait updater.
        if remove:
            self.control.toggled.disconnect(self._update_value)
        else:
            self.control.toggled.connect(self._update_value)

    def _get_control_alignment(self):
        return self.alignment

    def _set_control_alignment(self, alignment):
        # Alignment is applied via a Qt style sheet rather than a widget API.
        self.control.setStyleSheet(f'text-align: {alignment}')
class GetSchemaViewTests(TestCase):
    """Tests for the get_schema_view() shortcut."""

    def test_openapi(self):
        schema_view = get_schema_view(title='With OpenAPI')
        assert isinstance(schema_view.initkwargs['schema_generator'], openapi.SchemaGenerator)
        assert (renderers.OpenAPIRenderer in schema_view.cls().renderer_classes)

    # NOTE(review): the '@pytest.mark' prefix of this decorator was lost in
    # extraction; restored.
    @pytest.mark.skipif((not coreapi.coreapi), reason='coreapi is not installed')
    def test_coreapi(self):
        with override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'}):
            schema_view = get_schema_view(title='With CoreAPI')
        assert isinstance(schema_view.initkwargs['schema_generator'], coreapi.SchemaGenerator)
        assert (renderers.CoreAPIOpenAPIRenderer in schema_view.cls().renderer_classes)
# NOTE(review): '_tag' below is the residue of a stripped template-tag
# decorator (likely '@register.simple_tag' or similar); the first
# parameter 'of_' is unused and may also be extraction damage — confirm
# against the original templatetag module.
_tag
def feincms_parentlink(of_, feincms_page, **kwargs):
    """Return the URL of the ancestor of *feincms_page* at tree *level*.

    Returns the page's own URL when it sits exactly at *level*, and '#'
    when the page is above *level* or the ancestor lookup fails.
    """
    level = int(kwargs.get('level', 1))
    if ((feincms_page.level + 1) == level):
        # The page itself is at the requested level.
        return feincms_page.get_absolute_url()
    elif ((feincms_page.level + 1) < level):
        # Page is above the requested level: no ancestor exists.
        return '#'
    try:
        return feincms_page.get_ancestors()[(level - 1)].get_absolute_url()
    except IndexError:
        return '#'
class IdentityElimination(PipelineStage):
    """Pipeline stage that removes identity assignments by variable replacement."""

    name = 'identity-elimination'

    def run(self, task: DecompilerTask):
        """Find identity groups in the task CFG and merge their variables."""
        (identity_graph, dataflow) = self._parse_cfg(task)
        variable_replacer = _VariableReplacer(dataflow)
        for identity_group in identity_graph.yield_identities():
            variable_replacer.replace_variables(identity_group, identity_graph.find_replacement_variable_of_group(identity_group))

    # NOTE(review): this helper takes no 'self' but is invoked as
    # self._parse_cfg(task); the '@staticmethod' decorator was evidently
    # stripped in extraction and is restored here.
    @staticmethod
    def _parse_cfg(task: DecompilerTask) -> Tuple[(_IdentityGraph, _IdentityDataflow)]:
        """Walk every instruction once, building the identity graph and dataflow."""
        dataflow = _IdentityDataflow()
        identity_graph = _IdentityGraph(task.function_parameters)
        for basic_block in task.graph:
            for instruction in basic_block.instructions:
                dataflow.parse_dataflow(instruction, basic_block)
                if isinstance(instruction, Assignment):
                    identity_graph.initialize_no_identity_of(instruction)
                    identity_graph.add_assignment(instruction, basic_block)
                elif isinstance(instruction, Relation):
                    # Relations forbid identifying destination with its value.
                    identity_graph.no_identity_of[instruction.destination] = {instruction.value}
        return (identity_graph, dataflow)
class UserFieldUpdateForm(BaseUserFieldForm):
    """Form for updating a user field; builds a type-specific subclass on demand."""

    # NOTE(review): the first parameter is 'cls' but no decorator was present
    # in the extracted source — '@classmethod' restored.
    @classmethod
    async def get_form_class(cls, user_field: UserField) -> type['UserFieldUpdateForm']:
        """Return a subclass whose configuration field matches *user_field*'s type."""
        class UserFieldForm(UserFieldUpdateForm):
            configuration = FormFieldPopulateJSON(CONFIGURATION_FORM_CLASS_MAP[user_field.type])
        return UserFieldForm
class Solution(object):
    def find_diff(self, s, t):
        """Return the first char of *t* that appears more times in *t* than in *s*.

        Returns None when no such character exists.  Raises TypeError when
        either argument is None.
        """
        from collections import Counter
        if (s is None) or (t is None):
            raise TypeError('s or t cannot be None')
        # Counter folds the manual counting loop; a missing key decrements
        # from 0 to -1, matching the original KeyError early-return.
        seen = Counter(s)
        for char in t:
            seen[char] -= 1
            if seen[char] < 0:
                return char
        return None
class Mul(BinaryOp):
    """Tensor contraction A * B over A's last and B's first index."""

    def __init__(self, A, B):
        # Shapes must be conformable for the contraction.
        if (A.shape[(- 1)] != B.shape[0]):
            raise ValueError(('Illegal op on a %s-tensor with a %s-tensor.' % (A.shape, B.shape)))
        fsA = A.arg_function_spaces[(- 1)]
        fsB = B.arg_function_spaces[0]
        assert space_equivalence(fsA, fsB), 'Cannot perform argument contraction over middle indices. They must be in the same function space.'
        super(Mul, self).__init__(A, B)
        # The contracted (middle) arguments drop out of the result.
        self._args = (A.arguments()[:(- 1)] + B.arguments()[1:])

    # NOTE(review): the extracted source had a bare '_property' line here
    # (a NameError at class creation); restored the '@property' decorator.
    @property
    def arg_function_spaces(self):
        """Function spaces of the surviving (uncontracted) arguments."""
        (A, B) = self.operands
        return (A.arg_function_spaces[:(- 1)] + B.arg_function_spaces[1:])

    def arguments(self):
        """Return the cached argument tuple of the product."""
        return self._args
class DetectResponse():
    """Result of a language-detection call."""

    def __init__(self, language: str, isReliable: bool=True, confidence: float=1.0):
        self.language = language
        self.isReliable = isReliable
        self.confidence = confidence

    def __repr__(self):
        # Mirrors the constructor signature for easy round-trip reading.
        cls_name = self.__class__.__qualname__
        body = f'language={self.language!r}, isReliable={self.isReliable!r}, confidence={self.confidence!r}'
        return f'{cls_name}({body})'
def build_where_parameter(query_text, scope, sources):
    """Build a SQL WHERE fragment and its parameter list.

    *sources* become an OR'd netloc filter; the query text becomes a MATCH
    placeholder (with a leading space, as the original did).

    NOTE(review): both 'scope' branches were byte-identical in the original,
    so the conditional collapses to a single append; 'scope' currently has
    no effect on the output.
    """
    conditions = []
    params = []
    if sources:
        placeholders = ' or '.join('netloc = %s' for _ in sources)
        conditions.append('(' + placeholders + ')')
        params.extend(sources)
    conditions.append('MATCH(%s)')
    params.append(' ' + query_text)
    return (' and '.join(conditions), params)
# NOTE(review): the '@pytest.mark' prefix of the decorator was lost in
# extraction; restored.
@pytest.mark.parametrize(('hfamily', 'hdegree', 'vfamily', 'vdegree'), [(f, d, vf, vd) for (vf, vd) in (CG + DG) for (f, d) in (CG + DG)])
def test_scalar_assembly(extmesh, hfamily, hdegree, vfamily, vdegree):
    """Mass and stiffness forms should assemble on every CG/DG family pairing."""
    mesh = extmesh(4, 4, 2)
    fspace = FunctionSpace(mesh, hfamily, hdegree, vfamily=vfamily, vdegree=vdegree)
    u = TrialFunction(fspace)
    v = TestFunction(fspace)
    # Smoke test: assembly must not raise for either bilinear form.
    assemble((inner(u, v) * dx))
    assemble((inner(grad(u), grad(v)) * dx))
class GroupUniqueNameValidator(object):
    """WTForms validator that rejects already-existing group aliases."""

    _DEFAULT_MESSAGE = "Group with the alias '{}' already exists."

    def __init__(self, message=None):
        # Fall back to the default template when no message is supplied.
        self.message = message or self._DEFAULT_MESSAGE

    def __call__(self, form, field):
        """Raise ValidationError when the alias is already taken."""
        if UsersLogic.group_alias_exists(field.data):
            raise wtforms.ValidationError(self.message.format(field.data))
class AffectNet(Dataset):
    """AffectNet facial-expression dataset with optional label cleaning.

    Loads a pickled index ({subset}_fullpath.pkl), filters samples whose
    expression label disagrees with the valence/arousal annotations
    (when cleaned_set=True), and serves image + valence/arousal/expression.
    """

    _expressions = {0: 'neutral', 1: 'happy', 2: 'sad', 3: 'surprise', 4: 'fear', 5: 'disgust', 6: 'anger', 7: 'contempt', 8: 'none'}
    # Which raw expression indices are kept for the 8- and 5-class variants.
    _expressions_indices = {8: [0, 1, 2, 3, 4, 5, 6, 7], 5: [0, 1, 2, 3, 6]}

    def __init__(self, root_path, subset='test', transform_image_shape=None, transform_image=None, n_expression=5, verbose=1, cleaned_set=True):
        self.root_path = Path(root_path).expanduser()
        self.subset = subset
        self.image_path = self.root_path.joinpath(subset)
        self.transform_image_shape = transform_image_shape
        self.transform_image = transform_image
        self.verbose = verbose
        self.cleaned_set = cleaned_set
        if (n_expression not in [5, 8]):
            raise ValueError(f'n_expression should be either 5 or 8, but got n_expression={n_expression}')
        self.n_expression = n_expression
        self.pickle_path = self.root_path.joinpath(f'{subset}_fullpath.pkl')
        with open(self.pickle_path, 'br') as f:
            data = pickle.load(f)
        self.data = data
        self.keys = []
        # Book-keeping of why each sample was dropped.
        self.skipped = {'other': [], 'pt_pt_error': [], 'expression': [], 'cleaned': []}
        expressions = []
        for (key, value) in data.items():
            if (key == 'folder'):
                continue
            if (int(value['expression']) not in self._expressions_indices[self.n_expression]):
                self.skipped['expression'].append(key)
                continue
            if (self.cleaned_set and (not value['expression_correct'])):
                self.skipped['cleaned'].append(key)
                continue
            expression = int(value['expression'])
            if self.cleaned_set:
                # Sanity-check the discrete label against continuous
                # valence/arousal; drop samples whose quadrant/intensity
                # contradicts the labelled expression.
                valence = float(value['valence'])
                arousal = float(value['arousal'])
                intensity = math.sqrt(((valence ** 2) + (arousal ** 2)))
                if ((expression == 0) and (intensity >= 0.2)):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 1) and ((valence <= 0) or (intensity <= 0.2))):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 2) and ((valence >= 0) or (intensity <= 0.2))):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 3) and ((arousal <= 0) or (intensity <= 0.2))):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 4) and ((not ((arousal >= 0) and (valence <= 0))) or (intensity <= 0.2))):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 5) and ((valence >= 0) or (intensity <= 0.3))):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 6) and ((arousal <= 0) or (intensity <= 0.2))):
                    self.skipped['other'].append(key)
                    continue
                elif ((expression == 7) and ((valence >= 0) or (intensity <= 0.2))):
                    self.skipped['other'].append(key)
                    continue
            if ((self.n_expression == 5) and (expression == 6)):
                # In the 5-class variant 'anger' (6) is remapped to index 4.
                expression = 4
            expressions.append(expression)
            self.keys.append(key)
        expressions = np.array(expressions)
        self.sample_per_class = {label: np.sum((expressions == label)) for label in np.unique(expressions)}
        # Inverse-frequency weights for class-balanced sampling.
        self.expression_weights = np.array([(1.0 / self.sample_per_class[e]) for e in expressions])
        self.average_per_class = int(np.mean(list(self.sample_per_class.values())))
        if self.verbose:
            skipped = sum([len(self.skipped[key]) for key in self.skipped])
            # BUGFIX(review): the original printed len(self.skipped) — the
            # number of skip categories (always 4) — instead of the total
            # number of skipped images computed above.
            msg = f" -- {len(self.keys)} images, skipped {skipped} images ({len(self.skipped['pt_pt_error'])} with large errors)."
            print(msg)
            print(f'Samples per class : {self.sample_per_class}')

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        """Return dict(valence, arousal, expression, image, au) for one sample."""
        key = self.keys[index]
        sample_data = self.data[key]
        image_file = self.image_path.joinpath(key).as_posix()
        valence = torch.tensor([float(sample_data['valence'])], dtype=torch.float32)
        arousal = torch.tensor([float(sample_data['arousal'])], dtype=torch.float32)
        expression = int(sample_data['expression'])
        if ((self.n_expression == 5) and (expression == 6)):
            expression = 4
        landmarks = sample_data['landmarks_fan']
        if isinstance(landmarks, list):
            landmarks = np.array(landmarks)
        image = io.imread(image_file)
        if (self.transform_image_shape is not None):
            # Crop/align using the landmark bounding box.
            bounding_box = [landmarks.min(axis=0)[0], landmarks.min(axis=0)[1], landmarks.max(axis=0)[0], landmarks.max(axis=0)[1]]
            (image, landmarks) = self.transform_image_shape(image, bb=bounding_box)
            image = np.ascontiguousarray(image)
        if (self.transform_image is not None):
            image = self.transform_image(image)
        return dict(valence=valence, arousal=arousal, expression=expression, image=image, au=[])
def get_features_stats(feature: pd.Series, feature_type: ColumnType) -> FeatureQualityStats:
    """Compute descriptive data-quality statistics for one feature column.

    Fills counts, missing/unique percentages, most-common values, and —
    for numerical features — min/max, mean, std and quartiles; for
    datetime features min/max are stringified.
    """
    def get_percentage_from_all_values(value: Union[(int, float)]) -> float:
        # Percentage relative to the total row count, rounded to 2 decimals.
        return np.round(((100 * value) / all_values_count), 2)

    result = FeatureQualityStats(feature_type=feature_type.value)
    all_values_count = feature.shape[0]
    if (not (all_values_count > 0)):
        # Empty column: return the bare stats object.
        return result
    result.number_of_rows = all_values_count
    result.missing_count = int(feature.isnull().sum())
    result.count = int(feature.count())
    # NOTE(review): a redundant re-assignment of all_values_count was
    # removed here (it recomputed the same feature.shape[0]).
    value_counts = feature.value_counts(dropna=False)
    result.missing_percentage = np.round(((100 * result.missing_count) / all_values_count), 2)
    unique_count: int = feature.nunique()
    result.unique_count = unique_count
    result.unique_percentage = get_percentage_from_all_values(unique_count)
    result.most_common_value = value_counts.index[0]
    result.most_common_value_percentage = get_percentage_from_all_values(value_counts.iloc[0])
    if ((result.count > 0) and pd.isnull(result.most_common_value)):
        # When NaN is the mode, also report the most common non-null value.
        result.most_common_not_null_value = value_counts.index[1]
        result.most_common_not_null_value_percentage = get_percentage_from_all_values(value_counts.iloc[1])
    if (feature_type == ColumnType.Numerical):
        if (not np.issubdtype(feature, np.number)):
            feature = feature.astype(float)
        if isinstance(result.most_common_value, float):
            result.most_common_value = np.round(result.most_common_value, 5)
        result.infinite_count = int(np.sum(np.isinf(feature)))
        result.infinite_percentage = get_percentage_from_all_values(result.infinite_count)
        result.max = np.round(feature.max(), 2)
        result.min = np.round(feature.min(), 2)
        common_stats = dict(feature.describe())
        std = common_stats['std']
        result.std = np.round(std, 2)
        result.mean = np.round(common_stats['mean'], 2)
        result.percentile_25 = np.round(common_stats['25%'], 2)
        result.percentile_50 = np.round(common_stats['50%'], 2)
        result.percentile_75 = np.round(common_stats['75%'], 2)
    if (feature_type == ColumnType.Datetime):
        # Datetimes are reported as strings for JSON-friendliness.
        result.most_common_value = str(result.most_common_value)
        result.max = str(feature.max())
        result.min = str(feature.min())
    return result
class Options():
    """Dict-like option store that tracks which options were consumed."""

    def __init__(self, options=None):
        self._options = {} if options is None else options
        self._used_options = set()

    def __getitem__(self, name: str):
        """Return the option value, recording the access; KeyError if absent."""
        self._used_options.add(name)
        return self._options[name]

    def __call__(self, name: str, default):
        """Return the option value or *default*, recording the access."""
        self._used_options.add(name)
        return self._options.get(name, default)

    def provided(self, name: str) -> bool:
        """True when *name* was explicitly supplied."""
        return name in self._options

    def check_unused(self):
        """Raise TypeError naming every option that was never read."""
        leftovers = set(self._options.keys()) - self._used_options
        if leftovers:
            plural = 's' if len(leftovers) > 1 else ''
            details = ', '.join('%s=%s' % (name, self._options[name]) for name in leftovers)
            raise TypeError('Unused argument%s: %s' % (plural, details))

    def update_if_not_set(self, **kwargs):
        """Merge defaults without overriding explicitly provided options."""
        for key, value in kwargs.items():
            self._options.setdefault(key, value)

    def __repr__(self) -> str:
        return f'Options({str(self._options)})'
class Settings():
    """Per-component settings holder bound to a page.

    NOTE(review): headerVisible's getter was shadowed by a same-named
    setter in the extracted source (decorators stripped); restored the
    @property / @headerVisible.setter pair.
    """

    def __init__(self, page: primitives.PageModel, options: dict, component: primitives.HtmlModel=None):
        self.page = page
        self.component = component
        self.__headerVisible = True
        # Context dict mirroring the applied settings.
        self.__ctx = {}

    @property
    def headerVisible(self):
        """Whether the header is shown (defaults to True)."""
        return self.__headerVisible

    @headerVisible.setter
    def headerVisible(self, flag: bool):
        self.__headerVisible = flag
        self.__ctx['headerVisible'] = flag
# NOTE(review): this class uses field(default_factory=...) at class level,
# which only works under @dataclass — the decorator was evidently stripped
# in extraction and is restored here (assumes 'dataclass' is imported
# alongside 'field'; confirm against the file's imports).
@dataclass
class DescribeWorkflowExecutionResponse():
    """Response payload describing a workflow execution."""
    execution_configuration: WorkflowExecutionConfiguration = None
    workflow_execution_info: WorkflowExecutionInfo = None
    pending_activities: List[PendingActivityInfo] = field(default_factory=list)
    pending_children: List[PendingChildExecutionInfo] = field(default_factory=list)
# NOTE(review): the '@pytest.mark' prefix of the decorator was lost in
# extraction; restored.
@pytest.mark.parametrize('xp', ALL_XP)
def test_array_module_cpu_gpu_helpers(xp):
    """get_array_module / is_numpy_array / is_cupy_array behave per backend."""
    error = "Only numpy and cupy arrays are supported, but found <class 'int'> instead. If get_array_module module wasn't called directly, this might indicate a bug in Thinc."
    with pytest.raises(ValueError, match=error):
        get_array_module(0)
    zeros = xp.zeros((1, 2))
    xp_ = get_array_module(zeros)
    assert (xp_ == xp)
    if (xp == numpy):
        assert is_numpy_array(zeros)
        assert (not is_numpy_array((1, 2)))
    else:
        assert is_cupy_array(zeros)
        assert (not is_cupy_array((1, 2)))
class OptionSeriesBarSonificationContexttracks(Options):
    """Bar-series sonification context-track options.

    NOTE(review): getters were shadowed by same-named setters in the
    extracted source (decorators stripped); restored @property pairs.
    Sub-data accessors (activeWhen, mapping, pointGrouping) are read-only
    properties.
    """

    @property
    def activeWhen(self) -> 'OptionSeriesBarSonificationContexttracksActivewhen':
        return self._config_sub_data('activeWhen', OptionSeriesBarSonificationContexttracksActivewhen)

    @property
    def instrument(self):
        """Instrument name (default 'piano')."""
        return self._config_get('piano')

    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)

    @property
    def mapping(self) -> 'OptionSeriesBarSonificationContexttracksMapping':
        return self._config_sub_data('mapping', OptionSeriesBarSonificationContexttracksMapping)

    @property
    def midiName(self):
        return self._config_get(None)

    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)

    @property
    def pointGrouping(self) -> 'OptionSeriesBarSonificationContexttracksPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesBarSonificationContexttracksPointgrouping)

    @property
    def roundToMusicalNotes(self):
        return self._config_get(True)

    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def showPlayMarker(self):
        return self._config_get(True)

    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def timeInterval(self):
        return self._config_get(None)

    @timeInterval.setter
    def timeInterval(self, num: float):
        self._config(num, js_type=False)

    @property
    def type(self):
        return self._config_get('instrument')

    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)

    @property
    def valueInterval(self):
        return self._config_get(None)

    @valueInterval.setter
    def valueInterval(self, num: float):
        self._config(num, js_type=False)

    @property
    def valueMapFunction(self):
        return self._config_get('linear')

    @valueMapFunction.setter
    def valueMapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def valueProp(self):
        return self._config_get('"x"')

    @valueProp.setter
    def valueProp(self, text: str):
        self._config(text, js_type=False)
def test_download_log(monkeypatch, mock_get_info, tmp_path):
    """download_log should delegate to download_file and write to the given path."""
    def mock(*args, **kwargs):
        # Fake download: write a known payload to the requested target file.
        file_path = kwargs['to_file']
        with open(file_path, 'w') as f:
            f.write('0.3,5.7')
    monkeypatch.setattr(f'{task_core_path}.download_file', mock)
    download_log(TASK_ID, str((tmp_path / 'web_test_tmp.json')))
    # The file must contain exactly what the (mocked) downloader wrote.
    with open(str((tmp_path / 'web_test_tmp.json'))) as f:
        assert (f.read() == '0.3,5.7')
class SigningSerializer(Serializer):
    """Protobuf (de)serializer for SigningMessage.

    NOTE(review): encode/decode take no 'self'; the '@staticmethod'
    decorators were evidently stripped in extraction and are restored.
    """

    @staticmethod
    def encode(msg: Message) -> bytes:
        """Serialize a SigningMessage into protobuf bytes."""
        msg = cast(SigningMessage, msg)
        message_pb = ProtobufMessage()
        dialogue_message_pb = DialogueMessage()
        signing_msg = signing_pb2.SigningMessage()
        dialogue_message_pb.message_id = msg.message_id
        dialogue_reference = msg.dialogue_reference
        dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
        dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
        dialogue_message_pb.target = msg.target
        performative_id = msg.performative
        # One branch per performative: copy the typed payload into the pb.
        if (performative_id == SigningMessage.Performative.SIGN_TRANSACTION):
            performative = signing_pb2.SigningMessage.Sign_Transaction_Performative()
            terms = msg.terms
            Terms.encode(performative.terms, terms)
            raw_transaction = msg.raw_transaction
            RawTransaction.encode(performative.raw_transaction, raw_transaction)
            signing_msg.sign_transaction.CopyFrom(performative)
        elif (performative_id == SigningMessage.Performative.SIGN_MESSAGE):
            performative = signing_pb2.SigningMessage.Sign_Message_Performative()
            terms = msg.terms
            Terms.encode(performative.terms, terms)
            raw_message = msg.raw_message
            RawMessage.encode(performative.raw_message, raw_message)
            signing_msg.sign_message.CopyFrom(performative)
        elif (performative_id == SigningMessage.Performative.SIGNED_TRANSACTION):
            performative = signing_pb2.SigningMessage.Signed_Transaction_Performative()
            signed_transaction = msg.signed_transaction
            SignedTransaction.encode(performative.signed_transaction, signed_transaction)
            signing_msg.signed_transaction.CopyFrom(performative)
        elif (performative_id == SigningMessage.Performative.SIGNED_MESSAGE):
            performative = signing_pb2.SigningMessage.Signed_Message_Performative()
            signed_message = msg.signed_message
            SignedMessage.encode(performative.signed_message, signed_message)
            signing_msg.signed_message.CopyFrom(performative)
        elif (performative_id == SigningMessage.Performative.ERROR):
            performative = signing_pb2.SigningMessage.Error_Performative()
            error_code = msg.error_code
            ErrorCode.encode(performative.error_code, error_code)
            signing_msg.error.CopyFrom(performative)
        else:
            raise ValueError('Performative not valid: {}'.format(performative_id))
        dialogue_message_pb.content = signing_msg.SerializeToString()
        message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
        message_bytes = message_pb.SerializeToString()
        return message_bytes

    @staticmethod
    def decode(obj: bytes) -> Message:
        """Deserialize protobuf bytes back into a SigningMessage."""
        message_pb = ProtobufMessage()
        signing_pb = signing_pb2.SigningMessage()
        message_pb.ParseFromString(obj)
        message_id = message_pb.dialogue_message.message_id
        dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)
        target = message_pb.dialogue_message.target
        signing_pb.ParseFromString(message_pb.dialogue_message.content)
        performative = signing_pb.WhichOneof('performative')
        performative_id = SigningMessage.Performative(str(performative))
        performative_content = {}
        # Mirror of encode(): extract the typed payload per performative.
        if (performative_id == SigningMessage.Performative.SIGN_TRANSACTION):
            pb2_terms = signing_pb.sign_transaction.terms
            terms = Terms.decode(pb2_terms)
            performative_content['terms'] = terms
            pb2_raw_transaction = signing_pb.sign_transaction.raw_transaction
            raw_transaction = RawTransaction.decode(pb2_raw_transaction)
            performative_content['raw_transaction'] = raw_transaction
        elif (performative_id == SigningMessage.Performative.SIGN_MESSAGE):
            pb2_terms = signing_pb.sign_message.terms
            terms = Terms.decode(pb2_terms)
            performative_content['terms'] = terms
            pb2_raw_message = signing_pb.sign_message.raw_message
            raw_message = RawMessage.decode(pb2_raw_message)
            performative_content['raw_message'] = raw_message
        elif (performative_id == SigningMessage.Performative.SIGNED_TRANSACTION):
            pb2_signed_transaction = signing_pb.signed_transaction.signed_transaction
            signed_transaction = SignedTransaction.decode(pb2_signed_transaction)
            performative_content['signed_transaction'] = signed_transaction
        elif (performative_id == SigningMessage.Performative.SIGNED_MESSAGE):
            pb2_signed_message = signing_pb.signed_message.signed_message
            signed_message = SignedMessage.decode(pb2_signed_message)
            performative_content['signed_message'] = signed_message
        elif (performative_id == SigningMessage.Performative.ERROR):
            pb2_error_code = signing_pb.error.error_code
            error_code = ErrorCode.decode(pb2_error_code)
            performative_content['error_code'] = error_code
        else:
            raise ValueError('Performative not valid: {}.'.format(performative_id))
        return SigningMessage(message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content)
def _deparse(seq):
    """Reconstruct a regex pattern string from an sre_parse op sequence.

    Inverse of sre_parse.parse for the subset of ops handled below; any
    unsupported op triggers an assertion.  Returns None for None input.
    """
    if (seq is None):
        return seq
    pattern = ''
    for (op, arg) in seq:
        if (op == c.ANY):
            pattern += '.'
        elif (op == c.LITERAL):
            # Escape characters that are special in regex syntax.
            if (chr(arg) in sre_parse.SPECIAL_CHARS):
                pattern += '\\'
            pattern += chr(arg)
        elif (op == c.MAX_REPEAT):
            # arg = (min, max, sub-sequence); pick the shortest quantifier.
            (min, max, item) = arg
            pattern += _deparse(item)
            if ((min == 0) and (max == c.MAXREPEAT)):
                pattern += '*'
            elif ((min == 0) and (max == 1)):
                pattern += '?'
            elif ((min == 1) and (max == c.MAXREPEAT)):
                pattern += '+'
            elif (min == max == 1):
                # {1,1} is implicit — emit nothing.
                pass
            elif (min == max):
                pattern += (('{' + str(min)) + '}')
            else:
                pattern += (((('{' + str(min)) + ',') + str(max)) + '}')
        elif ((op == c.AT) and (arg == c.AT_END)):
            pattern += '$'
        elif (op == c.SUBPATTERN):
            # Only the sub-sequence matters; group flags are dropped.
            (arg0, arg1, arg2, sseq) = arg
            pattern += (('(' + _deparse(sseq)) + ')')
        elif (op == c.BRANCH):
            (must_be_none, branches) = arg
            pattern += '|'.join([_deparse(a) for a in branches])
        elif (op == c.RANGE):
            (low, high) = arg
            pattern += ((chr(low) + '-') + chr(high))
        elif (op == c.IN):
            assert isinstance(arg, list)
            # A lone category (e.g. \d) needs no [...] brackets.
            if ((len(arg) == 1) and (arg[0][0] == c.CATEGORY)):
                pattern += _deparse(arg)
            else:
                pattern += (('[' + ''.join([_deparse([a]) for a in arg])) + ']')
        elif (op == c.CATEGORY):
            pattern += CATEGORY_PATTERNS[arg].pattern
        elif (op == c.NEGATE):
            pattern += '^'
        elif (op == c.GROUPREF):
            # Backreference, e.g. \1.
            pattern += f'\{arg}'
        else:
            assert False, f'unsupported regex pattern {op} with arg {arg}'
    return pattern
def _process_cert(key: Crypto, cert: CertRequest, path_prefix: str):
    """Sign the certificate-request message and persist the signature.

    The signature string is ASCII-encoded, hex-encoded, and written as
    ASCII bytes to the cert's save path under *path_prefix*.
    """
    # A public key must already be attached to the request.
    assert (cert.public_key is not None)
    message = cert.get_message(cert.public_key)
    signature = key.sign_message(message).encode('ascii').hex()
    Path(cert.get_absolute_save_path(path_prefix)).write_bytes(signature.encode('ascii'))
class SimpleEditor(Editor):
    """Traits UI editor that renders a tuple via a generated TupleStructure subpanel."""

    def init(self, parent):
        # Build the tuple structure and embed its UI as a subpanel.
        self._ts = ts = TupleStructure(self)
        self._ui = ui = ts.view.ui(ts, parent, kind='subpanel').trait_set(parent=self.ui)
        self.control = ui.control
        self.set_tooltip()

    def update_editor(self):
        # Push each tuple element into the generated f0, f1, ... traits.
        ts = self._ts
        for (i, value) in enumerate(self.value):
            setattr(ts, ('f%d' % i), value)

    def get_error_control(self):
        return self._ui.get_error_controls()
# NOTE(review): the decorator name was lost in extraction (the original line
# was a bare tuple, a syntax error).  Restored as a Flask route; 'app' is
# assumed — confirm against the module's Flask instance name.
@app.route('/plugin/<plugin>', methods=['GET', 'POST'])
def plugin(plugin):
    """Render a plugin's settings page; on POST, persist the submitted config."""
    plugins = _ui.plugins
    selected_plugin = list(filter((lambda p: (p['name'] == plugin)), plugins))
    result = False
    if (request.method == 'POST'):
        config_data = {}
        config_data['config_file'] = '{}/{}/{}'.format('./plugins', selected_plugin[0]['name'], 'config.json')
        for (key, value) in request.form.items():
            config_data[key] = value
        # Assigning to _ui.plugins writes the config file.
        _ui.plugins = config_data
        if config_data:
            result = True
        # Re-read so the page reflects the persisted state.
        plugins = _ui.plugins
        selected_plugin = list(filter((lambda p: (p['name'] == plugin)), plugins))
    return render_template('plugin_settings.html', plugin=selected_plugin[0], result=result, request=request.method)
class ScienceBeamParserSessionSource(_ScienceBeamParserSessionDerivative):
def __init__(self, session: 'ScienceBeamParserBaseSession', source_path: str, source_media_type: str):
super().__init__(session=session)
self.source_path = source_path
self.source_media_type = source_media_type
self.lazy_pdf_path = LazyLoaded[str](self._get_or_convert_to_pdf_path)
self.lazy_alto_xml_path = LazyLoaded[str](self._parse_to_alto_xml)
self.lazy_parsed_layout_document = LazyLoaded[ScienceBeamParserSessionParsedLayoutDocument](self._parse_to_parsed_layout_document)
def parser(self) -> ScienceBeamBaseParser:
return self.session.parser
def doc_to_pdf_enabled(self) -> bool:
return self.parser.doc_to_pdf_enabled
def doc_converter_wrapper(self) -> DocConverterWrapper:
return self.parser.doc_converter_wrapper
def doc_to_pdf_convert_parameters(self) -> dict:
return self.parser.doc_to_pdf_convert_parameters
def pdfalto_wrapper(self) -> PdfAltoWrapper:
return self.parser.pdfalto_wrapper
def document_request_parameters(self) -> DocumentRequestParameters:
return self.session.document_request_parameters
def _get_or_convert_to_pdf_path(self) -> str:
LOGGER.info('media_type=%r (filename=%r)', self.source_media_type, self.source_path)
if (self.source_media_type in DOC_TO_PDF_SUPPORTED_MEDIA_TYPES):
if (not self.doc_to_pdf_enabled):
LOGGER.info('doc to pdf not enabled')
raise UnsupportedRequestMediaTypeScienceBeamParserError('doc to pdf not enabled')
target_temp_file = self.doc_converter_wrapper.convert(self.source_path, **self.doc_to_pdf_convert_parameters)
return target_temp_file
if (self.source_media_type != MediaTypes.PDF):
raise UnsupportedRequestMediaTypeScienceBeamParserError(('unsupported media type: %r' % self.source_media_type))
return self.source_path
def _parse_to_alto_xml(self) -> str:
output_path = os.path.join(self.temp_dir, TEMP_ALTO_XML_FILENAME)
self.pdfalto_wrapper.convert_pdf_to_pdfalto_xml(str(self.lazy_pdf_path.get()), str(output_path), first_page=self.document_request_parameters.first_page, last_page=self.document_request_parameters.last_page)
return output_path
def _parse_to_parsed_layout_document(self) -> ScienceBeamParserSessionParsedLayoutDocument:
    """Parse the ALTO XML into a normalized layout document wrapper."""
    pdf_path = self.lazy_pdf_path.get()
    alto_root = etree.parse(self.lazy_alto_xml_path.get())
    # Normalize the parsed ALTO document, keeping empty pages (preserve_empty_pages=True).
    layout_document = normalize_layout_document(
        parse_alto_root(alto_root), preserve_empty_pages=True)
    return ScienceBeamParserSessionParsedLayoutDocument(
        self.session, layout_document=layout_document, pdf_path=pdf_path)
def get_parsed_layout_document(self) -> ScienceBeamParserSessionParsedLayoutDocument:
    # Lazily parse (at most once) and return the parsed layout document.
    return self.lazy_parsed_layout_document.get()
def get_layout_document(self) -> LayoutDocument:
    # Convenience accessor for the underlying layout document.
    return self.get_parsed_layout_document().layout_document
def get_local_file_for_response_media_type(self, response_media_type: str) -> str:
    """Return a local file path for the document rendered as the requested media type.

    PDF and ALTO XML are served from the lazily computed intermediate files;
    any other media type is delegated to the parsed layout document.
    """
    if (response_media_type == MediaTypes.PDF):
        return self.lazy_pdf_path.get()
    if (response_media_type == MediaTypes.ALTO_XML):
        return self.lazy_alto_xml_path.get()
    return self.get_parsed_layout_document().get_local_file_for_response_media_type(response_media_type)
def get_referents(ghidra_analysis, func, pc_address):
    """Collect referent addresses (at or before pc_address) of basic-block destinations.

    Walks the basic blocks of ``func``; for each block's destination references,
    appends the referent address while it is <= pc_address. Mirrors the original
    early-exit: once a referent is past pc_address, the rest of that block's
    destinations are skipped.
    """
    monitor = ghidra_analysis.monitor
    model = BasicBlockModel(ghidra_analysis.current_program)
    code_blocks = model.getCodeBlocksContaining(func.getBody(), monitor)
    found = []
    for code_block in iter_array(code_blocks, monitor):
        for destination in iter_array(code_block.getDestinations(monitor), monitor):
            referent = destination.getReferent()
            if referent > pc_address:
                break
            found.append(referent)
    return found
def test_transform_osci_languages(raw_commits, expected_language_commits):
    """Check the commits-per-language aggregation matches the expected rows."""
    df = get_languages_commits(
        df=raw_commits,
        company=PushEventsCommitsSchema.company,
        language=PushEventsCommitsSchema.language,
        commits_id_field=PushEventsCommitsSchema.sha,
        result_field='commits',
    )
    actual = [(row.company, row.language, row.commits) for row in df.collect()]
    # Deterministic ordering: sort by (company, language) before comparing.
    actual.sort(key=lambda item: (item[0], item[1]))
    assert actual == expected_language_commits
class TestChoiceFieldHTMLFormRenderer(TestCase):
    """Tests: HTMLFormRenderer must mark the correct <option> as selected."""

    def setUp(self):
        # Choices include both 1 and 12 (shared prefix) among the values.
        choices = ((1, 'Option1'), (2, 'Option2'), (12, 'Option12'))

        class TestSerializer(serializers.Serializer):
            test_field = serializers.ChoiceField(choices=choices, initial=2)
        self.TestSerializer = TestSerializer
        self.renderer = HTMLFormRenderer()

    def test_render_initial_option(self):
        """With no bound data, the field's `initial` choice (2) renders selected."""
        serializer = self.TestSerializer()
        result = self.renderer.render(serializer.data)
        self.assertIsInstance(result, SafeText)
        self.assertInHTML('<option value="2" selected>Option2</option>', result)
        self.assertInHTML('<option value="1">Option1</option>', result)
        self.assertInHTML('<option value="12">Option12</option>', result)

    def test_render_selected_option(self):
        """With bound data, the submitted choice (12) renders selected."""
        serializer = self.TestSerializer(data={'test_field': '12'})
        serializer.is_valid()
        result = self.renderer.render(serializer.data)
        self.assertIsInstance(result, SafeText)
        self.assertInHTML('<option value="12" selected>Option12</option>', result)
        self.assertInHTML('<option value="1">Option1</option>', result)
        self.assertInHTML('<option value="2">Option2</option>', result)
(name='set')
('--type', 'type_', default=None, type=click.Choice((CONFIG_SUPPORTED_KEY_TYPES + [None])), help='Specify the type of the value.')
('JSON_PATH', required=True)
('VALUE', required=True, type=str)
_ctx
def set_command(ctx: Context, json_path: str, value: str, type_: Optional[str]) -> None:
    """Set an agent config variable at ``json_path`` to ``value``.

    When no explicit ``type_`` is given, the value is coerced to the type of
    the current value at that path (falling back to plain string).

    :param ctx: click context carrying the working directory
    :param json_path: dotted path into the agent config
    :param value: new value as a string, converted before storing
    :param type_: optional explicit target type name
    :raises ClickException: on schema violations or conversion errors
    """
    try:
        agent_config_manager = AgentConfigManager.load(ctx.cwd)
        current_value = None
        with contextlib.suppress(VariableDoesNotExist):
            current_value = agent_config_manager.get_variable(json_path)
        if (type_ is None):
            # Default to string; prefer the current value's type when one exists.
            converted_value = convert_value_str_to_type(value, 'str')
            if (current_value is not None):
                with contextlib.suppress(Exception):
                    converted_value = convert_value_str_to_type(value, type(current_value).__name__)
        else:
            converted_value = convert_value_str_to_type(value, cast(str, type_))
        agent_config_manager.set_variable(json_path, converted_value)
        agent_config_manager.dump_config()
    except ExtraPropertiesError as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise ClickException(f'Attribute `{e.args[0][0]}` is not allowed to change!') from e
    except (ValueError, AEAException) as e:
        raise ClickException(str(e.args[0])) from e
(np.mod)
def mod(x, y, out=None, out_like=None, sizing='optimal', method='raw', **kwargs):
    """Fixed-point element-wise modulo (x % y), dispatched for np.mod.

    :param x: dividend (converted to Fxp if needed)
    :param y: divisor (converted to Fxp if needed)
    :param out: optional output Fxp object
    :param out_like: optional template for the output's format
    :param sizing: output sizing strategy ('optimal' uses the size computed below)
    :param method: 'repr' operates on represented values, 'raw' on raw integers
    """
    def _mod_repr(x, y):
        # Modulo over the represented (real) values.
        return (x % y)

    def _mod_raw(x, y, n_frac):
        # Modulo over raw integers after aligning both operands to n_frac
        # fractional bits; falls back to Python-object arrays (arbitrary
        # precision) when n_frac reaches the native word limit.
        precision_cast = ((lambda m: np.array(m, dtype=object)) if (n_frac >= _n_word_max) else (lambda m: m))
        return ((x.val * precision_cast((2 ** (n_frac - x.n_frac)))) % (y.val * precision_cast((2 ** (n_frac - y.n_frac)))))
    if (not isinstance(x, Fxp)):
        x = Fxp(x)
    if (not isinstance(y, Fxp)):
        y = Fxp(y)
    # Output format: widest fractional part of the operands; integer part uses
    # min() in the unsigned case, max() when signed — NOTE(review): confirm
    # this sizing choice against fxpmath's conventions for mod.
    signed = (x.signed or y.signed)
    n_int = (max(x.n_int, y.n_int) if signed else min(x.n_int, y.n_int))
    n_frac = max(x.n_frac, y.n_frac)
    n_word = ((int(signed) + n_int) + n_frac)
    optimal_size = (signed, n_word, n_int, n_frac)
    return _function_over_two_vars(repr_func=_mod_repr, raw_func=_mod_raw, x=x, y=y, out=out, out_like=out_like, sizing=sizing, method=method, optimal_size=optimal_size, **kwargs)
_field_header([ofproto.OXM_OF_VLAN_VID, ofproto.OXM_OF_VLAN_VID_W])
class MTVlanVid(OFPMatchField):
    """OXM match field for the VLAN VID (with optional mask)."""
    # Wire format: one unsigned 16-bit big-endian value.
    pack_str = '!H'

    def __init__(self, header, value, mask=None):
        super(MTVlanVid, self).__init__(header)
        self.value = value
        self.mask = mask

    def field_parser(cls, header, buf, offset):
        # NOTE(review): first argument is `cls` — presumably decorated as a
        # @classmethod in the original source (decorator not visible here).
        m = super(MTVlanVid, cls).field_parser(header, buf, offset)
        # Strip the OFPVID_PRESENT flag bit so callers see the bare VLAN id.
        m.value &= (~ ofproto.OFPVID_PRESENT)
        return m
def move_all_anim_curves():
    """Maya/pymel tool window: shift every key at/after the current time by an offset.

    Builds a small UI with -/+ buttons; keys on all anim curves from the current
    time onward are moved back/forward by the integer offset. Moving back checks
    for overlap with existing keys, and locked curves can be unlocked on demand.
    """
    def check_overlapping(anim_curves, choice, current_time, offset_val):
        # Warn per offending key when the move would land on/through an existing
        # key; 'Yes' stops checking and continues, 'No' aborts the operation.
        for anim_curve in anim_curves:
            key_cnt = anim_curve.numKeys()
            message = 'Some Keys are overlapping within Offset Value\n'
            message += 'Do you want continue on Moving other Keys ?\n'
            for i in range(0, key_cnt):
                key_time = anim_curve.getTime(i)
                if (choice == 'forward'):
                    if (key_time <= (current_time + offset_val)):
                        range_dialog = pm.confirmDialog(title='Error', message=message, button=['Yes', 'No'], cancelButton='No', dismissString='No')
                        if (range_dialog == 'Yes'):
                            return 1
                        else:
                            raise RuntimeError('Move Keys process interrupted by User.')
                if (choice == 'back'):
                    if (key_time >= (current_time + offset_val)):
                        range_dialog = pm.confirmDialog(title='Error', message=message, button=['Yes', 'No'], cancelButton='No', dismissString='No')
                        if (range_dialog == 'Yes'):
                            return 1
                        else:
                            raise RuntimeError('Move Keys process interrupted by User.')

    def move_all_keys(choice):
        # choice: 'forward' (positive offset) or 'back' (negated offset).
        offset_val = offset_intfield.getValue()
        if (offset_val < 1):
            raise RuntimeError('Enter an Offset Value greater than 0.')
        if (choice == 'back'):
            offset_val = (offset_intfield.getValue() * (- 1))
        unlock_val = unlock_state.getValue1()
        current_time = pm.currentTime()
        anim_curves = pm.ls(type='animCurve')
        non_moved_curves = []
        if (choice == 'back'):
            # Only backward moves are overlap-checked against earlier keys.
            check_overlapping(anim_curves, choice, current_time, offset_val)
        for anim_curve in anim_curves:
            try:
                if ((unlock_val is True) and anim_curve.isLocked()):
                    anim_curve.setLocked(0)
                key_cnt = anim_curve.numKeys()
                # Iterate last-to-first when moving forward, first-to-last when
                # moving back, so moved keys don't collide with unmoved ones.
                for i in range(1, (key_cnt + 1)):
                    if (choice == 'forward'):
                        ind = (key_cnt - i)
                    if (choice == 'back'):
                        ind = (i - 1)
                    if (anim_curve.getTime(ind) >= current_time):
                        pm.keyframe(anim_curve, index=ind, iub=False, animation='objects', relative=True, option='move', tc=offset_val)
            except:
                # NOTE(review): bare except keeps the tool going past curves that
                # cannot be moved; they are collected and reported afterwards.
                if (anim_curve not in non_moved_curves):
                    non_moved_curves.append(anim_curve)
                continue
        if (not non_moved_curves):
            pm.confirmDialog(title='Info', message='Keys Moved Successfully.', button='OK')
        else:
            # Report curves that could not be moved (list truncated past ~30 entries).
            message = 'Anim Curves can NOT be moved:\r\n'
            message += '\r'
            for i in range(0, len(non_moved_curves)):
                message += ('%s\n' % non_moved_curves[i])
                if (i > 30):
                    message += '+ More...\n'
                    break
            print(non_moved_curves)
            pm.confirmDialog(title='Error', message=message, button='OK')

    # --- UI construction ---
    window_name = 'move_keys_window'
    if pm.window(window_name, q=True, ex=True):
        pm.deleteUI(window_name, wnd=True)
    move_keys_win = pm.window(window_name, title='Move Keys', s=0, rtf=1)
    with pm.columnLayout(rs=5, cal='center'):
        pm.text(l=' MOVE ALL KEYS')
        pm.text(l=' relatively from currentTime')
        pm.text(l=' (overlapping Keys will NOT be moved)')
        with pm.rowColumnLayout(nc=3, cw=[(1, 70), (2, 70), (3, 70)]):
            def exec_move_all_keys_back(*args):
                move_all_keys('back')
            pm.button(l='-', c=exec_move_all_keys_back)
            offset_intfield = pm.intField()

            def exec_move_all_keys_forward(*args):
                move_all_keys('forward')
            pm.button(l='+', c=exec_move_all_keys_forward)
        with pm.columnLayout():
            unlock_state = pm.checkBoxGrp(l='Unlock & Move', v1=1)
    pm.showWindow(move_keys_win)
(name='intercom')
def get_intercom_for_testing():
    """Yield an InterCom backend wired to service mocks; shut it down afterwards."""
    queue = Queue()
    backend = InterComBackEndBinding(
        testing=True,
        analysis_service=AnalysisServiceMock(),
        compare_service=ServiceMock(queue),
        unpacking_service=ServiceMock(queue),
    )
    backend.WAIT_TIME = 2
    yield backend
    # Teardown: stop the backend before closing the shared queue.
    backend.shutdown()
    queue.close()
class EnumDisplays(Enums):
    """Helpers emitting JavaScript predicates for conditional datapoint display."""

    def withoutZeros(self):
        """Display only datapoints whose value is non-zero."""
        return self._set_value(
            'function(context){return context.dataset.data[context.dataIndex] !== 0}',
            js_type=True)

    def aboveThreshold(self, value: float, included: bool = True, absolute: bool = False):
        """Display datapoints above ``value`` (>= when included; abs() when absolute)."""
        data_expr = (
            'Math.abs(context.dataset.data[context.dataIndex])'
            if absolute else
            'context.dataset.data[context.dataIndex]')
        operator = '>=' if included else '>'
        return self._set_value(
            'function(context) {return %s %s %s}' % (data_expr, operator, value),
            js_type=True)

    def belowThreshold(self, value: float, included: bool = True, absolute: bool = False):
        """Display datapoints below ``value`` (<= when included; abs() when absolute)."""
        data_expr = (
            'Math.abs(context.dataset.data[context.dataIndex])'
            if absolute else
            'context.dataset.data[context.dataIndex]')
        operator = '<=' if included else '<'
        return self._set_value(
            'function(context){return %s %s %s}' % (data_expr, operator, value),
            js_type=True)
_required
_passes_test(is_speaker, 'index')
def edit_activity_proposal(request, event_slug, activity_id):
    """View: let a speaker edit their own activity proposal for an event.

    Rejects edits once the event schedule is confirmed; on a valid POST saves
    the form and redirects to the image-cropping step.
    """
    event = get_object_or_404(Event, event_slug=event_slug)
    if event.schedule_confirmed:
        # Proposals are frozen once the schedule is confirmed.
        messages.error(request, _('The activity proposal edition is already closed, the schedule is confirmed or the event is not accepting proposals through this page. Please contact the Event Organization Team to submit it.'))
        return redirect(reverse('index', args=[event_slug]))
    # 404 unless the current user owns this activity for this event.
    event_user = get_object_or_404(EventUser, user=request.user, event=event)
    activity = get_object_or_404(Activity, event=event, owner=event_user, pk=activity_id)
    activity_form = ActivityProposalForm((request.POST or None), (request.FILES or None), instance=activity)
    if request.POST:
        if activity_form.is_valid():
            try:
                activity = activity_form.save()
                return redirect(reverse('image_cropping', args=[event_slug, activity.pk]))
            except Exception as error_message:
                # Save failed: log and fall through to re-render with an error.
                logger.error(error_message)
                messages.error(request, _('There was a problem submitting the proposal. Please check the form for errors.'))
    return render(request, 'activities/proposal.html', update_event_info(event_slug, {'form': activity_form, 'errors': [], 'multipart': True}, event=event))
class JoinPoint():
    """A join point identified by ``id``; equality and hashing are id-based."""

    def __init__(self, id, clients_executing_completing_task=None, any_task_completes_parent=None):
        # Normalise None to fresh lists (avoids shared mutable defaults).
        self.id = id
        self.any_task_completes_parent = (
            [] if any_task_completes_parent is None else any_task_completes_parent)
        self.clients_executing_completing_task = (
            [] if clients_executing_completing_task is None
            else clients_executing_completing_task)
        self.num_clients_executing_completing_task = len(self.clients_executing_completing_task)
        # True when at least one client executes the completing task.
        self.preceding_task_completes_parent = self.num_clients_executing_completing_task > 0

    def __hash__(self):
        return hash(self.id)

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.id == other.id

    def __repr__(self, *args, **kwargs):
        return 'JoinPoint(%s)' % self.id
class OptionSeriesDumbbellSonificationTracksMappingNoteduration(Options):
    """Note-duration mapping options for dumbbell-series sonification tracks.

    Each option appears twice: getter first, then setter — presumably decorated
    with @property / @<name>.setter in the original source (decorators are not
    visible in this chunk).
    """

    def mapFunction(self):
        # Mapping function identifier; None when unset.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Data property the note duration is mapped to.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Upper bound of the mapped range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Lower bound of the mapped range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def extractClownTrans(item):
    """Build a release message for recognised series, else None (no info) / False."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    if (not (chp or vol)) or ('preview' in title_lower):
        # No chapter/volume information, or a preview post: skip.
        return None
    if chp and ('Tensei Shitara Slime datta ken' in item['tags']):
        return buildReleaseMessageWithType(
            item, 'Tensei Shitara Slime Datta Ken', vol, chp, frag=frag, postfix=postfix)
    # Unrecognised series tag.
    return False
def h4(T, A1, A2, KeyID):
    """Derive (Hash, DAK) via HMAC-SHA256 keyed with T over KeyID || A1 || A2.

    :param T: 16-byte HMAC key (bytearray)
    :param A1: 6-byte address (bytearray)
    :param A2: 6-byte address (bytearray)
    :param KeyID: 4-byte key identifier
    :return: tuple (Hash, DAK) — the 32-byte digest and its first 16 bytes
    """
    # isinstance() instead of type()== (idiomatic; also accepts subclasses).
    assert ((len(T) == 16) and isinstance(T, bytearray))
    assert ((len(A1) == 6) and isinstance(A1, bytearray))
    assert ((len(A2) == 6) and isinstance(A2, bytearray))
    # The previous pre-allocations of Hash/DAK were dead code (immediately
    # overwritten below) and have been removed.
    h = hmac.HMAC(T, hashes.SHA256(), backend=default_backend())
    message = bytearray()
    message.extend(KeyID)
    message.extend(A1)
    message.extend(A2)
    # KeyID(4) + A1(6) + A2(6) = 16 bytes total.
    assert (len(message) == ((4 + 6) + 6))
    h.update(message)
    Hash = bytearray(h.finalize())
    DAK = Hash[:16]
    return (Hash, DAK)
class LimitsCustomizationView(CustomizationView):
    """Customization pane exposing axis limits (min/max for X and Y)."""

    def __init__(self):
        CustomizationView.__init__(self)
        limits_widget = LimitsWidget()
        self._limits_widget = limits_widget
        self.addHeading('X-axis')
        self.addRow('Minimum', limits_widget.x_minimum_stack)
        self.addRow('Maximum', limits_widget.x_maximum_stack)
        self.addHeading('Y-axis')
        self.addRow('Minimum', limits_widget.y_minimum_stack)
        self.addRow('Maximum', limits_widget.y_maximum_stack)

    def setAxisTypes(self, x_axis_type, y_axis_type):
        # Switch the stacked input widgets to match each axis' value type.
        self._limits_widget.switchInputOnX(x_axis_type)
        self._limits_widget.switchInputOnY(y_axis_type)

    def revertCustomization(self, plot_config: 'PlotConfig'):
        # Load current limits from the plot config into the widget.
        self._limits_widget.limits = plot_config.limits

    def applyCustomization(self, plot_config: 'PlotConfig'):
        # Write the widget's limits back to the plot config.
        plot_config.limits = self._limits_widget.limits
def read_node(expected_node, wire_file, node_index):
    """Yield the wire lists recorded for ``expected_node`` at each offset in ``node_index``.

    Each line of ``wire_file`` has the form ``pip node wire [wire ...]``; the
    node name on every visited line is asserted to match ``expected_node``.
    """
    with open(wire_file, 'rb') as handle:
        for offset in node_index:
            handle.seek(offset, 0)
            fields = handle.readline().decode('utf8').strip().split(' ')
            pip, node = fields[:2]
            # Sanity check: the index must point at a line for this node.
            assert node == expected_node, repr((node, expected_node, offset))
            yield fields[2:]
class PointLoad(Source):
    """Mayavi source wrapping ``tvtk.PointLoad`` (outputs image data)."""
    __version__ = 0
    # The underlying tvtk object; recorded for scene persistence.
    point_load = Instance(tvtk.PointLoad, args=(), allow_none=False, record=True)
    # This source produces image_data datasets with arbitrary attributes.
    output_info = PipelineInfo(datasets=['image_data'], attribute_types=['any'], attributes=['any'])
    view = View(Group(Item(name='point_load', style='custom', resizable=True), label='PointLoad', show_labels=False), resizable=True)

    def __init__(self, **traits):
        super(PointLoad, self).__init__(**traits)
        # Re-render whenever any trait of the tvtk object changes.
        self.point_load.on_trait_change(self.render)
        self.outputs = [self.point_load]

    def has_output_port(self):
        # This source exposes a VTK output port.
        return True

    def get_output_object(self):
        return self.point_load.output_port
class CmdNoMatch(Command):
    """Catch-all command routing all unmatched input to the active building menu.

    Handles go-back keys, per-choice nomatch hooks, and matching of input
    against the menu's relevant choices.
    """
    key = _CMD_NOMATCH
    locks = 'cmd:all()'

    def __init__(self, **kwargs):
        # The building menu is injected by the cmdset that created this command.
        self.menu = kwargs.pop('building_menu', None)
        super().__init__(**kwargs)

    def func(self):
        raw_string = self.args.rstrip()
        if (self.menu is None):
            # Menu reference lost: report and remove the orphaned cmdset.
            log_err("When CMDNOMATCH was called, the building menu couldn't be found")
            self.caller.msg("|rThe building menu couldn't be found, remove the CmdSet.|n")
            self.caller.cmdset.delete(BuildingMenuCmdSet)
            return
        choice = self.menu.current_choice
        if (raw_string in self.menu.keys_go_back):
            # Navigate up: leave the current choice, or open the parent menu.
            if self.menu.keys:
                self.menu.move(back=True)
            elif self.menu.parents:
                self.menu.open_parent_menu()
            else:
                self.menu.display()
        elif choice:
            # Inside a choice: let its nomatch hook consume the input.
            if choice.nomatch(raw_string):
                self.caller.msg(choice.format_text())
        else:
            # At menu root: match input against choice keys and aliases.
            for choice in self.menu.relevant_choices:
                if ((choice.key.lower() == raw_string.lower()) or any(((raw_string.lower() == alias) for alias in choice.aliases))):
                    self.menu.move(choice.key)
                    return
            self.msg('|rUnknown command: {}|n.'.format(raw_string))
class TestBugLink():
    """Tests for ``util.bug_link`` HTML rendering and sanitisation.

    NOTE(review): the expected strings contain ``href=' `` with no URL — the
    URL literal appears to have been lost when this file was extracted;
    restore it from the original test suite before relying on these asserts.
    """

    def test_short_false_with_title(self):
        # Non-short link renders the anchor followed by the bug title.
        bug = mock.MagicMock()
        bug.bug_id = 1234567
        bug.title = 'Lucky bug number'
        link = util.bug_link(None, bug)
        assert (link == "<a target='_blank' href=' class='notblue'>BZ#1234567</a> Lucky bug number")

    def test_short_false_with_title_sanitizes_safe_tags(self):
        # Whitelisted markup (e.g. <b>) survives sanitisation.
        bug = mock.MagicMock()
        bug.bug_id = 1234567
        bug.title = 'Check <b>this</b> out'
        link = util.bug_link(None, bug)
        assert (link == "<a target='_blank' href=' class='notblue'>BZ#1234567</a> Check <b>this</b> out")

    def test_short_false_with_title_sanitizes_unsafe_tags(self):
        # Unknown tags are escaped; bleach >= 3 stopped appending closing tags,
        # hence the version-dependent expected value.
        bug = mock.MagicMock()
        bug.bug_id = 1473091
        bug.title = '<disk> <driver name="..."> should be optional'
        link = util.bug_link(None, bug)
        bleach_v = pkg_resources.parse_version(bleach.__version__)
        if (bleach_v >= pkg_resources.parse_version('3.0.0')):
            assert (link == '<a target=\'_blank\' href=\' class=\'notblue\'>BZ#1473091</a> <disk> <driver name="..."> should be optional')
        else:
            assert (link == '<a target=\'_blank\' href=\' class=\'notblue\'>BZ#1473091</a> <disk> <driver name="..."> should be optional</driver></disk>')

    def test_short_false_without_title(self):
        # A missing title renders a spinner placeholder.
        bug = mock.MagicMock()
        bug.bug_id = 1234567
        bug.title = None
        link = util.bug_link(None, bug)
        assert (link == "<a target='_blank' href=' class='notblue'>BZ#1234567</a> <i class='fa fa-spinner fa-spin fa-fw'></i>")

    def test_short_true(self):
        # short=True renders only the anchor, no title.
        bug = mock.MagicMock()
        bug.bug_id = 1234567
        bug.title = 'Lucky bug number'
        link = util.bug_link(None, bug, True)
        assert (link == "<a target='_blank' href=' class='notblue'>BZ#1234567</a>")
def filter_firewall_access_proxy_ssh_client_cert_data(json):
    """Project ``json`` onto the module's supported option keys.

    Drops keys outside the supported list and keys whose value is None.

    :param json: raw parameter dict from the playbook
    :return: filtered dict containing only supported, non-None attributes
    """
    option_list = ['auth_ca', 'cert_extension', 'name', 'permit_agent_forwarding', 'permit_port_forwarding', 'permit_pty', 'permit_user_rc', 'permit_x11_forwarding', 'source_address']
    # Strip fields the target schema does not recognise before filtering.
    json = remove_invalid_fields(json)
    # Dict comprehension replaces the manual append loop (same semantics).
    return {
        attribute: json[attribute]
        for attribute in option_list
        if (attribute in json) and (json[attribute] is not None)
    }
class TagsFilter(admin.SimpleListFilter):
    """Admin list filter over the array-typed ``tags`` column of frontend_maillog."""
    title = 'Tags'
    parameter_name = 'tags'

    def lookups(self, request, model_admin):
        """Return (value, label) pairs for every distinct tag.

        The tags column is an array, so distinct values are gathered with a raw
        UNNEST query. The previous ORM expression
        (`model_admin.model.objects.order_by().values('tags').distinct()`) was
        dead code — its result was immediately overwritten — and was removed.
        """
        with connection.cursor() as cursor:
            cursor.execute('SELECT UNNEST(tags) AS tag FROM frontend_maillog GROUP BY tag')
            tags = cursor.fetchall()
        return ((t[0], t[0]) for t in tags)

    def queryset(self, request, queryset):
        """Restrict the queryset to rows containing the selected tag, if any."""
        if self.value():
            return queryset.filter(tags__contains=[self.value()])
        else:
            return queryset
def get_fake_bucket_resource(fake_bucket_data_input):
    """Build a fake bucket resource with the input's lifecycle rules applied."""
    creater = FakeBucketDataCreater(fake_bucket_data_input.id, fake_bucket_data_input.project)
    for rule in fake_bucket_data_input.lifecycles:
        conditions = rule.conditions
        creater.AddLifecycleDict(
            action=rule.action,
            age=conditions.get('age'),
            created_before=conditions.get('created_before'),
            matches_storage_class=conditions.get('matches_storage_class'),
            num_newer_versions=conditions.get('num_newer_versions'),
            is_live=conditions.get('is_live'),
        )
    return creater.get_resource()
class MyEnv(gym.Env):
    """Toy 1-D random-walk env: x starts uniform in [-1, 1]; actions nudge it by 0.3."""

    def __init__(self):
        super().__init__()
        self.action_space = Discrete(2)

    def seed(self, seed=None):
        """Seed the env's RNG and return the list of seeds used (gym convention)."""
        (self.np_random, seed) = seeding.np_random(seed)
        return [seed]

    def reset(self, env_info=None):
        """Reset state; ``env_info`` must carry 'env_id' (defaults to {'env_id': 0}).

        The previous signature used a mutable default dict; replaced by a None
        sentinel (behaviour unchanged — the dict was only read, never mutated).
        """
        if env_info is None:
            env_info = {'env_id': 0}
        assert ('env_id' in env_info)
        self.env_id = env_info['env_id']
        self.x = ((self.np_random.rand() * 2.0) - 1.0)
        # Random identifier distinguishes env instances across copies.
        self.identifier = self.np_random.rand()
        obs = {'x': self.x, 'identifier': self.identifier, 'env_id': self.env_id}
        return obs

    def step(self, action):
        """Apply action (0: x -= 0.3, else x += 0.3); episode ends when |x| > 1."""
        if (action == 0):
            self.x -= 0.3
        else:
            self.x += 0.3
        done = ((self.x < (- 1)) or (self.x > 1))
        obs = ({'x': self.x, 'identifier': self.identifier, 'env_id': self.env_id}, self.x, done, {})
        return obs
class Tofi(Selector):
    """Selector backend driving the ``tofi`` launcher via subprocess."""

    def supported() -> bool:
        # NOTE(review): no `self` — presumably @staticmethod in the original
        # source (decorator not visible in this chunk).
        return is_installed('tofi')

    def name() -> str:
        # NOTE(review): no `self` — presumably @staticmethod.
        return 'tofi'

    def show_character_selection(self, characters: Dict[(str, str)], recent_characters: List[str], prompt: str, show_description: bool, use_icons: bool, keybindings: Dict[(Action, str)], additional_args: List[str]) -> Tuple[(Union[(Action, DEFAULT, CANCEL)], Union[(List[str], Shortcut)])]:
        """Show fuzzy character selection and return the chosen character.

        tofi prints the selected row index (--print-index=true), which is
        mapped back into the ``characters`` dict by position.
        """
        parameters = ['tofi', '--require-match=true', '--matching-algorithm=fuzzy', f'--prompt-text={prompt}', '--print-index=true', *additional_args]
        tofi = run(parameters, input='\n'.join(self.basic_format_characters(characters)), capture_output=True, encoding='utf-8')
        return (DEFAULT(), [self.extract_char_from_input(list(characters)[int(tofi.stdout.strip())])])

    def show_skin_tone_selection(self, tones_emojis: List[str], prompt: str, additional_args: List[str]) -> Tuple[(int, str)]:
        """Show skin-tone selection; return (returncode, stdout)."""
        tofi = run(['tofi', '--require-match=true', f'--prompt-text={prompt}', *additional_args], input='\n'.join(tones_emojis), capture_output=True, encoding='utf-8')
        return (tofi.returncode, tofi.stdout)

    def show_action_menu(self, additional_args: List[str]) -> List[Action]:
        """Show the action menu (all actions except MENU); return the chosen action."""
        tofi = run(['tofi', '--require-match=true', '--matching-algorithm=fuzzy', *additional_args], input='\n'.join([it.value for it in Action if (it != Action.MENU)]), capture_output=True, encoding='utf-8')
        return [Action(tofi.stdout.strip())]
class ICMP(Module):
    """Minimal ICMP module: wraps ICMP packets in IPv4 and dispatches received ones."""

    def __init__(self, ip, ip_address, debug=False):
        """
        :param ip: IP layer providing send() and the ICMP callback hook
        :param ip_address: our local IPv4 address (used as sender)
        :param debug: when True, print packets on send/receive
        """
        self.ip = ip
        self.ip_address = ip_address
        self.debug = debug
        self.tx_packets = []
        self.tx_packet = ICMPPacket()
        self.rx_packet = ICMPPacket()
        # Register so incoming ICMP payloads are delivered to self.callback.
        self.ip.set_icmp_callback(self.callback)

    def send(self, packet):
        """Encode an ICMP packet and transmit it inside a fresh IPv4 header."""
        packet.encode()
        if self.debug:
            print_icmp('')
            print_icmp(packet)
        ip_packet = ip.IPPacket(packet)
        ip_packet.version = 4
        ip_packet.ihl = 5
        # NOTE(review): this adds ihl in 32-bit words rather than bytes (ihl*4);
        # per RFC 791 Total Length is in octets — confirm what IPPacket expects
        # before changing.
        ip_packet.total_length = (len(packet) + ip_packet.ihl)
        ip_packet.identification = 0
        ip_packet.flags = 0
        ip_packet.fragment_offset = 0
        ip_packet.ttl = 128
        ip_packet.sender_ip = self.ip_address
        # NOTE(review): the original line was truncated ('ip_packet.target_ip ='
        # with no value). Taking the destination from the ICMP packet object is
        # a best guess — confirm the intended source of the target address.
        ip_packet.target_ip = packet.target_ip
        ip_packet.checksum = 0
        ip_packet.protocol = icmp_protocol
        self.ip.send(ip_packet)

    def callback(self, packet):
        """Decode a received raw ICMP payload and hand it to process()."""
        packet = ICMPPacket(packet)
        packet.decode()
        if self.debug:
            print_icmp('')
            print_icmp(packet)
        self.process(packet)

    def process(self, packet):
        """Hook for subclasses: handle a decoded inbound ICMP packet."""
        pass
def main():
    """Entry point for the FortiOS dnsfilter_profile Ansible module.

    Builds the argument spec from the versioned schema, applies the requested
    configuration change over the httpapi connection, and exits with the
    result (warning on FortiOS/schema version mismatches).
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # The primary-key attribute of the managed object.
    mkeyname = 'name'
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'dnsfilter_profile': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Copy the schema-derived options into the nested spec; the mkey is required.
    for attribute_name in module_spec['options']:
        fields['dnsfilter_profile']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['dnsfilter_profile']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Compare the playbook parameters against the device's schema version.
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'dnsfilter_profile')
        (is_error, has_changed, result, diff) = fortios_dnsfilter(module.params, fos, module.check_mode)
    else:
        # No persistent connection available: the module cannot operate.
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
def fragment(geom, prim_coord, step=6, step_num=50):
    """Scan a geometry along one primitive internal coordinate.

    Displaces ``geom`` by step/step_num along ``prim_coord`` for ``step_num``
    iterations, collecting the cartesian coordinates after each step.

    :param geom: geometry with internal coordinates (must not be cartesian)
    :param prim_coord: primitive coordinate to displace along
    :param step: total displacement over the whole scan
    :param step_num: number of displacement increments
    :return: list of cartesian coordinate arrays, one per step
    """
    assert (geom.coord_type != 'cart')
    index_ = geom.internal.get_index_of_prim_coord(prim_coord)
    prim_val = geom.coords[index_]
    print(prim_coord, index_, prim_val)
    # One increment along the selected primitive; all other coordinates zero.
    # (step_ was previously computed but unused, duplicating the expression.)
    step_ = (step / step_num)
    base_step = np.zeros_like(geom.coords)
    base_step[index_] = step_
    cart_coords = list()
    for i in range(step_num):
        print(f'{i:02d} ... ', end='')
        new_coords = (geom.coords + base_step)
        geom.coords = new_coords
        # Report whether the internal -> cartesian backtransform succeeded.
        print((not geom.internal.backtransform_failed))
        cart_coords.append(geom.cart_coords.copy())
    return cart_coords
class Operator(Node):
    """Base class for computation-graph operators.

    Tracks inputs/outputs via the ``_attrs`` dict and provides profiling and
    code-generation hooks for subclasses.
    """

    def __init__(self) -> None:
        super().__init__()
        self._attrs['inputs'] = None
        self._attrs['has_profiler'] = False

    def __call__(self, *args: List[Tensor]) -> List[Tensor]:
        # Subclasses implement the graph-construction call.
        raise NotImplementedError

    def __deepcopy__(self, memo):
        # Rebuild via the op's constructor attributes, then deep-copy _attrs.
        result = type(self)(**self._get_op_attributes())
        memo[id(self)] = result
        result._attrs = copy.deepcopy(self._attrs, memo)
        return result

    def _set_depth(self) -> None:
        # Depth = max depth over inputs; also registers self as a consumer
        # (dst_op) of each input tensor.
        max_depth = 0
        if (self._attrs['inputs'] is not None):
            for inp in self._attrs['inputs']:
                max_depth = max(max_depth, inp._attrs['depth'])
                inp._attrs['dst_ops'].add(self)
        self._attrs['depth'] = max_depth

    def __str__(self) -> str:
        # Pretty-print _attrs, replacing tensor lists by their names.
        output = {}
        for key in self._attrs.keys():
            if ((key in ('inputs', 'args', 'outputs', 'original_inputs')) and (self._attrs[key] is not None)):
                output[key] = [x._attrs['name'] for x in self._attrs[key]]
            else:
                output[key] = self._attrs[key]
        return pformat(output, indent=2)

    def gen_profiler(self, workdir: str=None, dynamic_profiling_strategy=None) -> None:
        # No-op by default; ops with profilers override this.
        return

    def profile(self, workdir='./', devices=None, dynamic_profiling_strategy=DynamicProfileStrategy.MAX) -> None:
        # No-op by default; ops with profilers override this.
        return

    def gen_function(self) -> str:
        raise NotImplementedError('gen_function is not defined for {}'.format(self))

    def replace_input_tensor(self, old_tensor, new_tensor) -> None:
        # Identity-based (is) replacement of an input tensor.
        self._attrs['inputs'] = [(new_tensor if (tensor is old_tensor) else tensor) for tensor in self._attrs['inputs']]

    def _get_op_attributes(self) -> Dict[(str, Any)]:
        # Constructor kwargs needed to recreate this op (used by __deepcopy__).
        return {}

    def _inputs_for_pseudo_code(self):
        return self._attrs['inputs']

    def _outputs_for_pseudo_code(self):
        return self._attrs['outputs']

    def _args_for_pseudo_code(self):
        return [f'{key}={value}' for (key, value) in self._get_op_attributes().items()]

    def _pseudo_code_helper(self, node: Any, with_shape: bool) -> str:
        # Render nested nodes/lists; long tensor lists go one-per-line.
        if isinstance(node, list):
            if ((len(node) > 3) and isinstance(node[0], Tensor)):
                return ',\n'.join((self._pseudo_code_helper(n, with_shape) for n in node))
            else:
                return ', '.join((self._pseudo_code_helper(n, with_shape) for n in node))
        if isinstance(node, Node):
            return node.pseudo_code(with_shape)
        return str(node)

    def pseudo_code(self, with_shape=True):
        """Return a readable pseudo-code rendering of this op and its tensors."""
        args = self._pseudo_code_helper(self._args_for_pseudo_code(), with_shape)
        inputs = self._pseudo_code_helper(self._inputs_for_pseudo_code(), with_shape)
        outputs = self._pseudo_code_helper(self._outputs_for_pseudo_code(), with_shape)
        name = self._attrs.get('name', None)
        return f'''# {name}
({outputs})
= {self._attrs['op']}({args})(
{inputs})
'''
class OptionScaleProjection(Options):
    """Map-projection options for a chart scale.

    Each option appears twice: getter first, then setter — presumably decorated
    with @property / @<name>.setter in the original source (decorators are not
    visible in this chunk).
    """

    def axis(self):
        return self._config_get()

    def axis(self, val: str):
        self._config(val)

    def center(self):
        # Projection center coordinates.
        return self._config_get()

    def center(self, values: List[float]):
        self._config(values)

    def padding(self):
        return self._config_get()

    def padding(self, val: int):
        self._config(val)

    def projection(self):
        # Projection name (string identifier).
        return self._config_get()

    def projection(self, val: str):
        self._config(val)

    def projectionScale(self):
        return self._config_get()

    def projectionScale(self, val: int):
        self._config(val)

    def projectionOffset(self):
        return self._config_get()

    def projectionOffset(self, values: List[float]):
        self._config(values)
class LiveVideo(AbstractCrudObject):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isLiveVideo = True
super(LiveVideo, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
ad_break_config = 'ad_break_config'
ad_break_failure_reason = 'ad_break_failure_reason'
broadcast_start_time = 'broadcast_start_time'
copyright = 'copyright'
creation_time = 'creation_time'
dash_ingest_url = 'dash_ingest_url'
dash_preview_url = 'dash_preview_url'
description = 'description'
embed_html = 'embed_html'
field_from = 'from'
id = 'id'
ingest_streams = 'ingest_streams'
is_manual_mode = 'is_manual_mode'
is_reference_only = 'is_reference_only'
live_views = 'live_views'
overlay_url = 'overlay_url'
permalink_url = 'permalink_url'
planned_start_time = 'planned_start_time'
recommended_encoder_settings = 'recommended_encoder_settings'
seconds_left = 'seconds_left'
secure_stream_url = 'secure_stream_url'
status = 'status'
stream_url = 'stream_url'
targeting = 'targeting'
title = 'title'
total_views = 'total_views'
video = 'video'
class Projection():
cubemap = 'CUBEMAP'
equirectangular = 'EQUIRECTANGULAR'
half_equirectangular = 'HALF_EQUIRECTANGULAR'
class SpatialAudioFormat():
ambix_4 = 'ambiX_4'
class Status():
live_now = 'LIVE_NOW'
scheduled_canceled = 'SCHEDULED_CANCELED'
scheduled_live = 'SCHEDULED_LIVE'
scheduled_unpublished = 'SCHEDULED_UNPUBLISHED'
unpublished = 'UNPUBLISHED'
class StereoscopicMode():
left_right = 'LEFT_RIGHT'
mono = 'MONO'
top_bottom = 'TOP_BOTTOM'
class StreamType():
ambient = 'AMBIENT'
regular = 'REGULAR'
class BroadcastStatus():
live = 'LIVE'
live_stopped = 'LIVE_STOPPED'
processing = 'PROCESSING'
scheduled_canceled = 'SCHEDULED_CANCELED'
scheduled_expired = 'SCHEDULED_EXPIRED'
scheduled_live = 'SCHEDULED_LIVE'
scheduled_unpublished = 'SCHEDULED_UNPUBLISHED'
unpublished = 'UNPUBLISHED'
vod = 'VOD'
class Source():
owner = 'owner'
target = 'target'
class LiveCommentModerationSetting():
value_default = 'DEFAULT'
discussion = 'DISCUSSION'
followed = 'FOLLOWED'
follower = 'FOLLOWER'
no_hyperlink = 'NO_HYPERLINK'
protected_mode = 'PROTECTED_MODE'
restricted = 'RESTRICTED'
slow = 'SLOW'
supporter = 'SUPPORTER'
tagged = 'TAGGED'
class PersistentStreamKeyStatus():
disable = 'DISABLE'
enable = 'ENABLE'
regenerate = 'REGENERATE'
def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if ((batch is None) and ((success is not None) or (failure is not None))):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {}
enums = {}
request = FacebookRequest(node_id=self['id'], method='DELETE', endpoint='/', api=self._api, param_checker=TypeChecker(param_types, enums), target_class=AbstractCrudObject, api_type='NODE', response_parser=ObjectParser(reuse_object=self))
request.add_params(params)
request.add_fields(fields)
if (batch is not None):
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if ((batch is None) and ((success is not None) or (failure is not None))):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {'target_token': 'string'}
enums = {}
request = FacebookRequest(node_id=self['id'], method='GET', endpoint='/', api=self._api, param_checker=TypeChecker(param_types, enums), target_class=LiveVideo, api_type='NODE', response_parser=ObjectParser(reuse_object=self))
request.add_params(params)
request.add_fields(fields)
if (batch is not None):
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_update(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """Update this LiveVideo via a POST request on its own endpoint."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    # Declared Graph API types for every updatable parameter.
    param_types = {
        'allow_bm_crossposting': 'bool',
        'content_tags': 'list<string>',
        'cross_share_to_group_ids': 'list<string>',
        'crossposting_actions': 'list<map>',
        'custom_labels': 'list<string>',
        'description': 'string',
        'direct_share_status': 'unsigned int',
        'embeddable': 'bool',
        'end_live_video': 'bool',
        'event_params': 'Object',
        'is_audio_only': 'bool',
        'is_manual_mode': 'bool',
        'live_comment_moderation_setting': 'list<live_comment_moderation_setting_enum>',
        'master_ingest_stream_id': 'string',
        'og_icon_id': 'string',
        'og_phrase': 'string',
        'persistent_stream_key_status': 'persistent_stream_key_status_enum',
        'place': 'Object',
        'planned_start_time': 'datetime',
        'privacy': 'string',
        'published': 'bool',
        'schedule_custom_profile_image': 'file',
        'schedule_feed_background_image': 'file',
        'sponsor_id': 'string',
        'sponsor_relationship': 'unsigned int',
        'status': 'status_enum',
        'stream_type': 'stream_type_enum',
        'tags': 'list<int>',
        'targeting': 'Object',
        'title': 'string',
    }
    enums = {
        'live_comment_moderation_setting_enum': LiveVideo.LiveCommentModerationSetting.__dict__.values(),
        'persistent_stream_key_status_enum': LiveVideo.PersistentStreamKeyStatus.__dict__.values(),
        'status_enum': LiveVideo.Status.__dict__.values(),
        'stream_type_enum': LiveVideo.StreamType.__dict__.values(),
    }
    request = FacebookRequest(
        node_id=self['id'],
        method='POST',
        endpoint='/',
        api=self._api,
        param_checker=TypeChecker(param_types, enums),
        target_class=LiveVideo,
        api_type='NODE',
        response_parser=ObjectParser(reuse_object=self),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_blocked_users(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List users blocked from this live video (GET /blocked_users)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.user import User
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/blocked_users',
        api=self._api,
        param_checker=TypeChecker({'uid': 'string'}, {}),
        target_class=User,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=User, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_comments(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List comments on this live video (GET /comments)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.comment import Comment
    param_types = {
        'filter': 'filter_enum',
        'live_filter': 'live_filter_enum',
        'order': 'order_enum',
        'since': 'datetime',
    }
    enums = {
        'filter_enum': Comment.Filter.__dict__.values(),
        'live_filter_enum': Comment.LiveFilter.__dict__.values(),
        'order_enum': Comment.Order.__dict__.values(),
    }
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/comments',
        api=self._api,
        param_checker=TypeChecker(param_types, enums),
        target_class=Comment,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=Comment, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_crosspost_shared_pages(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List pages this live video is crosspost-shared with (GET /crosspost_shared_pages)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.page import Page
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/crosspost_shared_pages',
        api=self._api,
        param_checker=TypeChecker({}, {}),
        target_class=Page,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=Page, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_crossposted_broadcasts(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List crossposted broadcasts of this live video (GET /crossposted_broadcasts)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/crossposted_broadcasts',
        api=self._api,
        param_checker=TypeChecker({}, {}),
        target_class=LiveVideo,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=LiveVideo, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_errors(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List streaming errors for this live video (GET /errors)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.livevideoerror import LiveVideoError
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/errors',
        api=self._api,
        param_checker=TypeChecker({}, {}),
        target_class=LiveVideoError,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=LiveVideoError, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def create_input_stream(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """Create an ingest input stream for this live video (POST /input_streams)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.livevideoinputstream import LiveVideoInputStream
    request = FacebookRequest(
        node_id=self['id'],
        method='POST',
        endpoint='/input_streams',
        api=self._api,
        param_checker=TypeChecker({}, {}),
        target_class=LiveVideoInputStream,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=LiveVideoInputStream, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_polls(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List polls attached to this live video (GET /polls)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.videopoll import VideoPoll
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/polls',
        api=self._api,
        param_checker=TypeChecker({}, {}),
        target_class=VideoPoll,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=VideoPoll, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def create_poll(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """Create a poll on this live video (POST /polls)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.videopoll import VideoPoll
    param_types = {
        'close_after_voting': 'bool',
        'correct_option': 'unsigned int',
        'default_open': 'bool',
        'options': 'list<string>',
        'question': 'string',
        'show_gradient': 'bool',
        'show_results': 'bool',
    }
    request = FacebookRequest(
        node_id=self['id'],
        method='POST',
        endpoint='/polls',
        api=self._api,
        param_checker=TypeChecker(param_types, {}),
        target_class=VideoPoll,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=VideoPoll, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
def get_reactions(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
    """List reaction profiles for this live video (GET /reactions)."""
    from facebook_business.utils import api_utils
    # Batch callbacks only fire when the request is routed through a batch.
    if batch is None and (success is not None or failure is not None):
        api_utils.warning('`success` and `failure` callback only work for batch call.')
    from facebook_business.adobjects.profile import Profile
    request = FacebookRequest(
        node_id=self['id'],
        method='GET',
        endpoint='/reactions',
        api=self._api,
        param_checker=TypeChecker({'type': 'type_enum'}, {'type_enum': Profile.Type.__dict__.values()}),
        target_class=Profile,
        api_type='EDGE',
        response_parser=ObjectParser(target_class=Profile, api=self._api),
    )
    request.add_params(params)
    request.add_fields(fields)
    if batch is not None:
        request.add_to_batch(batch, success=success, failure=failure)
        return request
    if pending:
        return request
    self.assure_call()
    return request.execute()
# Mapping of API field name -> declared Graph API type, used when parsing responses.
_field_types = {'ad_break_config': 'LiveVideoAdBreakConfig', 'ad_break_failure_reason': 'string', 'broadcast_start_time': 'datetime', 'copyright': 'VideoCopyright', 'creation_time': 'datetime', 'dash_ingest_url': 'string', 'dash_preview_url': 'string', 'description': 'string', 'embed_html': 'Object', 'from': 'Object', 'id': 'string', 'ingest_streams': 'list<LiveVideoInputStream>', 'is_manual_mode': 'bool', 'is_reference_only': 'bool', 'live_views': 'unsigned int', 'overlay_url': 'string', 'permalink_url': 'string', 'planned_start_time': 'datetime', 'recommended_encoder_settings': 'LiveVideoRecommendedEncoderSettings', 'seconds_left': 'int', 'secure_stream_url': 'string', 'status': 'string', 'stream_url': 'string', 'targeting': 'LiveVideoTargeting', 'title': 'string', 'total_views': 'string', 'video': 'AdVideo'}
@classmethod
def _get_field_enum_info(cls):
    """Return a mapping of enum field names to their allowed values.

    NOTE(review): the method takes ``cls`` but carried no ``@classmethod``
    decorator, so calls such as ``LiveVideo._get_field_enum_info()`` would
    fail with a missing-argument TypeError; the decorator is restored here
    (this matches the generated facebook_business SDK pattern).
    """
    field_enum_info = {}
    field_enum_info['Projection'] = LiveVideo.Projection.__dict__.values()
    field_enum_info['SpatialAudioFormat'] = LiveVideo.SpatialAudioFormat.__dict__.values()
    field_enum_info['Status'] = LiveVideo.Status.__dict__.values()
    field_enum_info['StereoscopicMode'] = LiveVideo.StereoscopicMode.__dict__.values()
    field_enum_info['StreamType'] = LiveVideo.StreamType.__dict__.values()
    field_enum_info['BroadcastStatus'] = LiveVideo.BroadcastStatus.__dict__.values()
    field_enum_info['Source'] = LiveVideo.Source.__dict__.values()
    field_enum_info['LiveCommentModerationSetting'] = LiveVideo.LiveCommentModerationSetting.__dict__.values()
    field_enum_info['PersistentStreamKeyStatus'] = LiveVideo.PersistentStreamKeyStatus.__dict__.values()
    return field_enum_info
class OozieWorkflowParser(object):
    """Parses Oozie ``workflow.xml`` files (including nested sub-workflows) and
    converts them into DAG dictionaries for the downstream builder.

    NOTE(review): several helpers take no ``self`` but were invoked through
    ``self.<name>(...)`` (e.g. ``self.partition_actions(...)``,
    ``self._fork_nodes(wf)``, ``self._build_graph(...)``), which would pass the
    instance as the first positional argument and raise TypeError.  They are
    marked ``@staticmethod`` here, restoring the intended call semantics
    without changing any caller.
    """

    def __init__(self, file_fetcher, prune_forks=False, prune_joins=False, production=False, debug=False):
        # file_fetcher must expose fetch_file_content(path) -> XML string.
        self.file_fetcher = file_fetcher
        self.prune_forks = prune_forks
        self.prune_joins = prune_joins
        self.production = production
        self.debug = debug

    def load_workflow(self, primary_workflow_name, args):
        """Parse the named workflow and return a configured Workflow object."""
        oozie_config = plugins.manager.get_oozie_config(args)
        cluster_config = oozie_config.cluster_config()
        (primary_dag, sub_dags) = self._parse_and_build_dags(primary_workflow_name, cluster_config, oozie_config)
        if self._requires_cluster_resource(primary_dag, sub_dags, cluster_config):
            primary_dag = self._insert_cluster_resource(primary_dag, cluster_config)
        primary_dag['compatibility_version'] = VERSION_STRING
        # Optional plugin-provided settings are copied over only when present.
        if oozie_config.plugin_config():
            primary_dag['plugin_config'] = oozie_config.plugin_config()
        if oozie_config.default_task_args():
            primary_dag['default_task_args'] = oozie_config.default_task_args()
        if oozie_config.dag_args():
            primary_dag['dag_args'] = oozie_config.dag_args()
        if oozie_config.dag_imports():
            primary_dag['imports'] = oozie_config.dag_imports()
        primary_dag['name'] = (args.dag_name or primary_dag['name'])
        wf = Workflow(primary=primary_dag, secondary=list(sub_dags.values()), filename=('oozie:' + primary_workflow_name))
        if (args.prune_nodes or args.only_nodes):
            wf = wf.prune(prune_nodes=args.prune_nodes, only_nodes=args.only_nodes)
        if oozie_config.upstream_operators():
            primary_copy = wf.primary.copy()
            primary_copy['before'] = oozie_config.upstream_operators()
            wf = Workflow(primary=primary_copy, secondary=wf.secondary, filename=('oozie:' + primary_workflow_name))
        return wf

    @staticmethod
    def partition_actions(data):
        """Split a workflow's actions into plain operators and sub-workflows."""
        operators = [ac for ac in data['action'] if (not isinstance(ac, OozieSubWorkflowBuilderBase))]
        sub_workflows = [ac for ac in data['action'] if isinstance(ac, OozieSubWorkflowBuilderBase)]
        logger.debug('Partitioned actions for workflow %s. Operators are %s, sub_workflows are %s', data['name'], [operator.name for operator in operators], [sub_workflow.name for sub_workflow in sub_workflows])
        assert (len((operators + sub_workflows)) == len(data['action'])), 'Some actions were not partitioned!'
        return {'operators': operators, 'sub_workflows': sub_workflows}

    def _parse_workflow(self, filename, cluster_config, oozie_config):
        """Parse a single workflow.xml into a dict with partitioned actions."""
        parsed = xmltodict.parse(self.file_fetcher.fetch_file_content(filename))
        loaded = OozieWorkflowSchema(context={'cluster_config': cluster_config, 'oozie_plugin': oozie_config, 'macro_translator': JspMacroTranslator(oozie_config.jsp_macros()), 'production': self.production}).load(parsed)
        if loaded.errors:
            raise Exception('Errors parsing file {}: {}'.format(filename, loaded.errors))
        data_copy = loaded.data.copy()
        data_copy.update(self.partition_actions(loaded.data))
        return data_copy

    @staticmethod
    def _sub_workflow_target_name(sub_workflow_action):
        # app_path looks like scheme://host/app/<target...>; drop the first
        # three components to recover the target workflow name.
        return '/'.join(sub_workflow_action['app_path'].split('/')[3:])

    def _parse_all(self, primary_workflow_path_name, cluster_config, oozie_config):
        """Parse the primary workflow and, transitively, every sub-workflow."""
        def _build_workflow_path(name):
            return os.path.join(name, 'workflow.xml')
        path = _build_workflow_path(primary_workflow_path_name)
        logger.debug('parsing primary workflow from path %s', path)
        primary = self._parse_workflow(path, cluster_config, oozie_config)
        sub_workflows = {}
        parsed_sub_workflow_targets = set()
        q = [wf for wf in primary['sub_workflows']]
        while q:
            item = q.pop()
            swf_path_name = self._sub_workflow_target_name(item.get_action())
            if (swf_path_name in parsed_sub_workflow_targets):
                # Already parsed; also guards against sub-workflow cycles.
                continue
            parsed_sub_workflow_targets.add(swf_path_name)
            swf_path = _build_workflow_path(swf_path_name)
            logger.debug('parsing sub workflow from path %s', swf_path)
            swf = self._parse_workflow(swf_path, cluster_config, oozie_config)
            sub_workflows[swf['name']] = swf
            item.set_target_name(swf['name'])
            q += [wf for wf in swf['sub_workflows']]
        return (primary, sub_workflows)

    def _flow_control_nodes(self, wf):
        """Map start/end/kill/decision node names to (downstream names, node)."""
        result = {'start': ([wf['start']['to']], wf['start']), wf['end'].get('name', 'end'): ([], wf['end'])}
        if wf.get('kill'):
            result[wf['kill'].get('name', 'kill')] = ([], wf['kill'])
        for decision in wf['decision']:
            # Decision nodes are unsupported; --debug downgrades the failure to
            # a warning and includes every branch.
            if (not self.debug):
                raise Exception('decision node found: `{}` not supported right now! Set --debug to build without raising this error, but only at your own risk!'.format(decision['name']))
            logger.warning('decision node found: `%s`. Including all downstream branches because --debug was specified', decision['name'])
            cases = [case['to'] for case in decision['switch']['case']]
            default = decision['switch']['default']['to']
            result[decision['name']] = ((cases + [default]), decision)
        return result

    @staticmethod
    def _fork_nodes(wf):
        return {fork['name']: ([path['start'] for path in fork['path']], fork) for fork in wf['fork']}

    @staticmethod
    def _join_nodes(wf):
        return {join['name']: ([join['to']], join) for join in wf['join']}

    @staticmethod
    def _operator_nodes(wf):
        return {op.name: ([op.ok_to], op) for op in wf['operators']}

    @staticmethod
    def _sub_workflow_nodes(wf):
        return {swf.name: ([swf.ok_to], swf) for swf in wf['sub_workflows']}

    def _all_nodes(self, wf):
        """All workflow nodes keyed by name -> (downstream names, node)."""
        result = {}
        result.update(self._flow_control_nodes(wf))
        result.update(self._fork_nodes(wf))
        result.update(self._join_nodes(wf))
        result.update(self._operator_nodes(wf))
        result.update(self._sub_workflow_nodes(wf))
        return result

    @staticmethod
    def _build_graph(graph_name, nodes):
        """Build a DiGraph from the node map and verify it is a DAG."""
        graph = nx.DiGraph()
        logger.debug('Creating graph %s from nodes %s', graph_name, nodes.keys())
        graph.add_nodes_from(nodes.keys())
        graph.add_edges_from(((upstream_name, downstream_name) for (upstream_name, (downstream_node_names, node)) in six.iteritems(nodes) for downstream_name in downstream_node_names))
        # Disconnected components are suspicious but not fatal; cycles are.
        if (not nx.algorithms.components.is_weakly_connected(graph)):
            components = list(nx.algorithms.components.weakly_connected_components(graph))
            logger.warning('Multiple connected components found for graph `%s`: %s', graph_name, components)
        if (not nx.algorithms.dag.is_directed_acyclic_graph(graph)):
            raise exceptions.CyclicWorkflowException('Invalid graph `{}`: not a DAG!'.format(graph_name))
        logger.debug('Successfully created graph %s with nodes %s', graph_name, graph.nodes)
        return graph

    def _prune_flow_control_nodes(self, wf, graph):
        """Drop flow-control (and optionally fork/join) nodes from the graph."""
        prune_nodes = list(self._flow_control_nodes(wf).keys())
        if self.prune_forks:
            prune_nodes += list(self._fork_nodes(wf).keys())
        if self.prune_joins:
            prune_nodes += list(self._join_nodes(wf).keys())
        return _GraphUtil.prune_nodes(graph, nodes=prune_nodes)

    def _build_dag(self, wf):
        """Convert a parsed workflow into a DAG dict (operators + sub_dags)."""
        graph = self._build_graph(wf['name'], self._all_nodes(wf))
        self._prune_flow_control_nodes(wf, graph)
        operator_nodes = self._operator_nodes(wf)
        fork_join_nodes = self._fork_nodes(wf)
        fork_join_nodes.update(self._join_nodes(wf))
        sub_workflow_nodes = self._sub_workflow_nodes(wf)
        operators = []
        sub_workflows = []
        for node_name in nx.algorithms.dag.topological_sort(graph):
            upstream = list(graph.predecessors(node_name))
            base_operator = ({'upstream_dependencies': upstream} if upstream else {})
            node = fork_join_nodes.get(node_name)
            if node:
                base_operator.update(node[1]['operator'])
                base_operator['name'] = node[1]['name']
                operators.append(base_operator)
                continue
            node = operator_nodes.get(node_name)
            if node:
                base_operator.update(node[1].get_operator())
                operators.append(base_operator)
                continue
            swf = sub_workflow_nodes.get(node_name)
            if (not swf):
                raise Exception('Unrecognized node name: `{}` (swf nodes are {})'.format(node_name, list(sub_workflow_nodes.keys())))
            base_operator.update(swf[1].get_operator())
            sub_workflows.append(base_operator)
        result = {'name': wf['name']}
        if operators:
            result['operators'] = operators
        if sub_workflows:
            result['sub_dags'] = sub_workflows
        return result

    def _parse_and_build_dags(self, primary_workflow_name, cluster_config, oozie_plugin):
        """Parse everything, build each DAG, then apply resource fixups."""
        (primary, sub) = self._parse_all(primary_workflow_name, cluster_config, oozie_plugin)
        primary_dag = self._build_dag(primary)
        sub_dags = {}
        for (name, swf) in six.iteritems(sub):
            sub_dags[name] = self._build_dag(swf)
        return self._fixup_dags(primary_dag, sub_dags, cluster_config)

    def _fixup_dags(self, primary_dag, sub_dags, cluster_config):
        """Annotate sub-dag references with the cluster resources they need."""
        fixedup_primary_dag = self._fixup_subdags(primary_dag, sub_dags, cluster_config)
        fixedup_sub_dags = {dag['name']: dag for dag in [self._fixup_subdags(sub_dag, sub_dags, cluster_config) for sub_dag in sub_dags.values()]}
        return (fixedup_primary_dag, fixedup_sub_dags)

    def _fixup_subdags(self, dag, all_sub_dags, cluster_config):
        """Return a copy of ``dag`` whose sub_dag entries declare resources."""
        subdags = dag.get('sub_dags')
        if (not subdags):
            return dag
        copy = dag.copy()
        copy['sub_dags'] = [sd.copy() for sd in subdags]
        for sd in copy['sub_dags']:
            target_subdag = all_sub_dags.get(sd['target'])
            if self._requires_cluster_resource(target_subdag, all_sub_dags, cluster_config):
                sd['requires_resources'] = [cluster_config.managed_resource['name']]
        return copy

    def _requires_cluster_resource(self, dag, sub_dags, cluster_config):
        """True if the dag or any nested sub-dag needs the managed resource."""
        if (not cluster_config.managed_resource):
            return False
        resource_name = cluster_config.managed_resource['name']
        return (any(((resource_name in op.get('requires_resources', [])) for op in dag.get('operators', []))) or any((self._requires_cluster_resource(sub_dags[sd['target']], sub_dags, cluster_config) for sd in dag.get('sub_dags', []))))

    @staticmethod
    def _insert_cluster_resource(wf, cluster_config):
        """Return a copy of ``wf`` declaring the managed cluster resource."""
        copy = wf.copy()
        copy['resources'] = [cluster_config.managed_resource]
        return copy
class DeclStatMixB(SimpleEntity, StatusMixin):
    """Declarative test model mixing StatusMixin into a SimpleEntity subclass."""
    __tablename__ = 'DeclStatMixBs'
    __mapper_args__ = {'polymorphic_identity': 'DeclStatMixB'}
    # Joined-table inheritance: shares the primary key with SimpleEntities.
    b_id = Column('id', Integer, ForeignKey('SimpleEntities.id'), primary_key=True)

    def __init__(self, **kwargs):
        super(DeclStatMixB, self).__init__(**kwargs)
        # StatusMixin is initialized explicitly rather than via cooperative
        # super() -- presumably its __init__ is not in the super() chain here;
        # confirm against the mixin's implementation.
        StatusMixin.__init__(self, **kwargs)
def test_create_user_with_ldap_info_create_proper_stalker_user_with_groups(ldap_server, create_test_db):
    """create_user_with_ldap_info() should give the new user its LDAP groups."""
    login = 'pipeline'
    password = 'password'
    from anima import defaults
    assert defaults.ldap_base_dn != ''
    expected_group_names = ['admins', 'Normal Users', 'Power Users']
    from stalker.db.session import DBSession
    from stalker import Group
    # Ensure every expected Stalker group exists before creating the user.
    for group_name in expected_group_names:
        if not Group.query.filter(Group.name == group_name).first():
            DBSession.add(Group(name=group_name))
    DBSession.commit()
    from ldap3 import Server, Connection
    ldap_connection = Connection(Server(defaults.ldap_server_address), login, password)
    ldap_connection.bind()
    from anima.utils import create_user_with_ldap_info
    new_user = create_user_with_ldap_info(ldap_connection, defaults.ldap_base_dn, login, password)
    assert len(new_user.groups) > 0
    assert len(new_user.groups) == 2
    assert new_user.groups[0].name == 'admins'
    assert new_user.groups[1].name == 'Power Users'
    assert new_user.groups[0].name in expected_group_names
    assert new_user.groups[1].name in expected_group_names
def test_force_defaults_radio():
    """A pre-checked radio loses its checked attribute when defaults are empty."""
    source = '<input type="radio" name="radio-1" class="my_radio" checked="checked" value="cb">'
    expected = '<input type="radio" name="radio-1" class="my_radio" value="cb">'
    actual = htmlfill.render(source, defaults=dict())
    assert expected == actual, actual
class Migration(migrations.Migration):
    """Initial migration: creates the Snippet model."""
    # First migration of this app.
    initial = True
    # No prior migrations to depend on.
    dependencies = []
    # Snippet: auto primary key, required title, optional language code.
    operations = [migrations.CreateModel(name='Snippet', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100)), ('language', models.CharField(max_length=20, null=True))])]
class RestrictedTypeTransformer(TypeTransformer[T], ABC):
    """Transformer for Python types that are currently disallowed in Flyte.

    Every conversion entry point raises RestrictedTypeError so that any
    attempt to move the restricted type across the type boundary fails loudly.
    """

    def __init__(self, name: str, t: Type[T]):
        super().__init__(name, t)

    def _restricted_error(self) -> RestrictedTypeError:
        # Single place that builds the identical error for all entry points.
        return RestrictedTypeError(f'Transformer for type {self.python_type} is restricted currently')

    def get_literal_type(self, t: Optional[Type[T]] = None) -> LiteralType:
        raise self._restricted_error()

    def to_literal(self, ctx: FlyteContext, python_val: T, python_type: Type[T], expected: LiteralType) -> Literal:
        raise self._restricted_error()

    def to_python_value(self, ctx: FlyteContext, lv: Literal, expected_python_type: Type[T]) -> T:
        raise self._restricted_error()
class TestProperties(EvenniaTestCase):
    """Exercises AttributeProperty / TagProperty behavior on a test object.

    NOTE(review): between two methods the original contained the bare
    expression statement ``('TODO: Needs more research')`` -- a no-op string
    literal, most likely the reason text of a stripped skip-style decorator.
    It is preserved below as a comment so the class body contains no stray
    statements.
    """

    def setUp(self):
        self.obj: TestObjectPropertiesClass = create.create_object(TestObjectPropertiesClass, key='testobj')

    def tearDown(self):
        self.obj.delete()

    def test_attribute_properties(self):
        """Attribute properties are reachable via .db, .attributes and attrs."""
        obj = self.obj
        self.assertEqual(obj.db.attr1, 'attr1')
        self.assertEqual(obj.attributes.get('attr1'), 'attr1')
        self.assertEqual(obj.attr1, 'attr1')
        # attr2 lives under a category, so plain .db access misses it.
        self.assertEqual(obj.attributes.get('attr2', category='attrcategory'), 'attr2')
        self.assertEqual(obj.db.attr2, None)
        self.assertEqual(obj.attr2, 'attr2')
        # attr3 is lazy: not stored until read/written through the property.
        self.assertEqual(obj.db.attr3, None)
        self.assertFalse(obj.attributes.has('attr3'))
        self.assertEqual(obj.attr3, 'attr3')
        self.assertEqual(obj.db.attr4, 'attr4')
        self.assertEqual(obj.attributes.get('attr4'), 'attr4')
        self.assertEqual(obj.attr4, 'attr4')
        obj.attr3 = 'attr3b'
        self.assertEqual(obj.db.attr3, 'attr3b')
        self.assertTrue(obj.attributes.has('attr3'))

    def test_tag_properties(self):
        """Tag/alias/permission properties are created on the object."""
        obj = self.obj
        self.assertTrue(obj.tags.has('tag1'))
        self.assertTrue(obj.tags.has('tag2', category='tagcategory'))
        self.assertTrue(obj.tags.has('tag3'))
        self.assertTrue(obj.aliases.has('testalias'))
        self.assertTrue(obj.permissions.has('testperm'))
        self.assertFalse(hasattr(obj, 'property_initialized'))

    def test_tag_category_properties(self):
        """TagCategory properties expose all tags of their category."""
        obj = self.obj
        self.assertFalse(obj.tags.has('category_tag1'))
        self.assertTrue(obj.tags.has('category_tag1', category='tagcategory1'))
        self.assertTrue(obj.tags.has('category_tag1', category='tagcategory2'))
        self.assertTrue(obj.tags.has('category_tag2', category='tagcategory2'))
        self.assertTrue(obj.tags.has('category_tag3', category='tagcategory2'))
        self.assertEqual(obj.tagcategory1, ['category_tag1'])
        self.assertEqual(set(obj.tagcategory2), set(['category_tag1', 'category_tag2', 'category_tag3']))

    def test_tag_category_properties_external_modification(self):
        """External tag changes are reflected; deleting resets to defaults."""
        obj = self.obj
        self.assertEqual(obj.tagcategory1, ['category_tag1'])
        self.assertEqual(set(obj.tagcategory2), set(['category_tag1', 'category_tag2', 'category_tag3']))
        obj.tags.add('category_tag2', category='tagcategory1')
        self.assertEqual(set(obj.tags.get(category='tagcategory1')), set(['category_tag1', 'category_tag2']))
        self.assertEqual(set(obj.tagcategory1), set(['category_tag1', 'category_tag2']))
        obj.tags.add('category_tag4', category='tagcategory2')
        obj.tags.remove('category_tag3', category='tagcategory2')
        self.assertEqual(set(obj.tags.get(category='tagcategory2', return_list=True)), set(['category_tag1', 'category_tag2', 'category_tag4']))
        self.assertEqual(set(obj.tagcategory2), set(['category_tag1', 'category_tag2', 'category_tag3', 'category_tag4']))
        # Deleting the property clears stored tags, then re-reading restores defaults.
        del obj.tagcategory1
        self.assertEqual(obj.tags.get(category='tagcategory1', return_list=True), [])
        self.assertEqual(obj.tagcategory1, ['category_tag1'])
        del obj.tagcategory2
        self.assertEqual(obj.tags.get(category='tagcategory2', return_list=True), [])
        self.assertEqual(set(obj.tagcategory2), set(['category_tag1', 'category_tag2', 'category_tag3']))

    def test_object_awareness(self):
        """at_*-style hooks on the object adjust property values as expected."""
        obj = self.obj
        self.assertEqual(obj.cusattr, 10)
        self.assertEqual(obj.settest, 5)
        obj.awaretest = 10
        self.assertEqual(obj.cusattr, 15)
        obj.cusattr = 10
        self.assertEqual(obj.cusattr, 20)
        self.assertEqual(obj.settest, 10)
        obj.cusattr += 10
        self.assertEqual(obj.attributes.get('cusattr'), 30)
        self.assertEqual(obj.settest, 30)
        self.assertEqual(obj.cusattr, 40)
        obj.awaretest = 0
        obj.cusattr += 20
        self.assertEqual(obj.attributes.get('cusattr'), 50)
        self.assertEqual(obj.settest, 50)
        self.assertEqual(obj.cusattr, 50)
        del obj.cusattr
        self.assertEqual(obj.cusattr, 5)
        self.assertEqual(obj.settest, 5)

    # TODO: Needs more research
    # (was a stray no-op string literal in the class body, likely the reason
    # text of a stripped skip-decorator for the test below)
    def test_stored_object_queries(self):
        """Objects stored in attribute properties survive cache resets and queries."""
        obj1 = create.create_object(TestObjectPropertiesClass, key='obj1')
        obj2 = create.create_object(TestObjectPropertiesClass, key='obj2')
        obj1.attr1 = obj2
        self.assertEqual(obj1.attr1, obj2)
        self.assertEqual(obj1.attributes.get('attr1'), obj2)
        obj1.attributes.reset_cache()
        self.assertEqual(obj1.attributes.get('attr1'), obj2)
        self.assertIn(obj1, TestObjectPropertiesClass.objects.get_by_attribute('attr1'))
        self.assertEqual(list(TestObjectPropertiesClass.objects.get_by_attribute('attr1', value=obj2)), [obj1])
        query = TestObjectPropertiesClass.objects.filter(db_attributes__db_key='attr1', db_attributes__db_value=obj2)
        self.assertEqual(list(query), [obj1])
        obj1.delete()
        obj2.delete()
def test_cool_all_multi_core_all_multichr_chromosome():
    """Differential TAD on multi-chromosome cool matrices, all modes, 4 threads."""
    out_prefix = NamedTemporaryFile(prefix='differential_tad', delete=True)
    domains_bed = ROOT + 'untreated_R1_domains_chr1_chr2.bed'
    args = '--targetMatrix {} --controlMatrix {} --tadDomains {} -t {} -o {} -m {} -mr {}'.format(
        ROOT + 'GSM2644945_Untreated-R1.100000_chr1_chr2.cool',
        ROOT + 'GSM2644947_Auxin2days-R1.100000_chr1_chr2.cool',
        domains_bed, 4, out_prefix.name, 'all', 'all').split()
    compute(hicDifferentialTAD.main, args, 5)
    with open(domains_bed, 'r') as handle:
        expected_tads = len(handle.readlines())
    accepted_path = out_prefix.name + '_accepted.diff_tad'
    rejected_path = out_prefix.name + '_rejected.diff_tad'
    with open(accepted_path, 'r') as handle:
        written_tads = len(handle.readlines())
    with open(rejected_path, 'r') as handle:
        written_tads += len(handle.readlines())
    # Discount the non-record (header) lines the two output files carry.
    written_tads -= 8
    assert expected_tads == written_tads
    assert expected_tads == all_tads_present(domains_bed, accepted_path, rejected_path)
    assert are_files_equal(accepted_path, ROOT + 'multichromosome_accepted.diff_tad', delta=2, skip=4)
    assert are_files_equal(rejected_path, ROOT + 'multichromosome_rejected.diff_tad', delta=2, skip=4)
def encode(ffrom, fto, data_segment):
    """Build a patch describing how ``ffrom`` differs from ``fto``.

    Both inputs are read fully into memory, disassembled over the ranges
    described by ``data_segment``, and a patch block is produced per category
    (data pointers, code pointers, bw/bl/ldr/ldr.w instruction tables).

    Returns:
        (ffrom, fto, patch): the two inputs re-wrapped as BytesIO streams and
        the concatenated patch bytes.

    NOTE(review): assumes disassemble() returns, in this order, the bw, bl,
    ldr and ldr.w tables plus data/code pointer tables -- confirm against its
    definition.
    """
    ffrom = BytesIO(file_read(ffrom))
    fto = BytesIO(file_read(fto))
    # Disassemble both images over their respective segment ranges.
    (from_bw, from_bl, from_ldr, from_ldr_w, from_data_pointers, from_code_pointers) = disassemble(ffrom, data_segment.from_data_offset_begin, data_segment.from_data_offset_end, data_segment.from_data_begin, data_segment.from_data_end, data_segment.from_code_begin, data_segment.from_code_end)
    (to_bw, to_bl, to_ldr, to_ldr_w, to_data_pointers, to_code_pointers) = disassemble(fto, data_segment.to_data_offset_begin, data_segment.to_data_offset_end, data_segment.to_data_begin, data_segment.to_data_end, data_segment.to_code_begin, data_segment.to_code_end)
    # Pointer tables each produce an extra standalone header plus a (header, data) pair.
    (data_pointers_header, data_pointers) = create_data_pointers_patch_block(ffrom, fto, data_segment.from_data_offset_begin, data_segment.from_data_begin, data_segment.from_data_end, from_data_pointers, to_data_pointers)
    (code_pointers_header, code_pointers) = create_code_pointers_patch_block(ffrom, fto, data_segment.from_code_begin, data_segment.from_code_end, from_code_pointers, to_code_pointers)
    # One (header, data) patch block per instruction category.
    bw = create_patch_block(ffrom, fto, from_bw, to_bw)
    bl = create_patch_block(ffrom, fto, from_bl, to_bl)
    ldr = create_patch_block(ffrom, fto, from_ldr, to_ldr)
    ldr_w = create_patch_block(ffrom, fto, from_ldr_w, to_ldr_w)
    # zip() over the six (header, data) pairs yields (all headers, all datas);
    # the patch lays out every header first, then every data section.
    (headers, datas) = zip(data_pointers, code_pointers, bw, bl, ldr, ldr_w)
    patch = b''.join((([data_pointers_header, code_pointers_header] + list(headers)) + list(datas)))
    return (ffrom, fto, patch)
def mapear_xml(xml_data):
    """Flatten an XML document into a {dotted.tag.path: text} mapping.

    Elements with no text (or empty text) are skipped; duplicate paths keep
    the value encountered last during the depth-first walk.
    """
    mapeamento = {}

    def _walk(node, prefix=''):
        caminho = f'{prefix}.{node.tag}' if prefix else node.tag
        if node.text:
            mapeamento[caminho] = node.text
        for filho in node:
            _walk(filho, caminho)

    _walk(ET.fromstring(xml_data))
    return mapeamento
def _get_prescribing_entries(bnf_code_prefixes, orgs, org_type, date=None):
    """Yield prescribing totals per (org, month) for the given BNF code prefixes.

    Each yielded dict has keys items/quantity/actual_cost/date/row_id/row_name;
    practice-level entries additionally carry 'ccg' and 'setting'. Cells with
    zero items are skipped. Yields nothing when no codes match. Raises
    BadDate when `date` is supplied but absent from the data.
    """
    db = get_db()
    (items_matrix, quantity_matrix, actual_cost_matrix) = _get_prescribing_for_codes(db, bnf_code_prefixes)
    # No matching BNF codes: bail out, yielding nothing.
    if (items_matrix is None):
        return
    # Collapse practice-level rows into the requested org grouping.
    group_by_org = get_row_grouper(org_type)
    items_matrix = group_by_org.sum(items_matrix)
    quantity_matrix = group_by_org.sum(quantity_matrix)
    actual_cost_matrix = group_by_org.sum(actual_cost_matrix)
    # Keep only orgs that actually have a row in the grouped matrices.
    org_offsets = [(org, group_by_org.offsets[org.pk]) for org in orgs if (org.pk in group_by_org.offsets)]
    if date:
        try:
            date_offsets = [(date, db.date_offsets[date])]
        except KeyError:
            raise BadDate(date)
    else:
        date_offsets = sorted(db.date_offsets.items())
    for (date, col_offset) in date_offsets:
        for (org, row_offset) in org_offsets:
            index = (row_offset, col_offset)
            items = items_matrix[index]
            # Zero items means no prescribing for this org/month cell.
            if (items == 0):
                continue
            entry = {'items': items, 'quantity': quantity_matrix[index], 'actual_cost': round(actual_cost_matrix[index], 2), 'date': date, 'row_id': org.pk, 'row_name': org.name}
            if (org_type == 'practice'):
                entry['ccg'] = org.ccg_id
                entry['setting'] = org.setting
            (yield entry)
def test_select_empty_option_value_selected():
    """An empty-string default must mark the empty-valued <option> as selected."""
    source = '\n<select name="select-1" class="my_select">\n <option value="">this is option-1</option>\n</select>\n'
    expected = '\n<select name="select-1" class="my_select">\n <option value="" selected="selected">this is option-1</option>\n</select>\n'
    result = htmlfill.render(source, defaults={'select-1': ''})
    assert result == expected, result
def pytest_collection_modifyitems(session, config, items):
    """Skip Slate tests when running in complex mode without Slate complex support."""
    from firedrake.utils import complex_mode, SLATE_SUPPORTS_COMPLEX
    # Decide once; the condition does not depend on the individual item.
    skip_slate = complex_mode and not SLATE_SUPPORTS_COMPLEX
    for item in items:
        test_file = item.location[0]
        if test_file.startswith('tests/slate/') and skip_slate:
            item.add_marker(pytest.mark.skip(reason='Slate support for complex mode is missing'))
class BitrateDialog(xbmcgui.WindowXMLDialog):
    """Kodi dialog with a slider (control 3000) for picking a transcode bitrate.

    The chosen value is left in ``selected_transcode_value`` for the caller
    to read after the dialog closes.
    """
    # Slider control (id 3000) and its companion label (id 3030).
    slider_control = None
    bitrate_label = None
    # Bitrate (in Kb/s) the slider starts at, set by the caller before doModal.
    initial_bitrate_value = 0
    # Bitrate the user confirmed; stays 0 if the dialog was cancelled.
    selected_transcode_value = 0
    def __init__(self, *args, **kwargs):
        log.debug('BitrateDialog: __init__')
        # NOTE(review): initializes the WindowXML base rather than
        # WindowXMLDialog — common in Kodi addons, but confirm this is intended.
        xbmcgui.WindowXML.__init__(self, *args, **kwargs)
    def onInit(self):
        """Wire up the slider (range 400-15000, step 100) and the value label."""
        log.debug('ActionMenu: onInit')
        self.action_exitkeys_id = [10, 13]
        self.slider_control = self.getControl(3000)
        self.slider_control.setInt(self.initial_bitrate_value, 400, 100, 15000)
        self.bitrate_label = self.getControl(3030)
        bitrate_label_string = (str(self.slider_control.getInt()) + ' Kbs')
        self.bitrate_label.setLabel(bitrate_label_string)
    def onFocus(self, control_id):
        pass
    def doAction(self, action_id):
        pass
    def onMessage(self, message):
        log.debug('ActionMenu: onMessage: {0}', message)
    def onAction(self, action):
        """Refresh the label on every action; handle back/select actions.

        Action ids: 10 and 92 close without saving (presumably back/nav-back);
        7 (presumably select) stores the slider value first — TODO confirm
        against the xbmcgui action-id table.
        """
        bitrate_label_string = (str(self.slider_control.getInt()) + ' Kbs')
        self.bitrate_label.setLabel(bitrate_label_string)
        if (action.getId() == 10):
            self.close()
        elif (action.getId() == 92):
            self.close()
        elif (action.getId() == 7):
            self.selected_transcode_value = self.slider_control.getInt()
            self.close()
    def onClick(self, control_id):
        if (control_id == 3000):
            log.debug('ActionMenu: Selected Item: {0}', control_id)
def extractLitliberCom(item):
    """Map a litliber.com feed item to a release message via its series tags.

    Returns None for previews or untitled chapters, False when no known
    series tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    series_map = [
        ('Carnival Lights', 'Carnival Lights', 'oel'),
        ('Life Reconstructed', 'Life Reconstructed', 'oel'),
        ('North of Happenstance', 'North of Happenstance', 'oel'),
        ('Inside Edge', 'The Inside Edge', 'oel'),
    ]
    for tag, series_name, tl_type in series_map:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_rule_macro():
    """Form rules built with rules.Macro render macros from the template and libraries."""
    app, db, admin = setup()
    with app.app_context():
        Model1, _ = create_models(db)
        db.create_all()
        form_rules = (rules.Macro('test', arg='foobar'), rules.Macro('test_lib.another_test'))
        view = CustomModelView(Model1, db.session, create_template='macro.html', form_create_rules=form_rules)
        admin.add_view(view)
        client = app.test_client()
        response = client.get('/admin/model1/new/')
        assert response.status_code == 200
        body = response.data.decode('utf-8')
        # Both the local macro (with its argument) and the library macro rendered.
        assert 'Value = foobar' in body
        assert 'Hello another_test' in body
def test2():
    """Interactive smoke test of the low-rate RPC job consumer interface.

    Drops into pdb on RPC failure and into IPython at the end for manual
    inspection.
    """
    intf = RpcJobConsumerInternal(job_queue=None, run_flag=None, system_state=None, state_lock=None, test_mode=None, lowrate=True)
    intf.check_open_rpc_interface()
    j1 = intf.rpc_interface.check_ok()
    print('Job 1:', j1)
    # Bug fix: j2 must exist even when the call below raises, otherwise the
    # 'Job 2' print after the except block raised a NameError.
    j2 = None
    try:
        j2 = intf.rpc_interface.get_lowrate_job()
    except Exception as err:
        print('Hit exception: ', err)
        import pdb
        pdb.set_trace()
        print('What?: ', err)
    print('Job 2:', j2)
    print('Running IPython')
    import IPython
    IPython.embed()
# NOTE(review): the bare expressions in and around this class —
# `(bunnies)`, `(validate_param, 'limit')`, `(_some_fish)`, `(_some_fish.hook)` —
# look like hook decorators whose '@...' prefix (e.g. falcon's @before(...))
# was lost during extraction; as written they are no-op or NameError-raising
# expression statements. Confirm against the upstream source.
(bunnies)
class WrappedClassResource():
    # Hook object shared by the on_post/on_put responders below.
    _some_fish = Fish()
    # Placeholder responder entry (dict, not a callable).
    on_patch = {}
    (validate_param, 'limit')
    def on_get(self, req, resp, bunnies):
        # Record the request/response/params for later assertions.
        self._capture(req, resp, bunnies)
    (validate_param, 'limit')
    def on_head(self, req, resp, bunnies):
        self._capture(req, resp, bunnies)
    (_some_fish)
    def on_post(self, req, resp, fish, bunnies):
        # Also keeps the hook-injected `fish` argument.
        self._capture(req, resp, bunnies)
        self.fish = fish
    (_some_fish.hook)
    def on_put(self, req, resp, fish, bunnies):
        self._capture(req, resp, bunnies)
        self.fish = fish
    def _capture(self, req, resp, bunnies):
        # Stash call arguments on the instance so tests can inspect them.
        self.req = req
        self.resp = resp
        self.bunnies = bunnies
class CompiledRouterNode():
    """One segment of a URI template within the compiled routing tree.

    A segment is either literal, a simple single-field template (the whole
    segment is one '{field}'), or a complex template mixing literals and one
    or more fields (matched via a compiled regex).
    """
    def __init__(self, raw_segment, method_map=None, resource=None, uri_template=None):
        self.children = []
        self.raw_segment = raw_segment
        self.method_map = method_map
        self.resource = resource
        self.uri_template = uri_template
        self.is_var = False
        self.is_complex = False
        self.num_fields = 0
        self.var_name = None
        self.var_pattern = None
        # (field_name, converter_name, converter_argstr) triples for fields
        # that declared a converter.
        self.var_converter_map = []
        matches = list(_FIELD_PATTERN.finditer(raw_segment))
        if (not matches):
            self.is_var = False
        else:
            self.is_var = True
            self.num_fields = len(matches)
            for field in matches:
                if field.group('cname'):
                    self.var_converter_map.append((field.group('fname'), field.group('cname'), field.group('argstr')))
            if (matches[0].span() == (0, len(raw_segment))):
                # The single field spans the entire segment: simple variable.
                assert (len(matches) == 1)
                self.is_complex = False
                field = matches[0]
                self.var_name = field.group('fname')
            else:
                # Mixed literal/field segment: build a full-match regex where
                # regex metacharacters in the literal parts are escaped and
                # each field becomes a named capturing group.
                escaped_segment = re.sub('[\\.\\(\\)\\[\\]\\?\\$\\*\\+\\^\\|]', '\\\\\\g<0>', raw_segment)
                pattern_text = _FIELD_PATTERN.sub('(?P<\\2>.+)', escaped_segment)
                pattern_text = (('^' + pattern_text) + '$')
                self.is_complex = True
                self.var_pattern = re.compile(pattern_text)
        if self.is_complex:
            # A complex segment is by definition also a variable segment.
            assert self.is_var
    def matches(self, segment):
        """Return True when *segment* is byte-identical to this node's segment."""
        return (segment == self.raw_segment)
    def conflicts_with(self, segment):
        """Return True when *segment* would ambiguously overlap this node.

        Two complex segments conflict when they have the same literal/field
        layout (field names ignored); a simple variable conflicts with any
        other simple variable. Literal nodes never conflict (callers only ask
        about non-identical segments, per the assert).
        """
        assert (not self.matches(segment))
        other = CompiledRouterNode(segment)
        if self.is_var:
            if self.is_complex:
                if other.is_complex:
                    # Compare shapes with every field normalized to 'v'.
                    return (_FIELD_PATTERN.sub('v', self.raw_segment) == _FIELD_PATTERN.sub('v', segment))
                return False
            else:
                return (other.is_var and (not other.is_complex))
        return False
def migrate(path: Path) -> None:
    """Migrate an ERT storage directory in-place to index version 3.

    For every experiment under ``path/experiments`` a ``responses.json`` is
    written from the current ensemble config's response configuration, then
    ``index.json``'s ``version`` field is bumped to 3.
    """
    ert_config = local_storage_get_ert_config()
    ens_config = ert_config.ensemble_config
    for experiment in path.glob('experiments/*'):
        response_info = {}
        for response in ens_config.response_configuration:
            response_info[response.name] = response.to_dict()
        # default=str makes non-JSON-native values (e.g. dates) serializable.
        with open((experiment / 'responses.json'), 'w', encoding='utf-8') as fout:
            fout.write(json.dumps(response_info, default=str))
    with open((path / 'index.json'), encoding='utf-8') as f:
        index_json = json.load(f)
    index_json['version'] = 3
    with open((path / 'index.json'), 'w', encoding='utf-8') as f:
        f.write(json.dumps(index_json))
def create_user(**data):
    """Create a user record and persist it to users.json.

    Requires 'username' and 'password' keyword arguments; the password is
    replaced by its PBKDF2-SHA256 hash before storage. Returns the stored
    record. Raises ValueError when username or password is missing.
    """
    if ('username' not in data) or ('password' not in data):
        raise ValueError('username and password are required.')
    data['password'] = generate_password_hash(data.pop('password'), method='pbkdf2:sha256')
    # Use context managers so the handles are closed deterministically —
    # the original json.load(open(...)) / json.dump(..., open(...)) pattern
    # leaked file handles until GC.
    with open('users.json', encoding='utf-8') as fin:
        db_users = json.load(fin)
    db_users[data['username']] = data
    with open('users.json', 'w', encoding='utf-8') as fout:
        json.dump(db_users, fout)
    return data
def get_edge_labels(node: bn.BMGNode) -> List[str]:
    """Return one label per input edge of *node*.

    Node types missing from the _edge_labels table get 'UNKNOWN' labels.
    Table entries are either a ready-made list or a callable that produces
    one from the node; either way the result must have one label per input.
    """
    t = type(node)
    if (t not in _edge_labels):
        return (['UNKNOWN'] * len(node.inputs))
    labels = _edge_labels[t]
    if isinstance(labels, list):
        result = labels
    else:
        # Fix: isinstance(x, typing.Callable) is deprecated and unreliable;
        # the built-in callable() is the correct check.
        assert callable(labels)
        result = labels(node)
    assert (isinstance(result, list) and (len(result) == len(node.inputs)))
    return result
class EmerFlowTimeout(base_tests.SimpleProtocol):
    """Timeout values are not allowed in emergency flows.

    Inserts an emergency flow carrying idle/hard timeouts and expects the
    switch to reply with OFPET_FLOW_MOD_FAILED / OFPFMFC_BAD_EMERG_TIMEOUT.
    """
    def runTest(self):
        logging.info('Running Emergency_Flow_Timeout test')
        # Fix: dict.keys() has no .sort() on Python 3; build a sorted list.
        of_ports = sorted(config['port_map'].keys())
        self.assertTrue((len(of_ports) > 1), 'Not enough ports for test')
        delete_all_flows(self.controller)
        logging.info('Inserting an emergency flow with timeout values')
        logging.info('Expecting switch to generate error ')
        # Match on a plain TCP packet arriving on the first port.
        pkt = simple_tcp_packet()
        match = parse.packet_to_flow_match(pkt)
        match.in_port = of_ports[0]
        request = ofp.message.flow_add()
        request.match = match
        # Emergency flag plus timeouts is the illegal combination under test.
        request.flags = (request.flags | ofp.OFPFF_EMERG)
        request.hard_timeout = 9
        request.idle_timeout = 9
        act = ofp.action.output()
        act.port = of_ports[1]
        request.actions.append(act)
        logging.info('Inserting flow')
        self.controller.message_send(request)
        do_barrier(self.controller)
        (response, pkt) = self.controller.poll(exp_msg=ofp.OFPT_ERROR, timeout=5)
        self.assertTrue((response is not None), 'Switch did not reply with error message')
        self.assertTrue((response.err_type == ofp.OFPET_FLOW_MOD_FAILED), 'Error message type is not flow mod failed ')
        self.assertTrue((response.code == ofp.OFPFMFC_BAD_EMERG_TIMEOUT), 'Error Message code is not bad emergency timeout')
class SmartTeleporterMapLink(MapLink):
    """Map link that pairs with exactly one other teleporter of the same symbol.

    The two 't' symbols on the map act as the two ends of a single
    'teleport' exit; each must have exactly one normal link attached.
    """
    symbol = 't'
    display_symbol = ' '
    direction_name = 'teleport'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The other teleporter node; lazily resolved in at_empty_target.
        self.paired_teleporter = None
    def at_empty_target(self, start_direction, end_direction):
        """Find and link the matching teleporter elsewhere on the grid.

        Raises MapParserError unless exactly one other node/link with the
        same symbol exists.
        """
        xygrid = self.xymap.xygrid
        if (not self.paired_teleporter):
            symbol = self.symbol
            found_teleporters = []
            for (iy, line) in xygrid.items():
                for (ix, node_or_link) in xygrid[iy].items():
                    if ((node_or_link.symbol == symbol) and (node_or_link is not self)):
                        found_teleporters.append(node_or_link)
            if (not found_teleporters):
                raise MapParserError('found no matching teleporter to link to.', self)
            if (len(found_teleporters) > 1):
                raise MapParserError(f'found too many matching teleporters (must be exactly one more): {found_teleporters}', self)
            other_teleporter = found_teleporters[0]
            # Pair both ends so the lookup is done only once.
            self.paired_teleporter = other_teleporter
            other_teleporter.paired_teleporter = self
        return self.paired_teleporter
    def get_direction(self, start_direction):
        """Map the single attached link direction to/from the teleport direction."""
        if (not self.directions):
            neighbors = self.get_linked_neighbors()
            if (len(neighbors) != 1):
                raise MapParserError('must have exactly one link connected to it.', self)
            (direction, link) = next(iter(neighbors.items()))
            if hasattr(link, 'node_index'):
                # Fix: the original string lacked the f-prefix, so {link} and
                # {direction} were never interpolated into the error message.
                raise MapParserError(f'can only connect to a Link. Found {link} in direction {direction}.', self)
            direction_name = self.direction_name
            if (start_direction == direction_name):
                self.directions = {direction_name: direction, direction: direction_name}
            else:
                self.directions = {start_direction: direction_name, direction_name: direction}
        return self.directions.get(start_direction)
class TriggerThread(threading.Thread):
    """Multiply a Redis trigger channel's rate by ``rate``.

    For each incoming message the payload is written to the derived key
    immediately, and ``rate - 1`` additional writes are scheduled with
    threading.Timer, spread evenly over an exponentially smoothed estimate
    of the inter-message interval (smoothing factor ``lrate``).
    """
    def __init__(self, redischannel, rate, lrate):
        threading.Thread.__init__(self)
        self.redischannel = redischannel
        self.rate = rate
        self.lrate = lrate
        self.key = ('x%d.%s' % (rate, redischannel))
        self.previous = None   # timestamp of the previous trigger
        self.interval = None   # smoothed inter-trigger interval in seconds
        self.running = True
        self.timer = []        # pending threading.Timer objects
    def stop(self):
        """Cancel all pending timers and ask the run loop to exit."""
        monitor.debug(('flushing %d timers' % len(self.timer)))
        for t in self.timer:
            t.cancel()
        self.timer = []
        self.running = False
    def run(self):
        global count
        # NOTE(review): `interval` is declared global but never assigned or
        # read here — confirm whether the declaration can be dropped.
        global interval
        pubsub = patch.pubsub()
        # The UNBLOCK channel only serves to wake the blocking listen() so
        # the while condition is re-checked after stop().
        pubsub.subscribe('CLOCKMULTIPLIER_UNBLOCK')
        pubsub.subscribe(self.redischannel)
        while self.running:
            for item in pubsub.listen():
                if ((not self.running) or (not (item['type'] == 'message'))):
                    break
                if (item['channel'] == self.redischannel):
                    now = time.time()
                    count += 1
                    # A new trigger supersedes any still-pending subdivisions.
                    for t in self.timer:
                        t.cancel()
                    self.timer = []
                    # Fix: compare to None with `is`, not `==`.
                    if (self.previous is None):
                        # First message: just remember the timestamp.
                        self.previous = now
                        continue
                    elif (self.interval is None):
                        # Second message: seed the interval estimate.
                        self.interval = (now - self.previous)
                        self.previous = now
                    else:
                        # Exponentially smoothed interval update.
                        self.interval = (((1 - self.lrate) * self.interval) + (self.lrate * (now - self.previous)))
                        self.previous = now
                    val = float(item['data'])
                    patch.setvalue(self.key, val)
                    # Schedule rate-1 extra pulses evenly across the interval.
                    for number in range(1, self.rate):
                        delay = (number * (self.interval / self.rate))
                        t = threading.Timer(delay, patch.setvalue, args=[self.key, val])
                        t.start()
                        self.timer.append(t)
class SKLearnForestTransformer(SKLearnTransformer):
    """Base transformer turning a fitted sklearn random forest into an Ensemble.

    Subclasses must say how the per-tree outputs are aggregated and what the
    ensemble's target type is.
    """
    def __init__(self, model: Union[(RandomForestClassifier, RandomForestRegressor)], feature_names: Sequence[str], classification_labels: Optional[Sequence[str]]=None, classification_weights: Optional[Sequence[float]]=None):
        super().__init__(model, feature_names, classification_labels, classification_weights)
    def build_aggregator_output(self) -> Dict[(str, Any)]:
        """Subclass hook: aggregator config for combining the trees' outputs."""
        raise NotImplementedError('build_aggregator_output must be implemented')
    def determine_target_type(self) -> str:
        """Subclass hook: target type string for the resulting ensemble."""
        raise NotImplementedError('determine_target_type must be implemented')
    def transform(self) -> Ensemble:
        """Convert the fitted forest into an Ensemble of transformed trees.

        Classification labels default to the stringified model classes when
        none were supplied explicitly.
        """
        check_is_fitted(self._model, ['estimators_'])
        estimators = self._model.estimators_
        ensemble_classes = None
        if self._classification_labels:
            ensemble_classes = self._classification_labels
        if isinstance(self._model, RandomForestClassifier):
            check_is_fitted(self._model, ['classes_'])
            if (ensemble_classes is None):
                ensemble_classes = [str(c) for c in self._model.classes_]
        ensemble_models: Sequence[Tree] = [SKLearnDecisionTreeTransformer(m, self._feature_names).transform() for m in estimators]
        return Ensemble(self._feature_names, ensemble_models, self.build_aggregator_output(), target_type=self.determine_target_type(), classification_labels=ensemble_classes, classification_weights=self._classification_weights)
def simulated_hemodynamics(amplitude: float, duration: float, sample_rate: float, pad_len: int=0) -> np.ndarray:
    """Simulate a hemodynamic response as a double-gamma HRF.

    The HRF is left-padded with ``pad_len`` zeros and truncated back to
    int(duration * sample_rate) samples, i.e. padding shifts the response
    right and drops its tail. Warnings are logged when the output would be
    empty (zero length) or entirely zeros (pad longer than the data).
    """
    ts = _make_time_index(duration, sample_rate)
    hrf = double_gamma_hrf(ts, amplitude)
    if (int((duration * sample_rate)) == 0):
        logging.warning('Your int(duration * sample_rate) is zero. Returned array will be empty.')
    if (pad_len > int((duration * sample_rate))):
        logging.warning('Your pad_len is larger than the whole data. Returned array will be all zeros.')
    # Pad on the left, then clip to the original sample count.
    hrf = np.pad(hrf, (pad_len, 0), 'constant')[:int((duration * sample_rate))]
    return hrf
class OptionSeriesFunnel3dSonificationContexttracksPointgrouping(Options):
    """Point-grouping options for funnel3d series sonification context tracks.

    NOTE(review): each option appears as a getter/setter pair with the same
    name; the @property / @<name>.setter decorators appear to have been
    stripped during extraction (the same pattern occurs elsewhere in this
    file), so as written each second def shadows the first. Confirm against
    the original sources.
    """
    def algorithm(self):
        # Grouping algorithm; default 'minmax'.
        return self._config_get('minmax')
    def algorithm(self, text: str):
        self._config(text, js_type=False)
    def enabled(self):
        # Whether point grouping is active; default True.
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def groupTimespan(self):
        # Timespan (ms) of each group; default 15.
        return self._config_get(15)
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)
    def prop(self):
        # Point property the grouping operates on; default 'y'.
        return self._config_get('y')
    def prop(self, text: str):
        self._config(text, js_type=False)
class MsrnModel(PreTrainedModel):
    """MSRN super-resolution network: head conv, n_blocks MSR blocks, fusion tail.

    Block outputs plus the head output are concatenated channel-wise and fused
    by a 1x1 conv before upsampling back to 3-channel output.
    """
    config_class = MsrnConfig
    def __init__(self, args, conv=default_conv):
        super(MsrnModel, self).__init__(args)
        self.n_blocks = args.n_blocks
        n_feats = args.n_feats
        rgb_range = args.rgb_range
        kernel_size = 3
        scale = args.scale
        act = nn.ReLU(True)
        # sub_mean/add_mean normalize RGB before and after the network.
        self.sub_mean = MeanShift(rgb_range, rgb_mean=args.rgb_mean, rgb_std=args.rgb_std)
        modules_head = [conv(3, n_feats, kernel_size)]
        modules_body = nn.ModuleList()
        for i in range(self.n_blocks):
            # Optionally swap in attention-augmented (BAM) blocks.
            if args.bam:
                modules_body.append(MsrBamBlock(n_feats=n_feats))
            else:
                modules_body.append(MsrBlock(n_feats=n_feats))
        # 1x1 conv fuses (n_blocks + 1) concatenated feature maps, then
        # upsample by `scale` and project back to 3 channels.
        modules_tail = [nn.Conv2d((n_feats * (self.n_blocks + 1)), n_feats, 1, padding=0, stride=1), conv(n_feats, n_feats, kernel_size), Upsampler(conv, scale, n_feats, act=False), conv(n_feats, 3, kernel_size)]
        self.add_mean = MeanShift(rgb_range, sign=1, rgb_mean=args.rgb_mean, rgb_std=args.rgb_std)
        self.head = nn.Sequential(*modules_head)
        self.body = nn.Sequential(*modules_body)
        self.tail = nn.Sequential(*modules_tail)
    def forward(self, x):
        """Run head, collect every block's output plus the head features, fuse, tail."""
        x = self.head(x)
        res = x
        MSRB_out = []
        for i in range(self.n_blocks):
            x = self.body[i](x)
            MSRB_out.append(x)
        # Append the head output as the long skip connection.
        MSRB_out.append(res)
        res = torch.cat(MSRB_out, 1)
        x = self.tail(res)
        return x
    def load_state_dict(self, state_dict, strict=True):
        """Copy parameters, tolerating mismatches in the 'tail' (upsampler).

        Shape mismatches outside 'tail' raise RuntimeError; unexpected or
        missing keys raise KeyError when `strict` (tail keys exempted from
        the unexpected-key check so a new-scale upsampler can replace the
        pre-trained one).
        """
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if (name in own_state):
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if (name.find('tail') >= 0):
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError(f'While copying the parameter named {name}, whose dimensions in the model are {own_state[name].size()} and whose dimensions in the checkpoint are {param.size()}.')
            elif strict:
                if (name.find('tail') == (- 1)):
                    raise KeyError(f'unexpected key "{name}" in state_dict')
        if strict:
            missing = (set(own_state.keys()) - set(state_dict.keys()))
            if (len(missing) > 0):
                raise KeyError(f'missing keys in state_dict: "{missing}"')
class PrometheusHandler(Handler):
    """Handle messages arriving on the Prometheus protocol."""
    SUPPORTED_PROTOCOL = PrometheusMessage.protocol_id
    def setup(self) -> None:
        """Log setup when the prometheus dialogues are enabled."""
        if self.context.prometheus_dialogues.enabled:
            self.context.logger.info('setting up PrometheusHandler')
    def handle(self, message: Message) -> None:
        """Dispatch an incoming prometheus message.

        Messages not belonging to a known dialogue are logged as
        unidentified; RESPONSE performatives are logged at debug level.
        """
        message = cast(PrometheusMessage, message)
        prometheus_dialogues = cast(PrometheusDialogues, self.context.prometheus_dialogues)
        prometheus_dialogue = cast(PrometheusDialogue, prometheus_dialogues.update(message))
        if (prometheus_dialogue is None):
            self._handle_unidentified_dialogue(message)
            return
        if (message.performative == PrometheusMessage.Performative.RESPONSE):
            self.context.logger.debug(f'Prometheus response ({message.code}): {message.message}')
        else:
            # NOTE(review): this interpolates the Performative enum class
            # itself, not message.performative — confirm that is intended.
            self.context.logger.debug(f'got unexpected prometheus message: Performative = {PrometheusMessage.Performative}')
    def _handle_unidentified_dialogue(self, msg: Message) -> None:
        """Log a message that could not be matched to an existing dialogue."""
        self.context.logger.info('received invalid message={}, unidentified dialogue.'.format(msg))
    def teardown(self) -> None:
        """Tear down the handler (nothing to clean up)."""
def setup_logging(*, logfile: str=None, console_level: Union[(str, int)]='INFO', console_formatter: str='json', logfile_formatter: str='legacy') -> None:
    """Configure structlog-backed stdlib logging for console and optional file.

    ``console_level`` may be a numeric level or a level name; unknown names
    fall back to INFO with a warning. ``console_formatter`` and
    ``logfile_formatter`` must name one of the defined formatters
    (console-plain, console-colored, legacy, json), otherwise UsageError is
    raised. When ``logfile`` is None the file handler is removed entirely.
    """
    # Accept either an int-like string ("10") or a level name ("debug").
    try:
        console_level_int = int(console_level)
    except ValueError:
        try:
            console_level_int = int(logging.getLevelName(console_level.upper()))
        except ValueError:
            logger.warning('Unknown logging level %s, falling back to INFO.', console_level)
            console_level_int = logging.INFO
    # Template dictConfig; level/formatter/filename slots (None) are filled below.
    logging_config: Dict = {'version': 1, 'disable_existing_loggers': False, 'formatters': {'console-plain': {'()': structlog.stdlib.ProcessorFormatter, 'processor': _FormatRenderer(colors=False, fmt='{log_color}{level_uc:>8s}: {event:s}'), 'foreign_pre_chain': _sl_foreign_pre_chain}, 'console-colored': {'()': structlog.stdlib.ProcessorFormatter, 'processor': _FormatRenderer(colors=True, fmt='{log_color}{level_uc:>8s}: {event:s}'), 'foreign_pre_chain': _sl_foreign_pre_chain}, 'legacy': {'()': structlog.stdlib.ProcessorFormatter, 'processor': _FormatRenderer(colors=False, fmt='{timestamp_local_ctime} {process:d}/{thread_name:s} {file:s}:{line:d} {level_uc:s} {event:s}'), 'foreign_pre_chain': _sl_foreign_pre_chain}, 'json': {'()': structlog.stdlib.ProcessorFormatter, 'processor': structlog.processors.JSONRenderer(), 'foreign_pre_chain': _sl_foreign_pre_chain}}, 'handlers': {'console': {'level': None, 'class': 'logging.StreamHandler', 'formatter': None, 'stream': 'ext://sys.stderr'}, 'file': {'level': None, 'class': 'logging.handlers.WatchedFileHandler', 'filename': None, 'formatter': None}}, 'loggers': {'': {'handlers': None, 'level': 'DEBUG', 'propagate': True}}}
    if (console_formatter not in logging_config['formatters'].keys()):
        raise UsageError('Event formatter {} is unknown.'.format(console_formatter))
    if (logfile_formatter not in logging_config['formatters'].keys()):
        raise UsageError('Event formatter {} is unknown.'.format(logfile_formatter))
    logging_config['handlers']['console']['formatter'] = console_formatter
    logging_config['handlers']['console']['level'] = console_level_int
    if (logfile is not None):
        logging_config['handlers']['file']['filename'] = logfile
        # The file always captures at least INFO, even if the console is quieter.
        logging_config['handlers']['file']['level'] = min(console_level_int, logging.INFO)
        logging_config['handlers']['file']['formatter'] = logfile_formatter
    else:
        del logging_config['handlers']['file']
    logging_config['loggers']['']['handlers'] = logging_config['handlers'].keys()
    logging.config.dictConfig(logging_config)
# NOTE(review): the original decorator line read '.parametrize(...)' — the
# '@pytest.mark' prefix was evidently lost; restored here.
@pytest.mark.parametrize('discrete, dtype', [(True, np.int32), (False, np.float64)])
def test_gridprop_params_no_date(discrete, dtype):
    """gridprop_params with date=None omits 'date' and mirrors the grid's flags."""
    grid = xtgeo.create_box_grid((2, 5, 1))
    params = xtg_im_ecl.gridprop_params(values=np.zeros(10, dtype=dtype), name='myprop', grid=grid, fracture=False, date=None)
    assert (params['name'] == 'myprop')
    # Bug fix: the original asserted `date not in params` with the bare name
    # `date`, which is undefined here (NameError); the key is the string 'date'.
    assert ('date' not in params)
    assert (params['dualporo'] == grid.dualporo)
    assert (params['dualperm'] == grid.dualperm)
    assert (params['discrete'] is discrete)
    assert (params['values'].shape == (2, 5, 1))
class TestUITraits(BaseTestMixin, unittest.TestCase):
    """Exercise the UI-related trait shorthands on ObjectWithUITraits."""

    def setUp(self):
        BaseTestMixin.setUp(self)

    def tearDown(self):
        BaseTestMixin.tearDown(self)

    def test_orientation(self):
        subject = ObjectWithUITraits()
        # Default value, then the 'h' shorthand expands to the full word.
        self.assertEqual(subject.orientation, 'vertical')
        subject.orientation = 'h'
        self.assertEqual(subject.orientation, 'horizontal')

    def test_editor_style(self):
        subject = ObjectWithUITraits()
        # Default value, then the 'r' shorthand expands to 'readonly'.
        self.assertEqual(subject.style, 'simple')
        subject.style = 'r'
        self.assertEqual(subject.style, 'readonly')

    def test_layout(self):
        subject = ObjectWithUITraits()
        self.assertEqual(subject.layout, 'normal')

    def test_an_object(self):
        subject = ObjectWithUITraits()
        # The trait stores the source string; the shadow trait holds an
        # evaluatable expression.
        subject.an_object = '[1,2,3][0]'
        self.assertEqual(subject.an_object, '[1,2,3][0]')
        evaluated = eval(subject.an_object_, {}, {})
        self.assertEqual(evaluated, 1)
def expand_sdk_name(name, activating):
    """Expand a user-supplied SDK name/alias into its canonical full name.

    Handles legacy renames (upstream-master -> main), rejects fastcomp,
    resolves tip-of-tree names, strips the obsolete '-upstream' suffix,
    resolves aliases, and maps version numbers to release-hash names when
    the version is known. Unrecognized names are returned unchanged.
    """
    if ('upstream-master' in name):
        errlog('upstream-master SDK has been renamed main')
        name = name.replace('upstream-master', 'main')
    if ('fastcomp' in name):
        exit_with_error('the fastcomp backend is no longer supported. Please use an older version of emsdk (for example 3.1.29) if you want to install the old fastcomp-based SDK')
    if (name in ('tot', 'sdk-tot', 'tot-upstream')):
        if activating:
            # When activating (not installing), prefer whatever tot build is
            # already on disk instead of fetching a newer one.
            installed = get_installed_sdk_version()
            if installed:
                debug_print('activating currently installed SDK; not updating tot version')
                return ('sdk-releases-%s-64bit' % installed)
        return find_tot_sdk()
    if ('-upstream' in name):
        # Upstream is the only backend now, so the suffix is redundant.
        name = name.replace('-upstream', '')
    name = resolve_sdk_aliases(name, verbose=True)
    fullname = name
    # Strip all decorations to recover the bare version/hash string.
    version = fullname.replace('sdk-', '').replace('releases-', '').replace('-64bit', '').replace('tag-', '')
    sdk = ('sdk-' if (not name.startswith('releases-')) else '')
    releases_info = load_releases_info()['releases']
    release_hash = get_release_hash(version, releases_info)
    if release_hash:
        full_name = ('%sreleases-%s-64bit' % (sdk, release_hash))
        print(("Resolving SDK version '%s' to '%s'" % (version, full_name)))
        return full_name
    if (len(version) == 40):
        # A 40-char hex string is a raw git commit hash; remember it so the
        # release metadata can be fetched for it later.
        global extra_release_tag
        extra_release_tag = version
        return ('%sreleases-%s-64bit' % (sdk, version))
    return name
def load(fd, encoding='utf-8', handler=None, **defaults):
    """Read serialized text from a path or file-like object and parse it.

    When `handler` is not given, the format is auto-detected from the text.
    """
    if hasattr(fd, 'read'):
        contents = fd.read()
    else:
        with codecs.open(fd, 'r', encoding) as stream:
            contents = stream.read()
    chosen = handler if handler else detect_format(contents, handlers)
    return loads(contents, encoding, chosen, **defaults)
class AbstractCircularFilter(Filter, ABC):
    """Abstract filter that convolves spatial data with a circular kernel.

    NOTE(review): this copy shows several extraction artifacts — the bare
    `_validator(pre=True)` expression looks like a stripped pydantic
    validator decorator; `filter_radius_pixels` is used as a plain value in
    `evaluate` (so it was presumably an @property); and `evaluate` calls
    `self._check_kernel_size`, yet that function is defined nested inside
    `make_kernel` here. Confirm all three against the original sources.
    """
    radius: float = pd.Field(..., title='Filter Radius', description='Radius of the filter to convolve with supplied spatial data. Note: the corresponding feature size expressed in the device is typically sqrt(3) times smaller than the radius. For best results, it is recommended to make your radius about twice as large as the desired feature size.', units=MICROMETER)
    design_region_dl: float = pd.Field(..., title='Grid Size in Design Region', description='Grid size in the design region. This sets the length scale for the conic convolution filter.', units=MICROMETER)
    def filter_radius_pixels(self) -> int:
        # Radius expressed in design-region grid cells, rounded up.
        return np.ceil((self.radius / self.design_region_dl))
    _validator(pre=True)
    def _deprecate_feature_size(cls, values):
        # Reject the pre-2.4 'feature_size' field name with a migration hint.
        if ('feature_size' in values):
            raise pd.ValidationError("The 'feature_size' field of circular filters available in 2.4 pre-releases was renamed to 'radius' for the official 2.4.0 release. If you're seeing this message, please change your script to use that field name.")
        return values
    def make_kernel(self, coords_rad: jnp.array) -> jnp.array:
        def _check_kernel_size(kernel: jnp.array, signal_in: jnp.array) -> jnp.array:
            # Truncate the kernel symmetrically along any axis where it is
            # larger than the input, warning the user about the resize.
            kernel_shape = kernel.shape
            input_shape = signal_in.shape
            if any(((k_shape > in_shape) for (k_shape, in_shape) in zip(kernel_shape, input_shape))):
                new_kernel = kernel.copy()
                for (axis, (len_kernel, len_input)) in enumerate(zip(kernel_shape, input_shape)):
                    if (len_kernel > len_input):
                        rm_pixels_total = (len_kernel - len_input)
                        rm_pixels_edge = int(np.ceil((rm_pixels_total / 2)))
                        indices_truncated = np.arange(rm_pixels_edge, (len_kernel - rm_pixels_edge))
                        new_kernel = new_kernel.take(indices=indices_truncated.astype(int), axis=axis)
                log.warning(f"The filter input has shape {input_shape} whereas the kernel has shape {kernel_shape}. These shapes are incompatible as the input must be larger than the kernel along all dimensions. The kernel will automatically be resized to {new_kernel.shape} to be less than the input shape. If this is unexpected, either reduce the filter 'radius' or increase the input array's size.")
                return new_kernel
            return kernel
    def evaluate(self, spatial_data: jnp.array) -> jnp.array:
        """Convolve *spatial_data* with the circular kernel, normalizing by
        the kernel's local coverage so edges are not attenuated."""
        rho = jnp.squeeze(spatial_data)
        num_dims = len(rho.shape)
        # Radial coordinate grid spanning the kernel footprint in each dim.
        coords_1d = np.arange((- self.filter_radius_pixels), (self.filter_radius_pixels + 1))
        meshgrid_args = [coords_1d.copy() for _ in range(num_dims)]
        meshgrid_coords = np.meshgrid(*meshgrid_args)
        coords_rad = np.sqrt(np.sum([np.square(v) for v in meshgrid_coords], axis=0))
        kernel = self.make_kernel(coords_rad)
        kernel = self._check_kernel_size(kernel=kernel, signal_in=rho)
        # Normalized convolution: numerator over coverage denominator.
        num = jsp.signal.convolve(rho, kernel, mode='same')
        den = jsp.signal.convolve(jnp.ones_like(rho), kernel, mode='same')
        return (num / den)
# NOTE(review): the original decorator lines were bare tuples such as
# ('os.path.exists', return_value=False) — syntactically invalid Python and
# almost certainly @mock.patch decorators whose prefix was lost during
# extraction. Restored below; confirm the patch targets and that the module
# imports `mock` (e.g. `from unittest import mock`).
@mock.patch('os.path.exists', return_value=False)
@mock.patch('os.path.expanduser', return_value=temp_dir)
@mock.patch('subprocess.check_call', side_effect=subprocess.CalledProcessError(1, 'check_call'))
@mock.patch('Updater.sdlog.error')
@mock.patch('Updater.sdlog.info')
def test_write_last_updated_flags_dom0_folder_creation_fail(mocked_info, mocked_error, mocked_call, mocked_expand, mocked_path_exists):
    """When creating the dom0 flag folder fails, the errors are logged."""
    error_log = [call('Error writing last updated flag to sd-app'), call("Command 'check_call' returned non-zero exit status 1.")]
    updater._write_last_updated_flags_to_disk()
    mocked_error.assert_has_calls(error_log)
def process_task(task: Task) -> None:
    """Record a finished task's outcome (_failed/_ret) and log its output."""
    report = '[{name}]\ncmd:\n{cmd}\nstderr:\n{stderr}\nstdout:{stdout}'.format(
        name=task._name, cmd=task._cmd, stderr=task._stderr, stdout=task._stdout)
    if task._proc.returncode != 0:
        task._failed = True
        _LOGGER.info('Failed: ' + report)
        task._ret = -1
    else:
        _LOGGER.debug('Successful: ' + report)
        task._ret = 0
def extractOxytlWordpressCom(item):
    """Build a release message for oxytl.wordpress.com items tagged 'WATTT'.

    Returns None for previews or untitled chapters, False for unknown tags.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    if 'WATTT' in item['tags']:
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.