| code |
|---|
class BarDataList(List[BarData]):
reqId: int
contract: Contract
endDateTime: Union[(datetime, date_, str, None)]
durationStr: str
barSizeSetting: str
whatToShow: str
useRTH: bool
formatDate: int
keepUpToDate: bool
chartOptions: List[TagValue]
def __init__(self, *args):
super().__init__(*args)
self.updateEvent = Event('updateEvent')
def __eq__(self, other):
return (self is other)
def __hash__(self):
return id(self) |
@pytest.mark.django_db
def test_statement_timeout_successfully_runs_within_timeout():
test_timeout_in_seconds = 1
pg_sleep_in_seconds = 0.5
@_db_timeout(test_timeout_in_seconds)
def test_timeout_success():
with connection.cursor() as cursor:
cursor.execute(f'SELECT pg_sleep({pg_sleep_in_seconds:.2f})')
try:
start = perf_counter()
test_timeout_success()
except Exception:
assert False
else:
assert ((perf_counter() - start) >= pg_sleep_in_seconds) |
class MasterSyncItem(QStandardItem):
ITEM_TYPE = (QStandardItem.UserType + 35)
def __init__(self, master):
QStandardItem.__init__(self)
self.name = master.name
self.button = MasterSyncButtonHelper(master)
self.parent_item = None
@property
def master(self):
return self.button.master()
@property
def synchronized(self):
return self.button.get_sync_state()
@synchronized.setter
def synchronized(self, value):
self.button.set_sync_state(value)
self.model().setData(self.index(), value)
def __eq__(self, item):
return (self.button == item)
def __gt__(self, item):
return (self.button > item) |
class TreeInferer():
def __init__(self, perm_container):
self.perms_by_genome = defaultdict(list)
for perm in chain(perm_container.ref_perms, perm_container.target_perms):
self.perms_by_genome[perm.genome_name].append(perm)
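# Breakpoint-style distance: adjacencies between consecutive blocks are collected as sorted signed-id pairs per genome, and the distance is the size of the smaller adjacency set minus the number of shared adjacencies.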
def _genome_distance(self, genome_1, genome_2):
breakpoints_1 = set()
n_blocks_1 = 0
for perm in self.perms_by_genome[genome_1]:
n_blocks_1 += len(perm.blocks)
for (bl_1, bl_2) in zip(perm.blocks[:(- 1)], perm.blocks[1:]):
bp = sorted([(- bl_1.signed_id()), bl_2.signed_id()])
breakpoints_1.add(tuple(bp))
breakpoints_2 = set()
n_blocks_2 = 0
for perm in self.perms_by_genome[genome_2]:
n_blocks_2 += len(perm.blocks)
for (bl_1, bl_2) in zip(perm.blocks[:(- 1)], perm.blocks[1:]):
bp = sorted([(- bl_1.signed_id()), bl_2.signed_id()])
breakpoints_2.add(tuple(bp))
return (min(len(breakpoints_1), len(breakpoints_2)) - len((breakpoints_1 & breakpoints_2)))
def build(self):
MIN_LEN = 1e-06
genomes = list(self.perms_by_genome.keys())
taxas = list(map(Leaf, sorted(genomes)))
for t in taxas:
t.terminal = True
distances = defaultdict((lambda : {}))
for (t_1, t_2) in combinations_with_replacement(taxas, 2):
distances[t_1][t_2] = self._genome_distance(t_1.identifier, t_2.identifier)
distances[t_2][t_1] = distances[t_1][t_2]
def calc_q(taxas):
q_matrix = defaultdict((lambda : {}))
for (t_1, t_2) in combinations(taxas, 2):
other_dist = 0
for other_t in taxas:
other_dist += distances[t_1][other_t]
other_dist += distances[t_2][other_t]
q_matrix[t_1][t_2] = (((len(taxas) - 2) * distances[t_1][t_2]) - other_dist)
q_matrix[t_2][t_1] = q_matrix[t_1][t_2]
return q_matrix
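# Neighbour-joining: repeatedly merge the pair of taxa with the lowest Q value into a new internal node, recompute distances to the merged node, and stop when a single tree remains.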
while (len(taxas) > 1):
q_matrix = calc_q(taxas)
lowest_dst = float('inf')
lowest_pair = None
for (t_1, t_2) in sorted(combinations(taxas, 2)):
if (q_matrix[t_1][t_2] < lowest_dst):
lowest_dst = q_matrix[t_1][t_2]
lowest_pair = (t_1, t_2)
new_taxa = Tree()
new_taxa.terminal = False
(old_1, old_2) = sorted(lowest_pair)
other_dist = 0
for other_taxa in taxas:
other_dist += distances[old_1][other_taxa]
other_dist -= distances[old_2][other_taxa]
div_dist = (((0.5 / (len(taxas) - 2)) * other_dist) if (len(taxas) > 2) else 0)
dist_1 = ((0.5 * distances[old_1][old_2]) + div_dist)
dist_2 = (distances[old_1][old_2] - dist_1)
(dist_1, dist_2) = (max(MIN_LEN, dist_1), max(MIN_LEN, dist_2))
new_taxa.add_edge((old_1, None, dist_1))
new_taxa.add_edge((old_2, None, dist_2))
taxas.remove(old_1)
taxas.remove(old_2)
for other_taxa in taxas:
distances[new_taxa][other_taxa] = (0.5 * ((distances[old_1][other_taxa] + distances[old_2][other_taxa]) - distances[old_1][old_2]))
distances[other_taxa][new_taxa] = distances[new_taxa][other_taxa]
distances[new_taxa][new_taxa] = 0
taxas.append(new_taxa)
tree = list(taxas)[0]
return tree |
_side_effects
class FalDbt():
def __init__(self, project_dir: Optional[str]=None, profiles_dir: Optional[str]=None, select: List[str]=[], exclude: Tuple[str]=tuple(), selector: Optional[str]=None, threads: Optional[int]=None, state: Optional[str]=None, profile_target: Optional[str]=None, args_vars: str='{}', generated_models: Dict[(str, Path)]={}):
if (not version.is_version_plus('1.0.0')):
raise NotImplementedError(f'dbt version {version.DBT_VCURRENT} is no longer supported, please upgrade to dbt 1.0.0 or above')
if (project_dir is None):
project_dir = os.getcwd()
if (profiles_dir is None):
profiles_dir = str(default_profiles_dir())
project_dir = os.path.realpath(os.path.expanduser(project_dir))
profiles_dir = os.path.realpath(os.path.expanduser(profiles_dir))
vars = parse.parse_cli_vars(args_vars)
flags = lib.initialize_dbt_flags(profiles_dir=profiles_dir, project_dir=project_dir, threads=threads, profile_target=profile_target, vars=vars)
self.project_dir = flags.PROJECT_DIR
self.profiles_dir = flags.PROFILES_DIR
self._state = None
if (state is not None):
self._state = Path(os.path.realpath(os.path.expanduser(state)))
self.scripts_dir = parse.get_scripts_dir(self.project_dir, args_vars)
self._config = parse.get_dbt_config(project_dir=self.project_dir, profiles_dir=self.profiles_dir, profile_target=profile_target, threads=threads, args_vars=args_vars)
self._run_results = DbtRunResult(parse.get_dbt_results(self.project_dir, self._config))
if self._run_results.native_run_result:
if (profile_target is None):
profile_target = _get_custom_target(self._run_results)
if (profile_target is not None):
self._config = parse.get_dbt_config(project_dir=self.project_dir, profiles_dir=self.profiles_dir, threads=threads, profile_target=profile_target, args_vars=args_vars)
lib.register_adapters(self._config)
parse_result = self._dbt_invoke('parse')
native_manifest: Manifest = parse_result.result
args = CompileArgs(selector, select, select, exclude, self._state, None)
self._compile_task = CompileTask(args, self._config, native_manifest)
self._compile_task._runtime_initialize()
self._manifest = DbtManifest(native_manifest)
freshness_execution_results = DbtFreshnessExecutionResult(parse.get_dbt_sources_artifact(self.project_dir, self._config))
(self.models, self.sources, self.tests) = self._manifest._map_nodes(self._run_results, freshness_execution_results, generated_models)
normalized_model_paths = parse.normalize_paths(self.project_dir, self.source_paths)
self._global_script_paths = parse.get_global_script_configs(normalized_model_paths)
self.features = self._find_features()
self._environments = None
telemetry.log_api(action='faldbt_initialized', dbt_config=self._config)
def _dbt_invoke(self, cmd: str, args: Optional[List[str]]=None) -> dbtRunnerResult:
runner = dbtRunner()
if (args is None):
args = []
project_args = ['--project-dir', self.project_dir, '--profiles-dir', self.profiles_dir, '--target', self._profile_target]
return runner.invoke((([cmd] + project_args) + args))
@property
def model_paths(self) -> List[str]:
return self._config.model_paths
@property
@deprecated(details='Use model_paths instead')
def source_paths(self) -> List[str]:
return self.model_paths
@property
def _profile_target(self):
return self._config.target_name
@property
def threads(self):
return self._config.threads
@property
def target_path(self):
return self._config.target_path
@property
def project_name(self):
return self._config.project_name
def list_sources(self) -> List[DbtSource]:
with telemetry.log_time('list_sources', dbt_config=self._config):
return self.sources
def list_models_ids(self) -> Dict[(str, str)]:
with telemetry.log_time('list_models_ids', dbt_config=self._config):
res = {}
for model in self.models:
res[model.unique_id] = model.status
return res
def list_models(self) -> List[DbtModel]:
with telemetry.log_time('list_models', dbt_config=self._config):
return self.models
def list_tests(self) -> List[DbtTest]:
with telemetry.log_time('list_tests', dbt_config=self._config):
return self.tests
def list_features(self) -> List[Feature]:
with telemetry.log_time('list_features', dbt_config=self._config):
return self.features
def _find_features(self) -> List[Feature]:
models = self.models
models = list(filter((lambda model: ((FAL in model.meta) and isinstance(model.meta[FAL], dict) and ('feature_store' in model.meta[FAL]) and (len(list(model.columns.keys())) > 0))), models))
features = []
for model in models:
for column_name in model.columns.keys():
if (column_name == model.meta[FAL]['feature_store']['entity_column']):
continue
if (column_name == model.meta[FAL]['feature_store']['timestamp_column']):
continue
features.append(Feature(model=model.name, column=column_name, description=model.columns[column_name].description, entity_column=model.meta[FAL]['feature_store']['entity_column'], timestamp_column=model.meta[FAL]['feature_store']['timestamp_column']))
return features
def _model(self, target_model_name: str, target_package_name: Optional[str]) -> ManifestNode:
target_model: MaybeNonSource = self._manifest.native_manifest.resolve_ref(target_model_name, target_package_name, None, self.project_dir, self.project_dir)
package_str = (f"'{target_package_name}'." if target_package_name else '')
model_str = f"{package_str}'{target_model_name}'"
if (target_model is None):
raise Exception(f'Could not find model {model_str}')
if isinstance(target_model, Disabled):
raise RuntimeError(f'Model {model_str} is disabled')
return target_model
def ref(self, target_1: str, target_2: Optional[str]=None) -> pd.DataFrame:
with telemetry.log_time('ref', dbt_config=self._config):
target_model_name = target_1
target_package_name = None
if (target_2 is not None):
target_package_name = target_1
target_model_name = target_2
target_model = self._model(target_model_name, target_package_name)
return lib.fetch_target(self.project_dir, self.profiles_dir, target_model, self._profile_target, config=self._config)
def _source(self, target_source_name: str, target_table_name: str) -> SourceDefinition:
target_source: MaybeParsedSource = self._manifest.native_manifest.resolve_source(target_source_name, target_table_name, self.project_dir, self.project_dir)
if (target_source is None):
raise RuntimeError(f"Could not find source '{target_source_name}'.'{target_table_name}'")
if isinstance(target_source, Disabled):
raise RuntimeError(f"Source '{target_source_name}'.'{target_table_name}' is disabled")
return target_source
def source(self, target_source_name: str, target_table_name: str) -> pd.DataFrame:
with telemetry.log_time('source', dbt_config=self._config):
target_source = self._source(target_source_name, target_table_name)
return lib.fetch_target(self.project_dir, self.profiles_dir, target_source, self._profile_target, config=self._config)
def write_to_source(self, data: pd.DataFrame, target_source_name: str, target_table_name: str, *, dtype: Any=None, mode: str='append'):
with telemetry.log_time('write_to_source', dbt_config=self._config, additional_props={'args': {'mode': mode}}):
target_source = self._source(target_source_name, target_table_name)
write_mode = lib.WriteModeEnum(mode.lower().strip())
if (write_mode == lib.WriteModeEnum.APPEND):
lib.write_target(data, self.project_dir, self.profiles_dir, self._profile_target, target_source, dtype=dtype, config=self._config)
elif (write_mode == lib.WriteModeEnum.OVERWRITE):
lib.overwrite_target(data, self.project_dir, self.profiles_dir, self._profile_target, target_source, dtype=dtype, config=self._config)
else:
raise Exception(f'write_to_source mode `{mode}` not supported')
def write_to_model(self, data: pd.DataFrame, target_1: str, target_2: Optional[str]=None, *, dtype: Any=None, mode: str='overwrite'):
with telemetry.log_time('write_to_model', dbt_config=self._config, additional_props={'args': {'mode': mode}}):
target_model_name = target_1
target_package_name = None
if (target_2 is not None):
target_package_name = target_1
target_model_name = target_2
target_model = self._model(target_model_name, target_package_name)
write_mode = lib.WriteModeEnum(mode.lower().strip())
if (write_mode == lib.WriteModeEnum.APPEND):
lib.write_target(data, self.project_dir, self.profiles_dir, self._profile_target, target_model, dtype=dtype, config=self._config)
elif (write_mode == lib.WriteModeEnum.OVERWRITE):
lib.overwrite_target(data, self.project_dir, self.profiles_dir, self._profile_target, target_model, dtype=dtype, config=self._config)
else:
raise Exception(f'write_to_model mode `{mode}` not supported')
def execute_sql(self, sql: str) -> pd.DataFrame:
with telemetry.log_time('execute_sql', dbt_config=self._config):
compiled_result = lib.compile_sql(self.project_dir, self.profiles_dir, self._profile_target, sql, config=self._config)
if hasattr(compiled_result, 'compiled_code'):
sql = compiled_result.compiled_code
else:
sql = compiled_result.compiled_sql
return lib.execute_sql(self.project_dir, self.profiles_dir, self._profile_target, sql, config=self._config)
def _load_environment(self, name: str) -> 'BaseEnvironment':
if (self._environments is None):
self._environments = parse.load_environments(self.project_dir)
return self._environments[name] |
class Car():
entry_point = 'config'
def __init__(self, names, root_path, config_paths, variables=None):
if (variables is None):
variables = {}
if isinstance(names, str):
self.names = [names]
else:
self.names = names
self.root_path = root_path
self.config_paths = config_paths
self.variables = variables
def mandatory_var(self, name):
try:
return self.variables[name]
except KeyError:
raise exceptions.SystemSetupError(f'Car "{self.name}" requires config key "{name}"')
@property
def name(self):
return '+'.join(self.names)
@property
def config(self):
return self.name
@property
def safe_name(self):
return '_'.join(self.names)
def __str__(self):
return self.name |
def extractTheirontreebloomsCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
class TestDescriptors(unittest.TestCase):
def test_set_name(self):
class D():
def __set_name__(self, owner, name):
self.name = (name + 'x')
def __get__(self, instance, owner):
if (instance is not None):
return 1
return self
@dataclass
class C():
c: int = D()
self.assertEqual(C.c.name, 'cx')
@dataclass
class C():
c: int = field(default=D(), init=False)
self.assertEqual(C.c.name, 'cx')
self.assertEqual(C().c, 1)
def test_non_descriptor(self):
class D():
def __set_name__(self, owner, name):
self.name = (name + 'x')
@dataclass
class C():
c: int = field(default=D(), init=False)
self.assertEqual(C.c.name, 'cx')
def test_lookup_on_instance(self):
class D():
pass
d = D()
d.__set_name__ = Mock()
@dataclass
class C():
i: int = field(default=d, init=False)
self.assertEqual(d.__set_name__.call_count, 0)
def test_lookup_on_class(self):
class D():
pass
D.__set_name__ = Mock()
@dataclass
class C():
i: int = field(default=D(), init=False)
self.assertEqual(D.__set_name__.call_count, 1) |
def fetch_psc_hierarchy(psc_code: str) -> dict:
codes = [psc_code, psc_code[:2], psc_code[:1], (psc_code[:3] if (psc_code[0] == 'A') else None)]
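# codes: [full PSC code, 2-character midtier prefix, 1-character toptier prefix, 3-character R&D subtier prefix (only when the code starts with 'A')]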
toptier_code = {}
midtier_code = {}
subtier_code = {}
base_code = {}
if psc_code[0].isalpha():
try:
psc_top = PSC.objects.get(code=codes[2])
toptier_code = {'code': psc_top.code, 'description': psc_top.description}
except PSC.DoesNotExist:
pass
try:
psc_mid = PSC.objects.get(code=codes[1])
midtier_code = {'code': psc_mid.code, 'description': psc_mid.description}
except PSC.DoesNotExist:
pass
try:
psc = PSC.objects.get(code=codes[0])
base_code = {'code': psc.code, 'description': psc.description}
except PSC.DoesNotExist:
pass
if (codes[3] is not None):
try:
psc_rd = PSC.objects.get(code=codes[3])
subtier_code = {'code': psc_rd.code, 'description': psc_rd.description}
except PSC.DoesNotExist:
pass
results = {'toptier_code': toptier_code, 'midtier_code': midtier_code, 'subtier_code': subtier_code, 'base_code': base_code}
return results |
class Migration(migrations.Migration):
dependencies = [('forum', '0013_auto__1809')]
operations = [migrations.AddField(model_name='comment', name='url', field=models.CharField(blank=True, default='', max_length=60, null=True)), migrations.AddField(model_name='parent_comment', name='url', field=models.CharField(blank=True, default='', max_length=60, null=True))] |
class TestSchemaSubsetFilter(unittest.TestCase):
def setUp(self):
self.maxDiff = None
@mock.patch('schema.loader.warn')
def test_load_subset_definitions_raises_when_no_subset_found(self, mock_warn):
with self.assertRaisesRegex(ValueError, "--subset specified, but no subsets found in \\['foo\\*.yml'\\]"):
subset_filter.load_subset_definitions(['foo*.yml'])
def test_basic_merging(self):
basics = {'base': {'fields': '*'}, 'event': {}}
network = {'network': {'fields': '*'}}
subsets = {}
subset_filter.merge_subsets(subsets, basics)
subset_filter.merge_subsets(subsets, network)
expected_subsets = {**basics, **network}
self.assertEqual(subsets, expected_subsets)
def test_merging_superset(self):
supersets = {'log': {'fields': '*'}, 'process': {'fields': '*'}}
superseded = {'log': {'fields': {'syslog': {'fields': '*'}}}, 'process': {'fields': {'parent': {'fields': '*'}}}}
subsets = {}
subset_filter.merge_subsets(subsets, supersets)
subset_filter.merge_subsets(subsets, superseded)
self.assertEqual(subsets, supersets)
subsets = {}
subset_filter.merge_subsets(subsets, superseded)
subset_filter.merge_subsets(subsets, supersets)
self.assertEqual(subsets, supersets)
def test_subset_option_merging(self):
subset1 = {'log': {'enabled': False}, 'network': {'enabled': False, 'fields': '*'}, 'base': {'fields': {'message': {'index': False}}}}
subset2 = {'log': {'enabled': False}, 'network': {'fields': '*'}, 'base': {'fields': {'message': {}}}}
expected = {'log': {'enabled': False}, 'network': {'fields': '*'}, 'base': {'fields': {'message': {}}}}
merged = {}
subset_filter.merge_subsets(merged, subset1)
subset_filter.merge_subsets(merged, subset2)
self.assertEqual(merged, expected)
def test_strip_non_ecs_options(self):
subset = {'log': {'custom_option': True, 'enabled': False, 'fields': {'syslog': {'custom_option': True}}}}
expected = {'log': {'enabled': False, 'fields': {'syslog': {}}}}
subset_filter.strip_non_ecs_options(subset)
self.assertEqual(subset, expected)
def schema_log(self):
return {'log': {'schema_details': {'root': False}, 'field_details': {'name': 'log', 'type': 'group'}, 'fields': {'level': {'field_details': {'name': 'level', 'type': 'keyword'}}, 'origin': {'field_details': {'name': 'origin', 'intermediate': True, 'type': 'object'}, 'fields': {'function': {'field_details': {'name': 'function', 'type': 'keyword'}}, 'foo': {'field_details': {'name': 'foo', 'type': 'keyword'}}}}}}}
def test_extract_matching_fields_explicit_all_fields_notation(self):
subset = {'log': {'fields': '*'}}
filtered_fields = subset_filter.extract_matching_fields(self.schema_log(), subset)
self.assertEqual(filtered_fields, self.schema_log())
def test_extract_matching_fields_subfields_only_notation(self):
subset = {'log': {'fields': {'origin': {'fields': '*'}}}}
filtered_fields = subset_filter.extract_matching_fields(self.schema_log(), subset)
expected_fields = {'log': {'schema_details': {'root': False}, 'field_details': {'name': 'log', 'type': 'group'}, 'fields': {'origin': {'field_details': {'name': 'origin', 'intermediate': True, 'type': 'object'}, 'fields': {'function': {'field_details': {'name': 'function', 'type': 'keyword'}}, 'foo': {'field_details': {'name': 'foo', 'type': 'keyword'}}}}}}}
self.assertEqual(filtered_fields, expected_fields)
def test_extract_matching_individual_field(self):
subset = {'log': {'fields': {'origin': {'fields': {'function': {}}}}}}
filtered_fields = subset_filter.extract_matching_fields(self.schema_log(), subset)
expected_fields = {'log': {'schema_details': {'root': False}, 'field_details': {'name': 'log', 'type': 'group'}, 'fields': {'origin': {'field_details': {'name': 'origin', 'intermediate': True, 'type': 'object'}, 'fields': {'function': {'field_details': {'name': 'function', 'type': 'keyword'}}}}}}}
self.assertEqual(filtered_fields, expected_fields)
def test_extract_field_with_options(self):
subset = {'log': {'enabled': False, 'fields': {'level': {'custom_option': True}, 'origin': {'custom_option': False, 'fields': {'function': {}}}}}}
filtered_fields = subset_filter.extract_matching_fields(self.schema_log(), subset)
expected_fields = {'log': {'schema_details': {'root': False}, 'field_details': {'name': 'log', 'type': 'group', 'enabled': False}, 'fields': {'level': {'field_details': {'name': 'level', 'type': 'keyword', 'custom_option': True}}, 'origin': {'field_details': {'name': 'origin', 'intermediate': False, 'custom_option': False, 'description': 'Intermediate field included by adding option with subset', 'level': 'custom', 'type': 'object', 'short': 'Intermediate field included by adding option with subset', 'normalize': []}, 'fields': {'function': {'field_details': {'name': 'function', 'type': 'keyword'}}}}}}}
self.assertEqual(filtered_fields, expected_fields)
def test_generate_docs_only_paths_no_entries(self):
subset = {'process': {'fields': {'same_as_process': {}, 'meta_entry': {'fields': {'type': {}}}}}}
docs_only_paths_empty = subset_filter.generate_docs_only_paths(subset)
self.assertEqual(docs_only_paths_empty, [])
def test_generate_docs_only_paths_with_entries(self):
subset = {'process': {'fields': {'same_as_process': {'docs_only': True}, 'meta_entry': {'fields': {'type': {'docs_only': True}}}}}}
expected_list = ['process.same_as_process', 'process.meta_entry.type']
docs_only_paths = subset_filter.generate_docs_only_paths(subset)
self.assertEqual(docs_only_paths, expected_list)
def test_generate_docs_only_subset(self):
paths = ['process.same_as_process', 'process.meta_entry.type']
expected_subset = {'process': {'fields': {'same_as_process': {}, 'meta_entry': {'fields': {'type': {}}}}}}
subset = subset_filter.generate_docs_only_subset(paths)
self.assertEqual(expected_subset, subset)
def test_remove_docs_only_entries(self):
paths = ['process.same_as_process', 'process.meta_entry.type']
orig_subset = {'process': {'fields': {'same_as_process': {'docs_only': True}, 'meta_entry': {'fields': {'type': {'docs_only': True}}}, 'name': {}}}}
expected_subset = {'process': {'fields': {'name': {}}}}
result = subset_filter.remove_docs_only_entries(paths, orig_subset)
self.assertEqual(result, expected_subset) |
def evaluate(actor, environment, eval_episodes=10):
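# Fetch the latest policy parameters from the learner before running the evaluation episodes.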
actor.update(wait=True)
avg_reward = 0.0
for _ in range(eval_episodes):
timestep = environment.reset()
actor.observe_first(timestep)
while (not timestep.last()):
action = actor.select_action(timestep.observation)
timestep = environment.step(action)
actor.observe(action, timestep)
avg_reward += timestep.reward
avg_reward /= eval_episodes
d4rl_score = environment.get_normalized_score(avg_reward)
logging.info('')
logging.info('Evaluation over %d episodes: %.3f', eval_episodes, d4rl_score)
logging.info('')
return d4rl_score |
class NotificationSchema(Schema):
class Meta():
type_ = 'notification'
self_view = 'v1.notification_detail'
self_view_kwargs = {'id': '<id>'}
inflect = dasherize
id = fields.Str(dump_only=True)
created_at = fields.DateTime(dump_only=True)
is_read = fields.Boolean()
content = fields.Nested(NotificationContentSchema)
user = Relationship(self_view='v1.notification_user', self_view_kwargs={'id': '<id>'}, related_view='v1.user_detail', related_view_kwargs={'notification_id': '<id>'}, schema='UserSchema', type_='user') |
def test_generate_ass_will_end_up_with_an_empty_scene(create_test_data, store_local_session, create_pymel, create_maya_env):
data = create_test_data
pm = create_pymel
gen = RepresentationGenerator(version=data['building1_yapi_model_main_v003'])
gen.generate_ass()
assert (pm.sceneName() == '') |
def calc_losses(epochs=500, seed=0, learn_rate=0.001, nr_hash_vector=1000, n_hash=3, n_words=1000, size_vector=10):
random.seed(seed)
nb_epoch = epochs
learn_rate = learn_rate
nr_hash_vector = nr_hash_vector
words = [str(i) for i in range(n_words)]
true_vectors = numpy.random.uniform((- 0.1), 0.1, (len(words), size_vector))
hash_vectors = numpy.random.uniform((- 0.1), 0.1, (nr_hash_vector, size_vector))
examples = list(zip(words, true_vectors))
losses = []
for epoch in range(nb_epoch):
random.shuffle(examples)
loss = 0.0
for (word, truth) in examples:
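# Hashing trick: each word selects n_hash rows of the shared hash table via murmurhash; the sum of those rows is its estimated vector, which is nudged towards the true vector.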
keys = [(mmh3.hash(word, k) % nr_hash_vector) for k in range(n_hash)]
hash_vector = reduce((lambda a, b: (a + b)), [hash_vectors[k] for k in keys])
diff = (hash_vector - truth)
for key in keys:
hash_vectors[key] -= (learn_rate * diff)
loss += (diff ** 2).sum()
losses.append(loss)
return losses |
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = '\nvlans:\n 100:\n description: "tagged"\n faucet_vips: ["10.0.0.254/24"]\n'
CONFIG = ('\n nd_neighbor_timeout: 2\n max_resolve_backoff_time: 1\n proactive_learn_v4: True\n' + CONFIG_TAGGED_BOILER)
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
(first_host, second_host) = host_pair
first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(self.scrape_prometheus_var('vlan_neighbors', {'ipv': '4', 'vlan': '100'}), 1) |
def calc_subnet(ip, prefix, is_ipv6=False):
if is_ipv6:
af = socket.AF_INET6
n = 16
else:
af = socket.AF_INET
n = 4
msk = ip_prefix_convert(prefix, is_ipv6=is_ipv6)
try:
n_ip = socket.inet_pton(af, ip)
n_msk = socket.inet_pton(af, msk)
except (OSError, TypeError, ValueError):
return None
results = []
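# AND each byte of the address with the mask to obtain the network (subnet) address.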
for i in range(n):
results.append((n_ip[i] & n_msk[i]))
return socket.inet_ntop(af, bytes(results)) |
def test_outbox_put():
agent_address = 'Agent0'
receiver_address = 'Agent1'
msg = DefaultMessage(dialogue_reference=('', ''), message_id=1, target=0, performative=DefaultMessage.Performative.BYTES, content=b'hello')
msg.to = receiver_address
msg.sender = agent_address
dummy_connection = _make_dummy_connection()
multiplexer = Multiplexer([dummy_connection])
outbox = OutBox(multiplexer)
inbox = InBox(multiplexer)
multiplexer.connect()
wait_for_condition((lambda : dummy_connection.is_connected), 15, 'Connection is not connected')
envelope = Envelope(to=receiver_address, sender=agent_address, message=msg)
outbox.put(envelope)
wait_for_condition((lambda : (not inbox.empty())), 15, 'Inbox must not be empty after putting an envelope')
multiplexer.disconnect() |
def test_image_spec(mock_image_spec_builder):
image_spec = ImageSpec(name='FLYTEKIT', builder='dummy', packages=['pandas'], apt_packages=['git'], python_version='3.8', registry='', base_image='cr.flyte.org/flyteorg/flytekit:py3.8-latest', cuda='11.2.2', cudnn='8', requirements=REQUIREMENT_FILE, registry_config=REGISTRY_CONFIG_FILE)
image_spec = image_spec.with_commands('echo hello')
image_spec = image_spec.with_packages('numpy')
image_spec = image_spec.with_apt_packages('wget')
assert (image_spec.python_version == '3.8')
assert (image_spec.base_image == 'cr.flyte.org/flyteorg/flytekit:py3.8-latest')
assert (image_spec.packages == ['pandas', 'numpy'])
assert (image_spec.apt_packages == ['git', 'wget'])
assert (image_spec.registry == '')
assert (image_spec.requirements == REQUIREMENT_FILE)
assert (image_spec.registry_config == REGISTRY_CONFIG_FILE)
assert (image_spec.cuda == '11.2.2')
assert (image_spec.cudnn == '8')
assert (image_spec.name == 'flytekit')
assert (image_spec.builder == 'dummy')
assert (image_spec.source_root is None)
assert (image_spec.env is None)
assert (image_spec.pip_index is None)
assert (image_spec.is_container() is True)
assert (image_spec.commands == ['echo hello'])
tag = calculate_hash_from_image_spec(image_spec)
assert (image_spec.image_name() == f'flytekit:{tag}')
ctx = context_manager.FlyteContext.current_context()
with context_manager.FlyteContextManager.with_context(ctx.with_execution_state(ctx.execution_state.with_params(mode=ExecutionState.Mode.TASK_EXECUTION))):
os.environ[_F_IMG_ID] = 'flytekit:123'
assert (image_spec.is_container() is False)
ImageBuildEngine.register('dummy', mock_image_spec_builder)
ImageBuildEngine.build(image_spec)
assert ('dummy' in ImageBuildEngine._REGISTRY)
assert (calculate_hash_from_image_spec(image_spec) == tag)
assert (image_spec.exist() is False)
del ImageBuildEngine._REGISTRY['dummy']
ImageBuildEngine.build(image_spec)
with pytest.raises(Exception):
image_spec.builder = 'flyte'
ImageBuildEngine.build(image_spec)
image_spec.with_commands('ls')
assert (image_spec.commands == ['echo hello']) |
class AvatarProfilePicture(AbstractCrudObject):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAvatarProfilePicture = True
super(AvatarProfilePicture, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
id = 'id'
url = 'url'
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if ((batch is None) and ((success is not None) or (failure is not None))):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {}
enums = {}
request = FacebookRequest(node_id=self['id'], method='GET', endpoint='/', api=self._api, param_checker=TypeChecker(param_types, enums), target_class=AvatarProfilePicture, api_type='NODE', response_parser=ObjectParser(reuse_object=self))
request.add_params(params)
request.add_fields(fields)
if (batch is not None):
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {'id': 'string', 'url': 'string'}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info |
def upgrade():
op.add_column('currentprivacypreference', sa.Column('fides_user_device_provided_identity_id', sa.String(), nullable=True))
op.create_unique_constraint('fides_user_device_identity_privacy_notice', 'currentprivacypreference', ['fides_user_device_provided_identity_id', 'privacy_notice_id'])
op.create_index(op.f('ix_currentprivacypreference_fides_user_device_provided_identity_id'), 'currentprivacypreference', ['fides_user_device_provided_identity_id'], unique=False)
op.create_foreign_key('currentprivacypreference_fides_user_device_provided_identi_fkey', 'currentprivacypreference', 'providedidentity', ['fides_user_device_provided_identity_id'], ['id'])
op.add_column('privacypreferencehistory', sa.Column('fides_user_device', sqlalchemy_utils.types.encrypted.encrypted_type.StringEncryptedType(), nullable=True))
op.add_column('privacypreferencehistory', sa.Column('fides_user_device_provided_identity_id', sa.String(), nullable=True))
op.add_column('privacypreferencehistory', sa.Column('hashed_fides_user_device', sa.String(), nullable=True))
op.create_index(op.f('ix_privacypreferencehistory_fides_user_device_provided_identity_id'), 'privacypreferencehistory', ['fides_user_device_provided_identity_id'], unique=False)
op.create_index(op.f('ix_privacypreferencehistory_hashed_fides_user_device'), 'privacypreferencehistory', ['hashed_fides_user_device'], unique=False)
op.create_foreign_key('privacypreferencehistory_fides_user_device_provided_identi_fkey', 'privacypreferencehistory', 'providedidentity', ['fides_user_device_provided_identity_id'], ['id']) |
def test_wf1_branches():
@task
def t1(a: int) -> typing.NamedTuple('OutputsBC', t1_int_output=int, c=str):
return ((a + 2), 'world')
@task
def t2(a: str) -> str:
return a
@workflow
def my_wf(a: int, b: str) -> (int, str):
(x, y) = t1(a=a)
d = conditional('test1').if_((x == 4)).then(t2(a=b)).elif_((x >= 5)).then(t2(a=y)).else_().fail('Unable to choose branch')
f = conditional('test2').if_((d == 'hello ')).then(t2(a='It is hello')).else_().then(t2(a='Not Hello!'))
return (x, f)
x = my_wf(a=5, b='hello ')
assert (x == (7, 'Not Hello!'))
x = my_wf(a=2, b='hello ')
assert (x == (4, 'It is hello'))
assert (context_manager.FlyteContextManager.size() == 1) |
@pytest.mark.parametrize('uptime,warning_expected', [((notify.UPTIME_GRACE_PERIOD + 1), True), ((notify.UPTIME_GRACE_PERIOD - 1), False)])
@mock.patch('Notify.sdlog.error')
@mock.patch('Notify.sdlog.warning')
@mock.patch('Notify.sdlog.info')
def test_warning_shown_if_warning_threshold_exceeded(mocked_info, mocked_warning, mocked_error, uptime, warning_expected):
with TemporaryDirectory() as tmpdir, mock.patch('Notify.LAST_UPDATED_FILE', os.path.join(tmpdir, 'sdw-last-updated')):
historic_date = datetime.date(2013, 6, 5).strftime(updater.DATE_FORMAT)
with open(notify.LAST_UPDATED_FILE, 'w') as f:
f.write(historic_date)
with mock.patch('Notify.get_uptime_seconds') as mocked_uptime:
mocked_uptime.return_value = uptime
warning_should_be_shown = notify.is_update_check_necessary()
assert (warning_should_be_shown is warning_expected)
assert (not mocked_error.called)
if (warning_expected is True):
mocked_warning.assert_called_once()
warning_string = mocked_warning.call_args[0][0]
assert (re.search(UPDATER_WARNING_REGEX, warning_string) is not None)
else:
assert (not mocked_warning.called)
mocked_info.assert_called_once()
info_string = mocked_info.call_args[0][0]
assert (re.search(GRACE_PERIOD_REGEX, info_string) is not None) |
def thread_imu():
print('IMU: thread starting ..')
rospy.Subscriber('uav_imu', Imu, rover.ros_imu_callback)
rate = rospy.Rate(100)
freq = 100.0
t = datetime.datetime.now()
t_pre = datetime.datetime.now()
avg_number = 100
while ((not rospy.is_shutdown()) and rover.on):
t = datetime.datetime.now()
dt = (t - t_pre).total_seconds()
if (dt < 1e-06):
continue
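# Exponentially weighted running average of the callback-loop frequency (effective window of about avg_number samples).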
freq = (((freq * (avg_number - 1)) + (1 / dt)) / avg_number)
t_pre = t
rover.freq_imu = freq
rate.sleep()
print('IMU: thread closed!') |
class VelibStation(GbfsStation):
def __init__(self, info, *args, **kwargs):
super(VelibStation, self).__init__(info, *args, **kwargs)
self.extra['uid'] = info['stationCode']
self.extra['station_id'] = info['station_id']
self.extra['banking'] = ('creditcard' in self.extra.get('payment', []))
self.extra['payment-terminal'] = self.extra['banking']
for bt in info.get('num_bikes_available_types', []):
if (next(iter(bt)) != 'ebike'):
continue
self.extra['ebikes'] = int(bt['ebike'])
break |
def add_noise_(x: Tensor, color: NoiseColor, min_level: float, max_level: float, p=1.0):
n = x.size(0)
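# Per-sample noise level: random_mask (presumably a keep/drop mask applied with probability p) times each sample's own standard deviation times a uniform draw in [min_level, max_level], reshaped to broadcast over the trailing dimensions.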
noise_levels = ((random_mask([n], p, device=x.device) * x.reshape(n, (- 1)).std(dim=1)) * ((torch.rand([n], device=x.device) * (max_level - min_level)) + min_level)).reshape(([n] + ([1] * (x.dim() - 1))))
x += (noise_levels * (colored_noise(x.shape, exponent=color, device=x.device) - 0.5))
return x |
def calibration_parameters(**kwargs):
if ((kwargs.get('id', None) is not None) and (not (MIN_ID <= kwargs['id'] <= MAX_ID))):
raise MyPalletizedataException('The id not right, should be {0} ~ {1}, but received {2}.'.format(MIN_ID, MAX_ID, kwargs['id']))
if ((kwargs.get('degree', None) is not None) and (not (MIN_ANGLE <= kwargs['degree'] <= MAX_ANGLE))):
raise MyPalletizedataException('degree value not right, should be {0} ~ {1}, but received {2}'.format(MIN_ANGLE, MAX_ANGLE, kwargs['degree']))
if (kwargs.get('degrees', None) is not None):
degrees = kwargs['degrees']
if (not isinstance(degrees, list)):
raise MyPalletizedataException('`degrees` must be a list.')
if (len(degrees) not in [3, 4]):
raise MyPalletizedataException('The length of `degrees` must be 3 / 4.')
for (idx, angle) in enumerate(degrees):
if (not (MIN_ANGLE <= angle <= MAX_ANGLE)):
raise MyPalletizedataException('Has invalid degree value, error on index {0}. Degree should be {1} ~ {2}.'.format(idx, MIN_ANGLE, MAX_ANGLE))
if (kwargs.get('coords', None) is not None):
coords = kwargs['coords']
if (not isinstance(coords, list)):
raise MyPalletizedataException('`coords` must be a list.')
if (len(coords) != 4):
raise MyPalletizedataException('The length of `coords` must be 4.')
if ((kwargs.get('speed', None) is not None) and (not (0 <= kwargs['speed'] <= 100))):
raise MyPalletizedataException(('speed value not right, should be 0 ~ 100, the error speed is %s' % kwargs['speed']))
if (kwargs.get('rgb', None) is not None):
rgb_str = ['r', 'g', 'b']
for (i, v) in enumerate(kwargs['rgb']):
if (not (0 <= v <= 255)):
raise MyPalletizedataException(('The RGB value needs be 0 ~ 255, but the %s is %s' % (rgb_str[i], v))) |
class SKT_OT_insertKeyNames(Operator):
bl_idname = 'skt.insert_key_names'
bl_label = 'Insert Shape Key Names'
bl_description = 'Insert Shape Key Names from Clipboard (Each name per Row)'
bl_options = {'INTERNAL'}
def execute(self, context):
scn = context.scene
for key in context.window_manager.clipboard.split('\n'):
if len(key):
item = scn.customshapekeylist.add()
item.name = key
item.obj_type = 'STRING'
item.obj_id = len(scn.customshapekeylist)
scn.shapekeytransfer_list_index = (len(scn.customshapekeylist) - 1)
self.report({'INFO'}, 'Added shape key names from Clipboard')
return {'FINISHED'} |
def print_log_urls(config: Dict[(str, Any)], instance_id: str, logger: logging.Logger, all_stages: bool=False, failed_only: bool=False) -> None:
pc_service = build_private_computation_service(config['private_computation'], config['mpc'], config['pid'], config.get('post_processing_handlers', {}), config.get('pid_post_processing_handlers', {}))
log_urls = pc_service.get_log_urls(instance_or_id=instance_id, all_stages=all_stages, failed_only=failed_only)
if (not log_urls):
logger.warning(f'Unable to get log container urls for instance {instance_id}')
return
print(' print log urls ')
for (stage, log_url) in log_urls.items():
print(f'[{stage}]: {log_url}') |
class CommitteeReportsPacParty(CommitteeReports):
__tablename__ = 'ofec_reports_pac_party_mv'
all_loans_received_period = db.Column(db.Numeric(30, 2))
all_loans_received_ytd = db.Column(db.Numeric(30, 2))
allocated_federal_election_levin_share_period = db.Column(db.Numeric(30, 2))
calendar_ytd = db.Column(db.Integer)
cash_on_hand_beginning_calendar_ytd = db.Column(db.Numeric(30, 2))
cash_on_hand_close_ytd = db.Column(db.Numeric(30, 2))
coordinated_expenditures_by_party_committee_period = db.Column('coordinated_expenditures_by_party_committee_period', db.Numeric(30, 2))
coordinated_expenditures_by_party_committee_ytd = db.Column(db.Numeric(30, 2))
fed_candidate_committee_contribution_refunds_ytd = db.Column(db.Numeric(30, 2))
fed_candidate_committee_contributions_period = db.Column(db.Numeric(30, 2))
fed_candidate_committee_contributions_ytd = db.Column(db.Numeric(30, 2))
fed_candidate_contribution_refunds_period = db.Column(db.Numeric(30, 2))
independent_expenditures_period = db.Column('independent_expenditures_period', db.Numeric(30, 2))
independent_expenditures_ytd = db.Column(db.Numeric(30, 2))
loan_repayments_made_period = db.Column(db.Numeric(30, 2))
loan_repayments_made_ytd = db.Column(db.Numeric(30, 2))
loan_repayments_received_period = db.Column(db.Numeric(30, 2))
loan_repayments_received_ytd = db.Column(db.Numeric(30, 2))
loans_made_period = db.Column(db.Numeric(30, 2))
loans_made_ytd = db.Column(db.Numeric(30, 2))
net_contributions_period = db.Column(db.Numeric(30, 2), index=True)
net_contributions_ytd = db.Column(db.Numeric(30, 2))
net_operating_expenditures_period = db.Column(db.Numeric(30, 2))
net_operating_expenditures_ytd = db.Column(db.Numeric(30, 2))
non_allocated_fed_election_activity_period = db.Column(db.Numeric(30, 2))
non_allocated_fed_election_activity_ytd = db.Column(db.Numeric(30, 2))
nonfed_share_allocated_disbursements_period = db.Column(db.Numeric(30, 2))
other_fed_operating_expenditures_period = db.Column(db.Numeric(30, 2))
other_fed_operating_expenditures_ytd = db.Column(db.Numeric(30, 2))
other_fed_receipts_period = db.Column(db.Numeric(30, 2))
other_fed_receipts_ytd = db.Column(db.Numeric(30, 2))
shared_fed_activity_nonfed_ytd = db.Column(db.Numeric(30, 2))
shared_fed_activity_period = db.Column(db.Numeric(30, 2))
shared_fed_activity_ytd = db.Column(db.Numeric(30, 2))
shared_fed_operating_expenditures_period = db.Column(db.Numeric(30, 2))
shared_fed_operating_expenditures_ytd = db.Column(db.Numeric(30, 2))
shared_nonfed_operating_expenditures_period = db.Column(db.Numeric(30, 2))
shared_nonfed_operating_expenditures_ytd = db.Column(db.Numeric(30, 2))
subtotal_summary_page_period = db.Column(db.Numeric(30, 2))
subtotal_summary_ytd = db.Column(db.Numeric(30, 2))
total_fed_disbursements_period = db.Column(db.Numeric(30, 2))
total_fed_disbursements_ytd = db.Column(db.Numeric(30, 2))
total_fed_election_activity_period = db.Column(db.Numeric(30, 2))
total_fed_election_activity_ytd = db.Column(db.Numeric(30, 2))
total_fed_operating_expenditures_period = db.Column(db.Numeric(30, 2))
total_fed_operating_expenditures_ytd = db.Column(db.Numeric(30, 2))
total_fed_receipts_period = db.Column(db.Numeric(30, 2))
total_fed_receipts_ytd = db.Column(db.Numeric(30, 2))
total_nonfed_transfers_period = db.Column(db.Numeric(30, 2))
total_nonfed_transfers_ytd = db.Column(db.Numeric(30, 2))
total_operating_expenditures_period = db.Column(db.Numeric(30, 2))
total_operating_expenditures_ytd = db.Column(db.Numeric(30, 2))
transfers_from_affiliated_party_period = db.Column(db.Numeric(30, 2))
transfers_from_affiliated_party_ytd = db.Column(db.Numeric(30, 2))
transfers_from_nonfed_account_period = db.Column(db.Numeric(30, 2))
transfers_from_nonfed_account_ytd = db.Column(db.Numeric(30, 2))
transfers_from_nonfed_levin_period = db.Column(db.Numeric(30, 2))
transfers_from_nonfed_levin_ytd = db.Column(db.Numeric(30, 2))
transfers_to_affiliated_committee_period = db.Column(db.Numeric(30, 2))
transfers_to_affilitated_committees_ytd = db.Column(db.Numeric(30, 2))
report_form = db.Column('form_tp', db.String) |
class OptionSeriesColumnStatesInactive(Options):
@property
def animation(self) -> 'OptionSeriesColumnStatesInactiveAnimation':
return self._config_sub_data('animation', OptionSeriesColumnStatesInactiveAnimation)
@property
def enabled(self):
return self._config_get(True)
@enabled.setter
def enabled(self, flag: bool):
self._config(flag, js_type=False)
@property
def opacity(self):
return self._config_get(0.2)
@opacity.setter
def opacity(self, num: float):
self._config(num, js_type=False) |
def unwrap_redirect(urlin, resolve_redirects=True):
try:
url = unshortenit.UnshortenIt(urlcache=CacheObject()).unshorten(urlin, resolve_30x=resolve_redirects)
return url
except (unshortenit.NotFound, unshortenit.UnshortenFailed, requests.exceptions.ConnectionError):
return None |
def test_data_quality_test_target_prediction_correlation() -> None:
test_dataset = pd.DataFrame({'category_feature': ['n', 'd', 'p', 'n'], 'numerical_feature': [0, 1, 2, 5], 'target': [0, 0, 0, 1], 'prediction': [0, 0, 1, 1]})
mapping = ColumnMapping(categorical_features=['category_feature'], numerical_features=['numerical_feature'])
suite = TestSuite(tests=[TestTargetPredictionCorrelation(gt=0.5, method='cramer_v')])
suite.run(current_data=test_dataset, reference_data=None, column_mapping=mapping)
assert suite
assert suite.show()
assert suite.json() |
def query_cache(db_conn_cur: sqlite3.Cursor, table: str, op_type: str, op_keys: str, query_template: str, algo: str=None, device: str=None, entry_id: int=None, suppress_print: bool=False, exec_key: str=None, dtype_acc: str=None) -> List[Tuple[(int, str, str, str, str, str)]]:
exec_entry_sha1 = None
if ((op_keys is not None) and (exec_key is None)):
if op_type.startswith('gemm'):
exec_key = make_gemm_exec_key(op_keys)
elif op_type.startswith('bmm'):
exec_key = make_bmm_exec_key(op_keys)
elif op_type.startswith('group_gemm'):
exec_key = make_group_gemm_exec_key(op_keys)
else:
raise RuntimeError(('invalid op_type: ' + op_type))
if (exec_key is not None):
if (not suppress_print):
_LOGGER.info("exec_key: '%s'", exec_key)
exec_entry_sha1 = hashlib.sha1(exec_key.encode('utf-8')).hexdigest()
if (not suppress_print):
_LOGGER.info("exec_sha1: '%s'", exec_entry_sha1)
query_args = {'table': table, 'op_type': op_type}
query_args['has_exec_entry_sha1'] = False
if (exec_entry_sha1 is not None):
query_args['exec_entry_sha1'] = exec_entry_sha1
query_args['has_exec_entry_sha1'] = True
query_args['has_device'] = False
if (device is not None):
query_args['device'] = device
query_args['has_device'] = True
query_args['has_entry_id'] = False
if (entry_id is not None):
query_args['id'] = entry_id
query_args['has_entry_id'] = True
query_args['has_dtype_acc'] = False
if (dtype_acc is not None):
query_args['dtype_acc'] = dtype_acc
query_args['has_dtype_acc'] = True
query_args['has_algo'] = False
if (algo is not None):
query_args['algo'] = algo
query_args['has_algo'] = True
query = query_template.render(**query_args)
if (not suppress_print):
print('query:{}'.format(query))
db_conn_cur.execute(query)
entries = db_conn_cur.fetchall()
if (not suppress_print):
_LOGGER.info('entries: id, op_type, algo, device, exec_entry')
for entry in entries:
if (not suppress_print):
print('entry: {}'.format(entry))
return entries |
class MessageCommand():
name: str = ''
callable_name: str = ''
args: Tuple = tuple()
kwargs: Mapping[(str, Any)] = {}
def __init__(self, name: str, callable_name: str, args: Collection[Any]=None, kwargs: Optional[Mapping[(str, Any)]]=None):
self.name = name
self.callable_name = callable_name
if (args is not None):
self.args = tuple(args)
if (kwargs is not None):
self.kwargs = dict(kwargs)
self.verify()
def __str__(self):
return '<MessageCommand: {name}, {callable_name}({params})>'.format(name=self.name, callable_name=self.callable_name, params=', '.join((self.args + tuple((('%r=%r' % i) for i in self.kwargs.items())))))
def verify(self):
assert (isinstance(self.name, str) and self.name), f'name {self.name!r} must be a non-empty string.'
assert (isinstance(self.callable_name, str) and self.callable_name), f'callable {self.callable_name!r} must be a non-empty string.'
assert isinstance(self.args, CCollection), f'args {self.args!r} must be a collection.'
assert isinstance(self.kwargs, CMapping), f'kwargs {self.kwargs!r} must be a mapping.' |
class UpdateThread(QObject, threading.Thread):
update_signal = Signal(MasterInfo)
master_errors_signal = Signal(str, list)
error_signal = Signal(str, str)
timediff_signal = Signal(str, float)
username_signal = Signal(str, str)
def __init__(self, monitoruri, masteruri, delayed_exec=0.0, parent=None):
QObject.__init__(self)
threading.Thread.__init__(self)
self._monitoruri = monitoruri
self._masteruri = masteruri
self._delayed_exec = delayed_exec
self.setDaemon(True)
def run(self):
try:
delay = ((self._delayed_exec + 0.5) + random.random())
time.sleep(delay)
socket.setdefaulttimeout(25)
remote_monitor = xmlrpcclient.ServerProxy(self._monitoruri)
try:
(muri, errors) = remote_monitor.masterErrors()
self.master_errors_signal.emit(muri, errors)
except xmlrpcclient.Fault as _err:
rospy.logwarn(('Older master_discovery on %s detected. It does not support master error reports!' % self._masteruri))
try:
myts = time.time()
(muri, remote_ts) = remote_monitor.getCurrentTime()
self.timediff_signal.emit(muri, ((remote_ts - myts) - ((time.time() - myts) / 2.0)))
except xmlrpcclient.Fault as _errts:
rospy.logwarn(('Older master_discovery on %s detected. It does not support getCurrentTime!' % self._masteruri))
try:
(muri, username) = remote_monitor.getUser()
self.username_signal.emit(muri, username)
except xmlrpcclient.Fault as _errts:
rospy.logwarn(('Older master_discovery on %s detected. It does not support getUser!' % self._masteruri))
remote_info = remote_monitor.masterInfo()
master_info = MasterInfo.from_list(remote_info)
master_info.check_ts = time.time()
self.update_signal.emit(master_info)
except Exception:
import traceback
formatted_lines = traceback.format_exc(1).splitlines()
rospy.logwarn('Cannot update ROS state, connection to %s failed:\n\t%s', utf8(self._monitoruri), formatted_lines[(- 1)])
self.error_signal.emit(self._masteruri, formatted_lines[(- 1)])
finally:
if (socket is not None):
socket.setdefaulttimeout(None) |
@jit(nopython=True, cache=const.numba_cache)
def ball_pocket_collision_coeffs(rvw, s, a, b, r, mu, m, g, R):
if ((s == const.spinning) or (s == const.pocketed) or (s == const.stationary)):
return (np.inf, np.inf, np.inf, np.inf, np.inf)
phi = ptmath.angle(rvw[1])
v = ptmath.norm3d(rvw[1])
u = get_u(rvw, R, phi, s)
K = (((- 0.5) * mu) * g)
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
ax = (K * ((u[0] * cos_phi) - (u[1] * sin_phi)))
ay = (K * ((u[0] * sin_phi) + (u[1] * cos_phi)))
(bx, by) = ((v * cos_phi), (v * sin_phi))
(cx, cy) = (rvw[(0, 0)], rvw[(0, 1)])
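# (A, B, C, D, E): coefficients (up to a common factor of 1/2) of the quartic in t whose roots are the times at which the ball centre, at (cx + bx*t + ax*t**2, cy + by*t + ay*t**2), is exactly a distance r from (a, b), presumably the pocket centre and radius.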
A = (0.5 * ((ax ** 2) + (ay ** 2)))
B = ((ax * bx) + (ay * by))
C = (((ax * (cx - a)) + (ay * (cy - b))) + (0.5 * ((bx ** 2) + (by ** 2))))
D = ((bx * (cx - a)) + (by * (cy - b)))
E = ((0.5 * (((((a ** 2) + (b ** 2)) + (cx ** 2)) + (cy ** 2)) - (r ** 2))) - ((cx * a) + (cy * b)))
return (A, B, C, D, E) |
class TD3Agent(acme.Actor, acme.VariableSource):
builder: builder_lib.TD3Builder
def __init__(self, environment_spec: specs.EnvironmentSpec, networks: Dict[(str, networks_lib.FeedForwardNetwork)], config: td3_config.TD3Config, random_key: jax_types.PRNGKey, logger: Optional[loggers.Logger]=None, counter: Optional[counting.Counter]=None):
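# Keep the requested min_replay_size locally and give the builder a value of 1; this wrapper defers learning and use of the learned policy until enough observations have been collected (see select_action and update).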
min_replay_size = config.min_replay_size
config.min_replay_size = 1
self.builder = builder_lib.TD3Builder(config)
policy = td3_networks.apply_policy_sample(networks, eval_mode=False)
replay_tables = self.builder.make_replay_tables(environment_spec, policy)
replay_server = reverb.Server(replay_tables, port=None)
replay_client = reverb.Client(f'localhost:{replay_server.port}')
(learner_key, actor_key, self._key) = jax.random.split(random_key, 3)
dataset = self.builder.make_dataset_iterator(replay_client)
learner = self.builder.make_learner(random_key=learner_key, networks=networks, dataset=dataset, environment_spec=environment_spec, logger_fn=(lambda _, steps_key=None, task=None: logger), counter=counter)
adder = self.builder.make_adder(replay_client, environment_spec, policy)
actor = self.builder.make_actor(actor_key, policy, environment_spec, adder=adder, variable_source=learner)
def random_exploration_policy(key, observation):
del observation
action_spec = environment_spec.actions
(key, subkey) = jax.random.split(key)
action_dist = tfd.Uniform(low=jnp.broadcast_to(action_spec.minimum, action_spec.shape), high=jnp.broadcast_to(action_spec.maximum, action_spec.shape))
action = action_dist.sample(seed=subkey)
return (action, key)
self._actor = actor
self._learner = learner
self._replay_server = replay_server
self._num_observations = 0
self._min_observations = min_replay_size
self._random_exploration_policy = jax.jit(random_exploration_policy, backend='cpu')
def select_action(self, observation: types.NestedArray):
if (self._num_observations > self._min_observations):
return self._actor.select_action(observation)
else:
(action, self._key) = self._random_exploration_policy(self._key, observation)
return utils.to_numpy(action)
def observe_first(self, timestep: dm_env.TimeStep):
return self._actor.observe_first(timestep)
def observe(self, action: types.NestedArray, next_timestep: dm_env.TimeStep):
self._num_observations += 1
self._actor.observe(action, next_timestep)
def update(self, wait: bool=True):
if (self._num_observations < self._min_observations):
return
self._learner.step()
self._actor.update(wait=wait)
def get_variables(self, names: Sequence[str]) -> List[types.NestedArray]:
return self._learner.get_variables(names) |
def is_any_api_key(key):
if (',' in key):
keys = key.split(',')
for k in keys:
if is_any_api_key(k):
return True
return False
else:
return (is_openai_api_key(key) or is_api2d_key(key) or is_azure_api_key(key) or is_freeai_api_key(key)) |
def qimage_function(antialiasing=True):
def antialias_func(image_func):
def qimage_conv_func(image, box_dims):
_np_image = image_func(np_from_QImage(image))
pil_image = Image.fromarray(_np_image)
if antialiasing:
pil_image.thumbnail(box_dims, Image.ANTIALIAS)
else:
pil_image.thumbnail(box_dims)
_np_image = np.array(pil_image)
image = QImage_from_np(_np_image)
return (image, _np_image)
return qimage_conv_func
return antialias_func |
def check_PTR_record(ip):
global debug
global domain_data
try:
if debug:
logging.debug('\t\t> Checking {0} ip reverse DNS hostname'.format(ip))
"""
temp_ip = ip.split('.')
temp_ip.reverse()
reverse_ip=""
for i in temp_ip:
reverse_ip = reverse_ip + i +'.'
reverse_ip = reverse_ip + 'in-addr.arpa'
"""
reverse_name = dns.reversename.from_address(ip).to_text()[:(- 1)]
return reverse_name
except Exception as inst:
print(type(inst))
print(inst.args)
print(inst)
(x, y) = inst
print('x =', x)
print('y =', y)
return '' |
class OptionSeriesVariablepieDatalabelsTextpath(Options):
@property
def attributes(self):
return self._config_get(None)
@attributes.setter
def attributes(self, value: Any):
self._config(value, js_type=False)
@property
def enabled(self):
return self._config_get(False)
@enabled.setter
def enabled(self, flag: bool):
self._config(flag, js_type=False) |
@dataclass
class CondaEnvironment(BaseEnvironment[Path], make_thread_safe=True):
packages: List[str]
inherit_from_local: bool = False
@classmethod
def from_config(cls, config: Dict[(str, Any)]) -> CondaEnvironment:
user_provided_packages = config.get('packages', [])
for raw_requirement in user_provided_packages:
raw_requirement = raw_requirement.replace(' ', '')
if (not raw_requirement.startswith('python')):
continue
version_identifier = raw_requirement[len('python'):]
if (version_identifier and (version_identifier[0] in ('=', '<', '>', '!'))):
raise RuntimeError('Conda environments cannot customize their Python version.')
python_version = sysconfig.get_python_version()
final_packages = (user_provided_packages + [f'python={python_version}'])
inherit_from_local = config.get('_inherit_from_local', False)
return cls(final_packages, inherit_from_local=inherit_from_local)
@property
def key(self) -> str:
return hashlib.sha256(' '.join(self.packages).encode()).hexdigest()
def _get_or_create(self) -> Path:
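# Conda environments are cached on disk under a directory named by the SHA-256 of the package list; reuse it if it already exists.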
env_path = (_BASE_CONDA_DIR / self.key)
if env_path.exists():
return env_path
with rmdir_on_fail(env_path):
self._run_conda('create', '--yes', '--prefix', env_path, *self.packages)
return env_path
def _run_conda(self, *args, **kwargs) -> None:
log_env(self, 'Installing conda environment')
conda_executable = get_conda_executable()
subprocess.check_call([conda_executable, *args], **kwargs, text=True)
def open_connection(self, conn_info: Path) -> DualPythonIPC:
if self.inherit_from_local:
primary_env_path = Path(sys.exec_prefix)
else:
primary_env = get_primary_virtual_env()
primary_env_path = primary_env.get_or_create()
secondary_env_path = conn_info
return DualPythonIPC(self, primary_env_path, secondary_env_path) |
def test_cross_attention_adapter() -> None:
base = fl.Chain(fl.Attention(embedding_dim=4))
adapter = CrossAttentionAdapter(base.Attention).inject()
assert (list(base) == [adapter])
assert (len(list(adapter.layers(fl.Linear))) == 6)
assert (len(list(base.layers(fl.Linear))) == 6)
injection_points = list(adapter.layers(InjectionPoint))
assert (len(injection_points) == 4)
for ip in injection_points:
assert (len(ip) == 1)
assert isinstance(ip[0], fl.Linear)
adapter.eject()
assert (len(base) == 1)
assert isinstance(base[0], fl.Attention)
assert (len(list(adapter.layers(fl.Linear))) == 2)
assert (len(list(base.layers(fl.Linear))) == 4)
injection_points = list(adapter.layers(InjectionPoint))
assert (len(injection_points) == 4)
for ip in injection_points:
assert (len(ip) == 0) |
class SQLNewConnInThreadHook(SQLHook):
def _execute(self, payload):
thd = Thread(target=self.execute_sqls)
self._dbh = payload.get_conn(payload.current_db)
log.info('Running sql file: {} for {}'.format(self.file_path, payload.socket))
thd.start()
time.sleep(1) |
def validate_parameters(fos):
mod_params = fos._module.params
selector = mod_params['selector']
params = mod_params['params']
if params:
for (param_key, param_value) in params.items():
if (not isinstance(param_value, (bool, int, str, list))):
return (False, {'message': ('value of param:%s must be atomic' % param_key)})
acceptable_param_names = list(module_selectors_defs[selector]['params'].keys())
provided_param_names = list((params.keys() if params else []))
params_valid = True
for param_name in acceptable_param_names:
if ((param_name not in provided_param_names) and eval(module_selectors_defs[selector]['params'][param_name]['required'])):
params_valid = False
break
if params_valid:
for param_name in provided_param_names:
if (param_name not in acceptable_param_names):
params_valid = False
break
if (not params_valid):
param_summary = [('%s(%s, %s)' % (param_name, param['type'], ('required' if eval(param['required']) else 'optional'))) for (param_name, param) in module_selectors_defs[selector]['params'].items()]
fos._module.warn(('selector:%s expects params:%s' % (selector, str(param_summary))))
return (True, {}) |
def meta_train(theta_model, phi_model, train_loader, val_loader, optimizer, optimizer_maml, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0):
running_loss_theta = 0.0
running_loss_phi = 0.0
criterion = nn.CrossEntropyLoss()
global global_step, global_epoch
while (global_epoch < nepochs):
theta_model.train()
phi_model.train()
h = open(logfile_name, 'a')
running_loss = 0.0
for (step, (x, mel, fname)) in tqdm(enumerate(train_loader)):
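# Meta-training step: deep-copy theta, take an inner update on the NCE loss (optimizer_maml),
# copy the updated encoder into phi, restore theta from the copy, then update on phi's
# classification loss with the outer optimizer.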
model_copy = copy.deepcopy(theta_model)
current_lr = learning_rate_decay(init_lr, global_step)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
optimizer_maml.zero_grad()
(x, mel) = (Variable(x), Variable(mel))
if use_cuda:
(x, mel) = (x.cuda(), mel.cuda())
nce_loss = theta_model(mel)
loss_thetamodel = nce_loss
nce_loss.backward(retain_graph=False)
grad_norm = torch.nn.utils.clip_grad_norm_(theta_model.parameters(), clip_thresh)
optimizer_maml.step()
phi_model.encoder = copy.deepcopy(theta_model.encoder)
theta_model = model_copy
valence_outputs = phi_model(mel)
loss_phimodel = criterion(valence_outputs, x)
loss_phimodel.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(theta_model.parameters(), clip_thresh)
optimizer.step()
if ((global_step % checkpoint_interval) == 0):
save_checkpoint(theta_model, optimizer, global_step, checkpoint_dir, global_epoch)
log_value('Phi Loss', float(loss_phimodel.item()), global_step)
log_value('Theta Loss', float(loss_thetamodel.item()), global_step)
log_value('gradient norm', grad_norm, global_step)
log_value('learning rate', current_lr, global_step)
global_step += 1
running_loss_phi += loss_phimodel.item()
running_loss_theta += loss_thetamodel.item()
averaged_loss = (running_loss / len(train_loader))
log_value('theta loss (per epoch)', averaged_loss, global_epoch)
h.write((((((('Theta Loss after epoch ' + str(global_epoch)) + ': ') + format((running_loss_theta / len(train_loader)))) + ' Phi Loss: ') + format((running_loss_phi / len(train_loader)))) + '\n'))
h.close()
global_epoch += 1
return (theta_model, phi_model) |
class OptionPlotoptionsAreaSonificationTracksMappingLowpass(Options):
@property
def frequency(self) -> 'OptionPlotoptionsAreaSonificationTracksMappingLowpassFrequency':
return self._config_sub_data('frequency', OptionPlotoptionsAreaSonificationTracksMappingLowpassFrequency)
@property
def resonance(self) -> 'OptionPlotoptionsAreaSonificationTracksMappingLowpassResonance':
return self._config_sub_data('resonance', OptionPlotoptionsAreaSonificationTracksMappingLowpassResonance) |
def test_get_model_messages(conversation_with_messages):
model_messages = conversation_with_messages.get_model_messages()
assert (len(model_messages) == 4)
assert all((isinstance(msg, ModelMessage) for msg in model_messages))
assert (model_messages[0].content == 'Hello')
assert (model_messages[1].content == 'Hi')
assert (model_messages[2].content == 'How are you?')
assert (model_messages[3].content == "I'm good, thanks") |
class TestRaiseErrors(tests.LimitedTestCase):
def test_raise_new_error(self):
for _ in range(3):
with self.assertRaises(socket.gaierror) as error:
greendns._raise_new_error(greendns.EAI_EAGAIN_ERROR)
self.assertIsNone(error.exception.__traceback__)
self.assertIsNone(greendns.EAI_EAGAIN_ERROR.__traceback__) |
def fortios_icap(data, fos):
fos.do_member_operation('icap', 'server-group')
if data['icap_server_group']:
resp = icap_server_group(data, fos)
else:
fos._module.fail_json(msg=('missing task body: %s' % 'icap_server_group'))
return ((not is_successful_status(resp)), (is_successful_status(resp) and (resp['revision_changed'] if ('revision_changed' in resp) else True)), resp, {}) |
@attr.s(auto_attribs=True)
class BodyUploadLocalFile():
file_upload: File
additional_properties: Dict[(str, Any)] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[(str, Any)]:
file_upload = self.file_upload.to_tuple()
field_dict: Dict[(str, Any)] = {}
field_dict.update(self.additional_properties)
field_dict.update({'file_upload': file_upload})
return field_dict
def to_multipart(self) -> Dict[(str, Any)]:
file_upload = self.file_upload.to_tuple()
field_dict: Dict[(str, Any)] = {}
field_dict.update({key: (None, str(value).encode(), 'text/plain') for (key, value) in self.additional_properties.items()})
field_dict.update({'file_upload': file_upload})
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[(str, Any)]) -> T:
d = src_dict.copy()
file_upload = File(payload=BytesIO(d.pop('file_upload')))
body_upload_local_file = cls(file_upload=file_upload)
body_upload_local_file.additional_properties = d
return body_upload_local_file
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return (key in self.additional_properties) |
def build_exploit_search(platform, port, type, description, cve, keyword):
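# Build a filter query string from the supplied fields (platform, port, type, description,
# CVE, keyword) and print the matching exploits returned by the API.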
search_query = ''
if platform:
search_query = ((('platform:' + '"') + platform) + '"')
if port:
search_query = ((((search_query + ' port:') + '"') + port) + '"')
if type:
search_query = ((((search_query + ' type:') + '"') + type) + '"')
if description:
search_query = ((((search_query + ' description:') + '"') + description) + '"')
if cve:
search_query = ((search_query + ' ') + cve)
if keyword:
search_query = ((keyword + ' ') + search_query)
results = api.exploits.search(search_query)
print_exploits(results) |
class OptionPlotoptionsParetoSonificationTracksMappingHighpassFrequency(Options):
@property
def mapFunction(self):
return self._config_get(None)
@mapFunction.setter
def mapFunction(self, value: Any):
self._config(value, js_type=False)
@property
def mapTo(self):
return self._config_get(None)
@mapTo.setter
def mapTo(self, text: str):
self._config(text, js_type=False)
@property
def max(self):
return self._config_get(None)
@max.setter
def max(self, num: float):
self._config(num, js_type=False)
@property
def min(self):
return self._config_get(None)
@min.setter
def min(self, num: float):
self._config(num, js_type=False)
@property
def within(self):
return self._config_get(None)
@within.setter
def within(self, value: Any):
self._config(value, js_type=False) |
class P_IC():
def uOfXT(self, x, t):
p_L = 0.0
phi_L = (tank_dim[(nd - 1)] - water_level)
phi = (x[(nd - 1)] - water_level)
p = (p_L - (g[(nd - 1)] * ((rho_0 * (phi_L - phi)) + ((rho_1 - rho_0) * (smoothedHeaviside_integral(smoothing, phi_L) - smoothedHeaviside_integral(smoothing, phi))))))
return p |
def main():
if (len(sys.argv) != 5):
print('Usage setup_toolchain.py <visual studio path> <win sdk path> <runtime dirs> <target_cpu>')
sys.exit(2)
win_sdk_path = sys.argv[2]
runtime_dirs = sys.argv[3]
target_cpu = sys.argv[4]
cpus = ('x86', 'x64', 'arm64')
assert (target_cpu in cpus)
vc_bin_dir = ''
for cpu in cpus:
args = _SetupScript(cpu, win_sdk_path)
args.extend(('&&', 'set'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(variables, _) = popen.communicate()
env = _ExtractImportantEnvironment(variables)
env['PATH'] = ((runtime_dirs + ';') + env['PATH'])
if (cpu == target_cpu):
for path in env['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, 'cl.exe')):
vc_bin_dir = os.path.realpath(path)
break
if win_sdk_path:
additional_includes = (('{sdk_dir}\\Include\\shared;' + '{sdk_dir}\\Include\\um;') + '{sdk_dir}\\Include\\winrt;').format(sdk_dir=win_sdk_path)
env['INCLUDE'] = (additional_includes + env['INCLUDE'])
env_block = _FormatAsEnvironmentBlock(env)
with open(('environment.' + cpu), 'wb') as f:
f.write(env_block.encode())
assert vc_bin_dir
print(('vc_bin_dir = "%s"' % vc_bin_dir)) |
class TestHyperlinkedRelatedField(URLPatternsTestCase, APITestCase):
included = [path('namespaced/<int:pk>/', dummy_pk_view, name='namespaced')]
urlpatterns = [path('v1/', include((included, 'v1'), namespace='v1')), path('v2/', include((included, 'v2'), namespace='v2'))]
def setUp(self):
super().setUp()
class MockQueryset():
def get(self, pk):
return ('object %s' % pk)
self.field = serializers.HyperlinkedRelatedField(view_name='namespaced', queryset=MockQueryset())
request = factory.get('/')
request.versioning_scheme = NamespaceVersioning()
request.version = 'v1'
self.field._context = {'request': request}
def test_bug_2489(self):
assert (self.field.to_internal_value('/v1/namespaced/3/') == 'object 3')
with pytest.raises(serializers.ValidationError):
self.field.to_internal_value('/v2/namespaced/3/') |
class CsccNotifierTest(scanner_base_db.ScannerBaseDbTestCase):
def setUp(self):
super(CsccNotifierTest, self).setUp()
self.maxDiff = None
self.api_quota = {'securitycenter': {'max_calls': 14, 'period': 1.0}}
def tearDown(self):
super(CsccNotifierTest, self).tearDown()
@mock.patch('google.cloud.forseti.common.util.date_time.get_utc_now_datetime')
def _populate_and_retrieve_violations(self, mock_get_utc_now):
fake_datetime = datetime.datetime(2010, 8, 28, 10, 20, 30, 0)
mock_get_utc_now.return_value = fake_datetime
scanner_index_id = self.populate_db(inv_index_id=self.inv_index_id2)
violations = self.violation_access.list(scanner_index_id=scanner_index_id)
violations_as_dict = []
for violation in violations:
violations_as_dict.append(scanner_dao.convert_sqlalchemy_object_to_dict(violation))
violations_as_dict = notifier.convert_to_timestamp(violations_as_dict)
return violations_as_dict
def test_can_transform_to_findings_in_api_mode(self):
expected_findings = [['f3eb2be2ed015563d7dc4d4aea798a0b', {'category': 'FIREWALL_BLACKLIST_VIOLATION_111', 'resource_name': 'full_name_111', 'name': 'organizations/11111/sources/22222/findings/f3eb2be2ed015563d7dc4d4aea798a0b', 'parent': 'organizations/11111/sources/22222', 'event_time': '2010-08-28T10:20:30Z', 'state': 'ACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'disallow_all_ports_111', 'inventory_index_id': 'iii', 'resource_data': '"inventory_data_111"', 'db_source': 'table:violations/id:1', 'rule_index': 111, 'violation_data': '"{\\"policy_names\\": [\\"fw-tag-match_111\\"], \\"recommended_actions\\": {\\"DELETE_FIREWALL_RULES\\": [\\"fw-tag-match_111\\"]}}"', 'resource_id': 'fake_firewall_111', 'scanner_index_id': , 'resource_type': 'firewall_rule'}}], ['73f4a4ac87a76a2e9d2c7854ac8fa077', {'category': 'FIREWALL_BLACKLIST_VIOLATION_222', 'resource_name': 'full_name_222', 'name': 'organizations/11111/sources/22222/findings/73f4a4ac87a76a2e9d2c7854ac8fa077', 'parent': 'organizations/11111/sources/22222', 'event_time': '2010-08-28T10:20:30Z', 'state': 'ACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'disallow_all_ports_222', 'inventory_index_id': 'iii', 'resource_data': '"inventory_data_222"', 'db_source': 'table:violations/id:2', 'rule_index': 222, 'violation_data': '"{\\"policy_names\\": [\\"fw-tag-match_222\\"], \\"recommended_actions\\": {\\"DELETE_FIREWALL_RULES\\": [\\"fw-tag-match_222\\"]}}"', 'resource_id': 'fake_firewall_222', 'scanner_index_id': , 'resource_type': 'firewall_rule'}}]]
violations_as_dict = self._populate_and_retrieve_violations()
finding_results = cscc_notifier.CsccNotifier('iii', self.api_quota)._transform_for_api(violations_as_dict, source_id='organizations/11111/sources/22222')
self.assertEqual(expected_findings, ast.literal_eval(json.dumps(finding_results)))
def test_api_is_invoked_correctly(self):
notifier = cscc_notifier.CsccNotifier(self.api_quota, None)
notifier._send_findings_to_cscc = mock.MagicMock()
notifier.LOGGER = mock.MagicMock()
self.assertEqual(0, notifier._send_findings_to_cscc.call_count)
notifier.run(None, source_id='111')
calls = notifier._send_findings_to_cscc.call_args_list
call = calls[0]
(_, kwargs) = call
self.assertEqual('111', kwargs['source_id'])
def test_outdated_findings_are_found(self):
NEW_FINDINGS = [['abc', {'category': 'BUCKET_VIOLATION', 'resource_name': 'organization/123/project/inventoryscanner/bucket/isthispublic/', 'name': 'organizations/123/sources/560/findings/abc', 'parent': 'organizations/123/sources/560', 'event_time': '2019-03-12T16:06:19Z', 'state': 'ACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'Bucket acls rule to search for public buckets', 'inventory_index_id': 789, 'resource_data': '{"bucket": "isthispublic", "entity": "allUsers", "id": "isthispublic/allUsers", "role": "READER"}', 'db_source': 'table:violations/id:94953', 'rule_index': 0, 'violation_data': '{"bucket": "isthispublic", "domain": "", "email": "", "entity": "allUsers", "full_name": "organization/123/project/inventoryscanner/bucket/isthispublic/", "project_id": "inventoryscanner-henry", "role": "READER"}', 'resource_id': 'isthispublic', 'scanner_index_id': , 'resource_type': 'bucket'}}]]
FINDINGS_IN_CSCC = [['ffe', {'category': 'BUCKET_VIOLATION', 'resource_name': 'organization/123/project/inventoryscanner/bucket/isthispublic/', 'name': 'organizations/123/sources/560/findings/ffe', 'parent': 'organizations/123/sources/560', 'event_time': '2019-03-12T16:06:19Z', 'state': 'ACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'Bucket acls rule to search for public buckets', 'inventory_index_id': 789, 'resource_data': '{"bucket": "isthispublic", "entity": "allUsers", "id": "isthispublic/allUsers", "role": "READER"}', 'db_source': 'table:violations/id:94953', 'rule_index': 0, 'violation_data': '{"bucket": "isthispublic", "domain": "", "email": "", "entity": "allUsers", "full_name": "organization/123/project/inventoryscanner/bucket/isthispublic/", "project_id": "inventoryscanner", "role": "READER"}', 'resource_id': 'isthispublic', 'scanner_index_id': , 'resource_type': 'bucket'}}], ['hij', {'category': 'BUCKET_VIOLATION', 'resource_name': 'organization/123/project/inventoryscanner/bucket/nolongerpublic/', 'name': 'organizations/123/sources/560/findings/hij', 'parent': 'organizations/123/sources/560', 'event_time': '2019-03-12T16:06:19Z', 'state': 'INACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'Bucket acls rule to search for public buckets', 'inventory_index_id': 789, 'resource_data': '{"bucket": "nolongerpublic", "entity": "allUsers", "id": "nolongerpublic/allUsers", "role": "READER"}', 'db_source': 'table:violations/id:94953', 'rule_index': 0, 'violation_data': '{"bucket": "nolongerpublic", "domain": "", "email": "", "entity": "allUsers", "full_name": "organization/123/project/inventoryscanner/bucket/nolongerpublic/", "project_id": "inventoryscanner", "role": "READER"}', 'resource_id': 'nolongerpublic', 'scanner_index_id': , 'resource_type': 'bucket'}}]]
EXPECTED_INACTIVE_FINDINGS = [['ffe', {'category': 'BUCKET_VIOLATION', 'resource_name': 'organization/123/project/inventoryscanner-henry/bucket/isthispublic/', 'name': 'organizations/123/sources/560/findings/ffe', 'parent': 'organizations/123/sources/560', 'event_time': '2019-03-12T16:06:19Z', 'state': 'INACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'Bucket acls rule to search for public buckets', 'inventory_index_id': 789, 'resource_data': '{"bucket": "isthispublic", "entity": "allUsers", "id": "isthispublic/allUsers", "role": "READER"}', 'db_source': 'table:violations/id:94953', 'rule_index': 0, 'violation_data': '{"bucket": "isthispublic", "domain": "", "email": "", "entity": "allUsers", "full_name": "organization/123/project/inventoryscanner/bucket/isthispublic/", "project_id": "inventoryscanner", "role": "READER"}', 'resource_id': 'isthispublic', 'scanner_index_id': , 'resource_type': 'bucket'}}]]
notifier = cscc_notifier.CsccNotifier('123', self.api_quota)
inactive_findings = notifier.find_inactive_findings(NEW_FINDINGS, FINDINGS_IN_CSCC)
self.assertEqual(EXPECTED_INACTIVE_FINDINGS[0][1]['state'], inactive_findings[0][1]['state'])
def test_outdated_findings_are_not_found(self):
NEW_FINDINGS = [['abc', {'category': 'BUCKET_VIOLATION', 'resource_name': 'organization/123/project/inventoryscanner/bucket/isthispublic/', 'name': 'organizations/123/sources/560/findings/abc', 'parent': 'organizations/123/sources/560', 'event_time': '2019-03-12T16:06:19Z', 'state': 'ACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'Bucket acls rule to search for public buckets', 'inventory_index_id': 789, 'resource_data': '{"bucket": "isthispublic", "entity": "allUsers", "id": "isthispublic/allUsers", "role": "READER"}', 'db_source': 'table:violations/id:94953', 'rule_index': 0, 'violation_data': '{"bucket": "isthispublic", "domain": "", "email": "", "entity": "allUsers", "full_name": "organization/123/project/inventoryscanner/bucket/isthispublic/", "project_id": "inventoryscanner-henry", "role": "READER"}', 'resource_id': 'isthispublic', 'scanner_index_id': , 'resource_type': 'bucket'}}]]
FINDINGS_IN_CSCC = [['abc', {'category': 'BUCKET_VIOLATION', 'resource_name': 'organization/123/project/inventoryscanner/bucket/isthispublic/', 'name': 'organizations/123/sources/560/findings/abc', 'parent': 'organizations/123/sources/560', 'event_time': '2019-03-12T16:06:19Z', 'state': 'ACTIVE', 'source_properties': {'source': 'FORSETI', 'rule_name': 'Bucket acls rule to search for public buckets', 'inventory_index_id': 789, 'resource_data': '{"bucket": "isthispublic", "entity": "allUsers", "id": "isthispublic/allUsers", "role": "READER"}', 'db_source': 'table:violations/id:94953', 'rule_index': 0, 'violation_data': '{"bucket": "isthispublic", "domain": "", "email": "", "entity": "allUsers", "full_name": "organization/123/project/inventoryscanner/bucket/isthispublic/", "project_id": "inventoryscanner-henry", "role": "READER"}', 'resource_id': 'isthispublic', 'scanner_index_id': , 'resource_type': 'bucket'}}]]
notifier = cscc_notifier.CsccNotifier('123', self.api_quota)
inactive_findings = notifier.find_inactive_findings(NEW_FINDINGS, FINDINGS_IN_CSCC)
assert (len(inactive_findings) == 0)
@mock.patch('google.cloud.forseti.common.gcp_api.securitycenter.SecurityCenterClient')
def test_empty_list_api_response(self, mock_list):
source_id = 'organizations/123/sources/456'
violations = [{'violation_hash': '311', 'resource_name': 'readme1', 'resource_data': {u'ipv4Enabled': True, u'authorizedNetworks': [{u'expirationTime': u'1970-01-01T00:00:00Z', u'kind': u'sql#aclEntry', u'value': u'0.0.0.0/0'}]}, 'resource_id': 'readme1', 'violation_type': 'CLOUD_SQL_VIOLATION', 'created_at_datetime': '2018-03-26T04:37:51Z', 'scanner_index_id': 122, 'rule_name': 'Cloud SQL rule to search for publicly exposed instances', 'full_name': 'organization/123/project/cicd-henry/cloudsqlinstance/456/', 'rule_index': 0, 'violation_data': {u'instance_name': u'readme1', u'require_ssl': False, u'project_id': u'readme1', u'authorized_networks': [u'0.0.0.0/0'], u'full_name': u'organization/123/project/cicd-henry/cloudsqlinstance/456/'}, 'id': 99185, 'resource_type': 'cloudsqlinstance'}]
mock_list.list_findings.return_value = {'readTime': '111'}
notifier = cscc_notifier.CsccNotifier('abc', self.api_quota)
notifier._send_findings_to_cscc(violations, source_id)
self.assertFalse(mock_list.update_finding.called) |
class Cart(Model):
has_many({'elements': 'CartElement'})
updated_at = Field.datetime(default=now, update=now)
total_denorm = Field.float(default=0.0)
revision = Field.string(default=(lambda : uuid4().hex), update=(lambda : uuid4().hex))
def _sum_elements(self, row):
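# Cart total: sum of element quantity * product price, computed as a single aggregate
# over the joined product rows.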
summable = (CartElement.quantity.cast('float') * Product.price).sum()
sum = row.elements.join('product').select(summable).first()
return (sum[summable] or 0.0)
@before_save
def _rebuild_total(self, row):
row.total_denorm = self._sum_elements(row)
@rowattr('total')
def _compute_total(self, row):
return self._sum_elements(row) |
class OptionSeriesOrganizationSonificationTracksMappingFrequency(Options):
@property
def mapFunction(self):
return self._config_get(None)
@mapFunction.setter
def mapFunction(self, value: Any):
self._config(value, js_type=False)
@property
def mapTo(self):
return self._config_get(None)
@mapTo.setter
def mapTo(self, text: str):
self._config(text, js_type=False)
@property
def max(self):
return self._config_get(None)
@max.setter
def max(self, num: float):
self._config(num, js_type=False)
@property
def min(self):
return self._config_get(None)
@min.setter
def min(self, num: float):
self._config(num, js_type=False)
@property
def within(self):
return self._config_get(None)
@within.setter
def within(self, value: Any):
self._config(value, js_type=False) |
class AlreadyOpenException(Exception):
def __init__(self, path, error):
Exception.__init__(self)
self.path = path
self.error = error
def __repr__(self):
return ('%s <path=%s>::%s' % (self.__class__, utf8(self.path), repr(self.error)))
def __str__(self):
return self.error |
def binary_search(arr, low, high, x):
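# Recursive binary search over a sorted array: returns the index of x, or -1 if absent.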
if (high >= low):
mid = ((high + low) // 2)
if (arr[mid] == x):
return mid
elif (arr[mid] > x):
return binary_search(arr, low, (mid - 1), x)
else:
return binary_search(arr, (mid + 1), high, x)
else:
return (- 1) |
class Limiter(object):
__slots__ = ('_rate', '_capacity', '_storage')
def __init__(self, rate, capacity, storage):
if (not isinstance(rate, (float, int))):
raise TypeError('rate must be an int or float')
if (rate <= 0):
raise ValueError('rate must be > 0')
if (not isinstance(capacity, int)):
raise TypeError('capacity must be an int')
if (capacity < 1):
raise ValueError('capacity must be >= 1')
if (not isinstance(storage, StorageBase)):
raise TypeError('storage must be a subclass of StorageBase')
self._rate = rate
self._capacity = capacity
self._storage = storage
def consume(self, key, num_tokens=1):
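# Token-bucket check: validate the key and token count, replenish the bucket for this key
# at the configured rate, then try to take num_tokens from storage.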
if (not key):
if (key is None):
raise TypeError('key may not be None')
raise ValueError('key must be a non-empty string or bytestring')
if (num_tokens is None):
raise TypeError('num_tokens may not be None')
if (num_tokens < 1):
raise ValueError('num_tokens must be >= 1')
self._storage.replenish(key, self._rate, self._capacity)
return self._storage.consume(key, num_tokens) |
def get_subreddit_threads(POST_ID: str):
print_substep('Logging into Reddit.')
content = {}
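# When 2FA is enabled, append the one-time code to the password ('password:code') as expected by PRAW.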
if settings.config['reddit']['creds']['2fa']:
print('\nEnter your two-factor authentication code from your authenticator app.\n')
code = input('> ')
print()
pw = settings.config['reddit']['creds']['password']
passkey = f'{pw}:{code}'
else:
passkey = settings.config['reddit']['creds']['password']
username = settings.config['reddit']['creds']['username']
if str(username).casefold().startswith('u/'):
username = username[2:]
try:
reddit = praw.Reddit(client_id=settings.config['reddit']['creds']['client_id'], client_secret=settings.config['reddit']['creds']['client_secret'], user_agent='Accessing Reddit threads', username=username, passkey=passkey, check_for_async=False)
except ResponseException as e:
if (e.response.status_code == 401):
print('Invalid credentials - please check them in config.toml')
except:
print('Something went wrong...')
print_step('Getting subreddit threads...')
similarity_score = 0
if (not settings.config['reddit']['thread']['subreddit']):
try:
subreddit = reddit.subreddit(re.sub('r\\/', '', input('What subreddit would you like to pull from? ')))
except ValueError:
subreddit = reddit.subreddit('askreddit')
print_substep('Subreddit not defined. Using AskReddit.')
else:
sub = settings.config['reddit']['thread']['subreddit']
print_substep(f'Using subreddit: r/{sub} from TOML config')
subreddit_choice = sub
if str(subreddit_choice).casefold().startswith('r/'):
subreddit_choice = subreddit_choice[2:]
subreddit = reddit.subreddit(subreddit_choice)
if POST_ID:
submission = reddit.submission(id=POST_ID)
elif (settings.config['reddit']['thread']['post_id'] and (len(str(settings.config['reddit']['thread']['post_id']).split('+')) == 1)):
submission = reddit.submission(id=settings.config['reddit']['thread']['post_id'])
elif settings.config['ai']['ai_similarity_enabled']:
threads = subreddit.hot(limit=50)
keywords = settings.config['ai']['ai_similarity_keywords'].split(',')
keywords = [keyword.strip() for keyword in keywords]
keywords_print = ', '.join(keywords)
print(f'Sorting threads by similarity to the given keywords: {keywords_print}')
(threads, similarity_scores) = sort_by_similarity(threads, keywords)
(submission, similarity_score) = get_subreddit_undone(threads, subreddit, similarity_scores=similarity_scores)
else:
threads = subreddit.hot(limit=25)
submission = get_subreddit_undone(threads, subreddit)
if (submission is None):
return get_subreddit_threads(POST_ID)
elif ((not submission.num_comments) and (settings.config['settings']['storymode'] == 'false')):
print_substep('No comments found. Skipping.')
exit()
submission = check_done(submission)
upvotes = submission.score
ratio = (submission.upvote_ratio * 100)
num_comments = submission.num_comments
threadurl = f'
print_substep(f'Video will be: {submission.title} :thumbsup:', style='bold green')
print_substep(f'Thread url is: {threadurl} :thumbsup:', style='bold green')
print_substep(f'Thread has {upvotes} upvotes', style='bold blue')
print_substep(f'Thread has an upvote ratio of {ratio}%', style='bold blue')
print_substep(f'Thread has {num_comments} comments', style='bold blue')
if similarity_score:
print_substep(f'Thread has a similarity score up to {round((similarity_score * 100))}%', style='bold blue')
content['thread_url'] = threadurl
content['thread_title'] = submission.title
content['thread_id'] = submission.id
content['is_nsfw'] = submission.over_18
content['comments'] = []
if settings.config['settings']['storymode']:
if (settings.config['settings']['storymodemethod'] == 1):
content['thread_post'] = posttextparser(submission.selftext)
else:
content['thread_post'] = submission.selftext
else:
for top_level_comment in submission.comments:
if isinstance(top_level_comment, MoreComments):
continue
if (top_level_comment.body in ['[removed]', '[deleted]']):
continue
if (not top_level_comment.stickied):
sanitised = sanitize_text(top_level_comment.body)
if ((not sanitised) or (sanitised == ' ')):
continue
if (len(top_level_comment.body) <= int(settings.config['reddit']['thread']['max_comment_length'])):
if (len(top_level_comment.body) >= int(settings.config['reddit']['thread']['min_comment_length'])):
if ((top_level_comment.author is not None) and (sanitize_text(top_level_comment.body) is not None)):
content['comments'].append({'comment_body': top_level_comment.body, 'comment_url': top_level_comment.permalink, 'comment_id': top_level_comment.id})
print_substep('Received subreddit threads Successfully.', style='bold green')
return content |
class AWSSecretsManagerService(SecretsManagerService):
def __init__(self, region: str, access_key_id: Optional[str]=None, access_key_data: Optional[str]=None, config: Optional[Dict[(str, Any)]]=None) -> None:
self.secret_gateway = AWSSecretsManagerGateway(region, access_key_id, access_key_data, config)
def create_secret(self, secret_name: str, secret_value: str, tags: Optional[Dict[(str, str)]]=None) -> str:
secret_id = self.secret_gateway.create_secret(secret_name=secret_name, secret_value=secret_value, tags=tags)
return secret_id
def get_secret(self, secret_id: str) -> StringSecret:
secret = self.secret_gateway.get_secret(secret_id=secret_id)
return secret
async def create_secret_async(self, secret_name: str, secret_value: str, tags: Optional[Dict[(str, str)]]=None) -> str:
loop = asyncio.get_running_loop()
result = (await loop.run_in_executor(None, self.create_secret, secret_name, secret_value, tags))
return result
async def get_secret_async(self, secret_id: str) -> StringSecret:
loop = asyncio.get_running_loop()
result = (await loop.run_in_executor(None, self.get_secret, secret_id))
return result
def delete_secret(self, secret_id: str) -> None:
self.secret_gateway.delete_secret(secret_id=secret_id)
async def delete_secret_async(self, secret_id: str) -> None:
loop = asyncio.get_running_loop()
(await loop.run_in_executor(None, self.delete_secret, secret_id)) |
class CustomDict(dict):
def __init__(self, data: List[Tuple[(Any, Any)]]) -> None:
for (i, (key, value)) in enumerate(data):
if isinstance(value, Path):
data[i] = (key, str(value))
if isinstance(value, set):
data[i] = (key, list(value))
super().__init__(data) |
def run(app: ((ASGIApplication | Callable[(..., Any)]) | str), *, host: str='127.0.0.1', port: int=8000, uds: (str | None)=None, fd: (int | None)=None, loop: LoopSetupType='auto', http: (type[asyncio.Protocol] | HTTPProtocolType)='auto', ws: (type[asyncio.Protocol] | WSProtocolType)='auto', ws_max_size: int=16777216, ws_max_queue: int=32, ws_ping_interval: (float | None)=20.0, ws_ping_timeout: (float | None)=20.0, ws_per_message_deflate: bool=True, lifespan: LifespanType='auto', interface: InterfaceType='auto', reload: bool=False, reload_dirs: ((list[str] | str) | None)=None, reload_includes: ((list[str] | str) | None)=None, reload_excludes: ((list[str] | str) | None)=None, reload_delay: float=0.25, workers: (int | None)=None, env_file: ((str | os.PathLike[str]) | None)=None, log_config: ((dict[(str, Any)] | str) | None)=LOGGING_CONFIG, log_level: ((str | int) | None)=None, access_log: bool=True, proxy_headers: bool=True, server_header: bool=True, date_header: bool=True, forwarded_allow_ips: ((list[str] | str) | None)=None, root_path: str='', limit_concurrency: (int | None)=None, backlog: int=2048, limit_max_requests: (int | None)=None, timeout_keep_alive: int=5, timeout_graceful_shutdown: (int | None)=None, ssl_keyfile: (str | None)=None, ssl_certfile: ((str | os.PathLike[str]) | None)=None, ssl_keyfile_password: (str | None)=None, ssl_version: int=SSL_PROTOCOL_VERSION, ssl_cert_reqs: int=ssl.CERT_NONE, ssl_ca_certs: (str | None)=None, ssl_ciphers: str='TLSv1', headers: (list[tuple[(str, str)]] | None)=None, use_colors: (bool | None)=None, app_dir: (str | None)=None, factory: bool=False, h11_max_incomplete_event_size: (int | None)=None) -> None:
if (app_dir is not None):
sys.path.insert(0, app_dir)
config = Config(app, host=host, port=port, uds=uds, fd=fd, loop=loop, ws=ws, ws_max_size=ws_max_size, ws_max_queue=ws_max_queue, ws_ping_interval=ws_ping_interval, ws_ping_timeout=ws_ping_timeout, ws_per_message_deflate=ws_per_message_deflate, lifespan=lifespan, interface=interface, reload=reload, reload_dirs=reload_dirs, reload_includes=reload_includes, reload_excludes=reload_excludes, reload_delay=reload_delay, workers=workers, env_file=env_file, log_config=log_config, log_level=log_level, access_log=access_log, proxy_headers=proxy_headers, server_header=server_header, date_header=date_header, forwarded_allow_ips=forwarded_allow_ips, root_path=root_path, limit_concurrency=limit_concurrency, backlog=backlog, limit_max_requests=limit_max_requests, timeout_keep_alive=timeout_keep_alive, timeout_graceful_shutdown=timeout_graceful_shutdown, ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, ssl_keyfile_password=ssl_keyfile_password, ssl_version=ssl_version, ssl_cert_reqs=ssl_cert_reqs, ssl_ca_certs=ssl_ca_certs, ssl_ciphers=ssl_ciphers, headers=headers, use_colors=use_colors, factory=factory, h11_max_incomplete_event_size=h11_max_incomplete_event_size)
server = Server(config=config)
if ((config.reload or (config.workers > 1)) and (not isinstance(app, str))):
logger = logging.getLogger('uvicorn.error')
logger.warning("You must pass the application as an import string to enable 'reload' or 'workers'.")
sys.exit(1)
if config.should_reload:
sock = config.bind_socket()
ChangeReload(config, target=server.run, sockets=[sock]).run()
elif (config.workers > 1):
sock = config.bind_socket()
Multiprocess(config, target=server.run, sockets=[sock]).run()
else:
server.run()
if (config.uds and os.path.exists(config.uds)):
os.remove(config.uds)
if ((not server.started) and (not config.should_reload) and (config.workers == 1)):
sys.exit(STARTUP_FAILURE) |
def set_proxy(ip: str, port: int):
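# Firefox profile that routes all traffic, including DNS lookups, through a SOCKS5 proxy at ip:port.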
profile = webdriver.FirefoxProfile()
profile.set_preference('network.proxy.type', 1)
profile.set_preference('network.proxy.socks', ip)
profile.set_preference('network.proxy.socks_port', port)
profile.set_preference('network.proxy.socks_version', 5)
profile.set_preference('network.proxy.socks_remote_dns', True)
profile.update_preferences()
return profile |
def test_index_population(plugdir_and_storage):
(plugdir, storage) = plugdir_and_storage
manager = repo_manager.BotRepoManager(storage, plugdir, (os.path.join(assets, 'repos', 'simple.json'),))
manager.index_update()
index_entry = manager[repo_manager.REPO_INDEX]
assert (repo_manager.LAST_UPDATE in index_entry)
assert ('pluginname1' in index_entry['name1/err-reponame1'])
assert ('pluginname2' in index_entry['name2/err-reponame2']) |
class OptionSeriesPolygonSonificationPointgrouping(Options):
@property
def algorithm(self):
return self._config_get('minmax')
@algorithm.setter
def algorithm(self, text: str):
self._config(text, js_type=False)
@property
def enabled(self):
return self._config_get(True)
@enabled.setter
def enabled(self, flag: bool):
self._config(flag, js_type=False)
@property
def groupTimespan(self):
return self._config_get(15)
@groupTimespan.setter
def groupTimespan(self, num: float):
self._config(num, js_type=False)
@property
def prop(self):
return self._config_get('y')
@prop.setter
def prop(self, text: str):
self._config(text, js_type=False) |
@login_required
@admin_required
@permission_required(UserAdminPermission)
@ajax_required
@require_POST
def dc_user_profile_password_modal_form(request, username):
user = get_edited_user(request, username)
status = 200
pform = AdminChangePasswordForm(user, request.POST)
if pform.is_valid():
status = pform.save(request)
if (status == 200):
messages.success(request, _('User password was successfully changed'))
return redirect('dc_user_profile', user.username)
return render(request, 'gui/dc/profile/profile_password_form.html', {'user': user, 'pform': pform}, status=status) |
@mock.patch('foremast.awslambda.api_gateway_event.api_gateway_event.boto3')
@mock.patch('foremast.awslambda.api_gateway_event.api_gateway_event.get_details')
@mock.patch('foremast.awslambda.api_gateway_event.api_gateway_event.get_env_credential')
@mock.patch('foremast.awslambda.api_gateway_event.api_gateway_event.get_properties')
def test_apigateway(get_properties, get_env_credential, get_details, boto3):
test = APIGateway(rules=TEST_RULES)
assert test |
class OptionPlotoptionsColumnpyramidSonificationPointgrouping(Options):
@property
def algorithm(self):
return self._config_get('minmax')
@algorithm.setter
def algorithm(self, text: str):
self._config(text, js_type=False)
@property
def enabled(self):
return self._config_get(True)
@enabled.setter
def enabled(self, flag: bool):
self._config(flag, js_type=False)
@property
def groupTimespan(self):
return self._config_get(15)
@groupTimespan.setter
def groupTimespan(self, num: float):
self._config(num, js_type=False)
@property
def prop(self):
return self._config_get('y')
@prop.setter
def prop(self, text: str):
self._config(text, js_type=False) |
def test_distance_nearest():
spacing = 0.5
coords = grid_coordinates((5, 10, (- 20), (- 17)), spacing=spacing)
distance = median_distance(coords, k_nearest=1)
npt.assert_allclose(distance, spacing)
assert (distance.shape == coords[0].shape)
coords = tuple((coord.ravel() for coord in coords))
distance = median_distance(coords, k_nearest=1)
npt.assert_allclose(distance, spacing)
assert (distance.shape == coords[0].shape) |
class LBFGS(Optimizer):
def __init__(self, geometry: Geometry, keep_last: int=7, beta: float=1, max_step: float=0.2, double_damp: bool=True, gamma_mult: bool=False, line_search: bool=False, mu_reg: Optional[float]=None, max_mu_reg_adaptions: int=10, control_step: bool=True, **kwargs) -> None:
self.coord_diffs = list()
self.grad_diffs = list()
super().__init__(geometry, max_step=max_step, **kwargs)
self.beta = beta
self.keep_last = int(keep_last)
self.double_damp = double_damp
self.gamma_mult = gamma_mult
self.mu_reg = mu_reg
self.max_mu_reg_adaptions = max_mu_reg_adaptions
self.line_search = ((not self.is_cos) and line_search)
self.control_step = control_step
self.tot_adapt_mu_cycles = 0
if self.mu_reg:
self.mu_reg_0 = self.mu_reg
self.update_mu_reg = get_update_mu_reg(logger=self.logger)
self.control_step = False
self.double_damp = False
self.line_search = False
self.log(f'''Regularized-L-BFGS (mu_reg={self.mu_reg:.6f}) requested.
Disabling double damping, step control and line search.''')
def reset(self):
self.coord_diffs = list()
self.grad_diffs = list()
def _get_opt_restart_info(self):
opt_restart_info = {'coord_diffs': np.array(self.coord_diffs).tolist(), 'grad_diffs': np.array(self.grad_diffs).tolist(), 'double_damp': self.double_damp, 'gamma_mult': self.gamma_mult, 'keep_last': self.keep_last}
return opt_restart_info
def _set_opt_restart_info(self, opt_restart_info):
self.coord_diffs = [np.array(cd) for cd in opt_restart_info['coord_diffs']]
self.grad_diffs = [np.array(gd) for gd in opt_restart_info['grad_diffs']]
for attr in ('double_damp', 'gamma_mult', 'keep_last'):
setattr(self, attr, opt_restart_info[attr])
def get_lbfgs_step(self, forces):
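# Build the quasi-Newton step from the stored coordinate/gradient difference pairs via bfgs_multiply.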
return bfgs_multiply(self.coord_diffs, self.grad_diffs, forces, beta=self.beta, gamma_mult=self.gamma_mult, mu_reg=self.mu_reg, logger=self.logger)
def optimize(self):
if (self.is_cos and self.align):
(rot_vecs, rot_vec_lists, _) = self.fit_rigid(vector_lists=(self.steps, self.forces, self.coord_diffs, self.grad_diffs))
(rot_steps, rot_forces, rot_coord_diffs, rot_grad_diffs) = rot_vec_lists
self.steps = rot_steps
self.forces = rot_forces
self.coord_diffs = rot_coord_diffs
self.grad_diffs = rot_grad_diffs
forces = self.geometry.forces
self.forces.append(forces)
energy = self.geometry.energy
self.energies.append(energy)
norm = np.linalg.norm(forces)
if (not self.is_cos):
self.log(f' Energy={energy: >24.6f} au')
self.log(f'norm(forces)={norm: >24.6f} au / bohr (rad)')
if ((self.cur_cycle > 0) and (self.forces[(- 2)].size == forces.size)):
y = (self.forces[(- 2)] - forces)
s = self.steps[(- 1)]
if self.double_damp:
(s, y) = double_damp(s, y, s_list=self.coord_diffs, y_list=self.grad_diffs)
self.grad_diffs.append(y)
self.coord_diffs.append(s)
self.coord_diffs = self.coord_diffs[(- self.keep_last):]
self.grad_diffs = self.grad_diffs[(- self.keep_last):]
(ip_gradient, ip_step) = (None, None)
if (self.line_search and (self.cur_cycle > 0)):
(ip_energy, ip_gradient, ip_step) = poly_line_search(energy, self.energies[(- 2)], (- forces), (- self.forces[(- 2)]), self.steps[(- 1)])
if ((ip_gradient is not None) and (ip_step is not None)):
forces = (- ip_gradient)
self.log('Interpolation succeeded')
else:
ip_step = np.zeros_like(forces)
step = self.get_lbfgs_step(forces)
adapt_mu_cycles = 0
while (self.mu_reg and (self.cur_cycle > 0)):
self.log(f'Adapt mu_reg={self.mu_reg:.6f}, norm(step)={np.linalg.norm(step):.6f}')
if (adapt_mu_cycles == self.max_mu_reg_adaptions):
raise Exception('Adaptation of mu_reg failed! Breaking!')
try:
trial_energy = self.geometry.get_energy_at((self.geometry.coords + step))
(self.mu_reg, recompute_step) = self.update_mu_reg(self.mu_reg, energy, trial_energy, (- forces), step)
except NeedNewInternalsException:
self.log('Internal coordinate breakdown in linesearch!')
recompute_step = False
if (not recompute_step):
self.log(f'Next mu_reg={self.mu_reg:.6f}')
break
step = self.get_lbfgs_step(forces)
adapt_mu_cycles += 1
if self.mu_reg:
self.tot_adapt_mu_cycles += (adapt_mu_cycles + 1)
step = (step + ip_step)
if ((self.mu_reg and (self.cur_cycle == 0)) or self.control_step):
step = scale_by_max_step(step, self.max_step)
return step
def postprocess_opt(self):
if self.mu_reg:
msg = f'''
Number of updates: {self.tot_adapt_mu_cycles}'''
self.log(msg) |
class Encicla(BikeShareSystem):
sync = True
meta = {'system': 'Encicla', 'company': ['Sistema de Bicicletas Publicas del Valle de Aburra']}
def update(self, scraper=None):
scraper = (scraper or PyBikesScraper())
stations = []
data = json.loads(scraper.request(FEED_URL))
for item in data:
if (int(item['cdo']) != 0):
continue
station = EnciclaStation(item)
stations.append(station)
self.stations = stations |
class EmptyOpticalElement(OpticalElement):
def forward(self, wavefront):
return wavefront
def backward(self, wavefront):
return wavefront
def get_transformation_matrix_forward(self, wavelength=1):
return np.array(1)
def get_transformation_matrix_backward(self, wavelength=1):
return np.array(1) |
class OptionsNetwork(DataClass):
@property
def autoResize(self):
return self._attrs['autoResize']
@autoResize.setter
def autoResize(self, val):
self._attrs['autoResize'] = val
@property
def width(self):
return self._attrs['width']
@width.setter
def width(self, val):
self._attrs['width'] = val
@property
def height(self):
return self._attrs['height']
@height.setter
def height(self, val):
self._attrs['height'] = val
@property
def locale(self):
return self._attrs['locale']
@locale.setter
def locale(self, val):
self._attrs['locale'] = val
@property
def nodes(self) -> OptionNode:
return self.has_attribute(OptionNode)
@property
def edges(self) -> OptionEdge:
return self.has_attribute(OptionEdge)
@property
def layout(self) -> OptionLayout:
return self.has_attribute(OptionLayout)
@property
def physics(self) -> OptionPhysics:
return self.has_attribute(OptionPhysics)
@property
def interaction(self) -> OptionInteraction:
return self.has_attribute(OptionInteraction)
@property
def manipulation(self) -> OptionManipulation:
return self.has_attribute(OptionManipulation)
@property
def managed(self):
return self.get(True)
@managed.setter
def managed(self, flag: bool):
self.set(flag) |
def make_header(**kwargs):
navbar_items = dbc.Row(html.Ul(id=server.config['NAVBAR_CONTAINER_ID'], className='navbar-nav'), no_gutters=True, className='ml-auto flex-nowrap mt-0', align='center')
return dbc.Navbar(id='header', className='sticky-top', color='primary', dark=True, children=[make_brand(), dbc.NavbarToggler(id='navbar-toggler'), dbc.Collapse(navbar_items, id='navbar-collapse', navbar=True)], **kwargs) |
class Error(ProtocolEvent):
def __init__(self, parsed):
super(Error, self).__init__('error')
self.error = None
self.code = None
self.description = None
if isinstance(parsed, dict):
if ('event' in parsed):
self.error = parsed['event']
if isinstance(parsed['event'], dict):
self.code = parsed['event'].get('code', None)
self.description = parsed['event'].get('description', None)
elif ('errors' in parsed):
self.error = parsed['errors']
else:
self.error = parsed
else:
self.error = parsed
def __repr__(self):
return ('stream:event:Error(%s)' % self.error) |
class ParallelCrawler(Crawler):
def __init__(self, config):
super(ParallelCrawler, self).__init__(config)
self._write_lock = threading.Lock()
self._dispatch_queue = Queue()
self._shutdown_event = threading.Event()
def _start_workers(self):
self._shutdown_event.clear()
for _ in range(self.config.threads):
worker = threading.Thread(target=self._process_queue)
worker.daemon = True
worker.start()
def _process_queue(self):
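# Worker loop: pull callbacks off the dispatch queue until shutdown is signalled;
# the 1 s timeout lets the thread periodically re-check the shutdown event.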
while (not self._shutdown_event.is_set()):
try:
callback = self._dispatch_queue.get(timeout=1)
except Empty:
continue
callback()
self._dispatch_queue.task_done()
def run(self, resource):
try:
self._start_workers()
resource.accept(self)
self._dispatch_queue.join()
finally:
self._shutdown_event.set()
time.sleep(2)
return self.config.progresser
def dispatch(self, callback):
self._dispatch_queue.put(callback) |
@pytest.mark.benchmark
def test_baker_ts_dimer_synthesis(fixture_store):
converged = 0
tot_cycles = 0
tot_dimer_force_evals = 0
tot_cycles_failed = 0
tot_dimer_force_evals_failed = 0
bags = fixture_store['results_bag']
for (k, v) in bags.items():
if (not k.startswith('test_baker_ts_dimer')):
continue
print(k)
try:
energies_match = v['energies_match']
converged += (1 if energies_match else 0)
cycles = v['cycles']
force_evals = v['dimer_force_evals']
if energies_match:
tot_cycles += cycles
tot_dimer_force_evals += force_evals
else:
tot_cycles_failed += cycles
tot_dimer_force_evals_failed += force_evals
for (kk, vv) in v.items():
print('\t', kk, vv)
except KeyError:
print('\tFailed!')
print('### Converged')
print(f' Total cycles: {tot_cycles}')
print(f' Total dimer force evaluations: {tot_dimer_force_evals}')
print(f' Converged: {converged}/{len(bags)}')
print('### Failed')
print(f' Total cycles: {tot_cycles_failed}')
print(f' Total dimer force evaluations: {tot_dimer_force_evals_failed}') |
class IEnumConnections(IUnknown):
_iid_ = GUID('{B196B287-BAB4-101A-B69C-00AA00341D07}')
_idlflags_ = []
def __iter__(self):
return self
if (sys.version_info >= (3, 0)):
def __next__(self):
(cp, fetched) = self.Next(1)
if (fetched == 0):
raise StopIteration
return cp
else:
def next(self):
(cp, fetched) = self.Next(1)
if (fetched == 0):
raise StopIteration
return cp |
def test_dao_delete(dao, default_entity_dict):
req = ServeRequest(**default_entity_dict)
res: ServerResponse = dao.create(req)
dao.delete({'prompt_name': 'my_prompt_1', 'sys_code': 'dbgpt'})
res: ServerResponse = dao.get_one({'prompt_name': 'my_prompt_1', 'sys_code': 'dbgpt'})
assert (res is None) |
def validate_model(model, val_loader, full=None):
print('Validating the model')
model.eval()
y_true = []
y_pred = []
fnames = []
running_loss = 0.0
criterion = nn.CrossEntropyLoss()
with torch.no_grad():
for (step, (latents, lid, lengths, fname)) in enumerate(val_loader):
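# Sort the batch by sequence length (descending) and reorder labels and filenames so they
# stay aligned with the sorted latents.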
(sorted_lengths, indices) = torch.sort(lengths.view((- 1)), dim=0, descending=True)
sorted_lengths = sorted_lengths.long().numpy()
(latents, lid) = (latents[indices], lid[indices])
indices = indices.numpy().tolist()
fname = [f for (index, f) in sorted(zip(indices, fname))]
(latents, lid) = (Variable(latents), Variable(lid))
(latents, lid) = (latents.cuda().long(), lid.cuda().long())
logits = model(latents.long())
loss = criterion(logits, lid.long())
running_loss += loss.item()
targets = lid.cpu().view((- 1)).numpy()
y_true += targets.tolist()
predictions = return_classes(logits)
y_pred += predictions.tolist()
fnames += fname
if (full is not None):
ff = open(((exp_dir + '/eval_') + str(full).zfill(3)), 'a')
assert (len(fnames) == len(y_pred))
for (f, yp, yt) in list(zip(fnames, y_pred, y_true)):
if (yp == yt):
continue
ff.write((((((f + ' ') + str(yp)) + ' ') + str(yt)) + '\n'))
ff.close()
averaged_loss = (running_loss / len(val_loader))
recall = get_metrics(y_pred, y_true)
log_value('Unweighted Recall per epoch', recall, global_epoch)
log_value('validation loss (per epoch)', averaged_loss, global_epoch)
print('Validation Loss: ', averaged_loss)
print('Unweighted Recall for the validation set: ', recall)
print('\n')
return (recall, model) |
class SMBus():
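# smbus-style facade over an I2C controller exposing get_port()/read_from()/write_to();
# I/O errors are swallowed and reads return None on failure.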
def __init__(self, busnum):
self.busnum = busnum
try:
self.bus = gpios.HWPorts.get_i2c_ctrl(busnum)
except:
self.bus = None
def write_quick(self, addr):
try:
i2c = self.bus.get_port(addr)
i2c.write([])
except:
pass
def read_byte(self, addr):
try:
i2c = self.bus.get_port(addr)
res = i2c.read(1)
except:
res = None
return res
def write_byte(self, addr, val):
try:
i2c = self.bus.get_port(addr)
i2c.write(val)
except:
pass
def read_byte_data(self, addr, cmd):
try:
i2c = self.bus.get_port(addr)
res = i2c.read_from(int(cmd), readlen=1)
except:
res = None
return res
def write_byte_data(self, addr, cmd, val):
try:
i2c = self.bus.get_port(addr)
i2c.write_to(int(cmd), val)
except:
pass
def read_word_data(self, addr, cmd):
try:
i2c = self.bus.get_port(addr)
res = i2c.read_from(int(cmd), readlen=2)
except:
res = None
return res
def write_word_data(self, addr, cmd, val):
try:
i2c = self.bus.get_port(addr)
i2c.write_to(int(cmd), val)
except:
pass
def read_block_data(self, addr, cmd, dlen=0):
try:
i2c = self.bus.get_port(addr)
res = i2c.read_from(int(cmd), readlen=dlen)
except:
res = None
return res
def read_i2c_block_data(self, addr, cmd, dlen=0):
return self.read_block_data(addr, cmd, dlen)
def write_block_data(self, addr, cmd, vals):
try:
i2c = self.bus.get_port(addr)
i2c.write_to(int(cmd), vals)
except:
pass
def write_i2c_block_data(self, addr, cmd, vals):
self.write_block_data(addr, cmd, vals) |
class TestNull(util.ColorAsserts, unittest.TestCase):
def test_null_input(self):
c = Color('okhsv', [NaN, 0.5, 0.75], 1)
self.assertTrue(c.is_nan('hue'))
def test_gray_null(self):
c = Color('gray').convert('okhsv')
self.assertTrue(c.is_nan('hue'))
def test_none_input(self):
c = Color('color(--okhsv none 0% 75% / 1)')
self.assertTrue(c.is_nan('hue'))
def test_null_normalization_min_sat(self):
c = Color('color(--okhsv 270 0% 0.75 / 1)').normalize()
self.assertTrue(c.is_nan('hue')) |
def get_abi(contract_sources: Dict[(str, str)], solc_version: Optional[str]=None, allow_paths: Optional[str]=None, remappings: Optional[list]=None, silent: bool=True) -> Dict:
final_output = {Path(k).stem: {'abi': json.loads(v), 'contractName': Path(k).stem, 'type': 'interface', 'source': None, 'offset': None, 'sha1': sha1(v.encode()).hexdigest()} for (k, v) in contract_sources.items() if (Path(k).suffix == '.json')}
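# .json sources above are used as ready-made ABIs; .vy files are compiled individually below,
# and .sol files are grouped by a compatible solc version before compilation.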
for (path, source) in [(k, v) for (k, v) in contract_sources.items() if (Path(k).suffix == '.vy')]:
input_json = generate_input_json({path: source}, language='Vyper')
input_json['settings']['outputSelection']['*'] = {'*': ['abi']}
try:
output_json = compile_from_input_json(input_json, silent, allow_paths)
except Exception:
continue
name = Path(path).stem
final_output[name] = {'abi': output_json['contracts'][path][name]['abi'], 'contractName': name, 'type': 'interface', 'source': source, 'offset': [0, len(source)], 'sha1': sha1(contract_sources[path].encode()).hexdigest()}
solc_sources = {k: v for (k, v) in contract_sources.items() if (Path(k).suffix == '.sol')}
if (not solc_sources):
return final_output
if solc_version:
compiler_targets = {solc_version: list(solc_sources)}
else:
compiler_targets = find_solc_versions(solc_sources, install_needed=True, silent=silent)
for (version, path_list) in compiler_targets.items():
to_compile = {k: v for (k, v) in contract_sources.items() if (k in path_list)}
set_solc_version(version)
input_json = generate_input_json(to_compile, language='Solidity', remappings=remappings)
input_json['settings']['outputSelection']['*'] = {'*': ['abi'], '': ['ast']}
output_json = compile_from_input_json(input_json, silent, allow_paths)
source_nodes = solcast.from_standard_output(output_json)
abi_json = {k: v for (k, v) in output_json['contracts'].items() if (k in path_list)}
for (path, name, data) in [(k, x, y) for (k, v) in abi_json.items() for (x, y) in v.items()]:
contract_node = next((i[name] for i in source_nodes if (i.absolutePath == path)))
dependencies = []
for node in [i for i in contract_node.dependencies if (i.nodeType == 'ContractDefinition')]:
dependency_name = node.name
path_str = node.parent().absolutePath
dependencies.append(_get_alias(dependency_name, path_str))
final_output[name] = {'abi': data['abi'], 'ast': output_json['sources'][path]['ast'], 'contractName': name, 'dependencies': dependencies, 'type': 'interface', 'source': contract_sources[path], 'offset': contract_node.offset, 'sha1': sha1(contract_sources[path].encode()).hexdigest()}
return final_output |
class APISpecsView(MethodView):
def __init__(self, *args, **kwargs):
self.loader = kwargs.pop('loader')
super(APISpecsView, self).__init__(*args, **kwargs)
def get(self):
try:
return jsonify(self.loader())
except:
import logging
logging.exception('jsonify failure; defaulting to json.dumps')
specs = json.dumps(self.loader())
return Response(specs, mimetype='application/json') |
def test_entity_unique_key(default_entity_dict):
with db.session() as session:
entity = ServeEntity(**default_entity_dict)
session.add(entity)
with pytest.raises(Exception):
with db.session() as session:
entity = ServeEntity(**{'prompt_name': 'my_prompt_1', 'sys_code': 'dbgpt', 'prompt_language': 'zh', 'model': 'vicuna-13b-v1.5'})
session.add(entity) |
class TestMetadataFilter(unittest.TestCase):
def test_metadata_defined_vs_undefined(self):
metadata_filter = MetadataFilter(metadata_name='name')
self.assertTrue(metadata_filter('name', Int(name=True).as_ctrait()), 'Expected the filter to return true')
self.assertFalse(metadata_filter('name', Int().as_ctrait()), 'Expected the filter to return false')
def test_metadata_defined_as_none_is_same_as_undefined(self):
metadata_filter = MetadataFilter(metadata_name='name')
self.assertFalse(metadata_filter('name', Int(name=None).as_ctrait()), 'Expected the filter to return false')
def test_filter_equality(self):
filter1 = MetadataFilter(metadata_name='name')
filter2 = MetadataFilter(metadata_name='name')
self.assertEqual(filter1, filter2)
self.assertEqual(hash(filter1), hash(filter2))
def test_filter_not_equal_name_different(self):
filter1 = MetadataFilter(metadata_name='number')
filter2 = MetadataFilter(metadata_name='name')
self.assertNotEqual(filter1, filter2)
def test_filter_not_equal_different_type(self):
filter1 = MetadataFilter(metadata_name='name')
imposter = mock.Mock()
imposter.metadata_name = 'name'
self.assertNotEqual(imposter, filter1)
def test_slots(self):
filter = MetadataFilter(metadata_name='name')
with self.assertRaises(AttributeError):
filter.__dict__
with self.assertRaises(AttributeError):
filter.__weakref__
def test_repr_value(self):
metadata_filter = MetadataFilter(metadata_name='name')
actual = repr(metadata_filter)
self.assertEqual(actual, "MetadataFilter(metadata_name='name')")
def test_eval_repr_roundtrip(self):
metadata_filter = MetadataFilter(metadata_name='name')
self.assertEqual(eval(repr(metadata_filter)), metadata_filter) |
def create_sales_invoice(si_data: JsonDict, so_code: str, update_stock=0, submit=True, shipping_label=None, warehouse_allocations=None, invoice_response=None, so_data: Optional[JsonDict]=None):
if (not invoice_response):
invoice_response = {}
if (not so_data):
so_data = {}
so = frappe.get_doc('Sales Order', so_code)
if so_data:
fully_cancelled = update_cancellation_status(so_data, so)
if fully_cancelled:
create_unicommerce_log(status='Invalid', message='Sales order was cancelled before invoicing.')
return
channel = so.get(CHANNEL_ID_FIELD)
facility_code = so.get(FACILITY_CODE_FIELD)
existing_si = frappe.db.get_value('Sales Invoice', {INVOICE_CODE_FIELD: si_data['code']})
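# Idempotency guard: if a Sales Invoice already exists for this Unicommerce invoice code,
# log it and return the existing document instead of creating a duplicate.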
if existing_si:
si = frappe.get_doc('Sales Invoice', existing_si)
create_unicommerce_log(status='Invalid', message='Sales Invoice already exists, skipped')
return si
settings = frappe.get_cached_doc(SETTINGS_DOCTYPE)
channel_config = frappe.get_cached_doc('Unicommerce Channel', channel)
uni_line_items = si_data['invoiceItems']
warehouse = settings.get_integration_to_erpnext_wh_mapping(all_wh=True).get(facility_code)
shipping_package_code = si_data.get('shippingPackageCode')
shipping_package_info = (_get_shipping_package(so_data, shipping_package_code) or {})
tracking_no = (invoice_response.get('trackingNumber') or shipping_package_info.get('trackingNumber'))
shipping_provider_code = (invoice_response.get('shippingProviderCode') or shipping_package_info.get('shippingProvider') or shipping_package_info.get('shippingCourier'))
shipping_package_status = shipping_package_info.get('status')
si = make_sales_invoice(so.name)
si_line_items = _get_line_items(uni_line_items, warehouse, so.name, channel_config.cost_center, warehouse_allocations)
si.set('items', si_line_items)
si.set('taxes', get_taxes(uni_line_items, channel_config))
si.set(INVOICE_CODE_FIELD, si_data['code'])
si.set(SHIPPING_PACKAGE_CODE_FIELD, shipping_package_code)
si.set(SHIPPING_PROVIDER_CODE, shipping_provider_code)
si.set(TRACKING_CODE_FIELD, tracking_no)
si.set(IS_COD_CHECKBOX, so_data['cod'])
si.set(SHIPPING_METHOD_FIELD, shipping_package_info.get('shippingMethod'))
si.set(SHIPPING_PACKAGE_STATUS_FIELD, shipping_package_status)
si.set(CHANNEL_ID_FIELD, channel)
si.set_posting_time = 1
si.posting_date = get_unicommerce_date(si_data['created'])
si.transaction_date = si.posting_date
si.naming_series = (channel_config.sales_invoice_series or settings.sales_invoice_series)
si.delivery_date = so.delivery_date
si.ignore_pricing_rule = 1
si.update_stock = (False if settings.delivery_note else update_stock)
si.flags.raw_data = si_data
si.insert()
_verify_total(si, si_data)
attach_unicommerce_docs(sales_invoice=si.name, invoice=si_data.get('encodedInvoice'), label=shipping_label, invoice_code=si_data['code'], package_code=si_data.get('shippingPackageCode'))
item_warehouses = {d.warehouse for d in si.items}
for wh in item_warehouses:
if (update_stock and cint(frappe.db.get_value('Warehouse', wh, 'is_group'))):
return si
if submit:
si.submit()
if cint(channel_config.auto_payment_entry):
make_payment_entry(si, channel_config, si.posting_date)
return si |
class OptimizerTest(chex.TestCase):
@chex.variants(with_jit=True, without_jit=True)
def test_minimize_cem(self):
def objective(x):
return jnp.sum(jnp.square(x), axis=(- 1))
def var_fn(x):
return optimizers.minimize_cem(objective, x, jax.random.PRNGKey(0), bounds=((jnp.ones(2) * (- 10.0)), (jnp.ones(2) * 10.0)), n_iterations=100, population_size=400, elite_fraction=0.1, alpha=0.1, return_mean_elites=False)
x0 = (jnp.ones(2) * 2)
(result, _) = var_fn(x0)
chex.assert_tree_all_close(result, jnp.zeros(2), rtol=0.0001, atol=0.0001) |
class CPUSensor(BaseSensor):
name = 'cpu\\d*'
desc = _('Average CPU usage')
cpus = re.compile('\\Acpu\\d*\\Z')
last = None
if ps_v1_api:
cpu_count = ps.NUM_CPUS
else:
cpu_count = ps.cpu_count()
def check(self, sensor):
if self.cpus.match(sensor):
if (len(sensor) == 3):
nber = 0
else:
nber = (int(sensor[3:]) if (len(sensor) > 3) else 999)
if (nber >= self.cpu_count):
print(sensor)
print(self.cpu_count)
print(len(sensor))
raise ISMError(_('Invalid number of CPUs.'))
return True
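# 'cpu' returns the load averaged over all cores; 'cpuN' returns the load of core N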
def get_value(self, sensor):
if (sensor == 'cpu'):
return '{:02.0f}%'.format(self._fetch_cpu())
elif CPUSensor.cpus.match(sensor):
cpus = self._fetch_cpu(percpu=True)
return '{:02.0f}%'.format(cpus[int(sensor[3:])])
return None
def _fetch_cpu(self, percpu=False):
# cpu_load was undefined in the original snippet; a per-CPU usage list from
# psutil (aliased as ps above) is assumed here
cpu_load = ps.cpu_percent(interval=None, percpu=True)
if percpu:
return cpu_load
r = 0.0
for i in cpu_load:
r += i
r /= self.cpu_count
return r |
class Node(_StatusModel, _JsonPickleModel, _UserTasksModel):
_esysinfo = ('sysinfo', 'diskinfo', 'zpools', 'nictags', 'overlay_rules')
_vlan_id = None
_sysinfo_shown = ('Boot Time', 'Manufacturer', 'Product', 'Serial Number', 'SKU Number', 'HW Version', 'HW Family', 'Datacenter Name', 'VM Capable', 'CPU Type', 'CPU Virtualization', 'CPU Physical Cores', 'Live Image', 'Bhyve Capable', 'Bhyve Max Vcpus')
ZPOOL = 'zones'
DEFAULT_OVERLAY_PORT = 4789
DEFAULT_OVERLAY_IP = '0.0.0.0'
VMS_SIZE_TOTAL_KEY = 'vms-size-total:%s'
VMS_SIZE_DC_KEY = 'vms-size-dc:%s:%s'
BHYVE_MIN_PLATFORM_SHORT =
NODES_ALL_KEY = 'nodes_list'
NODES_ALL_EXPIRES = 300
NICTAGS_ALL_KEY = 'nictag_list'
NICTAGS_ALL_EXPIRES = None
SYSTEM_VERSION_EXPIRES = None
OFFLINE = 1
ONLINE = 2
UNREACHABLE = 3
UNLICENSED = 9
STATUS_DB = ((ONLINE, ugettext_noop('online')), (OFFLINE, ugettext_noop('maintenance')), (UNREACHABLE, ugettext_noop('unreachable')), (UNLICENSED, ugettext_noop('unlicensed')))
STATUS = STATUS_DB[:2]
STATUS_OPERATIONAL = frozenset([ONLINE])
STATUS_AVAILABLE_MONITORING = frozenset([ONLINE, OFFLINE, UNREACHABLE])
_pk_key = 'node_uuid'
_log_name_attr = 'hostname'
_cache_status = True
_storage = None
_ns = None
_ip = None
uuid = models.CharField(_('UUID'), max_length=36, primary_key=True)
hostname = models.CharField(_('Hostname'), max_length=128, unique=True)
address = models.CharField(_('Address'), max_length=64)
status = models.SmallIntegerField(_('Status'), choices=STATUS_DB, default=OFFLINE, db_index=True)
owner = models.ForeignKey(User, verbose_name=_('Owner'), default=settings.VMS_NODE_USER_DEFAULT, on_delete=models.PROTECT)
dc = models.ManyToManyField(Dc, through='DcNode', verbose_name=_('Datacenter'), blank=True)
config = models.TextField(_('Config'), blank=True)
cpu = models.PositiveIntegerField(_('CPUs'), help_text=_('Total number of CPUs (cores).'))
ram = models.PositiveIntegerField(_('RAM (MB)'), help_text=_('Total RAM size in MB.'))
cpu_coef = models.DecimalField(_('CPUs coefficient'), max_digits=4, decimal_places=2, default='1', help_text=_('Coefficient for calculating the total number of virtual CPUs.'))
ram_coef = models.DecimalField(_('RAM coefficient'), max_digits=4, decimal_places=2, default='1', help_text=_('Coefficient for calculating the maximum amount of memory for virtual machines.'))
cpu_free = models.IntegerField(_('Free CPUs'), default=0, editable=False)
ram_free = models.IntegerField(_('Free RAM (MB)'), default=0, editable=False)
is_compute = models.BooleanField(_('Compute'), default=True)
is_backup = models.BooleanField(_('Backup'), default=False)
is_head = models.BooleanField(_('Head node'), default=False)
note = models.TextField(_('Note'), blank=True)
class Meta():
app_label = 'vms'
verbose_name = _('Node')
verbose_name_plural = _('Nodes')
def __unicode__(self):
return ('%s' % self.hostname)
def alias(self):
return self.hostname
def name(self):
return self.hostname
def web_data(self):
return {'hostname': self.hostname, 'status': self.status, 'owner': self.owner.username, 'is_compute': self.is_compute, 'is_backup': self.is_backup, 'note': self.note, 'address': self.address, 'cpu_coef': self.cpu_coef, 'ram_coef': self.ram_coef, 'monitoring_templates': self.monitoring_templates, 'monitoring_hostgroups': self.monitoring_hostgroups}
def cpu_total(self):
decimal.getcontext().rounding = decimal.ROUND_DOWN
return int((self.cpu * float(self.cpu_coef)))
def ram_total(self):
decimal.getcontext().rounding = decimal.ROUND_DOWN
return int((self.ram * float(self.ram_coef)))
def _sysinfo(self):
return self.json.get('sysinfo', {})
def platform_version(self):
return self._sysinfo.get('Live Image', None)
def platform_version_short(self):
version = self.platform_version
if (not version):
return 0
return int(version[:8])
def dc_name(self):
return self._sysinfo.get('Datacenter Name', '')
def domain_name(self):
return self.hostname.partition('.')[2]
def cpu_sockets(self):
return self._sysinfo.get('CPU Physical Cores', None)
def sysinfo(self):
x = self._sysinfo
return {i: x.get(i, '') for i in self._sysinfo_shown}
def bhyve_capable(self):
return bool(self._sysinfo.get('Bhyve Capable', False))  # default to False; a 'false' string default would always be truthy
def bhyve_max_vcpus(self):
return int(self._sysinfo.get('Bhyve Max Vcpus', 1000))
def diskinfo(self):
return self.json.get('diskinfo', {})
def zpools(self):
return self.json.get('zpools', {})
def boottime(self):
return int(self._sysinfo.get('Boot Time', 0))
def network_interfaces(self):
return self._sysinfo.get('Network Interfaces', {})
def virtual_network_interfaces(self):
return self._sysinfo.get('Virtual Network Interfaces', {})
def network_aggregations(self):
return self._sysinfo.get('Link Aggregations', {})
def overlays(self):
return self._sysinfo.get('Overlays', {})
def etherstubs(self):
return self._sysinfo.get('Etherstubs', {})
def networking(self):
return {'Network Interfaces': self.network_interfaces, 'Virtual Network Interfaces': self.virtual_network_interfaces, 'Link Aggregations': self.network_aggregations, 'NIC Tags': self.nictags, 'Overlays': self.overlays, 'Etherstubs': self.etherstubs}
def used_nics(self):
all_nics = self.network_interfaces.copy()
all_nics.update(self.virtual_network_interfaces)
return {iface: prop for (iface, prop) in six.iteritems(all_nics) if prop.get('ip4addr')}
def ips(self):
return [nic['ip4addr'] for nic in self.used_nics.values()]
def api_sysinfo(self):
sysinfo = self.sysinfo
sysinfo['networking'] = self.networking
sysinfo['zpools'] = self.zpools
sysinfo['disks'] = self.diskinfo
return sysinfo
def zpool(self):
return self._sysinfo.get('Zpool', None)
def zpool_size(self):
return (int(self._sysinfo.get('Zpool Size in GiB', 0)) * 1024)
def storage(self):
if (not self._storage):
if self.zpool:
try:
self._ns = NodeStorage.objects.select_related('storage').get(node=self, zpool=self.zpool)
self._storage = self._ns.storage
except models.ObjectDoesNotExist:
name = ('%s@%s' % (self.zpool, self.hostname))[:64]  # 'zpool@hostname', truncated to 64 chars (separator assumed; original format string was garbled)
self._storage = Storage(size=self.zpool_size, owner=self.owner, access=Storage.PUBLIC, name=name, alias=self.zpool, desc='Default local storage pool')
else:
self._storage = Storage(size_coef='1.0', size=0, type=Storage.LOCAL)
return self._storage
def disk(self):
return self.storage.size
def disk(self, value):
self.storage.size = value
def disk_free(self):
return self.storage.size_free
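# Setting disk_free also refreshes the cached total size of VMs stored on this node storage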
@disk_free.setter
def disk_free(self, value):
if self._ns:
vms_size = ((((self.storage.size_total - self._ns.size_backups) - self._ns.size_snapshots) - self._ns.size_rep_snapshots) - value)
cache.set((self.VMS_SIZE_TOTAL_KEY % self._ns.pk), vms_size)
self.storage.size_free = value
def disk_coef(self):
return self.storage.size_coef
def is_online(self):
return (self.status == self.ONLINE)
def is_offline(self):
return (self.status == self.OFFLINE)
def is_unreachable(self):
return (self.status == self.UNREACHABLE)
def is_unlicensed(self):
return (self.status == self.UNLICENSED)
def ip_address(self):
from vms.models.ipaddress import IPAddress, Subnet
if (self._ip is None):
admin_subnets = Subnet.objects.filter(name__in=(settings.VMS_NET_ADMIN, settings.VMS_NET_ADMIN_OVERLAY))
self._ip = IPAddress.objects.get(subnet__in=admin_subnets, ip=self.address, usage=IPAddress.NODE)
return self._ip
def create_ip_address(self):
from vms.models.ipaddress import IPAddress, Subnet
ipaddr = IPAddress.get_ip_address(self.address)
for subnet in Subnet.objects.filter(name__in=(settings.VMS_NET_ADMIN, settings.VMS_NET_ADMIN_OVERLAY)):
if (ipaddr in subnet.ip_network):
self._ip = IPAddress(subnet=subnet, ip=self.address, usage=IPAddress.NODE, note=self.hostname)
return self._ip
raise IPAddress.DoesNotExist(('IP address "%s" does not belong to any admin network' % self.address))
def vlan_id(self):
assert (self._vlan_id is not None), 'vlan_id is available only during node initialization'
return self._vlan_id
def esysinfo(self):
_json = self.json
return {i: _json.get(i, {}) for i in self._esysinfo}
def esysinfo(self, value):
_json = self.json
for i in self._esysinfo:
_json[i] = value[i]
self.json = _json
def sshkey(self):
return self.json.get('sshkey', None)
def sshkey(self, value):
self.save_item('sshkey', value, save=False)
def authorized_keys(self):
return self.json.get('authorized_keys', '')
@authorized_keys.setter
def authorized_keys(self, value):
self.save_item('authorized_keys', value, save=False)
def save_authorized_keys(self, value):
self.authorized_keys = value
self.save(update_resources=False, update_fields=('enc_json', 'changed'))
def nictags(self):
return self.json.get('nictags', [])
def overlay_rules(self):
return self.json.get('overlay_rules', {})
def lifetime(self):
return (int(timezone.now().strftime('%s')) - int(self.created.strftime('%s')))
def zabbix_name(self):
return self.hostname
def zabbix_id(self):
return self.uuid
def zabbix_info(self):
return self.json.get('zabbix', {})
@zabbix_info.setter
def zabbix_info(self, host):
self.save_item('zabbix', host, save=False)
def save_zabbix_info(self, zxhost=None):
if (zxhost is not None):
self.zabbix_info = zxhost
self.save(update_resources=False, update_fields=('enc_json', 'changed'))
def zabbix_sync(self):
return True
@zabbix_sync.setter
def zabbix_sync(self, value):
pass
def save_metadata(self, key, value, metadata='internal_metadata', save=True, **kwargs):
return self.save_item(key, value, save=save, metadata=metadata, **kwargs)
def delete_metadata(self, key, metadata='internal_metadata', save=True, **kwargs):
return self.delete_item(key, save=save, metadata=metadata, **kwargs)
def internal_metadata(self):
return self.json.get('internal_metadata', {})
def monitoring_hostgroups(self):
return self.internal_metadata.get('mon_hostgroups', [])
@monitoring_hostgroups.setter
def monitoring_hostgroups(self, value):
self.save_metadata('mon_hostgroups', value, save=False)
def monitoring_templates(self):
return self.internal_metadata.get('mon_templates', [])
@monitoring_templates.setter
def monitoring_templates(self, value):
self.save_metadata('mon_templates', value, save=False)
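# choose() delegates node selection for a new VM to DcNode; all() and all_nictags() return cached listings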
def choose(cls, vm):
return DcNode.choose_node(vm.dc, vm)
def all(cls, clear_cache=False):
if clear_cache:
return cache.delete(cls.NODES_ALL_KEY)
nodes = cache.get(cls.NODES_ALL_KEY)
if (not nodes):
nodes = cls.objects.only('uuid', 'hostname', 'status', 'address', 'is_compute', 'is_backup', 'is_head').order_by('hostname')
cache.set(cls.NODES_ALL_KEY, nodes, cls.NODES_ALL_EXPIRES)
return nodes
def all_nictags(cls, clear_cache=False):
if clear_cache:
cache.delete(cls.NICTAGS_ALL_KEY)
nictags = cache.get(cls.NICTAGS_ALL_KEY)
if (not nictags):
nodes = cls.objects.all()
nictags = {}
for node in nodes:
for nic in node.nictags:
nic_name = nic['name']
nic_type = nic['type'].replace('aggr', 'normal')
if ((nic_name in nictags) and (nic_type != nictags[nic_name])):
raise ValueError('Duplicate NIC tag name with different type exists on another compute node!')
nictags[nic_name] = nic_type
cache.set(cls.NICTAGS_ALL_KEY, nictags, cls.NICTAGS_ALL_EXPIRES)
return nictags
def all_nictags_choices(cls):
return sorted([(name, ('%s (%s)' % (name, typ))) for (name, typ) in six.iteritems(cls.all_nictags())])
def get_node_ip_by_iface(self, iface, node_nics=None):
if (node_nics is None):
node_nics = self.used_nics
if (iface in node_nics):
return node_nics[iface]['ip4addr']
return None
def get_node_ip_by_nictag(self, nictag, node_nics=None):
if (node_nics is None):
node_nics = self.used_nics
for nic in six.itervalues(node_nics):
if (nictag in nic.get('NIC Names', ())):
return nic['ip4addr']
return None
def address_admin(self):
used_nics = self.used_nics
return (self.get_node_ip_by_iface('admin0', node_nics=used_nics) or self.get_node_ip_by_iface('admin_0', node_nics=used_nics) or self.get_node_ip_by_nictag('admin', node_nics=used_nics))
def address_external(self):
used_nics = self.used_nics
return (self.get_node_ip_by_iface('external0', node_nics=used_nics) or self.get_node_ip_by_iface('external_0', node_nics=used_nics) or self.get_node_ip_by_nictag('external', node_nics=used_nics) or self.get_node_ip_by_iface('admin0', node_nics=used_nics) or self.get_node_ip_by_iface('admin_0', node_nics=used_nics) or self.get_node_ip_by_nictag('admin', node_nics=used_nics))
def get_overlay_port(self, overlay_name):
overlay_rule = self.overlay_rules[overlay_name]
return (overlay_rule['port'] or self.DEFAULT_OVERLAY_PORT)
def get_overlay_ip(self, overlay_name, remote=True):
overlay_rule = self.overlay_rules[overlay_name]
overlay_ip = overlay_rule['ip']
if ((not overlay_ip) or (overlay_ip == self.DEFAULT_OVERLAY_IP)):
if remote:
overlay_ip = self.address_external
else:
overlay_ip = self.address_admin
assert overlay_ip, ('IP address was not defined for overlay "%s" on compute node "%s"' % (overlay_name, self))
return overlay_ip
def _initializing_key(self):
return ('node:%s:initializing' % self.uuid)
def is_initializing(self):
return bool(cache.get(self._initializing_key))
def set_initializing(self):
cache.set(self._initializing_key, True, 600)
def del_initializing(self):
cache.delete(self._initializing_key)
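# Derive the node's admin IP address and VLAN ID from sysinfo during node initialization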
def _init_address(self):
sysinfo = self._sysinfo
(ip, admin_iface) = (None, None)
vnics = sysinfo.get('Virtual Network Interfaces', {})
for vnic_name in ('admin0', 'admin_0'):
admin_iface = vnics.get(vnic_name, {})
ip = admin_iface.get('ip4addr', None)
if ip:
break
if (not ip):
for (iface, iface_info) in sysinfo['Network Interfaces'].items():
if ('admin' in iface_info.get('NIC Names', ())):
admin_iface = iface_info
ip = admin_iface.get('ip4addr', None)
break
if (not ip):
raise RuntimeError('Node IP Address not found in sysinfo output')
self._vlan_id = int(admin_iface.get('VLAN', 0))
self.address = ip
def parse_sysinfo(self, esysinfo, update_ip=False):
self.config = esysinfo.pop('config', '')
self.sshkey = esysinfo.pop('sshkey', '')
self.esysinfo = esysinfo
self.hostname = self._sysinfo['Hostname']
if update_ip:
self._init_address()
def sysinfo_changed(self, esysinfo):
new_esysinfo = esysinfo.copy()
config = new_esysinfo.pop('config', '')
new_esysinfo.pop('sshkey', None)
return (not ((self.esysinfo == new_esysinfo) and (self.config == config)))
def sshkey_changed(self, esysinfo):
return (self.sshkey != esysinfo.get('sshkey', ''))
def create_from_sysinfo(cls, uuid, esysinfo, status=ONLINE, is_head=False):
node = cls(uuid=uuid, status=status, is_head=is_head)
node.parse_sysinfo(esysinfo, update_ip=True)
node.set_initializing()
node.save(sync_json=True, update_resources=True, zpool_create=True, clear_cache=True)
if settings.VMS_NODE_DC_DEFAULT:
dc = Dc.objects.get_by_id(settings.VMS_NODE_DC_DEFAULT)
dc_node = DcNode(dc=dc, node=node, strategy=DcNode.SHARED)
dc_node.save(update_resources=True)
if node._ns:
node._ns.dc.add(dc)
return node
def update_from_sysinfo(self, esysinfo):
current_zpools = self.zpools
self.parse_sysinfo(esysinfo, update_ip=False)
new_zpools = self.zpools
self.save(sync_json=True, update_resources=True, zpool_update=True, clear_cache=True, zpools_update=(current_zpools != new_zpools))
def ram_kvm_overhead(self):
return self.json.get('ram_kvm_overhead', 0)
@ram_kvm_overhead.setter
def ram_kvm_overhead(self, value):
self.save_item('ram_kvm_overhead', value, save=False)
def resources(self):
decimal.getcontext().rounding = decimal.ROUND_DOWN
disk_size_total = self.storage.size_total
if self._ns:
disk_size_total -= ((self._ns.size_backups + self._ns.size_snapshots) + self._ns.size_rep_snapshots)
return ((self.cpu * float(self.cpu_coef)), (self.ram * float(self.ram_coef)), disk_size_total)
def get_used_resources(self, dc):
(cpu, ram, disk) = (0, 0, 0)
for vm in self.vm_set.filter(dc=dc):
(vm_cpu, vm_ram, vm_disk) = vm.get_cpu_ram_disk(zpool=self.zpool, ram_overhead=True)
cpu += vm_cpu
ram += vm_ram
disk += vm_disk
return (int(cpu), int(ram), int(disk))
def get_free_resources(self, cpu, ram, disk, dc=None, dc_exclude=None, dcs_exclude=None):
if dc:
vms = self.vm_set.filter(dc=dc)
else:
vms = self.vm_set.all()
if dcs_exclude:
vms = vms.exclude(dc__in=dcs_exclude)
if dc_exclude:
vms = vms.exclude(dc=dc_exclude)
for vm in vms:
(vm_cpu, vm_ram, vm_disk) = vm.get_cpu_ram_disk(zpool=self.zpool, ram_overhead=True)
cpu -= vm_cpu
ram -= vm_ram
disk -= vm_disk
return (int(cpu), int(ram), int(disk))
def get_ram_kvm_overhead(self, dc=None):
if dc:
vms = self.vm_set.filter(dc=dc)
else:
vms = self.vm_set
vms_count = vms.filter(ostype__in=_HVMType.HVM).count()
return (vms_count * settings.VMS_VM_KVM_MEMORY_OVERHEAD)
def update_resources(self, save=False):
if save:
return self.save(update_resources=True)
(self.cpu_free, self.ram_free, self.disk_free) = self.get_free_resources(*self.resources)
self.ram_kvm_overhead = self.get_ram_kvm_overhead()
def get_dc_node(self, dc):
return DcNode.objects.get(dc=dc, node=self)
def get_node_storage(self, dc, zpool):
return NodeStorage.objects.select_related('storage').get(node=self, zpool=zpool, dc=dc)
def get_node_storages(self, dc, zpools):
return NodeStorage.objects.select_related('storage').filter(node=self, zpool__in=zpools, dc=dc)
def sync_json(self):
sysinfo = self._sysinfo
self.cpu = int(sysinfo.get('CPU Total Cores', 0))
self.ram = int(sysinfo.get('MiB of Memory', 0))
self.disk = self.zpool_size
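# save() can re-sync from sysinfo, recalculate free resources and keep NodeStorage, DcNode and cached node lists consistent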
def save(self, sync_json=False, update_resources=True, zpool_update=False, zpool_create=False, zpools_update=False, clear_cache=False, save_ip=False, **kwargs):
if sync_json:
self.sync_json()
update_resources = True
if update_resources:
self.update_resources(save=False)
zpool_update = True
status_changed = (self._orig_status != self.status)
with transaction.atomic():
ret = super(Node, self).save(**kwargs)
if (save_ip and self._ip):
self._ip.save()
if (clear_cache or status_changed):
self.all(clear_cache=True)
self.all_nictags(clear_cache=True)
if (self.zpool and (zpool_update or zpool_create)):
self.storage.save()
if (zpool_create or (not self._ns)):
self._ns = NodeStorage(node=self, storage=self.storage, zpool=self.zpool)
self._ns.save()
if zpools_update:
node_zpools = self.zpools
node_zpools.pop(self.ZPOOL)
for ns in NodeStorage.objects.select_related('storage').filter(node=self, zpool__in=node_zpools.keys()):
try:
zpool_size = node_zpools[ns.zpool]['size']
except KeyError:
continue
if (ns.storage.size != zpool_size):
with transaction.atomic():
ns.storage.size = zpool_size
ns.storage.save()
ns.save()
if update_resources:
DcNode.update_all(node=self)
return ret
def save_status(self, new_status=None, **kwargs):
kwargs['update_resources'] = False
return super(Node, self).save_status(new_status=new_status, **kwargs)
def delete(self, **kwargs):
ret = super(Node, self).delete(**kwargs)
self.all(clear_cache=True)
self.all_nictags(clear_cache=True)
return ret
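# Task queue names (fast/slow/image/backup) are derived from the node hostname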
def _get_queue(self, speed):
return ((speed + '.') + self.hostname)
def all_queues(self):
return [self._get_queue(i) for i in ('fast', 'slow', 'image', 'backup')]
def fast_queue(self):
return self._get_queue('fast')
def slow_queue(self):
assert self.is_compute, 'Node compute capability disabled'
return self._get_queue('slow')
def backup_queue(self):
assert self.is_backup, 'Node backup capability disabled'
return self._get_queue('backup')
def image_queue(self):
return self._get_queue('image')
def color(self):
return ('#' + str(self.uuid)[30:])
def vendor(self):
return self._sysinfo.get('Manufacturer', '').replace(' Inc.', '')
def model(self):
sysinfo = self._sysinfo
product = sysinfo.get('Product', '')
if (sysinfo.get('Manufacturer', None) == 'IBM'):
product = product.split('-')[0].replace('IBM', '')
return product.replace('Server', '').strip()
def worker(self, queue):
return self._get_queue(queue).replace('.', '@', 1)  # worker name in the form 'queue@hostname' (separator assumed)
def _system_version_key(self):
return ('node:%s:system-version' % self.uuid)
def system_version(self):
from que.utils import worker_command
version = cache.get(self._system_version_key)
if (not version):
worker = self.worker('fast')
version = (worker_command('system_version', worker, timeout=0.5) or '')
if version:
cache.set(self._system_version_key, version, self.SYSTEM_VERSION_EXPIRES)
return version
@system_version.deleter
def system_version(self):
cache.delete(self._system_version_key)
def has_related_tasks(self):
from vms.models.image import Image, ImageVm
for vm in self.vm_set.all():
if vm.tasks:
return True
for ns in self.nodestorage_set.all():
if ns.tasks:
return True
if (self == ImageVm().node):
for img in Image.objects.all():
if img.tasks:
return True
return False |
class TestEvFormParallelTables(TestCase):
def setUp(self):
self.text1 = 'Test text'
self.table2 = evtable.EvTable('Ab', 'Sc', table=[['|ySTR', '|yCON', '|yDEX'], [10, 10, 10]])
self.table3 = evtable.EvTable('|RSkill', '|RLevel', table=[['|yAcro', '|yAnim', '|yArca', '|yAth', '|yDec', '|yHis'], [10, 10, 10, 10, 10, 10]])
self.formdict = {'FORM': _SHEET, 'FORMCHAR': 'x', 'TABLECHAR': 'c'}
def test_parallel_tables(self):
form = evform.EvForm(self.formdict)
form.map(cells={'1': self.text1}, tables={'2': self.table2, '3': self.table3})
self.assertEqual(ansi.strip_ansi(str(form).strip()), _EXPECTED.strip()) |
def main_generator(dut):
test_probe = True
test_writes = True
test_reads = True
if test_probe:
packet = etherbone.EtherbonePacket()
packet.pf = 1
dut.etherbone_model.send(packet)
(yield from dut.etherbone_model.receive())
print(('probe: ' + str(bool(dut.etherbone_model.rx_packet.pr))))
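# Send Etherbone write records, then read the same addresses back and compare with the written data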
for i in range(2):
if test_writes:
writes_datas = [j for j in range(4)]
writes = etherbone.EtherboneWrites(base_addr=4096, datas=writes_datas)
record = etherbone.EtherboneRecord()
record.writes = writes
record.reads = None
record.bca = 0
record.rca = 0
record.rff = 0
record.cyc = 0
record.wca = 0
record.wff = 0
record.byte_enable = 15
record.wcount = len(writes_datas)
record.rcount = 0
packet = etherbone.EtherbonePacket()
packet.records = [record]
dut.etherbone_model.send(packet)
for i in range(256):
(yield)
if test_reads:
reads_addrs = [(4096 + (4 * j)) for j in range(4)]
reads = etherbone.EtherboneReads(base_ret_addr=4096, addrs=reads_addrs)
record = etherbone.EtherboneRecord()
record.writes = None
record.reads = reads
record.bca = 0
record.rca = 0
record.rff = 0
record.cyc = 0
record.wca = 0
record.wff = 0
record.byte_enable = 15
record.wcount = 0
record.rcount = len(reads_addrs)
packet = etherbone.EtherbonePacket()
packet.records = [record]
dut.etherbone_model.send(packet)
(yield from dut.etherbone_model.receive())
loopback_writes_datas = dut.etherbone_model.rx_packet.records.pop().writes.get_datas()
(s, l, e) = check(writes_datas, loopback_writes_datas)
print(((((('shift ' + str(s)) + ' / length ') + str(l)) + ' / errors ') + str(e))) |
def use_pysqlite3():
import platform
import sqlite3
if ((platform.system() == 'Linux') and (sqlite3.sqlite_version_info < (3, 35, 0))):
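# ChromaDB requires sqlite3 >= 3.35.0; on older Linux systems install pysqlite3-binary and swap it in at runtime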
try:
import datetime
import subprocess
import sys
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'pysqlite3-binary', '--quiet', '--disable-pip-version-check'])
__import__('pysqlite3')
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S,%f')[:(- 3)]
print(f'{current_time} [embedchain] [INFO]', 'Swapped std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.', f'Your original version was {sqlite3.sqlite_version}.')
except Exception as e:
current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S,%f')[:(- 3)]
print(f'{current_time} [embedchain] [ERROR]', 'Failed to swap std-lib sqlite3 with pysqlite3 for ChromaDb compatibility.', 'Error:', e) |
class TestSwagger(unittest.TestCase):
def test_swagger_valid(self):
try:
utils.validate_spec(spec)
except exceptions.SwaggerError as error:
self.fail(str(error))
def test_format_docstring(self):
DOCSTRING = '\n a\n b\n\n c\n '
after_format = format_docstring(DOCSTRING)
expected_format = 'a b \n\n c'
self.assertEqual(after_format, expected_format)
def test_format_not_docstring(self):
after_format = format_docstring(None)
expected_format = ''
self.assertEqual(after_format, expected_format) |
class InlineHiliteExtension(Extension):
def __init__(self, *args, **kwargs):
self.inlinehilite = []
self.config = {'style_plain_text': [False, "Process inline code even when a language is not specified or language is specified as 'text'. When 'False', no classes will be added to 'text' code blocks and no scoping will be performed. The content will just be escaped. - Default: False"], 'css_class': ['', "Set class name for wrapper element. The default of Highlight will be used if nothing is set. - Default: ''"], 'custom_inline': [[], 'Custom inline - default []']}
super(InlineHiliteExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md):
config = self.getConfigs()
md.inlinePatterns.register(InlineHilitePattern(BACKTICK_CODE_RE, config, md), 'backtick', 190)
md.registerExtensions(['pymdownx.highlight'], {'pymdownx.highlight': {'_enabled': False}}) |