body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
90e8ebf9dfec2fb03cd11a543007b8ad9dc9e5bc6a7d83e10562532761800286
|
@classmethod
def latest_sub_partition(cls, table_name, schema, database, **kwargs):
"Returns the latest (max) partition value for a table\n\n A filtering criteria should be passed for all fields that are\n partitioned except for the field to be returned. For example,\n if a table is partitioned by (``ds``, ``event_type`` and\n ``event_category``) and you want the latest ``ds``, you'll want\n to provide a filter as keyword arguments for both\n ``event_type`` and ``event_category`` as in\n ``latest_sub_partition('my_table',\n event_category='page', event_type='click')``\n\n :param table_name: the name of the table, can be just the table\n name or a fully qualified table name as ``schema_name.table_name``\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n\n :param kwargs: keyword arguments define the filtering criteria\n on the partition list. There can be many of these.\n :type kwargs: str\n >>> latest_sub_partition('sub_partition_table', event_type='click')\n '2018-01-01'\n "
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]['column_names']
for k in kwargs.keys():
if (k not in k in part_fields):
msg = 'Field [{k}] is not part of the portioning key'
raise SupersetTemplateException(msg)
if (len(kwargs.keys()) != (len(part_fields) - 1)):
msg = 'A filter needs to be specified for {} out of the {} fields.'.format((len(part_fields) - 1), len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if (field not in kwargs.keys()):
field_to_return = field
sql = cls._partition_query(table_name, 1, [(field_to_return, True)], kwargs)
df = database.get_df(sql, schema)
if df.empty:
return ''
return df.to_dict()[field_to_return][0]
|
Returns the latest (max) partition value for a table
A filtering criteria should be passed for all fields that are
partitioned except for the field to be returned. For example,
if a table is partitioned by (``ds``, ``event_type`` and
``event_category``) and you want the latest ``ds``, you'll want
to provide a filter as keyword arguments for both
``event_type`` and ``event_category`` as in
``latest_sub_partition('my_table',
event_category='page', event_type='click')``
:param table_name: the name of the table, can be just the table
name or a fully qualified table name as ``schema_name.table_name``
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param kwargs: keyword arguments define the filtering criteria
on the partition list. There can be many of these.
:type kwargs: str
>>> latest_sub_partition('sub_partition_table', event_type='click')
'2018-01-01'
|
superset/db_engine_specs.py
|
latest_sub_partition
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def latest_sub_partition(cls, table_name, schema, database, **kwargs):
"Returns the latest (max) partition value for a table\n\n A filtering criteria should be passed for all fields that are\n partitioned except for the field to be returned. For example,\n if a table is partitioned by (``ds``, ``event_type`` and\n ``event_category``) and you want the latest ``ds``, you'll want\n to provide a filter as keyword arguments for both\n ``event_type`` and ``event_category`` as in\n ``latest_sub_partition('my_table',\n event_category='page', event_type='click')``\n\n :param table_name: the name of the table, can be just the table\n name or a fully qualified table name as ``schema_name.table_name``\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n\n :param kwargs: keyword arguments define the filtering criteria\n on the partition list. There can be many of these.\n :type kwargs: str\n >>> latest_sub_partition('sub_partition_table', event_type='click')\n '2018-01-01'\n "
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]['column_names']
for k in kwargs.keys():
if (k not in k in part_fields):
msg = 'Field [{k}] is not part of the portioning key'
raise SupersetTemplateException(msg)
if (len(kwargs.keys()) != (len(part_fields) - 1)):
msg = 'A filter needs to be specified for {} out of the {} fields.'.format((len(part_fields) - 1), len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if (field not in kwargs.keys()):
field_to_return = field
sql = cls._partition_query(table_name, 1, [(field_to_return, True)], kwargs)
df = database.get_df(sql, schema)
if df.empty:
return
return df.to_dict()[field_to_return][0]
|
@classmethod
def latest_sub_partition(cls, table_name, schema, database, **kwargs):
"Returns the latest (max) partition value for a table\n\n A filtering criteria should be passed for all fields that are\n partitioned except for the field to be returned. For example,\n if a table is partitioned by (``ds``, ``event_type`` and\n ``event_category``) and you want the latest ``ds``, you'll want\n to provide a filter as keyword arguments for both\n ``event_type`` and ``event_category`` as in\n ``latest_sub_partition('my_table',\n event_category='page', event_type='click')``\n\n :param table_name: the name of the table, can be just the table\n name or a fully qualified table name as ``schema_name.table_name``\n :type table_name: str\n :param schema: schema / database / namespace\n :type schema: str\n :param database: database query will be run against\n :type database: models.Database\n\n :param kwargs: keyword arguments define the filtering criteria\n on the partition list. There can be many of these.\n :type kwargs: str\n >>> latest_sub_partition('sub_partition_table', event_type='click')\n '2018-01-01'\n "
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]['column_names']
for k in kwargs.keys():
if (k not in k in part_fields):
msg = 'Field [{k}] is not part of the portioning key'
raise SupersetTemplateException(msg)
if (len(kwargs.keys()) != (len(part_fields) - 1)):
msg = 'A filter needs to be specified for {} out of the {} fields.'.format((len(part_fields) - 1), len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if (field not in kwargs.keys()):
field_to_return = field
sql = cls._partition_query(table_name, 1, [(field_to_return, True)], kwargs)
df = database.get_df(sql, schema)
if df.empty:
return
return df.to_dict()[field_to_return][0]<|docstring|>Returns the latest (max) partition value for a table
A filtering criteria should be passed for all fields that are
partitioned except for the field to be returned. For example,
if a table is partitioned by (``ds``, ``event_type`` and
``event_category``) and you want the latest ``ds``, you'll want
to provide a filter as keyword arguments for both
``event_type`` and ``event_category`` as in
``latest_sub_partition('my_table',
event_category='page', event_type='click')``
:param table_name: the name of the table, can be just the table
name or a fully qualified table name as ``schema_name.table_name``
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param kwargs: keyword arguments define the filtering criteria
on the partition list. There can be many of these.
:type kwargs: str
>>> latest_sub_partition('sub_partition_table', event_type='click')
'2018-01-01'<|endoftext|>
|
beb48df56d5eac1900a66803a36598e8943d881e1613a71eaa1ee8b1d6b57e28
|
@staticmethod
def create_table_from_csv(form, table):
'Uploads a csv file and creates a superset datasource in Hive.'
def convert_to_hive_type(col_type):
"maps tableschema's types to hive types"
tableschema_to_hive_types = {'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING'}
return tableschema_to_hive_types.get(col_type, 'STRING')
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if (not bucket_path):
logging.info('No upload bucket specified')
raise Exception('No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
schema_name = form.schema.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if (('.' in table_name) or schema_name):
raise Exception("You can't specify a namespace. All tables will be uploaded to the `{}` namespace".format(config.get('HIVE_NAMESPACE')))
full_table_name = '{}.{}'.format(config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
else:
if (('.' in table_name) and schema_name):
raise Exception("You can't specify a namespace both in the name of the table and in the schema field. Please remove one")
full_table_name = ('{}.{}'.format(schema_name, table_name) if schema_name else table_name)
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = (config['UPLOAD_FOLDER'] + secure_filename(filename))
from tableschema import Table
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append('`{}` {}'.format(column_info['name'], convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
import boto3
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(upload_path, bucket_path, os.path.join(upload_prefix, table_name, filename))
sql = f'''CREATE TABLE {full_table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')'''
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql)
|
Uploads a csv file and creates a superset datasource in Hive.
|
superset/db_engine_specs.py
|
create_table_from_csv
|
riskilla/incubator-superset
| 1
|
python
|
@staticmethod
def create_table_from_csv(form, table):
def convert_to_hive_type(col_type):
"maps tableschema's types to hive types"
tableschema_to_hive_types = {'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING'}
return tableschema_to_hive_types.get(col_type, 'STRING')
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if (not bucket_path):
logging.info('No upload bucket specified')
raise Exception('No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
schema_name = form.schema.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if (('.' in table_name) or schema_name):
raise Exception("You can't specify a namespace. All tables will be uploaded to the `{}` namespace".format(config.get('HIVE_NAMESPACE')))
full_table_name = '{}.{}'.format(config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
else:
if (('.' in table_name) and schema_name):
raise Exception("You can't specify a namespace both in the name of the table and in the schema field. Please remove one")
full_table_name = ('{}.{}'.format(schema_name, table_name) if schema_name else table_name)
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = (config['UPLOAD_FOLDER'] + secure_filename(filename))
from tableschema import Table
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append('`{}` {}'.format(column_info['name'], convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
import boto3
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(upload_path, bucket_path, os.path.join(upload_prefix, table_name, filename))
sql = f'CREATE TABLE {full_table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')'
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql)
|
@staticmethod
def create_table_from_csv(form, table):
def convert_to_hive_type(col_type):
"maps tableschema's types to hive types"
tableschema_to_hive_types = {'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING'}
return tableschema_to_hive_types.get(col_type, 'STRING')
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if (not bucket_path):
logging.info('No upload bucket specified')
raise Exception('No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
schema_name = form.schema.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if (('.' in table_name) or schema_name):
raise Exception("You can't specify a namespace. All tables will be uploaded to the `{}` namespace".format(config.get('HIVE_NAMESPACE')))
full_table_name = '{}.{}'.format(config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
else:
if (('.' in table_name) and schema_name):
raise Exception("You can't specify a namespace both in the name of the table and in the schema field. Please remove one")
full_table_name = ('{}.{}'.format(schema_name, table_name) if schema_name else table_name)
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = (config['UPLOAD_FOLDER'] + secure_filename(filename))
from tableschema import Table
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append('`{}` {}'.format(column_info['name'], convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
import boto3
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(upload_path, bucket_path, os.path.join(upload_prefix, table_name, filename))
sql = f'CREATE TABLE {full_table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')'
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql)<|docstring|>Uploads a csv file and creates a superset datasource in Hive.<|endoftext|>
|
db2e3c12fc0fcf9ec8fbdb5e06ce2d2cfcc7cbfed6e791aa44dd479bb51b161d
|
@classmethod
def handle_cursor(cls, cursor, query, session):
'Updates progress information'
from pyhive import hive
unfinished_states = (hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while (polled.operationState in unfinished_states):
query = session.query(type(query)).filter_by(id=query.id).one()
if (query.status == QueryStatus.STOPPED):
cursor.cancel()
break
log = (cursor.fetch_logs() or '')
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if (progress > query.progress):
query.progress = progress
needs_commit = True
if (not tracking_url):
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[(- 2)]
logging.info('Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info('Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True
if (job_id and (len(log_lines) > last_log_line)):
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
time.sleep(hive_poll_interval)
polled = cursor.poll()
|
Updates progress information
|
superset/db_engine_specs.py
|
handle_cursor
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def handle_cursor(cls, cursor, query, session):
from pyhive import hive
unfinished_states = (hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while (polled.operationState in unfinished_states):
query = session.query(type(query)).filter_by(id=query.id).one()
if (query.status == QueryStatus.STOPPED):
cursor.cancel()
break
log = (cursor.fetch_logs() or )
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if (progress > query.progress):
query.progress = progress
needs_commit = True
if (not tracking_url):
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[(- 2)]
logging.info('Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info('Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True
if (job_id and (len(log_lines) > last_log_line)):
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
time.sleep(hive_poll_interval)
polled = cursor.poll()
|
@classmethod
def handle_cursor(cls, cursor, query, session):
from pyhive import hive
unfinished_states = (hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while (polled.operationState in unfinished_states):
query = session.query(type(query)).filter_by(id=query.id).one()
if (query.status == QueryStatus.STOPPED):
cursor.cancel()
break
log = (cursor.fetch_logs() or )
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if (progress > query.progress):
query.progress = progress
needs_commit = True
if (not tracking_url):
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[(- 2)]
logging.info('Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info('Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True
if (job_id and (len(log_lines) > last_log_line)):
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
time.sleep(hive_poll_interval)
polled = cursor.poll()<|docstring|>Updates progress information<|endoftext|>
|
24f164c7c09b34d57503a37cd2dbfc017d3dbe31a323637efafb7ded27e04754
|
@classmethod
def _latest_partition_from_df(cls, df):
'Hive partitions look like ds={partition name}'
if (not df.empty):
return df.ix[(:, 0)].max().split('=')[1]
|
Hive partitions look like ds={partition name}
|
superset/db_engine_specs.py
|
_latest_partition_from_df
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _latest_partition_from_df(cls, df):
if (not df.empty):
return df.ix[(:, 0)].max().split('=')[1]
|
@classmethod
def _latest_partition_from_df(cls, df):
if (not df.empty):
return df.ix[(:, 0)].max().split('=')[1]<|docstring|>Hive partitions look like ds={partition name}<|endoftext|>
|
71f1717bb6846a63b4e81e1779d3968433ec394bdfa52362987ca0b110286b4e
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
'\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n '
pass
|
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
|
superset/db_engine_specs.py
|
modify_url_for_impersonation
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
'\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n '
pass
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
'\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n '
pass<|docstring|>Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username<|endoftext|>
|
13746ded1374d8acf8c22a5b7c6436495212f3cf73a6c3469de939383d065d10
|
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
'\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n '
configuration = {}
url = make_url(uri)
backend_name = url.get_backend_name()
if ((backend_name == 'hive') and ('auth' in url.query.keys()) and (impersonate_user is True) and (username is not None)):
configuration['hive.server2.proxy.user'] = username
return configuration
|
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation
|
superset/db_engine_specs.py
|
get_configuration_for_impersonation
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
'\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n '
configuration = {}
url = make_url(uri)
backend_name = url.get_backend_name()
if ((backend_name == 'hive') and ('auth' in url.query.keys()) and (impersonate_user is True) and (username is not None)):
configuration['hive.server2.proxy.user'] = username
return configuration
|
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
'\n Return a configuration dictionary that can be merged with other configs\n that can set the correct properties for impersonating users\n :param uri: URI string\n :param impersonate_user: Bool indicating if impersonation is enabled\n :param username: Effective username\n :return: Dictionary with configs required for impersonation\n '
configuration = {}
url = make_url(uri)
backend_name = url.get_backend_name()
if ((backend_name == 'hive') and ('auth' in url.query.keys()) and (impersonate_user is True) and (username is not None)):
configuration['hive.server2.proxy.user'] = username
return configuration<|docstring|>Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation<|endoftext|>
|
ee957e7d94121876f26ea93a853164dc55bc5252942ac7b8995daecf05267324
|
@staticmethod
def mutate_label(label):
'\n Athena only supports lowercase column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n '
return label.lower()
|
Athena only supports lowercase column names and aliases.
:param str label: Original label which might include uppercase letters
:return: String that is supported by the database
|
superset/db_engine_specs.py
|
mutate_label
|
riskilla/incubator-superset
| 1
|
python
|
@staticmethod
def mutate_label(label):
'\n Athena only supports lowercase column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n '
return label.lower()
|
@staticmethod
def mutate_label(label):
'\n Athena only supports lowercase column names and aliases.\n :param str label: Original label which might include uppercase letters\n :return: String that is supported by the database\n '
return label.lower()<|docstring|>Athena only supports lowercase column names and aliases.
:param str label: Original label which might include uppercase letters
:return: String that is supported by the database<|endoftext|>
|
040072403aa288a38a5043b150785aaf892e828a0271ffc071b982979a53386a
|
@staticmethod
def mutate_label(label):
'\n BigQuery field_name should start with a letter or underscore and contain only\n alphanumeric characters. Labels that start with a number are prefixed with an\n underscore. Any unsupported characters are replaced with underscores and an\n md5 hash is added to the end of the label to avoid possible collisions.\n :param str label: the original label which might include unsupported characters\n :return: String that is supported by the database\n '
label_hashed = ('_' + hashlib.md5(label.encode('utf-8')).hexdigest())
label_mutated = (('_' + label) if re.match('^\\d', label) else label)
label_mutated = re.sub('[^\\w]+', '_', label_mutated)
if (label_mutated != label):
label_mutated += label_hashed
return label_mutated
|
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param str label: the original label which might include unsupported characters
:return: String that is supported by the database
|
superset/db_engine_specs.py
|
mutate_label
|
riskilla/incubator-superset
| 1
|
python
|
@staticmethod
def mutate_label(label):
'\n BigQuery field_name should start with a letter or underscore and contain only\n alphanumeric characters. Labels that start with a number are prefixed with an\n underscore. Any unsupported characters are replaced with underscores and an\n md5 hash is added to the end of the label to avoid possible collisions.\n :param str label: the original label which might include unsupported characters\n :return: String that is supported by the database\n '
label_hashed = ('_' + hashlib.md5(label.encode('utf-8')).hexdigest())
label_mutated = (('_' + label) if re.match('^\\d', label) else label)
label_mutated = re.sub('[^\\w]+', '_', label_mutated)
if (label_mutated != label):
label_mutated += label_hashed
return label_mutated
|
@staticmethod
def mutate_label(label):
'\n BigQuery field_name should start with a letter or underscore and contain only\n alphanumeric characters. Labels that start with a number are prefixed with an\n underscore. Any unsupported characters are replaced with underscores and an\n md5 hash is added to the end of the label to avoid possible collisions.\n :param str label: the original label which might include unsupported characters\n :return: String that is supported by the database\n '
label_hashed = ('_' + hashlib.md5(label.encode('utf-8')).hexdigest())
label_mutated = (('_' + label) if re.match('^\\d', label) else label)
label_mutated = re.sub('[^\\w]+', '_', label_mutated)
if (label_mutated != label):
label_mutated += label_hashed
return label_mutated<|docstring|>BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param str label: the original label which might include unsupported characters
:return: String that is supported by the database<|endoftext|>
|
08a84144d014d9643ea80cae79d9dbfdd4eaa7c2e36c965f473595c20a2ed5b9
|
@classmethod
def truncate_label(cls, label):
'BigQuery requires column names start with either a letter or\n underscore. To make sure this is always the case, an underscore is prefixed\n to the truncated label.\n '
return ('_' + hashlib.md5(label.encode('utf-8')).hexdigest())
|
BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is prefixed
to the truncated label.
|
superset/db_engine_specs.py
|
truncate_label
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def truncate_label(cls, label):
'BigQuery requires column names start with either a letter or\n underscore. To make sure this is always the case, an underscore is prefixed\n to the truncated label.\n '
return ('_' + hashlib.md5(label.encode('utf-8')).hexdigest())
|
@classmethod
def truncate_label(cls, label):
'BigQuery requires column names start with either a letter or\n underscore. To make sure this is always the case, an underscore is prefixed\n to the truncated label.\n '
return ('_' + hashlib.md5(label.encode('utf-8')).hexdigest())<|docstring|>BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is prefixed
to the truncated label.<|endoftext|>
|
62ead1868886099db2e708ff8ba8fada817a1b6efd77491eb81ad0e23a5bd846
|
@classmethod
def _get_fields(cls, cols):
"\n BigQuery dialect requires us to not use backtick in the fieldname which are\n nested.\n Using literal_column handles that issue.\n https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column\n Also explicility specifying column names so we don't encounter duplicate\n column names in the result.\n "
return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__')) for c in cols]
|
BigQuery dialect requires us to not use backtick in the fieldname which are
nested.
Using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
Also explicility specifying column names so we don't encounter duplicate
column names in the result.
|
superset/db_engine_specs.py
|
_get_fields
|
riskilla/incubator-superset
| 1
|
python
|
@classmethod
def _get_fields(cls, cols):
"\n BigQuery dialect requires us to not use backtick in the fieldname which are\n nested.\n Using literal_column handles that issue.\n https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column\n Also explicility specifying column names so we don't encounter duplicate\n column names in the result.\n "
return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__')) for c in cols]
|
@classmethod
def _get_fields(cls, cols):
"\n BigQuery dialect requires us to not use backtick in the fieldname which are\n nested.\n Using literal_column handles that issue.\n https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column\n Also explicility specifying column names so we don't encounter duplicate\n column names in the result.\n "
return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__')) for c in cols]<|docstring|>BigQuery dialect requires us to not use backtick in the fieldname which are
nested.
Using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
Also explicility specifying column names so we don't encounter duplicate
column names in the result.<|endoftext|>
|
ce7707f7300dfa8d0389c58e394ad46464610d0fa84ced0f242eca89ba917897
|
def convert_to_hive_type(col_type):
    """Map a tableschema column type to the corresponding Hive type.

    Any type without an explicit mapping falls back to STRING.
    """
    if col_type == 'boolean':
        return 'BOOLEAN'
    if col_type == 'integer':
        return 'INT'
    if col_type == 'number':
        return 'DOUBLE'
    return 'STRING'
|
maps tableschema's types to hive types
|
superset/db_engine_specs.py
|
convert_to_hive_type
|
riskilla/incubator-superset
| 1
|
python
|
def convert_to_hive_type(col_type):
tableschema_to_hive_types = {'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING'}
return tableschema_to_hive_types.get(col_type, 'STRING')
|
def convert_to_hive_type(col_type):
tableschema_to_hive_types = {'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING'}
return tableschema_to_hive_types.get(col_type, 'STRING')<|docstring|>maps tableschema's types to hive types<|endoftext|>
|
03e9669caaffca961ec020b2338297349bedf6e04fd6acce009ce847c324ee3a
|
def test_no_orgs(self, db):
    """Nothing generated"""
    target = 'zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills'
    # Neither the no-argument form nor an explicit empty org list should
    # trigger any bill generation.
    for call_kwargs in ({}, {'orgs': []}):
        with patch(target) as gen_mock:
            generate_all_outstanding_bills(**call_kwargs)
            assert not gen_mock.called
|
Nothing generated
|
tests/celery/test_billing_tasks.py
|
test_no_orgs
|
zconnect-iot/zconnect-django
| 2
|
python
|
def test_no_orgs(self, db):
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills()
assert (not cmock.called)
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills(orgs=[])
assert (not cmock.called)
|
def test_no_orgs(self, db):
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills()
assert (not cmock.called)
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills(orgs=[])
assert (not cmock.called)<|docstring|>Nothing generated<|endoftext|>
|
ac122c1d4bc6a45f715d23006e0700e75d27ff03448315aed8fb7869c47d60b8
|
def test_non_billed_org(self, db):
    """There are orgs but without any associated billing"""
    org = BilledOrganizationFactory()
    target = 'zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills'
    # An org without a bill generator must not produce bills, whether it is
    # discovered implicitly or passed in explicitly.
    with patch(target) as gen_mock:
        generate_all_outstanding_bills()
        assert not gen_mock.called
    with patch(target) as gen_mock:
        generate_all_outstanding_bills(orgs=[org])
        assert not gen_mock.called
|
There are orgs but without any associated billing
|
tests/celery/test_billing_tasks.py
|
test_non_billed_org
|
zconnect-iot/zconnect-django
| 2
|
python
|
def test_non_billed_org(self, db):
org = BilledOrganizationFactory()
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills()
assert (not cmock.called)
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills(orgs=[org])
assert (not cmock.called)
|
def test_non_billed_org(self, db):
org = BilledOrganizationFactory()
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills()
assert (not cmock.called)
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills') as cmock:
generate_all_outstanding_bills(orgs=[org])
assert (not cmock.called)<|docstring|>There are orgs but without any associated billing<|endoftext|>
|
615c9217c403d326b49fcb427c1f98ebcbef3d10ac16da575543c9fb396287a5
|
def test_generate_outstanding_validation_error(self, db):
    """Can handle validation errors"""
    biller = BillGeneratorFactory()
    bill = BillFactory(generated_by=biller)
    org = biller.organization
    target = 'zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills'
    # Both call styles should delegate to the per-org generator and surface
    # the bill it returned.
    for call_kwargs in ({}, {'orgs': [org]}):
        with patch(target, return_value=[bill]) as gen_mock:
            bills = generate_all_outstanding_bills(**call_kwargs)
            assert gen_mock.called
            assert bills[0] == bill
|
Can handle validation errors
|
tests/celery/test_billing_tasks.py
|
test_generate_outstanding_validation_error
|
zconnect-iot/zconnect-django
| 2
|
python
|
def test_generate_outstanding_validation_error(self, db):
biller = BillGeneratorFactory()
bill = BillFactory(generated_by=biller)
org = biller.organization
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills', return_value=[bill]) as cmock:
bills = generate_all_outstanding_bills()
assert cmock.called
assert (bills[0] == bill)
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills', return_value=[bill]) as cmock:
bills = generate_all_outstanding_bills(orgs=[org])
assert cmock.called
assert (bills[0] == bill)
|
def test_generate_outstanding_validation_error(self, db):
biller = BillGeneratorFactory()
bill = BillFactory(generated_by=biller)
org = biller.organization
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills', return_value=[bill]) as cmock:
bills = generate_all_outstanding_bills()
assert cmock.called
assert (bills[0] == bill)
with patch('zconnect.zc_billing.tasks.BilledOrganization.generate_outstanding_bills', return_value=[bill]) as cmock:
bills = generate_all_outstanding_bills(orgs=[org])
assert cmock.called
assert (bills[0] == bill)<|docstring|>Can handle validation errors<|endoftext|>
|
65dd7e90941fa7b63ed3b0cd90ebecca9003efcf91a1b5f6054eb6b756ebbdcd
|
def test_generate_outstanding_actual(self, fake_bill_old):
    """Try actually generating bills, as if we haven't done so for two weeks"""
    generator = fake_bill_old.generated_by
    bills = generate_all_outstanding_bills()
    # Two weekly periods have elapsed, so exactly two new bills appear,
    # both from the same generator, each starting the day after the
    # previous period ended.
    assert len(bills) == 2
    first, second = bills
    assert first.generated_by == generator
    assert second.generated_by == generator
    one_day = timedelta(days=1)
    assert first.period_start == fake_bill_old.period_end + one_day
    assert second.period_start == first.period_end + one_day
|
Try actually generating bills, as if we haven't done so for two weeks
|
tests/celery/test_billing_tasks.py
|
test_generate_outstanding_actual
|
zconnect-iot/zconnect-django
| 2
|
python
|
def test_generate_outstanding_actual(self, fake_bill_old):
generator = fake_bill_old.generated_by
bills = generate_all_outstanding_bills()
assert (len(bills) == 2)
assert (bills[0].generated_by == generator)
assert (bills[1].generated_by == generator)
assert (bills[0].period_start == (fake_bill_old.period_end + timedelta(days=1)))
assert (bills[1].period_start == (bills[0].period_end + timedelta(days=1)))
|
def test_generate_outstanding_actual(self, fake_bill_old):
generator = fake_bill_old.generated_by
bills = generate_all_outstanding_bills()
assert (len(bills) == 2)
assert (bills[0].generated_by == generator)
assert (bills[1].generated_by == generator)
assert (bills[0].period_start == (fake_bill_old.period_end + timedelta(days=1)))
assert (bills[1].period_start == (bills[0].period_end + timedelta(days=1)))<|docstring|>Try actually generating bills, as if we haven't done so for two weeks<|endoftext|>
|
008d53842d0165027f4bb48f974ed04c0abda9ea7d8d9df7ab66d0bf16777690
|
def __init__(self, data, seq_len, train):
    """
    Parameters
    ----------
    data : list
        List containing the dataset data. For BAIR, it consists of a list of lists of
        image files, representing video frames.
    seq_len : int
        Number of frames to produce. BAIR videos contain 30 frames, so at most 30.
    train : bool
        Whether to use the training or testing dataset.

    Raises
    ------
    ValueError
        If ``seq_len`` exceeds the 30 frames available per video.
    """
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O` and carried no error message.
    if seq_len > 30:
        raise ValueError('seq_len must be at most 30 (BAIR videos have 30 frames)')
    self.data = data
    self.nx = 64  # frame width/height in pixels
    self.nc = 3   # number of color channels
    self.seq_len = seq_len
    self.train = train
|
Parameters
----------
data : list
List containing the dataset data. For BAIR, it consists of a list of lists of image files, representing
video frames.
seq_len : int
Number of frames to produce.
train : bool
Whether to use the training or testing dataset.
|
data/bair.py
|
__init__
|
perevalovds/srvp
| 64
|
python
|
def __init__(self, data, seq_len, train):
'\n Parameters\n ----------\n data : list\n List containing the dataset data. For BAIR, it consists of a list of lists of image files, representing\n video frames.\n seq_len : int\n Number of frames to produce.\n train : bool\n Whether to use the training or testing dataset.\n '
assert (seq_len <= 30)
self.data = data
self.nx = 64
self.nc = 3
self.seq_len = seq_len
self.train = train
|
def __init__(self, data, seq_len, train):
'\n Parameters\n ----------\n data : list\n List containing the dataset data. For BAIR, it consists of a list of lists of image files, representing\n video frames.\n seq_len : int\n Number of frames to produce.\n train : bool\n Whether to use the training or testing dataset.\n '
assert (seq_len <= 30)
self.data = data
self.nx = 64
self.nc = 3
self.seq_len = seq_len
self.train = train<|docstring|>Parameters
----------
data : list
List containing the dataset data. For BAIR, it consists of a list of lists of image files, representing
video frames.
seq_len : int
Number of frames to produce.
train : bool
Whether to use the training or testing dataset.<|endoftext|>
|
df89d3be8e5b4fc08f265985137e29b2b16c243e97393d85f406199cd6c8335c
|
def change_seq_len(self, seq_len):
    """
    Changes the length of sequences in the dataset.

    Parameters
    ----------
    seq_len : int
        New sequence length.
    """
    self.seq_len = seq_len
|
Changes the length of sequences in the dataset.
Parameters
----------
seq_len : int
New sequence length.
|
data/bair.py
|
change_seq_len
|
perevalovds/srvp
| 64
|
python
|
def change_seq_len(self, seq_len):
'\n Changes the length of sequences in the dataset.\n\n Parameters\n ----------\n seq_len : int\n New sequence length.\n '
self.seq_len = seq_len
|
def change_seq_len(self, seq_len):
'\n Changes the length of sequences in the dataset.\n\n Parameters\n ----------\n seq_len : int\n New sequence length.\n '
self.seq_len = seq_len<|docstring|>Changes the length of sequences in the dataset.
Parameters
----------
seq_len : int
New sequence length.<|endoftext|>
|
260c07f68973c80733bdc8422d6ec5898a391f6dc8ceeb3c6fca0e0c2bea0c23
|
@classmethod
def make_dataset(cls, data_dir, seq_len, train):
    """
    Creates a dataset from the directory where the dataset is saved.

    Parameters
    ----------
    data_dir : str
        Path to the dataset.
    seq_len : int
        Number of frames to produce.
    train : bool
        Whether to use the training or testing dataset.

    Returns
    -------
    data.bair.BAIR
    """
    split = 'train' if train else 'test'
    data_dir = join(data_dir, 'processed_data', split)
    # Each video is a directory of PNG frames; keep the directory walk and
    # the frame list sorted so ordering is deterministic.
    data = []
    for outer in sorted(os.listdir(data_dir)):
        for inner in sorted(os.listdir(join(data_dir, outer))):
            video_dir = join(data_dir, outer, inner)
            frames = [join(video_dir, img)
                      for img in os.listdir(video_dir)
                      if os.path.splitext(img)[1] == '.png']
            data.append(sorted(frames))
    return cls(data, seq_len, train)
|
Creates a dataset from the directory where the dataset is saved.
Parameters
----------
data_dir : str
Path to the dataset.
seq_len : int
Number of frames to produce.
train : bool
Whether to use the training or testing dataset.
Returns
-------
data.bair.BAIR
|
data/bair.py
|
make_dataset
|
perevalovds/srvp
| 64
|
python
|
@classmethod
def make_dataset(cls, data_dir, seq_len, train):
'\n Creates a dataset from the directory where the dataset is saved.\n\n Parameters\n ----------\n data_dir : str\n Path to the dataset.\n seq_len : int\n Number of frames to produce.\n train : bool\n Whether to use the training or testing dataset.\n\n Returns\n -------\n data.bair.BAIR\n '
if train:
data_dir = join(data_dir, 'processed_data', 'train')
else:
data_dir = join(data_dir, 'processed_data', 'test')
data = []
for d1 in sorted(os.listdir(data_dir)):
for d2 in sorted(os.listdir(join(data_dir, d1))):
images = sorted([join(data_dir, d1, d2, img) for img in os.listdir(join(data_dir, d1, d2)) if (os.path.splitext(img)[1] == '.png')])
data.append(images)
return cls(data, seq_len, train)
|
@classmethod
def make_dataset(cls, data_dir, seq_len, train):
'\n Creates a dataset from the directory where the dataset is saved.\n\n Parameters\n ----------\n data_dir : str\n Path to the dataset.\n seq_len : int\n Number of frames to produce.\n train : bool\n Whether to use the training or testing dataset.\n\n Returns\n -------\n data.bair.BAIR\n '
if train:
data_dir = join(data_dir, 'processed_data', 'train')
else:
data_dir = join(data_dir, 'processed_data', 'test')
data = []
for d1 in sorted(os.listdir(data_dir)):
for d2 in sorted(os.listdir(join(data_dir, d1))):
images = sorted([join(data_dir, d1, d2, img) for img in os.listdir(join(data_dir, d1, d2)) if (os.path.splitext(img)[1] == '.png')])
data.append(images)
return cls(data, seq_len, train)<|docstring|>Creates a dataset from the directory where the dataset is saved.
Parameters
----------
data_dir : str
Path to the dataset.
seq_len : int
Number of frames to produce.
train : bool
Whether to use the training or testing dataset.
Returns
-------
data.bair.BAIR<|endoftext|>
|
ea49a49c89a507841c4aac62ee320ff50150270ee5d05e540023f5b009fefccd
|
def __init__(self, *args, **kwargs):
    """
    create all parts of the layout in their initialised way with default data.
    Changes will be handled by jslayout.coffee
    """
    super(XYPlotJSLayout, self).__init__(*args, **kwargs)
    self.initialize_select_boxes()
    # Start with empty columns; the client-side JS fills the source later.
    x = []
    y = []
    self.source = ColumnDataSource(data=dict(x=x, y=y, y_below=y, y_above=y, index=x, y2=y))
    self.plot = figure(webgl=constants.WEBGL)
    if X_AXIS_DATES:
        # Format x ticks as dates and rotate labels 90 degrees so long
        # timestamps don't overlap.
        self.plot.xaxis.formatter = DatetimeTickFormatter(formats=dict(seconds=['%d.%m.%y %H:%M:%S'], minutes=['%d.%m.%y %H:%M:%S'], hourmin=['%d.%m.%y %H:%M:%S'], hours=['%d.%m.%y %Hh'], days=['%d.%m.%y'], months=['%b %y'], years=['%b %y']))
        self.plot.xaxis.major_label_orientation = (math.pi / 2)
        self.plot.xaxis.major_label_text_baseline = TextBaseline.top
        self.plot.xaxis.major_label_text_align = TextAlign.left
    if USE_DATA_FILTER:
        self.data_filter = MultiSelect(options=OPTIONS_FOR_DATAFILTER, value=OPTIONS_FOR_DATAFILTER, title=('Filter on %s' % COLUMN_FOR_DATAFILTER))
    else:
        # Filtering disabled: keep an empty MultiSelect so the layout shape
        # stays the same either way.
        self.data_filter = MultiSelect(options=[], value=[], title='Filtering is disabled')
    # Placeholder ranges; real ranges are applied once data is loaded.
    self.plot.x_range = Range1d(start=0.0, end=10.0)
    self.plot.y_range = Range1d(start=0.0, end=10.0)
    # Main series (blue), second series (green); red lines bracket y with
    # the +/- standard-deviation bounds when DISPLAY_STD is set.
    self.plot.line(x='x', y='y', source=self.source, color='blue', line_width=2)
    self.plot.line(x='x', y='y2', source=self.source, color='green', line_width=2)
    if DISPLAY_STD:
        self.plot.line(x='x', y='y_below', source=self.source, color='red', line_width=1)
        self.plot.line(x='x', y='y_above', source=self.source, color='red', line_width=1)
    self.table_plot = DataTable(source=self.source, columns=[TableColumn(field='x', title='x average of slided data'), TableColumn(field='y', title='y average of slided data'), TableColumn(field='y2', title='Second Line y2'), TableColumn(field='y_above', title='y + standard derivation'), TableColumn(field='y_below', title='y - standard derivation')])
|
create all parts of the layout in their initialised way with default data.
Changes will be handled by jslayout.coffee
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
__init__
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def __init__(self, *args, **kwargs):
'\n create all parts of the layout in their initialised way with default data.\n Changes will be handled by jslayout.coffee\n '
super(XYPlotJSLayout, self).__init__(*args, **kwargs)
self.initialize_select_boxes()
x = []
y = []
self.source = ColumnDataSource(data=dict(x=x, y=y, y_below=y, y_above=y, index=x, y2=y))
self.plot = figure(webgl=constants.WEBGL)
if X_AXIS_DATES:
self.plot.xaxis.formatter = DatetimeTickFormatter(formats=dict(seconds=['%d.%m.%y %H:%M:%S'], minutes=['%d.%m.%y %H:%M:%S'], hourmin=['%d.%m.%y %H:%M:%S'], hours=['%d.%m.%y %Hh'], days=['%d.%m.%y'], months=['%b %y'], years=['%b %y']))
self.plot.xaxis.major_label_orientation = (math.pi / 2)
self.plot.xaxis.major_label_text_baseline = TextBaseline.top
self.plot.xaxis.major_label_text_align = TextAlign.left
if USE_DATA_FILTER:
self.data_filter = MultiSelect(options=OPTIONS_FOR_DATAFILTER, value=OPTIONS_FOR_DATAFILTER, title=('Filter on %s' % COLUMN_FOR_DATAFILTER))
else:
self.data_filter = MultiSelect(options=[], value=[], title='Filtering is disabled')
self.plot.x_range = Range1d(start=0.0, end=10.0)
self.plot.y_range = Range1d(start=0.0, end=10.0)
self.plot.line(x='x', y='y', source=self.source, color='blue', line_width=2)
self.plot.line(x='x', y='y2', source=self.source, color='green', line_width=2)
if DISPLAY_STD:
self.plot.line(x='x', y='y_below', source=self.source, color='red', line_width=1)
self.plot.line(x='x', y='y_above', source=self.source, color='red', line_width=1)
self.table_plot = DataTable(source=self.source, columns=[TableColumn(field='x', title='x average of slided data'), TableColumn(field='y', title='y average of slided data'), TableColumn(field='y2', title='Second Line y2'), TableColumn(field='y_above', title='y + standard derivation'), TableColumn(field='y_below', title='y - standard derivation')])
|
def __init__(self, *args, **kwargs):
'\n create all parts of the layout in their initialised way with default data.\n Changes will be handled by jslayout.coffee\n '
super(XYPlotJSLayout, self).__init__(*args, **kwargs)
self.initialize_select_boxes()
x = []
y = []
self.source = ColumnDataSource(data=dict(x=x, y=y, y_below=y, y_above=y, index=x, y2=y))
self.plot = figure(webgl=constants.WEBGL)
if X_AXIS_DATES:
self.plot.xaxis.formatter = DatetimeTickFormatter(formats=dict(seconds=['%d.%m.%y %H:%M:%S'], minutes=['%d.%m.%y %H:%M:%S'], hourmin=['%d.%m.%y %H:%M:%S'], hours=['%d.%m.%y %Hh'], days=['%d.%m.%y'], months=['%b %y'], years=['%b %y']))
self.plot.xaxis.major_label_orientation = (math.pi / 2)
self.plot.xaxis.major_label_text_baseline = TextBaseline.top
self.plot.xaxis.major_label_text_align = TextAlign.left
if USE_DATA_FILTER:
self.data_filter = MultiSelect(options=OPTIONS_FOR_DATAFILTER, value=OPTIONS_FOR_DATAFILTER, title=('Filter on %s' % COLUMN_FOR_DATAFILTER))
else:
self.data_filter = MultiSelect(options=[], value=[], title='Filtering is disabled')
self.plot.x_range = Range1d(start=0.0, end=10.0)
self.plot.y_range = Range1d(start=0.0, end=10.0)
self.plot.line(x='x', y='y', source=self.source, color='blue', line_width=2)
self.plot.line(x='x', y='y2', source=self.source, color='green', line_width=2)
if DISPLAY_STD:
self.plot.line(x='x', y='y_below', source=self.source, color='red', line_width=1)
self.plot.line(x='x', y='y_above', source=self.source, color='red', line_width=1)
self.table_plot = DataTable(source=self.source, columns=[TableColumn(field='x', title='x average of slided data'), TableColumn(field='y', title='y average of slided data'), TableColumn(field='y2', title='Second Line y2'), TableColumn(field='y_above', title='y + standard derivation'), TableColumn(field='y_below', title='y - standard derivation')])<|docstring|>create all parts of the layout in their initialised way with default data.
Changes will be handled by jslayout.coffee<|endoftext|>
|
aaa916a2811feafb93dccab78849b3fe43008c4908346518c4dbf1ddd092cb1f
|
def __init__(self, *args, **kwargs):
    """
    Create all parts of the layout in their initialised way with default data.
    Register handler for python callbacks
    """
    super(XYPlotPythonLayout, self).__init__(*args, **kwargs)
    # Populate ranges and source once from the full data set before any
    # change handlers are attached.
    self.change_data_source_ignore_range(None, None, None)
    # Re-slice the data whenever the user zooms/pans (any range edge moves).
    self.plot.y_range.on_change('start', self.change_data_source_in_range)
    self.plot.y_range.on_change('end', self.change_data_source_in_range)
    self.plot.x_range.on_change('start', self.change_data_source_in_range)
    self.plot.x_range.on_change('end', self.change_data_source_in_range)
    # Recompute from scratch (resetting ranges) when a different column,
    # table, or file is selected.
    self.x_axis.on_change('value', self.change_data_source_ignore_range)
    self.y_axis.on_change('value', self.change_data_source_ignore_range)
    self.table_select.on_change('value', self.change_columns)
    self.file_select.on_change('value', self.change_tables)
|
Create all parts of the layout in their initialised way with default data.
Register handler for python callbacks
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
__init__
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def __init__(self, *args, **kwargs):
'\n Create all parts of the layout in their initialised way with default data.\n Register handler for python callbacks\n '
super(XYPlotPythonLayout, self).__init__(*args, **kwargs)
self.change_data_source_ignore_range(None, None, None)
self.plot.y_range.on_change('start', self.change_data_source_in_range)
self.plot.y_range.on_change('end', self.change_data_source_in_range)
self.plot.x_range.on_change('start', self.change_data_source_in_range)
self.plot.x_range.on_change('end', self.change_data_source_in_range)
self.x_axis.on_change('value', self.change_data_source_ignore_range)
self.y_axis.on_change('value', self.change_data_source_ignore_range)
self.table_select.on_change('value', self.change_columns)
self.file_select.on_change('value', self.change_tables)
|
def __init__(self, *args, **kwargs):
'\n Create all parts of the layout in their initialised way with default data.\n Register handler for python callbacks\n '
super(XYPlotPythonLayout, self).__init__(*args, **kwargs)
self.change_data_source_ignore_range(None, None, None)
self.plot.y_range.on_change('start', self.change_data_source_in_range)
self.plot.y_range.on_change('end', self.change_data_source_in_range)
self.plot.x_range.on_change('start', self.change_data_source_in_range)
self.plot.x_range.on_change('end', self.change_data_source_in_range)
self.x_axis.on_change('value', self.change_data_source_ignore_range)
self.y_axis.on_change('value', self.change_data_source_ignore_range)
self.table_select.on_change('value', self.change_columns)
self.file_select.on_change('value', self.change_tables)<|docstring|>Create all parts of the layout in their initialised way with default data.
Register handler for python callbacks<|endoftext|>
|
a19949b2d14d18a4a03f430df329283372ff08fb3879b978aa3c81f567c92b57
|
def initialize_select_boxes(self):
    """
    Initial selectboxes
    :return:
    """
    available = get_files_by_path(UPLOAD_PATH)
    # Fall back to a placeholder entry so the Select widget always has a value.
    files = available if available else ['No file found']
    self.file_select = Select(options=files, title='Select a file', value=files[0])
    self.data = ColumnDataSource()
    self.data.data = get_diagram_data(UPLOAD_PATH, files[0])
    super(XYPlotPythonLayout, self).initialize_select_boxes()
|
Initial selectboxes
:return:
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
initialize_select_boxes
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def initialize_select_boxes(self):
'\n Initial selectboxes\n :return:\n '
files = get_files_by_path(UPLOAD_PATH)
if (not files):
files = ['No file found']
self.file_select = Select(options=files, title='Select a file', value=files[0])
self.data = ColumnDataSource()
self.data.data = get_diagram_data(UPLOAD_PATH, files[0])
super(XYPlotPythonLayout, self).initialize_select_boxes()
|
def initialize_select_boxes(self):
'\n Initial selectboxes\n :return:\n '
files = get_files_by_path(UPLOAD_PATH)
if (not files):
files = ['No file found']
self.file_select = Select(options=files, title='Select a file', value=files[0])
self.data = ColumnDataSource()
self.data.data = get_diagram_data(UPLOAD_PATH, files[0])
super(XYPlotPythonLayout, self).initialize_select_boxes()<|docstring|>Initial selectboxes
:return:<|endoftext|>
|
dc14dfde688fa0650767b00f0394afc83a49eeb7368a6170a76278ef22d492fe
|
def change_tables(self, attrname, old, new):
    """
    a different file is selected and other tables are available
    """
    # Reload the data for the newly selected file, then refresh the table
    # selector (placeholder entry when the file contains no tables).
    self.data.data = get_diagram_data(UPLOAD_PATH, self.file_select.value)
    tables = self.get_tables(self.data.data) or ['No table found']
    self.table_select.options = tables
    self.table_select.value = tables[0]
|
a different file is selected and other tables are available
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
change_tables
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def change_tables(self, attrname, old, new):
'\n \n '
self.data.data = get_diagram_data(UPLOAD_PATH, self.file_select.value)
tables = self.get_tables(self.data.data)
if (not tables):
tables = ['No table found']
self.table_select.options = tables
self.table_select.value = tables[0]
|
def change_tables(self, attrname, old, new):
'\n \n '
self.data.data = get_diagram_data(UPLOAD_PATH, self.file_select.value)
tables = self.get_tables(self.data.data)
if (not tables):
tables = ['No table found']
self.table_select.options = tables
self.table_select.value = tables[0]<|docstring|>a different file is selected and other tables are available<|endoftext|>
|
740f1b8f53871760bec7c4d78f6f1b6d8fbef030a230f771981ac0b02ecde1dd
|
def change_columns(self, attrname, old, new):
    """
    a different table is selected and other columns are available
    """
    data = dict(self.data.data)
    columns = self.get_colums(data, self.table_select.value)
    if not columns:
        columns = ['No column found']
    self.x_axis.options = columns
    self.y_axis.options = columns
    self.x_axis.value = columns[0]
    # Default y to the second column when one exists, otherwise reuse the first.
    self.y_axis.value = columns[1] if len(columns) >= 2 else columns[0]
|
a different table is selected and other columns are available
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
change_columns
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def change_columns(self, attrname, old, new):
'\n \n '
data = dict(self.data.data)
table = self.table_select.value
columns = self.get_colums(data, table)
if (len(columns) == 0):
columns = ['No column found']
self.x_axis.options = columns
self.y_axis.options = columns
self.x_axis.value = columns[0]
if (len(columns) >= 2):
self.y_axis.value = columns[1]
else:
self.y_axis.value = columns[0]
|
def change_columns(self, attrname, old, new):
'\n \n '
data = dict(self.data.data)
table = self.table_select.value
columns = self.get_colums(data, table)
if (len(columns) == 0):
columns = ['No column found']
self.x_axis.options = columns
self.y_axis.options = columns
self.x_axis.value = columns[0]
if (len(columns) >= 2):
self.y_axis.value = columns[1]
else:
self.y_axis.value = columns[0]<|docstring|>a different table is selected and other columns are available<|endoftext|>
|
5befadd053e30a49b4c97de1bf688a9203125d7d093c790b97d280cf1b8b7359
|
def change_data_source_in_range(self, attrname, old, new):
    """
    deals with data generation after zooming
    """
    # Recompute the data restricted to the current axis ranges (zoomed view).
    plot_data = self.get_plot_data(ignore_ranges=False)
    self.source.data = {
        'x': plot_data['source.data.x'],
        'y': plot_data['source.data.y'],
        'index': plot_data['source.data.index'],
        'y_above': plot_data['source.data.y_above'],
        'y_below': plot_data['source.data.y_below'],
    }
|
deals with data generation after zooming
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
change_data_source_in_range
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def change_data_source_in_range(self, attrname, old, new):
'\n \n '
data = self.get_plot_data(ignore_ranges=False)
self.source.data = dict(x=data['source.data.x'], y=data['source.data.y'], index=data['source.data.index'], y_above=data['source.data.y_above'], y_below=data['source.data.y_below'])
|
def change_data_source_in_range(self, attrname, old, new):
'\n \n '
data = self.get_plot_data(ignore_ranges=False)
self.source.data = dict(x=data['source.data.x'], y=data['source.data.y'], index=data['source.data.index'], y_above=data['source.data.y_above'], y_below=data['source.data.y_below'])<|docstring|>deals with data generation after zooming<|endoftext|>
|
2e84f8eab342eaf8ac59ed8754b85878608c2d9206ae13681172f0d5e7156b07
|
def change_data_source_ignore_range(self, attrname, old, new):
    """
    deals with data generation after selecting different columns
    """
    # Recompute over the full data set and reset the axis ranges to fit it.
    plot_data = self.get_plot_data(ignore_ranges=True)
    self.plot.x_range.start = plot_data['plot.x_range.start']
    self.plot.x_range.end = plot_data['plot.x_range.end']
    self.plot.y_range.start = plot_data['plot.y_range.start']
    self.plot.y_range.end = plot_data['plot.y_range.end']
    self.source.data = {
        'x': plot_data['source.data.x'],
        'y': plot_data['source.data.y'],
        'index': plot_data['source.data.index'],
        'y_above': plot_data['source.data.y_above'],
        'y_below': plot_data['source.data.y_below'],
    }
|
deals with data generation after selecting different columns
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
change_data_source_ignore_range
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def change_data_source_ignore_range(self, attrname, old, new):
'\n \n '
data = self.get_plot_data(ignore_ranges=True)
self.plot.x_range.start = data['plot.x_range.start']
self.plot.x_range.end = data['plot.x_range.end']
self.plot.y_range.start = data['plot.y_range.start']
self.plot.y_range.end = data['plot.y_range.end']
self.source.data = dict(x=data['source.data.x'], y=data['source.data.y'], index=data['source.data.index'], y_above=data['source.data.y_above'], y_below=data['source.data.y_below'])
|
def change_data_source_ignore_range(self, attrname, old, new):
'\n \n '
data = self.get_plot_data(ignore_ranges=True)
self.plot.x_range.start = data['plot.x_range.start']
self.plot.x_range.end = data['plot.x_range.end']
self.plot.y_range.start = data['plot.y_range.start']
self.plot.y_range.end = data['plot.y_range.end']
self.source.data = dict(x=data['source.data.x'], y=data['source.data.y'], index=data['source.data.index'], y_above=data['source.data.y_above'], y_below=data['source.data.y_below'])<|docstring|>deals with data generation after selecting different columns<|endoftext|>
|
2a5b4192a1318f25921cc2e488b0caa239aadff8f09cf882dcffcbf67b05ef30
|
def get_plot_data(self, ignore_ranges=True):
    """
    Calculating data

    :param ignore_ranges: True if complete data should be used, false if min and max
        depend on current ranges
    :return: dictionary with data for redraw
        'source.data.x': numpy array, floats
        'source.data.y': numpy array, floats
        'source.data.index': numpy array, floats
        'source.data.y_above': numpy array, floats
        'source.data.y_below': numpy array, floats
        'plot.x_range.start': float
        'plot.x_range.end': float
        'plot.y_range.start': float
        'plot.y_range.end': float
    """
    if ignore_ranges:
        # No bounds: compute over the complete data set.
        xmin = xmax = ymin = ymax = None
    else:
        # Restrict computation to the currently visible plot ranges.
        xmin = self.plot.x_range.start
        xmax = self.plot.x_range.end
        ymin = self.plot.y_range.start
        ymax = self.plot.y_range.end
    return get_plot_data_python(
        UPLOAD_PATH,
        self.file_select.value,
        self.table_select.value,
        x_col=self.x_axis.value,
        y_col=self.y_axis.value,
        xmin=xmin,
        xmax=xmax,
        ymin=ymin,
        ymax=ymax,
        plotwidth=self.plot.width,
        plotheight=self.plot.height,
    )
|
Calculating data
:param ignore_ranges: True if complete data should be used, false if min and max depends on current ranges
:return: dictionary with data for redraw
'source.data.x': numpy array, floats
'source.data.y': numpy array, floats
'source.data.index': numpy array, floats
'source.data.y_above': numpy array, floats
'source.data.y_below': numpy array, floats
'plot.x_range.start': float
'plot.x_range.end': float
'plot.y_range.start': float
'plot.y_range.end': float
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
get_plot_data
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def get_plot_data(self, ignore_ranges=True):
"\n Calculating data\n :param ignore_ranges: True if complete data should be used, false if min and max depends on current ranges\n :return: dictionary with data for redraw\n 'source.data.x': numpy array, floats\n 'source.data.y': numpy array, floats\n 'source.data.index': numpy array, floats\n 'source.data.y_above': numpy array, floats\n 'source.data.y_below': numpy array, floats\n 'plot.x_range.start': float\n 'plot.x_range.end': float\n 'plot.y_range.start': float\n 'plot.y_range.end': float\n\n "
upload_path = UPLOAD_PATH
filename = self.file_select.value
tablepath = self.table_select.value
x_col = self.x_axis.value
y_col = self.y_axis.value
if ignore_ranges:
xmin = None
xmax = None
ymin = None
ymax = None
else:
xmin = self.plot.x_range.start
xmax = self.plot.x_range.end
ymin = self.plot.y_range.start
ymax = self.plot.y_range.end
data = get_plot_data_python(upload_path, filename, tablepath, x_col=x_col, y_col=y_col, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, plotwidth=self.plot.width, plotheight=self.plot.height)
return data
|
def get_plot_data(self, ignore_ranges=True):
"\n Calculating data\n :param ignore_ranges: True if complete data should be used, false if min and max depends on current ranges\n :return: dictionary with data for redraw\n 'source.data.x': numpy array, floats\n 'source.data.y': numpy array, floats\n 'source.data.index': numpy array, floats\n 'source.data.y_above': numpy array, floats\n 'source.data.y_below': numpy array, floats\n 'plot.x_range.start': float\n 'plot.x_range.end': float\n 'plot.y_range.start': float\n 'plot.y_range.end': float\n\n "
upload_path = UPLOAD_PATH
filename = self.file_select.value
tablepath = self.table_select.value
x_col = self.x_axis.value
y_col = self.y_axis.value
if ignore_ranges:
xmin = None
xmax = None
ymin = None
ymax = None
else:
xmin = self.plot.x_range.start
xmax = self.plot.x_range.end
ymin = self.plot.y_range.start
ymax = self.plot.y_range.end
data = get_plot_data_python(upload_path, filename, tablepath, x_col=x_col, y_col=y_col, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, plotwidth=self.plot.width, plotheight=self.plot.height)
return data<|docstring|>Calculating data
:param ignore_ranges: True if complete data should be used, false if min and max depends on current ranges
:return: dictionary with data for redraw
'source.data.x': numpy array, floats
'source.data.y': numpy array, floats
'source.data.index': numpy array, floats
'source.data.y_above': numpy array, floats
'source.data.y_below': numpy array, floats
'plot.x_range.start': float
'plot.x_range.end': float
'plot.y_range.start': float
'plot.y_range.end': float<|endoftext|>
|
a99f1443cfed877f4d5c8a8b2cd1a3d04d2b308fa8ad18011ff7402faf697e89
|
def __init__(self, *args, **kwargs):
    """Assemble the contour plot layout with its default data.

    Builds the color mapper from the initial image values, sizes the
    figure from the data extents (falling back to the layout defaults
    when the source does not carry them) and attaches the color bar.
    """
    super(ContourPlotLayout, self).__init__(*args, **kwargs)
    source_data = self.data.data
    flat_image = source_data['image'][0].flatten()
    self.color_mapper = LinearColorMapper(
        palette=RdYlGn11,
        low=get_min_value(flat_image),
        high=get_max_value(flat_image),
    )
    # Prefer extents shipped with the data source, else the layout defaults.
    x_min = source_data['x_min'] if ('x_min' in source_data) else self.x_min
    x_max = source_data['x_max'] if ('x_max' in source_data) else self.x_max
    y_min = source_data['y_min'] if ('y_min' in source_data) else self.y_min
    y_max = source_data['y_max'] if ('y_max' in source_data) else self.y_max
    self.plot = figure(plot_width=600, plot_height=400, x_range=[x_min, x_max], y_range=[y_min, y_max], min_border_right=10)
    self.plot.image(image='image', x=x_min, y=y_min, dw=(x_max - x_min), dh=(y_max - y_min), color_mapper=self.color_mapper, source=self.data)
    self.color_bar = ColorBar(color_mapper=self.color_mapper, ticker=BasicTicker(desired_num_ticks=10), label_standoff=12, border_line_color=None, location=(0, 0))
    self.plot.add_layout(self.color_bar, 'left')
|
Create all parts of the layout in their initialised way with default data.
Register handler for python callbacks
|
src/inqbus/graphdemo/bokeh_extension/layout.py
|
__init__
|
sandrarum/inqbus.graphdemo
| 1
|
python
|
def __init__(self, *args, **kwargs):
'\n Create all parts of the layout in their initialised way with default data.\n Register handler for python callbacks\n '
super(ContourPlotLayout, self).__init__(*args, **kwargs)
data = self.data.data
image_data = data['image'][0]
flat_image_data = image_data.flatten()
min_data = get_min_value(flat_image_data)
max_data = get_max_value(flat_image_data)
self.color_mapper = LinearColorMapper(palette=RdYlGn11, low=min_data, high=max_data)
if ('x_min' in data):
x_min = data['x_min']
else:
x_min = self.x_min
if ('x_max' in data):
x_max = data['x_max']
else:
x_max = self.x_max
if ('y_min' in data):
y_min = data['y_min']
else:
y_min = self.y_min
if ('y_max' in data):
y_max = data['y_max']
else:
y_max = self.y_max
self.plot = figure(plot_width=600, plot_height=400, x_range=[x_min, x_max], y_range=[y_min, y_max], min_border_right=10)
self.plot.image(image='image', x=x_min, y=y_min, dw=(x_max - x_min), dh=(y_max - y_min), color_mapper=self.color_mapper, source=self.data)
self.color_bar = ColorBar(color_mapper=self.color_mapper, ticker=BasicTicker(desired_num_ticks=10), label_standoff=12, border_line_color=None, location=(0, 0))
self.plot.add_layout(self.color_bar, 'left')
|
def __init__(self, *args, **kwargs):
'\n Create all parts of the layout in their initialised way with default data.\n Register handler for python callbacks\n '
super(ContourPlotLayout, self).__init__(*args, **kwargs)
data = self.data.data
image_data = data['image'][0]
flat_image_data = image_data.flatten()
min_data = get_min_value(flat_image_data)
max_data = get_max_value(flat_image_data)
self.color_mapper = LinearColorMapper(palette=RdYlGn11, low=min_data, high=max_data)
if ('x_min' in data):
x_min = data['x_min']
else:
x_min = self.x_min
if ('x_max' in data):
x_max = data['x_max']
else:
x_max = self.x_max
if ('y_min' in data):
y_min = data['y_min']
else:
y_min = self.y_min
if ('y_max' in data):
y_max = data['y_max']
else:
y_max = self.y_max
self.plot = figure(plot_width=600, plot_height=400, x_range=[x_min, x_max], y_range=[y_min, y_max], min_border_right=10)
self.plot.image(image='image', x=x_min, y=y_min, dw=(x_max - x_min), dh=(y_max - y_min), color_mapper=self.color_mapper, source=self.data)
self.color_bar = ColorBar(color_mapper=self.color_mapper, ticker=BasicTicker(desired_num_ticks=10), label_standoff=12, border_line_color=None, location=(0, 0))
self.plot.add_layout(self.color_bar, 'left')<|docstring|>Create all parts of the layout in their initialised way with default data.
Register handler for python callbacks<|endoftext|>
|
02487a4f67298b4186ee697a5503e4f32d5a26b3b768f98525a294d46a2de2e9
|
def level_up(self):
    """Advance the player one level.

    Resets xp, doubles the xp threshold, applies the per-stat gains and
    recomputes hit points from the new strength.
    - param: None
    - return: None
    """
    self.level = self.level + 1
    self.xp = 0
    self.max_xp *= 2
    # Apply each stat's configured gain.
    for stat in ('str', 'dex', 'int'):
        setattr(self, stat, getattr(self, stat) + getattr(self, stat + '_gain'))
    # Max HP scales with strength; level-up fully heals.
    self.max_hp = self.str * 10
    self.hp = self.max_hp
    print(f'{self.name} has leveled up!')
|
This function increases the stats of the player after
level up.
- param: None
- return: None
|
PlayerClass.py
|
level_up
|
Damon-Greenhalgh/BasicAdventureGame
| 1
|
python
|
def level_up(self):
'\n This function increases the stats of the player after\n level up.\n - param: None\n - return: None\n '
self.level += 1
self.xp = 0
self.max_xp += self.max_xp
self.str += self.str_gain
self.dex += self.dex_gain
self.int += self.int_gain
self.max_hp = (self.str * 10)
self.hp = self.max_hp
print(f'{self.name} has leveled up!')
|
def level_up(self):
'\n This function increases the stats of the player after\n level up.\n - param: None\n - return: None\n '
self.level += 1
self.xp = 0
self.max_xp += self.max_xp
self.str += self.str_gain
self.dex += self.dex_gain
self.int += self.int_gain
self.max_hp = (self.str * 10)
self.hp = self.max_hp
print(f'{self.name} has leveled up!')<|docstring|>This function increases the stats of the player after
level up.
- param: None
- return: None<|endoftext|>
|
fb467dfdea4a707d8e2626f7909aa787cbbf1e113ee830aeed9ad1cdd8adbdd1
|
def edit_xp(self, value):
    """Add *value* to the player's xp (clamped at zero) and trigger a
    level up once the xp threshold is reached.
    - param: int value (may be negative)
    - return: None
    """
    new_xp = self.xp + value
    self.xp = new_xp if new_xp > 0 else 0
    if self.xp >= self.max_xp:
        self.level_up()
|
This function changes the current xp value, also handles
the player level up.
- param: int value
- return: None
|
PlayerClass.py
|
edit_xp
|
Damon-Greenhalgh/BasicAdventureGame
| 1
|
python
|
def edit_xp(self, value):
'\n This function changes the current xp value, also handles\n the player level up.\n - param: int value\n - return: None\n '
self.xp = max((self.xp + value), 0)
if (self.xp >= self.max_xp):
self.level_up()
|
def edit_xp(self, value):
'\n This function changes the current xp value, also handles\n the player level up.\n - param: int value\n - return: None\n '
self.xp = max((self.xp + value), 0)
if (self.xp >= self.max_xp):
self.level_up()<|docstring|>This function changes the current xp value, also handles
the player level up.
- param: int value
- return: None<|endoftext|>
|
dafd784cca1ab536555edcd0702143f3b0e3294cf7cc6f632ec1f146e9e860b5
|
def edit_gold(self, value):
    """Add *value* to the entity's gold, never dropping below zero.
    - param: int value (may be negative)
    - return: None
    """
    total = self.gold + value
    self.gold = total if total > 0 else 0
|
This function changes the gold value of the entity
- param: int value
- return: None
|
PlayerClass.py
|
edit_gold
|
Damon-Greenhalgh/BasicAdventureGame
| 1
|
python
|
def edit_gold(self, value):
'\n This function changes the gold value of the entity\n - param: int value\n - return: None\n '
self.gold = max((self.gold + value), 0)
|
def edit_gold(self, value):
'\n This function changes the gold value of the entity\n - param: int value\n - return: None\n '
self.gold = max((self.gold + value), 0)<|docstring|>This function changes the gold value of the entity
- param: int value
- return: None<|endoftext|>
|
6c3251de7cb793f616e69a8714839afc293111a65e6058913ab8a2e142abe1db
|
def __init__(self, solver):
    """Constructor for the TemporalInversion class.

    Parameters
    ----------
    solver : pysit wave solver object
        A wave solver that inherits from pysit.solvers.WaveSolverBase.

    Raises
    ------
    TypeError
        If the solver's equation dynamics do not match this modeling
        object's ``solver_type``.
    """
    dynamics = solver.supports['equation_dynamics']
    # Reject solvers whose dynamics do not match this modeling class.
    if dynamics != self.solver_type:
        raise TypeError("Argument 'solver' type {1} does not match modeling solver type {0}.".format(self.solver_type, dynamics))
    self.solver = solver
|
Constructor for the TemporalInversion class.
Parameters
----------
solver : pysit wave solver object
A wave solver that inherits from pysit.solvers.WaveSolverBase
|
pysit/modeling/temporal_modeling.py
|
__init__
|
zfang-slim/PysitForPython3
| 0
|
python
|
def __init__(self, solver):
'Constructor for the TemporalInversion class.\n\n Parameters\n ----------\n solver : pysit wave solver object\n A wave solver that inherits from pysit.solvers.WaveSolverBase\n\n '
if (self.solver_type == solver.supports['equation_dynamics']):
self.solver = solver
else:
raise TypeError("Argument 'solver' type {1} does not match modeling solver type {0}.".format(self.solver_type, solver.supports['equation_dynamics']))
|
def __init__(self, solver):
'Constructor for the TemporalInversion class.\n\n Parameters\n ----------\n solver : pysit wave solver object\n A wave solver that inherits from pysit.solvers.WaveSolverBase\n\n '
if (self.solver_type == solver.supports['equation_dynamics']):
self.solver = solver
else:
raise TypeError("Argument 'solver' type {1} does not match modeling solver type {0}.".format(self.solver_type, solver.supports['equation_dynamics']))<|docstring|>Constructor for the TemporalInversion class.
Parameters
----------
solver : pysit wave solver object
A wave solver that inherits from pysit.solvers.WaveSolverBase<|endoftext|>
|
2d482ebd991d1edaf2d829e03961dab7aa819610fb4df7207242058fc7db048a
|
def forward_model(self, shot, m0, imaging_period=1, return_parameters=[]):
    """Applies the forward model to the model for the given solver.

    Parameters
    ----------
    shot : pysit.Shot
        Gives the source signal approximation for the right hand side.
    m0 : solver.ModelParameters
        The parameters upon which to evaluate the forward model.
    imaging_period : int, optional
        Store the wave-operator derivative only every
        ``imaging_period``-th time step.
    return_parameters : list of {'wavefield', 'simdata', 'dWaveOp'}
        Which intermediate products to collect and return.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * u is used as the target field universally. It could be velocity
      potential, it could be displacement, it could be pressure.
    * utt is used to generically refer to the derivative of u that is
      needed to compute the imaging condition.

    Forward Model solves:

    For constant density: m*u_tt - lap u = f, where m = 1.0/c**2
    For variable density: m1*u_tt - div(m2 grad)u = f, where
    m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5
    """
    # Bind the solver and push the current model parameters into it.
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    source = shot.sources
    # Allocate containers only for the products the caller asked for.
    if ('wavefield' in return_parameters):
        us = list()
    if ('simdata' in return_parameters):
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
    if ('dWaveOp' in return_parameters):
        dWaveOp = list()
    solver_data = solver.SolverData()
    # Double-buffered right-hand sides for time levels k and k+1.
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_kp1 = np.zeros(mesh.shape(include_bc=True))
    for k in range(nsteps):
        uk = solver_data.k.primary_wavefield
        # Strip boundary/padding cells before sampling or storing.
        uk_bulk = mesh.unpad_array(uk)
        if ('wavefield' in return_parameters):
            us.append(uk_bulk.copy())
        if ('simdata' in return_parameters):
            shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
        if (k == 0):
            # First step: fill both RHS buffers from scratch.
            rhs_k = self._setup_forward_rhs(rhs_k, source.f((k * dt)))
            rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f(((k + 1) * dt)))
        else:
            # Later steps: recycle the k+1 buffer as the new k buffer and
            # only recompute the k+1 right-hand side.
            (rhs_k, rhs_kp1) = (rhs_kp1, rhs_k)
            rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f(((k + 1) * dt)))
        solver.time_step(solver_data, rhs_k, rhs_kp1)
        if ('dWaveOp' in return_parameters):
            # Subsample the stored operator derivatives by imaging_period.
            if ((k % imaging_period) == 0):
                dWaveOp.append(solver.compute_dWaveOp('time', solver_data))
        # Do not advance past the last computed step.
        if (k == (nsteps - 1)):
            break
        solver_data.advance()
    retval = dict()
    if ('wavefield' in return_parameters):
        retval['wavefield'] = us
    if ('dWaveOp' in return_parameters):
        retval['dWaveOp'] = dWaveOp
    if ('simdata' in return_parameters):
        retval['simdata'] = simdata
    return retval
|
Applies the forward model to the model for the given solver.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon which to evaluate the forward model.
return_parameters : list of {'wavefield', 'simdata', 'dWaveOp'}
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* utt is used to generically refer to the derivative of u that is needed to compute the imaging condition.
Forward Model solves:
For constant density: m*u_tt - lap u = f, where m = 1.0/c**2
For variable density: m1*u_tt - div(m2 grad)u = f, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5
|
pysit/modeling/temporal_modeling.py
|
forward_model
|
zfang-slim/PysitForPython3
| 0
|
python
|
def forward_model(self, shot, m0, imaging_period=1, return_parameters=[]):
"Applies the forward model to the model for the given solver.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon which to evaluate the forward model.\n return_parameters : list of {'wavefield', 'simdata', 'dWaveOp'}\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * utt is used to generically refer to the derivative of u that is needed to compute the imaging condition.\n\n Forward Model solves:\n\n For constant density: m*u_tt - lap u = f, where m = 1.0/c**2\n For variable density: m1*u_tt - div(m2 grad)u = f, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
if ('wavefield' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp' in return_parameters):
dWaveOp = list()
solver_data = solver.SolverData()
rhs_k = np.zeros(mesh.shape(include_bc=True))
rhs_kp1 = np.zeros(mesh.shape(include_bc=True))
for k in range(nsteps):
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (k == 0):
rhs_k = self._setup_forward_rhs(rhs_k, source.f((k * dt)))
rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f(((k + 1) * dt)))
else:
(rhs_k, rhs_kp1) = (rhs_kp1, rhs_k)
rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f(((k + 1) * dt)))
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp' in return_parameters):
if ((k % imaging_period) == 0):
dWaveOp.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield' in return_parameters):
retval['wavefield'] = us
if ('dWaveOp' in return_parameters):
retval['dWaveOp'] = dWaveOp
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval
|
def forward_model(self, shot, m0, imaging_period=1, return_parameters=[]):
"Applies the forward model to the model for the given solver.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon which to evaluate the forward model.\n return_parameters : list of {'wavefield', 'simdata', 'dWaveOp'}\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * utt is used to generically refer to the derivative of u that is needed to compute the imaging condition.\n\n Forward Model solves:\n\n For constant density: m*u_tt - lap u = f, where m = 1.0/c**2\n For variable density: m1*u_tt - div(m2 grad)u = f, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
if ('wavefield' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp' in return_parameters):
dWaveOp = list()
solver_data = solver.SolverData()
rhs_k = np.zeros(mesh.shape(include_bc=True))
rhs_kp1 = np.zeros(mesh.shape(include_bc=True))
for k in range(nsteps):
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (k == 0):
rhs_k = self._setup_forward_rhs(rhs_k, source.f((k * dt)))
rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f(((k + 1) * dt)))
else:
(rhs_k, rhs_kp1) = (rhs_kp1, rhs_k)
rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f(((k + 1) * dt)))
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp' in return_parameters):
if ((k % imaging_period) == 0):
dWaveOp.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield' in return_parameters):
retval['wavefield'] = us
if ('dWaveOp' in return_parameters):
retval['dWaveOp'] = dWaveOp
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval<|docstring|>Applies the forward model to the model for the given solver.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon which to evaluate the forward model.
return_parameters : list of {'wavefield', 'simdata', 'dWaveOp'}
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* utt is used to generically refer to the derivative of u that is needed to compute the imaging condition.
Forward Model solves:
For constant density: m*u_tt - lap u = f, where m = 1.0/c**2
For variable density: m1*u_tt - div(m2 grad)u = f, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5<|endoftext|>
|
89308985bb5ffcf377abe12fa4ba598f4ffe624fa2c4181c3a52be282b5ed8c7
|
def migrate_shot(self, shot, m0, operand_simdata, imaging_period, operand_dWaveOpAdj=None, operand_model=None, dWaveOp=None, adjointfield=None, dWaveOpAdj=None, wavefield=None):
    """Performs migration on a single shot.

    Parameters
    ----------
    shot : pysit.Shot
        Shot for which to compute migration.
    operand_simdata : darray
        Operand, i.e., b in (F^*)b.
    dWaveOp : list, optional
        Imaging condition components from the forward model; recomputed
        when not supplied.
    adjointfield, dWaveOpAdj : optional
        Output buffers; when provided they are filled in place with the
        adjoint field / adjoint operator derivative.
    """
    # Recompute the forward wave-operator derivatives when not supplied.
    if dWaveOp is None:
        dWaveOp = self.forward_model(shot, m0, imaging_period, return_parameters=['dWaveOp'])['dWaveOp']
    requested = ['imaging_condition']
    if adjointfield is not None:
        requested += ['adjointfield']
    if dWaveOpAdj is not None:
        requested += ['dWaveOpAdj']
    result = self.adjoint_model(shot, m0, operand_simdata, imaging_period, operand_dWaveOpAdj, operand_model, return_parameters=requested, dWaveOp=dWaveOp, wavefield=wavefield)
    # Copy requested by-products into the caller-supplied buffers.
    if adjointfield is not None:
        adjointfield[:] = result['adjointfield'][:]
    if dWaveOpAdj is not None:
        dWaveOpAdj[:] = result['dWaveOpAdj'][:]
    return result['imaging_condition']
|
Performs migration on a single shot.
Parameters
----------
shot : pysit.Shot
Shot for which to compute migration.
operand : darray
Operand, i.e., b in (F^*)b.
dWaveOp : list
Imaging condition components from the forward model for each receiver in the shot.
qs : list
Optional return list allowing us to retrieve the adjoint field as desired.
|
pysit/modeling/temporal_modeling.py
|
migrate_shot
|
zfang-slim/PysitForPython3
| 0
|
python
|
def migrate_shot(self, shot, m0, operand_simdata, imaging_period, operand_dWaveOpAdj=None, operand_model=None, dWaveOp=None, adjointfield=None, dWaveOpAdj=None, wavefield=None):
'Performs migration on a single shot.\n\n Parameters\n ----------\n shot : pysit.Shot\n Shot for which to compute migration.\n operand : darray\n Operand, i.e., b in (F^*)b.\n dWaveOp : list\n Imaging condition components from the forward model for each receiver in the shot.\n qs : list\n Optional return list allowing us to retrieve the adjoint field as desired.\n\n '
if (dWaveOp is None):
retval = self.forward_model(shot, m0, imaging_period, return_parameters=['dWaveOp'])
dWaveOp = retval['dWaveOp']
rp = ['imaging_condition']
if (adjointfield is not None):
rp.append('adjointfield')
if (dWaveOpAdj is not None):
rp.append('dWaveOpAdj')
rv = self.adjoint_model(shot, m0, operand_simdata, imaging_period, operand_dWaveOpAdj, operand_model, return_parameters=rp, dWaveOp=dWaveOp, wavefield=wavefield)
if (adjointfield is not None):
adjointfield[:] = rv['adjointfield'][:]
if (dWaveOpAdj is not None):
dWaveOpAdj[:] = rv['dWaveOpAdj'][:]
ic = rv['imaging_condition']
return ic
|
def migrate_shot(self, shot, m0, operand_simdata, imaging_period, operand_dWaveOpAdj=None, operand_model=None, dWaveOp=None, adjointfield=None, dWaveOpAdj=None, wavefield=None):
'Performs migration on a single shot.\n\n Parameters\n ----------\n shot : pysit.Shot\n Shot for which to compute migration.\n operand : darray\n Operand, i.e., b in (F^*)b.\n dWaveOp : list\n Imaging condition components from the forward model for each receiver in the shot.\n qs : list\n Optional return list allowing us to retrieve the adjoint field as desired.\n\n '
if (dWaveOp is None):
retval = self.forward_model(shot, m0, imaging_period, return_parameters=['dWaveOp'])
dWaveOp = retval['dWaveOp']
rp = ['imaging_condition']
if (adjointfield is not None):
rp.append('adjointfield')
if (dWaveOpAdj is not None):
rp.append('dWaveOpAdj')
rv = self.adjoint_model(shot, m0, operand_simdata, imaging_period, operand_dWaveOpAdj, operand_model, return_parameters=rp, dWaveOp=dWaveOp, wavefield=wavefield)
if (adjointfield is not None):
adjointfield[:] = rv['adjointfield'][:]
if (dWaveOpAdj is not None):
dWaveOpAdj[:] = rv['dWaveOpAdj'][:]
ic = rv['imaging_condition']
return ic<|docstring|>Performs migration on a single shot.
Parameters
----------
shot : pysit.Shot
Shot for which to compute migration.
operand : darray
Operand, i.e., b in (F^*)b.
dWaveOp : list
Imaging condition components from the forward model for each receiver in the shot.
qs : list
Optional return list allowing us to retrieve the adjoint field as desired.<|endoftext|>
|
2d668a13e74de9619430f2fec4860681816653126fa71d06ea45b79a8cdbae7b
|
def migrate_shots_extend(self, shots, m0, operand_simdata, max_sub_offset, h, imaging_period, operand_dWaveOpAdj=None, operand_model=None, DWaveOpIn=None, adjointfield=None, dWaveOpAdj=None, wavefield=None):
    """Performs extended migration accumulated over a collection of shots.

    Parameters
    ----------
    shots : list of pysit.Shot
        Shots for which to compute the accumulated extended image.
    operand_simdata : list of darray
        Per-shot operand, i.e., b in (F^*)b.
    max_sub_offset : float
        Maximum subsurface offset of the extended image.
    h : float
        Subsurface offset sampling interval.
    DWaveOpIn : list, optional
        Precomputed imaging condition components, one entry per shot;
        recomputed via the forward model when None.
    adjointfield, dWaveOpAdj : optional
        Output buffers filled in place (with the values of the last shot).

    Returns
    -------
    Extended imaging condition summed over all shots.
    """
    for i in range(len(shots)):
        shot = shots[i]
        if (DWaveOpIn is None):
            retval = self.forward_model(shot, m0, imaging_period, return_parameters=['dWaveOp'])
            dWaveOp = retval['dWaveOp']
        else:
            # BUG FIX: the original read `DWaveOp[i]`, an undefined name,
            # raising NameError whenever precomputed operators were passed.
            dWaveOp = DWaveOpIn[i]
        rp = ['imaging_condition']
        if (adjointfield is not None):
            rp.append('adjointfield')
        if (dWaveOpAdj is not None):
            rp.append('dWaveOpAdj')
        rv = self.adjoint_model_extend(shot, m0, operand_simdata[i], max_sub_offset, h, imaging_period, operand_dWaveOpAdj, operand_model, return_parameters=rp, dWaveOp=dWaveOp, wavefield=wavefield)
        if (adjointfield is not None):
            adjointfield[:] = rv['adjointfield'][:]
        if (dWaveOpAdj is not None):
            dWaveOpAdj[:] = rv['dWaveOpAdj'][:]
        ic = rv['imaging_condition']
        # Accumulate: deep-copy the first image, then sum in place.
        if (i == 0):
            Ic = copy.deepcopy(ic)
        else:
            Ic.data += ic.data
    return Ic
|
Performs migration on a single shot.
Parameters
----------
shot : pysit.Shot
Shot for which to compute migration.
operand : darray
Operand, i.e., b in (F^*)b.
dWaveOp : list
Imaging condition components from the forward model for each receiver in the shot.
qs : list
Optional return list allowing us to retrieve the adjoint field as desired.
|
pysit/modeling/temporal_modeling.py
|
migrate_shots_extend
|
zfang-slim/PysitForPython3
| 0
|
python
|
def migrate_shots_extend(self, shots, m0, operand_simdata, max_sub_offset, h, imaging_period, operand_dWaveOpAdj=None, operand_model=None, DWaveOpIn=None, adjointfield=None, dWaveOpAdj=None, wavefield=None):
'Performs migration on a single shot.\n\n Parameters\n ----------\n shot : pysit.Shot\n Shot for which to compute migration.\n operand : darray\n Operand, i.e., b in (F^*)b.\n dWaveOp : list\n Imaging condition components from the forward model for each receiver in the shot.\n qs : list\n Optional return list allowing us to retrieve the adjoint field as desired.\n\n '
for i in range(len(shots)):
shot = shots[i]
if (DWaveOpIn is None):
retval = self.forward_model(shot, m0, imaging_period, return_parameters=['dWaveOp'])
dWaveOp = retval['dWaveOp']
else:
dWaveOp = DWaveOp[i]
rp = ['imaging_condition']
if (adjointfield is not None):
rp.append('adjointfield')
if (dWaveOpAdj is not None):
rp.append('dWaveOpAdj')
rv = self.adjoint_model_extend(shot, m0, operand_simdata[i], max_sub_offset, h, imaging_period, operand_dWaveOpAdj, operand_model, return_parameters=rp, dWaveOp=dWaveOp, wavefield=wavefield)
if (adjointfield is not None):
adjointfield[:] = rv['adjointfield'][:]
if (dWaveOpAdj is not None):
dWaveOpAdj[:] = rv['dWaveOpAdj'][:]
ic = rv['imaging_condition']
if (i == 0):
Ic = copy.deepcopy(ic)
else:
Ic.data += ic.data
return Ic
|
def migrate_shots_extend(self, shots, m0, operand_simdata, max_sub_offset, h, imaging_period, operand_dWaveOpAdj=None, operand_model=None, DWaveOpIn=None, adjointfield=None, dWaveOpAdj=None, wavefield=None):
'Performs migration on a single shot.\n\n Parameters\n ----------\n shot : pysit.Shot\n Shot for which to compute migration.\n operand : darray\n Operand, i.e., b in (F^*)b.\n dWaveOp : list\n Imaging condition components from the forward model for each receiver in the shot.\n qs : list\n Optional return list allowing us to retrieve the adjoint field as desired.\n\n '
for i in range(len(shots)):
shot = shots[i]
if (DWaveOpIn is None):
retval = self.forward_model(shot, m0, imaging_period, return_parameters=['dWaveOp'])
dWaveOp = retval['dWaveOp']
else:
dWaveOp = DWaveOp[i]
rp = ['imaging_condition']
if (adjointfield is not None):
rp.append('adjointfield')
if (dWaveOpAdj is not None):
rp.append('dWaveOpAdj')
rv = self.adjoint_model_extend(shot, m0, operand_simdata[i], max_sub_offset, h, imaging_period, operand_dWaveOpAdj, operand_model, return_parameters=rp, dWaveOp=dWaveOp, wavefield=wavefield)
if (adjointfield is not None):
adjointfield[:] = rv['adjointfield'][:]
if (dWaveOpAdj is not None):
dWaveOpAdj[:] = rv['dWaveOpAdj'][:]
ic = rv['imaging_condition']
if (i == 0):
Ic = copy.deepcopy(ic)
else:
Ic.data += ic.data
return Ic<|docstring|>Performs migration on a single shot.
Parameters
----------
shot : pysit.Shot
Shot for which to compute migration.
operand : darray
Operand, i.e., b in (F^*)b.
dWaveOp : list
Imaging condition components from the forward model for each receiver in the shot.
qs : list
Optional return list allowing us to retrieve the adjoint field as desired.<|endoftext|>
|
de46589ff4db7fc2ef151c7c99e361d87bb89222a1034a49d2f5165b644eac68
|
def adjoint_model_extend(self, shot, m0, operand_simdata, max_sub_offset, h, imaging_period=1, operand_dWaveOpAdj=None, operand_model=None, return_parameters=[], dWaveOp=None, wavefield=None):
    """Solves for the adjoint field of the extended (subsurface-offset) model.

    For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
    For variable density: m1*q_tt - div(m2 grad)q = resid,
    where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5

    Parameters
    ----------
    shot : pysit.Shot
        Gives the receiver model for the right hand side.
    m0 : solver.ModelParameters
        Background model.
    operand_simdata : ndarray
        Right hand side component in the data space, usually the residual.
    max_sub_offset : float
        Maximum subsurface offset of the extended image.
    h : float
        Subsurface-offset sampling interval.
    imaging_period : int
        Imaging condition is accumulated every `imaging_period` time steps.
    operand_dWaveOpAdj : list of ndarray, optional
        Right hand side component in the wave equation space.
    return_parameters : list of {'adjointfield', 'dWaveOpAdj', 'imaging_condition'}
        Values to compute and return.  (Only read, never mutated, so the
        mutable default is safe here.)
    dWaveOp : list, optional
        Imaging component from the forward model; required when the
        imaging condition is requested.
    wavefield : list, optional
        Forward wavefield; only read in the variable-density branch.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * q is the adjoint field; the equation is integrated backwards in time.
    * The imaging condition is accumulated as we go to save computational
      effort, which is why `dWaveOp` must be supplied for it.
    """
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    dt = solver.dt
    nsteps = solver.nsteps
    # Number of subsurface-offset samples: offsets 0..max in steps of h,
    # mirrored on both sides plus the zero offset.
    nh = 2 * int(max_sub_offset / h) + 1
    if 'adjointfield' in return_parameters:
        vs = list()
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj = list()
    if dWaveOp is not None:
        ic = ExtendedModelingParameter2D(mesh, max_sub_offset, h)
        ic_data_tmp = np.zeros(ic.sh_data)
        do_ic = True
    elif 'imaging_condition' in return_parameters:
        raise ValueError('To compute imaging condition, forward component must be specified.')
    else:
        do_ic = False
    # Hoisted: the model's density attributes cannot change mid-loop.
    variable_density = hasattr(m0, 'kappa') and hasattr(m0, 'rho')
    if variable_density:
        print("WARNING: Ian's operators are still used here even though the solver has changed. Gradient may be incorrect. These routines need to be updated.")
        deltas = [mesh.x.delta, mesh.z.delta]
        sh = mesh.shape(include_bc=True, as_grid=True)
        (D1, D2) = build_heterogenous_matrices(sh, deltas)
    solver_data = solver.SolverData()
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_km1 = np.zeros(mesh.shape(include_bc=True))
    if operand_model is not None:
        operand_model = operand_model.with_padding()
    # The adjoint equation is solved backwards in time.
    for k in range(nsteps - 1, -1, -1):
        vk = solver_data.k.primary_wavefield
        vk_bulk = mesh.unpad_array(vk)
        if variable_density:
            uk = mesh.pad_array(wavefield[k])
        if 'adjointfield' in return_parameters:
            vs.append(vk_bulk.copy())
        if do_ic and (k % imaging_period) == 0:
            entry = k // imaging_period
            if variable_density:
                ic.kappa += vk * dWaveOp[entry]
                ic.rho += (D1[0] * uk) * (D1[1] * vk) + (D2[0] * uk) * (D2[1] * vk)
            else:
                # Cross-correlate oppositely shifted slices of the forward
                # component and the adjoint field for each offset index ih.
                for ih in range(0, nh):
                    u_tmp = dWaveOp[entry][ic.padding_index_u[0] + ih * ic.skip_index]
                    v_tmp = vk[ic.padding_index_v[0] - ih * ic.skip_index]
                    # BUG FIX: original `ic_data_tmp[(:, ih)]` is invalid
                    # Python syntax; a 2-D column assignment is intended.
                    ic_data_tmp[:, ih] = (v_tmp * u_tmp).reshape((-1,))
                ic.data += ic_data_tmp
        if k == (nsteps - 1):
            rhs_k = self._setup_adjoint_rhs(rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        else:
            # Reuse buffers: the previous step's km1 becomes the new k.
            rhs_k, rhs_km1 = rhs_km1, rhs_k
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        solver.time_step(solver_data, rhs_k, rhs_km1)
        if 'dWaveOpAdj' in return_parameters and (k % imaging_period) == 0:
            dWaveOpAdj.append(solver.compute_dWaveOp('time', solver_data))
        if k == 0:
            break
        solver_data.advance()
    if do_ic:
        # Scale by -dt (adjoint time integral) and the imaging period.
        ic.data *= (-1) * dt
        ic.data *= imaging_period
    retval = dict()
    if 'adjointfield' in return_parameters:
        # Fields were collected backwards in time; reverse to forward order.
        qs = list(vs)
        qs.reverse()
        retval['adjointfield'] = qs
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj.reverse()
        retval['dWaveOpAdj'] = dWaveOpAdj
    if do_ic:
        retval['imaging_condition'] = ic
    return retval
|
Solves for the adjoint field.
For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
For variable density: m1*q_tt - div(m2 grad)q = resid, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5
Parameters
----------
shot : pysit.Shot
Gives the receiver model for the right hand side.
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
operand_dWaveOpAdj : list of ndarray
Right hand side component in the wave equation space, usually something to do with the imaging component...this needs resolving
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
return_parameters : list of {'adjointfield', 'ic'}
dWaveOp : ndarray
Imaging component from the forward model.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* q is the adjoint field.
* ic is the imaging component. Because this function computes many of
the things required to compute the imaging condition, there is an option
to compute the imaging condition as we go. This should be used to save
computational effort. If the imaging condition is to be computed, the
optional argument utt must be present.
Imaging Condition for variable density has components:
ic.m1 = u_tt * q
ic.m2 = grad(u) dot grad(q)
|
pysit/modeling/temporal_modeling.py
|
adjoint_model_extend
|
zfang-slim/PysitForPython3
| 0
|
python
|
def adjoint_model_extend(self, shot, m0, operand_simdata, max_sub_offset, h, imaging_period=1, operand_dWaveOpAdj=None, operand_model=None, return_parameters=[], dWaveOp=None, wavefield=None):
    """Solves for the adjoint field of the extended (subsurface-offset) model.

    For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
    For variable density: m1*q_tt - div(m2 grad)q = resid,
    where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5

    Parameters
    ----------
    shot : pysit.Shot
        Gives the receiver model for the right hand side.
    m0 : solver.ModelParameters
        Background model.
    operand_simdata : ndarray
        Right hand side component in the data space, usually the residual.
    max_sub_offset : float
        Maximum subsurface offset of the extended image.
    h : float
        Subsurface-offset sampling interval.
    imaging_period : int
        Imaging condition is accumulated every `imaging_period` time steps.
    operand_dWaveOpAdj : list of ndarray, optional
        Right hand side component in the wave equation space.
    return_parameters : list of {'adjointfield', 'dWaveOpAdj', 'imaging_condition'}
        Values to compute and return.  (Only read, never mutated, so the
        mutable default is safe here.)
    dWaveOp : list, optional
        Imaging component from the forward model; required when the
        imaging condition is requested.
    wavefield : list, optional
        Forward wavefield; only read in the variable-density branch.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * q is the adjoint field; the equation is integrated backwards in time.
    * The imaging condition is accumulated as we go to save computational
      effort, which is why `dWaveOp` must be supplied for it.
    """
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    dt = solver.dt
    nsteps = solver.nsteps
    # Number of subsurface-offset samples: offsets 0..max in steps of h,
    # mirrored on both sides plus the zero offset.
    nh = 2 * int(max_sub_offset / h) + 1
    if 'adjointfield' in return_parameters:
        vs = list()
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj = list()
    if dWaveOp is not None:
        ic = ExtendedModelingParameter2D(mesh, max_sub_offset, h)
        ic_data_tmp = np.zeros(ic.sh_data)
        do_ic = True
    elif 'imaging_condition' in return_parameters:
        raise ValueError('To compute imaging condition, forward component must be specified.')
    else:
        do_ic = False
    # Hoisted: the model's density attributes cannot change mid-loop.
    variable_density = hasattr(m0, 'kappa') and hasattr(m0, 'rho')
    if variable_density:
        print("WARNING: Ian's operators are still used here even though the solver has changed. Gradient may be incorrect. These routines need to be updated.")
        deltas = [mesh.x.delta, mesh.z.delta]
        sh = mesh.shape(include_bc=True, as_grid=True)
        (D1, D2) = build_heterogenous_matrices(sh, deltas)
    solver_data = solver.SolverData()
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_km1 = np.zeros(mesh.shape(include_bc=True))
    if operand_model is not None:
        operand_model = operand_model.with_padding()
    # The adjoint equation is solved backwards in time.
    for k in range(nsteps - 1, -1, -1):
        vk = solver_data.k.primary_wavefield
        vk_bulk = mesh.unpad_array(vk)
        if variable_density:
            uk = mesh.pad_array(wavefield[k])
        if 'adjointfield' in return_parameters:
            vs.append(vk_bulk.copy())
        if do_ic and (k % imaging_period) == 0:
            entry = k // imaging_period
            if variable_density:
                ic.kappa += vk * dWaveOp[entry]
                ic.rho += (D1[0] * uk) * (D1[1] * vk) + (D2[0] * uk) * (D2[1] * vk)
            else:
                # Cross-correlate oppositely shifted slices of the forward
                # component and the adjoint field for each offset index ih.
                for ih in range(0, nh):
                    u_tmp = dWaveOp[entry][ic.padding_index_u[0] + ih * ic.skip_index]
                    v_tmp = vk[ic.padding_index_v[0] - ih * ic.skip_index]
                    # BUG FIX: original `ic_data_tmp[(:, ih)]` is invalid
                    # Python syntax; a 2-D column assignment is intended.
                    ic_data_tmp[:, ih] = (v_tmp * u_tmp).reshape((-1,))
                ic.data += ic_data_tmp
        if k == (nsteps - 1):
            rhs_k = self._setup_adjoint_rhs(rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        else:
            # Reuse buffers: the previous step's km1 becomes the new k.
            rhs_k, rhs_km1 = rhs_km1, rhs_k
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        solver.time_step(solver_data, rhs_k, rhs_km1)
        if 'dWaveOpAdj' in return_parameters and (k % imaging_period) == 0:
            dWaveOpAdj.append(solver.compute_dWaveOp('time', solver_data))
        if k == 0:
            break
        solver_data.advance()
    if do_ic:
        # Scale by -dt (adjoint time integral) and the imaging period.
        ic.data *= (-1) * dt
        ic.data *= imaging_period
    retval = dict()
    if 'adjointfield' in return_parameters:
        # Fields were collected backwards in time; reverse to forward order.
        qs = list(vs)
        qs.reverse()
        retval['adjointfield'] = qs
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj.reverse()
        retval['dWaveOpAdj'] = dWaveOpAdj
    if do_ic:
        retval['imaging_condition'] = ic
    return retval
|
def adjoint_model_extend(self, shot, m0, operand_simdata, max_sub_offset, h, imaging_period=1, operand_dWaveOpAdj=None, operand_model=None, return_parameters=[], dWaveOp=None, wavefield=None):
    """Solves for the adjoint field of the extended (subsurface-offset) model.

    For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
    For variable density: m1*q_tt - div(m2 grad)q = resid,
    where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5

    Parameters
    ----------
    shot : pysit.Shot
        Gives the receiver model for the right hand side.
    m0 : solver.ModelParameters
        Background model.
    operand_simdata : ndarray
        Right hand side component in the data space, usually the residual.
    max_sub_offset : float
        Maximum subsurface offset of the extended image.
    h : float
        Subsurface-offset sampling interval.
    imaging_period : int
        Imaging condition is accumulated every `imaging_period` time steps.
    operand_dWaveOpAdj : list of ndarray, optional
        Right hand side component in the wave equation space.
    return_parameters : list of {'adjointfield', 'dWaveOpAdj', 'imaging_condition'}
        Values to compute and return.  (Only read, never mutated, so the
        mutable default is safe here.)
    dWaveOp : list, optional
        Imaging component from the forward model; required when the
        imaging condition is requested.
    wavefield : list, optional
        Forward wavefield; only read in the variable-density branch.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * q is the adjoint field; the equation is integrated backwards in time.
    * The imaging condition is accumulated as we go to save computational
      effort, which is why `dWaveOp` must be supplied for it.
    """
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    dt = solver.dt
    nsteps = solver.nsteps
    # Number of subsurface-offset samples: offsets 0..max in steps of h,
    # mirrored on both sides plus the zero offset.
    nh = 2 * int(max_sub_offset / h) + 1
    if 'adjointfield' in return_parameters:
        vs = list()
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj = list()
    if dWaveOp is not None:
        ic = ExtendedModelingParameter2D(mesh, max_sub_offset, h)
        ic_data_tmp = np.zeros(ic.sh_data)
        do_ic = True
    elif 'imaging_condition' in return_parameters:
        raise ValueError('To compute imaging condition, forward component must be specified.')
    else:
        do_ic = False
    # Hoisted: the model's density attributes cannot change mid-loop.
    variable_density = hasattr(m0, 'kappa') and hasattr(m0, 'rho')
    if variable_density:
        print("WARNING: Ian's operators are still used here even though the solver has changed. Gradient may be incorrect. These routines need to be updated.")
        deltas = [mesh.x.delta, mesh.z.delta]
        sh = mesh.shape(include_bc=True, as_grid=True)
        (D1, D2) = build_heterogenous_matrices(sh, deltas)
    solver_data = solver.SolverData()
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_km1 = np.zeros(mesh.shape(include_bc=True))
    if operand_model is not None:
        operand_model = operand_model.with_padding()
    # The adjoint equation is solved backwards in time.
    for k in range(nsteps - 1, -1, -1):
        vk = solver_data.k.primary_wavefield
        vk_bulk = mesh.unpad_array(vk)
        if variable_density:
            uk = mesh.pad_array(wavefield[k])
        if 'adjointfield' in return_parameters:
            vs.append(vk_bulk.copy())
        if do_ic and (k % imaging_period) == 0:
            entry = k // imaging_period
            if variable_density:
                ic.kappa += vk * dWaveOp[entry]
                ic.rho += (D1[0] * uk) * (D1[1] * vk) + (D2[0] * uk) * (D2[1] * vk)
            else:
                # Cross-correlate oppositely shifted slices of the forward
                # component and the adjoint field for each offset index ih.
                for ih in range(0, nh):
                    u_tmp = dWaveOp[entry][ic.padding_index_u[0] + ih * ic.skip_index]
                    v_tmp = vk[ic.padding_index_v[0] - ih * ic.skip_index]
                    # BUG FIX: original `ic_data_tmp[(:, ih)]` is invalid
                    # Python syntax; a 2-D column assignment is intended.
                    ic_data_tmp[:, ih] = (v_tmp * u_tmp).reshape((-1,))
                ic.data += ic_data_tmp
        if k == (nsteps - 1):
            rhs_k = self._setup_adjoint_rhs(rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        else:
            # Reuse buffers: the previous step's km1 becomes the new k.
            rhs_k, rhs_km1 = rhs_km1, rhs_k
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        solver.time_step(solver_data, rhs_k, rhs_km1)
        if 'dWaveOpAdj' in return_parameters and (k % imaging_period) == 0:
            dWaveOpAdj.append(solver.compute_dWaveOp('time', solver_data))
        if k == 0:
            break
        solver_data.advance()
    if do_ic:
        # Scale by -dt (adjoint time integral) and the imaging period.
        ic.data *= (-1) * dt
        ic.data *= imaging_period
    retval = dict()
    if 'adjointfield' in return_parameters:
        # Fields were collected backwards in time; reverse to forward order.
        qs = list(vs)
        qs.reverse()
        retval['adjointfield'] = qs
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj.reverse()
        retval['dWaveOpAdj'] = dWaveOpAdj
    if do_ic:
        retval['imaging_condition'] = ic
    return retval
For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
For variable density: m1*q_tt - div(m2 grad)q = resid, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5
Parameters
----------
shot : pysit.Shot
Gives the receiver model for the right hand side.
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
operand_dWaveOpAdj : list of ndarray
Right hand side component in the wave equation space, usually something to do with the imaging component...this needs resolving
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
return_parameters : list of {'adjointfield', 'ic'}
dWaveOp : ndarray
Imaging component from the forward model.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* q is the adjoint field.
* ic is the imaging component. Because this function computes many of
the things required to compute the imaging condition, there is an option
to compute the imaging condition as we go. This should be used to save
computational effort. If the imaging condition is to be computed, the
optional argument utt must be present.
Imaging Condition for variable density has components:
ic.m1 = u_tt * q
ic.m2 = grad(u) dot grad(q)<|endoftext|>
|
8d580fa45b6daf13bbd16e64e48bf2acbd40313441d5f5a9e4a3c5c85cbad6fe
|
def adjoint_model(self, shot, m0, operand_simdata, imaging_period=1, operand_dWaveOpAdj=None, operand_model=None, return_parameters=[], dWaveOp=None, wavefield=None):
    """Solves for the adjoint field.

    For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
    For variable density: m1*q_tt - div(m2 grad)q = resid,
    where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5

    Parameters
    ----------
    shot : pysit.Shot
        Gives the receiver model for the right hand side.
    m0 : solver.ModelParameters
        Background model.
    operand_simdata : ndarray
        Right hand side component in the data space, usually the residual.
    imaging_period : int
        Imaging condition is accumulated every `imaging_period` time steps.
    operand_dWaveOpAdj : list of ndarray, optional
        Right hand side component in the wave equation space.
    return_parameters : list of {'adjointfield', 'dWaveOpAdj', 'imaging_condition'}
        Values to compute and return.  (Only read, never mutated, so the
        mutable default is safe here.)
    dWaveOp : list, optional
        Imaging component from the forward model; required when the
        imaging condition is requested.
    wavefield : list, optional
        Forward wavefield; only read in the variable-density branch.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * q is the adjoint field; the equation is integrated backwards in time.
    * The imaging condition is accumulated as we go to save computational
      effort, which is why `dWaveOp` must be supplied for it.

    Imaging Condition for variable density has components:
        ic.m1 = u_tt * q
        ic.m2 = grad(u) dot grad(q)
    """
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    dt = solver.dt
    nsteps = solver.nsteps
    if 'adjointfield' in return_parameters:
        vs = list()
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj = list()
    if dWaveOp is not None:
        ic = solver.model_parameters.perturbation()
        do_ic = True
    elif 'imaging_condition' in return_parameters:
        raise ValueError('To compute imaging condition, forward component must be specified.')
    else:
        do_ic = False
    # Hoisted: the model's density attributes cannot change mid-loop.
    variable_density = hasattr(m0, 'kappa') and hasattr(m0, 'rho')
    if variable_density:
        print("WARNING: Ian's operators are still used here even though the solver has changed. Gradient may be incorrect. These routines need to be updated.")
        deltas = [mesh.x.delta, mesh.z.delta]
        sh = mesh.shape(include_bc=True, as_grid=True)
        (D1, D2) = build_heterogenous_matrices(sh, deltas)
    solver_data = solver.SolverData()
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_km1 = np.zeros(mesh.shape(include_bc=True))
    if operand_model is not None:
        operand_model = operand_model.with_padding()
    # The adjoint equation is solved backwards in time.
    for k in range(nsteps - 1, -1, -1):
        vk = solver_data.k.primary_wavefield
        vk_bulk = mesh.unpad_array(vk)
        if variable_density:
            uk = mesh.pad_array(wavefield[k])
        if 'adjointfield' in return_parameters:
            vs.append(vk_bulk.copy())
        if do_ic and (k % imaging_period) == 0:
            entry = k // imaging_period
            if variable_density:
                ic.kappa += vk * dWaveOp[entry]
                ic.rho += (D1[0] * uk) * (D1[1] * vk) + (D2[0] * uk) * (D2[1] * vk)
            else:
                ic += vk * dWaveOp[entry]
        if k == (nsteps - 1):
            rhs_k = self._setup_adjoint_rhs(rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        else:
            # Reuse buffers: the previous step's km1 becomes the new k.
            rhs_k, rhs_km1 = rhs_km1, rhs_k
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        solver.time_step(solver_data, rhs_k, rhs_km1)
        if 'dWaveOpAdj' in return_parameters and (k % imaging_period) == 0:
            dWaveOpAdj.append(solver.compute_dWaveOp('time', solver_data))
        if k == 0:
            break
        solver_data.advance()
    if do_ic:
        # Scale by -dt (adjoint time integral) and the imaging period.
        ic *= (-1) * dt
        ic *= imaging_period
        if m0.padded is not True:
            # BUG FIX: the original compared strings with `is`, which relies
            # on CPython interning and emits a SyntaxWarning; use `==`.
            if solver.inv_padding_mode == 'add':
                ic = ic.add_padding()
            else:
                ic = ic.without_padding()
    retval = dict()
    if 'adjointfield' in return_parameters:
        # Fields were collected backwards in time; reverse to forward order.
        qs = list(vs)
        qs.reverse()
        retval['adjointfield'] = qs
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj.reverse()
        retval['dWaveOpAdj'] = dWaveOpAdj
    if do_ic:
        retval['imaging_condition'] = ic
    return retval
|
Solves for the adjoint field.
For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
For variable density: m1*q_tt - div(m2 grad)q = resid, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5
Parameters
----------
shot : pysit.Shot
Gives the receiver model for the right hand side.
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
operand_dWaveOpAdj : list of ndarray
Right hand side component in the wave equation space, usually something to do with the imaging component...this needs resolving
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
return_parameters : list of {'adjointfield', 'ic'}
dWaveOp : ndarray
Imaging component from the forward model.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* q is the adjoint field.
* ic is the imaging component. Because this function computes many of
the things required to compute the imaging condition, there is an option
to compute the imaging condition as we go. This should be used to save
computational effort. If the imaging condition is to be computed, the
optional argument utt must be present.
Imaging Condition for variable density has components:
ic.m1 = u_tt * q
ic.m2 = grad(u) dot grad(q)
|
pysit/modeling/temporal_modeling.py
|
adjoint_model
|
zfang-slim/PysitForPython3
| 0
|
python
|
def adjoint_model(self, shot, m0, operand_simdata, imaging_period=1, operand_dWaveOpAdj=None, operand_model=None, return_parameters=[], dWaveOp=None, wavefield=None):
    """Solves for the adjoint field.

    For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
    For variable density: m1*q_tt - div(m2 grad)q = resid,
    where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5

    Parameters
    ----------
    shot : pysit.Shot
        Gives the receiver model for the right hand side.
    m0 : solver.ModelParameters
        Background model.
    operand_simdata : ndarray
        Right hand side component in the data space, usually the residual.
    imaging_period : int
        Imaging condition is accumulated every `imaging_period` time steps.
    operand_dWaveOpAdj : list of ndarray, optional
        Right hand side component in the wave equation space.
    return_parameters : list of {'adjointfield', 'dWaveOpAdj', 'imaging_condition'}
        Values to compute and return.  (Only read, never mutated, so the
        mutable default is safe here.)
    dWaveOp : list, optional
        Imaging component from the forward model; required when the
        imaging condition is requested.
    wavefield : list, optional
        Forward wavefield; only read in the variable-density branch.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * q is the adjoint field; the equation is integrated backwards in time.
    * The imaging condition is accumulated as we go to save computational
      effort, which is why `dWaveOp` must be supplied for it.

    Imaging Condition for variable density has components:
        ic.m1 = u_tt * q
        ic.m2 = grad(u) dot grad(q)
    """
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    dt = solver.dt
    nsteps = solver.nsteps
    if 'adjointfield' in return_parameters:
        vs = list()
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj = list()
    if dWaveOp is not None:
        ic = solver.model_parameters.perturbation()
        do_ic = True
    elif 'imaging_condition' in return_parameters:
        raise ValueError('To compute imaging condition, forward component must be specified.')
    else:
        do_ic = False
    # Hoisted: the model's density attributes cannot change mid-loop.
    variable_density = hasattr(m0, 'kappa') and hasattr(m0, 'rho')
    if variable_density:
        print("WARNING: Ian's operators are still used here even though the solver has changed. Gradient may be incorrect. These routines need to be updated.")
        deltas = [mesh.x.delta, mesh.z.delta]
        sh = mesh.shape(include_bc=True, as_grid=True)
        (D1, D2) = build_heterogenous_matrices(sh, deltas)
    solver_data = solver.SolverData()
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_km1 = np.zeros(mesh.shape(include_bc=True))
    if operand_model is not None:
        operand_model = operand_model.with_padding()
    # The adjoint equation is solved backwards in time.
    for k in range(nsteps - 1, -1, -1):
        vk = solver_data.k.primary_wavefield
        vk_bulk = mesh.unpad_array(vk)
        if variable_density:
            uk = mesh.pad_array(wavefield[k])
        if 'adjointfield' in return_parameters:
            vs.append(vk_bulk.copy())
        if do_ic and (k % imaging_period) == 0:
            entry = k // imaging_period
            if variable_density:
                ic.kappa += vk * dWaveOp[entry]
                ic.rho += (D1[0] * uk) * (D1[1] * vk) + (D2[0] * uk) * (D2[1] * vk)
            else:
                ic += vk * dWaveOp[entry]
        if k == (nsteps - 1):
            rhs_k = self._setup_adjoint_rhs(rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        else:
            # Reuse buffers: the previous step's km1 becomes the new k.
            rhs_k, rhs_km1 = rhs_km1, rhs_k
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        solver.time_step(solver_data, rhs_k, rhs_km1)
        if 'dWaveOpAdj' in return_parameters and (k % imaging_period) == 0:
            dWaveOpAdj.append(solver.compute_dWaveOp('time', solver_data))
        if k == 0:
            break
        solver_data.advance()
    if do_ic:
        # Scale by -dt (adjoint time integral) and the imaging period.
        ic *= (-1) * dt
        ic *= imaging_period
        if m0.padded is not True:
            # BUG FIX: the original compared strings with `is`, which relies
            # on CPython interning and emits a SyntaxWarning; use `==`.
            if solver.inv_padding_mode == 'add':
                ic = ic.add_padding()
            else:
                ic = ic.without_padding()
    retval = dict()
    if 'adjointfield' in return_parameters:
        # Fields were collected backwards in time; reverse to forward order.
        qs = list(vs)
        qs.reverse()
        retval['adjointfield'] = qs
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj.reverse()
        retval['dWaveOpAdj'] = dWaveOpAdj
    if do_ic:
        retval['imaging_condition'] = ic
    return retval
|
def adjoint_model(self, shot, m0, operand_simdata, imaging_period=1, operand_dWaveOpAdj=None, operand_model=None, return_parameters=[], dWaveOp=None, wavefield=None):
    """Solves for the adjoint field.

    For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
    For variable density: m1*q_tt - div(m2 grad)q = resid,
    where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5

    Parameters
    ----------
    shot : pysit.Shot
        Gives the receiver model for the right hand side.
    m0 : solver.ModelParameters
        Background model.
    operand_simdata : ndarray
        Right hand side component in the data space, usually the residual.
    imaging_period : int
        Imaging condition is accumulated every `imaging_period` time steps.
    operand_dWaveOpAdj : list of ndarray, optional
        Right hand side component in the wave equation space.
    return_parameters : list of {'adjointfield', 'dWaveOpAdj', 'imaging_condition'}
        Values to compute and return.  (Only read, never mutated, so the
        mutable default is safe here.)
    dWaveOp : list, optional
        Imaging component from the forward model; required when the
        imaging condition is requested.
    wavefield : list, optional
        Forward wavefield; only read in the variable-density branch.

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the
        specified data.

    Notes
    -----
    * q is the adjoint field; the equation is integrated backwards in time.
    * The imaging condition is accumulated as we go to save computational
      effort, which is why `dWaveOp` must be supplied for it.

    Imaging Condition for variable density has components:
        ic.m1 = u_tt * q
        ic.m2 = grad(u) dot grad(q)
    """
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    dt = solver.dt
    nsteps = solver.nsteps
    if 'adjointfield' in return_parameters:
        vs = list()
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj = list()
    if dWaveOp is not None:
        ic = solver.model_parameters.perturbation()
        do_ic = True
    elif 'imaging_condition' in return_parameters:
        raise ValueError('To compute imaging condition, forward component must be specified.')
    else:
        do_ic = False
    # Hoisted: the model's density attributes cannot change mid-loop.
    variable_density = hasattr(m0, 'kappa') and hasattr(m0, 'rho')
    if variable_density:
        print("WARNING: Ian's operators are still used here even though the solver has changed. Gradient may be incorrect. These routines need to be updated.")
        deltas = [mesh.x.delta, mesh.z.delta]
        sh = mesh.shape(include_bc=True, as_grid=True)
        (D1, D2) = build_heterogenous_matrices(sh, deltas)
    solver_data = solver.SolverData()
    rhs_k = np.zeros(mesh.shape(include_bc=True))
    rhs_km1 = np.zeros(mesh.shape(include_bc=True))
    if operand_model is not None:
        operand_model = operand_model.with_padding()
    # The adjoint equation is solved backwards in time.
    for k in range(nsteps - 1, -1, -1):
        vk = solver_data.k.primary_wavefield
        vk_bulk = mesh.unpad_array(vk)
        if variable_density:
            uk = mesh.pad_array(wavefield[k])
        if 'adjointfield' in return_parameters:
            vs.append(vk_bulk.copy())
        if do_ic and (k % imaging_period) == 0:
            entry = k // imaging_period
            if variable_density:
                ic.kappa += vk * dWaveOp[entry]
                ic.rho += (D1[0] * uk) * (D1[1] * vk) + (D2[0] * uk) * (D2[1] * vk)
            else:
                ic += vk * dWaveOp[entry]
        if k == (nsteps - 1):
            rhs_k = self._setup_adjoint_rhs(rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        else:
            # Reuse buffers: the previous step's km1 becomes the new k.
            rhs_k, rhs_km1 = rhs_km1, rhs_k
            rhs_km1 = self._setup_adjoint_rhs(rhs_km1, shot, k - 1, operand_simdata, operand_model, operand_dWaveOpAdj)
        solver.time_step(solver_data, rhs_k, rhs_km1)
        if 'dWaveOpAdj' in return_parameters and (k % imaging_period) == 0:
            dWaveOpAdj.append(solver.compute_dWaveOp('time', solver_data))
        if k == 0:
            break
        solver_data.advance()
    if do_ic:
        # Scale by -dt (adjoint time integral) and the imaging period.
        ic *= (-1) * dt
        ic *= imaging_period
        if m0.padded is not True:
            # BUG FIX: the original compared strings with `is`, which relies
            # on CPython interning and emits a SyntaxWarning; use `==`.
            if solver.inv_padding_mode == 'add':
                ic = ic.add_padding()
            else:
                ic = ic.without_padding()
    retval = dict()
    if 'adjointfield' in return_parameters:
        # Fields were collected backwards in time; reverse to forward order.
        qs = list(vs)
        qs.reverse()
        retval['adjointfield'] = qs
    if 'dWaveOpAdj' in return_parameters:
        dWaveOpAdj.reverse()
        retval['dWaveOpAdj'] = dWaveOpAdj
    if do_ic:
        retval['imaging_condition'] = ic
    return retval
For constant density: m*q_tt - lap q = resid, where m = 1.0/c**2
For variable density: m1*q_tt - div(m2 grad)q = resid, where m1=1.0/kappa, m2=1.0/rho, and C = (kappa/rho)**0.5
Parameters
----------
shot : pysit.Shot
Gives the receiver model for the right hand side.
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
operand_dWaveOpAdj : list of ndarray
Right hand side component in the wave equation space, usually something to do with the imaging component...this needs resolving
operand_simdata : ndarray
Right hand side component in the data space, usually the residual.
return_parameters : list of {'adjointfield', 'ic'}
dWaveOp : ndarray
Imaging component from the forward model.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* q is the adjoint field.
* ic is the imaging component. Because this function computes many of
the things required to compute the imaging condition, there is an option
to compute the imaging condition as we go. This should be used to save
computational effort. If the imaging condition is to be computed, the
optional argument utt must be present.
Imaging Condition for variable density has components:
ic.m1 = u_tt * q
ic.m2 = grad(u) dot grad(q)<|endoftext|>
|
2bd20a09cc140d0fe06c2857b7668665369483f4d3a0c379f1a86883df41761b
|
def linear_forward_model(self, shot, m0, m1, return_parameters=None, dWaveOp0=None):
    """Applies the linearized (Born) forward model, centered at m0, to the perturbation m1.

    Parameters
    ----------
    shot : pysit.Shot
        Gives the source signal approximation for the right hand side.
    m0 : solver.ModelParameters
        The parameters upon which to center the linear approximation.
    m1 : solver.ModelParameters
        The model perturbation to which the linear forward model is applied.
    return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}, optional
        Values to return.  Defaults to returning nothing extra.
    dWaveOp0 : list of ndarray, optional
        Precomputed wave-operator derivative of the background field u0, one
        entry per time step.  If None, u0 is re-simulated on the fly
        (potentially expensive).

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters that contains the specified data.

    Notes
    -----
    * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
    * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
    * If dWaveOp0 is not specified, it may be computed on the fly at potentially high expense.
    """
    # Fix: the original signature used a shared mutable default
    # (return_parameters=[]); it is only read here, but the None sentinel
    # removes the hazard (flake8-bugbear B006) with identical caller behavior.
    if return_parameters is None:
        return_parameters = []
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    source = shot.sources
    # Pad the perturbation so it matches the solver's padded computational grid.
    m1_padded = m1.with_padding(padding_mode='edge')
    if ('wavefield1' in return_parameters):
        us = list()
    if ('simdata' in return_parameters):
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
    if ('dWaveOp0' in return_parameters):
        dWaveOp0ret = list()
    if ('dWaveOp1' in return_parameters):
        dWaveOp1 = list()
    solver_data = solver.SolverData()
    if (dWaveOp0 is None):
        # No precomputed background derivative: co-simulate u0 on the fly,
        # keeping a rolling two-step window of right-hand-side buffers.
        solver_data_u0 = solver.SolverData()
        rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
        rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
        solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
        dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
        dWaveOp0_kp1 = dWaveOp0_k.copy()
        solver_data_u0.advance()
        (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
    else:
        solver_data_u0 = None
    for k in range(nsteps):
        uk = solver_data.k.primary_wavefield
        # Strip boundary padding before sampling or storing the field.
        uk_bulk = mesh.unpad_array(uk)
        if ('wavefield1' in return_parameters):
            us.append(uk_bulk.copy())
        if ('simdata' in return_parameters):
            shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
        if (dWaveOp0 is None):
            # Advance the background simulation one step, recycling buffers
            # instead of reallocating them each iteration.
            (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
            rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
            solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
            (dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
            dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
            solver_data_u0.advance()
        else:
            dWaveOp0_k = dWaveOp0[k]
            # At the final step there is no k+1 entry; reuse the last one.
            dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
        if ('dWaveOp0' in return_parameters):
            dWaveOp0ret.append(dWaveOp0_k)
        # Born virtual source at steps k and k+1: rhs = -m1 * dWaveOp(u0).
        if (k == 0):
            rhs_k = (m1_padded * ((- 1) * dWaveOp0_k))
            rhs_kp1 = (m1_padded * ((- 1) * dWaveOp0_kp1))
        else:
            (rhs_k, rhs_kp1) = (rhs_kp1, (m1_padded * ((- 1) * dWaveOp0_kp1)))
        solver.time_step(solver_data, rhs_k, rhs_kp1)
        if ('dWaveOp1' in return_parameters):
            dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
        # Do not advance past the final recorded step.
        if (k == (nsteps - 1)):
            break
        solver_data.advance()
    retval = dict()
    if ('wavefield1' in return_parameters):
        retval['wavefield1'] = us
    if ('dWaveOp0' in return_parameters):
        retval['dWaveOp0'] = dWaveOp0ret
    if ('dWaveOp1' in return_parameters):
        retval['dWaveOp1'] = dWaveOp1
    if ('simdata' in return_parameters):
        retval['simdata'] = simdata
    return retval
|
Applies the forward model to the model for the given solver.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1 : solver.ModelParameters
The parameters upon which to apply the linear forward model to.
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.
|
pysit/modeling/temporal_modeling.py
|
linear_forward_model
|
zfang-slim/PysitForPython3
| 0
|
python
|
def linear_forward_model(self, shot, m0, m1, return_parameters=[], dWaveOp0=None):
    "Applies the forward model to the model for the given solver.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
    # Linearized (Born) forward model: propagates the scattered field u1 driven
    # by the virtual source -m1 * dWaveOp(u0), where u0 is the background field.
    # NOTE(review): return_parameters=[] is a shared mutable default (lint B006);
    # it is only read here so behavior is unaffected, but a None sentinel is safer.
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    source = shot.sources
    # Pad the perturbation onto the solver's padded computational grid.
    m1_padded = m1.with_padding(padding_mode='edge')
    if ('wavefield1' in return_parameters):
        us = list()
    if ('simdata' in return_parameters):
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
    if ('dWaveOp0' in return_parameters):
        dWaveOp0ret = list()
    if ('dWaveOp1' in return_parameters):
        dWaveOp1 = list()
    solver_data = solver.SolverData()
    if (dWaveOp0 is None):
        # No precomputed background derivative: co-simulate u0 on the fly,
        # keeping a rolling two-step window of right-hand-side buffers.
        solver_data_u0 = solver.SolverData()
        rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
        rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
        solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
        dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
        dWaveOp0_kp1 = dWaveOp0_k.copy()
        solver_data_u0.advance()
        (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
    else:
        solver_data_u0 = None
    for k in range(nsteps):
        uk = solver_data.k.primary_wavefield
        # Strip boundary padding before sampling or storing the field.
        uk_bulk = mesh.unpad_array(uk)
        if ('wavefield1' in return_parameters):
            us.append(uk_bulk.copy())
        if ('simdata' in return_parameters):
            shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
        if (dWaveOp0 is None):
            # Advance the background simulation one step, recycling buffers.
            (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
            rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
            solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
            (dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
            dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
            solver_data_u0.advance()
        else:
            dWaveOp0_k = dWaveOp0[k]
            # At the last step there is no k+1 entry; reuse the final one.
            dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
        if ('dWaveOp0' in return_parameters):
            dWaveOp0ret.append(dWaveOp0_k)
        # Born virtual source at steps k and k+1: rhs = -m1 * dWaveOp(u0).
        if (k == 0):
            rhs_k = (m1_padded * ((- 1) * dWaveOp0_k))
            rhs_kp1 = (m1_padded * ((- 1) * dWaveOp0_kp1))
        else:
            (rhs_k, rhs_kp1) = (rhs_kp1, (m1_padded * ((- 1) * dWaveOp0_kp1)))
        solver.time_step(solver_data, rhs_k, rhs_kp1)
        if ('dWaveOp1' in return_parameters):
            dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
        # Do not advance past the final recorded step.
        if (k == (nsteps - 1)):
            break
        solver_data.advance()
    retval = dict()
    if ('wavefield1' in return_parameters):
        retval['wavefield1'] = us
    if ('dWaveOp0' in return_parameters):
        retval['dWaveOp0'] = dWaveOp0ret
    if ('dWaveOp1' in return_parameters):
        retval['dWaveOp1'] = dWaveOp1
    if ('simdata' in return_parameters):
        retval['simdata'] = simdata
    return retval
|
def linear_forward_model(self, shot, m0, m1, return_parameters=[], dWaveOp0=None):
    "Applies the forward model to the model for the given solver.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
    # Linearized (Born) forward model: propagates the scattered field u1 driven
    # by the virtual source -m1 * dWaveOp(u0), where u0 is the background field.
    # NOTE(review): return_parameters=[] is a shared mutable default (lint B006);
    # read-only here, but a None sentinel would be safer.
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    source = shot.sources
    # Pad the perturbation onto the solver's padded computational grid.
    m1_padded = m1.with_padding(padding_mode='edge')
    if ('wavefield1' in return_parameters):
        us = list()
    if ('simdata' in return_parameters):
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
    if ('dWaveOp0' in return_parameters):
        dWaveOp0ret = list()
    if ('dWaveOp1' in return_parameters):
        dWaveOp1 = list()
    solver_data = solver.SolverData()
    if (dWaveOp0 is None):
        # No precomputed background derivative: co-simulate u0 on the fly,
        # keeping a rolling two-step window of right-hand-side buffers.
        solver_data_u0 = solver.SolverData()
        rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
        rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
        solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
        dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
        dWaveOp0_kp1 = dWaveOp0_k.copy()
        solver_data_u0.advance()
        (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
    else:
        solver_data_u0 = None
    for k in range(nsteps):
        uk = solver_data.k.primary_wavefield
        # Strip boundary padding before sampling or storing the field.
        uk_bulk = mesh.unpad_array(uk)
        if ('wavefield1' in return_parameters):
            us.append(uk_bulk.copy())
        if ('simdata' in return_parameters):
            shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
        if (dWaveOp0 is None):
            # Advance the background simulation one step, recycling buffers.
            (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
            rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
            solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
            (dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
            dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
            solver_data_u0.advance()
        else:
            dWaveOp0_k = dWaveOp0[k]
            # At the last step there is no k+1 entry; reuse the final one.
            dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
        if ('dWaveOp0' in return_parameters):
            dWaveOp0ret.append(dWaveOp0_k)
        # Born virtual source at steps k and k+1: rhs = -m1 * dWaveOp(u0).
        if (k == 0):
            rhs_k = (m1_padded * ((- 1) * dWaveOp0_k))
            rhs_kp1 = (m1_padded * ((- 1) * dWaveOp0_kp1))
        else:
            (rhs_k, rhs_kp1) = (rhs_kp1, (m1_padded * ((- 1) * dWaveOp0_kp1)))
        solver.time_step(solver_data, rhs_k, rhs_kp1)
        if ('dWaveOp1' in return_parameters):
            dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
        if (k == (nsteps - 1)):
            break
        solver_data.advance()
    retval = dict()
    if ('wavefield1' in return_parameters):
        retval['wavefield1'] = us
    if ('dWaveOp0' in return_parameters):
        retval['dWaveOp0'] = dWaveOp0ret
    if ('dWaveOp1' in return_parameters):
        retval['dWaveOp1'] = dWaveOp1
    if ('simdata' in return_parameters):
        retval['simdata'] = simdata
    # NOTE(review): the trailing "<|docstring|>" text below is dataset-extraction
    # residue fused onto the return line, not Python source.
    return retval<|docstring|>Applies the forward model to the model for the given solver.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1 : solver.ModelParameters
The parameters upon which to apply the linear forward model to.
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.<|endoftext|>
|
5ad396f5f44afc0ed7133c5d41eb6fdcb42cc2c5739436245386387c52970f3a
|
def linear_forward_model_extend(self, shots, m0, m1_extend, max_sub_offset, h, return_parameters=None, DWaveOp0In=None):
    """Applies the subsurface-offset extended linearized forward model to m1_extend, shot by shot.

    Parameters
    ----------
    shots : list of pysit.Shot
        Gives the source signal approximation for the right hand side, one per shot.
    m0 : solver.ModelParameters
        The parameters upon which to center the linear approximation.
    m1_extend : ExtendedModelingParameter2D
        Extended model perturbation to which the linear forward model is applied.
    max_sub_offset : float
        Maximum subsurface offset for extended modeling.
    h : float
        Subsurface offset interval.
    return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}, optional
        Values to return; each result is keyed by shot index.
    DWaveOp0In : per-shot container of lists of ndarray, optional
        Precomputed background wave-operator derivatives, indexed
        ``DWaveOp0In[shot][time_step]``.  If None, the background field is
        re-simulated for every shot (potentially expensive).

    Returns
    -------
    retval : dict
        Dictionary whose keys are return_parameters; each value is a dict
        keyed by the shot's position in ``shots``.

    Notes
    -----
    * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
    * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
    """
    # Fixes relative to the previous revision:
    #   * return_parameters=[] shared mutable default replaced by None sentinel (B006).
    #   * the per-shot wavefield accumulator was created as dict() but used with
    #     .append() -- AttributeError whenever 'wavefield1' was requested; now a list.
    #   * the precomputed branch indexed an undefined name `dWaveOp0` -- NameError
    #     whenever DWaveOp0In was supplied; it now indexes DWaveOp0In[i].
    #   * retval['wavefield1'] returned only the last shot's field (us); it now
    #     returns the per-shot dict Us, consistent with the other return keys.
    if return_parameters is None:
        return_parameters = []
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    # Number of subsurface-offset samples: offsets -max..+max in steps of h.
    nh = ((2 * int((max_sub_offset / h))) + 1)
    sh_sub = m1_extend.sh_sub
    dof_sub = m1_extend.dof_sub
    mesh_ih = copy.deepcopy(mesh)
    # Per-shot output containers, keyed by shot index.
    if ('wavefield1' in return_parameters):
        Us = dict()
    if ('simdata' in return_parameters):
        Simdata = dict()
    if ('dWaveOp0' in return_parameters):
        DWaveOp0ret = dict()
    if ('dWaveOp1' in return_parameters):
        DWaveOp1 = dict()
    # NOTE(review): a single SolverData is shared across all shots without an
    # explicit reset between them (as in the original code) -- confirm the
    # solver reinitializes it, or whether wavefield state leaks between shots.
    solver_data = solver.SolverData()
    for i in range(len(shots)):
        shot = shots[i]
        source = shot.sources
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
        us = list()
        dWaveOp1 = list()
        dWaveOp0ret = list()
        if (DWaveOp0In is None):
            # No precomputed background derivative: co-simulate u0 on the fly,
            # keeping a rolling two-step window of right-hand-side buffers.
            solver_data_u0 = solver.SolverData()
            rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
            rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
            rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
            rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
            solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
            dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
            dWaveOp0_kp1 = dWaveOp0_k.copy()
            solver_data_u0.advance()
            (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
        else:
            solver_data_u0 = None
        for k in range(nsteps):
            uk = solver_data.k.primary_wavefield
            # Strip boundary padding before sampling or storing the field.
            uk_bulk = mesh.unpad_array(uk)
            if ('wavefield1' in return_parameters):
                us.append(uk_bulk.copy())
            if ('simdata' in return_parameters):
                shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
            if (DWaveOp0In is None):
                # Advance the background simulation one step, recycling buffers.
                (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
                rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
                solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
                (dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
                dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
                solver_data_u0.advance()
            else:
                dWaveOp0_k = DWaveOp0In[i][k]
                # At the last step there is no k+1 entry; reuse the final one.
                dWaveOp0_kp1 = (DWaveOp0In[i][(k + 1)] if (k < (nsteps - 1)) else DWaveOp0In[i][k])
            if ('dWaveOp0' in return_parameters):
                dWaveOp0ret.append(dWaveOp0_k)
            # Extended Born source built from the offset-shifted perturbation.
            if (k == 0):
                rhs_k = self.create_extended_rhs(m1_extend, dWaveOp0_k, mesh, mesh_ih, dof_sub, sh_sub, nh)
                rhs_kp1 = self.create_extended_rhs(m1_extend, dWaveOp0_kp1, mesh, mesh_ih, dof_sub, sh_sub, nh)
            else:
                (rhs_k, rhs_kp1) = (rhs_kp1, self.create_extended_rhs(m1_extend, dWaveOp0_kp1, mesh, mesh_ih, dof_sub, sh_sub, nh))
            solver.time_step(solver_data, rhs_k, rhs_kp1)
            if ('dWaveOp1' in return_parameters):
                dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
            # Do not advance past the final recorded step.
            if (k == (nsteps - 1)):
                break
            solver_data.advance()
        # Stash this shot's results under its index.
        if ('dWaveOp1' in return_parameters):
            DWaveOp1[i] = dWaveOp1
        if ('dWaveOp0' in return_parameters):
            DWaveOp0ret[i] = dWaveOp0ret
        if ('simdata' in return_parameters):
            Simdata[i] = simdata
        if ('wavefield1' in return_parameters):
            Us[i] = us
    retval = dict()
    if ('wavefield1' in return_parameters):
        retval['wavefield1'] = Us
    if ('dWaveOp0' in return_parameters):
        retval['dWaveOp0'] = DWaveOp0ret
    if ('dWaveOp1' in return_parameters):
        retval['dWaveOp1'] = DWaveOp1
    if ('simdata' in return_parameters):
        retval['simdata'] = Simdata
    return retval
|
Applies the forward model to the model for the given solver.
Parameters
----------
shots : a list of pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1_extend : extended model perturbation, it is a structure of ExtendedModelingParameter2D
The parameters upon which to apply the linear forward model to.
max_sub_offset : maximum subsurface offset for extended modeling
h : subsurface offset interval
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.
|
pysit/modeling/temporal_modeling.py
|
linear_forward_model_extend
|
zfang-slim/PysitForPython3
| 0
|
python
|
def linear_forward_model_extend(self, shots, m0, m1_extend, max_sub_offset, h, return_parameters=[], DWaveOp0In=None):
    "Applies the forward model to the model for the given solver.\n\n Parameters\n ----------\n shots : a list of pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1_extend : extended model perturbation, it is a structure of ExtendedModelingParameter2D\n The parameters upon which to apply the linear forward model to.\n max_offset: maximum subsurface offset for extended modeling\n h : subsurface offest interval\n\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
    # Subsurface-offset extended Born modeling, looping over shots.
    # NOTE(review): return_parameters=[] is a shared mutable default (lint B006).
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    # Number of subsurface-offset samples: offsets -max..+max in steps of h.
    nh = ((2 * int((max_sub_offset / h))) + 1)
    sh_sub = m1_extend.sh_sub
    dof_sub = m1_extend.dof_sub
    mesh_ih = copy.deepcopy(mesh)
    # Capitalized containers collect per-shot results; the lowercase ones here
    # are placeholders that are re-created inside the shot loop below.
    if ('wavefield1' in return_parameters):
        us = dict()
        Us = dict()
    if ('simdata' in return_parameters):
        Simdata = dict()
        simdata = dict()
    if ('dWaveOp0' in return_parameters):
        dWaveOp0ret = dict()
        DWaveOp0ret = dict()
    if ('dWaveOp1' in return_parameters):
        dWaveOp1 = dict()
        DWaveOp1 = dict()
    # NOTE(review): one SolverData is shared across shots without an explicit
    # reset between them -- confirm wavefield state does not leak between shots.
    solver_data = solver.SolverData()
    for i in range(len(shots)):
        shot = shots[i]
        source = shot.sources
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
        # NOTE(review): us is a dict here but .append() is called on it below --
        # AttributeError whenever 'wavefield1' is requested; should be list().
        us = dict()
        dWaveOp1 = list()
        dWaveOp0ret = list()
        if ('simdata' in return_parameters):
            Simdata[i] = dict()
        if ('dWaveOp0' in return_parameters):
            DWaveOp0ret[i] = list()
        if ('dWaveOp1' in return_parameters):
            DWaveOp1[i] = list()
        if (DWaveOp0In is None):
            # No precomputed background derivative: co-simulate u0 on the fly,
            # keeping a rolling two-step window of right-hand-side buffers.
            solver_data_u0 = solver.SolverData()
            rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
            rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
            rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
            rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
            solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
            dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
            dWaveOp0_kp1 = dWaveOp0_k.copy()
            solver_data_u0.advance()
            (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
        else:
            solver_data_u0 = None
        for k in range(nsteps):
            uk = solver_data.k.primary_wavefield
            # Strip boundary padding before sampling or storing the field.
            uk_bulk = mesh.unpad_array(uk)
            if ('wavefield1' in return_parameters):
                us.append(uk_bulk.copy())
            if ('simdata' in return_parameters):
                shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
            if (DWaveOp0In is None):
                # Advance the background simulation one step, recycling buffers.
                (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
                rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
                solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
                (dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
                dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
                solver_data_u0.advance()
            else:
                dWaveOp0_k = DWaveOp0In[i][k]
                # NOTE(review): `dWaveOp0` is not defined in this function (the
                # parameter is DWaveOp0In) -- NameError when this branch runs;
                # likely intended DWaveOp0In[i][(k + 1)] / DWaveOp0In[i][k].
                dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
            if ('dWaveOp0' in return_parameters):
                dWaveOp0ret.append(dWaveOp0_k)
            # Extended Born source built from the offset-shifted perturbation.
            if (k == 0):
                rhs_k = self.create_extended_rhs(m1_extend, dWaveOp0_k, mesh, mesh_ih, dof_sub, sh_sub, nh)
                rhs_kp1 = self.create_extended_rhs(m1_extend, dWaveOp0_kp1, mesh, mesh_ih, dof_sub, sh_sub, nh)
            else:
                (rhs_k, rhs_kp1) = (rhs_kp1, self.create_extended_rhs(m1_extend, dWaveOp0_kp1, mesh, mesh_ih, dof_sub, sh_sub, nh))
            solver.time_step(solver_data, rhs_k, rhs_kp1)
            if ('dWaveOp1' in return_parameters):
                dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
            if (k == (nsteps - 1)):
                break
            solver_data.advance()
        # Stash this shot's results under its index.
        if ('dWaveOp1' in return_parameters):
            DWaveOp1[i] = dWaveOp1
        if ('dWaveOp0' in return_parameters):
            DWaveOp0ret[i] = dWaveOp0ret
        if ('simdata' in return_parameters):
            Simdata[i] = simdata
        if ('wavefield1' in return_parameters):
            Us[i] = us
    retval = dict()
    if ('wavefield1' in return_parameters):
        # NOTE(review): returns only the last shot's `us`; the other keys return
        # per-shot containers -- likely intended `Us` here.
        retval['wavefield1'] = us
    if ('dWaveOp0' in return_parameters):
        retval['dWaveOp0'] = DWaveOp0ret
    if ('dWaveOp1' in return_parameters):
        retval['dWaveOp1'] = DWaveOp1
    if ('simdata' in return_parameters):
        retval['simdata'] = Simdata
    return retval
|
def linear_forward_model_extend(self, shots, m0, m1_extend, max_sub_offset, h, return_parameters=[], DWaveOp0In=None):
    "Applies the forward model to the model for the given solver.\n\n Parameters\n ----------\n shots : a list of pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1_extend : extended model perturbation, it is a structure of ExtendedModelingParameter2D\n The parameters upon which to apply the linear forward model to.\n max_offset: maximum subsurface offset for extended modeling\n h : subsurface offest interval\n\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
    # Subsurface-offset extended Born modeling, looping over shots.
    # NOTE(review): return_parameters=[] is a shared mutable default (lint B006).
    solver = self.solver
    solver.model_parameters = m0
    mesh = solver.mesh
    d = solver.domain
    dt = solver.dt
    nsteps = solver.nsteps
    # Number of subsurface-offset samples: offsets -max..+max in steps of h.
    nh = ((2 * int((max_sub_offset / h))) + 1)
    sh_sub = m1_extend.sh_sub
    dof_sub = m1_extend.dof_sub
    mesh_ih = copy.deepcopy(mesh)
    # Capitalized containers collect per-shot results; the lowercase ones here
    # are placeholders that are re-created inside the shot loop below.
    if ('wavefield1' in return_parameters):
        us = dict()
        Us = dict()
    if ('simdata' in return_parameters):
        Simdata = dict()
        simdata = dict()
    if ('dWaveOp0' in return_parameters):
        dWaveOp0ret = dict()
        DWaveOp0ret = dict()
    if ('dWaveOp1' in return_parameters):
        dWaveOp1 = dict()
        DWaveOp1 = dict()
    # NOTE(review): one SolverData is shared across shots without an explicit
    # reset between them -- confirm wavefield state does not leak between shots.
    solver_data = solver.SolverData()
    for i in range(len(shots)):
        shot = shots[i]
        source = shot.sources
        simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
        # NOTE(review): us is a dict here but .append() is called on it below --
        # AttributeError whenever 'wavefield1' is requested; should be list().
        us = dict()
        dWaveOp1 = list()
        dWaveOp0ret = list()
        if ('simdata' in return_parameters):
            Simdata[i] = dict()
        if ('dWaveOp0' in return_parameters):
            DWaveOp0ret[i] = list()
        if ('dWaveOp1' in return_parameters):
            DWaveOp1[i] = list()
        if (DWaveOp0In is None):
            # No precomputed background derivative: co-simulate u0 on the fly,
            # keeping a rolling two-step window of right-hand-side buffers.
            solver_data_u0 = solver.SolverData()
            rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
            rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
            rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
            rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
            solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
            dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
            dWaveOp0_kp1 = dWaveOp0_k.copy()
            solver_data_u0.advance()
            (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
        else:
            solver_data_u0 = None
        for k in range(nsteps):
            uk = solver_data.k.primary_wavefield
            # Strip boundary padding before sampling or storing the field.
            uk_bulk = mesh.unpad_array(uk)
            if ('wavefield1' in return_parameters):
                us.append(uk_bulk.copy())
            if ('simdata' in return_parameters):
                shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
            if (DWaveOp0In is None):
                # Advance the background simulation one step, recycling buffers.
                (rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
                rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
                solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
                (dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
                dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
                solver_data_u0.advance()
            else:
                dWaveOp0_k = DWaveOp0In[i][k]
                # NOTE(review): `dWaveOp0` is not defined in this function (the
                # parameter is DWaveOp0In) -- NameError when this branch runs;
                # likely intended DWaveOp0In[i][(k + 1)] / DWaveOp0In[i][k].
                dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
            if ('dWaveOp0' in return_parameters):
                dWaveOp0ret.append(dWaveOp0_k)
            # Extended Born source built from the offset-shifted perturbation.
            if (k == 0):
                rhs_k = self.create_extended_rhs(m1_extend, dWaveOp0_k, mesh, mesh_ih, dof_sub, sh_sub, nh)
                rhs_kp1 = self.create_extended_rhs(m1_extend, dWaveOp0_kp1, mesh, mesh_ih, dof_sub, sh_sub, nh)
            else:
                (rhs_k, rhs_kp1) = (rhs_kp1, self.create_extended_rhs(m1_extend, dWaveOp0_kp1, mesh, mesh_ih, dof_sub, sh_sub, nh))
            solver.time_step(solver_data, rhs_k, rhs_kp1)
            if ('dWaveOp1' in return_parameters):
                dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
            if (k == (nsteps - 1)):
                break
            solver_data.advance()
        # Stash this shot's results under its index.
        if ('dWaveOp1' in return_parameters):
            DWaveOp1[i] = dWaveOp1
        if ('dWaveOp0' in return_parameters):
            DWaveOp0ret[i] = dWaveOp0ret
        if ('simdata' in return_parameters):
            Simdata[i] = simdata
        if ('wavefield1' in return_parameters):
            Us[i] = us
    retval = dict()
    if ('wavefield1' in return_parameters):
        # NOTE(review): returns only the last shot's `us`; likely intended `Us`.
        retval['wavefield1'] = us
    if ('dWaveOp0' in return_parameters):
        retval['dWaveOp0'] = DWaveOp0ret
    if ('dWaveOp1' in return_parameters):
        retval['dWaveOp1'] = DWaveOp1
    if ('simdata' in return_parameters):
        retval['simdata'] = Simdata
    # NOTE(review): the trailing "<|docstring|>" text below is dataset-extraction
    # residue fused onto the return line, not Python source.
    return retval<|docstring|>Applies the forward model to the model for the given solver.
Parameters
----------
shots : a list of pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1_extend : extended model perturbation, it is a structure of ExtendedModelingParameter2D
The parameters upon which to apply the linear forward model to.
max_sub_offset : maximum subsurface offset for extended modeling
h : subsurface offset interval
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.<|endoftext|>
|
fae1e1c4737751ba4b9749687b521df036dfdbab8abd27a63cc32b3b7b9afb9e
|
def linear_forward_model_kappa(self, shot, m0, m1, return_parameters=[], dWaveOp0=None):
"Applies the forward model to the model for the given solver, in terms of a pertubation of kappa.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
if ('wavefield1' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp0' in return_parameters):
dWaveOp0ret = list()
if ('dWaveOp1' in return_parameters):
dWaveOp1 = list()
solver_data = solver.SolverData()
if (dWaveOp0 is None):
solver_data_u0 = solver.SolverData()
rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
dWaveOp0_kp1 = dWaveOp0_k.copy()
solver_data_u0.advance()
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
else:
solver_data_u0 = None
for k in range(nsteps):
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield1' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (dWaveOp0 is None):
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
(dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
solver_data_u0.advance()
else:
dWaveOp0_k = dWaveOp0[k]
dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
if ('dWaveOp0' in return_parameters):
dWaveOp0ret.append(dWaveOp0_k)
model_1 = (1.0 / m1.kappa)
model_1 = mesh.pad_array(model_1)
if (k == 0):
rhs_k = (model_1 * ((- 1.0) * dWaveOp0_k))
rhs_kp1 = (model_1 * ((- 1.0) * dWaveOp0_kp1))
else:
(rhs_k, rhs_kp1) = (rhs_kp1, (model_1 * ((- 1.0) * dWaveOp0_kp1)))
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp1' in return_parameters):
dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield1' in return_parameters):
retval['wavefield1'] = us
if ('dWaveOp0' in return_parameters):
retval['dWaveOp0'] = dWaveOp0ret
if ('dWaveOp1' in return_parameters):
retval['dWaveOp1'] = dWaveOp1
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval
|
Applies the forward model to the model for the given solver, in terms of a pertubation of kappa.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1 : solver.ModelParameters
The parameters upon which to apply the linear forward model to.
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.
|
pysit/modeling/temporal_modeling.py
|
linear_forward_model_kappa
|
zfang-slim/PysitForPython3
| 0
|
python
|
def linear_forward_model_kappa(self, shot, m0, m1, return_parameters=[], dWaveOp0=None):
"Applies the forward model to the model for the given solver, in terms of a pertubation of kappa.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
if ('wavefield1' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp0' in return_parameters):
dWaveOp0ret = list()
if ('dWaveOp1' in return_parameters):
dWaveOp1 = list()
solver_data = solver.SolverData()
if (dWaveOp0 is None):
solver_data_u0 = solver.SolverData()
rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
dWaveOp0_kp1 = dWaveOp0_k.copy()
solver_data_u0.advance()
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
else:
solver_data_u0 = None
for k in range(nsteps):
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield1' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (dWaveOp0 is None):
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
(dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
solver_data_u0.advance()
else:
dWaveOp0_k = dWaveOp0[k]
dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
if ('dWaveOp0' in return_parameters):
dWaveOp0ret.append(dWaveOp0_k)
model_1 = (1.0 / m1.kappa)
model_1 = mesh.pad_array(model_1)
if (k == 0):
rhs_k = (model_1 * ((- 1.0) * dWaveOp0_k))
rhs_kp1 = (model_1 * ((- 1.0) * dWaveOp0_kp1))
else:
(rhs_k, rhs_kp1) = (rhs_kp1, (model_1 * ((- 1.0) * dWaveOp0_kp1)))
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp1' in return_parameters):
dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield1' in return_parameters):
retval['wavefield1'] = us
if ('dWaveOp0' in return_parameters):
retval['dWaveOp0'] = dWaveOp0ret
if ('dWaveOp1' in return_parameters):
retval['dWaveOp1'] = dWaveOp1
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval
|
def linear_forward_model_kappa(self, shot, m0, m1, return_parameters=[], dWaveOp0=None):
"Applies the forward model to the model for the given solver, in terms of a pertubation of kappa.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
if ('wavefield1' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp0' in return_parameters):
dWaveOp0ret = list()
if ('dWaveOp1' in return_parameters):
dWaveOp1 = list()
solver_data = solver.SolverData()
if (dWaveOp0 is None):
solver_data_u0 = solver.SolverData()
rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
dWaveOp0_kp1 = dWaveOp0_k.copy()
solver_data_u0.advance()
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
else:
solver_data_u0 = None
for k in range(nsteps):
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield1' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (dWaveOp0 is None):
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
(dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
solver_data_u0.advance()
else:
dWaveOp0_k = dWaveOp0[k]
dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
if ('dWaveOp0' in return_parameters):
dWaveOp0ret.append(dWaveOp0_k)
model_1 = (1.0 / m1.kappa)
model_1 = mesh.pad_array(model_1)
if (k == 0):
rhs_k = (model_1 * ((- 1.0) * dWaveOp0_k))
rhs_kp1 = (model_1 * ((- 1.0) * dWaveOp0_kp1))
else:
(rhs_k, rhs_kp1) = (rhs_kp1, (model_1 * ((- 1.0) * dWaveOp0_kp1)))
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp1' in return_parameters):
dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield1' in return_parameters):
retval['wavefield1'] = us
if ('dWaveOp0' in return_parameters):
retval['dWaveOp0'] = dWaveOp0ret
if ('dWaveOp1' in return_parameters):
retval['dWaveOp1'] = dWaveOp1
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval<|docstring|>Applies the forward model to the model for the given solver, in terms of a pertubation of kappa.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1 : solver.ModelParameters
The parameters upon which to apply the linear forward model to.
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.<|endoftext|>
|
cdfbd056dc27e6b8e4ae3a4f1822215ec79f0054ace8d85da2ad2cf096c87359
|
def linear_forward_model_rho(self, shot, m0, m1, return_parameters=[], dWaveOp0=None, wavefield=None):
"Applies the forward model to the model for the given solver in terms of a pertubation of rho.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
sh = mesh.shape(include_bc=True, as_grid=True)
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
model_2 = (1.0 / m1.rho)
model_2 = mesh.pad_array(model_2)
print("WARNING: Ian's operators are still used here even though the solver has changed. These tests need to be updated.")
rp = dict()
rp['laplacian'] = True
Lap = build_heterogenous_matrices(sh, [mesh.x.delta, mesh.z.delta], model_2.reshape((- 1)), rp=rp)
if ('wavefield1' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp0' in return_parameters):
dWaveOp0ret = list()
if ('dWaveOp1' in return_parameters):
dWaveOp1 = list()
solver_data = solver.SolverData()
if (dWaveOp0 is None):
solver_data_u0 = solver.SolverData()
rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
dWaveOp0_kp1 = dWaveOp0_k.copy()
solver_data_u0.advance()
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
else:
solver_data_u0 = None
for k in range(nsteps):
u0k = wavefield[k]
if (k < (nsteps - 1)):
u0kp1 = wavefield[(k + 1)]
else:
u0kp1 = wavefield[k]
u0k = mesh.pad_array(u0k)
u0kp1 = mesh.pad_array(u0kp1)
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield1' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (dWaveOp0 is None):
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
(dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
solver_data_u0.advance()
else:
dWaveOp0_k = dWaveOp0[k]
dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
if ('dWaveOp0' in return_parameters):
dWaveOp0ret.append(dWaveOp0_k)
G0 = (Lap * u0k)
G1 = (Lap * u0kp1)
if (k == 0):
rhs_k = G0
rhs_kp1 = G1
else:
(rhs_k, rhs_kp1) = (rhs_kp1, G1)
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp1' in return_parameters):
dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield1' in return_parameters):
retval['wavefield1'] = us
if ('dWaveOp0' in return_parameters):
retval['dWaveOp0'] = dWaveOp0ret
if ('dWaveOp1' in return_parameters):
retval['dWaveOp1'] = dWaveOp1
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval
|
Applies the forward model to the model for the given solver in terms of a pertubation of rho.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1 : solver.ModelParameters
The parameters upon which to apply the linear forward model to.
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.
|
pysit/modeling/temporal_modeling.py
|
linear_forward_model_rho
|
zfang-slim/PysitForPython3
| 0
|
python
|
def linear_forward_model_rho(self, shot, m0, m1, return_parameters=[], dWaveOp0=None, wavefield=None):
"Applies the forward model to the model for the given solver in terms of a pertubation of rho.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
sh = mesh.shape(include_bc=True, as_grid=True)
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
model_2 = (1.0 / m1.rho)
model_2 = mesh.pad_array(model_2)
print("WARNING: Ian's operators are still used here even though the solver has changed. These tests need to be updated.")
rp = dict()
rp['laplacian'] = True
Lap = build_heterogenous_matrices(sh, [mesh.x.delta, mesh.z.delta], model_2.reshape((- 1)), rp=rp)
if ('wavefield1' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp0' in return_parameters):
dWaveOp0ret = list()
if ('dWaveOp1' in return_parameters):
dWaveOp1 = list()
solver_data = solver.SolverData()
if (dWaveOp0 is None):
solver_data_u0 = solver.SolverData()
rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
dWaveOp0_kp1 = dWaveOp0_k.copy()
solver_data_u0.advance()
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
else:
solver_data_u0 = None
for k in range(nsteps):
u0k = wavefield[k]
if (k < (nsteps - 1)):
u0kp1 = wavefield[(k + 1)]
else:
u0kp1 = wavefield[k]
u0k = mesh.pad_array(u0k)
u0kp1 = mesh.pad_array(u0kp1)
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield1' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (dWaveOp0 is None):
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
(dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
solver_data_u0.advance()
else:
dWaveOp0_k = dWaveOp0[k]
dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
if ('dWaveOp0' in return_parameters):
dWaveOp0ret.append(dWaveOp0_k)
G0 = (Lap * u0k)
G1 = (Lap * u0kp1)
if (k == 0):
rhs_k = G0
rhs_kp1 = G1
else:
(rhs_k, rhs_kp1) = (rhs_kp1, G1)
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp1' in return_parameters):
dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield1' in return_parameters):
retval['wavefield1'] = us
if ('dWaveOp0' in return_parameters):
retval['dWaveOp0'] = dWaveOp0ret
if ('dWaveOp1' in return_parameters):
retval['dWaveOp1'] = dWaveOp1
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval
|
def linear_forward_model_rho(self, shot, m0, m1, return_parameters=[], dWaveOp0=None, wavefield=None):
"Applies the forward model to the model for the given solver in terms of a pertubation of rho.\n\n Parameters\n ----------\n shot : pysit.Shot\n Gives the source signal approximation for the right hand side.\n m0 : solver.ModelParameters\n The parameters upon where to center the linear approximation.\n m1 : solver.ModelParameters\n The parameters upon which to apply the linear forward model to.\n return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}\n Values to return.\n u0tt : ndarray\n Derivative field required for the imaging condition to be used as right hand side.\n\n\n Returns\n -------\n retval : dict\n Dictionary whose keys are return_parameters that contains the specified data.\n\n Notes\n -----\n * u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.\n * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.\n * If u0tt is not specified, it may be computed on the fly at potentially high expense.\n\n "
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
sh = mesh.shape(include_bc=True, as_grid=True)
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
model_2 = (1.0 / m1.rho)
model_2 = mesh.pad_array(model_2)
print("WARNING: Ian's operators are still used here even though the solver has changed. These tests need to be updated.")
rp = dict()
rp['laplacian'] = True
Lap = build_heterogenous_matrices(sh, [mesh.x.delta, mesh.z.delta], model_2.reshape((- 1)), rp=rp)
if ('wavefield1' in return_parameters):
us = list()
if ('simdata' in return_parameters):
simdata = np.zeros((solver.nsteps, shot.receivers.receiver_count))
if ('dWaveOp0' in return_parameters):
dWaveOp0ret = list()
if ('dWaveOp1' in return_parameters):
dWaveOp1 = list()
solver_data = solver.SolverData()
if (dWaveOp0 is None):
solver_data_u0 = solver.SolverData()
rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f((0 * dt)))
rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f((1 * dt)))
solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)
dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
dWaveOp0_kp1 = dWaveOp0_k.copy()
solver_data_u0.advance()
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_k, rhs_u0_kp1)
else:
solver_data_u0 = None
for k in range(nsteps):
u0k = wavefield[k]
if (k < (nsteps - 1)):
u0kp1 = wavefield[(k + 1)]
else:
u0kp1 = wavefield[k]
u0k = mesh.pad_array(u0k)
u0kp1 = mesh.pad_array(u0kp1)
uk = solver_data.k.primary_wavefield
uk_bulk = mesh.unpad_array(uk)
if ('wavefield1' in return_parameters):
us.append(uk_bulk.copy())
if ('simdata' in return_parameters):
shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata)
if (dWaveOp0 is None):
(rhs_u0_kp1, rhs_u0_kp2) = (rhs_u0_kp2, rhs_u0_kp1)
rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f(((k + 2) * dt)))
solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)
(dWaveOp0_k, dWaveOp0_kp1) = (dWaveOp0_kp1, dWaveOp0_k)
dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
solver_data_u0.advance()
else:
dWaveOp0_k = dWaveOp0[k]
dWaveOp0_kp1 = (dWaveOp0[(k + 1)] if (k < (nsteps - 1)) else dWaveOp0[k])
if ('dWaveOp0' in return_parameters):
dWaveOp0ret.append(dWaveOp0_k)
G0 = (Lap * u0k)
G1 = (Lap * u0kp1)
if (k == 0):
rhs_k = G0
rhs_kp1 = G1
else:
(rhs_k, rhs_kp1) = (rhs_kp1, G1)
solver.time_step(solver_data, rhs_k, rhs_kp1)
if ('dWaveOp1' in return_parameters):
dWaveOp1.append(solver.compute_dWaveOp('time', solver_data))
if (k == (nsteps - 1)):
break
solver_data.advance()
retval = dict()
if ('wavefield1' in return_parameters):
retval['wavefield1'] = us
if ('dWaveOp0' in return_parameters):
retval['dWaveOp0'] = dWaveOp0ret
if ('dWaveOp1' in return_parameters):
retval['dWaveOp1'] = dWaveOp1
if ('simdata' in return_parameters):
retval['simdata'] = simdata
return retval<|docstring|>Applies the forward model to the model for the given solver in terms of a pertubation of rho.
Parameters
----------
shot : pysit.Shot
Gives the source signal approximation for the right hand side.
m0 : solver.ModelParameters
The parameters upon where to center the linear approximation.
m1 : solver.ModelParameters
The parameters upon which to apply the linear forward model to.
return_parameters : list of {'wavefield1', 'dWaveOp1', 'dWaveOp0', 'simdata'}
Values to return.
u0tt : ndarray
Derivative field required for the imaging condition to be used as right hand side.
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* u1 is used as the target field universally. It could be velocity potential, it could be displacement, it could be pressure.
* u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
* If u0tt is not specified, it may be computed on the fly at potentially high expense.<|endoftext|>
|
2ef9fe60715485861e61aff8cf57c205a3e9ceed29c9616e0191a2aca0ada41f
|
def parser(file_name) -> argparse.ArgumentParser:
'\n Add arguments needed by the framework\n '
return _with_default_args(argparse.ArgumentParser(), file_name)
|
Add arguments needed by the framework
|
exputils.py
|
parser
|
meta-inf/qbdiv
| 2
|
python
|
def parser(file_name) -> argparse.ArgumentParser:
'\n \n '
return _with_default_args(argparse.ArgumentParser(), file_name)
|
def parser(file_name) -> argparse.ArgumentParser:
'\n \n '
return _with_default_args(argparse.ArgumentParser(), file_name)<|docstring|>Add arguments needed by the framework<|endoftext|>
|
bb61deb74ae2ec711eeebc34976ad9278ca8fb279d886ec3e72ee4aa1717db1e
|
def preflight(args, data_dump=None, create_logdir=True):
'\n Routine checks, backup parameters \n :param create_logdir: whether this worker should create args.dir\n '
def get_output(cmd):
cp = subprocess.check_output(cmd, shell=True, stderr=subprocess.PIPE)
return cp.decode('utf-8')
try:
diff_to_head = get_output('git diff HEAD')
commit_hash = get_output('git rev-parse HEAD').rstrip()
except subprocess.CalledProcessError as e:
if args.production:
raise Exception('Git check failed: {}'.format(str(e)))
diff_to_head = ''
commit_hash = 'null'
print('Commit: {}; production: {}'.format(commit_hash[:8], args.production))
args.dir = os.path.expanduser(args.dir)
if (create_logdir and (not args.resume)):
if os.path.exists(os.path.join(args.dir, 'hps.txt')):
if args.production:
raise Exception('Directory {} exists'.format(args.dir))
else:
shutil.rmtree(args.dir)
if (not os.path.exists(args.dir)):
os.makedirs(args.dir)
else:
assert os.path.exists(args.dir)
if create_logdir:
global _log_dir
_log_dir = args.dir
with open(os.path.join(args.dir, 'hps.txt'), 'w') as fout:
dct = args.__dict__.copy()
dct['commit_hash'] = commit_hash
for k in dct:
v = dct[k]
if dataclasses.is_dataclass(type(v)):
dct[k] = dataclasses.asdict(v)
print(json.dumps(dct), file=fout)
with open(os.path.join(args.dir, 'repo-diff.txt'), 'w') as fout:
print(diff_to_head, file=fout)
with open(os.path.join(args.dir, 'dat.bin'), 'wb') as fout:
import pickle
pickle.dump(data_dump, fout)
|
Routine checks, backup parameters
:param create_logdir: whether this worker should create args.dir
|
exputils.py
|
preflight
|
meta-inf/qbdiv
| 2
|
python
|
def preflight(args, data_dump=None, create_logdir=True):
'\n Routine checks, backup parameters \n :param create_logdir: whether this worker should create args.dir\n '
def get_output(cmd):
cp = subprocess.check_output(cmd, shell=True, stderr=subprocess.PIPE)
return cp.decode('utf-8')
try:
diff_to_head = get_output('git diff HEAD')
commit_hash = get_output('git rev-parse HEAD').rstrip()
except subprocess.CalledProcessError as e:
if args.production:
raise Exception('Git check failed: {}'.format(str(e)))
diff_to_head =
commit_hash = 'null'
print('Commit: {}; production: {}'.format(commit_hash[:8], args.production))
args.dir = os.path.expanduser(args.dir)
if (create_logdir and (not args.resume)):
if os.path.exists(os.path.join(args.dir, 'hps.txt')):
if args.production:
raise Exception('Directory {} exists'.format(args.dir))
else:
shutil.rmtree(args.dir)
if (not os.path.exists(args.dir)):
os.makedirs(args.dir)
else:
assert os.path.exists(args.dir)
if create_logdir:
global _log_dir
_log_dir = args.dir
with open(os.path.join(args.dir, 'hps.txt'), 'w') as fout:
dct = args.__dict__.copy()
dct['commit_hash'] = commit_hash
for k in dct:
v = dct[k]
if dataclasses.is_dataclass(type(v)):
dct[k] = dataclasses.asdict(v)
print(json.dumps(dct), file=fout)
with open(os.path.join(args.dir, 'repo-diff.txt'), 'w') as fout:
print(diff_to_head, file=fout)
with open(os.path.join(args.dir, 'dat.bin'), 'wb') as fout:
import pickle
pickle.dump(data_dump, fout)
|
def preflight(args, data_dump=None, create_logdir=True):
'\n Routine checks, backup parameters \n :param create_logdir: whether this worker should create args.dir\n '
def get_output(cmd):
cp = subprocess.check_output(cmd, shell=True, stderr=subprocess.PIPE)
return cp.decode('utf-8')
try:
diff_to_head = get_output('git diff HEAD')
commit_hash = get_output('git rev-parse HEAD').rstrip()
except subprocess.CalledProcessError as e:
if args.production:
raise Exception('Git check failed: {}'.format(str(e)))
diff_to_head =
commit_hash = 'null'
print('Commit: {}; production: {}'.format(commit_hash[:8], args.production))
args.dir = os.path.expanduser(args.dir)
if (create_logdir and (not args.resume)):
if os.path.exists(os.path.join(args.dir, 'hps.txt')):
if args.production:
raise Exception('Directory {} exists'.format(args.dir))
else:
shutil.rmtree(args.dir)
if (not os.path.exists(args.dir)):
os.makedirs(args.dir)
else:
assert os.path.exists(args.dir)
if create_logdir:
global _log_dir
_log_dir = args.dir
with open(os.path.join(args.dir, 'hps.txt'), 'w') as fout:
dct = args.__dict__.copy()
dct['commit_hash'] = commit_hash
for k in dct:
v = dct[k]
if dataclasses.is_dataclass(type(v)):
dct[k] = dataclasses.asdict(v)
print(json.dumps(dct), file=fout)
with open(os.path.join(args.dir, 'repo-diff.txt'), 'w') as fout:
print(diff_to_head, file=fout)
with open(os.path.join(args.dir, 'dat.bin'), 'wb') as fout:
import pickle
pickle.dump(data_dump, fout)<|docstring|>Routine checks, backup parameters
:param create_logdir: whether this worker should create args.dir<|endoftext|>
|
d01bb7718c42a15e23d7ade02bbc845c80562ec47889a0c9e74f6e1c7987a937
|
def __init__(self, master_kernel):
'Instantiate the sumo traffic light kernel.\n\n Parameters\n ----------\n master_kernel : flow.core.kernel.Kernel\n the higher level kernel (used to call methods from other\n sub-kernels)\n '
KernelTrafficLight.__init__(self, master_kernel)
self.__ids = []
self.num_meters = 0
|
Instantiate the sumo traffic light kernel.
Parameters
----------
master_kernel : flow.core.kernel.Kernel
the higher level kernel (used to call methods from other
sub-kernels)
|
flow/core/kernel/traffic_light/aimsun.py
|
__init__
|
nathanlct/flow
| 805
|
python
|
def __init__(self, master_kernel):
'Instantiate the sumo traffic light kernel.\n\n Parameters\n ----------\n master_kernel : flow.core.kernel.Kernel\n the higher level kernel (used to call methods from other\n sub-kernels)\n '
KernelTrafficLight.__init__(self, master_kernel)
self.__ids = []
self.num_meters = 0
|
def __init__(self, master_kernel):
'Instantiate the sumo traffic light kernel.\n\n Parameters\n ----------\n master_kernel : flow.core.kernel.Kernel\n the higher level kernel (used to call methods from other\n sub-kernels)\n '
KernelTrafficLight.__init__(self, master_kernel)
self.__ids = []
self.num_meters = 0<|docstring|>Instantiate the sumo traffic light kernel.
Parameters
----------
master_kernel : flow.core.kernel.Kernel
the higher level kernel (used to call methods from other
sub-kernels)<|endoftext|>
|
aa05f43691aa28250043895188c2f7bf1f187de87ec77c4e1d950bfb95c088e7
|
def pass_api(self, kernel_api):
'See parent class.'
self.kernel_api = kernel_api
|
See parent class.
|
flow/core/kernel/traffic_light/aimsun.py
|
pass_api
|
nathanlct/flow
| 805
|
python
|
def pass_api(self, kernel_api):
self.kernel_api = kernel_api
|
def pass_api(self, kernel_api):
self.kernel_api = kernel_api<|docstring|>See parent class.<|endoftext|>
|
f312a94a30128a7d75c6b85f1e502fd14d23fc647c08fff188e803a82a7d217a
|
def update(self, reset):
'See parent class.'
pass
|
See parent class.
|
flow/core/kernel/traffic_light/aimsun.py
|
update
|
nathanlct/flow
| 805
|
python
|
def update(self, reset):
pass
|
def update(self, reset):
pass<|docstring|>See parent class.<|endoftext|>
|
b740a9f3c16a61533b752be9229129f7e37d79dadea79d43a04f1a1692b9fd1c
|
def get_ids(self):
'See parent class.'
return self.kernel_api.get_traffic_light_ids()
|
See parent class.
|
flow/core/kernel/traffic_light/aimsun.py
|
get_ids
|
nathanlct/flow
| 805
|
python
|
def get_ids(self):
return self.kernel_api.get_traffic_light_ids()
|
def get_ids(self):
return self.kernel_api.get_traffic_light_ids()<|docstring|>See parent class.<|endoftext|>
|
d2ae2305b07d72fe7716fc365c0a5eae8626bef4b4181b248914f85ba9fb8782
|
def set_state(self, meter_aimsun_id, state):
'Set the state of the traffic lights on a specific meter.\n\n Parameters\n ----------\n meter_aimsun_id : int\n aimsun id of the meter\n state : int\n desired state(s) for the traffic light\n 0: red\n 1: green\n 2: yellow\n link_index : int, optional\n index of the link whose traffic light state is meant to be changed.\n If no value is provided, the lights on all links are updated.\n '
self.kernel_api.set_traffic_light_state(meter_aimsun_id, None, state)
|
Set the state of the traffic lights on a specific meter.
Parameters
----------
meter_aimsun_id : int
aimsun id of the meter
state : int
desired state(s) for the traffic light
0: red
1: green
2: yellow
link_index : int, optional
index of the link whose traffic light state is meant to be changed.
If no value is provided, the lights on all links are updated.
|
flow/core/kernel/traffic_light/aimsun.py
|
set_state
|
nathanlct/flow
| 805
|
python
|
def set_state(self, meter_aimsun_id, state):
'Set the state of the traffic lights on a specific meter.\n\n Parameters\n ----------\n meter_aimsun_id : int\n aimsun id of the meter\n state : int\n desired state(s) for the traffic light\n 0: red\n 1: green\n 2: yellow\n link_index : int, optional\n index of the link whose traffic light state is meant to be changed.\n If no value is provided, the lights on all links are updated.\n '
self.kernel_api.set_traffic_light_state(meter_aimsun_id, None, state)
|
def set_state(self, meter_aimsun_id, state):
'Set the state of the traffic lights on a specific meter.\n\n Parameters\n ----------\n meter_aimsun_id : int\n aimsun id of the meter\n state : int\n desired state(s) for the traffic light\n 0: red\n 1: green\n 2: yellow\n link_index : int, optional\n index of the link whose traffic light state is meant to be changed.\n If no value is provided, the lights on all links are updated.\n '
self.kernel_api.set_traffic_light_state(meter_aimsun_id, None, state)<|docstring|>Set the state of the traffic lights on a specific meter.
Parameters
----------
meter_aimsun_id : int
aimsun id of the meter
state : int
desired state(s) for the traffic light
0: red
1: green
2: yellow
link_index : int, optional
index of the link whose traffic light state is meant to be changed.
If no value is provided, the lights on all links are updated.<|endoftext|>
|
a4c23dfd46244d162a7fdddf7346d20ea05282790e5c76cd04e79357945627f8
|
def get_state(self, meter_aimsun_id):
'Return the state of the traffic light(s) at the specified node.\n\n Parameters\n ----------\n meter_aimsun_id: int\n aimsun id of the meter\n\n Returns\n -------\n state : int\n desired state(s) for the traffic light\n 0: red\n 1: green\n 2: yellow\n '
return self.kernel_api.get_traffic_light_state(meter_aimsun_id)
|
Return the state of the traffic light(s) at the specified node.
Parameters
----------
meter_aimsun_id: int
aimsun id of the meter
Returns
-------
state : int
desired state(s) for the traffic light
0: red
1: green
2: yellow
|
flow/core/kernel/traffic_light/aimsun.py
|
get_state
|
nathanlct/flow
| 805
|
python
|
def get_state(self, meter_aimsun_id):
'Return the state of the traffic light(s) at the specified node.\n\n Parameters\n ----------\n meter_aimsun_id: int\n aimsun id of the meter\n\n Returns\n -------\n state : int\n desired state(s) for the traffic light\n 0: red\n 1: green\n 2: yellow\n '
return self.kernel_api.get_traffic_light_state(meter_aimsun_id)
|
def get_state(self, meter_aimsun_id):
'Return the state of the traffic light(s) at the specified node.\n\n Parameters\n ----------\n meter_aimsun_id: int\n aimsun id of the meter\n\n Returns\n -------\n state : int\n desired state(s) for the traffic light\n 0: red\n 1: green\n 2: yellow\n '
return self.kernel_api.get_traffic_light_state(meter_aimsun_id)<|docstring|>Return the state of the traffic light(s) at the specified node.
Parameters
----------
meter_aimsun_id: int
aimsun id of the meter
Returns
-------
state : int
desired state(s) for the traffic light
0: red
1: green
2: yellow<|endoftext|>
|
cc3e96da8e9cbb437734d050a0a2d1e46c48eab848da40b609bff867b2366911
|
def match_to_html(match):
'\n Render a Module Characteristic\n '
return '<p class="module-characteristic"><strong>{0}</strong>: {1}</p>'.format(match.group('key'), match.group('value'))
|
Render a Module Characteristic
|
pyccoon/markdown_extensions.py
|
match_to_html
|
ckald/pyccoon
| 14
|
python
|
def match_to_html(match):
'\n \n '
return '<p class="module-characteristic"><strong>{0}</strong>: {1}</p>'.format(match.group('key'), match.group('value'))
|
def match_to_html(match):
'\n \n '
return '<p class="module-characteristic"><strong>{0}</strong>: {1}</p>'.format(match.group('key'), match.group('value'))<|docstring|>Render a Module Characteristic<|endoftext|>
|
f8e2295f77187aefd06a688d06ea9f6fc131e361957e96938a6103d9a6b93c7c
|
def template(self, match):
' Intended markup for TODO strings. The type of the string is used as a class. '
return '<span class={0:s}><strong>{1:s}</strong>{2:s}</span>'.format(match.group(1).lower(), match.group(1), match.group(2))
|
Intended markup for TODO strings. The type of the string is used as a class.
|
pyccoon/markdown_extensions.py
|
template
|
ckald/pyccoon
| 14
|
python
|
def template(self, match):
' '
return '<span class={0:s}><strong>{1:s}</strong>{2:s}</span>'.format(match.group(1).lower(), match.group(1), match.group(2))
|
def template(self, match):
' '
return '<span class={0:s}><strong>{1:s}</strong>{2:s}</span>'.format(match.group(1).lower(), match.group(1), match.group(2))<|docstring|>Intended markup for TODO strings. The type of the string is used as a class.<|endoftext|>
|
5cce326f37464741babf941e06dea291de03c4abd315d3541803a060d1e979b6
|
def run(self, lines):
' String matching is case insensitive '
return [self.regex.sub(self.template, line) for line in lines]
|
String matching is case insensitive
|
pyccoon/markdown_extensions.py
|
run
|
ckald/pyccoon
| 14
|
python
|
def run(self, lines):
' '
return [self.regex.sub(self.template, line) for line in lines]
|
def run(self, lines):
' '
return [self.regex.sub(self.template, line) for line in lines]<|docstring|>String matching is case insensitive<|endoftext|>
|
f99f71537b9e465ccd925105879f67fb5f596a672f3f05c0d7a95e6a3f810a33
|
def run(self, lines):
'\n Searches for a line starting with a literal followed by a colon and multiple spaces:\n\n some arbitrary parameter name: and its definition separated by `:\\s\\s+`\n `parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works\n\n And turns it into:\n\n some arbitrary parameter name: and its definition separated by `:\\s\\s+`\n `parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works\n '
new_lines = []
for line in lines:
match = re.match('^(\\s*)([^:]+):\\s{2,}(.+)', line)
if match:
new_lines.append((match.group(1) + match.group(2)))
new_lines.append(((match.group(1) + ': ') + match.group(3)))
new_lines.append('')
else:
new_lines.append(line)
return new_lines
|
Searches for a line starting with a literal followed by a colon and multiple spaces:
some arbitrary parameter name: and its definition separated by `:\s\s+`
`parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works
And turns it into:
some arbitrary parameter name: and its definition separated by `:\s\s+`
`parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works
|
pyccoon/markdown_extensions.py
|
run
|
ckald/pyccoon
| 14
|
python
|
def run(self, lines):
'\n Searches for a line starting with a literal followed by a colon and multiple spaces:\n\n some arbitrary parameter name: and its definition separated by `:\\s\\s+`\n `parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works\n\n And turns it into:\n\n some arbitrary parameter name: and its definition separated by `:\\s\\s+`\n `parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works\n '
new_lines = []
for line in lines:
match = re.match('^(\\s*)([^:]+):\\s{2,}(.+)', line)
if match:
new_lines.append((match.group(1) + match.group(2)))
new_lines.append(((match.group(1) + ': ') + match.group(3)))
new_lines.append()
else:
new_lines.append(line)
return new_lines
|
def run(self, lines):
'\n Searches for a line starting with a literal followed by a colon and multiple spaces:\n\n some arbitrary parameter name: and its definition separated by `:\\s\\s+`\n `parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works\n\n And turns it into:\n\n some arbitrary parameter name: and its definition separated by `:\\s\\s+`\n `parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works\n '
new_lines = []
for line in lines:
match = re.match('^(\\s*)([^:]+):\\s{2,}(.+)', line)
if match:
new_lines.append((match.group(1) + match.group(2)))
new_lines.append(((match.group(1) + ': ') + match.group(3)))
new_lines.append()
else:
new_lines.append(line)
return new_lines<|docstring|>Searches for a line starting with a literal followed by a colon and multiple spaces:
some arbitrary parameter name: and its definition separated by `:\s\s+`
`parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works
And turns it into:
some arbitrary parameter name: and its definition separated by `:\s\s+`
`parameter name` might contain everything except a [colon](//en.wikipedia.org/wiki/Colon_(punctuation)) and be multiline: it still works<|endoftext|>
|
2eb6f71a0e1c1aa5b3f45bb1a7060704557a99b106f665e89cd5420f671a4126
|
def run(self, lines):
'\n :param lines: Documentation lines\n :return: Lines of text with parsed PyDoc comments\n '
new_lines = []
for text in lines:
for (regex, template) in self.regexps.items():
text = regex.sub(template, text)
new_lines.append(text)
return new_lines
|
:param lines: Documentation lines
:return: Lines of text with parsed PyDoc comments
|
pyccoon/markdown_extensions.py
|
run
|
ckald/pyccoon
| 14
|
python
|
def run(self, lines):
'\n :param lines: Documentation lines\n :return: Lines of text with parsed PyDoc comments\n '
new_lines = []
for text in lines:
for (regex, template) in self.regexps.items():
text = regex.sub(template, text)
new_lines.append(text)
return new_lines
|
def run(self, lines):
'\n :param lines: Documentation lines\n :return: Lines of text with parsed PyDoc comments\n '
new_lines = []
for text in lines:
for (regex, template) in self.regexps.items():
text = regex.sub(template, text)
new_lines.append(text)
return new_lines<|docstring|>:param lines: Documentation lines
:return: Lines of text with parsed PyDoc comments<|endoftext|>
|
11b5309989124c8db26cb652a0e9a3af8c5fafec0ec83dd30e2aac164b22628e
|
def run(self, lines):
' Method ensures that there is exactly one space between 2 joined strings. '
return regex.sub(sub, '\n'.join(lines)).split('\n')
|
Method ensures that there is exactly one space between 2 joined strings.
|
pyccoon/markdown_extensions.py
|
run
|
ckald/pyccoon
| 14
|
python
|
def run(self, lines):
' '
return regex.sub(sub, '\n'.join(lines)).split('\n')
|
def run(self, lines):
' '
return regex.sub(sub, '\n'.join(lines)).split('\n')<|docstring|>Method ensures that there is exactly one space between 2 joined strings.<|endoftext|>
|
f322f658ad6148ac19402f818fe66697361a29c8ba9c4fec4102c23668cc70cc
|
def get_asci_labels(tools_predictions_list, labels_list):
'\n\tReturns ASCI labels:\n\t\tAssign to each instance, the index of the tool that best predicted its label.\n\t\tIn case of conflict, assign the index of the tool that performed the best on overall.\n\n\targs:\n\t\ttools_predictions_list: a list containing the tools predictions for a set of systems\n\t\tlabels_list: a list containing the labels for this same set of systems\n\t'
tools_sorted_indexes = get_tools_sorted_indexes(tools_predictions_list, labels_list)
asci_labels = []
for (i, tools_predictions) in enumerate(tools_predictions_list):
labels = labels_list[i]
for (j, label) in enumerate(labels):
asci_label = tools_sorted_indexes[(- 1)]
for tool_index in tools_sorted_indexes:
if (tools_predictions[(tool_index, j)] == label):
asci_label = tool_index
asci_labels.append(asci_label)
return np.reshape(np.array(asci_labels), ((- 1), 1))
|
Returns ASCI labels:
Assign to each instance, the index of the tool that best predicted its label.
In case of conflict, assign the index of the tool that performed the best on overall.
args:
tools_predictions_list: a list containing the tools predictions for a set of systems
labels_list: a list containing the labels for this same set of systems
|
approaches/asci/asci_utils.py
|
get_asci_labels
|
antoineBarbez/SMAD
| 4
|
python
|
def get_asci_labels(tools_predictions_list, labels_list):
'\n\tReturns ASCI labels:\n\t\tAssign to each instance, the index of the tool that best predicted its label.\n\t\tIn case of conflict, assign the index of the tool that performed the best on overall.\n\n\targs:\n\t\ttools_predictions_list: a list containing the tools predictions for a set of systems\n\t\tlabels_list: a list containing the labels for this same set of systems\n\t'
tools_sorted_indexes = get_tools_sorted_indexes(tools_predictions_list, labels_list)
asci_labels = []
for (i, tools_predictions) in enumerate(tools_predictions_list):
labels = labels_list[i]
for (j, label) in enumerate(labels):
asci_label = tools_sorted_indexes[(- 1)]
for tool_index in tools_sorted_indexes:
if (tools_predictions[(tool_index, j)] == label):
asci_label = tool_index
asci_labels.append(asci_label)
return np.reshape(np.array(asci_labels), ((- 1), 1))
|
def get_asci_labels(tools_predictions_list, labels_list):
'\n\tReturns ASCI labels:\n\t\tAssign to each instance, the index of the tool that best predicted its label.\n\t\tIn case of conflict, assign the index of the tool that performed the best on overall.\n\n\targs:\n\t\ttools_predictions_list: a list containing the tools predictions for a set of systems\n\t\tlabels_list: a list containing the labels for this same set of systems\n\t'
tools_sorted_indexes = get_tools_sorted_indexes(tools_predictions_list, labels_list)
asci_labels = []
for (i, tools_predictions) in enumerate(tools_predictions_list):
labels = labels_list[i]
for (j, label) in enumerate(labels):
asci_label = tools_sorted_indexes[(- 1)]
for tool_index in tools_sorted_indexes:
if (tools_predictions[(tool_index, j)] == label):
asci_label = tool_index
asci_labels.append(asci_label)
return np.reshape(np.array(asci_labels), ((- 1), 1))<|docstring|>Returns ASCI labels:
Assign to each instance, the index of the tool that best predicted its label.
In case of conflict, assign the index of the tool that performed the best on overall.
args:
tools_predictions_list: a list containing the tools predictions for a set of systems
labels_list: a list containing the labels for this same set of systems<|endoftext|>
|
2750c4221247c3463e5f4d206d4391759230e4674656450524d3b757499a57fc
|
def get_tools_sorted_indexes(tools_predictions_list, labels_list):
'\n\tReturns the indexes of the tools, sorted according to their performances\n\t\texample: \n\t\t\tif tool 0 is better than tool 1 which is better that tool 2\n\t\t\tthen return [2, 1, 0]\n\n\targs:\n\t\ttools_predictions_list: a list containing the tools predictions for a set of systems\n\t\tlabels_list: a list containing the labels for this same set of systems\n\t'
assert (len(tools_predictions_list) == len(labels_list))
overall_labels = reduce((lambda x1, x2: np.concatenate((x1, x2), axis=0)), labels_list)
overall_tools_predictions = reduce((lambda x1, x2: np.concatenate((x1, x2), axis=1)), tools_predictions_list)
overall_tools_performances = [detection_utils.mcc(pred, overall_labels) for pred in overall_tools_predictions]
return np.argsort(np.array(overall_tools_performances))
|
Returns the indexes of the tools, sorted according to their performances
example:
if tool 0 is better than tool 1 which is better that tool 2
then return [2, 1, 0]
args:
tools_predictions_list: a list containing the tools predictions for a set of systems
labels_list: a list containing the labels for this same set of systems
|
approaches/asci/asci_utils.py
|
get_tools_sorted_indexes
|
antoineBarbez/SMAD
| 4
|
python
|
def get_tools_sorted_indexes(tools_predictions_list, labels_list):
'\n\tReturns the indexes of the tools, sorted according to their performances\n\t\texample: \n\t\t\tif tool 0 is better than tool 1 which is better that tool 2\n\t\t\tthen return [2, 1, 0]\n\n\targs:\n\t\ttools_predictions_list: a list containing the tools predictions for a set of systems\n\t\tlabels_list: a list containing the labels for this same set of systems\n\t'
assert (len(tools_predictions_list) == len(labels_list))
overall_labels = reduce((lambda x1, x2: np.concatenate((x1, x2), axis=0)), labels_list)
overall_tools_predictions = reduce((lambda x1, x2: np.concatenate((x1, x2), axis=1)), tools_predictions_list)
overall_tools_performances = [detection_utils.mcc(pred, overall_labels) for pred in overall_tools_predictions]
return np.argsort(np.array(overall_tools_performances))
|
def get_tools_sorted_indexes(tools_predictions_list, labels_list):
'\n\tReturns the indexes of the tools, sorted according to their performances\n\t\texample: \n\t\t\tif tool 0 is better than tool 1 which is better that tool 2\n\t\t\tthen return [2, 1, 0]\n\n\targs:\n\t\ttools_predictions_list: a list containing the tools predictions for a set of systems\n\t\tlabels_list: a list containing the labels for this same set of systems\n\t'
assert (len(tools_predictions_list) == len(labels_list))
overall_labels = reduce((lambda x1, x2: np.concatenate((x1, x2), axis=0)), labels_list)
overall_tools_predictions = reduce((lambda x1, x2: np.concatenate((x1, x2), axis=1)), tools_predictions_list)
overall_tools_performances = [detection_utils.mcc(pred, overall_labels) for pred in overall_tools_predictions]
return np.argsort(np.array(overall_tools_performances))<|docstring|>Returns the indexes of the tools, sorted according to their performances
example:
if tool 0 is better than tool 1 which is better that tool 2
then return [2, 1, 0]
args:
tools_predictions_list: a list containing the tools predictions for a set of systems
labels_list: a list containing the labels for this same set of systems<|endoftext|>
|
a3ba955959a968782931a7df5f334d1aa2da67ab1e89799edab054a1b5fce782
|
def get_tools_predictions(antipattern, system):
'\n\tReturns a list containing the predictions of each tool\n\t'
if (antipattern == 'god_class'):
return np.array([decor.predict(system), hist_gc.predict(system), jdeodorant_gc.predict(system)])
else:
return np.array([incode.predict(system), hist_fe.predict(system), jdeodorant_fe.predict(system)])
|
Returns a list containing the predictions of each tool
|
approaches/asci/asci_utils.py
|
get_tools_predictions
|
antoineBarbez/SMAD
| 4
|
python
|
def get_tools_predictions(antipattern, system):
'\n\t\n\t'
if (antipattern == 'god_class'):
return np.array([decor.predict(system), hist_gc.predict(system), jdeodorant_gc.predict(system)])
else:
return np.array([incode.predict(system), hist_fe.predict(system), jdeodorant_fe.predict(system)])
|
def get_tools_predictions(antipattern, system):
'\n\t\n\t'
if (antipattern == 'god_class'):
return np.array([decor.predict(system), hist_gc.predict(system), jdeodorant_gc.predict(system)])
else:
return np.array([incode.predict(system), hist_fe.predict(system), jdeodorant_fe.predict(system)])<|docstring|>Returns a list containing the predictions of each tool<|endoftext|>
|
30399113b949bf8fbc1d4d3e513cfea47e906d3272b26440373261273b031611
|
def main(argv=None):
'Load in arguments and get going.'
parser = ArgParser(description='Read the input landmask, and correct to boolean values.')
parser.add_argument('--force', dest='force', default=False, action='store_true', help='If True, ancillaries will be generated even if doing so will overwrite existing files.')
parser.add_argument('input_filepath_standard', metavar='INPUT_FILE_STANDARD', help='A path to an input NetCDF file to be processed')
parser.add_argument('output_filepath', metavar='OUTPUT_FILE', help='The output path for the processed NetCDF')
args = parser.parse_args(args=argv)
if ((not os.path.exists(args.output_filepath)) or args.force):
landmask = load_cube(args.input_filepath_standard)
land_binary_mask = process(landmask)
save_netcdf(land_binary_mask, args.output_filepath)
else:
print('File already exists here: ', args.output_filepath)
|
Load in arguments and get going.
|
lib/improver/cli/generate_landmask_ancillary.py
|
main
|
TomekTrzeciak/improver
| 0
|
python
|
def main(argv=None):
parser = ArgParser(description='Read the input landmask, and correct to boolean values.')
parser.add_argument('--force', dest='force', default=False, action='store_true', help='If True, ancillaries will be generated even if doing so will overwrite existing files.')
parser.add_argument('input_filepath_standard', metavar='INPUT_FILE_STANDARD', help='A path to an input NetCDF file to be processed')
parser.add_argument('output_filepath', metavar='OUTPUT_FILE', help='The output path for the processed NetCDF')
args = parser.parse_args(args=argv)
if ((not os.path.exists(args.output_filepath)) or args.force):
landmask = load_cube(args.input_filepath_standard)
land_binary_mask = process(landmask)
save_netcdf(land_binary_mask, args.output_filepath)
else:
print('File already exists here: ', args.output_filepath)
|
def main(argv=None):
parser = ArgParser(description='Read the input landmask, and correct to boolean values.')
parser.add_argument('--force', dest='force', default=False, action='store_true', help='If True, ancillaries will be generated even if doing so will overwrite existing files.')
parser.add_argument('input_filepath_standard', metavar='INPUT_FILE_STANDARD', help='A path to an input NetCDF file to be processed')
parser.add_argument('output_filepath', metavar='OUTPUT_FILE', help='The output path for the processed NetCDF')
args = parser.parse_args(args=argv)
if ((not os.path.exists(args.output_filepath)) or args.force):
landmask = load_cube(args.input_filepath_standard)
land_binary_mask = process(landmask)
save_netcdf(land_binary_mask, args.output_filepath)
else:
print('File already exists here: ', args.output_filepath)<|docstring|>Load in arguments and get going.<|endoftext|>
|
2416c7c1a854bfdf6b98509d570d517f04d4fb8216e1520fffe1d78672ac0a3c
|
def process(landmask):
'Runs landmask ancillary generation.\n\n Read in the interpolated landmask and round\n values < 0.5 to False\n values >= 0.5 to True.\n\n Args:\n landmask (iris.cube.Cube):\n Cube to process\n\n Returns:\n (iris.cube.Cube):\n A cube landmask of boolean values.\n '
return CorrectLandSeaMask().process(landmask)
|
Runs landmask ancillary generation.
Read in the interpolated landmask and round
values < 0.5 to False
values >= 0.5 to True.
Args:
landmask (iris.cube.Cube):
Cube to process
Returns:
(iris.cube.Cube):
A cube landmask of boolean values.
|
lib/improver/cli/generate_landmask_ancillary.py
|
process
|
TomekTrzeciak/improver
| 0
|
python
|
def process(landmask):
'Runs landmask ancillary generation.\n\n Read in the interpolated landmask and round\n values < 0.5 to False\n values >= 0.5 to True.\n\n Args:\n landmask (iris.cube.Cube):\n Cube to process\n\n Returns:\n (iris.cube.Cube):\n A cube landmask of boolean values.\n '
return CorrectLandSeaMask().process(landmask)
|
def process(landmask):
'Runs landmask ancillary generation.\n\n Read in the interpolated landmask and round\n values < 0.5 to False\n values >= 0.5 to True.\n\n Args:\n landmask (iris.cube.Cube):\n Cube to process\n\n Returns:\n (iris.cube.Cube):\n A cube landmask of boolean values.\n '
return CorrectLandSeaMask().process(landmask)<|docstring|>Runs landmask ancillary generation.
Read in the interpolated landmask and round
values < 0.5 to False
values >= 0.5 to True.
Args:
landmask (iris.cube.Cube):
Cube to process
Returns:
(iris.cube.Cube):
A cube landmask of boolean values.<|endoftext|>
|
fe0e97c81ae5714fc33e7d9484b17c527a54564e97016cffaa70650d3b8383b6
|
def __init__(self, matrix):
'\n initialize your data structure here.\n :type matrix: List[List[int]]\n '
self.matrix_sum = []
if (not matrix):
return
width = len(matrix[0])
height = len(matrix)
self.matrix_sum = [[0 for x in range((width + 1))] for y in range((height + 1))]
for i in range(1, (height + 1)):
for j in range(1, (width + 1)):
self.matrix_sum[i][j] += (((self.matrix_sum[(i - 1)][j] + self.matrix_sum[i][(j - 1)]) - self.matrix_sum[(i - 1)][(j - 1)]) + matrix[(i - 1)][(j - 1)])
|
initialize your data structure here.
:type matrix: List[List[int]]
|
dp/range_sum_query_2d.py
|
__init__
|
lycheng/leetcode
| 0
|
python
|
def __init__(self, matrix):
'\n initialize your data structure here.\n :type matrix: List[List[int]]\n '
self.matrix_sum = []
if (not matrix):
return
width = len(matrix[0])
height = len(matrix)
self.matrix_sum = [[0 for x in range((width + 1))] for y in range((height + 1))]
for i in range(1, (height + 1)):
for j in range(1, (width + 1)):
self.matrix_sum[i][j] += (((self.matrix_sum[(i - 1)][j] + self.matrix_sum[i][(j - 1)]) - self.matrix_sum[(i - 1)][(j - 1)]) + matrix[(i - 1)][(j - 1)])
|
def __init__(self, matrix):
'\n initialize your data structure here.\n :type matrix: List[List[int]]\n '
self.matrix_sum = []
if (not matrix):
return
width = len(matrix[0])
height = len(matrix)
self.matrix_sum = [[0 for x in range((width + 1))] for y in range((height + 1))]
for i in range(1, (height + 1)):
for j in range(1, (width + 1)):
self.matrix_sum[i][j] += (((self.matrix_sum[(i - 1)][j] + self.matrix_sum[i][(j - 1)]) - self.matrix_sum[(i - 1)][(j - 1)]) + matrix[(i - 1)][(j - 1)])<|docstring|>initialize your data structure here.
:type matrix: List[List[int]]<|endoftext|>
|
3464c0fd699d087c957efd977d78ffaf2103ef8fcc9474f334b7eea908a405c9
|
def sumRegion(self, row1, col1, row2, col2):
'\n sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n '
if (not self.matrix_sum):
return 0
return (((self.matrix_sum[(row2 + 1)][(col2 + 1)] - self.matrix_sum[row1][(col2 + 1)]) - self.matrix_sum[(row2 + 1)][col1]) + self.matrix_sum[row1][col1])
|
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
|
dp/range_sum_query_2d.py
|
sumRegion
|
lycheng/leetcode
| 0
|
python
|
def sumRegion(self, row1, col1, row2, col2):
'\n sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n '
if (not self.matrix_sum):
return 0
return (((self.matrix_sum[(row2 + 1)][(col2 + 1)] - self.matrix_sum[row1][(col2 + 1)]) - self.matrix_sum[(row2 + 1)][col1]) + self.matrix_sum[row1][col1])
|
def sumRegion(self, row1, col1, row2, col2):
'\n sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n '
if (not self.matrix_sum):
return 0
return (((self.matrix_sum[(row2 + 1)][(col2 + 1)] - self.matrix_sum[row1][(col2 + 1)]) - self.matrix_sum[(row2 + 1)][col1]) + self.matrix_sum[row1][col1])<|docstring|>sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int<|endoftext|>
|
4d8a2f8a4e15f538def329316c0c98fce64e5b0cd1b6bcee967f666749d796c8
|
def disk_cache(f):
    """Decorator that evaluates calls only once and otherwise serves results
    from an on-disk pickle cache.

    The cache key is ``(args, sorted kwargs)``, so all arguments must be
    hashable.  The whole cache is rewritten to disk after every miss.

    :param f: the function to memoize
    :return: the wrapped function, with ``cache`` and ``cache_path`` attributes
    """
    # Sentinel distinguishing "not cached" from a cached value of None.
    MISSING = object()
    fname = '%s.%s' % (f.__module__, f.__name__)

    @wraps(f)
    def wrapper(*args, **kwargs):
        if DISABLED:
            return f(*args, **kwargs)
        key = (args, tuple(sorted(kwargs.items())))
        if (wrapper.cache is None):
            # Lazily load the cache on the first call.
            logger.debug(('Loading cache for %s() from %s' % (fname, wrapper.cache_path)))
            if os.path.exists(wrapper.cache_path):
                # Bug fix: close the file handle instead of leaking it.
                with open(wrapper.cache_path, 'rb') as fp:
                    wrapper.cache = pickle.load(fp)
            else:
                wrapper.cache = {}
        result = wrapper.cache.get(key, MISSING)
        if (result is not MISSING):
            return result
        logger.debug(('Cache miss for %s() with args %r and kwargs %r' % (fname, args, kwargs)))
        result = f(*args, **kwargs)
        wrapper.cache[key] = result
        # Bug fix: close the file handle instead of leaking it.
        with open(wrapper.cache_path, 'wb') as fp:
            pickle.dump(wrapper.cache, fp)
        return result

    if (not os.path.exists(DISK_CACHE_DIR)):
        os.mkdir(DISK_CACHE_DIR)
    wrapper.cache_path = os.path.join(DISK_CACHE_DIR, ('%s.p' % fname))
    wrapper.cache = None  # loaded lazily on first call
    return wrapper
|
Decorator that wraps a function such that calls are only evaluated once and otherwise retrieve from an on-disk cache.
|
moment_polytopes/disk_cache.py
|
disk_cache
|
amsqi/moment_polytopes
| 2
|
python
|
def disk_cache(f):
'\n \n '
MISSING = object()
fname = ('%s.%s' % (f.__module__, f.__name__))
@wraps(f)
def wrapper(*args, **kwargs):
if DISABLED:
return f(*args, **kwargs)
key = (args, tuple(sorted(kwargs.items())))
if (wrapper.cache is None):
logger.debug(('Loading cache for %s() from %s' % (fname, wrapper.cache_path)))
if os.path.exists(wrapper.cache_path):
wrapper.cache = pickle.load(open(wrapper.cache_path, 'rb'))
else:
wrapper.cache = {}
result = wrapper.cache.get(key, MISSING)
if (result is not MISSING):
return result
logger.debug(('Cache miss for %s() with args %r and kwargs %r' % (fname, args, kwargs)))
result = f(*args, **kwargs)
wrapper.cache[key] = result
pickle.dump(wrapper.cache, open(wrapper.cache_path, 'wb'))
return result
if (not os.path.exists(DISK_CACHE_DIR)):
os.mkdir(DISK_CACHE_DIR)
wrapper.cache_path = os.path.join(DISK_CACHE_DIR, ('%s.p' % fname))
wrapper.cache = None
return wrapper
|
def disk_cache(f):
'\n \n '
MISSING = object()
fname = ('%s.%s' % (f.__module__, f.__name__))
@wraps(f)
def wrapper(*args, **kwargs):
if DISABLED:
return f(*args, **kwargs)
key = (args, tuple(sorted(kwargs.items())))
if (wrapper.cache is None):
logger.debug(('Loading cache for %s() from %s' % (fname, wrapper.cache_path)))
if os.path.exists(wrapper.cache_path):
wrapper.cache = pickle.load(open(wrapper.cache_path, 'rb'))
else:
wrapper.cache = {}
result = wrapper.cache.get(key, MISSING)
if (result is not MISSING):
return result
logger.debug(('Cache miss for %s() with args %r and kwargs %r' % (fname, args, kwargs)))
result = f(*args, **kwargs)
wrapper.cache[key] = result
pickle.dump(wrapper.cache, open(wrapper.cache_path, 'wb'))
return result
if (not os.path.exists(DISK_CACHE_DIR)):
os.mkdir(DISK_CACHE_DIR)
wrapper.cache_path = os.path.join(DISK_CACHE_DIR, ('%s.p' % fname))
wrapper.cache = None
return wrapper<|docstring|>Decorator that wraps a function such that calls are only evaluated once and otherwise retrieve from an on-disk cache.<|endoftext|>
|
c5ff0118679bdaf011e2e5dfcd44cd4af4fb823d0dad65fa0b6be15ed1060e83
|
def __init__(self, object_id=None, user_id=None, timestamp=None, object_name=None, full_name=None, event=None, object_type=None, local_vars_configuration=None):
    """PublicAuditLog - a model defined in OpenAPI"""
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    # Create the backing fields read by the property getters.
    for field in ('object_id', 'user_id', 'timestamp', 'object_name',
                  'full_name', 'event', 'object_type'):
        setattr(self, '_' + field, None)
    self.discriminator = None
    # Assign through the public properties so the setters' client-side
    # validation (when enabled) is applied to the constructor arguments.
    self.object_id = object_id
    self.user_id = user_id
    self.timestamp = timestamp
    self.object_name = object_name
    self.full_name = full_name
    self.event = event
    self.object_type = object_type
|
PublicAuditLog - a model defined in OpenAPI
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
__init__
|
Ronfer/hubspot-api-python
| 117
|
python
|
def __init__(self, object_id=None, user_id=None, timestamp=None, object_name=None, full_name=None, event=None, object_type=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._object_id = None
self._user_id = None
self._timestamp = None
self._object_name = None
self._full_name = None
self._event = None
self._object_type = None
self.discriminator = None
self.object_id = object_id
self.user_id = user_id
self.timestamp = timestamp
self.object_name = object_name
self.full_name = full_name
self.event = event
self.object_type = object_type
|
def __init__(self, object_id=None, user_id=None, timestamp=None, object_name=None, full_name=None, event=None, object_type=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._object_id = None
self._user_id = None
self._timestamp = None
self._object_name = None
self._full_name = None
self._event = None
self._object_type = None
self.discriminator = None
self.object_id = object_id
self.user_id = user_id
self.timestamp = timestamp
self.object_name = object_name
self.full_name = full_name
self.event = event
self.object_type = object_type<|docstring|>PublicAuditLog - a model defined in OpenAPI<|endoftext|>
|
b717b1567394314f5707a4d06215f993c4388afd8383165b0bb49c1f713029e2
|
@property
def object_id(self):
    """Gets the object_id of this PublicAuditLog.

    The ID of the object.

    :return: The object_id of this PublicAuditLog.
    :rtype: str
    """
    return self._object_id
|
Gets the object_id of this PublicAuditLog. # noqa: E501
The ID of the object. # noqa: E501
:return: The object_id of this PublicAuditLog. # noqa: E501
:rtype: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
object_id
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def object_id(self):
'Gets the object_id of this PublicAuditLog. # noqa: E501\n\n The ID of the object. # noqa: E501\n\n :return: The object_id of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._object_id
|
@property
def object_id(self):
'Gets the object_id of this PublicAuditLog. # noqa: E501\n\n The ID of the object. # noqa: E501\n\n :return: The object_id of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._object_id<|docstring|>Gets the object_id of this PublicAuditLog. # noqa: E501
The ID of the object. # noqa: E501
:return: The object_id of this PublicAuditLog. # noqa: E501
:rtype: str<|endoftext|>
|
d4fbb71240aab50014e6449d6199780c25fe06730dc9a9573c8108cb34e74058
|
@object_id.setter
def object_id(self, object_id):
    """Sets the object_id of this PublicAuditLog.

    The ID of the object.

    :param object_id: The object_id of this PublicAuditLog.
    :type: str
    """
    # This field is required: reject None when validation is enabled.
    validate = self.local_vars_configuration.client_side_validation
    if validate and object_id is None:
        raise ValueError('Invalid value for `object_id`, must not be `None`')
    self._object_id = object_id
|
Sets the object_id of this PublicAuditLog.
The ID of the object. # noqa: E501
:param object_id: The object_id of this PublicAuditLog. # noqa: E501
:type: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
object_id
|
Ronfer/hubspot-api-python
| 117
|
python
|
@object_id.setter
def object_id(self, object_id):
'Sets the object_id of this PublicAuditLog.\n\n The ID of the object. # noqa: E501\n\n :param object_id: The object_id of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (object_id is None)):
raise ValueError('Invalid value for `object_id`, must not be `None`')
self._object_id = object_id
|
@object_id.setter
def object_id(self, object_id):
'Sets the object_id of this PublicAuditLog.\n\n The ID of the object. # noqa: E501\n\n :param object_id: The object_id of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (object_id is None)):
raise ValueError('Invalid value for `object_id`, must not be `None`')
self._object_id = object_id<|docstring|>Sets the object_id of this PublicAuditLog.
The ID of the object. # noqa: E501
:param object_id: The object_id of this PublicAuditLog. # noqa: E501
:type: str<|endoftext|>
|
5875c2c54345ffdc276ce15cd899f2c72195f5224f1053e4fd51c224e381d512
|
@property
def user_id(self):
    """Gets the user_id of this PublicAuditLog.

    The ID of the user who caused the event.

    :return: The user_id of this PublicAuditLog.
    :rtype: str
    """
    return self._user_id
|
Gets the user_id of this PublicAuditLog. # noqa: E501
The ID of the user who caused the event. # noqa: E501
:return: The user_id of this PublicAuditLog. # noqa: E501
:rtype: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
user_id
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def user_id(self):
'Gets the user_id of this PublicAuditLog. # noqa: E501\n\n The ID of the user who caused the event. # noqa: E501\n\n :return: The user_id of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._user_id
|
@property
def user_id(self):
'Gets the user_id of this PublicAuditLog. # noqa: E501\n\n The ID of the user who caused the event. # noqa: E501\n\n :return: The user_id of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._user_id<|docstring|>Gets the user_id of this PublicAuditLog. # noqa: E501
The ID of the user who caused the event. # noqa: E501
:return: The user_id of this PublicAuditLog. # noqa: E501
:rtype: str<|endoftext|>
|
e07c1dc9f1e264ba7cc09b8eaaba68c1984763372098c1644087372c27205497
|
@user_id.setter
def user_id(self, user_id):
    """Sets the user_id of this PublicAuditLog.

    The ID of the user who caused the event.

    :param user_id: The user_id of this PublicAuditLog.
    :type: str
    """
    # This field is required: reject None when validation is enabled.
    validate = self.local_vars_configuration.client_side_validation
    if validate and user_id is None:
        raise ValueError('Invalid value for `user_id`, must not be `None`')
    self._user_id = user_id
|
Sets the user_id of this PublicAuditLog.
The ID of the user who caused the event. # noqa: E501
:param user_id: The user_id of this PublicAuditLog. # noqa: E501
:type: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
user_id
|
Ronfer/hubspot-api-python
| 117
|
python
|
@user_id.setter
def user_id(self, user_id):
'Sets the user_id of this PublicAuditLog.\n\n The ID of the user who caused the event. # noqa: E501\n\n :param user_id: The user_id of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (user_id is None)):
raise ValueError('Invalid value for `user_id`, must not be `None`')
self._user_id = user_id
|
@user_id.setter
def user_id(self, user_id):
'Sets the user_id of this PublicAuditLog.\n\n The ID of the user who caused the event. # noqa: E501\n\n :param user_id: The user_id of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (user_id is None)):
raise ValueError('Invalid value for `user_id`, must not be `None`')
self._user_id = user_id<|docstring|>Sets the user_id of this PublicAuditLog.
The ID of the user who caused the event. # noqa: E501
:param user_id: The user_id of this PublicAuditLog. # noqa: E501
:type: str<|endoftext|>
|
a4fe57887f72d7fb798458dcce110df6ec150206a77ad10b041cec49b96d13b7
|
@property
def timestamp(self):
    """Gets the timestamp of this PublicAuditLog.

    The timestamp at which the event occurred.

    :return: The timestamp of this PublicAuditLog.
    :rtype: datetime
    """
    return self._timestamp
|
Gets the timestamp of this PublicAuditLog. # noqa: E501
The timestamp at which the event occurred. # noqa: E501
:return: The timestamp of this PublicAuditLog. # noqa: E501
:rtype: datetime
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
timestamp
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def timestamp(self):
'Gets the timestamp of this PublicAuditLog. # noqa: E501\n\n The timestamp at which the event occurred. # noqa: E501\n\n :return: The timestamp of this PublicAuditLog. # noqa: E501\n :rtype: datetime\n '
return self._timestamp
|
@property
def timestamp(self):
'Gets the timestamp of this PublicAuditLog. # noqa: E501\n\n The timestamp at which the event occurred. # noqa: E501\n\n :return: The timestamp of this PublicAuditLog. # noqa: E501\n :rtype: datetime\n '
return self._timestamp<|docstring|>Gets the timestamp of this PublicAuditLog. # noqa: E501
The timestamp at which the event occurred. # noqa: E501
:return: The timestamp of this PublicAuditLog. # noqa: E501
:rtype: datetime<|endoftext|>
|
81cb182b4c5f8cfd79e5544c4e83bdf3da6d0ecebcdf308b4c36160e3ca27e72
|
@timestamp.setter
def timestamp(self, timestamp):
    """Sets the timestamp of this PublicAuditLog.

    The timestamp at which the event occurred.

    :param timestamp: The timestamp of this PublicAuditLog.
    :type: datetime
    """
    # This field is required: reject None when validation is enabled.
    validate = self.local_vars_configuration.client_side_validation
    if validate and timestamp is None:
        raise ValueError('Invalid value for `timestamp`, must not be `None`')
    self._timestamp = timestamp
|
Sets the timestamp of this PublicAuditLog.
The timestamp at which the event occurred. # noqa: E501
:param timestamp: The timestamp of this PublicAuditLog. # noqa: E501
:type: datetime
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
timestamp
|
Ronfer/hubspot-api-python
| 117
|
python
|
@timestamp.setter
def timestamp(self, timestamp):
'Sets the timestamp of this PublicAuditLog.\n\n The timestamp at which the event occurred. # noqa: E501\n\n :param timestamp: The timestamp of this PublicAuditLog. # noqa: E501\n :type: datetime\n '
if (self.local_vars_configuration.client_side_validation and (timestamp is None)):
raise ValueError('Invalid value for `timestamp`, must not be `None`')
self._timestamp = timestamp
|
@timestamp.setter
def timestamp(self, timestamp):
'Sets the timestamp of this PublicAuditLog.\n\n The timestamp at which the event occurred. # noqa: E501\n\n :param timestamp: The timestamp of this PublicAuditLog. # noqa: E501\n :type: datetime\n '
if (self.local_vars_configuration.client_side_validation and (timestamp is None)):
raise ValueError('Invalid value for `timestamp`, must not be `None`')
self._timestamp = timestamp<|docstring|>Sets the timestamp of this PublicAuditLog.
The timestamp at which the event occurred. # noqa: E501
:param timestamp: The timestamp of this PublicAuditLog. # noqa: E501
:type: datetime<|endoftext|>
|
64d6ec6e7897e7e05e89638c88f429c41f59b1b139dbc865eaad8b366d5ea124
|
@property
def object_name(self):
    """Gets the object_name of this PublicAuditLog.

    The internal name of the object in HubSpot.

    :return: The object_name of this PublicAuditLog.
    :rtype: str
    """
    return self._object_name
|
Gets the object_name of this PublicAuditLog. # noqa: E501
The internal name of the object in HubSpot. # noqa: E501
:return: The object_name of this PublicAuditLog. # noqa: E501
:rtype: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
object_name
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def object_name(self):
'Gets the object_name of this PublicAuditLog. # noqa: E501\n\n The internal name of the object in HubSpot. # noqa: E501\n\n :return: The object_name of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._object_name
|
@property
def object_name(self):
'Gets the object_name of this PublicAuditLog. # noqa: E501\n\n The internal name of the object in HubSpot. # noqa: E501\n\n :return: The object_name of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._object_name<|docstring|>Gets the object_name of this PublicAuditLog. # noqa: E501
The internal name of the object in HubSpot. # noqa: E501
:return: The object_name of this PublicAuditLog. # noqa: E501
:rtype: str<|endoftext|>
|
7b01949faf2325bb1595160d22ea7467fff55bca9bc394ffb396e81d56bf7927
|
@object_name.setter
def object_name(self, object_name):
    """Sets the object_name of this PublicAuditLog.

    The internal name of the object in HubSpot.

    :param object_name: The object_name of this PublicAuditLog.
    :type: str
    """
    # This field is required: reject None when validation is enabled.
    validate = self.local_vars_configuration.client_side_validation
    if validate and object_name is None:
        raise ValueError('Invalid value for `object_name`, must not be `None`')
    self._object_name = object_name
|
Sets the object_name of this PublicAuditLog.
The internal name of the object in HubSpot. # noqa: E501
:param object_name: The object_name of this PublicAuditLog. # noqa: E501
:type: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
object_name
|
Ronfer/hubspot-api-python
| 117
|
python
|
@object_name.setter
def object_name(self, object_name):
'Sets the object_name of this PublicAuditLog.\n\n The internal name of the object in HubSpot. # noqa: E501\n\n :param object_name: The object_name of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (object_name is None)):
raise ValueError('Invalid value for `object_name`, must not be `None`')
self._object_name = object_name
|
@object_name.setter
def object_name(self, object_name):
'Sets the object_name of this PublicAuditLog.\n\n The internal name of the object in HubSpot. # noqa: E501\n\n :param object_name: The object_name of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (object_name is None)):
raise ValueError('Invalid value for `object_name`, must not be `None`')
self._object_name = object_name<|docstring|>Sets the object_name of this PublicAuditLog.
The internal name of the object in HubSpot. # noqa: E501
:param object_name: The object_name of this PublicAuditLog. # noqa: E501
:type: str<|endoftext|>
|
08878e6fefd466535a48f2f287450863c2fbd35b4065a13fe3b086cf48233255
|
@property
def full_name(self):
    """Gets the full_name of this PublicAuditLog.

    The name of the user who caused the event.

    :return: The full_name of this PublicAuditLog.
    :rtype: str
    """
    return self._full_name
|
Gets the full_name of this PublicAuditLog. # noqa: E501
The name of the user who caused the event. # noqa: E501
:return: The full_name of this PublicAuditLog. # noqa: E501
:rtype: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
full_name
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def full_name(self):
'Gets the full_name of this PublicAuditLog. # noqa: E501\n\n The name of the user who caused the event. # noqa: E501\n\n :return: The full_name of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._full_name
|
@property
def full_name(self):
'Gets the full_name of this PublicAuditLog. # noqa: E501\n\n The name of the user who caused the event. # noqa: E501\n\n :return: The full_name of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._full_name<|docstring|>Gets the full_name of this PublicAuditLog. # noqa: E501
The name of the user who caused the event. # noqa: E501
:return: The full_name of this PublicAuditLog. # noqa: E501
:rtype: str<|endoftext|>
|
d2ac195ad7d328a1034316990519498544c50ed1808e31f33c4abd1daaab9473
|
@full_name.setter
def full_name(self, full_name):
    """Sets the full_name of this PublicAuditLog.

    The name of the user who caused the event.

    :param full_name: The full_name of this PublicAuditLog.
    :type: str
    """
    # This field is required: reject None when validation is enabled.
    validate = self.local_vars_configuration.client_side_validation
    if validate and full_name is None:
        raise ValueError('Invalid value for `full_name`, must not be `None`')
    self._full_name = full_name
|
Sets the full_name of this PublicAuditLog.
The name of the user who caused the event. # noqa: E501
:param full_name: The full_name of this PublicAuditLog. # noqa: E501
:type: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
full_name
|
Ronfer/hubspot-api-python
| 117
|
python
|
@full_name.setter
def full_name(self, full_name):
'Sets the full_name of this PublicAuditLog.\n\n The name of the user who caused the event. # noqa: E501\n\n :param full_name: The full_name of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (full_name is None)):
raise ValueError('Invalid value for `full_name`, must not be `None`')
self._full_name = full_name
|
@full_name.setter
def full_name(self, full_name):
'Sets the full_name of this PublicAuditLog.\n\n The name of the user who caused the event. # noqa: E501\n\n :param full_name: The full_name of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (full_name is None)):
raise ValueError('Invalid value for `full_name`, must not be `None`')
self._full_name = full_name<|docstring|>Sets the full_name of this PublicAuditLog.
The name of the user who caused the event. # noqa: E501
:param full_name: The full_name of this PublicAuditLog. # noqa: E501
:type: str<|endoftext|>
|
4d0e7949a18771a1abfc5d83a39a28130da2c4d80632f0c1ed8d112059781219
|
@property
def event(self):
    """Gets the event of this PublicAuditLog.

    The type of event that took place (CREATED, UPDATED, PUBLISHED,
    DELETED, UNPUBLISHED).

    :return: The event of this PublicAuditLog.
    :rtype: str
    """
    return self._event
|
Gets the event of this PublicAuditLog. # noqa: E501
The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501
:return: The event of this PublicAuditLog. # noqa: E501
:rtype: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
event
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def event(self):
'Gets the event of this PublicAuditLog. # noqa: E501\n\n The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501\n\n :return: The event of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._event
|
@property
def event(self):
'Gets the event of this PublicAuditLog. # noqa: E501\n\n The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501\n\n :return: The event of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._event<|docstring|>Gets the event of this PublicAuditLog. # noqa: E501
The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501
:return: The event of this PublicAuditLog. # noqa: E501
:rtype: str<|endoftext|>
|
d0b56fe71a2ea8feda9b6d6404c599d02ae169744d22d359ac49d4523e769a39
|
@event.setter
def event(self, event):
    """Sets the event of this PublicAuditLog.

    The type of event that took place (CREATED, UPDATED, PUBLISHED,
    DELETED, UNPUBLISHED).

    :param event: The event of this PublicAuditLog.
    :type: str
    """
    validate = self.local_vars_configuration.client_side_validation
    # Required field: None is rejected before the enum membership check.
    if validate and event is None:
        raise ValueError('Invalid value for `event`, must not be `None`')
    allowed_values = ['CREATED', 'UPDATED', 'PUBLISHED', 'DELETED', 'UNPUBLISHED']
    if validate and event not in allowed_values:
        raise ValueError('Invalid value for `event` ({0}), must be one of {1}'.format(event, allowed_values))
    self._event = event
|
Sets the event of this PublicAuditLog.
The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501
:param event: The event of this PublicAuditLog. # noqa: E501
:type: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
event
|
Ronfer/hubspot-api-python
| 117
|
python
|
@event.setter
def event(self, event):
'Sets the event of this PublicAuditLog.\n\n The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501\n\n :param event: The event of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (event is None)):
raise ValueError('Invalid value for `event`, must not be `None`')
allowed_values = ['CREATED', 'UPDATED', 'PUBLISHED', 'DELETED', 'UNPUBLISHED']
if (self.local_vars_configuration.client_side_validation and (event not in allowed_values)):
raise ValueError('Invalid value for `event` ({0}), must be one of {1}'.format(event, allowed_values))
self._event = event
|
@event.setter
def event(self, event):
'Sets the event of this PublicAuditLog.\n\n The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501\n\n :param event: The event of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (event is None)):
raise ValueError('Invalid value for `event`, must not be `None`')
allowed_values = ['CREATED', 'UPDATED', 'PUBLISHED', 'DELETED', 'UNPUBLISHED']
if (self.local_vars_configuration.client_side_validation and (event not in allowed_values)):
raise ValueError('Invalid value for `event` ({0}), must be one of {1}'.format(event, allowed_values))
self._event = event<|docstring|>Sets the event of this PublicAuditLog.
The type of event that took place (CREATED, UPDATED, PUBLISHED, DELETED, UNPUBLISHED). # noqa: E501
:param event: The event of this PublicAuditLog. # noqa: E501
:type: str<|endoftext|>
|
288ace72ab9c6fd6a716de1a0c329e3b34432c49b047fbace64ad70a4a3517af
|
@property
def object_type(self):
    """Gets the object_type of this PublicAuditLog.

    The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.)

    :return: The object_type of this PublicAuditLog.
    :rtype: str
    """
    return self._object_type
|
Gets the object_type of this PublicAuditLog. # noqa: E501
The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501
:return: The object_type of this PublicAuditLog. # noqa: E501
:rtype: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
object_type
|
Ronfer/hubspot-api-python
| 117
|
python
|
@property
def object_type(self):
'Gets the object_type of this PublicAuditLog. # noqa: E501\n\n The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501\n\n :return: The object_type of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._object_type
|
@property
def object_type(self):
'Gets the object_type of this PublicAuditLog. # noqa: E501\n\n The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501\n\n :return: The object_type of this PublicAuditLog. # noqa: E501\n :rtype: str\n '
return self._object_type<|docstring|>Gets the object_type of this PublicAuditLog. # noqa: E501
The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501
:return: The object_type of this PublicAuditLog. # noqa: E501
:rtype: str<|endoftext|>
|
154c2919f8b44242d9f6bd49a62caeca624d5ae1a82f2508bff023fdef85f8d4
|
@object_type.setter
def object_type(self, object_type):
    """Sets the object_type of this PublicAuditLog.

    The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.)

    :param object_type: The object_type of this PublicAuditLog.
    :type: str
    """
    validate = self.local_vars_configuration.client_side_validation
    # Required field: None is rejected before the enum membership check.
    if validate and object_type is None:
        raise ValueError('Invalid value for `object_type`, must not be `None`')
    allowed_values = ['BLOG', 'BLOG_POST', 'LANDING_PAGE', 'WEBSITE_PAGE', 'TEMPLATE', 'MODULE', 'GLOBAL_MODULE', 'SERVERLESS_FUNCTION', 'DOMAIN', 'URL_MAPPING', 'EMAIL', 'CONTENT_SETTINGS', 'HUBDB_TABLE', 'KNOWLEDGE_BASE_ARTICLE', 'KNOWLEDGE_BASE', 'THEME', 'CSS', 'JS']
    if validate and object_type not in allowed_values:
        raise ValueError('Invalid value for `object_type` ({0}), must be one of {1}'.format(object_type, allowed_values))
    self._object_type = object_type
|
Sets the object_type of this PublicAuditLog.
The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501
:param object_type: The object_type of this PublicAuditLog. # noqa: E501
:type: str
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
object_type
|
Ronfer/hubspot-api-python
| 117
|
python
|
@object_type.setter
def object_type(self, object_type):
'Sets the object_type of this PublicAuditLog.\n\n The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501\n\n :param object_type: The object_type of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (object_type is None)):
raise ValueError('Invalid value for `object_type`, must not be `None`')
allowed_values = ['BLOG', 'BLOG_POST', 'LANDING_PAGE', 'WEBSITE_PAGE', 'TEMPLATE', 'MODULE', 'GLOBAL_MODULE', 'SERVERLESS_FUNCTION', 'DOMAIN', 'URL_MAPPING', 'EMAIL', 'CONTENT_SETTINGS', 'HUBDB_TABLE', 'KNOWLEDGE_BASE_ARTICLE', 'KNOWLEDGE_BASE', 'THEME', 'CSS', 'JS']
if (self.local_vars_configuration.client_side_validation and (object_type not in allowed_values)):
raise ValueError('Invalid value for `object_type` ({0}), must be one of {1}'.format(object_type, allowed_values))
self._object_type = object_type
|
@object_type.setter
def object_type(self, object_type):
'Sets the object_type of this PublicAuditLog.\n\n The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501\n\n :param object_type: The object_type of this PublicAuditLog. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (object_type is None)):
raise ValueError('Invalid value for `object_type`, must not be `None`')
allowed_values = ['BLOG', 'BLOG_POST', 'LANDING_PAGE', 'WEBSITE_PAGE', 'TEMPLATE', 'MODULE', 'GLOBAL_MODULE', 'SERVERLESS_FUNCTION', 'DOMAIN', 'URL_MAPPING', 'EMAIL', 'CONTENT_SETTINGS', 'HUBDB_TABLE', 'KNOWLEDGE_BASE_ARTICLE', 'KNOWLEDGE_BASE', 'THEME', 'CSS', 'JS']
if (self.local_vars_configuration.client_side_validation and (object_type not in allowed_values)):
raise ValueError('Invalid value for `object_type` ({0}), must be one of {1}'.format(object_type, allowed_values))
self._object_type = object_type<|docstring|>Sets the object_type of this PublicAuditLog.
The type of the object (BLOG, LANDING_PAGE, DOMAIN, HUBDB_TABLE etc.) # noqa: E501
:param object_type: The object_type of this PublicAuditLog. # noqa: E501
:type: str<|endoftext|>
|
5a4e41bb6a0def746593298cb605df98f1366e957c4ca89b12010ea7db707963
|
def to_dict(self):
    """Returns the model properties as a dict.

    Values exposing a ``to_dict`` method are serialized recursively one
    level deep; lists and dicts have their elements/values converted the
    same way.
    """
    def convert(item):
        # Serialize nested models; pass plain values through unchanged.
        return item.to_dict() if hasattr(item, 'to_dict') else item

    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [convert(elem) for elem in value]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: convert(v) for k, v in value.items()}
        else:
            result[attr] = value
    return result
|
Returns the model properties as a dict
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
to_dict
|
Ronfer/hubspot-api-python
| 117
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
|
Returns the string representation of the model
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
to_str
|
Ronfer/hubspot-api-python
| 117
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
|
For `print` and `pprint`
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
__repr__
|
Ronfer/hubspot-api-python
| 117
|
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
6b45279aba68309ad52d4feb9abc6a6e6a2cf71340c09a4be8f5b1c21e1ebc76
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, PublicAuditLog)):
return False
return (self.to_dict() == other.to_dict())
|
Returns true if both objects are equal
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
__eq__
|
Ronfer/hubspot-api-python
| 117
|
python
|
def __eq__(self, other):
if (not isinstance(other, PublicAuditLog)):
return False
return (self.to_dict() == other.to_dict())
|
def __eq__(self, other):
if (not isinstance(other, PublicAuditLog)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
|
b1511bdad5bef2b4cd3954531507f51eb9643c02694622c424d250b5028d0ac4
|
def __ne__(self, other):
'Returns true if both objects are not equal'
if (not isinstance(other, PublicAuditLog)):
return True
return (self.to_dict() != other.to_dict())
|
Returns true if both objects are not equal
|
hubspot/cms/audit_logs/models/public_audit_log.py
|
__ne__
|
Ronfer/hubspot-api-python
| 117
|
python
|
def __ne__(self, other):
if (not isinstance(other, PublicAuditLog)):
return True
return (self.to_dict() != other.to_dict())
|
def __ne__(self, other):
if (not isinstance(other, PublicAuditLog)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
2cc196216b34e6b6d6638abfff56dec6a359b1704ed23f5b67684048e9a05155
|
def get_args():
'Get command-line arguments'
parser = argparse.ArgumentParser(description='I find commanalities between two files', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('FILE1', help='Input file 1', metavar='FILE', type=argparse.FileType('rt'))
parser.add_argument('FILE2', help='Input file 2', metavar='FILE', type=argparse.FileType('rt'))
parser.add_argument('-k', '--kmers', help='K-mer size', metavar='int', type=int, default=3)
args = parser.parse_args()
if (args.kmers <= 0):
parser.error(f'--kmer "{args.kmers}" must be > 0')
return args
|
Get command-line arguments
|
assignments/08_kmers/kmers.py
|
get_args
|
ambanka/be434-fall-2021
| 0
|
python
|
def get_args():
parser = argparse.ArgumentParser(description='I find commanalities between two files', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('FILE1', help='Input file 1', metavar='FILE', type=argparse.FileType('rt'))
parser.add_argument('FILE2', help='Input file 2', metavar='FILE', type=argparse.FileType('rt'))
parser.add_argument('-k', '--kmers', help='K-mer size', metavar='int', type=int, default=3)
args = parser.parse_args()
if (args.kmers <= 0):
parser.error(f'--kmer "{args.kmers}" must be > 0')
return args
|
def get_args():
parser = argparse.ArgumentParser(description='I find commanalities between two files', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('FILE1', help='Input file 1', metavar='FILE', type=argparse.FileType('rt'))
parser.add_argument('FILE2', help='Input file 2', metavar='FILE', type=argparse.FileType('rt'))
parser.add_argument('-k', '--kmers', help='K-mer size', metavar='int', type=int, default=3)
args = parser.parse_args()
if (args.kmers <= 0):
parser.error(f'--kmer "{args.kmers}" must be > 0')
return args<|docstring|>Get command-line arguments<|endoftext|>
|
9409937c4c6515b8f28718fc80feeb33885c2439372b04fd2df0e21de2321a97
|
def main():
"This can't be the fastest way to do this"
args = get_args()
contents1 = args.FILE1.read()
contents2 = args.FILE2.read()
k = args.kmers
def find_kmers(seq, k):
n = ((len(seq) - k) + 1)
return ([] if (n < 1) else [seq[i:(i + k)] for i in range(n)])
words1 = contents1.split()
words2 = contents2.split()
kmer_list = set()
counted_1 = {}
counted_2 = {}
for word in words1:
for kmer in find_kmers(word, k):
counted_1.update({kmer: 0})
counted_2.update({kmer: 0})
for wo in words2:
if (kmer in find_kmers(wo, k)):
kmer_list.add(kmer)
for word in words1:
for kmer in find_kmers(word, k):
if (kmer in counted_1):
counted_1[kmer] += 1
for word in words2:
for kmer in find_kmers(word, k):
if (kmer in counted_2):
counted_2[kmer] += 1
for x in kmer_list:
print((('{0: <10}'.format(x) + '{0: >6}'.format(f'{counted_1.get(x)}')) + '{0: >6}'.format(f'{counted_2.get(x)}')))
|
This can't be the fastest way to do this
|
assignments/08_kmers/kmers.py
|
main
|
ambanka/be434-fall-2021
| 0
|
python
|
def main():
args = get_args()
contents1 = args.FILE1.read()
contents2 = args.FILE2.read()
k = args.kmers
def find_kmers(seq, k):
n = ((len(seq) - k) + 1)
return ([] if (n < 1) else [seq[i:(i + k)] for i in range(n)])
words1 = contents1.split()
words2 = contents2.split()
kmer_list = set()
counted_1 = {}
counted_2 = {}
for word in words1:
for kmer in find_kmers(word, k):
counted_1.update({kmer: 0})
counted_2.update({kmer: 0})
for wo in words2:
if (kmer in find_kmers(wo, k)):
kmer_list.add(kmer)
for word in words1:
for kmer in find_kmers(word, k):
if (kmer in counted_1):
counted_1[kmer] += 1
for word in words2:
for kmer in find_kmers(word, k):
if (kmer in counted_2):
counted_2[kmer] += 1
for x in kmer_list:
print((('{0: <10}'.format(x) + '{0: >6}'.format(f'{counted_1.get(x)}')) + '{0: >6}'.format(f'{counted_2.get(x)}')))
|
def main():
args = get_args()
contents1 = args.FILE1.read()
contents2 = args.FILE2.read()
k = args.kmers
def find_kmers(seq, k):
n = ((len(seq) - k) + 1)
return ([] if (n < 1) else [seq[i:(i + k)] for i in range(n)])
words1 = contents1.split()
words2 = contents2.split()
kmer_list = set()
counted_1 = {}
counted_2 = {}
for word in words1:
for kmer in find_kmers(word, k):
counted_1.update({kmer: 0})
counted_2.update({kmer: 0})
for wo in words2:
if (kmer in find_kmers(wo, k)):
kmer_list.add(kmer)
for word in words1:
for kmer in find_kmers(word, k):
if (kmer in counted_1):
counted_1[kmer] += 1
for word in words2:
for kmer in find_kmers(word, k):
if (kmer in counted_2):
counted_2[kmer] += 1
for x in kmer_list:
print((('{0: <10}'.format(x) + '{0: >6}'.format(f'{counted_1.get(x)}')) + '{0: >6}'.format(f'{counted_2.get(x)}')))<|docstring|>This can't be the fastest way to do this<|endoftext|>
|
a0f602d83e3a4f350f90a41e4597bdcf186dcc6d77bec259cc3b545400ef96f6
|
def counting_sort(array, key_length, key_func):
'Counting sort.'
counts = ([0] * key_length)
result = ([0] * len(array))
for i in array:
counts[key_func(i)] += 1
for i in range(1, key_length):
counts[i] += counts[(i - 1)]
for i in range((len(array) - 1), (- 1), (- 1)):
key = key_func(array[i])
result[(counts[key] - 1)] = array[i]
counts[key] -= 1
return result
|
Counting sort.
|
codes/counting_sort.py
|
counting_sort
|
pedh/CLRS-Solutions
| 3
|
python
|
def counting_sort(array, key_length, key_func):
counts = ([0] * key_length)
result = ([0] * len(array))
for i in array:
counts[key_func(i)] += 1
for i in range(1, key_length):
counts[i] += counts[(i - 1)]
for i in range((len(array) - 1), (- 1), (- 1)):
key = key_func(array[i])
result[(counts[key] - 1)] = array[i]
counts[key] -= 1
return result
|
def counting_sort(array, key_length, key_func):
counts = ([0] * key_length)
result = ([0] * len(array))
for i in array:
counts[key_func(i)] += 1
for i in range(1, key_length):
counts[i] += counts[(i - 1)]
for i in range((len(array) - 1), (- 1), (- 1)):
key = key_func(array[i])
result[(counts[key] - 1)] = array[i]
counts[key] -= 1
return result<|docstring|>Counting sort.<|endoftext|>
|
35264d9497d2dc67b7e3a89084bdd0a957d9201ed297b196aa817a3f9d97a554
|
def counting_sort_in_place(array, key_length, key_func):
'Counting sort in place, not stable.'
counts = ([0] * key_length)
for i in array:
counts[key_func(i)] += 1
for i in range(1, key_length):
counts[i] += counts[(i - 1)]
i = (len(array) - 1)
while (i >= 0):
key = key_func(array[i])
pos = (counts[key] - 1)
if (i > pos):
i -= 1
else:
(array[i], array[pos]) = (array[pos], array[i])
counts[key] -= 1
|
Counting sort in place, not stable.
|
codes/counting_sort.py
|
counting_sort_in_place
|
pedh/CLRS-Solutions
| 3
|
python
|
def counting_sort_in_place(array, key_length, key_func):
counts = ([0] * key_length)
for i in array:
counts[key_func(i)] += 1
for i in range(1, key_length):
counts[i] += counts[(i - 1)]
i = (len(array) - 1)
while (i >= 0):
key = key_func(array[i])
pos = (counts[key] - 1)
if (i > pos):
i -= 1
else:
(array[i], array[pos]) = (array[pos], array[i])
counts[key] -= 1
|
def counting_sort_in_place(array, key_length, key_func):
counts = ([0] * key_length)
for i in array:
counts[key_func(i)] += 1
for i in range(1, key_length):
counts[i] += counts[(i - 1)]
i = (len(array) - 1)
while (i >= 0):
key = key_func(array[i])
pos = (counts[key] - 1)
if (i > pos):
i -= 1
else:
(array[i], array[pos]) = (array[pos], array[i])
counts[key] -= 1<|docstring|>Counting sort in place, not stable.<|endoftext|>
|
9773f69bf515557e1c8453fa11a89ce7d66f9632cb93d789508f513c28c7eccd
|
def main():
'The main function.'
array = list(range(20))
random.shuffle(array)
print(array)
result = counting_sort(array, 3, (lambda i: (i % 3)))
print(result)
counting_sort_in_place(array, 3, (lambda i: (i % 3)))
print(array)
|
The main function.
|
codes/counting_sort.py
|
main
|
pedh/CLRS-Solutions
| 3
|
python
|
def main():
array = list(range(20))
random.shuffle(array)
print(array)
result = counting_sort(array, 3, (lambda i: (i % 3)))
print(result)
counting_sort_in_place(array, 3, (lambda i: (i % 3)))
print(array)
|
def main():
array = list(range(20))
random.shuffle(array)
print(array)
result = counting_sort(array, 3, (lambda i: (i % 3)))
print(result)
counting_sort_in_place(array, 3, (lambda i: (i % 3)))
print(array)<|docstring|>The main function.<|endoftext|>
|
f04ba89180755d8faf368986ff0f0a15a0cdde174c8dd80b7a06886856af1654
|
def sparse_tuple_from_label(sequences, dtype=np.int32):
'Create a sparse representention of x.\n Args:\n sequences: a list of lists of type dtype where each element is a sequence\n Returns:\n A tuple with (indices, values, shape)\n '
indices = []
values = []
for (n, seq) in enumerate(sequences):
indices.extend(zip(([n] * len(seq)), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), (np.asarray(indices).max(0)[1] + 1)], dtype=np.int64)
return (indices, values, shape)
|
Create a sparse representention of x.
Args:
sequences: a list of lists of type dtype where each element is a sequence
Returns:
A tuple with (indices, values, shape)
|
utils.py
|
sparse_tuple_from_label
|
zliu63/ctc_tf
| 4
|
python
|
def sparse_tuple_from_label(sequences, dtype=np.int32):
'Create a sparse representention of x.\n Args:\n sequences: a list of lists of type dtype where each element is a sequence\n Returns:\n A tuple with (indices, values, shape)\n '
indices = []
values = []
for (n, seq) in enumerate(sequences):
indices.extend(zip(([n] * len(seq)), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), (np.asarray(indices).max(0)[1] + 1)], dtype=np.int64)
return (indices, values, shape)
|
def sparse_tuple_from_label(sequences, dtype=np.int32):
'Create a sparse representention of x.\n Args:\n sequences: a list of lists of type dtype where each element is a sequence\n Returns:\n A tuple with (indices, values, shape)\n '
indices = []
values = []
for (n, seq) in enumerate(sequences):
indices.extend(zip(([n] * len(seq)), range(len(seq))))
values.extend(seq)
indices = np.asarray(indices, dtype=np.int64)
values = np.asarray(values, dtype=dtype)
shape = np.asarray([len(sequences), (np.asarray(indices).max(0)[1] + 1)], dtype=np.int64)
return (indices, values, shape)<|docstring|>Create a sparse representention of x.
Args:
sequences: a list of lists of type dtype where each element is a sequence
Returns:
A tuple with (indices, values, shape)<|endoftext|>
|
324e17f63ed122825e77d9e2fd67399ec3d380157c40f8dc0abec270ca284de4
|
def eval_expression(encoded_list):
'\n :param encoded_list:\n :return:\n '
eval_rs = []
for item in encoded_list:
try:
rs = str(eval(item))
eval_rs.append(rs)
except:
eval_rs.append(item)
continue
with open('./result.txt') as f:
for ith in xrange(len(encoded_list)):
f.write((((encoded_list[ith] + ' ') + eval_rs[ith]) + '\n'))
return eval_rs
|
:param encoded_list:
:return:
|
utils.py
|
eval_expression
|
zliu63/ctc_tf
| 4
|
python
|
def eval_expression(encoded_list):
'\n :param encoded_list:\n :return:\n '
eval_rs = []
for item in encoded_list:
try:
rs = str(eval(item))
eval_rs.append(rs)
except:
eval_rs.append(item)
continue
with open('./result.txt') as f:
for ith in xrange(len(encoded_list)):
f.write((((encoded_list[ith] + ' ') + eval_rs[ith]) + '\n'))
return eval_rs
|
def eval_expression(encoded_list):
'\n :param encoded_list:\n :return:\n '
eval_rs = []
for item in encoded_list:
try:
rs = str(eval(item))
eval_rs.append(rs)
except:
eval_rs.append(item)
continue
with open('./result.txt') as f:
for ith in xrange(len(encoded_list)):
f.write((((encoded_list[ith] + ' ') + eval_rs[ith]) + '\n'))
return eval_rs<|docstring|>:param encoded_list:
:return:<|endoftext|>
|
26969b075d30f599e44acad20fc34e4e476fee4217de607163f65087fa5aa2ea
|
def get_bits(hex: str) -> Iterator[str]:
"\n >>> list(get_bits('A'))\n ['1', '0', '1', '0']\n >>> list(get_bits('1'))\n ['0', '0', '0', '1']\n "
for h in hex:
d = int(h, base=16)
for b in f'{d:04b}':
(yield b)
|
>>> list(get_bits('A'))
['1', '0', '1', '0']
>>> list(get_bits('1'))
['0', '0', '0', '1']
|
d16/part1.py
|
get_bits
|
Jamie-Chang/advent2021
| 0
|
python
|
def get_bits(hex: str) -> Iterator[str]:
"\n >>> list(get_bits('A'))\n ['1', '0', '1', '0']\n >>> list(get_bits('1'))\n ['0', '0', '0', '1']\n "
for h in hex:
d = int(h, base=16)
for b in f'{d:04b}':
(yield b)
|
def get_bits(hex: str) -> Iterator[str]:
"\n >>> list(get_bits('A'))\n ['1', '0', '1', '0']\n >>> list(get_bits('1'))\n ['0', '0', '0', '1']\n "
for h in hex:
d = int(h, base=16)
for b in f'{d:04b}':
(yield b)<|docstring|>>>> list(get_bits('A'))
['1', '0', '1', '0']
>>> list(get_bits('1'))
['0', '0', '0', '1']<|endoftext|>
|
e872dca63e85f916d6f646930f432056d9c961bd9a913e1443815eaddc1b7c11
|
def take(bits: Iterable[str], number: int) -> str:
"\n >>> take('11000001', 2)\n '11'\n >>> take('110', 4)\n '110'\n "
took = ''.join(itertools.islice(bits, number))
return took
|
>>> take('11000001', 2)
'11'
>>> take('110', 4)
'110'
|
d16/part1.py
|
take
|
Jamie-Chang/advent2021
| 0
|
python
|
def take(bits: Iterable[str], number: int) -> str:
"\n >>> take('11000001', 2)\n '11'\n >>> take('110', 4)\n '110'\n "
took = .join(itertools.islice(bits, number))
return took
|
def take(bits: Iterable[str], number: int) -> str:
"\n >>> take('11000001', 2)\n '11'\n >>> take('110', 4)\n '110'\n "
took = .join(itertools.islice(bits, number))
return took<|docstring|>>>> take('11000001', 2)
'11'
>>> take('110', 4)
'110'<|endoftext|>
|
250d3ca80753d5faecb15d7ecc9fb8cd581131dc9b9ab0c6a96eb79873199dae
|
def parse_number(bits: Iterable[str]) -> int:
"\n >>> bits = iter('101111111000101000')\n >>> parse_number(bits)\n 2021\n "
bits = iter(bits)
return int(''.join(_parse_hex_digits(bits)), base=2)
|
>>> bits = iter('101111111000101000')
>>> parse_number(bits)
2021
|
d16/part1.py
|
parse_number
|
Jamie-Chang/advent2021
| 0
|
python
|
def parse_number(bits: Iterable[str]) -> int:
"\n >>> bits = iter('101111111000101000')\n >>> parse_number(bits)\n 2021\n "
bits = iter(bits)
return int(.join(_parse_hex_digits(bits)), base=2)
|
def parse_number(bits: Iterable[str]) -> int:
"\n >>> bits = iter('101111111000101000')\n >>> parse_number(bits)\n 2021\n "
bits = iter(bits)
return int(.join(_parse_hex_digits(bits)), base=2)<|docstring|>>>> bits = iter('101111111000101000')
>>> parse_number(bits)
2021<|endoftext|>
|
654e1736152dc4ecb60fec668fa25dccefd5e9ad559a840bcf0707704ceb44af
|
def choose_aws_role(self, assertion):
' Choose AWS role from SAML assertion '
roles = self.__extract_available_roles_from(assertion)
if self.role:
predefined_role = self.__find_predefined_role_from(roles)
if predefined_role:
self.logger.info(('Using predefined role: %s' % self.role))
return predefined_role
else:
self.logger.info(('Predefined role, %s, not found in the list\nof roles assigned to you.' % self.role))
self.logger.info('Please choose a role.')
role_options = self.__create_options_from(roles)
for option in role_options:
print(option)
role_choice = (int(input('Please select the AWS role: ')) - 1)
return roles[role_choice]
|
Choose AWS role from SAML assertion
|
oktaawscli/aws_auth.py
|
choose_aws_role
|
ntsutake/okta-awscli
| 0
|
python
|
def choose_aws_role(self, assertion):
' '
roles = self.__extract_available_roles_from(assertion)
if self.role:
predefined_role = self.__find_predefined_role_from(roles)
if predefined_role:
self.logger.info(('Using predefined role: %s' % self.role))
return predefined_role
else:
self.logger.info(('Predefined role, %s, not found in the list\nof roles assigned to you.' % self.role))
self.logger.info('Please choose a role.')
role_options = self.__create_options_from(roles)
for option in role_options:
print(option)
role_choice = (int(input('Please select the AWS role: ')) - 1)
return roles[role_choice]
|
def choose_aws_role(self, assertion):
' '
roles = self.__extract_available_roles_from(assertion)
if self.role:
predefined_role = self.__find_predefined_role_from(roles)
if predefined_role:
self.logger.info(('Using predefined role: %s' % self.role))
return predefined_role
else:
self.logger.info(('Predefined role, %s, not found in the list\nof roles assigned to you.' % self.role))
self.logger.info('Please choose a role.')
role_options = self.__create_options_from(roles)
for option in role_options:
print(option)
role_choice = (int(input('Please select the AWS role: ')) - 1)
return roles[role_choice]<|docstring|>Choose AWS role from SAML assertion<|endoftext|>
|
3174ea42b9f35443a172079eead147de726880a34c687e4dc0bf570b8705b6b9
|
def full_choose_aws_role(self, assertion):
' Choose AWS role list from SAML assertion '
return self.__extract_available_roles_from(assertion)
|
Choose AWS role list from SAML assertion
|
oktaawscli/aws_auth.py
|
full_choose_aws_role
|
ntsutake/okta-awscli
| 0
|
python
|
def full_choose_aws_role(self, assertion):
' '
return self.__extract_available_roles_from(assertion)
|
def full_choose_aws_role(self, assertion):
' '
return self.__extract_available_roles_from(assertion)<|docstring|>Choose AWS role list from SAML assertion<|endoftext|>
|
c75e15b0e733c47014d8ececd2d6d41d3cfb248f1f586e233c685e5e6dc18c5b
|
@staticmethod
def get_sts_token(role_arn, principal_arn, assertion, duration=None, logger=None):
' Gets a token from AWS STS '
arn_region = principal_arn.split(':')[1]
if (arn_region == 'aws-us-gov'):
sts = boto3.client('sts', region_name='us-gov-west-1')
else:
sts = boto3.client('sts')
try:
response = sts.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion, DurationSeconds=(duration or 43200))
except ClientError as ex:
if logger:
logger.error(('role %s: Could not retrieve credentials: %s' % role_arn), ex.response['Error']['Message'])
raise
else:
raise
credentials = response['Credentials']
return credentials
|
Gets a token from AWS STS
|
oktaawscli/aws_auth.py
|
get_sts_token
|
ntsutake/okta-awscli
| 0
|
python
|
@staticmethod
def get_sts_token(role_arn, principal_arn, assertion, duration=None, logger=None):
' '
arn_region = principal_arn.split(':')[1]
if (arn_region == 'aws-us-gov'):
sts = boto3.client('sts', region_name='us-gov-west-1')
else:
sts = boto3.client('sts')
try:
response = sts.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion, DurationSeconds=(duration or 43200))
except ClientError as ex:
if logger:
logger.error(('role %s: Could not retrieve credentials: %s' % role_arn), ex.response['Error']['Message'])
raise
else:
raise
credentials = response['Credentials']
return credentials
|
@staticmethod
def get_sts_token(role_arn, principal_arn, assertion, duration=None, logger=None):
' '
arn_region = principal_arn.split(':')[1]
if (arn_region == 'aws-us-gov'):
sts = boto3.client('sts', region_name='us-gov-west-1')
else:
sts = boto3.client('sts')
try:
response = sts.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion, DurationSeconds=(duration or 43200))
except ClientError as ex:
if logger:
logger.error(('role %s: Could not retrieve credentials: %s' % role_arn), ex.response['Error']['Message'])
raise
else:
raise
credentials = response['Credentials']
return credentials<|docstring|>Gets a token from AWS STS<|endoftext|>
|
8349ac2b186297e8546e1c655c457e0d47b7415bfa9968e72545944667507148
|
def check_sts_token(self, profile):
' Verifies that STS credentials are valid '
if (not profile):
return False
parser = RawConfigParser()
parser.read(self.creds_file)
if (not os.path.exists(self.creds_dir)):
self.logger.info('AWS credentials path does not exist. Not checking.')
return False
elif (not os.path.isfile(self.creds_file)):
self.logger.info('AWS credentials file does not exist. Not checking.')
return False
elif (not parser.has_section(profile)):
self.logger.info('No existing credentials found. Requesting new credentials.')
return False
session = boto3.Session(profile_name=profile)
sts = session.client('sts')
try:
sts.get_caller_identity()
except ClientError as ex:
if (ex.response['Error']['Code'] == 'ExpiredToken'):
self.logger.info('Temporary credentials have expired. Requesting new credentials.')
return False
self.logger.info('STS credentials are valid. Nothing to do.')
return True
|
Verifies that STS credentials are valid
|
oktaawscli/aws_auth.py
|
check_sts_token
|
ntsutake/okta-awscli
| 0
|
python
|
def check_sts_token(self, profile):
' '
if (not profile):
return False
parser = RawConfigParser()
parser.read(self.creds_file)
if (not os.path.exists(self.creds_dir)):
self.logger.info('AWS credentials path does not exist. Not checking.')
return False
elif (not os.path.isfile(self.creds_file)):
self.logger.info('AWS credentials file does not exist. Not checking.')
return False
elif (not parser.has_section(profile)):
self.logger.info('No existing credentials found. Requesting new credentials.')
return False
session = boto3.Session(profile_name=profile)
sts = session.client('sts')
try:
sts.get_caller_identity()
except ClientError as ex:
if (ex.response['Error']['Code'] == 'ExpiredToken'):
self.logger.info('Temporary credentials have expired. Requesting new credentials.')
return False
self.logger.info('STS credentials are valid. Nothing to do.')
return True
|
def check_sts_token(self, profile):
' '
if (not profile):
return False
parser = RawConfigParser()
parser.read(self.creds_file)
if (not os.path.exists(self.creds_dir)):
self.logger.info('AWS credentials path does not exist. Not checking.')
return False
elif (not os.path.isfile(self.creds_file)):
self.logger.info('AWS credentials file does not exist. Not checking.')
return False
elif (not parser.has_section(profile)):
self.logger.info('No existing credentials found. Requesting new credentials.')
return False
session = boto3.Session(profile_name=profile)
sts = session.client('sts')
try:
sts.get_caller_identity()
except ClientError as ex:
if (ex.response['Error']['Code'] == 'ExpiredToken'):
self.logger.info('Temporary credentials have expired. Requesting new credentials.')
return False
self.logger.info('STS credentials are valid. Nothing to do.')
return True<|docstring|>Verifies that STS credentials are valid<|endoftext|>
|
9b3a8a35a15288ea861803d0135008be0411a1e3e0f4d645a7b9b52c1e6c1a54
|
def write_sts_token(self, profile, access_key_id, secret_access_key, session_token):
' Writes STS auth information to credentials file '
if (not os.path.exists(self.creds_dir)):
os.makedirs(self.creds_dir)
config = RawConfigParser()
if os.path.isfile(self.creds_file):
config.read(self.creds_file)
if (not config.has_section(profile)):
config.add_section(profile)
config.set(profile, 'aws_access_key_id', access_key_id)
config.set(profile, 'aws_secret_access_key', secret_access_key)
config.set(profile, 'aws_session_token', session_token)
with open(self.creds_file, 'w+') as configfile:
config.write(configfile)
self.logger.info(('Temporary credentials written to profile: %s' % profile))
self.logger.info(('Invoke using: aws --profile %s <service> <command>' % profile))
|
Writes STS auth information to credentials file
|
oktaawscli/aws_auth.py
|
write_sts_token
|
ntsutake/okta-awscli
| 0
|
python
|
def write_sts_token(self, profile, access_key_id, secret_access_key, session_token):
' '
if (not os.path.exists(self.creds_dir)):
os.makedirs(self.creds_dir)
config = RawConfigParser()
if os.path.isfile(self.creds_file):
config.read(self.creds_file)
if (not config.has_section(profile)):
config.add_section(profile)
config.set(profile, 'aws_access_key_id', access_key_id)
config.set(profile, 'aws_secret_access_key', secret_access_key)
config.set(profile, 'aws_session_token', session_token)
with open(self.creds_file, 'w+') as configfile:
config.write(configfile)
self.logger.info(('Temporary credentials written to profile: %s' % profile))
self.logger.info(('Invoke using: aws --profile %s <service> <command>' % profile))
|
def write_sts_token(self, profile, access_key_id, secret_access_key, session_token):
' '
if (not os.path.exists(self.creds_dir)):
os.makedirs(self.creds_dir)
config = RawConfigParser()
if os.path.isfile(self.creds_file):
config.read(self.creds_file)
if (not config.has_section(profile)):
config.add_section(profile)
config.set(profile, 'aws_access_key_id', access_key_id)
config.set(profile, 'aws_secret_access_key', secret_access_key)
config.set(profile, 'aws_session_token', session_token)
with open(self.creds_file, 'w+') as configfile:
config.write(configfile)
self.logger.info(('Temporary credentials written to profile: %s' % profile))
self.logger.info(('Invoke using: aws --profile %s <service> <command>' % profile))<|docstring|>Writes STS auth information to credentials file<|endoftext|>
|
9705bc929fa3038e2adb2ff38948384e7c6680f31a8c0d44ffff4f5de2b70568
|
def test_lambda_dependency():
'Inject lambda function.'
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
class Summator(Injector):
foo = Foo
add = (lambda x, y: (x + y))
assert (Summator.foo.do(1) == 2)
|
Inject lambda function.
|
tests/test_injector.py
|
test_lambda_dependency
|
hyperleex/dependencies
| 0
|
python
|
def test_lambda_dependency():
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
class Summator(Injector):
foo = Foo
add = (lambda x, y: (x + y))
assert (Summator.foo.do(1) == 2)
|
def test_lambda_dependency():
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
class Summator(Injector):
foo = Foo
add = (lambda x, y: (x + y))
assert (Summator.foo.do(1) == 2)<|docstring|>Inject lambda function.<|endoftext|>
|
26b88460abde94090a74b8ebe439464b9c6ffac07bcfbb289c49c3f900c863ba
|
def test_function_dependency():
'Inject regular function.'
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
def plus(x, y):
return (x + y)
class Summator(Injector):
foo = Foo
add = plus
assert (Summator.foo.do(1) == 2)
|
Inject regular function.
|
tests/test_injector.py
|
test_function_dependency
|
hyperleex/dependencies
| 0
|
python
|
def test_function_dependency():
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
def plus(x, y):
return (x + y)
class Summator(Injector):
foo = Foo
add = plus
assert (Summator.foo.do(1) == 2)
|
def test_function_dependency():
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
def plus(x, y):
return (x + y)
class Summator(Injector):
foo = Foo
add = plus
assert (Summator.foo.do(1) == 2)<|docstring|>Inject regular function.<|endoftext|>
|
17093467b9775abb18766b4720e76b986c85ae8be7967d25066c798b8a2c21b3
|
def test_inline_dependency():
'Inject method defined inside Injector subclass.'
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
class Summator(Injector):
foo = Foo
def add(x, y):
return (x + y)
assert (Summator.foo.do(1) == 2)
|
Inject method defined inside Injector subclass.
|
tests/test_injector.py
|
test_inline_dependency
|
hyperleex/dependencies
| 0
|
python
|
def test_inline_dependency():
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
class Summator(Injector):
foo = Foo
def add(x, y):
return (x + y)
assert (Summator.foo.do(1) == 2)
|
def test_inline_dependency():
class Foo(object):
def __init__(self, add):
self.add = add
def do(self, x):
return self.add(x, x)
class Summator(Injector):
foo = Foo
def add(x, y):
return (x + y)
assert (Summator.foo.do(1) == 2)<|docstring|>Inject method defined inside Injector subclass.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.