| body<br>string, 26–98.2k chars | body_hash<br>int64 | docstring<br>string, 1–16.8k chars | path<br>string, 5–230 chars | name<br>string, 1–96 chars | repository_name<br>string, 7–89 chars | lang<br>1 class (python) | body_without_docstring<br>string, 20–98.2k chars |
|---|---|---|---|---|---|---|---|
def _calculate_z_values(self):
'Calculate z statistics for our two sets of coefficients\n '
(_std_error0, _std_error1) = self._calculate_robust_standard_errors()
_z_values0 = (self.lambda_0_coefficients[:, 0] / _std_error0)
_z_values1 = (self.lambda_1_coefficients[:, 0] / _std_error1)
return (_z_values0, _z_values1)
| -839,694,260,968,867,100
|
Calculate z statistics for our two sets of coefficients
|
metrics/__init__.py
|
_calculate_z_values
|
nathan-bennett/skellam
|
python
|
def _calculate_z_values(self):
'\n '
(_std_error0, _std_error1) = self._calculate_robust_standard_errors()
_z_values0 = (self.lambda_0_coefficients[:, 0] / _std_error0)
_z_values1 = (self.lambda_1_coefficients[:, 0] / _std_error1)
return (_z_values0, _z_values1)
|
def _calculate_p_values(self):
'Calculate p values for our two sets of coefficients\n '
(_z_values0, _z_values1) = self._calculate_z_values()
_p_values0 = (scipy.stats.norm.sf(abs(_z_values0)) * 2)
_p_values1 = (scipy.stats.norm.sf(abs(_z_values1)) * 2)
return (_p_values0, _p_values1)
| 2,315,567,417,583,156,700
|
Calculate p values for our two sets of coefficients
|
metrics/__init__.py
|
_calculate_p_values
|
nathan-bennett/skellam
|
python
|
def _calculate_p_values(self):
'\n '
(_z_values0, _z_values1) = self._calculate_z_values()
_p_values0 = (scipy.stats.norm.sf(abs(_z_values0)) * 2)
_p_values1 = (scipy.stats.norm.sf(abs(_z_values1)) * 2)
return (_p_values0, _p_values1)
|
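The two rows above compute two-sided p-values from z statistics via the standard normal survival function. A minimal, self-contained sketch of that idiom, assuming plain NumPy arrays of z-scores rather than the class attributes used above:

```python
# Two-sided p-values from z-scores (hypothetical data, not the skellam repo).
import numpy as np
import scipy.stats

z_values = np.array([-2.5, 0.3, 1.96])
# sf(|z|) is the upper-tail probability P(Z > |z|); doubling it gives the
# two-sided p-value under a standard normal null.
p_values = scipy.stats.norm.sf(np.abs(z_values)) * 2
print(p_values)  # approx. [0.0124, 0.7642, 0.0500]
```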
def test_queue_end(self):
'\n test if the default pointer in the buffer is -1.\n '
from ..circular_buffer import CircularBuffer
buffer = CircularBuffer(shape=(100, 2))
self.assertEqual(buffer.pointer, (- 1))
| 3,510,914,895,791,073,300
|
test if the default pointer in the buffer is -1.
|
circular_buffer_numpy/tests/test_circular_buffer.py
|
test_queue_end
|
rcm2dev/circular_buffer_numpy
|
python
|
def test_queue_end(self):
'\n \n '
from ..circular_buffer import CircularBuffer
buffer = CircularBuffer(shape=(100, 2))
self.assertEqual(buffer.pointer, (- 1))
|
def test_queue_end_two(self):
'\n test if the default pointer in the buffer is -1.\n '
from ..circular_buffer import CircularBuffer
buffer = CircularBuffer(shape=(100, 2))
self.assertEqual(buffer.pointer, (- 1))
| -2,111,066,023,000,537,000
|
test if the default pointer in the buffer is -1.
|
circular_buffer_numpy/tests/test_circular_buffer.py
|
test_queue_end_two
|
rcm2dev/circular_buffer_numpy
|
python
|
def test_queue_end_two(self):
'\n \n '
from ..circular_buffer import CircularBuffer
buffer = CircularBuffer(shape=(100, 2))
self.assertEqual(buffer.pointer, (- 1))
|
@contextmanager
def _download_file(uri, bulk_api):
'Download the bulk API result file for a single batch'
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile('w+b') as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
(yield f)
| 807,147,497,604,898,600
|
Download the bulk API result file for a single batch
|
cumulusci/tasks/bulkdata.py
|
_download_file
|
davidmreed/CumulusCI
|
python
|
@contextmanager
def _download_file(uri, bulk_api):
resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
with tempfile.TemporaryFile('w+b') as f:
for chunk in resp.iter_content(chunk_size=None):
f.write(chunk)
f.seek(0)
(yield f)
|
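The `_download_file` helper streams a bulk-API result into a seekable temp file. A standalone sketch of the same pattern, assuming a plain URL with no auth headers (the `bulk_api` object is CumulusCI-specific):

```python
# Streaming-download context manager (hypothetical URL, no auth headers).
import tempfile
from contextlib import contextmanager

import requests

@contextmanager
def download_file(uri):
    resp = requests.get(uri, stream=True)
    # Spool the response into a seekable temp file so callers can re-read it;
    # chunk_size=None yields chunks as they arrive from the socket.
    with tempfile.TemporaryFile('w+b') as f:
        for chunk in resp.iter_content(chunk_size=None):
            f.write(chunk)
        f.seek(0)
        yield f

# with download_file('https://example.com/data.csv') as f:
#     print(f.read(100))
```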
def _split_batches(self, data, batch_size):
'Yield successive n-sized chunks from l.'
for i in range(0, len(data), batch_size):
(yield data[i:(i + batch_size)])
| -6,208,150,280,878,876,000
|
Yield successive n-sized chunks from l.
|
cumulusci/tasks/bulkdata.py
|
_split_batches
|
davidmreed/CumulusCI
|
python
|
def _split_batches(self, data, batch_size):
for i in range(0, len(data), batch_size):
(yield data[i:(i + batch_size)])
|
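`_split_batches` is the classic slice-based chunking generator; the final chunk may be shorter than `batch_size`. For example:

```python
# Chunking idiom from _split_batches, demonstrated on a plain list.
def split_batches(data, batch_size):
    """Yield successive batch_size-sized chunks from data."""
    for i in range(0, len(data), batch_size):
        yield data[i:i + batch_size]

print(list(split_batches(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```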
def _load_mapping(self, mapping):
'Load data for a single step.'
mapping['oid_as_pk'] = bool(mapping.get('fields', {}).get('Id'))
(job_id, local_ids_for_batch) = self._create_job(mapping)
result = self._wait_for_job(job_id)
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
| 5,149,793,367,251,173,000
|
Load data for a single step.
|
cumulusci/tasks/bulkdata.py
|
_load_mapping
|
davidmreed/CumulusCI
|
python
|
def _load_mapping(self, mapping):
mapping['oid_as_pk'] = bool(mapping.get('fields', {}).get('Id'))
(job_id, local_ids_for_batch) = self._create_job(mapping)
result = self._wait_for_job(job_id)
self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
return result
|
def _create_job(self, mapping):
'Initiate a bulk insert and upload batches to run in parallel.'
job_id = self.bulk.create_insert_job(mapping['sf_object'], contentType='CSV')
self.logger.info(' Created bulk job {}'.format(job_id))
local_ids_for_batch = {}
for (batch_file, local_ids) in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(' Uploaded batch {}'.format(batch_id))
self.bulk.close_job(job_id)
return (job_id, local_ids_for_batch)
| 7,123,044,622,465,607,000
|
Initiate a bulk insert and upload batches to run in parallel.
|
cumulusci/tasks/bulkdata.py
|
_create_job
|
davidmreed/CumulusCI
|
python
|
def _create_job(self, mapping):
job_id = self.bulk.create_insert_job(mapping['sf_object'], contentType='CSV')
self.logger.info(' Created bulk job {}'.format(job_id))
local_ids_for_batch = {}
for (batch_file, local_ids) in self._get_batches(mapping):
batch_id = self.bulk.post_batch(job_id, batch_file)
local_ids_for_batch[batch_id] = local_ids
self.logger.info(' Uploaded batch {}'.format(batch_id))
self.bulk.close_job(job_id)
return (job_id, local_ids_for_batch)
|
def _get_batches(self, mapping, batch_size=10000):
'Get data from the local db'
action = mapping.get('action', 'insert')
fields = mapping.get('fields', {}).copy()
static = mapping.get('static', {})
lookups = mapping.get('lookups', {})
record_type = mapping.get('record_type')
if ((action == 'insert') and ('Id' in fields)):
del fields['Id']
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append('RecordTypeId')
query = "SELECT Id FROM RecordType WHERE SObjectType='{0}'AND DeveloperName = '{1}' LIMIT 1"
record_type_id = self.sf.query(query.format(mapping.get('sf_object'), record_type))['records'][0]['Id']
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return (batch_file, writer, batch_ids)
(batch_file, writer, batch_ids) = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
pkey = row[0]
row = (list(row[1:]) + list(static.values()))
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
if (not (total_rows % batch_size)):
batch_file.seek(0)
self.logger.info(' Processing batch {}'.format(batch_num))
(yield (batch_file, batch_ids))
(batch_file, writer, batch_ids) = start_batch()
batch_num += 1
if batch_ids:
batch_file.seek(0)
(yield (batch_file, batch_ids))
self.logger.info(' Prepared {} rows for import to {}'.format(total_rows, mapping['sf_object']))
| 9,015,177,815,813,372,000
|
Get data from the local db
|
cumulusci/tasks/bulkdata.py
|
_get_batches
|
davidmreed/CumulusCI
|
python
|
def _get_batches(self, mapping, batch_size=10000):
action = mapping.get('action', 'insert')
fields = mapping.get('fields', {}).copy()
static = mapping.get('static', {})
lookups = mapping.get('lookups', {})
record_type = mapping.get('record_type')
if ((action == 'insert') and ('Id' in fields)):
del fields['Id']
columns = []
columns.extend(fields.keys())
columns.extend(lookups.keys())
columns.extend(static.keys())
if record_type:
columns.append('RecordTypeId')
query = "SELECT Id FROM RecordType WHERE SObjectType='{0}'AND DeveloperName = '{1}' LIMIT 1"
record_type_id = self.sf.query(query.format(mapping.get('sf_object'), record_type))['records'][0]['Id']
query = self._query_db(mapping)
total_rows = 0
batch_num = 1
def start_batch():
batch_file = io.BytesIO()
writer = unicodecsv.writer(batch_file)
writer.writerow(columns)
batch_ids = []
return (batch_file, writer, batch_ids)
(batch_file, writer, batch_ids) = start_batch()
for row in query.yield_per(batch_size):
total_rows += 1
pkey = row[0]
row = (list(row[1:]) + list(static.values()))
if record_type:
row.append(record_type_id)
writer.writerow([self._convert(value) for value in row])
batch_ids.append(pkey)
if (not (total_rows % batch_size)):
batch_file.seek(0)
self.logger.info(' Processing batch {}'.format(batch_num))
(yield (batch_file, batch_ids))
(batch_file, writer, batch_ids) = start_batch()
batch_num += 1
if batch_ids:
batch_file.seek(0)
(yield (batch_file, batch_ids))
self.logger.info(' Prepared {} rows for import to {}'.format(total_rows, mapping['sf_object']))
|
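`_get_batches` builds CSV batches in memory and yields each one once `batch_size` rows have been written, flushing a final partial batch at the end. A reduced sketch of that loop, using the stdlib `csv` module over `io.StringIO` (the original uses `unicodecsv` over `io.BytesIO`, which writes encoded bytes instead of text); the row data here is hypothetical:

```python
import csv
import io

def get_batches(rows, columns, batch_size=2):
    def start_batch():
        batch_file = io.StringIO()
        writer = csv.writer(batch_file)
        writer.writerow(columns)  # every batch file carries its own header
        return batch_file, writer, []

    batch_file, writer, batch_ids = start_batch()
    total_rows = 0
    for pkey, *values in rows:
        total_rows += 1
        writer.writerow(values)
        batch_ids.append(pkey)
        if not total_rows % batch_size:  # batch is full: hand it off
            batch_file.seek(0)
            yield batch_file, batch_ids
            batch_file, writer, batch_ids = start_batch()
    if batch_ids:  # flush the final partial batch
        batch_file.seek(0)
        yield batch_file, batch_ids

rows = [(1, 'a'), (2, 'b'), (3, 'c')]
for f, ids in get_batches(rows, ['Name']):
    print(ids, f.read().splitlines())
# [1, 2] ['Name', 'a', 'b']
# [3] ['Name', 'c']
```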
def _query_db(self, mapping):
'Build a query to retrieve data from the local db.\n\n Includes columns from the mapping\n as well as joining to the id tables to get real SF ids\n for lookups.\n '
model = self.models[mapping.get('table')]
fields = mapping.get('fields', {}).copy()
if mapping['oid_as_pk']:
del fields['Id']
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get('lookups', {}).copy()
for lookup in lookups.values():
lookup['aliased_table'] = aliased(self.metadata.tables['{}_sf_ids'.format(lookup['table'])])
columns.append(lookup['aliased_table'].columns.sf_id)
query = self.session.query(*columns)
if (('record_type' in mapping) and hasattr(model, 'record_type')):
query = query.filter((model.record_type == mapping['record_type']))
if ('filters' in mapping):
filter_args = []
for f in mapping['filters']:
filter_args.append(text(f))
query = query.filter(*filter_args)
for (sf_field, lookup) in lookups.items():
key_field = get_lookup_key_field(lookup, sf_field)
value_column = getattr(model, key_field)
query = query.outerjoin(lookup['aliased_table'], (lookup['aliased_table'].columns.id == value_column))
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
| 4,219,606,446,555,931,600
|
Build a query to retrieve data from the local db.
Includes columns from the mapping
as well as joining to the id tables to get real SF ids
for lookups.
|
cumulusci/tasks/bulkdata.py
|
_query_db
|
davidmreed/CumulusCI
|
python
|
def _query_db(self, mapping):
'Build a query to retrieve data from the local db.\n\n Includes columns from the mapping\n as well as joining to the id tables to get real SF ids\n for lookups.\n '
model = self.models[mapping.get('table')]
fields = mapping.get('fields', {}).copy()
if mapping['oid_as_pk']:
del fields['Id']
id_column = model.__table__.primary_key.columns.keys()[0]
columns = [getattr(model, id_column)]
for f in fields.values():
columns.append(model.__table__.columns[f])
lookups = mapping.get('lookups', {}).copy()
for lookup in lookups.values():
lookup['aliased_table'] = aliased(self.metadata.tables['{}_sf_ids'.format(lookup['table'])])
columns.append(lookup['aliased_table'].columns.sf_id)
query = self.session.query(*columns)
if (('record_type' in mapping) and hasattr(model, 'record_type')):
query = query.filter((model.record_type == mapping['record_type']))
if ('filters' in mapping):
filter_args = []
for f in mapping['filters']:
filter_args.append(text(f))
query = query.filter(*filter_args)
for (sf_field, lookup) in lookups.items():
key_field = get_lookup_key_field(lookup, sf_field)
value_column = getattr(model, key_field)
query = query.outerjoin(lookup['aliased_table'], (lookup['aliased_table'].columns.id == value_column))
lookup_column = getattr(model, key_field)
query = query.order_by(lookup_column)
self.logger.info(str(query))
return query
|
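The heart of `_query_db` is an outer join from the data table to an aliased `<table>_sf_ids` mapping table so each lookup resolves to a real Salesforce id. A minimal sketch of that join (not the CumulusCI implementation), assuming an in-memory SQLite database and hypothetical `contacts`/`accounts_sf_ids` tables:

```python
from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, select)

engine = create_engine('sqlite:///:memory:')
metadata = MetaData()
contacts = Table('contacts', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('account_id', Integer))
account_sf_ids = Table('accounts_sf_ids', metadata,
                       Column('id', Integer, primary_key=True),
                       Column('sf_id', String(18)))
metadata.create_all(engine)

# Alias the id table so the same mapping table can be joined repeatedly
# under different lookups, as the loop above does.
ids = account_sf_ids.alias()
stmt = (select(contacts.c.id, ids.c.sf_id)
        .select_from(contacts.outerjoin(ids, ids.c.id == contacts.c.account_id))
        .order_by(contacts.c.account_id))

with engine.connect() as conn:
    print(conn.execute(stmt).fetchall())
```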
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
'Get the job results and store inserted SF Ids in a new table'
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for (batch_id, local_ids) in local_ids_for_batch.items():
try:
results_url = '{}/job/{}/batch/{}/result'.format(self.bulk.endpoint, job_id, batch_id)
with _download_file(results_url, self.bulk) as f:
self.logger.info(' Downloaded results for batch {}'.format(batch_id))
self._store_inserted_ids_for_batch(f, local_ids, id_table_name, conn)
self.logger.info(' Updated {} for batch {}'.format(id_table_name, batch_id))
except Exception:
self.logger.error('Could not download batch results: {}'.format(batch_id))
continue
self.session.commit()
| 4,848,924,005,118,796,000
|
Get the job results and store inserted SF Ids in a new table
|
cumulusci/tasks/bulkdata.py
|
_store_inserted_ids
|
davidmreed/CumulusCI
|
python
|
def _store_inserted_ids(self, mapping, job_id, local_ids_for_batch):
id_table_name = self._reset_id_table(mapping)
conn = self.session.connection()
for (batch_id, local_ids) in local_ids_for_batch.items():
try:
results_url = '{}/job/{}/batch/{}/result'.format(self.bulk.endpoint, job_id, batch_id)
with _download_file(results_url, self.bulk) as f:
self.logger.info(' Downloaded results for batch {}'.format(batch_id))
self._store_inserted_ids_for_batch(f, local_ids, id_table_name, conn)
self.logger.info(' Updated {} for batch {}'.format(id_table_name, batch_id))
except Exception:
self.logger.error('Could not download batch results: {}'.format(batch_id))
continue
self.session.commit()
|
def _reset_id_table(self, mapping):
'Create an empty table to hold the inserted SF Ids'
if (not hasattr(self, '_initialized_id_tables')):
self._initialized_id_tables = set()
id_table_name = '{}_sf_ids'.format(mapping['table'])
if (id_table_name not in self._initialized_id_tables):
if (id_table_name in self.metadata.tables):
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(id_table_name, self.metadata, Column('id', Unicode(255), primary_key=True), Column('sf_id', Unicode(18)))
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
| -5,108,331,602,492,572,000
|
Create an empty table to hold the inserted SF Ids
|
cumulusci/tasks/bulkdata.py
|
_reset_id_table
|
davidmreed/CumulusCI
|
python
|
def _reset_id_table(self, mapping):
if (not hasattr(self, '_initialized_id_tables')):
self._initialized_id_tables = set()
id_table_name = '{}_sf_ids'.format(mapping['table'])
if (id_table_name not in self._initialized_id_tables):
if (id_table_name in self.metadata.tables):
self.metadata.remove(self.metadata.tables[id_table_name])
id_table = Table(id_table_name, self.metadata, Column('id', Unicode(255), primary_key=True), Column('sf_id', Unicode(18)))
if id_table.exists():
id_table.drop()
id_table.create()
self._initialized_id_tables.add(id_table_name)
return id_table_name
|
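`Table.exists()` as used above was deprecated and later removed in SQLAlchemy 1.4, so on modern versions the drop-and-recreate step would go through the inspector instead. A hedged sketch of the equivalent, assuming an in-memory SQLite engine and a hypothetical table name:

```python
from sqlalchemy import Column, MetaData, Table, Unicode, create_engine, inspect

engine = create_engine('sqlite:///:memory:')
metadata = MetaData()
id_table = Table('contacts_sf_ids', metadata,
                 Column('id', Unicode(255), primary_key=True),
                 Column('sf_id', Unicode(18)))
# Drop any stale copy before recreating an empty table.
if inspect(engine).has_table('contacts_sf_ids'):
    id_table.drop(engine)
id_table.create(engine)
```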
def _get_mapping_for_table(self, table):
' Returns the first mapping for a table name '
for mapping in self.mappings.values():
if (mapping['table'] == table):
return mapping
| 1,712,170,791,723,465,700
|
Returns the first mapping for a table name
|
cumulusci/tasks/bulkdata.py
|
_get_mapping_for_table
|
davidmreed/CumulusCI
|
python
|
def _get_mapping_for_table(self, table):
' '
for mapping in self.mappings.values():
if (mapping['table'] == table):
return mapping
|
def produce_csv():
'Iterate over job results and prepare rows for id table'
reader = unicodecsv.reader(result_file)
next(reader)
i = 0
for (row, local_id) in zip(reader, local_ids):
if (row[1] == 'true'):
sf_id = row[0]
(yield '{},{}\n'.format(local_id, sf_id).encode('utf-8'))
else:
self.logger.warning(' Error on row {}: {}'.format(i, row[3]))
i += 1
| -6,944,854,214,281,007,000
|
Iterate over job results and prepare rows for id table
|
cumulusci/tasks/bulkdata.py
|
produce_csv
|
davidmreed/CumulusCI
|
python
|
def produce_csv():
reader = unicodecsv.reader(result_file)
next(reader)
i = 0
for (row, local_id) in zip(reader, local_ids):
if (row[1] == 'true'):
sf_id = row[0]
(yield '{},{}\n'.format(local_id, sf_id).encode('utf-8'))
else:
self.logger.warning(' Error on row {}: {}'.format(i, row[3]))
i += 1
|
def __init__(self, intro, title, checklist):
'Initializes Diagnostic with necessary attributes.\n\n Args:\n intro: A message to introduce the objectives and tasks of the diagnostic.\n title: The name of the diagnostic.\n checklist: An iterable of checkbase.Check objects to be run by the\n diagnostic.\n '
self.intro = intro
self.title = title
self.checklist = checklist
| -645,389,541,996,310,400
|
Initializes Diagnostic with necessary attributes.
Args:
intro: A message to introduce the objectives and tasks of the diagnostic.
title: The name of the diagnostic.
checklist: An iterable of checkbase.Check objects to be run by the
diagnostic.
|
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/diagnostics/diagnostic_base.py
|
__init__
|
KaranToor/MA450
|
python
|
def __init__(self, intro, title, checklist):
'Initializes Diagnostic with necessary attributes.\n\n Args:\n intro: A message to introduce the objectives and tasks of the diagnostic.\n title: The name of the diagnostic.\n checklist: An iterable of checkbase.Check objects to be run by the\n diagnostic.\n '
self.intro = intro
self.title = title
self.checklist = checklist
|
def RunChecks(self):
'Runs one or more checks, tries fixes, and outputs results.\n\n Returns:\n True if the diagnostic ultimately passed.\n '
self._Print(self.intro)
num_checks_passed = 0
for check in self.checklist:
(result, fixer) = self._RunCheck(check)
if properties.VALUES.core.disable_prompts.GetBool():
continue
num_retries = 0
while ((not result.passed) and fixer and (num_retries < self._MAX_RETRIES)):
num_retries += 1
should_check_again = fixer()
if should_check_again:
(result, fixer) = self._RunCheck(check, first_run=False)
else:
fixer = None
if ((not result.passed) and fixer and (num_retries == self._MAX_RETRIES)):
log.warn('Unable to fix {0} failure after {1} attempts.'.format(self.title, num_retries))
if result.passed:
num_checks_passed += 1
num_checks = len(self.checklist)
passed = (num_checks_passed == num_checks)
summary = '{check} ({num_passed}/{num_checks} checks) {passed}.\n'.format(check=self.title, num_passed=num_checks_passed, num_checks=num_checks, passed=('passed' if passed else 'failed'))
self._Print(summary, as_error=(not passed))
return passed
| 8,129,295,028,772,314,000
|
Runs one or more checks, tries fixes, and outputs results.
Returns:
True if the diagnostic ultimately passed.
|
google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/diagnostics/diagnostic_base.py
|
RunChecks
|
KaranToor/MA450
|
python
|
def RunChecks(self):
'Runs one or more checks, tries fixes, and outputs results.\n\n Returns:\n True if the diagnostic ultimately passed.\n '
self._Print(self.intro)
num_checks_passed = 0
for check in self.checklist:
(result, fixer) = self._RunCheck(check)
if properties.VALUES.core.disable_prompts.GetBool():
continue
num_retries = 0
while ((not result.passed) and fixer and (num_retries < self._MAX_RETRIES)):
num_retries += 1
should_check_again = fixer()
if should_check_again:
(result, fixer) = self._RunCheck(check, first_run=False)
else:
fixer = None
if ((not result.passed) and fixer and (num_retries == self._MAX_RETRIES)):
log.warn('Unable to fix {0} failure after {1} attempts.'.format(self.title, num_retries))
if result.passed:
num_checks_passed += 1
num_checks = len(self.checklist)
passed = (num_checks_passed == num_checks)
summary = '{check} ({num_passed}/{num_checks} checks) {passed}.\n'.format(check=self.title, num_passed=num_checks_passed, num_checks=num_checks, passed=('passed' if passed else 'failed'))
self._Print(summary, as_error=(not passed))
return passed
|
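`RunChecks` wraps each check in a bounded fix-and-recheck loop: a failing check with a fixer is retried up to `_MAX_RETRIES` times, and a fixer returning False ends the retries early. A stripped-down, self-contained sketch of that loop, with a hypothetical `check()` callable standing in for `checkbase.Check`:

```python
from collections import namedtuple

Result = namedtuple('Result', 'passed')
MAX_RETRIES = 5

def run_check_with_fixes(check):
    result, fixer = check()
    num_retries = 0
    while not result.passed and fixer and num_retries < MAX_RETRIES:
        num_retries += 1
        should_check_again = fixer()  # True if the fix may have worked
        if should_check_again:
            result, fixer = check()
        else:
            fixer = None  # fixer declined: stop retrying
    return result.passed

# Toy check that fails once, then passes after its fixer runs.
state = {'fixed': False}
def toy_check():
    return Result(passed=state['fixed']), lambda: state.update(fixed=True) or True

print(run_check_with_fixes(toy_check))  # True
```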
def __init__(self, mode='perspective', theta=0, phi=0, scale=1):
'\n mode : str\n camera mode ("ortho" or "perspective")\n\n theta: float\n angle around z axis (degrees)\n\n phi: float\n angle around x axis (degrees)\n\n scale: float\n scale factor\n\n view : array (4x4)\n '
self.trackball = Trackball(theta, phi)
self.aperture = 35
self.aspect = 1
self.near = 1
self.far = 100
self.mode = mode
self.scale = scale
self.zoom = 1
self.zoom_max = 5.0
self.zoom_min = 0.1
self.view = (glm.translate(0, 0, (- 3)) @ glm.scale(scale))
if (mode == 'ortho'):
self.proj = glm.ortho((- 1), (+ 1), (- 1), (+ 1), self.near, self.far)
else:
self.proj = glm.perspective(self.aperture, self.aspect, self.near, self.far)
self.transform = ((self.proj @ self.view) @ self.trackball.model.T)
| 2,593,600,571,595,353,000
|
mode : str
camera mode ("ortho" or "perspective")
theta: float
angle around z axis (degrees)
phi: float
angle around x axis (degrees)
scale: float
scale factor
view : array (4x4)
|
hypyp/ext/mpl3d/camera.py
|
__init__
|
FranckPrts/HyPyP
|
python
|
def __init__(self, mode='perspective', theta=0, phi=0, scale=1):
'\n mode : str\n camera mode ("ortho" or "perspective")\n\n theta: float\n angle around z axis (degrees)\n\n phi: float\n angle around x axis (degrees)\n\n scale: float\n scale factor\n\n view : array (4x4)\n '
self.trackball = Trackball(theta, phi)
self.aperture = 35
self.aspect = 1
self.near = 1
self.far = 100
self.mode = mode
self.scale = scale
self.zoom = 1
self.zoom_max = 5.0
self.zoom_min = 0.1
self.view = (glm.translate(0, 0, (- 3)) @ glm.scale(scale))
if (mode == 'ortho'):
self.proj = glm.ortho((- 1), (+ 1), (- 1), (+ 1), self.near, self.far)
else:
self.proj = glm.perspective(self.aperture, self.aspect, self.near, self.far)
self.transform = ((self.proj @ self.view) @ self.trackball.model.T)
|
def connect(self, axes, update):
'\n axes : matplotlib.Axes\n Axes where to connect this camera to\n\n update: function(transform)\n Function to be called with the new transform to update the scene\n (transform is a 4x4 matrix).\n '
self.figure = axes.get_figure()
self.axes = axes
self.update = update
self.mouse = None
self.cidpress = self.figure.canvas.mpl_connect('scroll_event', self.on_scroll)
self.cidpress = self.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.cidrelease = self.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.cidmotion = self.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def format_coord(*args):
phi = self.trackball.phi
theta = self.trackball.theta
return ('Θ : %.1f, ɸ: %.1f' % (theta, phi))
self.axes.format_coord = format_coord
| -1,127,770,746,137,528,400
|
axes : matplotlib.Axes
Axes where to connect this camera to
update: function(transform)
Function to be called with the new transform to update the scene
(transform is a 4x4 matrix).
|
hypyp/ext/mpl3d/camera.py
|
connect
|
FranckPrts/HyPyP
|
python
|
def connect(self, axes, update):
'\n axes : matplotlib.Axes\n Axes where to connect this camera to\n\n update: function(transform)\n Function to be called with the new transform to update the scene\n (transform is a 4x4 matrix).\n '
self.figure = axes.get_figure()
self.axes = axes
self.update = update
self.mouse = None
self.cidpress = self.figure.canvas.mpl_connect('scroll_event', self.on_scroll)
self.cidpress = self.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.cidrelease = self.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.cidmotion = self.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def format_coord(*args):
phi = self.trackball.phi
theta = self.trackball.theta
return ('Θ : %.1f, ɸ: %.1f' % (theta, phi))
self.axes.format_coord = format_coord
|
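`connect` wires the camera to matplotlib canvas events; note that it assigns both the scroll and button-press connection ids to `self.cidpress`, so the scroll handler's id is overwritten and `disconnect` never detaches it. A minimal, self-contained example of the underlying `mpl_connect` API:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()

def on_press(event):
    if event.inaxes is ax:
        print('press at', event.xdata, event.ydata)

def on_scroll(event):
    print('scroll', event.button)  # 'up' or 'down'

# Each handler gets its own connection id so it can be detached later.
cid_press = fig.canvas.mpl_connect('button_press_event', on_press)
cid_scroll = fig.canvas.mpl_connect('scroll_event', on_scroll)
# fig.canvas.mpl_disconnect(cid_press)
# plt.show()
```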
def on_scroll(self, event):
'\n Scroll event for zooming in/out\n '
if (event.inaxes != self.axes):
return
if (event.button == 'up'):
self.zoom = max((0.9 * self.zoom), self.zoom_min)
elif (event.button == 'down'):
self.zoom = min((1.1 * self.zoom), self.zoom_max)
self.axes.set_xlim((- self.zoom), self.zoom)
self.axes.set_ylim((- self.zoom), self.zoom)
self.figure.canvas.draw()
| 1,118,335,059,573,984,300
|
Scroll event for zooming in/out
|
hypyp/ext/mpl3d/camera.py
|
on_scroll
|
FranckPrts/HyPyP
|
python
|
def on_scroll(self, event):
'\n \n '
if (event.inaxes != self.axes):
return
if (event.button == 'up'):
self.zoom = max((0.9 * self.zoom), self.zoom_min)
elif (event.button == 'down'):
self.zoom = min((1.1 * self.zoom), self.zoom_max)
self.axes.set_xlim((- self.zoom), self.zoom)
self.axes.set_ylim((- self.zoom), self.zoom)
self.figure.canvas.draw()
|
def on_press(self, event):
'\n Press event to initiate a drag\n '
if (event.inaxes != self.axes):
return
self.mouse = (event.button, event.xdata, event.ydata)
| -3,210,899,453,409,284,600
|
Press event to initiate a drag
|
hypyp/ext/mpl3d/camera.py
|
on_press
|
FranckPrts/HyPyP
|
python
|
def on_press(self, event):
'\n \n '
if (event.inaxes != self.axes):
return
self.mouse = (event.button, event.xdata, event.ydata)
|
def on_motion(self, event):
'\n Motion event to rotate the scene\n '
if (self.mouse is None):
return
if (event.inaxes != self.axes):
return
(button, x, y) = (event.button, event.xdata, event.ydata)
(dx, dy) = ((x - self.mouse[1]), (y - self.mouse[2]))
self.mouse = (button, x, y)
self.trackball.drag_to(x, y, dx, dy)
self.transform = ((self.proj @ self.view) @ self.trackball.model.T)
self.update(self.transform)
self.figure.canvas.draw()
| -1,351,063,725,299,383,000
|
Motion event to rotate the scene
|
hypyp/ext/mpl3d/camera.py
|
on_motion
|
FranckPrts/HyPyP
|
python
|
def on_motion(self, event):
'\n \n '
if (self.mouse is None):
return
if (event.inaxes != self.axes):
return
(button, x, y) = (event.button, event.xdata, event.ydata)
(dx, dy) = ((x - self.mouse[1]), (y - self.mouse[2]))
self.mouse = (button, x, y)
self.trackball.drag_to(x, y, dx, dy)
self.transform = ((self.proj @ self.view) @ self.trackball.model.T)
self.update(self.transform)
self.figure.canvas.draw()
|
def on_release(self, event):
'\n End of drag event\n '
self.mouse = None
| 1,524,781,075,138,285,000
|
End of drag event
|
hypyp/ext/mpl3d/camera.py
|
on_release
|
FranckPrts/HyPyP
|
python
|
def on_release(self, event):
'\n \n '
self.mouse = None
|
def disconnect(self):
'\n Disconnect camera from the axes\n '
self.figure.canvas.mpl_disconnect(self.cidpress)
self.figure.canvas.mpl_disconnect(self.cidrelease)
self.figure.canvas.mpl_disconnect(self.cidmotion)
| -2,895,096,553,959,521,000
|
Disconnect camera from the axes
|
hypyp/ext/mpl3d/camera.py
|
disconnect
|
FranckPrts/HyPyP
|
python
|
def disconnect(self):
'\n \n '
self.figure.canvas.mpl_disconnect(self.cidpress)
self.figure.canvas.mpl_disconnect(self.cidrelease)
self.figure.canvas.mpl_disconnect(self.cidmotion)
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_init(bodies, multihyps, all_mgp, all_gp):
'\n test the init function\n '
clean()
gp_model = all_gp[f'{bodies}{multihyps}']
grid_params = {}
if ('2' in bodies):
grid_params['twobody'] = {'grid_num': [160], 'lower_bound': [0.02]}
if ('3' in bodies):
grid_params['threebody'] = {'grid_num': [31, 32, 33], 'lower_bound': ([0.02] * 3)}
lammps_location = f'{bodies}{multihyps}'
data = gp_model.training_statistics
try:
mgp_model = MappedGaussianProcess(grid_params=grid_params, unique_species=data['species'], n_cpus=1, lmp_file_name=lammps_location, var_map='simple')
except:
mgp_model = MappedGaussianProcess(grid_params=grid_params, unique_species=data['species'], n_cpus=1, lmp_file_name=lammps_location, var_map=None)
all_mgp[f'{bodies}{multihyps}'] = mgp_model
| -6,780,441,997,275,256,000
|
test the init function
|
tests/test_mgp.py
|
test_init
|
aaronchen0316/flare
|
python
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_init(bodies, multihyps, all_mgp, all_gp):
'\n \n '
clean()
gp_model = all_gp[f'{bodies}{multihyps}']
grid_params = {}
if ('2' in bodies):
grid_params['twobody'] = {'grid_num': [160], 'lower_bound': [0.02]}
if ('3' in bodies):
grid_params['threebody'] = {'grid_num': [31, 32, 33], 'lower_bound': ([0.02] * 3)}
lammps_location = f'{bodies}{multihyps}'
data = gp_model.training_statistics
try:
mgp_model = MappedGaussianProcess(grid_params=grid_params, unique_species=data['species'], n_cpus=1, lmp_file_name=lammps_location, var_map='simple')
except:
mgp_model = MappedGaussianProcess(grid_params=grid_params, unique_species=data['species'], n_cpus=1, lmp_file_name=lammps_location, var_map=None)
all_mgp[f'{bodies}{multihyps}'] = mgp_model
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_build_map(all_gp, all_mgp, bodies, multihyps):
'\n test the mapping for mc_simple kernel\n '
gp_model = all_gp[f'{bodies}{multihyps}']
mgp_model = all_mgp[f'{bodies}{multihyps}']
mgp_model.build_map(gp_model)
| 2,349,884,026,662,468,000
|
test the mapping for mc_simple kernel
|
tests/test_mgp.py
|
test_build_map
|
aaronchen0316/flare
|
python
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_build_map(all_gp, all_mgp, bodies, multihyps):
'\n \n '
gp_model = all_gp[f'{bodies}{multihyps}']
mgp_model = all_mgp[f'{bodies}{multihyps}']
mgp_model.build_map(gp_model)
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_write_model(all_mgp, bodies, multihyps):
'\n test the mapping for mc_simple kernel\n '
mgp_model = all_mgp[f'{bodies}{multihyps}']
mgp_model.write_model(f'my_mgp_{bodies}_{multihyps}')
mgp_model.write_model(f'my_mgp_{bodies}_{multihyps}', format='pickle')
with pytest.warns(Warning):
mgp_model.var_map = 'pca'
mgp_model.as_dict()
mgp_model.var_map = 'simple'
mgp_model.as_dict()
| -1,201,215,416,537,480,700
|
test the mapping for mc_simple kernel
|
tests/test_mgp.py
|
test_write_model
|
aaronchen0316/flare
|
python
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_write_model(all_mgp, bodies, multihyps):
'\n \n '
mgp_model = all_mgp[f'{bodies}{multihyps}']
mgp_model.write_model(f'my_mgp_{bodies}_{multihyps}')
mgp_model.write_model(f'my_mgp_{bodies}_{multihyps}', format='pickle')
with pytest.warns(Warning):
mgp_model.var_map = 'pca'
mgp_model.as_dict()
mgp_model.var_map = 'simple'
mgp_model.as_dict()
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_load_model(all_mgp, bodies, multihyps):
'\n test the mapping for mc_simple kernel\n '
name = f'my_mgp_{bodies}_{multihyps}.json'
all_mgp[f'{bodies}{multihyps}'] = MappedGaussianProcess.from_file(name)
os.remove(name)
name = f'my_mgp_{bodies}_{multihyps}.pickle'
all_mgp[f'{bodies}{multihyps}'] = MappedGaussianProcess.from_file(name)
os.remove(name)
| -506,470,629,265,809,540
|
test the mapping for mc_simple kernel
|
tests/test_mgp.py
|
test_load_model
|
aaronchen0316/flare
|
python
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_load_model(all_mgp, bodies, multihyps):
'\n \n '
name = f'my_mgp_{bodies}_{multihyps}.json'
all_mgp[f'{bodies}{multihyps}'] = MappedGaussianProcess.from_file(name)
os.remove(name)
name = f'my_mgp_{bodies}_{multihyps}.pickle'
all_mgp[f'{bodies}{multihyps}'] = MappedGaussianProcess.from_file(name)
os.remove(name)
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_cubic_spline(all_gp, all_mgp, bodies, multihyps):
'\n test the predict for mc_simple kernel\n '
mgp_model = all_mgp[f'{bodies}{multihyps}']
delta = 0.0001
if ('3' in bodies):
body_name = 'threebody'
elif ('2' in bodies):
body_name = 'twobody'
nmap = len(mgp_model.maps[body_name].maps)
print('nmap', nmap)
for i in range(nmap):
maxvalue = np.max(np.abs(mgp_model.maps[body_name].maps[i].mean.__coeffs__))
if (maxvalue > 0):
comp_code = mgp_model.maps[body_name].maps[i].species_code
if ('3' in bodies):
c_pt = np.array([[0.3, 0.4, 0.5]])
(c, cderv) = mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
cderv = cderv.reshape([(- 1)])
for j in range(3):
a_pt = deepcopy(c_pt)
b_pt = deepcopy(c_pt)
a_pt[0][j] += delta
b_pt[0][j] -= delta
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
num_derv = ((a - b) / (2 * delta))
print('spline', comp_code, num_derv, cderv[j])
assert np.isclose(num_derv, cderv[j], rtol=0.01)
elif ('2' in bodies):
center = (np.sum(mgp_model.maps[body_name].maps[i].bounds) / 2.0)
a_pt = np.array([[(center + delta)]])
b_pt = np.array([[(center - delta)]])
c_pt = np.array([[center]])
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
(c, cderv) = mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
cderv = cderv.reshape([(- 1)])[0]
num_derv = ((a - b) / (2 * delta))
print('spline', num_derv, cderv)
assert np.isclose(num_derv, cderv, rtol=0.01)
| 3,980,279,803,967,564,300
|
test the predict for mc_simple kernel
|
tests/test_mgp.py
|
test_cubic_spline
|
aaronchen0316/flare
|
python
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_cubic_spline(all_gp, all_mgp, bodies, multihyps):
'\n \n '
mgp_model = all_mgp[f'{bodies}{multihyps}']
delta = 0.0001
if ('3' in bodies):
body_name = 'threebody'
elif ('2' in bodies):
body_name = 'twobody'
nmap = len(mgp_model.maps[body_name].maps)
print('nmap', nmap)
for i in range(nmap):
maxvalue = np.max(np.abs(mgp_model.maps[body_name].maps[i].mean.__coeffs__))
if (maxvalue > 0):
comp_code = mgp_model.maps[body_name].maps[i].species_code
if ('3' in bodies):
c_pt = np.array([[0.3, 0.4, 0.5]])
(c, cderv) = mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
cderv = cderv.reshape([(- 1)])
for j in range(3):
a_pt = deepcopy(c_pt)
b_pt = deepcopy(c_pt)
a_pt[0][j] += delta
b_pt[0][j] -= delta
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
num_derv = ((a - b) / (2 * delta))
print('spline', comp_code, num_derv, cderv[j])
assert np.isclose(num_derv, cderv[j], rtol=0.01)
elif ('2' in bodies):
center = (np.sum(mgp_model.maps[body_name].maps[i].bounds) / 2.0)
a_pt = np.array([[(center + delta)]])
b_pt = np.array([[(center - delta)]])
c_pt = np.array([[center]])
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
(c, cderv) = mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
cderv = cderv.reshape([(- 1)])[0]
num_derv = ((a - b) / (2 * delta))
print('spline', num_derv, cderv)
assert np.isclose(num_derv, cderv, rtol=0.01)
|
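`test_cubic_spline` validates the spline's analytic derivatives against a central finite difference, (f(x + δ) − f(x − δ)) / 2δ. The core identity on a toy function:

```python
# Central-difference derivative check, as in the spline test above.
import numpy as np

def central_difference(f, x, delta=1e-4):
    return (f(x + delta) - f(x - delta)) / (2 * delta)

num_derv = central_difference(np.sin, 1.0)
print(num_derv, np.cos(1.0))  # both approx. 0.5403
assert np.isclose(num_derv, np.cos(1.0), rtol=0.01)
```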
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_predict(all_gp, all_mgp, bodies, multihyps):
'\n test the predict for mc_simple kernel\n '
gp_model = all_gp[f'{bodies}{multihyps}']
mgp_model = all_mgp[f'{bodies}{multihyps}']
nenv = 6
cell = (1.0 * np.eye(3))
cutoffs = gp_model.cutoffs
unique_species = gp_model.training_statistics['species']
(struc_test, f) = get_random_structure(cell, unique_species, nenv)
test_envi = env.AtomicEnvironment(struc_test, 0, cutoffs, cutoffs_mask=gp_model.hyps_mask)
if ('2' in bodies):
kernel_name = 'twobody'
elif ('3' in bodies):
kernel_name = 'threebody'
assert Parameters.compare_dict(gp_model.hyps_mask, mgp_model.maps[kernel_name].hyps_mask)
(gp_pred_en, gp_pred_envar) = gp_model.predict_local_energy_and_var(test_envi)
gp_pred = np.array([gp_model.predict(test_envi, (d + 1)) for d in range(3)]).T
print('mgp pred')
mgp_pred = mgp_model.predict(test_envi)
map_str = 'energy'
gp_pred_var = gp_pred_envar
print('mgp_en, gp_en', mgp_pred[3], gp_pred_en)
assert np.allclose(mgp_pred[3], gp_pred_en, rtol=0.002), f'{bodies} body {map_str} mapping is wrong'
print('mgp_pred', mgp_pred[0])
print('gp_pred', gp_pred[0])
print('isclose?', (mgp_pred[0] - gp_pred[0]), gp_pred[0])
assert np.allclose(mgp_pred[0], gp_pred[0], atol=0.001), f'{bodies} body {map_str} mapping is wrong'
if (mgp_model.var_map == 'simple'):
print(bodies, multihyps)
for i in range(struc_test.nat):
test_envi = env.AtomicEnvironment(struc_test, i, cutoffs, cutoffs_mask=gp_model.hyps_mask)
mgp_pred = mgp_model.predict(test_envi)
mgp_var = mgp_pred[1]
gp_var = predict_atom_diag_var(test_envi, gp_model, kernel_name)
print('mgp_var, gp_var', mgp_var, gp_var)
assert np.allclose(mgp_var, gp_var, rtol=0.01)
print('struc_test positions', struc_test.positions, struc_test.species_labels)
| 120,700,257,633,087,790
|
test the predict for mc_simple kernel
|
tests/test_mgp.py
|
test_predict
|
aaronchen0316/flare
|
python
|
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_predict(all_gp, all_mgp, bodies, multihyps):
'\n \n '
gp_model = all_gp[f'{bodies}{multihyps}']
mgp_model = all_mgp[f'{bodies}{multihyps}']
nenv = 6
cell = (1.0 * np.eye(3))
cutoffs = gp_model.cutoffs
unique_species = gp_model.training_statistics['species']
(struc_test, f) = get_random_structure(cell, unique_species, nenv)
test_envi = env.AtomicEnvironment(struc_test, 0, cutoffs, cutoffs_mask=gp_model.hyps_mask)
if ('2' in bodies):
kernel_name = 'twobody'
elif ('3' in bodies):
kernel_name = 'threebody'
assert Parameters.compare_dict(gp_model.hyps_mask, mgp_model.maps[kernel_name].hyps_mask)
(gp_pred_en, gp_pred_envar) = gp_model.predict_local_energy_and_var(test_envi)
gp_pred = np.array([gp_model.predict(test_envi, (d + 1)) for d in range(3)]).T
print('mgp pred')
mgp_pred = mgp_model.predict(test_envi)
map_str = 'energy'
gp_pred_var = gp_pred_envar
print('mgp_en, gp_en', mgp_pred[3], gp_pred_en)
assert np.allclose(mgp_pred[3], gp_pred_en, rtol=0.002), f'{bodies} body {map_str} mapping is wrong'
print('mgp_pred', mgp_pred[0])
print('gp_pred', gp_pred[0])
print('isclose?', (mgp_pred[0] - gp_pred[0]), gp_pred[0])
assert np.allclose(mgp_pred[0], gp_pred[0], atol=0.001), f'{bodies} body {map_str} mapping is wrong'
if (mgp_model.var_map == 'simple'):
print(bodies, multihyps)
for i in range(struc_test.nat):
test_envi = env.AtomicEnvironment(struc_test, i, cutoffs, cutoffs_mask=gp_model.hyps_mask)
mgp_pred = mgp_model.predict(test_envi)
mgp_var = mgp_pred[1]
gp_var = predict_atom_diag_var(test_envi, gp_model, kernel_name)
print('mgp_var, gp_var', mgp_var, gp_var)
assert np.allclose(mgp_var, gp_var, rtol=0.01)
print('struc_test positions', struc_test.positions, struc_test.species_labels)
|
@pytest.mark.skipif((not os.environ.get('lmp', False)), reason='lmp not found in environment: Please install LAMMPS and set the $lmp env. variable to point to the executable.')
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_lmp_predict(all_lmp, all_gp, all_mgp, bodies, multihyps):
'\n test the lammps implementation\n '
prefix = f'{bodies}{multihyps}'
mgp_model = all_mgp[prefix]
gp_model = all_gp[prefix]
lmp_calculator = all_lmp[prefix]
ase_calculator = FLARE_Calculator(gp_model, mgp_model, par=False, use_mapping=True)
np.random.seed(1)
cell = (np.diag(np.array([1, 1, 1])) * 4)
nenv = 10
unique_species = gp_model.training_statistics['species']
cutoffs = gp_model.cutoffs
(struc_test, f) = get_random_structure(cell, unique_species, nenv)
ase_atoms_flare = struc_test.to_ase_atoms()
ase_atoms_flare = FLARE_Atoms.from_ase_atoms(ase_atoms_flare)
ase_atoms_flare.set_calculator(ase_calculator)
ase_atoms_lmp = deepcopy(struc_test).to_ase_atoms()
ase_atoms_lmp.set_calculator(lmp_calculator)
try:
lmp_en = ase_atoms_lmp.get_potential_energy()
flare_en = ase_atoms_flare.get_potential_energy()
lmp_stress = ase_atoms_lmp.get_stress()
flare_stress = ase_atoms_flare.get_stress()
lmp_forces = ase_atoms_lmp.get_forces()
flare_forces = ase_atoms_flare.get_forces()
except Exception as e:
os.chdir(curr_path)
print(e)
raise e
os.chdir(curr_path)
print('energy', (lmp_en - flare_en), flare_en)
assert np.isclose(lmp_en, flare_en, atol=0.001)
print('force', (lmp_forces - flare_forces), flare_forces)
assert np.isclose(lmp_forces, flare_forces, atol=0.001).all()
print('stress', (lmp_stress - flare_stress), flare_stress)
assert np.isclose(lmp_stress, flare_stress, atol=0.001).all()
clean(prefix=prefix)
| -7,572,597,734,177,068,000
|
test the lammps implementation
|
tests/test_mgp.py
|
test_lmp_predict
|
aaronchen0316/flare
|
python
|
@pytest.mark.skipif((not os.environ.get('lmp', False)), reason='lmp not found in environment: Please install LAMMPS and set the $lmp env. variable to point to the executable.')
@pytest.mark.parametrize('bodies', body_list)
@pytest.mark.parametrize('multihyps', multi_list)
def test_lmp_predict(all_lmp, all_gp, all_mgp, bodies, multihyps):
'\n \n '
prefix = f'{bodies}{multihyps}'
mgp_model = all_mgp[prefix]
gp_model = all_gp[prefix]
lmp_calculator = all_lmp[prefix]
ase_calculator = FLARE_Calculator(gp_model, mgp_model, par=False, use_mapping=True)
np.random.seed(1)
cell = (np.diag(np.array([1, 1, 1])) * 4)
nenv = 10
unique_species = gp_model.training_statistics['species']
cutoffs = gp_model.cutoffs
(struc_test, f) = get_random_structure(cell, unique_species, nenv)
ase_atoms_flare = struc_test.to_ase_atoms()
ase_atoms_flare = FLARE_Atoms.from_ase_atoms(ase_atoms_flare)
ase_atoms_flare.set_calculator(ase_calculator)
ase_atoms_lmp = deepcopy(struc_test).to_ase_atoms()
ase_atoms_lmp.set_calculator(lmp_calculator)
try:
lmp_en = ase_atoms_lmp.get_potential_energy()
flare_en = ase_atoms_flare.get_potential_energy()
lmp_stress = ase_atoms_lmp.get_stress()
flare_stress = ase_atoms_flare.get_stress()
lmp_forces = ase_atoms_lmp.get_forces()
flare_forces = ase_atoms_flare.get_forces()
except Exception as e:
os.chdir(curr_path)
print(e)
raise e
os.chdir(curr_path)
print('energy', (lmp_en - flare_en), flare_en)
assert np.isclose(lmp_en, flare_en, atol=0.001)
print('force', (lmp_forces - flare_forces), flare_forces)
assert np.isclose(lmp_forces, flare_forces, atol=0.001).all()
print('stress', (lmp_stress - flare_stress), flare_stress)
assert np.isclose(lmp_stress, flare_stress, atol=0.001).all()
clean(prefix=prefix)
|
def __init__(self) -> None:
'\n Initializer to build the internal mapping\n '
self.attr_value_dict = {}
set_and_cards = self.construct_set_and_card_enums(MtgjsonAllPrintingsObject().to_json())
self.attr_value_dict.update(set_and_cards)
decks = self.construct_deck_enums(OUTPUT_PATH.joinpath('decks'))
self.attr_value_dict.update(decks)
keywords = OUTPUT_PATH.joinpath((MtgjsonStructuresObject().key_words + '.json'))
if (not keywords.is_file()):
LOGGER.warning(f'Unable to find {keywords}')
else:
with keywords.open(encoding='utf-8') as file:
content = json.load(file).get('data', {})
self.attr_value_dict.update({'keywords': content})
| 582,880,134,260,891,400
|
Initializer to build the internal mapping
|
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
__init__
|
0az/mtgjson
|
python
|
def __init__(self) -> None:
'\n \n '
self.attr_value_dict = {}
set_and_cards = self.construct_set_and_card_enums(MtgjsonAllPrintingsObject().to_json())
self.attr_value_dict.update(set_and_cards)
decks = self.construct_deck_enums(OUTPUT_PATH.joinpath('decks'))
self.attr_value_dict.update(decks)
keywords = OUTPUT_PATH.joinpath((MtgjsonStructuresObject().key_words + '.json'))
if (not keywords.is_file()):
LOGGER.warning(f'Unable to find {keywords}')
else:
with keywords.open(encoding='utf-8') as file:
content = json.load(file).get('data', {})
self.attr_value_dict.update({'keywords': content})
|
def construct_deck_enums(self, decks_directory: pathlib.Path) -> Dict[(str, Any)]:
'\n Given Decks Path, compile enums based on the types found in the files\n :param decks_directory: Path to the decks/ output directory\n :return Sorted list of enum options for each key\n '
type_map: Dict[(str, Any)] = {}
for (object_name, object_values) in self.deck_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for deck in decks_directory.glob('**/*.json'):
with deck.open(encoding='utf-8') as file:
content = json.load(file).get('data', {})
for key in content.keys():
if (key in self.deck_key_struct['deck']):
type_map['deck'][key].add(content[key])
return dict(sort_internal_lists(type_map))
| 3,883,244,289,213,423,000
|
Given Decks Path, compile enums based on the types found in the files
:param decks_directory: Path to the decks/ output directory
:return Sorted list of enum options for each key
|
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
construct_deck_enums
|
0az/mtgjson
|
python
|
def construct_deck_enums(self, decks_directory: pathlib.Path) -> Dict[(str, Any)]:
'\n Given Decks Path, compile enums based on the types found in the files\n :param decks_directory: Path to the decks/ output directory\n :return Sorted list of enum options for each key\n '
type_map: Dict[(str, Any)] = {}
for (object_name, object_values) in self.deck_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for deck in decks_directory.glob('**/*.json'):
with deck.open(encoding='utf-8') as file:
content = json.load(file).get('data', {})
for key in content.keys():
if (key in self.deck_key_struct['deck']):
type_map['deck'][key].add(content[key])
return dict(sort_internal_lists(type_map))
|
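`construct_deck_enums` walks every JSON file under the decks directory and accumulates the set of values seen for whitelisted keys. A standalone sketch of that pass, with hypothetical paths and key names:

```python
import json
import pathlib

def collect_enums(directory: pathlib.Path, keys: set) -> dict:
    values = {key: set() for key in keys}
    for path in directory.glob('**/*.json'):
        with path.open(encoding='utf-8') as file:
            content = json.load(file).get('data', {})
        for key in content:
            if key in keys:
                values[key].add(content[key])
    return values

# collect_enums(pathlib.Path('decks'), {'type', 'releaseDate'})
```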
def construct_set_and_card_enums(self, all_printing_content: Dict[(str, Any)]) -> Dict[(str, Any)]:
'\n Given AllPrintings, compile enums based on the types found in the file\n :param all_printing_content: AllPrintings internally\n :return Sorted list of enum options for each key\n '
type_map: Dict[(str, Any)] = {}
for (object_name, object_values) in self.set_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for set_contents in all_printing_content.values():
for set_contents_key in set_contents.keys():
if (set_contents_key in self.set_key_struct['set']):
type_map['set'][set_contents_key].add(set_contents.get(set_contents_key))
match_keys = set(self.set_key_struct['card']).union(set(self.set_key_struct.keys()))
for card in (set_contents.get('cards', []) + set_contents.get('tokens', [])):
for card_key in card.keys():
if (card_key not in match_keys):
continue
card_value = card[card_key]
if isinstance(card_value, dict):
for value in card_value.keys():
type_map['card'][card_key].add(value)
continue
if (not isinstance(card_value, list)):
type_map['card'][card_key].add(card_value)
continue
for single_value in card_value:
if (not isinstance(single_value, dict)):
type_map['card'][card_key].add(single_value)
continue
for attribute in self.set_key_struct.get(card_key, []):
type_map[card_key][attribute].add(single_value[attribute])
return dict(sort_internal_lists(type_map))
| -853,621,584,394,499,200
|
Given AllPrintings, compile enums based on the types found in the file
:param all_printing_content: AllPrintings internally
:return Sorted list of enum options for each key
|
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
construct_set_and_card_enums
|
0az/mtgjson
|
python
|
def construct_set_and_card_enums(self, all_printing_content: Dict[(str, Any)]) -> Dict[(str, Any)]:
'\n Given AllPrintings, compile enums based on the types found in the file\n :param all_printing_content: AllPrintings internally\n :return Sorted list of enum options for each key\n '
type_map: Dict[(str, Any)] = {}
for (object_name, object_values) in self.set_key_struct.items():
type_map[object_name] = dict()
for object_field_name in object_values:
type_map[object_name][object_field_name] = set()
for set_contents in all_printing_content.values():
for set_contents_key in set_contents.keys():
if (set_contents_key in self.set_key_struct['set']):
type_map['set'][set_contents_key].add(set_contents.get(set_contents_key))
match_keys = set(self.set_key_struct['card']).union(set(self.set_key_struct.keys()))
for card in (set_contents.get('cards', []) + set_contents.get('tokens', [])):
for card_key in card.keys():
if (card_key not in match_keys):
continue
card_value = card[card_key]
if isinstance(card_value, dict):
for value in card_value.keys():
type_map['card'][card_key].add(value)
continue
if (not isinstance(card_value, list)):
type_map['card'][card_key].add(card_value)
continue
for single_value in card_value:
if (not isinstance(single_value, dict)):
type_map['card'][card_key].add(single_value)
continue
for attribute in self.set_key_struct.get(card_key, []):
type_map[card_key][attribute].add(single_value[attribute])
return dict(sort_internal_lists(type_map))
|
def to_json(self) -> Dict[(str, Union[(Dict[(str, List[str])], List[str])])]:
'\n Support json.dump()\n :return: JSON serialized object\n '
return self.attr_value_dict
| 6,557,228,863,460,708,000
|
Support json.dump()
:return: JSON serialized object
|
mtgjson5/compiled_classes/mtgjson_enum_values.py
|
to_json
|
0az/mtgjson
|
python
|
def to_json(self) -> Dict[(str, Union[(Dict[(str, List[str])], List[str])])]:
'\n Support json.dump()\n :return: JSON serialized object\n '
return self.attr_value_dict
|
def testProjectAttachmentRemovedMessagePayload(self):
'Test ProjectAttachmentRemovedMessagePayload'
pass
| -7,816,304,920,314,342,000
|
Test ProjectAttachmentRemovedMessagePayload
|
python/test/test_project_attachment_removed_message_payload.py
|
testProjectAttachmentRemovedMessagePayload
|
dlens/dlxapi
|
python
|
def testProjectAttachmentRemovedMessagePayload(self):
pass
|
def __init__(__self__, *, event_time: str, metadata: '_meta.v1.outputs.ObjectMeta', action: Optional[str]=None, api_version: Optional[str]=None, deprecated_count: Optional[int]=None, deprecated_first_timestamp: Optional[str]=None, deprecated_last_timestamp: Optional[str]=None, deprecated_source: Optional['_core.v1.outputs.EventSource']=None, kind: Optional[str]=None, note: Optional[str]=None, reason: Optional[str]=None, regarding: Optional['_core.v1.outputs.ObjectReference']=None, related: Optional['_core.v1.outputs.ObjectReference']=None, reporting_controller: Optional[str]=None, reporting_instance: Optional[str]=None, series: Optional['outputs.EventSeries']=None, type: Optional[str]=None):
"\n Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.\n :param str event_time: eventTime is the time when this Event was first observed. It is required.\n :param str action: action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.\n :param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n :param int deprecated_count: deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param str deprecated_first_timestamp: deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param str deprecated_last_timestamp: deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param '_core.v1.EventSourceArgs' deprecated_source: deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n :param str note: note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.\n :param str reason: reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.\n :param '_core.v1.ObjectReferenceArgs' regarding: regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.\n :param '_core.v1.ObjectReferenceArgs' related: related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.\n :param str reporting_controller: reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.\n :param str reporting_instance: reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.\n :param 'EventSeriesArgs' series: series is data about the Event series this event represents or nil if it's a singleton Event.\n :param str type: type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.\n "
pulumi.set(__self__, 'event_time', event_time)
pulumi.set(__self__, 'metadata', metadata)
if (action is not None):
pulumi.set(__self__, 'action', action)
if (api_version is not None):
pulumi.set(__self__, 'api_version', 'events.k8s.io/v1')
if (deprecated_count is not None):
pulumi.set(__self__, 'deprecated_count', deprecated_count)
if (deprecated_first_timestamp is not None):
pulumi.set(__self__, 'deprecated_first_timestamp', deprecated_first_timestamp)
if (deprecated_last_timestamp is not None):
pulumi.set(__self__, 'deprecated_last_timestamp', deprecated_last_timestamp)
if (deprecated_source is not None):
pulumi.set(__self__, 'deprecated_source', deprecated_source)
if (kind is not None):
pulumi.set(__self__, 'kind', 'Event')
if (note is not None):
pulumi.set(__self__, 'note', note)
if (reason is not None):
pulumi.set(__self__, 'reason', reason)
if (regarding is not None):
pulumi.set(__self__, 'regarding', regarding)
if (related is not None):
pulumi.set(__self__, 'related', related)
if (reporting_controller is not None):
pulumi.set(__self__, 'reporting_controller', reporting_controller)
if (reporting_instance is not None):
pulumi.set(__self__, 'reporting_instance', reporting_instance)
if (series is not None):
pulumi.set(__self__, 'series', series)
if (type is not None):
pulumi.set(__self__, 'type', type)
| -5,236,375,828,272,288,000
|
Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.
:param str event_time: eventTime is the time when this Event was first observed. It is required.
:param str action: action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.
:param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param int deprecated_count: deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
:param str deprecated_first_timestamp: deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
:param str deprecated_last_timestamp: deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
:param '_core.v1.EventSourceArgs' deprecated_source: deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
:param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param str note: note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
:param str reason: reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.
:param '_core.v1.ObjectReferenceArgs' regarding: regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
:param '_core.v1.ObjectReferenceArgs' related: related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
:param str reporting_controller: reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
:param str reporting_instance: reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
:param 'EventSeriesArgs' series: series is data about the Event series this event represents or nil if it's a singleton Event.
:param str type: type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
__init__
|
sunbing81/pulumi-kubernetes
|
python
|
def __init__(__self__, *, event_time: str, metadata: '_meta.v1.outputs.ObjectMeta', action: Optional[str]=None, api_version: Optional[str]=None, deprecated_count: Optional[int]=None, deprecated_first_timestamp: Optional[str]=None, deprecated_last_timestamp: Optional[str]=None, deprecated_source: Optional['_core.v1.outputs.EventSource']=None, kind: Optional[str]=None, note: Optional[str]=None, reason: Optional[str]=None, regarding: Optional['_core.v1.outputs.ObjectReference']=None, related: Optional['_core.v1.outputs.ObjectReference']=None, reporting_controller: Optional[str]=None, reporting_instance: Optional[str]=None, series: Optional['outputs.EventSeries']=None, type: Optional[str]=None):
"\n Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.\n :param str event_time: eventTime is the time when this Event was first observed. It is required.\n :param str action: action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.\n :param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n :param int deprecated_count: deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param str deprecated_first_timestamp: deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param str deprecated_last_timestamp: deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param '_core.v1.EventSourceArgs' deprecated_source: deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.\n :param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n :param str note: note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.\n :param str reason: reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.\n :param '_core.v1.ObjectReferenceArgs' regarding: regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.\n :param '_core.v1.ObjectReferenceArgs' related: related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.\n :param str reporting_controller: reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.\n :param str reporting_instance: reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.\n :param 'EventSeriesArgs' series: series is data about the Event series this event represents or nil if it's a singleton Event.\n :param str type: type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.\n "
pulumi.set(__self__, 'event_time', event_time)
pulumi.set(__self__, 'metadata', metadata)
if (action is not None):
pulumi.set(__self__, 'action', action)
if (api_version is not None):
pulumi.set(__self__, 'api_version', 'events.k8s.io/v1')
if (deprecated_count is not None):
pulumi.set(__self__, 'deprecated_count', deprecated_count)
if (deprecated_first_timestamp is not None):
pulumi.set(__self__, 'deprecated_first_timestamp', deprecated_first_timestamp)
if (deprecated_last_timestamp is not None):
pulumi.set(__self__, 'deprecated_last_timestamp', deprecated_last_timestamp)
if (deprecated_source is not None):
pulumi.set(__self__, 'deprecated_source', deprecated_source)
if (kind is not None):
pulumi.set(__self__, 'kind', 'Event')
if (note is not None):
pulumi.set(__self__, 'note', note)
if (reason is not None):
pulumi.set(__self__, 'reason', reason)
if (regarding is not None):
pulumi.set(__self__, 'regarding', regarding)
if (related is not None):
pulumi.set(__self__, 'related', related)
if (reporting_controller is not None):
pulumi.set(__self__, 'reporting_controller', reporting_controller)
if (reporting_instance is not None):
pulumi.set(__self__, 'reporting_instance', reporting_instance)
if (series is not None):
pulumi.set(__self__, 'series', series)
if (type is not None):
pulumi.set(__self__, 'type', type)
|
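A minimal plain-Python sketch of the accessor pattern this generated class uses, pairing a snake_case attribute with its camelCase wire name. EventSketch and its dict backing store are illustrative stand-ins, not part of the pulumi SDK, which routes through pulumi.set/pulumi.get instead.

class EventSketch:
    def __init__(self, event_time: str):
        # hypothetical backing store standing in for Pulumi's property bag
        self._values = {'event_time': event_time}

    @property
    def event_time(self) -> str:
        # plays the role of @pulumi.getter(name='eventTime'): the camelCase
        # wire name is exposed through this snake_case accessor
        return self._values['event_time']

e = EventSketch('2021-01-01T00:00:00Z')
assert e.event_time == '2021-01-01T00:00:00Z'
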
@property
@pulumi.getter(name='eventTime')
def event_time(self) -> str:
'\n eventTime is the time when this Event was first observed. It is required.\n '
return pulumi.get(self, 'event_time')
| -6,286,688,132,395,195,000
|
eventTime is the time when this Event was first observed. It is required.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
event_time
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='eventTime')
def event_time(self) -> str:
'\n \n '
return pulumi.get(self, 'event_time')
|
@property
@pulumi.getter
def action(self) -> Optional[str]:
'\n action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.\n '
return pulumi.get(self, 'action')
| -4,584,377,481,890,546,700
|
action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
action
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def action(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'action')
|
@property
@pulumi.getter(name='apiVersion')
def api_version(self) -> Optional[str]:
'\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n '
return pulumi.get(self, 'api_version')
| 2,459,156,068,463,855,600
|
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
api_version
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='apiVersion')
def api_version(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'api_version')
|
@property
@pulumi.getter(name='deprecatedCount')
def deprecated_count(self) -> Optional[int]:
'\n deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.\n '
return pulumi.get(self, 'deprecated_count')
| 2,836,554,730,426,721,000
|
deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
deprecated_count
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='deprecatedCount')
def deprecated_count(self) -> Optional[int]:
'\n \n '
return pulumi.get(self, 'deprecated_count')
|
@property
@pulumi.getter(name='deprecatedFirstTimestamp')
def deprecated_first_timestamp(self) -> Optional[str]:
'\n deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.\n '
return pulumi.get(self, 'deprecated_first_timestamp')
| 5,915,968,717,683,260,000
|
deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
deprecated_first_timestamp
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='deprecatedFirstTimestamp')
def deprecated_first_timestamp(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'deprecated_first_timestamp')
|
@property
@pulumi.getter(name='deprecatedLastTimestamp')
def deprecated_last_timestamp(self) -> Optional[str]:
'\n deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.\n '
return pulumi.get(self, 'deprecated_last_timestamp')
| 126,720,299,518,873,340
|
deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
deprecated_last_timestamp
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='deprecatedLastTimestamp')
def deprecated_last_timestamp(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'deprecated_last_timestamp')
|
@property
@pulumi.getter(name='deprecatedSource')
def deprecated_source(self) -> Optional['_core.v1.outputs.EventSource']:
'\n deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.\n '
return pulumi.get(self, 'deprecated_source')
| -6,137,881,131,470,587,000
|
deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
deprecated_source
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='deprecatedSource')
def deprecated_source(self) -> Optional['_core.v1.outputs.EventSource']:
'\n \n '
return pulumi.get(self, 'deprecated_source')
|
@property
@pulumi.getter
def kind(self) -> Optional[str]:
'\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n '
return pulumi.get(self, 'kind')
| 2,147,348,048,314,226,700
|
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
kind
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def kind(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'kind')
|
@property
@pulumi.getter
def note(self) -> Optional[str]:
'\n note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.\n '
return pulumi.get(self, 'note')
| 8,895,543,807,794,856,000
|
note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
note
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def note(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'note')
|
@property
@pulumi.getter
def reason(self) -> Optional[str]:
'\n reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.\n '
return pulumi.get(self, 'reason')
| -2,490,660,303,746,905,000
|
reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
reason
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def reason(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'reason')
|
@property
@pulumi.getter
def regarding(self) -> Optional['_core.v1.outputs.ObjectReference']:
"\n regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.\n "
return pulumi.get(self, 'regarding')
| 2,603,348,630,503,215,000
|
regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
regarding
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def regarding(self) -> Optional['_core.v1.outputs.ObjectReference']:
"\n \n "
return pulumi.get(self, 'regarding')
|
@property
@pulumi.getter
def related(self) -> Optional['_core.v1.outputs.ObjectReference']:
'\n related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.\n '
return pulumi.get(self, 'related')
| -352,630,234,162,584,600
|
related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
related
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def related(self) -> Optional['_core.v1.outputs.ObjectReference']:
'\n \n '
return pulumi.get(self, 'related')
|
@property
@pulumi.getter(name='reportingController')
def reporting_controller(self) -> Optional[str]:
'\n reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.\n '
return pulumi.get(self, 'reporting_controller')
| 283,252,810,756,854,530
|
reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
reporting_controller
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='reportingController')
def reporting_controller(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'reporting_controller')
|
@property
@pulumi.getter(name='reportingInstance')
def reporting_instance(self) -> Optional[str]:
'\n reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.\n '
return pulumi.get(self, 'reporting_instance')
| 3,961,336,271,352,025,600
|
reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
reporting_instance
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='reportingInstance')
def reporting_instance(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'reporting_instance')
|
@property
@pulumi.getter
def series(self) -> Optional['outputs.EventSeries']:
"\n series is data about the Event series this event represents or nil if it's a singleton Event.\n "
return pulumi.get(self, 'series')
| 8,046,471,293,108,567,000
|
series is data about the Event series this event represents or nil if it's a singleton Event.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
series
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def series(self) -> Optional['outputs.EventSeries']:
"\n \n "
return pulumi.get(self, 'series')
|
@property
@pulumi.getter
def type(self) -> Optional[str]:
'\n type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.\n '
return pulumi.get(self, 'type')
| -1,256,674,124,653,289,200
|
type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
type
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def type(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'type')
|
def __init__(__self__, *, count: int, last_observed_time: str):
'\n EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.\n :param int count: count is the number of occurrences in this series up to the last heartbeat time.\n :param str last_observed_time: lastObservedTime is the time when last Event from the series was seen before last heartbeat.\n '
pulumi.set(__self__, 'count', count)
pulumi.set(__self__, 'last_observed_time', last_observed_time)
| -1,628,136,964,268,435,000
|
EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.
:param int count: count is the number of occurrences in this series up to the last heartbeat time.
:param str last_observed_time: lastObservedTime is the time when last Event from the series was seen before last heartbeat.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
__init__
|
sunbing81/pulumi-kubernetes
|
python
|
def __init__(__self__, *, count: int, last_observed_time: str):
'\n EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.\n :param int count: count is the number of occurrences in this series up to the last heartbeat time.\n :param str last_observed_time: lastObservedTime is the time when last Event from the series was seen before last heartbeat.\n '
pulumi.set(__self__, 'count', count)
pulumi.set(__self__, 'last_observed_time', last_observed_time)
|
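A hedged construction sketch for this output type (normally Pulumi materializes it for you); the module path is inferred from the record's path field and assumes pulumi_kubernetes is installed.

from pulumi_kubernetes.events.v1.outputs import EventSeries  # path assumed from the record above

series = EventSeries(count=3, last_observed_time='2021-01-01T00:00:00Z')
assert series.count == 3
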
@property
@pulumi.getter
def count(self) -> int:
'\n count is the number of occurrences in this series up to the last heartbeat time.\n '
return pulumi.get(self, 'count')
| -2,723,830,724,659,995,600
|
count is the number of occurrences in this series up to the last heartbeat time.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
count
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter
def count(self) -> int:
'\n \n '
return pulumi.get(self, 'count')
|
@property
@pulumi.getter(name='lastObservedTime')
def last_observed_time(self) -> str:
'\n lastObservedTime is the time when last Event from the series was seen before last heartbeat.\n '
return pulumi.get(self, 'last_observed_time')
| 7,748,666,772,300,174,000
|
lastObservedTime is the time when last Event from the series was seen before last heartbeat.
|
sdk/python/pulumi_kubernetes/events/v1/outputs.py
|
last_observed_time
|
sunbing81/pulumi-kubernetes
|
python
|
@property
@pulumi.getter(name='lastObservedTime')
def last_observed_time(self) -> str:
'\n \n '
return pulumi.get(self, 'last_observed_time')
|
def _get_interface(self):
'\n Getter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)\n\n YANG Description: Reference to a base interface. If a reference to a\nsubinterface is required, this leaf must be specified\nto indicate the base interface.\n '
return self.__interface
| 6,688,500,057,853,465,000
|
Getter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
_get_interface
|
ABitMoreDepth/napalm-yang
|
python
|
def _get_interface(self):
'\n Getter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)\n\n YANG Description: Reference to a base interface. If a reference to a\nsubinterface is required, this leaf must be specified\nto indicate the base interface.\n '
return self.__interface
|
def _set_interface(self, v, load=False):
'\n Setter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_interface is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_interface() directly.\n\n YANG Description: Reference to a base interface. If a reference to a\nsubinterface is required, this leaf must be specified\nto indicate the base interface.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name='interface', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'interface must be of a type compatible with leafref', 'defined-type': 'leafref', 'generated-type': 'YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=\'http://openconfig.net/yang/interfaces/ip\', defining_module=\'openconfig-if-ip\', yang_type=\'leafref\', is_config=True)'})
self.__interface = t
if hasattr(self, '_set'):
self._set()
| -3,127,520,860,684,085,000
|
Setter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
_set_interface
|
ABitMoreDepth/napalm-yang
|
python
|
def _set_interface(self, v, load=False):
'\n Setter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_interface is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_interface() directly.\n\n YANG Description: Reference to a base interface. If a reference to a\nsubinterface is required, this leaf must be specified\nto indicate the base interface.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name='interface', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'interface must be of a type compatible with leafref', 'defined-type': 'leafref', 'generated-type': 'YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=\'http://openconfig.net/yang/interfaces/ip\', defining_module=\'openconfig-if-ip\', yang_type=\'leafref\', is_config=True)'})
self.__interface = t
if hasattr(self, '_set'):
self._set()
|
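In pyangbind-generated classes like this one, the _get_/_set_ pair is normally wired into a Python property, so callers assign rather than invoke the private setter directly. A sketch under that assumption; the instantiation of the generated `config` class is hypothetical.

cfg = config()               # hypothetical instance of the generated config class
cfg.interface = 'Ethernet1'  # property assignment routes through _set_interface()
print(cfg.interface)         # reads back via _get_interface()
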
def _get_subinterface(self):
'\n Getter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)\n\n YANG Description: Reference to a subinterface -- this requires the base\ninterface to be specified using the interface leaf in\nthis container. If only a reference to a base interface\nis requuired, this leaf should not be set.\n '
return self.__subinterface
| -1,505,700,832,138,368,300
|
Getter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
_get_subinterface
|
ABitMoreDepth/napalm-yang
|
python
|
def _get_subinterface(self):
'\n Getter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)\n\n YANG Description: Reference to a subinterface -- this requires the base\ninterface to be specified using the interface leaf in\nthis container. If only a reference to a base interface\nis requuired, this leaf should not be set.\n '
return self.__subinterface
|
def _set_subinterface(self, v, load=False):
'\n Setter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_subinterface is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_subinterface() directly.\n\n YANG Description: Reference to a subinterface -- this requires the base\ninterface to be specified using the interface leaf in\nthis container. If only a reference to a base interface\nis requuired, this leaf should not be set.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name='subinterface', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'subinterface must be of a type compatible with leafref', 'defined-type': 'leafref', 'generated-type': 'YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=\'http://openconfig.net/yang/interfaces/ip\', defining_module=\'openconfig-if-ip\', yang_type=\'leafref\', is_config=True)'})
self.__subinterface = t
if hasattr(self, '_set'):
self._set()
| -1,205,504,949,720,681,200
|
Setter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
_set_subinterface
|
ABitMoreDepth/napalm-yang
|
python
|
def _set_subinterface(self, v, load=False):
'\n Setter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_subinterface is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_subinterface() directly.\n\n YANG Description: Reference to a subinterface -- this requires the base\ninterface to be specified using the interface leaf in\nthis container. If only a reference to a base interface\nis requuired, this leaf should not be set.\n '
if hasattr(v, '_utype'):
v = v._utype(v)
try:
t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name='subinterface', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)
except (TypeError, ValueError):
raise ValueError({'error-string': 'subinterface must be of a type compatible with leafref', 'defined-type': 'leafref', 'generated-type': 'YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=\'http://openconfig.net/yang/interfaces/ip\', defining_module=\'openconfig-if-ip\', yang_type=\'leafref\', is_config=True)'})
self.__subinterface = t
if hasattr(self, '_set'):
self._set()
|
def expect_warnings(*messages, **kw):
'Context manager which expects one or more warnings.\n\n With no arguments, squelches all SAWarning and RemovedIn20Warning emitted via\n sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise\n pass string expressions that will match selected warnings via regex;\n all non-matching warnings are sent through.\n\n The expect version **asserts** that the warnings were in fact seen.\n\n Note that the test suite sets SAWarning warnings to raise exceptions.\n\n '
return _expect_warnings((sa_exc.RemovedIn20Warning, sa_exc.SAWarning), messages, **kw)
| 3,824,676,256,934,366,700
|
Context manager which expects one or more warnings.
With no arguments, squelches all SAWarning and RemovedIn20Warning emitted via
sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise
pass string expressions that will match selected warnings via regex;
all non-matching warnings are sent through.
The expect version **asserts** that the warnings were in fact seen.
Note that the test suite sets SAWarning warnings to raise exceptions.
|
lib/sqlalchemy/testing/assertions.py
|
expect_warnings
|
ai-mocap/sqlalchemy
|
python
|
def expect_warnings(*messages, **kw):
'Context manager which expects one or more warnings.\n\n With no arguments, squelches all SAWarning and RemovedIn20Warning emitted via\n sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise\n pass string expressions that will match selected warnings via regex;\n all non-matching warnings are sent through.\n\n The expect version **asserts** that the warnings were in fact seen.\n\n Note that the test suite sets SAWarning warnings to raise exceptions.\n\n '
return _expect_warnings((sa_exc.RemovedIn20Warning, sa_exc.SAWarning), messages, **kw)
|
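A hedged usage sketch of this context manager; the regex and the warning-emitting call are illustrative, not from the source.

from sqlalchemy.testing.assertions import expect_warnings

def test_emits_expected_warning():
    # asserts that at least one emitted warning matches the regex below;
    # non-matching warnings are sent through unchanged
    with expect_warnings('Dialect .* does not support'):
        do_something_that_warns()  # hypothetical code path using sqlalchemy.util.warn
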
@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
'Context manager which expects one or more warnings on specific\n dialects.\n\n The expect version **asserts** that the warnings were in fact seen.\n\n '
spec = db_spec(db)
if (isinstance(db, util.string_types) and (not spec(config._current))):
(yield)
else:
with expect_warnings(*messages, **kw):
(yield)
| 5,831,124,492,960,674,000
|
Context manager which expects one or more warnings on specific
dialects.
The expect version **asserts** that the warnings were in fact seen.
|
lib/sqlalchemy/testing/assertions.py
|
expect_warnings_on
|
ai-mocap/sqlalchemy
|
python
|
@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
'Context manager which expects one or more warnings on specific\n dialects.\n\n The expect version **asserts** that the warnings were in fact seen.\n\n '
spec = db_spec(db)
if (isinstance(db, util.string_types) and (not spec(config._current))):
(yield)
else:
with expect_warnings(*messages, **kw):
(yield)
|
def emits_warning(*messages):
'Decorator form of expect_warnings().\n\n Note that emits_warning does **not** assert that the warnings\n were in fact seen.\n\n '
@decorator
def decorate(fn, *args, **kw):
with expect_warnings(*messages, assert_=False):
return fn(*args, **kw)
return decorate
| 7,570,357,996,637,143,000
|
Decorator form of expect_warnings().
Note that emits_warning does **not** assert that the warnings
were in fact seen.
|
lib/sqlalchemy/testing/assertions.py
|
emits_warning
|
ai-mocap/sqlalchemy
|
python
|
def emits_warning(*messages):
'Decorator form of expect_warnings().\n\n Note that emits_warning does **not** assert that the warnings\n were in fact seen.\n\n '
@decorator
def decorate(fn, *args, **kw):
with expect_warnings(*messages, assert_=False):
return fn(*args, **kw)
return decorate
|
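And the decorator form in use; unlike expect_warnings it only squelches (assert_=False), as the docstring notes. The decorated body is illustrative.

from sqlalchemy.testing.assertions import emits_warning

@emits_warning('Some expected warning text')
def test_squelched():
    # matching SAWarnings are squelched, but their absence does not fail the test
    do_something_that_warns()  # hypothetical
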
def emits_warning_on(db, *messages):
'Mark a test as emitting a warning on a specific dialect.\n\n With no arguments, squelches all SAWarning failures. Or pass one or more\n strings; these will be matched to the root of the warning description by\n warnings.filterwarnings().\n\n Note that emits_warning_on does **not** assert that the warnings\n were in fact seen.\n\n '
@decorator
def decorate(fn, *args, **kw):
with expect_warnings_on(db, *messages, assert_=False):
return fn(*args, **kw)
return decorate
| -945,007,861,536,751,500
|
Mark a test as emitting a warning on a specific dialect.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
Note that emits_warning_on does **not** assert that the warnings
were in fact seen.
|
lib/sqlalchemy/testing/assertions.py
|
emits_warning_on
|
ai-mocap/sqlalchemy
|
python
|
def emits_warning_on(db, *messages):
'Mark a test as emitting a warning on a specific dialect.\n\n With no arguments, squelches all SAWarning failures. Or pass one or more\n strings; these will be matched to the root of the warning description by\n warnings.filterwarnings().\n\n Note that emits_warning_on does **not** assert that the warnings\n were in fact seen.\n\n '
@decorator
def decorate(fn, *args, **kw):
with expect_warnings_on(db, *messages, assert_=False):
return fn(*args, **kw)
return decorate
|
def uses_deprecated(*messages):
'Mark a test as immune from fatal deprecation warnings.\n\n With no arguments, squelches all SADeprecationWarning failures.\n Or pass one or more strings; these will be matched to the root\n of the warning description by warnings.filterwarnings().\n\n As a special case, you may pass a function name prefixed with //\n and it will be re-written as needed to match the standard warning\n verbiage emitted by the sqlalchemy.util.deprecated decorator.\n\n Note that uses_deprecated does **not** assert that the warnings\n were in fact seen.\n\n '
@decorator
def decorate(fn, *args, **kw):
with expect_deprecated(*messages, assert_=False):
return fn(*args, **kw)
return decorate
| -6,339,039,025,393,598,000
|
Mark a test as immune from fatal deprecation warnings.
With no arguments, squelches all SADeprecationWarning failures.
Or pass one or more strings; these will be matched to the root
of the warning description by warnings.filterwarnings().
As a special case, you may pass a function name prefixed with //
and it will be re-written as needed to match the standard warning
verbiage emitted by the sqlalchemy.util.deprecated decorator.
Note that uses_deprecated does **not** assert that the warnings
were in fact seen.
|
lib/sqlalchemy/testing/assertions.py
|
uses_deprecated
|
ai-mocap/sqlalchemy
|
python
|
def uses_deprecated(*messages):
'Mark a test as immune from fatal deprecation warnings.\n\n With no arguments, squelches all SADeprecationWarning failures.\n Or pass one or more strings; these will be matched to the root\n of the warning description by warnings.filterwarnings().\n\n As a special case, you may pass a function name prefixed with //\n and it will be re-written as needed to match the standard warning\n verbiage emitted by the sqlalchemy.util.deprecated decorator.\n\n Note that uses_deprecated does **not** assert that the warnings\n were in fact seen.\n\n '
@decorator
def decorate(fn, *args, **kw):
with expect_deprecated(*messages, assert_=False):
return fn(*args, **kw)
return decorate
|
def global_cleanup_assertions():
'Check things that have to be finalized at the end of a test suite.\n\n Hardcoded at the moment, a modular system can be built here\n to support things like PG prepared transactions, tables all\n dropped, etc.\n\n '
_assert_no_stray_pool_connections()
| 1,179,778,543,830,617,600
|
Check things that have to be finalized at the end of a test suite.
Hardcoded at the moment, a modular system can be built here
to support things like PG prepared transactions, tables all
dropped, etc.
|
lib/sqlalchemy/testing/assertions.py
|
global_cleanup_assertions
|
ai-mocap/sqlalchemy
|
python
|
def global_cleanup_assertions():
'Check things that have to be finalized at the end of a test suite.\n\n Hardcoded at the moment, a modular system can be built here\n to support things like PG prepared transactions, tables all\n dropped, etc.\n\n '
_assert_no_stray_pool_connections()
|
def eq_(a, b, msg=None):
'Assert a == b, with repr messaging on failure.'
assert (a == b), (msg or ('%r != %r' % (a, b)))
| 4,165,152,613,312,176,600
|
Assert a == b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
eq_
|
ai-mocap/sqlalchemy
|
python
|
def eq_(a, b, msg=None):
assert (a == b), (msg or ('%r != %r' % (a, b)))
|
def ne_(a, b, msg=None):
'Assert a != b, with repr messaging on failure.'
assert (a != b), (msg or ('%r == %r' % (a, b)))
| 1,292,732,351,336,334,300
|
Assert a != b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
ne_
|
ai-mocap/sqlalchemy
|
python
|
def ne_(a, b, msg=None):
assert (a != b), (msg or ('%r == %r' % (a, b)))
|
def le_(a, b, msg=None):
'Assert a <= b, with repr messaging on failure.'
    assert (a <= b), (msg or ('%r > %r' % (a, b)))
| 6,696,283,727,868,860,000
|
Assert a <= b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
le_
|
ai-mocap/sqlalchemy
|
python
|
def le_(a, b, msg=None):
    assert (a <= b), (msg or ('%r > %r' % (a, b)))
|
def is_(a, b, msg=None):
'Assert a is b, with repr messaging on failure.'
assert (a is b), (msg or ('%r is not %r' % (a, b)))
| 2,543,761,103,162,304,000
|
Assert a is b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
is_
|
ai-mocap/sqlalchemy
|
python
|
def is_(a, b, msg=None):
assert (a is b), (msg or ('%r is not %r' % (a, b)))
|
def is_not(a, b, msg=None):
'Assert a is not b, with repr messaging on failure.'
assert (a is not b), (msg or ('%r is %r' % (a, b)))
| 8,681,517,946,847,553,000
|
Assert a is not b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
is_not
|
ai-mocap/sqlalchemy
|
python
|
def is_not(a, b, msg=None):
assert (a is not b), (msg or ('%r is %r' % (a, b)))
|
def in_(a, b, msg=None):
'Assert a in b, with repr messaging on failure.'
assert (a in b), (msg or ('%r not in %r' % (a, b)))
| -3,984,596,821,298,299,000
|
Assert a in b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
in_
|
ai-mocap/sqlalchemy
|
python
|
def in_(a, b, msg=None):
assert (a in b), (msg or ('%r not in %r' % (a, b)))
|
def not_in(a, b, msg=None):
    'Assert a not in b, with repr messaging on failure.'
assert (a not in b), (msg or ('%r is in %r' % (a, b)))
| 6,827,271,713,271,919,000
|
Assert a not in b, with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
not_in
|
ai-mocap/sqlalchemy
|
python
|
def not_in(a, b, msg=None):
assert (a not in b), (msg or ('%r is in %r' % (a, b)))
|
def startswith_(a, fragment, msg=None):
'Assert a.startswith(fragment), with repr messaging on failure.'
assert a.startswith(fragment), (msg or ('%r does not start with %r' % (a, fragment)))
| 1,283,718,687,394,437,600
|
Assert a.startswith(fragment), with repr messaging on failure.
|
lib/sqlalchemy/testing/assertions.py
|
startswith_
|
ai-mocap/sqlalchemy
|
python
|
def startswith_(a, fragment, msg=None):
assert a.startswith(fragment), (msg or ('%r does not start with %r' % (a, fragment)))
|
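These helpers are thin assert wrappers with repr-rich failure messages; a few self-explanatory calls, all of which pass:

eq_(2 + 2, 4)
ne_('a', 'b')
le_(1, 2)
is_(None, None)
is_not(1, '1')
in_('a', 'abc')
not_in('z', 'abc')
startswith_('sqlalchemy', 'sql')
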
def _assert_proper_exception_context(exception):
'assert that any exception we\'re catching does not have a __context__\n without a __cause__, and that __suppress_context__ is never set.\n\n Python 3 will report nested as exceptions as "during the handling of\n error X, error Y occurred". That\'s not what we want to do. we want\n these exceptions in a cause chain.\n\n '
if (not util.py3k):
return
if ((exception.__context__ is not exception.__cause__) and (not exception.__suppress_context__)):
assert False, ('Exception %r was correctly raised but did not set a cause, within context %r as its cause.' % (exception, exception.__context__))
| -363,986,719,845,809,100
|
assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
Python 3 will report nested exceptions as "during the handling of
error X, error Y occurred". That's not what we want to do. We want
these exceptions in a cause chain.
|
lib/sqlalchemy/testing/assertions.py
|
_assert_proper_exception_context
|
ai-mocap/sqlalchemy
|
python
|
def _assert_proper_exception_context(exception):
'assert that any exception we\'re catching does not have a __context__\n without a __cause__, and that __suppress_context__ is never set.\n\n Python 3 will report nested as exceptions as "during the handling of\n error X, error Y occurred". That\'s not what we want to do. we want\n these exceptions in a cause chain.\n\n '
if (not util.py3k):
return
if ((exception.__context__ is not exception.__cause__) and (not exception.__suppress_context__)):
assert False, ('Exception %r was correctly raised but did not set a cause, within context %r as its cause.' % (exception, exception.__context__))
|
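The convention this check enforces is plain Python 3 exception chaining: wrap with `raise ... from ...` so __cause__ is set explicitly rather than relying on the implicit __context__. A minimal sketch (it raises, as intended):

try:
    raise KeyError('missing')
except KeyError as orig:
    # explicit chaining sets __cause__ as well as __context__; a bare
    # `raise ValueError(...)` here would set only __context__, which the
    # check above flags
    raise ValueError('lookup failed') from orig
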
def assert_unordered_result(self, result, cls, *expected):
'As assert_result, but the order of objects is not considered.\n\n The algorithm is very expensive but not a big deal for the small\n numbers of rows that the test suite manipulates.\n '
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = {immutabledict(e) for e in expected}
for wrong in util.itertools_filterfalse((lambda o: isinstance(o, cls)), found):
fail(('Unexpected type "%s", expected "%s"' % (type(wrong).__name__, cls.__name__)))
if (len(found) != len(expected)):
fail(('Unexpected object count "%s", expected "%s"' % (len(found), len(expected))))
NOVALUE = object()
def _compare_item(obj, spec):
for (key, value) in spec.items():
if isinstance(value, tuple):
try:
self.assert_unordered_result(getattr(obj, key), value[0], *value[1])
except AssertionError:
return False
elif (getattr(obj, key, NOVALUE) != value):
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(('Expected %s instance with attributes %s not found.' % (cls.__name__, repr(expected_item))))
return True
| -4,552,712,219,864,721,000
|
As assert_result, but the order of objects is not considered.
The algorithm is very expensive but not a big deal for the small
numbers of rows that the test suite manipulates.
|
lib/sqlalchemy/testing/assertions.py
|
assert_unordered_result
|
ai-mocap/sqlalchemy
|
python
|
def assert_unordered_result(self, result, cls, *expected):
'As assert_result, but the order of objects is not considered.\n\n The algorithm is very expensive but not a big deal for the small\n numbers of rows that the test suite manipulates.\n '
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = {immutabledict(e) for e in expected}
for wrong in util.itertools_filterfalse((lambda o: isinstance(o, cls)), found):
fail(('Unexpected type "%s", expected "%s"' % (type(wrong).__name__, cls.__name__)))
if (len(found) != len(expected)):
fail(('Unexpected object count "%s", expected "%s"' % (len(found), len(expected))))
NOVALUE = object()
def _compare_item(obj, spec):
for (key, value) in spec.items():
if isinstance(value, tuple):
try:
self.assert_unordered_result(getattr(obj, key), value[0], *value[1])
except AssertionError:
return False
elif (getattr(obj, key, NOVALUE) != value):
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(('Expected %s instance with attributes %s not found.' % (cls.__name__, repr(expected_item))))
return True
|
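A hedged call sketch; session, User, and Address are hypothetical fixtures. Tuple-valued specs recurse, as _compare_item shows.

self.assert_unordered_result(
    session.query(User).all(), User,
    {'id': 1, 'name': 'ed'},
    {'id': 2, 'name': 'wendy',
     'addresses': (Address, [{'id': 1}])},  # nested (cls, [specs]) checked recursively
)
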
@parameterized.parameters(((n_ref, n_alt_fraction) for n_ref in [1000, 10000, 100000, 1000000] for n_alt_fraction in [0.0, 0.01, 0.02]))
def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):
"Tests that we don't blow up when the coverage gets really high."
caller = PlaceholderVariantCaller(0.01, 100)
n_alt = int((n_alt_fraction * n_ref))
(gq, likelihoods) = caller._calc_reference_confidence(n_ref, (n_ref + n_alt))
self.assertTrue(np.isfinite(likelihoods).all(), 'Non-finite likelihoods {}'.format(likelihoods))
self.assertEqual(100, gq)
| -7,789,392,651,805,580,000
|
Tests that we don't blow up when the coverage gets really high.
|
deeptrio/variant_caller_test.py
|
test_handles_large_reference_counts
|
FrogEnthusiast7/deepvariant
|
python
|
@parameterized.parameters(((n_ref, n_alt_fraction) for n_ref in [1000, 10000, 100000, 1000000] for n_alt_fraction in [0.0, 0.01, 0.02]))
def test_handles_large_reference_counts(self, n_ref, n_alt_fraction):
caller = PlaceholderVariantCaller(0.01, 100)
n_alt = int((n_alt_fraction * n_ref))
(gq, likelihoods) = caller._calc_reference_confidence(n_ref, (n_ref + n_alt))
self.assertTrue(np.isfinite(likelihoods).all(), 'Non-finite likelihoods {}'.format(likelihoods))
self.assertEqual(100, gq)
|
def update(self):
'Update game state.'
Menu.bg_offset += 2
if (Menu.bg_offset > (0.85 * Constants.WINDOW_SIZE.x)):
Menu.bg_dimness_current += Menu.bg_diminish_rate
if (Menu.bg_dimness_current >= 255):
Menu.current_bg = ((Menu.current_bg + 1) % len(Menu.backgrounds))
Menu.bg_offset = Menu.bg_offset_at_start
Menu.bg_dimness_current = 255
elif (Menu.bg_dimness_current > Menu.bg_dimness_peak):
Menu.bg_dimness_current -= Menu.bg_diminish_rate
| 774,171,782,656,281,000
|
Update game state.
|
menu.py
|
update
|
marax27/pyNoid
|
python
|
def update(self):
Menu.bg_offset += 2
if (Menu.bg_offset > (0.85 * Constants.WINDOW_SIZE.x)):
Menu.bg_dimness_current += Menu.bg_diminish_rate
if (Menu.bg_dimness_current >= 255):
Menu.current_bg = ((Menu.current_bg + 1) % len(Menu.backgrounds))
Menu.bg_offset = Menu.bg_offset_at_start
Menu.bg_dimness_current = 255
elif (Menu.bg_dimness_current > Menu.bg_dimness_peak):
Menu.bg_dimness_current -= Menu.bg_diminish_rate
|
def handleEvent(self, e):
'Process relevant events.'
for i in self.menu.elem:
i.handleEvent(e)
if self.menu.elem[0].isPressed():
self.fading = True
elif self.menu.elem[1].isPressed():
self.is_open = False
| -5,300,897,899,891,764,000
|
Process relevant events.
|
menu.py
|
handleEvent
|
marax27/pyNoid
|
python
|
def handleEvent(self, e):
for i in self.menu.elem:
i.handleEvent(e)
if self.menu.elem[0].isPressed():
self.fading = True
elif self.menu.elem[1].isPressed():
self.is_open = False
|
def render(self, renderer):
'Render scene.'
rect = ((Constants.WINDOW_SIZE.x - Menu.bg_offset), 0, *Constants.WINDOW_SIZE)
renderer.copy(Menu.backgrounds[Menu.current_bg], None, rect)
renderer.fill((0, 0, Constants.WINDOW_SIZE.x, Constants.WINDOW_SIZE.y), (0, 0, 0, Menu.bg_dimness_current))
self.title.render(renderer)
self.credits.render(renderer)
self.menu.render(renderer)
if self.render_content:
self.render_content.render(renderer)
if self.fading:
self.fader.draw(renderer)
if self.fader.finished():
self.fading = False
self.fader.reset()
self.choice = 0
| -2,090,481,727,746,035,200
|
Render scene.
|
menu.py
|
render
|
marax27/pyNoid
|
python
|
def render(self, renderer):
rect = ((Constants.WINDOW_SIZE.x - Menu.bg_offset), 0, *Constants.WINDOW_SIZE)
renderer.copy(Menu.backgrounds[Menu.current_bg], None, rect)
renderer.fill((0, 0, Constants.WINDOW_SIZE.x, Constants.WINDOW_SIZE.y), (0, 0, 0, Menu.bg_dimness_current))
self.title.render(renderer)
self.credits.render(renderer)
self.menu.render(renderer)
if self.render_content:
self.render_content.render(renderer)
if self.fading:
self.fader.draw(renderer)
if self.fader.finished():
self.fading = False
self.fader.reset()
self.choice = 0
|
def isOpen(self):
'Returns False if GameInstance should be no longer active.'
return self.is_open
| -5,757,840,685,984,857,000
|
Returns False if GameInstance should be no longer active.
|
menu.py
|
isOpen
|
marax27/pyNoid
|
python
|
def isOpen(self):
return self.is_open
|
def dict_scrape(POS, dictionaryfile='as_freq.txt'):
'Scrapes a dictionary for a given part of speech. POS tags in POS_tags.\n\n POS(str), dictionaryfile(str-of-filename) -> list-of-strings\n '
if (POS in POS_tags):
with open(dictionaryfile) as to_scrape:
for line in to_scrape:
if (POS in line):
lemmas.append(line)
for line in lemmas:
i = 0
for char in line[:44]:
if (char not in alphabet):
i = (i + 1)
lemmas_cleaned.append((line[i:].strip().replace('*', '').replace('?', '') + '\n'))
return lemmas_cleaned
| 2,654,576,855,969,723,000
|
Scrapes a dictionary for a given part of speech. POS tags in POS_tags.
POS(str), dictionaryfile(str-of-filename) -> list-of-strings
|
dictionaries/archives/dict_scrape.py
|
dict_scrape
|
tykniess/muspilli
|
python
|
def dict_scrape(POS, dictionaryfile='as_freq.txt'):
'Scrapes a dictionary for a given part of speech. POS tags in POS_tags.\n\n POS(str), dictionaryfile(str-of-filename) -> list-of-strings\n '
if (POS in POS_tags):
with open(dictionaryfile) as to_scrape:
for line in to_scrape:
if (POS in line):
lemmas.append(line)
for line in lemmas:
i = 0
for char in line[:44]:
if (char not in alphabet):
i = (i + 1)
        lemmas_cleaned.append((line[i:].strip().replace('*', '').replace('?', '') + '\n'))
return lemmas_cleaned
|
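The body above leans on module globals (POS_tags, alphabet, lemmas, lemmas_cleaned) that are never initialized locally. A self-contained sketch under those assumptions, keeping the original's offset heuristic over the first 44 columns:

def dict_scrape_local(POS, POS_tags, alphabet, dictionaryfile='as_freq.txt'):
    """Self-contained variant: the globals become parameters, the lists are local."""
    lemmas, lemmas_cleaned = [], []
    if POS not in POS_tags:
        return lemmas_cleaned
    with open(dictionaryfile) as to_scrape:
        for line in to_scrape:
            if POS in line:
                lemmas.append(line)
    for line in lemmas:
        # count non-alphabet characters in the headword column (first 44 chars)
        i = sum(1 for char in line[:44] if char not in alphabet)
        lemmas_cleaned.append(line[i:].strip().replace('*', '').replace('?', '') + '\n')
    return lemmas_cleaned
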
@property
def product_id(self):
' Currently OrderBook only supports a single product even though it is stored as a list of products. '
return self.products[0]
| 2,245,109,685,871,720,400
|
Currently OrderBook only supports a single product even though it is stored as a list of products.
|
cbpro/order_book.py
|
product_id
|
1M15M3/coinbasepro-python
|
python
|
@property
def product_id(self):
' '
return self.products[0]
|
def __init__(self, watermark_text=None, watermark_text_size=None, watermark_position=None, watermark_rotation_angle=None, is_watermark_mandatory=None, watermark_intensity=None):
'Watermark - a model defined in Swagger'
self._watermark_text = None
self._watermark_text_size = None
self._watermark_position = None
self._watermark_rotation_angle = None
self._is_watermark_mandatory = None
self._watermark_intensity = None
self.discriminator = None
if (watermark_text is not None):
self.watermark_text = watermark_text
if (watermark_text_size is not None):
self.watermark_text_size = watermark_text_size
if (watermark_position is not None):
self.watermark_position = watermark_position
if (watermark_rotation_angle is not None):
self.watermark_rotation_angle = watermark_rotation_angle
if (is_watermark_mandatory is not None):
self.is_watermark_mandatory = is_watermark_mandatory
if (watermark_intensity is not None):
self.watermark_intensity = watermark_intensity
| 5,573,022,992,493,580,000
|
Watermark - a model defined in Swagger
|
laserfiche_api/models/watermark.py
|
__init__
|
Layer8Err/laserfiche-api
|
python
|
def __init__(self, watermark_text=None, watermark_text_size=None, watermark_position=None, watermark_rotation_angle=None, is_watermark_mandatory=None, watermark_intensity=None):
self._watermark_text = None
self._watermark_text_size = None
self._watermark_position = None
self._watermark_rotation_angle = None
self._is_watermark_mandatory = None
self._watermark_intensity = None
self.discriminator = None
if (watermark_text is not None):
self.watermark_text = watermark_text
if (watermark_text_size is not None):
self.watermark_text_size = watermark_text_size
if (watermark_position is not None):
self.watermark_position = watermark_position
if (watermark_rotation_angle is not None):
self.watermark_rotation_angle = watermark_rotation_angle
if (is_watermark_mandatory is not None):
self.is_watermark_mandatory = is_watermark_mandatory
if (watermark_intensity is not None):
self.watermark_intensity = watermark_intensity
|
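A hedged construction sketch for this Swagger-generated model; the module path is inferred from the record's path field and the field values are illustrative.

from laserfiche_api.models.watermark import Watermark  # path assumed from the record above

wm = Watermark(watermark_text='DRAFT', watermark_text_size=24,
               watermark_rotation_angle=45, watermark_intensity=50)
assert wm.watermark_text == 'DRAFT'
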
@property
def watermark_text(self):
'Gets the watermark_text of this Watermark. # noqa: E501\n\n The watermark text associated with the tag defintion. # noqa: E501\n\n :return: The watermark_text of this Watermark. # noqa: E501\n :rtype: str\n '
return self._watermark_text
| 6,224,266,616,262,649,000
|
Gets the watermark_text of this Watermark. # noqa: E501
The watermark text associated with the tag definition. # noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
|
laserfiche_api/models/watermark.py
|
watermark_text
|
Layer8Err/laserfiche-api
|
python
|
@property
def watermark_text(self):
'Gets the watermark_text of this Watermark. # noqa: E501\n\n The watermark text associated with the tag defintion. # noqa: E501\n\n :return: The watermark_text of this Watermark. # noqa: E501\n :rtype: str\n '
return self._watermark_text
|
@watermark_text.setter
def watermark_text(self, watermark_text):
'Sets the watermark_text of this Watermark.\n\n The watermark text associated with the tag defintion. # noqa: E501\n\n :param watermark_text: The watermark_text of this Watermark. # noqa: E501\n :type: str\n '
self._watermark_text = watermark_text
| -3,662,349,038,935,738,400
|
Sets the watermark_text of this Watermark.
The watermark text associated with the tag definition. # noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
|
laserfiche_api/models/watermark.py
|
watermark_text
|
Layer8Err/laserfiche-api
|
python
|
@watermark_text.setter
def watermark_text(self, watermark_text):
    'Sets the watermark_text of this Watermark.\n\n The watermark text associated with the tag definition. # noqa: E501\n\n :param watermark_text: The watermark_text of this Watermark. # noqa: E501\n :type: str\n '
self._watermark_text = watermark_text
|
@property
def watermark_text_size(self):
'Gets the watermark_text_size of this Watermark. # noqa: E501\n\n The size of the watermark text, in points, associated with the tag definition. # noqa: E501\n\n :return: The watermark_text_size of this Watermark. # noqa: E501\n :rtype: int\n '
return self._watermark_text_size
| 2,012,693,618,271,067,600
|
Gets the watermark_text_size of this Watermark. # noqa: E501
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:return: The watermark_text_size of this Watermark. # noqa: E501
:rtype: int
|
laserfiche_api/models/watermark.py
|
watermark_text_size
|
Layer8Err/laserfiche-api
|
python
|
@property
def watermark_text_size(self):
'Gets the watermark_text_size of this Watermark. # noqa: E501\n\n The size of the watermark text, in points, associated with the tag definition. # noqa: E501\n\n :return: The watermark_text_size of this Watermark. # noqa: E501\n :rtype: int\n '
return self._watermark_text_size
|
@watermark_text_size.setter
def watermark_text_size(self, watermark_text_size):
'Sets the watermark_text_size of this Watermark.\n\n The size of the watermark text, in points, associated with the tag definition. # noqa: E501\n\n :param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501\n :type: int\n '
self._watermark_text_size = watermark_text_size
| -7,234,098,268,141,262,000
|
Sets the watermark_text_size of this Watermark.
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501
:type: int
|
laserfiche_api/models/watermark.py
|
watermark_text_size
|
Layer8Err/laserfiche-api
|
python
|
@watermark_text_size.setter
def watermark_text_size(self, watermark_text_size):
'Sets the watermark_text_size of this Watermark.\n\n The size of the watermark text, in points, associated with the tag definition. # noqa: E501\n\n :param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501\n :type: int\n '
self._watermark_text_size = watermark_text_size
|
@property
def watermark_position(self):
'Gets the watermark_position of this Watermark. # noqa: E501\n\n The position of the watermark on the page. # noqa: E501\n\n :return: The watermark_position of this Watermark. # noqa: E501\n :rtype: OneOfWatermarkWatermarkPosition\n '
return self._watermark_position
| 2,746,406,502,061,963,300
|
Gets the watermark_position of this Watermark. # noqa: E501
The position of the watermark on the page. # noqa: E501
:return: The watermark_position of this Watermark. # noqa: E501
:rtype: OneOfWatermarkWatermarkPosition
|
laserfiche_api/models/watermark.py
|
watermark_position
|
Layer8Err/laserfiche-api
|
python
|
@property
def watermark_position(self):
'Gets the watermark_position of this Watermark. # noqa: E501\n\n The position of the watermark on the page. # noqa: E501\n\n :return: The watermark_position of this Watermark. # noqa: E501\n :rtype: OneOfWatermarkWatermarkPosition\n '
return self._watermark_position
|
@watermark_position.setter
def watermark_position(self, watermark_position):
'Sets the watermark_position of this Watermark.\n\n The position of the watermark on the page. # noqa: E501\n\n :param watermark_position: The watermark_position of this Watermark. # noqa: E501\n :type: OneOfWatermarkWatermarkPosition\n '
self._watermark_position = watermark_position
| 1,800,833,730,588,717,300
|
Sets the watermark_position of this Watermark.
The position of the watermark on the page. # noqa: E501
:param watermark_position: The watermark_position of this Watermark. # noqa: E501
:type: OneOfWatermarkWatermarkPosition
|
laserfiche_api/models/watermark.py
|
watermark_position
|
Layer8Err/laserfiche-api
|
python
|
@watermark_position.setter
def watermark_position(self, watermark_position):
'Sets the watermark_position of this Watermark.\n\n The position of the watermark on the page. # noqa: E501\n\n :param watermark_position: The watermark_position of this Watermark. # noqa: E501\n :type: OneOfWatermarkWatermarkPosition\n '
self._watermark_position = watermark_position
|
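The position field is typed OneOfWatermarkWatermarkPosition in the docstrings above, and the generated setter stores whatever value it is given without validation. A sketch under that assumption; the 'TopLeft' value is purely illustrative and not confirmed by this row.

# Assumption: the oneOf position schema accepts a simple identifier value.
wm = Watermark()
wm.watermark_position = 'TopLeft'  # stored as-is; no validation in the setter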
@property
def watermark_rotation_angle(self):
'Gets the watermark_rotation_angle of this Watermark. # noqa: E501\n\n The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501\n\n :return: The watermark_rotation_angle of this Watermark. # noqa: E501\n :rtype: int\n '
return self._watermark_rotation_angle
| -3,229,983,936,839,661,000
|
Gets the watermark_rotation_angle of this Watermark. # noqa: E501
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:return: The watermark_rotation_angle of this Watermark. # noqa: E501
:rtype: int
|
laserfiche_api/models/watermark.py
|
watermark_rotation_angle
|
Layer8Err/laserfiche-api
|
python
|
@property
def watermark_rotation_angle(self):
'Gets the watermark_rotation_angle of this Watermark. # noqa: E501\n\n The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501\n\n :return: The watermark_rotation_angle of this Watermark. # noqa: E501\n :rtype: int\n '
return self._watermark_rotation_angle
|
@watermark_rotation_angle.setter
def watermark_rotation_angle(self, watermark_rotation_angle):
'Sets the watermark_rotation_angle of this Watermark.\n\n The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501\n\n :param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501\n :type: int\n '
self._watermark_rotation_angle = watermark_rotation_angle
| 3,218,273,114,369,041,400
|
Sets the watermark_rotation_angle of this Watermark.
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501
:type: int
|
laserfiche_api/models/watermark.py
|
watermark_rotation_angle
|
Layer8Err/laserfiche-api
|
python
|
@watermark_rotation_angle.setter
def watermark_rotation_angle(self, watermark_rotation_angle):
'Sets the watermark_rotation_angle of this Watermark.\n\n The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501\n\n :param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501\n :type: int\n '
self._watermark_rotation_angle = watermark_rotation_angle
|
@property
def is_watermark_mandatory(self):
'Gets the is_watermark_mandatory of this Watermark. # noqa: E501\n\n A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501\n\n :return: The is_watermark_mandatory of this Watermark. # noqa: E501\n :rtype: bool\n '
return self._is_watermark_mandatory
| 5,733,061,525,636,277,000
|
Gets the is_watermark_mandatory of this Watermark. # noqa: E501
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:return: The is_watermark_mandatory of this Watermark. # noqa: E501
:rtype: bool
|
laserfiche_api/models/watermark.py
|
is_watermark_mandatory
|
Layer8Err/laserfiche-api
|
python
|
@property
def is_watermark_mandatory(self):
'Gets the is_watermark_mandatory of this Watermark. # noqa: E501\n\n A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501\n\n :return: The is_watermark_mandatory of this Watermark. # noqa: E501\n :rtype: bool\n '
return self._is_watermark_mandatory
|
@is_watermark_mandatory.setter
def is_watermark_mandatory(self, is_watermark_mandatory):
'Sets the is_watermark_mandatory of this Watermark.\n\n A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501\n\n :param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501\n :type: bool\n '
self._is_watermark_mandatory = is_watermark_mandatory
| -2,591,400,929,638,444,000
|
Sets the is_watermark_mandatory of this Watermark.
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501
:type: bool
|
laserfiche_api/models/watermark.py
|
is_watermark_mandatory
|
Layer8Err/laserfiche-api
|
python
|
@is_watermark_mandatory.setter
def is_watermark_mandatory(self, is_watermark_mandatory):
'Sets the is_watermark_mandatory of this Watermark.\n\n A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501\n\n :param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501\n :type: bool\n '
self._is_watermark_mandatory = is_watermark_mandatory
|
@property
def watermark_intensity(self):
    'Gets the watermark_intensity of this Watermark. # noqa: E501\n\n The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501\n\n :return: The watermark_intensity of this Watermark. # noqa: E501\n :rtype: int\n '
return self._watermark_intensity
| -2,142,675,285,600,010,500
|
Gets the watermark_intensity of this Watermark. # noqa: E501
The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501
:return: The watermark_intensity of this Watermark. # noqa: E501
:rtype: int
|
laserfiche_api/models/watermark.py
|
watermark_intensity
|
Layer8Err/laserfiche-api
|
python
|
@property
def watermark_intensity(self):
    'Gets the watermark_intensity of this Watermark. # noqa: E501\n\n The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501\n\n :return: The watermark_intensity of this Watermark. # noqa: E501\n :rtype: int\n '
return self._watermark_intensity
|
@watermark_intensity.setter
def watermark_intensity(self, watermark_intensity):
    'Sets the watermark_intensity of this Watermark.\n\n The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501\n\n :param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501\n :type: int\n '
self._watermark_intensity = watermark_intensity
| 6,529,353,345,295,813,000
|
Sets the watermark_intensity of this Watermark.
The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501
:param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501
:type: int
|
laserfiche_api/models/watermark.py
|
watermark_intensity
|
Layer8Err/laserfiche-api
|
python
|
@watermark_intensity.setter
def watermark_intensity(self, watermark_intensity):
    'Sets the watermark_intensity of this Watermark.\n\n The intensity of the watermark associated with the tag definition. Valid values range from 0 to 100, with -1 as the default value. # noqa: E501\n\n :param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501\n :type: int\n '
self._watermark_intensity = watermark_intensity
|
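The intensity docstrings above give a valid range of 0 to 100 with -1 as the default, but the generated setter performs no validation. A hypothetical caller-side guard, not part of the generated model:

# Hypothetical helper; the generated setter itself accepts any int.
def set_intensity_checked(wm, value):
    if value != -1 and not (0 <= value <= 100):
        raise ValueError('watermark_intensity must be -1 or in [0, 100]')
    wm.watermark_intensity = value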
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(Watermark, dict):
for (key, value) in self.items():
result[key] = value
return result
| 4,253,247,666,043,611,600
|
Returns the model properties as a dict
|
laserfiche_api/models/watermark.py
|
to_dict
|
Layer8Err/laserfiche-api
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(Watermark, dict):
for (key, value) in self.items():
result[key] = value
return result
|
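A serialization sketch for the to_dict method above. It iterates the class's swagger_types map (present in standard swagger-codegen models, though not shown in this row) and recursively serializes nested models; the output shape in the comment is an assumption about that attribute map.

# Sketch: plain fields pass through; nested models would be expanded via to_dict.
wm = Watermark(watermark_text='DRAFT', is_watermark_mandatory=True)
print(wm.to_dict())
# Assumed shape: {'watermark_text': 'DRAFT', 'watermark_text_size': None, ...,
#                 'is_watermark_mandatory': True, 'watermark_intensity': None}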
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
laserfiche_api/models/watermark.py
|
to_str
|
Layer8Err/laserfiche-api
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
laserfiche_api/models/watermark.py
|
__repr__
|
Layer8Err/laserfiche-api
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, Watermark)):
return False
return (self.__dict__ == other.__dict__)
| 6,308,948,225,613,916,000
|
Returns true if both objects are equal
|
laserfiche_api/models/watermark.py
|
__eq__
|
Layer8Err/laserfiche-api
|
python
|
def __eq__(self, other):
if (not isinstance(other, Watermark)):
return False
return (self.__dict__ == other.__dict__)
|
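Equality on this generated model compares type and __dict__, as shown above; a quick illustrative check with arbitrary values:

# Two instances with identical field values compare equal via __dict__.
a = Watermark(watermark_text='DRAFT')
b = Watermark(watermark_text='DRAFT')
assert a == b          # __eq__: same type and same attribute dict
assert not (a != b)    # __ne__ (below) is simply the negation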
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
laserfiche_api/models/watermark.py
|
__ne__
|
Layer8Err/laserfiche-api
|
python
|
def __ne__(self, other):
return (not (self == other))
|