body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
89f2e00aaf8c5309199032f5e6e176a01094e3fe953287c60bf4ef6331b6232e
|
def test_multiPull(self):
'Execute multiple pull operations and make sure all expected resources\n are in the final directory'
self.multiPull('alert', util.allAlerts, [util.Alert.nameMatch], ['--name', util.Alert.matchString], [util.Alert.additionalInformationMatch, util.Alert.nameMatch], ['--additionalInformation', util.Alert.matchString])
self.multiPull('dashboard', util.allDashboards, [util.Dashboard.kubeBoxPki], ['--name', util.Dashboard.kubeBoxPkiNameMatchString], [util.Dashboard.kubeBoxPki, util.Dashboard.skynetMonitoring], ['--name', util.Dashboard.skynetMonitoringNameMatchString])
|
Execute multiple pull operations and make sure all expected resources
are in the final directory
|
test/test_pull.py
|
test_multiPull
|
box/wavectl
| 18
|
python
|
def test_multiPull(self):
'Execute multiple pull operations and make sure all expected resources\n are in the final directory'
self.multiPull('alert', util.allAlerts, [util.Alert.nameMatch], ['--name', util.Alert.matchString], [util.Alert.additionalInformationMatch, util.Alert.nameMatch], ['--additionalInformation', util.Alert.matchString])
self.multiPull('dashboard', util.allDashboards, [util.Dashboard.kubeBoxPki], ['--name', util.Dashboard.kubeBoxPkiNameMatchString], [util.Dashboard.kubeBoxPki, util.Dashboard.skynetMonitoring], ['--name', util.Dashboard.skynetMonitoringNameMatchString])
|
def test_multiPull(self):
'Execute multiple pull operations and make sure all expected resources\n are in the final directory'
self.multiPull('alert', util.allAlerts, [util.Alert.nameMatch], ['--name', util.Alert.matchString], [util.Alert.additionalInformationMatch, util.Alert.nameMatch], ['--additionalInformation', util.Alert.matchString])
self.multiPull('dashboard', util.allDashboards, [util.Dashboard.kubeBoxPki], ['--name', util.Dashboard.kubeBoxPkiNameMatchString], [util.Dashboard.kubeBoxPki, util.Dashboard.skynetMonitoring], ['--name', util.Dashboard.skynetMonitoringNameMatchString])<|docstring|>Execute multiple pull operations and make sure all expected resources
are in the final directory<|endoftext|>
|
27782ca4c6589fe55f4504a3e7f38f33bc0ca3f0ffa887e1d23ffd274a27c760
|
def pullWithoutAPullBranch(self, rsrcType, rsrcs):
' The repo does not have a pull branch. That should mean that this is\n the first pull happening to this git repo. In that case the pull\n operation creates the pull branch from the current branch the repo was\n on.'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
|
The repo does not have a pull branch. That should mean that this is
the first pull happening to this git repo. In that case the pull
operation creates the pull branch from the current branch the repo was
on.
|
test/test_pull.py
|
pullWithoutAPullBranch
|
box/wavectl
| 18
|
python
|
def pullWithoutAPullBranch(self, rsrcType, rsrcs):
' The repo does not have a pull branch. That should mean that this is\n the first pull happening to this git repo. In that case the pull\n operation creates the pull branch from the current branch the repo was\n on.'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])
|
def pullWithoutAPullBranch(self, rsrcType, rsrcs):
' The repo does not have a pull branch. That should mean that this is\n the first pull happening to this git repo. In that case the pull\n operation creates the pull branch from the current branch the repo was\n on.'
with util.TempDir() as td:
d = td.dir()
r = self.repoInit(d)
self.addReadmeFileToRepo(r)
self.executePull(rsrcType, d, r, rsrcs, pullAdditionalParams=['--inGit'])<|docstring|>The repo does not have a pull branch. That should mean that this is
the first pull happening to this git repo. In that case the pull
operation creates the pull branch from the current branch the repo was
on.<|endoftext|>
|
a93355bdb083e09fb1dfc4efcc3c126b6bfb3cf28939e3e2b253ef8b5c4fa068
|
def test_redactedCommandLine(self):
'Sometimes we need to redact secrets from a commandLine. In this function\n we verify the redaction works correctly'
inputOutput = [{'input': [], 'output': []}, {'input': ['--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED']}, {'input': ['--apiToken', 'someSecret', '--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED', '--apiToken', 'REDACTED']}, {'input': ['--apiToken', 'someSecret', '--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED', '--apiToken', 'REDACTED']}, {'input': ['one', '--two', '--apiToken', 'someSecret', 'one', '--two', '--apiToken', 'someSecret', 'one', '--two'], 'output': ['one', '--two', '--apiToken', 'REDACTED', 'one', '--two', '--apiToken', 'REDACTED', 'one', '--two']}]
for inout in inputOutput:
self.assertListEqual(inout['output'], wavectl.PullCommand.getRedactedCommandLine(inout['input']))
|
Sometimes we need to redact secrets from a commandLine. In this function
we verify the redaction works correctly
|
test/test_pull.py
|
test_redactedCommandLine
|
box/wavectl
| 18
|
python
|
def test_redactedCommandLine(self):
'Sometimes we need to redact secrets from a commandLine. In this function\n we verify the redaction works correctly'
inputOutput = [{'input': [], 'output': []}, {'input': ['--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED']}, {'input': ['--apiToken', 'someSecret', '--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED', '--apiToken', 'REDACTED']}, {'input': ['--apiToken', 'someSecret', '--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED', '--apiToken', 'REDACTED']}, {'input': ['one', '--two', '--apiToken', 'someSecret', 'one', '--two', '--apiToken', 'someSecret', 'one', '--two'], 'output': ['one', '--two', '--apiToken', 'REDACTED', 'one', '--two', '--apiToken', 'REDACTED', 'one', '--two']}]
for inout in inputOutput:
self.assertListEqual(inout['output'], wavectl.PullCommand.getRedactedCommandLine(inout['input']))
|
def test_redactedCommandLine(self):
'Sometimes we need to redact secrets from a commandLine. In this function\n we verify the redaction works correctly'
inputOutput = [{'input': [], 'output': []}, {'input': ['--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED']}, {'input': ['--apiToken', 'someSecret', '--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED', '--apiToken', 'REDACTED']}, {'input': ['--apiToken', 'someSecret', '--apiToken', 'someSecret'], 'output': ['--apiToken', 'REDACTED', '--apiToken', 'REDACTED']}, {'input': ['one', '--two', '--apiToken', 'someSecret', 'one', '--two', '--apiToken', 'someSecret', 'one', '--two'], 'output': ['one', '--two', '--apiToken', 'REDACTED', 'one', '--two', '--apiToken', 'REDACTED', 'one', '--two']}]
for inout in inputOutput:
self.assertListEqual(inout['output'], wavectl.PullCommand.getRedactedCommandLine(inout['input']))<|docstring|>Sometimes we need to redact secrets from a commandLine. In this function
we verify the redaction works correctly<|endoftext|>
|
6a09377bd88f98349f0813b0f4c4d16a55348f5b0cbf1d123a7836ee2fed5f36
|
def test_rook_ceph_operator_log_type(self):
'\n Test the ability to change the log level in rook-ceph operator dynamically\n without rook-ceph operator pod restart.\n\n '
set_configmap_log_level_rook_ceph_operator(value='DEBUG')
last_log_date_time_obj = get_last_log_time_date()
log.info('Respin OSD pod')
osd_pod_objs = get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
sample = TimeoutSampler(timeout=400, sleep=20, func=check_osd_log_exist_on_rook_ceph_operator_pod, last_log_date_time_obj=last_log_date_time_obj, expected_strings=['D |', 'osd'])
if (not sample.wait_for_func_status(result=True)):
raise ValueError('OSD DEBUG Log does not exist')
set_configmap_log_level_rook_ceph_operator(value='INFO')
last_log_date_time_obj = get_last_log_time_date()
log.info('Respin OSD pod')
osd_pod_objs = get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
sample = TimeoutSampler(timeout=400, sleep=20, func=check_osd_log_exist_on_rook_ceph_operator_pod, last_log_date_time_obj=last_log_date_time_obj, expected_strings=['I |', 'osd'], unexpected_strings=['D |'])
if (not sample.wait_for_func_status(result=True)):
raise ValueError('OSD INFO Log does not exist or DEBUG Log exist on INFO mode')
|
Test the ability to change the log level in rook-ceph operator dynamically
without rook-ceph operator pod restart.
|
tests/manage/z_cluster/test_rook_ceph_operator_log_type.py
|
test_rook_ceph_operator_log_type
|
jarrpa/ocs-ci
| 130
|
python
|
def test_rook_ceph_operator_log_type(self):
'\n Test the ability to change the log level in rook-ceph operator dynamically\n without rook-ceph operator pod restart.\n\n '
set_configmap_log_level_rook_ceph_operator(value='DEBUG')
last_log_date_time_obj = get_last_log_time_date()
log.info('Respin OSD pod')
osd_pod_objs = get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
sample = TimeoutSampler(timeout=400, sleep=20, func=check_osd_log_exist_on_rook_ceph_operator_pod, last_log_date_time_obj=last_log_date_time_obj, expected_strings=['D |', 'osd'])
if (not sample.wait_for_func_status(result=True)):
raise ValueError('OSD DEBUG Log does not exist')
set_configmap_log_level_rook_ceph_operator(value='INFO')
last_log_date_time_obj = get_last_log_time_date()
log.info('Respin OSD pod')
osd_pod_objs = get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
sample = TimeoutSampler(timeout=400, sleep=20, func=check_osd_log_exist_on_rook_ceph_operator_pod, last_log_date_time_obj=last_log_date_time_obj, expected_strings=['I |', 'osd'], unexpected_strings=['D |'])
if (not sample.wait_for_func_status(result=True)):
raise ValueError('OSD INFO Log does not exist or DEBUG Log exist on INFO mode')
|
def test_rook_ceph_operator_log_type(self):
'\n Test the ability to change the log level in rook-ceph operator dynamically\n without rook-ceph operator pod restart.\n\n '
set_configmap_log_level_rook_ceph_operator(value='DEBUG')
last_log_date_time_obj = get_last_log_time_date()
log.info('Respin OSD pod')
osd_pod_objs = get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
sample = TimeoutSampler(timeout=400, sleep=20, func=check_osd_log_exist_on_rook_ceph_operator_pod, last_log_date_time_obj=last_log_date_time_obj, expected_strings=['D |', 'osd'])
if (not sample.wait_for_func_status(result=True)):
raise ValueError('OSD DEBUG Log does not exist')
set_configmap_log_level_rook_ceph_operator(value='INFO')
last_log_date_time_obj = get_last_log_time_date()
log.info('Respin OSD pod')
osd_pod_objs = get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
sample = TimeoutSampler(timeout=400, sleep=20, func=check_osd_log_exist_on_rook_ceph_operator_pod, last_log_date_time_obj=last_log_date_time_obj, expected_strings=['I |', 'osd'], unexpected_strings=['D |'])
if (not sample.wait_for_func_status(result=True)):
raise ValueError('OSD INFO Log does not exist or DEBUG Log exist on INFO mode')<|docstring|>Test the ability to change the log level in rook-ceph operator dynamically
without rook-ceph operator pod restart.<|endoftext|>
|
fd9ccd12b2c7403415e7918371354985572845d0f254d2dae7abdca9bb945386
|
def clean(self, value):
'\n Value must be a string in the XXXXXXXX formats.\n '
value = super(PEDNIField, self).clean(value)
if (value in EMPTY_VALUES):
return ''
if (not value.isdigit()):
raise ValidationError(self.error_messages['invalid'])
if (len(value) != 8):
raise ValidationError(self.error_messages['max_digits'])
return value
|
Value must be a string in the XXXXXXXX formats.
|
django/contrib/localflavor/pe/forms.py
|
clean
|
kennethlove/django
| 790
|
python
|
def clean(self, value):
'\n \n '
value = super(PEDNIField, self).clean(value)
if (value in EMPTY_VALUES):
return
if (not value.isdigit()):
raise ValidationError(self.error_messages['invalid'])
if (len(value) != 8):
raise ValidationError(self.error_messages['max_digits'])
return value
|
def clean(self, value):
'\n \n '
value = super(PEDNIField, self).clean(value)
if (value in EMPTY_VALUES):
return
if (not value.isdigit()):
raise ValidationError(self.error_messages['invalid'])
if (len(value) != 8):
raise ValidationError(self.error_messages['max_digits'])
return value<|docstring|>Value must be a string in the XXXXXXXX formats.<|endoftext|>
|
67bcea608a7e331529d11da6cd5100cf433843d8c0a8a2dd79da06ce8f278a00
|
def clean(self, value):
'\n Value must be an 11-digit number.\n '
value = super(PERUCField, self).clean(value)
if (value in EMPTY_VALUES):
return ''
if (not value.isdigit()):
raise ValidationError(self.error_messages['invalid'])
if (len(value) != 11):
raise ValidationError(self.error_messages['max_digits'])
return value
|
Value must be an 11-digit number.
|
django/contrib/localflavor/pe/forms.py
|
clean
|
kennethlove/django
| 790
|
python
|
def clean(self, value):
'\n \n '
value = super(PERUCField, self).clean(value)
if (value in EMPTY_VALUES):
return
if (not value.isdigit()):
raise ValidationError(self.error_messages['invalid'])
if (len(value) != 11):
raise ValidationError(self.error_messages['max_digits'])
return value
|
def clean(self, value):
'\n \n '
value = super(PERUCField, self).clean(value)
if (value in EMPTY_VALUES):
return
if (not value.isdigit()):
raise ValidationError(self.error_messages['invalid'])
if (len(value) != 11):
raise ValidationError(self.error_messages['max_digits'])
return value<|docstring|>Value must be an 11-digit number.<|endoftext|>
|
1f4746db93e11c8c2906e07dec7821bd6d251635fc279eea5975e90914d0ae23
|
def append(self, item: 'IItem') -> None:
'Append new entity for consume'
try:
self.__queue.put(item=item)
except QueueFull:
self.__logger.error('Exchange processing queue is full. New messages could not be added')
|
Append new entity for consume
|
miniserver_gateway/exchanges/queue.py
|
append
|
FastyBird/miniserver-gateway
| 0
|
python
|
def append(self, item: 'IItem') -> None:
try:
self.__queue.put(item=item)
except QueueFull:
self.__logger.error('Exchange processing queue is full. New messages could not be added')
|
def append(self, item: 'IItem') -> None:
try:
self.__queue.put(item=item)
except QueueFull:
self.__logger.error('Exchange processing queue is full. New messages could not be added')<|docstring|>Append new entity for consume<|endoftext|>
|
ab8835b9c7c9fb9e01fed512ebd3e6dcd21a0f1a377ddddbca9e2056877fd43e
|
def consume(self) -> None:
'Consume queue item'
if (not self.__queue.empty()):
item = self.__queue.get()
if isinstance(item, IItem):
if isinstance(item, PublishMessageQueueItem):
self.__publisher.publish(origin=item.origin, routing_key=item.routing_key, data=item.data)
|
Consume queue item
|
miniserver_gateway/exchanges/queue.py
|
consume
|
FastyBird/miniserver-gateway
| 0
|
python
|
def consume(self) -> None:
if (not self.__queue.empty()):
item = self.__queue.get()
if isinstance(item, IItem):
if isinstance(item, PublishMessageQueueItem):
self.__publisher.publish(origin=item.origin, routing_key=item.routing_key, data=item.data)
|
def consume(self) -> None:
if (not self.__queue.empty()):
item = self.__queue.get()
if isinstance(item, IItem):
if isinstance(item, PublishMessageQueueItem):
self.__publisher.publish(origin=item.origin, routing_key=item.routing_key, data=item.data)<|docstring|>Consume queue item<|endoftext|>
|
ab54c6b9fb85e6bcfa32e63eb122734ad37effc3409eda6cbcd0dd799787e4d5
|
def is_empty(self) -> bool:
'Check if all messages are consumed'
return self.__queue.empty()
|
Check if all messages are consumed
|
miniserver_gateway/exchanges/queue.py
|
is_empty
|
FastyBird/miniserver-gateway
| 0
|
python
|
def is_empty(self) -> bool:
return self.__queue.empty()
|
def is_empty(self) -> bool:
return self.__queue.empty()<|docstring|>Check if all messages are consumed<|endoftext|>
|
9207f18d46c745983f8e50f9c81eab06375102917ec21c3440ecb931926de0de
|
@property
def origin(self) -> ModuleOrigin:
'Message module origin'
return self.__origin
|
Message module origin
|
miniserver_gateway/exchanges/queue.py
|
origin
|
FastyBird/miniserver-gateway
| 0
|
python
|
@property
def origin(self) -> ModuleOrigin:
return self.__origin
|
@property
def origin(self) -> ModuleOrigin:
return self.__origin<|docstring|>Message module origin<|endoftext|>
|
f0ca2ea4c614139a243b3a827d9114cbe3a3473b26c1726050825e12ebdcd512
|
@property
def routing_key(self) -> RoutingKey:
'Message routing key'
return self.__routing_key
|
Message routing key
|
miniserver_gateway/exchanges/queue.py
|
routing_key
|
FastyBird/miniserver-gateway
| 0
|
python
|
@property
def routing_key(self) -> RoutingKey:
return self.__routing_key
|
@property
def routing_key(self) -> RoutingKey:
return self.__routing_key<|docstring|>Message routing key<|endoftext|>
|
f05829f11a2c3b9addf7c2a68b12efe60954b7e7475c3c5a7453601d6e748fbf
|
@property
def data(self) -> dict:
'Message data formatted into dictionary'
return self.__data
|
Message data formatted into dictionary
|
miniserver_gateway/exchanges/queue.py
|
data
|
FastyBird/miniserver-gateway
| 0
|
python
|
@property
def data(self) -> dict:
return self.__data
|
@property
def data(self) -> dict:
return self.__data<|docstring|>Message data formatted into dictionary<|endoftext|>
|
e8bc9dbfa214a95460d64637bebc2b64168fbf2d9bdaf4252b0fd10dc2c263ea
|
def test_fileformatyaml_no_inpath_raises():
'None in path raises.'
context = Context({'k1': 'v1'})
with pytest.raises(KeyNotInContextError) as err_info:
fileformat.run_step(context)
assert (str(err_info.value) == 'fileFormatYaml not found in the pypyr context.')
|
None in path raises.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
test_fileformatyaml_no_inpath_raises
|
Reskov/pypyr
| 261
|
python
|
def test_fileformatyaml_no_inpath_raises():
context = Context({'k1': 'v1'})
with pytest.raises(KeyNotInContextError) as err_info:
fileformat.run_step(context)
assert (str(err_info.value) == 'fileFormatYaml not found in the pypyr context.')
|
def test_fileformatyaml_no_inpath_raises():
context = Context({'k1': 'v1'})
with pytest.raises(KeyNotInContextError) as err_info:
fileformat.run_step(context)
assert (str(err_info.value) == 'fileFormatYaml not found in the pypyr context.')<|docstring|>None in path raises.<|endoftext|>
|
85b94dfb6793677be2f7be7e9f44d81c96d59d95a681d5edd61e47be268d37b2
|
def test_fileformatyaml_empty_inpath_raises():
'Empty in path raises.'
context = Context({'fileFormatYaml': {'in': None}})
with pytest.raises(KeyInContextHasNoValueError) as err_info:
fileformat.run_step(context)
assert (str(err_info.value) == "context['fileFormatYaml']['in'] must have a value for pypyr.steps.fileformatyaml.")
|
Empty in path raises.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
test_fileformatyaml_empty_inpath_raises
|
Reskov/pypyr
| 261
|
python
|
def test_fileformatyaml_empty_inpath_raises():
context = Context({'fileFormatYaml': {'in': None}})
with pytest.raises(KeyInContextHasNoValueError) as err_info:
fileformat.run_step(context)
assert (str(err_info.value) == "context['fileFormatYaml']['in'] must have a value for pypyr.steps.fileformatyaml.")
|
def test_fileformatyaml_empty_inpath_raises():
context = Context({'fileFormatYaml': {'in': None}})
with pytest.raises(KeyInContextHasNoValueError) as err_info:
fileformat.run_step(context)
assert (str(err_info.value) == "context['fileFormatYaml']['in'] must have a value for pypyr.steps.fileformatyaml.")<|docstring|>Empty in path raises.<|endoftext|>
|
bd2c5aff0cb7aece772403122d494e881ef98049239cce7b8cf0b20609c0b858
|
def test_fileformatyaml_pass_no_substitutions():
'Relative path to file should succeed.\n\n Strictly speaking not a unit test.\n '
context = Context({'ok1': 'ov1', 'fileFormatYaml': {'in': './tests/testfiles/test.yaml', 'out': './tests/testfiles/out/out.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 2), 'context should have 2 items'
assert (context['ok1'] == 'ov1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/test.yaml', 'out': './tests/testfiles/out/out.yaml'})
with open('./tests/testfiles/out/out.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
assert (len(outcontents) == 3)
assert (outcontents['key'] == 'value1 !£$%# *')
assert (outcontents['key2'] == 'blah')
assert (outcontents['key3'] == ['l1', '!£$% *', 'l2', ['l31', {'l32': ['l321', 'l322']}]])
os.remove('./tests/testfiles/out/out.yaml')
|
Relative path to file should succeed.
Strictly speaking not a unit test.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
test_fileformatyaml_pass_no_substitutions
|
Reskov/pypyr
| 261
|
python
|
def test_fileformatyaml_pass_no_substitutions():
'Relative path to file should succeed.\n\n Strictly speaking not a unit test.\n '
context = Context({'ok1': 'ov1', 'fileFormatYaml': {'in': './tests/testfiles/test.yaml', 'out': './tests/testfiles/out/out.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 2), 'context should have 2 items'
assert (context['ok1'] == 'ov1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/test.yaml', 'out': './tests/testfiles/out/out.yaml'})
with open('./tests/testfiles/out/out.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
assert (len(outcontents) == 3)
assert (outcontents['key'] == 'value1 !£$%# *')
assert (outcontents['key2'] == 'blah')
assert (outcontents['key3'] == ['l1', '!£$% *', 'l2', ['l31', {'l32': ['l321', 'l322']}]])
os.remove('./tests/testfiles/out/out.yaml')
|
def test_fileformatyaml_pass_no_substitutions():
'Relative path to file should succeed.\n\n Strictly speaking not a unit test.\n '
context = Context({'ok1': 'ov1', 'fileFormatYaml': {'in': './tests/testfiles/test.yaml', 'out': './tests/testfiles/out/out.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 2), 'context should have 2 items'
assert (context['ok1'] == 'ov1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/test.yaml', 'out': './tests/testfiles/out/out.yaml'})
with open('./tests/testfiles/out/out.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
assert (len(outcontents) == 3)
assert (outcontents['key'] == 'value1 !£$%# *')
assert (outcontents['key2'] == 'blah')
assert (outcontents['key3'] == ['l1', '!£$% *', 'l2', ['l31', {'l32': ['l321', 'l322']}]])
os.remove('./tests/testfiles/out/out.yaml')<|docstring|>Relative path to file should succeed.
Strictly speaking not a unit test.<|endoftext|>
|
1bff9f9b1ca5c4a100baaee73e02a62ffaddbfbd7bfb249f5cd9f134d628cc7f
|
def test_fileformatyaml_pass_with_substitutions():
'Relative path to file should succeed.\n\n Strictly speaking not a unit test.\n '
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatYaml': {'in': './tests/testfiles/testsubst.yaml', 'out': './tests/testfiles/out/outsubst.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 6), 'context should have 6 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/testsubst.yaml', 'out': './tests/testfiles/out/outsubst.yaml'})
with open('./tests/testfiles/out/outsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/outsubst.yaml')
|
Relative path to file should succeed.
Strictly speaking not a unit test.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
test_fileformatyaml_pass_with_substitutions
|
Reskov/pypyr
| 261
|
python
|
def test_fileformatyaml_pass_with_substitutions():
'Relative path to file should succeed.\n\n Strictly speaking not a unit test.\n '
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatYaml': {'in': './tests/testfiles/testsubst.yaml', 'out': './tests/testfiles/out/outsubst.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 6), 'context should have 6 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/testsubst.yaml', 'out': './tests/testfiles/out/outsubst.yaml'})
with open('./tests/testfiles/out/outsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/outsubst.yaml')
|
def test_fileformatyaml_pass_with_substitutions():
'Relative path to file should succeed.\n\n Strictly speaking not a unit test.\n '
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatYaml': {'in': './tests/testfiles/testsubst.yaml', 'out': './tests/testfiles/out/outsubst.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 6), 'context should have 6 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/testsubst.yaml', 'out': './tests/testfiles/out/outsubst.yaml'})
with open('./tests/testfiles/out/outsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/outsubst.yaml')<|docstring|>Relative path to file should succeed.
Strictly speaking not a unit test.<|endoftext|>
|
8e2ee5e32ab434a2a56d4361d8066bbf8b485c02ee050f51137ad0524662d500
|
def test_fileformatyaml_edit_with_substitutions():
'Relative path to file should succeed, with no out meaning edit.\n\n Strictly speaking not a unit test.\n '
shutil.copyfile('./tests/testfiles/testsubst.yaml', './tests/testfiles/out/edittestsubst.yaml')
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatYaml': {'in': './tests/testfiles/out/edittestsubst.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 6), 'context should have 6 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/out/edittestsubst.yaml'})
with open('./tests/testfiles/out/edittestsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/edittestsubst.yaml')
|
Relative path to file should succeed, with no out meaning edit.
Strictly speaking not a unit test.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
test_fileformatyaml_edit_with_substitutions
|
Reskov/pypyr
| 261
|
python
|
def test_fileformatyaml_edit_with_substitutions():
'Relative path to file should succeed, with no out meaning edit.\n\n Strictly speaking not a unit test.\n '
shutil.copyfile('./tests/testfiles/testsubst.yaml', './tests/testfiles/out/edittestsubst.yaml')
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatYaml': {'in': './tests/testfiles/out/edittestsubst.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 6), 'context should have 6 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/out/edittestsubst.yaml'})
with open('./tests/testfiles/out/edittestsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/edittestsubst.yaml')
|
def test_fileformatyaml_edit_with_substitutions():
'Relative path to file should succeed, with no out meaning edit.\n\n Strictly speaking not a unit test.\n '
shutil.copyfile('./tests/testfiles/testsubst.yaml', './tests/testfiles/out/edittestsubst.yaml')
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatYaml': {'in': './tests/testfiles/out/edittestsubst.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 6), 'context should have 6 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/out/edittestsubst.yaml'})
with open('./tests/testfiles/out/edittestsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/edittestsubst.yaml')<|docstring|>Relative path to file should succeed, with no out meaning edit.
Strictly speaking not a unit test.<|endoftext|>
|
d770357db2582efaa9a28ad8e12745f02c93847830a09bce3c79e66bed081414
|
def test_fileformatyaml_pass_with_path_substitutions():
'Relative path to file should succeed with path subsitutions.\n\n Strictly speaking not a unit test.\n '
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'pathIn': 'testsubst', 'pathOut': 'outsubst', 'fileFormatYaml': {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 8), 'context should have 8 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml'})
with open('./tests/testfiles/out/outsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/outsubst.yaml')
|
Relative path to file should succeed with path subsitutions.
Strictly speaking not a unit test.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
test_fileformatyaml_pass_with_path_substitutions
|
Reskov/pypyr
| 261
|
python
|
def test_fileformatyaml_pass_with_path_substitutions():
'Relative path to file should succeed with path subsitutions.\n\n Strictly speaking not a unit test.\n '
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'pathIn': 'testsubst', 'pathOut': 'outsubst', 'fileFormatYaml': {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 8), 'context should have 8 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml'})
with open('./tests/testfiles/out/outsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/outsubst.yaml')
|
def test_fileformatyaml_pass_with_path_substitutions():
'Relative path to file should succeed with path subsitutions.\n\n Strictly speaking not a unit test.\n '
context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'pathIn': 'testsubst', 'pathOut': 'outsubst', 'fileFormatYaml': {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml'}})
fileformat.run_step(context)
assert context, "context shouldn't be None"
assert (len(context) == 8), 'context should have 8 items'
assert (context['k1'] == 'v1')
assert (context['fileFormatYaml'] == {'in': './tests/testfiles/{pathIn}.yaml', 'out': './tests/testfiles/out/{pathOut}.yaml'})
with open('./tests/testfiles/out/outsubst.yaml') as outfile:
yaml_loader = yaml.YAML(typ='rt', pure=True)
outcontents = yaml_loader.load(outfile)
expected = {'key': 'v1value1 !£$%# *', 'key2v2': 'blah', 'key3': ['l1', '!£$% * v3', 'l2', ['l31v4', {'l32': ['l321', 'l322v5']}]]}
assert (outcontents == expected)
os.remove('./tests/testfiles/out/outsubst.yaml')<|docstring|>Relative path to file should succeed with path subsitutions.
Strictly speaking not a unit test.<|endoftext|>
|
20b355d1f69c5d9aa6a8a8d396e18608c8861e07bc525aa0fc97f21edba2c10f
|
def teardown_module(module):
'Teardown.'
os.rmdir('./tests/testfiles/out/')
|
Teardown.
|
tests/unit/pypyr/steps/fileformatyaml_test.py
|
teardown_module
|
Reskov/pypyr
| 261
|
python
|
def teardown_module(module):
os.rmdir('./tests/testfiles/out/')
|
def teardown_module(module):
os.rmdir('./tests/testfiles/out/')<|docstring|>Teardown.<|endoftext|>
|
7389b90f5c0c1818803b4e2d98798922a8e08509b9205687e6bb561fcdd4977a
|
@rpc
def square_odd_numbers(self, integer_list: List[int]) -> List[int]:
'\n Squares odd numbers in a list.\n :param integer_list: A list of integers.\n :return: A list of integers with squared odd numbers.\n '
return [((number ** 2) if ((number % 2) != 0) else number) for number in integer_list]
|
Squares odd numbers in a list.
:param integer_list: A list of integers.
:return: A list of integers with squared odd numbers.
|
src/service.py
|
square_odd_numbers
|
cblignaut/oddsquarehuffmanservice
| 0
|
python
|
@rpc
def square_odd_numbers(self, integer_list: List[int]) -> List[int]:
'\n Squares odd numbers in a list.\n :param integer_list: A list of integers.\n :return: A list of integers with squared odd numbers.\n '
return [((number ** 2) if ((number % 2) != 0) else number) for number in integer_list]
|
@rpc
def square_odd_numbers(self, integer_list: List[int]) -> List[int]:
'\n Squares odd numbers in a list.\n :param integer_list: A list of integers.\n :return: A list of integers with squared odd numbers.\n '
return [((number ** 2) if ((number % 2) != 0) else number) for number in integer_list]<|docstring|>Squares odd numbers in a list.
:param integer_list: A list of integers.
:return: A list of integers with squared odd numbers.<|endoftext|>
|
7ec4d612fbb016ce4d57a49c3d8559049da0347665b99c359b68ffa62b84daa3
|
@rpc
def huffman_encode(self, strings: Union[(list, str)]) -> dict:
'\n Encoding a list of strings with Huffman encoding and storing a frequency table.\n :param strings: List of strings to be encoded.\n :return: Dictionary of the strings and encoded strings.\n '
strings = ([strings] if isinstance(strings, str) else strings)
encoded_strings = {}
for string in strings:
bitarray_frequency_table = huffman_code(Counter(string))
encoded_string = bitarray(endian='little')
encoded_string.encode(bitarray_frequency_table, string)
encoded_strings[string] = encoded_string.to01()
self._freq_tables[encoded_string.to01()] = bitarray_frequency_table
return encoded_strings
|
Encoding a list of strings with Huffman encoding and storing a frequency table.
:param strings: List of strings to be encoded.
:return: Dictionary of the strings and encoded strings.
|
src/service.py
|
huffman_encode
|
cblignaut/oddsquarehuffmanservice
| 0
|
python
|
@rpc
def huffman_encode(self, strings: Union[(list, str)]) -> dict:
'\n Encoding a list of strings with Huffman encoding and storing a frequency table.\n :param strings: List of strings to be encoded.\n :return: Dictionary of the strings and encoded strings.\n '
strings = ([strings] if isinstance(strings, str) else strings)
encoded_strings = {}
for string in strings:
bitarray_frequency_table = huffman_code(Counter(string))
encoded_string = bitarray(endian='little')
encoded_string.encode(bitarray_frequency_table, string)
encoded_strings[string] = encoded_string.to01()
self._freq_tables[encoded_string.to01()] = bitarray_frequency_table
return encoded_strings
|
@rpc
def huffman_encode(self, strings: Union[(list, str)]) -> dict:
'\n Encoding a list of strings with Huffman encoding and storing a frequency table.\n :param strings: List of strings to be encoded.\n :return: Dictionary of the strings and encoded strings.\n '
strings = ([strings] if isinstance(strings, str) else strings)
encoded_strings = {}
for string in strings:
bitarray_frequency_table = huffman_code(Counter(string))
encoded_string = bitarray(endian='little')
encoded_string.encode(bitarray_frequency_table, string)
encoded_strings[string] = encoded_string.to01()
self._freq_tables[encoded_string.to01()] = bitarray_frequency_table
return encoded_strings<|docstring|>Encoding a list of strings with Huffman encoding and storing a frequency table.
:param strings: List of strings to be encoded.
:return: Dictionary of the strings and encoded strings.<|endoftext|>
|
dcf03494bfb1fdd84d368265a6732f7c6bf22046ad67a403e12bafebe327d3d9
|
@rpc
def huffman_decode(self, encoded: str, freq_table: dict=None) -> str:
'\n Decoding a huffman encoded string, requires either a stored or given frequency table.\n :param encoded: Huffman encoded string.\n :param freq_table: An optional table frequency table. (RECOMMENDED see readme)\n :return: Decoded string.\n '
if freq_table:
return ''.join(bitarray(encoded).decode(decodetree(huffman_code(freq_table))))
elif self._freq_tables.get(encoded):
return ''.join(bitarray(encoded).decode(decodetree(self._freq_tables[encoded])))
else:
raise Exception('Unable to find frequency table, please provide one.')
|
Decoding a huffman encoded string, requires either a stored or given frequency table.
:param encoded: Huffman encoded string.
:param freq_table: An optional table frequency table. (RECOMMENDED see readme)
:return: Decoded string.
|
src/service.py
|
huffman_decode
|
cblignaut/oddsquarehuffmanservice
| 0
|
python
|
@rpc
def huffman_decode(self, encoded: str, freq_table: dict=None) -> str:
'\n Decoding a huffman encoded string, requires either a stored or given frequency table.\n :param encoded: Huffman encoded string.\n :param freq_table: An optional table frequency table. (RECOMMENDED see readme)\n :return: Decoded string.\n '
if freq_table:
return .join(bitarray(encoded).decode(decodetree(huffman_code(freq_table))))
elif self._freq_tables.get(encoded):
return .join(bitarray(encoded).decode(decodetree(self._freq_tables[encoded])))
else:
raise Exception('Unable to find frequency table, please provide one.')
|
@rpc
def huffman_decode(self, encoded: str, freq_table: dict=None) -> str:
'\n Decoding a huffman encoded string, requires either a stored or given frequency table.\n :param encoded: Huffman encoded string.\n :param freq_table: An optional table frequency table. (RECOMMENDED see readme)\n :return: Decoded string.\n '
if freq_table:
return .join(bitarray(encoded).decode(decodetree(huffman_code(freq_table))))
elif self._freq_tables.get(encoded):
return .join(bitarray(encoded).decode(decodetree(self._freq_tables[encoded])))
else:
raise Exception('Unable to find frequency table, please provide one.')<|docstring|>Decoding a huffman encoded string, requires either a stored or given frequency table.
:param encoded: Huffman encoded string.
:param freq_table: An optional table frequency table. (RECOMMENDED see readme)
:return: Decoded string.<|endoftext|>
|
bafc6cda37f3dddeac16efbef7874a768531810176798277afeca4decd475dba
|
def fit(self, df_train, y_train):
'Fits Clf_feature_selector\n\n Parameters\n ----------\n df_train : pandas dataframe of shape = (n_train, n_features)\n The train dataset with numerical features and no NA\n\n y_train : pandas series of shape = (n_train, )\n The target for classification task. Must be encoded.\n\n Returns\n -------\n object\n self\n '
if ((type(df_train) != pd.SparseDataFrame) and (type(df_train) != pd.DataFrame)):
raise ValueError('df_train must be a DataFrame')
if (type(y_train) != pd.core.series.Series):
raise ValueError('y_train must be a Series')
if (self.strategy == 'variance'):
coef = df_train.std()
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = coef[(coef < abstract_threshold)].index
self.__fitOK = True
elif (self.strategy == 'l1'):
model = LogisticRegression(C=0.01, penalty='l1', solver='saga', n_jobs=(- 1), random_state=0)
model.fit(df_train, y_train)
coef = np.mean(np.abs(model.coef_), axis=0)
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = df_train.columns[(coef < abstract_threshold)]
self.__fitOK = True
elif (self.strategy == 'rf_feature_importance'):
model = RandomForestClassifier(n_estimators=50, n_jobs=(- 1), random_state=0)
model.fit(df_train, y_train)
coef = model.feature_importances_
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = df_train.columns[(coef < abstract_threshold)]
self.__fitOK = True
else:
raise ValueError("Strategy invalid. Please choose between 'variance', 'l1' or 'rf_feature_importance'")
return self
|
Fits Clf_feature_selector
Parameters
----------
df_train : pandas dataframe of shape = (n_train, n_features)
The train dataset with numerical features and no NA
y_train : pandas series of shape = (n_train, )
The target for classification task. Must be encoded.
Returns
-------
object
self
|
mlbox/model/classification/feature_selector.py
|
fit
|
manugarri/MLBox
| 1,382
|
python
|
def fit(self, df_train, y_train):
'Fits Clf_feature_selector\n\n Parameters\n ----------\n df_train : pandas dataframe of shape = (n_train, n_features)\n The train dataset with numerical features and no NA\n\n y_train : pandas series of shape = (n_train, )\n The target for classification task. Must be encoded.\n\n Returns\n -------\n object\n self\n '
if ((type(df_train) != pd.SparseDataFrame) and (type(df_train) != pd.DataFrame)):
raise ValueError('df_train must be a DataFrame')
if (type(y_train) != pd.core.series.Series):
raise ValueError('y_train must be a Series')
if (self.strategy == 'variance'):
coef = df_train.std()
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = coef[(coef < abstract_threshold)].index
self.__fitOK = True
elif (self.strategy == 'l1'):
model = LogisticRegression(C=0.01, penalty='l1', solver='saga', n_jobs=(- 1), random_state=0)
model.fit(df_train, y_train)
coef = np.mean(np.abs(model.coef_), axis=0)
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = df_train.columns[(coef < abstract_threshold)]
self.__fitOK = True
elif (self.strategy == 'rf_feature_importance'):
model = RandomForestClassifier(n_estimators=50, n_jobs=(- 1), random_state=0)
model.fit(df_train, y_train)
coef = model.feature_importances_
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = df_train.columns[(coef < abstract_threshold)]
self.__fitOK = True
else:
raise ValueError("Strategy invalid. Please choose between 'variance', 'l1' or 'rf_feature_importance'")
return self
|
def fit(self, df_train, y_train):
'Fits Clf_feature_selector\n\n Parameters\n ----------\n df_train : pandas dataframe of shape = (n_train, n_features)\n The train dataset with numerical features and no NA\n\n y_train : pandas series of shape = (n_train, )\n The target for classification task. Must be encoded.\n\n Returns\n -------\n object\n self\n '
if ((type(df_train) != pd.SparseDataFrame) and (type(df_train) != pd.DataFrame)):
raise ValueError('df_train must be a DataFrame')
if (type(y_train) != pd.core.series.Series):
raise ValueError('y_train must be a Series')
if (self.strategy == 'variance'):
coef = df_train.std()
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = coef[(coef < abstract_threshold)].index
self.__fitOK = True
elif (self.strategy == 'l1'):
model = LogisticRegression(C=0.01, penalty='l1', solver='saga', n_jobs=(- 1), random_state=0)
model.fit(df_train, y_train)
coef = np.mean(np.abs(model.coef_), axis=0)
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = df_train.columns[(coef < abstract_threshold)]
self.__fitOK = True
elif (self.strategy == 'rf_feature_importance'):
model = RandomForestClassifier(n_estimators=50, n_jobs=(- 1), random_state=0)
model.fit(df_train, y_train)
coef = model.feature_importances_
abstract_threshold = np.percentile(coef, (100.0 * self.threshold))
self.__to_discard = df_train.columns[(coef < abstract_threshold)]
self.__fitOK = True
else:
raise ValueError("Strategy invalid. Please choose between 'variance', 'l1' or 'rf_feature_importance'")
return self<|docstring|>Fits Clf_feature_selector
Parameters
----------
df_train : pandas dataframe of shape = (n_train, n_features)
The train dataset with numerical features and no NA
y_train : pandas series of shape = (n_train, )
The target for classification task. Must be encoded.
Returns
-------
object
self<|endoftext|>
|
b5c725897494991aaae895f80760ee177d54e0b917f7b7d599cc0a80e21e8ace
|
def transform(self, df):
'Transforms the dataset\n\n Parameters\n ----------\n df : pandas dataframe of shape = (n, n_features)\n The dataset with numerical features and no NA\n\n Returns\n -------\n pandas dataframe of shape = (n_train, n_features*(1-threshold))\n The train dataset with relevant features\n '
if self.__fitOK:
if ((type(df) != pd.SparseDataFrame) and (type(df) != pd.DataFrame)):
raise ValueError('df must be a DataFrame')
return df.drop(self.__to_discard, axis=1)
else:
raise ValueError('call fit or fit_transform function before')
|
Transforms the dataset
Parameters
----------
df : pandas dataframe of shape = (n, n_features)
The dataset with numerical features and no NA
Returns
-------
pandas dataframe of shape = (n_train, n_features*(1-threshold))
The train dataset with relevant features
|
mlbox/model/classification/feature_selector.py
|
transform
|
manugarri/MLBox
| 1,382
|
python
|
def transform(self, df):
'Transforms the dataset\n\n Parameters\n ----------\n df : pandas dataframe of shape = (n, n_features)\n The dataset with numerical features and no NA\n\n Returns\n -------\n pandas dataframe of shape = (n_train, n_features*(1-threshold))\n The train dataset with relevant features\n '
if self.__fitOK:
if ((type(df) != pd.SparseDataFrame) and (type(df) != pd.DataFrame)):
raise ValueError('df must be a DataFrame')
return df.drop(self.__to_discard, axis=1)
else:
raise ValueError('call fit or fit_transform function before')
|
def transform(self, df):
'Transforms the dataset\n\n Parameters\n ----------\n df : pandas dataframe of shape = (n, n_features)\n The dataset with numerical features and no NA\n\n Returns\n -------\n pandas dataframe of shape = (n_train, n_features*(1-threshold))\n The train dataset with relevant features\n '
if self.__fitOK:
if ((type(df) != pd.SparseDataFrame) and (type(df) != pd.DataFrame)):
raise ValueError('df must be a DataFrame')
return df.drop(self.__to_discard, axis=1)
else:
raise ValueError('call fit or fit_transform function before')<|docstring|>Transforms the dataset
Parameters
----------
df : pandas dataframe of shape = (n, n_features)
The dataset with numerical features and no NA
Returns
-------
pandas dataframe of shape = (n_train, n_features*(1-threshold))
The train dataset with relevant features<|endoftext|>
|
43d120d28309d8b540d213d304b670e91e4904bf157428868d6017e6ca94c875
|
def fit_transform(self, df_train, y_train):
'Fits Clf_feature_selector and transforms the dataset\n \n Parameters\n ----------\n df_train : pandas dataframe of shape = (n_train, n_features)\n The train dataset with numerical features and no NA\n\n y_train : pandas series of shape = (n_train, ). \n The target for classification task. Must be encoded.\n \n Returns\n -------\n pandas dataframe of shape = (n_train, n_features*(1-threshold))\n The train dataset with relevant features\n '
self.fit(df_train, y_train)
return self.transform(df_train)
|
Fits Clf_feature_selector and transforms the dataset
Parameters
----------
df_train : pandas dataframe of shape = (n_train, n_features)
The train dataset with numerical features and no NA
y_train : pandas series of shape = (n_train, ).
The target for classification task. Must be encoded.
Returns
-------
pandas dataframe of shape = (n_train, n_features*(1-threshold))
The train dataset with relevant features
|
mlbox/model/classification/feature_selector.py
|
fit_transform
|
manugarri/MLBox
| 1,382
|
python
|
def fit_transform(self, df_train, y_train):
'Fits Clf_feature_selector and transforms the dataset\n \n Parameters\n ----------\n df_train : pandas dataframe of shape = (n_train, n_features)\n The train dataset with numerical features and no NA\n\n y_train : pandas series of shape = (n_train, ). \n The target for classification task. Must be encoded.\n \n Returns\n -------\n pandas dataframe of shape = (n_train, n_features*(1-threshold))\n The train dataset with relevant features\n '
self.fit(df_train, y_train)
return self.transform(df_train)
|
def fit_transform(self, df_train, y_train):
'Fits Clf_feature_selector and transforms the dataset\n \n Parameters\n ----------\n df_train : pandas dataframe of shape = (n_train, n_features)\n The train dataset with numerical features and no NA\n\n y_train : pandas series of shape = (n_train, ). \n The target for classification task. Must be encoded.\n \n Returns\n -------\n pandas dataframe of shape = (n_train, n_features*(1-threshold))\n The train dataset with relevant features\n '
self.fit(df_train, y_train)
return self.transform(df_train)<|docstring|>Fits Clf_feature_selector and transforms the dataset
Parameters
----------
df_train : pandas dataframe of shape = (n_train, n_features)
The train dataset with numerical features and no NA
y_train : pandas series of shape = (n_train, ).
The target for classification task. Must be encoded.
Returns
-------
pandas dataframe of shape = (n_train, n_features*(1-threshold))
The train dataset with relevant features<|endoftext|>
|
bbc528cefb7eb366bc4cbb02d5e0133d9ddbf568b1026f54623a2065285c7507
|
def measure_snr(wave, flux, block_size=1.0):
'\n Estimate SNR of given spectral data\n :param wave: wavelength grid\n :type wave: ndarray\n :param flux: flux\n :type flux: ndarray\n\n :return: SNR, SNR of each of the block\n :rtype: list\n '
xmin = wave[0]
xmax = (xmin + block_size)
SNR = []
while (xmin < wave[(- 1)]):
flux_block = flux[np.where(((wave > xmin) & (wave < xmax)))]
if (len(flux_block) == 1):
break
if (np.nanmean(flux_block) > 0.0):
sigma_block = (np.nanmean(flux_block) / np.nanstd(flux_block))
SNR.append(sigma_block)
xmin = xmax.copy()
xmax = (xmin + block_size)
return SNR
|
Estimate SNR of given spectral data
:param wave: wavelength grid
:type wave: ndarray
:param flux: flux
:type flux: ndarray
:return: SNR, SNR of each of the block
:rtype: list
|
edibles/utils/ISLineFitter.py
|
measure_snr
|
jancami/edibles
| 8
|
python
|
def measure_snr(wave, flux, block_size=1.0):
'\n Estimate SNR of given spectral data\n :param wave: wavelength grid\n :type wave: ndarray\n :param flux: flux\n :type flux: ndarray\n\n :return: SNR, SNR of each of the block\n :rtype: list\n '
xmin = wave[0]
xmax = (xmin + block_size)
SNR = []
while (xmin < wave[(- 1)]):
flux_block = flux[np.where(((wave > xmin) & (wave < xmax)))]
if (len(flux_block) == 1):
break
if (np.nanmean(flux_block) > 0.0):
sigma_block = (np.nanmean(flux_block) / np.nanstd(flux_block))
SNR.append(sigma_block)
xmin = xmax.copy()
xmax = (xmin + block_size)
return SNR
|
def measure_snr(wave, flux, block_size=1.0):
'\n Estimate SNR of given spectral data\n :param wave: wavelength grid\n :type wave: ndarray\n :param flux: flux\n :type flux: ndarray\n\n :return: SNR, SNR of each of the block\n :rtype: list\n '
xmin = wave[0]
xmax = (xmin + block_size)
SNR = []
while (xmin < wave[(- 1)]):
flux_block = flux[np.where(((wave > xmin) & (wave < xmax)))]
if (len(flux_block) == 1):
break
if (np.nanmean(flux_block) > 0.0):
sigma_block = (np.nanmean(flux_block) / np.nanstd(flux_block))
SNR.append(sigma_block)
xmin = xmax.copy()
xmax = (xmin + block_size)
return SNR<|docstring|>Estimate SNR of given spectral data
:param wave: wavelength grid
:type wave: ndarray
:param flux: flux
:type flux: ndarray
:return: SNR, SNR of each of the block
:rtype: list<|endoftext|>
|
4058ab42bfc56197fabc32e362ce687d3b4478df07e72350d337911498ae5245
|
def fit(self, species='KI', n_anchors=5, windowsize=3, criteria='BIC', **kwargs):
'\n The main fitting method for the class.\n Currently kwargs for select_species_data to make code more pretty\n :param species: name of the species\n :param n_anchors: number of anchor points for spline continuum, default: 5\n :param windowsize: width of wavelength window on EACH side of target line, default: 3 (AA)\n :param kwargs: for select_species_data, allowed kwargs are:\n Wave, OscillatorStrengthm, Gamma and their Max/Min\n :return:\n '
(spec_name, lam_0, fjj, gamma) = self.select_species_data(species=species, **kwargs)
_ = self.getData2Fit(lam_0, windowsize=windowsize)
while True:
n_components = len(self.model_all)
print(('\n' + ('=' * 40)))
print(('Fitting model with %i component...' % n_components))
(model2fit, pars_guess) = self.buildModel(lam_0, fjj, gamma, n_anchors)
result = model2fit.fit(data=self.flux2fit, params=pars_guess, x=self.wave2fit, weights=((np.ones_like(self.flux2fit) * self.SNR) / np.median(self.flux2fit)))
self.__afterFit(model2fit, result)
stop_flag = self.bayesianCriterion(criteria=criteria)
if (self.verbose >= 1):
self.plotModel(which=(- 1), v_next=self.getNextVoff(), sleep=10)
if stop_flag:
break
return self.result_all[(- 2)]
|
The main fitting method for the class.
Currently kwargs for select_species_data to make code more pretty
:param species: name of the species
:param n_anchors: number of anchor points for spline continuum, default: 5
:param windowsize: width of wavelength window on EACH side of target line, default: 3 (AA)
:param kwargs: for select_species_data, allowed kwargs are:
Wave, OscillatorStrengthm, Gamma and their Max/Min
:return:
|
edibles/utils/ISLineFitter.py
|
fit
|
jancami/edibles
| 8
|
python
|
def fit(self, species='KI', n_anchors=5, windowsize=3, criteria='BIC', **kwargs):
'\n The main fitting method for the class.\n Currently kwargs for select_species_data to make code more pretty\n :param species: name of the species\n :param n_anchors: number of anchor points for spline continuum, default: 5\n :param windowsize: width of wavelength window on EACH side of target line, default: 3 (AA)\n :param kwargs: for select_species_data, allowed kwargs are:\n Wave, OscillatorStrengthm, Gamma and their Max/Min\n :return:\n '
(spec_name, lam_0, fjj, gamma) = self.select_species_data(species=species, **kwargs)
_ = self.getData2Fit(lam_0, windowsize=windowsize)
while True:
n_components = len(self.model_all)
print(('\n' + ('=' * 40)))
print(('Fitting model with %i component...' % n_components))
(model2fit, pars_guess) = self.buildModel(lam_0, fjj, gamma, n_anchors)
result = model2fit.fit(data=self.flux2fit, params=pars_guess, x=self.wave2fit, weights=((np.ones_like(self.flux2fit) * self.SNR) / np.median(self.flux2fit)))
self.__afterFit(model2fit, result)
stop_flag = self.bayesianCriterion(criteria=criteria)
if (self.verbose >= 1):
self.plotModel(which=(- 1), v_next=self.getNextVoff(), sleep=10)
if stop_flag:
break
return self.result_all[(- 2)]
|
def fit(self, species='KI', n_anchors=5, windowsize=3, criteria='BIC', **kwargs):
'\n The main fitting method for the class.\n Currently kwargs for select_species_data to make code more pretty\n :param species: name of the species\n :param n_anchors: number of anchor points for spline continuum, default: 5\n :param windowsize: width of wavelength window on EACH side of target line, default: 3 (AA)\n :param kwargs: for select_species_data, allowed kwargs are:\n Wave, OscillatorStrengthm, Gamma and their Max/Min\n :return:\n '
(spec_name, lam_0, fjj, gamma) = self.select_species_data(species=species, **kwargs)
_ = self.getData2Fit(lam_0, windowsize=windowsize)
while True:
n_components = len(self.model_all)
print(('\n' + ('=' * 40)))
print(('Fitting model with %i component...' % n_components))
(model2fit, pars_guess) = self.buildModel(lam_0, fjj, gamma, n_anchors)
result = model2fit.fit(data=self.flux2fit, params=pars_guess, x=self.wave2fit, weights=((np.ones_like(self.flux2fit) * self.SNR) / np.median(self.flux2fit)))
self.__afterFit(model2fit, result)
stop_flag = self.bayesianCriterion(criteria=criteria)
if (self.verbose >= 1):
self.plotModel(which=(- 1), v_next=self.getNextVoff(), sleep=10)
if stop_flag:
break
return self.result_all[(- 2)]<|docstring|>The main fitting method for the class.
Currently kwargs for select_species_data to make code more pretty
:param species: name of the species
:param n_anchors: number of anchor points for spline continuum, default: 5
:param windowsize: width of wavelength window on EACH side of target line, default: 3 (AA)
:param kwargs: for select_species_data, allowed kwargs are:
Wave, OscillatorStrengthm, Gamma and their Max/Min
:return:<|endoftext|>
|
7aa76931db3c21d3a0a1273551dad8c2b2cfe43e2e543bf2344865502dfd3f41
|
def select_species_data(self, species=None, **kwargs):
'This method will provide a filtered list of species information that matches\n the specified criteria on sightline/target parameters as well as\n on observational criteria (e.g. wavelength range).\n Use kwargs to make the code look pretty,\n Consider allow both upper and lower cases?\n Allowed kwargs:\n Wave, WaveMin, WaveMax\n OscillatorStrength(Max/Min)\n Gamma(Max/Min)\n '
bool_species_matches = np.zeros(len(self.species_df.index), dtype=bool)
if (species is None):
bool_species_matches = np.ones(len(self.species_df.index), dtype=bool)
elif (isinstance(species, np.ndarray) | isinstance(species, list)):
for thisobject in species:
bool_species_matches = (self.species_df['Species'].str.contains(thisobject) | bool_species_matches)
else:
bool_species_matches = (self.species_df['Species'] == species)
bool_wave_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('Wave' in kwargs.keys()):
bool_wave_matches = (self.species_df.WavelengthAir == kwargs['Wave'])
if ('WaveMin' in kwargs.keys()):
bool_wave_matches = ((self.species_df.WavelengthAir > kwargs['WaveMin']) & bool_wave_matches)
if ('WaveMax' in kwargs.keys()):
bool_wave_matches = ((self.species_df.WavelengthAir < kwargs['WaveMax']) & bool_wave_matches)
bool_osc_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('OscillatorStrength' in kwargs.keys()):
bool_osc_matches = (self.species_df.OscillatorStrength == kwargs['OscillatorStrength'])
if ('OscillatorStrengthMin' in kwargs.keys()):
bool_osc_matches = ((self.species_df.OscillatorStrength > kwargs['OscillatorStrengthMin']) & bool_osc_matches)
if ('OscillatorStrengthMax' in kwargs.keys()):
bool_osc_matches = ((self.species_df.OscillatorStrength < kwargs['OscillatorStrengthMax']) & bool_osc_matches)
bool_gamma_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('Gamma' in kwargs.keys()):
bool_gamma_matches = (self.species_df.Gamma == kwargs['Gamma'])
if ('GammaMin' in kwargs.keys()):
bool_gamma_matches = ((self.species_df.Gamma > kwargs['GammaMin']) & bool_gamma_matches)
if ('GammaMax' in kwargs.keys()):
bool_gamma_matches = ((self.species_df.Gamma < kwargs['GammaMax']) & bool_gamma_matches)
ind = np.where((((bool_species_matches & bool_wave_matches) & bool_osc_matches) & bool_gamma_matches))[0]
self.species_list = self.species_df['Species'].iloc[ind].to_list()
self.air_wavelength = self.species_df['WavelengthAir'].iloc[ind].to_list()
self.oscillator_strength = self.species_df['OscillatorStrength'].iloc[ind].to_list()
self.gamma = self.species_df['Gamma'].iloc[ind].to_list()
return (self.species_list, self.air_wavelength, self.oscillator_strength, self.gamma)
|
This method will provide a filtered list of species information that matches
the specified criteria on sightline/target parameters as well as
on observational criteria (e.g. wavelength range).
Use kwargs to make the code look pretty,
Consider allow both upper and lower cases?
Allowed kwargs:
Wave, WaveMin, WaveMax
OscillatorStrength(Max/Min)
Gamma(Max/Min)
|
edibles/utils/ISLineFitter.py
|
select_species_data
|
jancami/edibles
| 8
|
python
|
def select_species_data(self, species=None, **kwargs):
'This method will provide a filtered list of species information that matches\n the specified criteria on sightline/target parameters as well as\n on observational criteria (e.g. wavelength range).\n Use kwargs to make the code look pretty,\n Consider allow both upper and lower cases?\n Allowed kwargs:\n Wave, WaveMin, WaveMax\n OscillatorStrength(Max/Min)\n Gamma(Max/Min)\n '
bool_species_matches = np.zeros(len(self.species_df.index), dtype=bool)
if (species is None):
bool_species_matches = np.ones(len(self.species_df.index), dtype=bool)
elif (isinstance(species, np.ndarray) | isinstance(species, list)):
for thisobject in species:
bool_species_matches = (self.species_df['Species'].str.contains(thisobject) | bool_species_matches)
else:
bool_species_matches = (self.species_df['Species'] == species)
bool_wave_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('Wave' in kwargs.keys()):
bool_wave_matches = (self.species_df.WavelengthAir == kwargs['Wave'])
if ('WaveMin' in kwargs.keys()):
bool_wave_matches = ((self.species_df.WavelengthAir > kwargs['WaveMin']) & bool_wave_matches)
if ('WaveMax' in kwargs.keys()):
bool_wave_matches = ((self.species_df.WavelengthAir < kwargs['WaveMax']) & bool_wave_matches)
bool_osc_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('OscillatorStrength' in kwargs.keys()):
bool_osc_matches = (self.species_df.OscillatorStrength == kwargs['OscillatorStrength'])
if ('OscillatorStrengthMin' in kwargs.keys()):
bool_osc_matches = ((self.species_df.OscillatorStrength > kwargs['OscillatorStrengthMin']) & bool_osc_matches)
if ('OscillatorStrengthMax' in kwargs.keys()):
bool_osc_matches = ((self.species_df.OscillatorStrength < kwargs['OscillatorStrengthMax']) & bool_osc_matches)
bool_gamma_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('Gamma' in kwargs.keys()):
bool_gamma_matches = (self.species_df.Gamma == kwargs['Gamma'])
if ('GammaMin' in kwargs.keys()):
bool_gamma_matches = ((self.species_df.Gamma > kwargs['GammaMin']) & bool_gamma_matches)
if ('GammaMax' in kwargs.keys()):
bool_gamma_matches = ((self.species_df.Gamma < kwargs['GammaMax']) & bool_gamma_matches)
ind = np.where((((bool_species_matches & bool_wave_matches) & bool_osc_matches) & bool_gamma_matches))[0]
self.species_list = self.species_df['Species'].iloc[ind].to_list()
self.air_wavelength = self.species_df['WavelengthAir'].iloc[ind].to_list()
self.oscillator_strength = self.species_df['OscillatorStrength'].iloc[ind].to_list()
self.gamma = self.species_df['Gamma'].iloc[ind].to_list()
return (self.species_list, self.air_wavelength, self.oscillator_strength, self.gamma)
|
def select_species_data(self, species=None, **kwargs):
'This method will provide a filtered list of species information that matches\n the specified criteria on sightline/target parameters as well as\n on observational criteria (e.g. wavelength range).\n Use kwargs to make the code look pretty,\n Consider allow both upper and lower cases?\n Allowed kwargs:\n Wave, WaveMin, WaveMax\n OscillatorStrength(Max/Min)\n Gamma(Max/Min)\n '
bool_species_matches = np.zeros(len(self.species_df.index), dtype=bool)
if (species is None):
bool_species_matches = np.ones(len(self.species_df.index), dtype=bool)
elif (isinstance(species, np.ndarray) | isinstance(species, list)):
for thisobject in species:
bool_species_matches = (self.species_df['Species'].str.contains(thisobject) | bool_species_matches)
else:
bool_species_matches = (self.species_df['Species'] == species)
bool_wave_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('Wave' in kwargs.keys()):
bool_wave_matches = (self.species_df.WavelengthAir == kwargs['Wave'])
if ('WaveMin' in kwargs.keys()):
bool_wave_matches = ((self.species_df.WavelengthAir > kwargs['WaveMin']) & bool_wave_matches)
if ('WaveMax' in kwargs.keys()):
bool_wave_matches = ((self.species_df.WavelengthAir < kwargs['WaveMax']) & bool_wave_matches)
bool_osc_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('OscillatorStrength' in kwargs.keys()):
bool_osc_matches = (self.species_df.OscillatorStrength == kwargs['OscillatorStrength'])
if ('OscillatorStrengthMin' in kwargs.keys()):
bool_osc_matches = ((self.species_df.OscillatorStrength > kwargs['OscillatorStrengthMin']) & bool_osc_matches)
if ('OscillatorStrengthMax' in kwargs.keys()):
bool_osc_matches = ((self.species_df.OscillatorStrength < kwargs['OscillatorStrengthMax']) & bool_osc_matches)
bool_gamma_matches = np.ones(len(self.species_df.index), dtype=bool)
if ('Gamma' in kwargs.keys()):
bool_gamma_matches = (self.species_df.Gamma == kwargs['Gamma'])
if ('GammaMin' in kwargs.keys()):
bool_gamma_matches = ((self.species_df.Gamma > kwargs['GammaMin']) & bool_gamma_matches)
if ('GammaMax' in kwargs.keys()):
bool_gamma_matches = ((self.species_df.Gamma < kwargs['GammaMax']) & bool_gamma_matches)
ind = np.where((((bool_species_matches & bool_wave_matches) & bool_osc_matches) & bool_gamma_matches))[0]
self.species_list = self.species_df['Species'].iloc[ind].to_list()
self.air_wavelength = self.species_df['WavelengthAir'].iloc[ind].to_list()
self.oscillator_strength = self.species_df['OscillatorStrength'].iloc[ind].to_list()
self.gamma = self.species_df['Gamma'].iloc[ind].to_list()
return (self.species_list, self.air_wavelength, self.oscillator_strength, self.gamma)<|docstring|>This method will provide a filtered list of species information that matches
the specified criteria on sightline/target parameters as well as
on observational criteria (e.g. wavelength range).
Use kwargs to make the code look pretty,
Consider allow both upper and lower cases?
Allowed kwargs:
Wave, WaveMin, WaveMax
OscillatorStrength(Max/Min)
Gamma(Max/Min)<|endoftext|>
|
ad1f2df121b198fc3587f4ce19e417668167e8696a3a199e72a28c5f8b62f148
|
def determine_vrad_from_correlation(self, wave, flux, model):
'\n Function to calculate the correlation between an observed spectrum and a model as a function of\n radial velocity and return the radial velocity with the highest correlation coefficient. \n Args:\n wave (float64): array of wavelengths\n flux (float64): Flux (observed)\n model(float64): model\n Returns:\n vrad_best: radial velocity corresponding to highest correlation. \n '
v_rad_grid = np.arange((- 50.0), 50.0, 0.1)
all_corr = (v_rad_grid * 0.0)
for loop in range(len(v_rad_grid)):
v_rad = v_rad_grid[loop]
Doppler_factor = (1.0 + (v_rad / cst.c.to('km/s').value))
new_wave = (wave * Doppler_factor)
interpolationfunction = interp1d(new_wave, model, kind='cubic', fill_value='extrapolate')
interpolatedModel = interpolationfunction(wave)
(this_c, _) = pearsonr(flux, interpolatedModel)
all_corr[loop] = this_c
v_rad_best = v_rad_grid[np.argmax(all_corr)]
return v_rad_best
|
Function to calculate the correlation between an observed spectrum and a model as a function of
radial velocity and return the radial velocity with the highest correlation coefficient.
Args:
wave (float64): array of wavelengths
flux (float64): Flux (observed)
model(float64): model
Returns:
vrad_best: radial velocity corresponding to highest correlation.
|
edibles/utils/ISLineFitter.py
|
determine_vrad_from_correlation
|
jancami/edibles
| 8
|
python
|
def determine_vrad_from_correlation(self, wave, flux, model):
'\n Function to calculate the correlation between an observed spectrum and a model as a function of\n radial velocity and return the radial velocity with the highest correlation coefficient. \n Args:\n wave (float64): array of wavelengths\n flux (float64): Flux (observed)\n model(float64): model\n Returns:\n vrad_best: radial velocity corresponding to highest correlation. \n '
v_rad_grid = np.arange((- 50.0), 50.0, 0.1)
all_corr = (v_rad_grid * 0.0)
for loop in range(len(v_rad_grid)):
v_rad = v_rad_grid[loop]
Doppler_factor = (1.0 + (v_rad / cst.c.to('km/s').value))
new_wave = (wave * Doppler_factor)
interpolationfunction = interp1d(new_wave, model, kind='cubic', fill_value='extrapolate')
interpolatedModel = interpolationfunction(wave)
(this_c, _) = pearsonr(flux, interpolatedModel)
all_corr[loop] = this_c
v_rad_best = v_rad_grid[np.argmax(all_corr)]
return v_rad_best
|
def determine_vrad_from_correlation(self, wave, flux, model):
'\n Function to calculate the correlation between an observed spectrum and a model as a function of\n radial velocity and return the radial velocity with the highest correlation coefficient. \n Args:\n wave (float64): array of wavelengths\n flux (float64): Flux (observed)\n model(float64): model\n Returns:\n vrad_best: radial velocity corresponding to highest correlation. \n '
v_rad_grid = np.arange((- 50.0), 50.0, 0.1)
all_corr = (v_rad_grid * 0.0)
for loop in range(len(v_rad_grid)):
v_rad = v_rad_grid[loop]
Doppler_factor = (1.0 + (v_rad / cst.c.to('km/s').value))
new_wave = (wave * Doppler_factor)
interpolationfunction = interp1d(new_wave, model, kind='cubic', fill_value='extrapolate')
interpolatedModel = interpolationfunction(wave)
(this_c, _) = pearsonr(flux, interpolatedModel)
all_corr[loop] = this_c
v_rad_best = v_rad_grid[np.argmax(all_corr)]
return v_rad_best<|docstring|>Function to calculate the correlation between an observed spectrum and a model as a function of
radial velocity and return the radial velocity with the highest correlation coefficient.
Args:
wave (float64): array of wavelengths
flux (float64): Flux (observed)
model(float64): model
Returns:
vrad_best: radial velocity corresponding to highest correlation.<|endoftext|>
|
893d591c98807638843786cc94511e1e5a7f7ccc6c98684828b20dc0245baa02
|
def __init__(self, n_components, lam_0=[3302.369, 3302.978], fjj=[0.00826, 0.00406], gamma=[62800000.0, 62800000.0], v_res=3.0, independent_vars=['x'], prefix='', nan_policy='raise', n_step=25, verbose=0, **kwargs):
"\n :param n_components: int, number of velocity components\n :param lam_0: list, air wavelength of target line. lam_0, fjj and gamma should have same length\n :param fjj: list, oscillator strengths\n :param gamma: list, Gamma parameter related to broadening.\n :param v_res: float, resolution in km/s\n :param independent_vars: from lmfit and Klay's code\n :param prefix: from lmfit and Klay's code\n :param nan_policy: from lmfit and Klay's code\n :param n_step: int, no. of points in 1*FWHM during calculation. Under-sample losses information\n but over-sample losses efficiency.\n :param verbose: int, if verbose=2, print V_off; if verbos>=3, print all parameter\n :param kwargs: ???\n "
(self.n_components, self.lam_0, self.fjj, self.gamma, self.n_setp) = self.__inputCheck(n_components, lam_0, fjj, gamma, n_step)
self.v_res = v_res
self.verbose = verbose
self.N_init = self.__estimateN(tau0=0.1)
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars})
self.b_names = [('b_Cloud%i' % i) for i in range(n_components)]
self.N_names = [('N_Cloud%i' % i) for i in range(n_components)]
self.V_names = [('V_off_Cloud%i' % i) for i in range(n_components)]
kwargs['param_names'] = ((self.b_names + self.N_names) + self.V_names)
params = {}
for name in kwargs['param_names']:
if (name[0] == 'b'):
params[name] = 1.0
if (name[0] == 'N'):
params[name] = self.N_init
if (name[0] == 'V'):
params[name] = 0.0
def calcISLineModel(x, b_Cloud0=1.0, N_Cloud0=1.0, V_off_Cloud0=0.0, **kwargs):
lambda0 = (self.lam_0 * self.n_components)
f = (self.fjj * self.n_components)
gamma = (self.gamma * self.n_components)
v_resolution = self.v_res
bs = ([b_Cloud0] * len(self.lam_0))
Ns = ([N_Cloud0] * len(self.lam_0))
V_offs = ([V_off_Cloud0] * len(self.lam_0))
for name in kwargs.keys():
if (name[0] == 'b'):
bs = (bs + ([kwargs[name]] * len(self.lam_0)))
if (name[0] == 'N'):
Ns = (Ns + ([kwargs[name]] * len(self.lam_0)))
if (name[0] == 'V'):
V_offs = (V_offs + ([kwargs[name]] * len(self.lam_0)))
if (self.verbose == 2):
print('V_off: ', [('%.2f' % item) for item in V_offs[0::len(self.lam_0)]])
if (self.verbose >= 3):
print('========= Line Model =========')
V_all = V_offs[0::len(self.lam_0)]
V_all = [('%.2f' % item) for item in V_all]
print('V_off: ', V_all)
b_all = bs[0::len(self.lam_0)]
b_all = [('%.2f' % item) for item in b_all]
print('b: ', b_all)
N_all = Ns[0::len(self.lam_0)]
N_mag = math.floor(np.log10(np.min(N_all)))
N_all = [(item / (10 ** N_mag)) for item in N_all]
N_all = [('%.2f' % item) for item in N_all]
print(('N (X10^%i): ' % N_mag), N_all)
if (self.n_components > 0):
flux = voigt_absorption_line(x, lambda0=lambda0, b=bs, N=Ns, f=f, v_rad=V_offs, gamma=gamma, v_resolution=v_resolution, n_step=self.n_setp)
elif (self.n_components == 0):
flux = np.ones_like(x)
return flux
sig = inspect.signature(calcISLineModel)
base_b = inspect.signature(calcISLineModel).parameters['b_Cloud0']
base_N = inspect.signature(calcISLineModel).parameters['N_Cloud0']
base_V_off = inspect.signature(calcISLineModel).parameters['V_off_Cloud0']
d = {'x': sig.parameters['x']}
for i in range(n_components):
b_key = ('b_Cloud' + str(i))
b_val = base_b.replace(name=b_key)
d[b_key] = b_val
N_key = ('N_Cloud' + str(i))
N_val = base_N.replace(name=N_key)
d[N_key] = N_val
V_off_key = ('V_off_Cloud' + str(i))
V_off_val = base_V_off.replace(name=V_off_key)
d[V_off_key] = V_off_val
d = collections.OrderedDict(d)
calcISLineModel.__signature__ = sig.replace(parameters=tuple(d.values()))
super().__init__(calcISLineModel, **kwargs)
|
:param n_components: int, number of velocity components
:param lam_0: list, air wavelength of target line. lam_0, fjj and gamma should have same length
:param fjj: list, oscillator strengths
:param gamma: list, Gamma parameter related to broadening.
:param v_res: float, resolution in km/s
:param independent_vars: from lmfit and Klay's code
:param prefix: from lmfit and Klay's code
:param nan_policy: from lmfit and Klay's code
:param n_step: int, no. of points in 1*FWHM during calculation. Under-sample losses information
but over-sample losses efficiency.
:param verbose: int, if verbose=2, print V_off; if verbos>=3, print all parameter
:param kwargs: ???
|
edibles/utils/ISLineFitter.py
|
__init__
|
jancami/edibles
| 8
|
python
|
def __init__(self, n_components, lam_0=[3302.369, 3302.978], fjj=[0.00826, 0.00406], gamma=[62800000.0, 62800000.0], v_res=3.0, independent_vars=['x'], prefix=, nan_policy='raise', n_step=25, verbose=0, **kwargs):
"\n :param n_components: int, number of velocity components\n :param lam_0: list, air wavelength of target line. lam_0, fjj and gamma should have same length\n :param fjj: list, oscillator strengths\n :param gamma: list, Gamma parameter related to broadening.\n :param v_res: float, resolution in km/s\n :param independent_vars: from lmfit and Klay's code\n :param prefix: from lmfit and Klay's code\n :param nan_policy: from lmfit and Klay's code\n :param n_step: int, no. of points in 1*FWHM during calculation. Under-sample losses information\n but over-sample losses efficiency.\n :param verbose: int, if verbose=2, print V_off; if verbos>=3, print all parameter\n :param kwargs: ???\n "
(self.n_components, self.lam_0, self.fjj, self.gamma, self.n_setp) = self.__inputCheck(n_components, lam_0, fjj, gamma, n_step)
self.v_res = v_res
self.verbose = verbose
self.N_init = self.__estimateN(tau0=0.1)
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars})
self.b_names = [('b_Cloud%i' % i) for i in range(n_components)]
self.N_names = [('N_Cloud%i' % i) for i in range(n_components)]
self.V_names = [('V_off_Cloud%i' % i) for i in range(n_components)]
kwargs['param_names'] = ((self.b_names + self.N_names) + self.V_names)
params = {}
for name in kwargs['param_names']:
if (name[0] == 'b'):
params[name] = 1.0
if (name[0] == 'N'):
params[name] = self.N_init
if (name[0] == 'V'):
params[name] = 0.0
def calcISLineModel(x, b_Cloud0=1.0, N_Cloud0=1.0, V_off_Cloud0=0.0, **kwargs):
lambda0 = (self.lam_0 * self.n_components)
f = (self.fjj * self.n_components)
gamma = (self.gamma * self.n_components)
v_resolution = self.v_res
bs = ([b_Cloud0] * len(self.lam_0))
Ns = ([N_Cloud0] * len(self.lam_0))
V_offs = ([V_off_Cloud0] * len(self.lam_0))
for name in kwargs.keys():
if (name[0] == 'b'):
bs = (bs + ([kwargs[name]] * len(self.lam_0)))
if (name[0] == 'N'):
Ns = (Ns + ([kwargs[name]] * len(self.lam_0)))
if (name[0] == 'V'):
V_offs = (V_offs + ([kwargs[name]] * len(self.lam_0)))
if (self.verbose == 2):
print('V_off: ', [('%.2f' % item) for item in V_offs[0::len(self.lam_0)]])
if (self.verbose >= 3):
print('========= Line Model =========')
V_all = V_offs[0::len(self.lam_0)]
V_all = [('%.2f' % item) for item in V_all]
print('V_off: ', V_all)
b_all = bs[0::len(self.lam_0)]
b_all = [('%.2f' % item) for item in b_all]
print('b: ', b_all)
N_all = Ns[0::len(self.lam_0)]
N_mag = math.floor(np.log10(np.min(N_all)))
N_all = [(item / (10 ** N_mag)) for item in N_all]
N_all = [('%.2f' % item) for item in N_all]
print(('N (X10^%i): ' % N_mag), N_all)
if (self.n_components > 0):
flux = voigt_absorption_line(x, lambda0=lambda0, b=bs, N=Ns, f=f, v_rad=V_offs, gamma=gamma, v_resolution=v_resolution, n_step=self.n_setp)
elif (self.n_components == 0):
flux = np.ones_like(x)
return flux
sig = inspect.signature(calcISLineModel)
base_b = inspect.signature(calcISLineModel).parameters['b_Cloud0']
base_N = inspect.signature(calcISLineModel).parameters['N_Cloud0']
base_V_off = inspect.signature(calcISLineModel).parameters['V_off_Cloud0']
d = {'x': sig.parameters['x']}
for i in range(n_components):
b_key = ('b_Cloud' + str(i))
b_val = base_b.replace(name=b_key)
d[b_key] = b_val
N_key = ('N_Cloud' + str(i))
N_val = base_N.replace(name=N_key)
d[N_key] = N_val
V_off_key = ('V_off_Cloud' + str(i))
V_off_val = base_V_off.replace(name=V_off_key)
d[V_off_key] = V_off_val
d = collections.OrderedDict(d)
calcISLineModel.__signature__ = sig.replace(parameters=tuple(d.values()))
super().__init__(calcISLineModel, **kwargs)
|
def __init__(self, n_components, lam_0=[3302.369, 3302.978], fjj=[0.00826, 0.00406], gamma=[62800000.0, 62800000.0], v_res=3.0, independent_vars=['x'], prefix=, nan_policy='raise', n_step=25, verbose=0, **kwargs):
"\n :param n_components: int, number of velocity components\n :param lam_0: list, air wavelength of target line. lam_0, fjj and gamma should have same length\n :param fjj: list, oscillator strengths\n :param gamma: list, Gamma parameter related to broadening.\n :param v_res: float, resolution in km/s\n :param independent_vars: from lmfit and Klay's code\n :param prefix: from lmfit and Klay's code\n :param nan_policy: from lmfit and Klay's code\n :param n_step: int, no. of points in 1*FWHM during calculation. Under-sample losses information\n but over-sample losses efficiency.\n :param verbose: int, if verbose=2, print V_off; if verbos>=3, print all parameter\n :param kwargs: ???\n "
(self.n_components, self.lam_0, self.fjj, self.gamma, self.n_setp) = self.__inputCheck(n_components, lam_0, fjj, gamma, n_step)
self.v_res = v_res
self.verbose = verbose
self.N_init = self.__estimateN(tau0=0.1)
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy, 'independent_vars': independent_vars})
self.b_names = [('b_Cloud%i' % i) for i in range(n_components)]
self.N_names = [('N_Cloud%i' % i) for i in range(n_components)]
self.V_names = [('V_off_Cloud%i' % i) for i in range(n_components)]
kwargs['param_names'] = ((self.b_names + self.N_names) + self.V_names)
params = {}
for name in kwargs['param_names']:
if (name[0] == 'b'):
params[name] = 1.0
if (name[0] == 'N'):
params[name] = self.N_init
if (name[0] == 'V'):
params[name] = 0.0
def calcISLineModel(x, b_Cloud0=1.0, N_Cloud0=1.0, V_off_Cloud0=0.0, **kwargs):
lambda0 = (self.lam_0 * self.n_components)
f = (self.fjj * self.n_components)
gamma = (self.gamma * self.n_components)
v_resolution = self.v_res
bs = ([b_Cloud0] * len(self.lam_0))
Ns = ([N_Cloud0] * len(self.lam_0))
V_offs = ([V_off_Cloud0] * len(self.lam_0))
for name in kwargs.keys():
if (name[0] == 'b'):
bs = (bs + ([kwargs[name]] * len(self.lam_0)))
if (name[0] == 'N'):
Ns = (Ns + ([kwargs[name]] * len(self.lam_0)))
if (name[0] == 'V'):
V_offs = (V_offs + ([kwargs[name]] * len(self.lam_0)))
if (self.verbose == 2):
print('V_off: ', [('%.2f' % item) for item in V_offs[0::len(self.lam_0)]])
if (self.verbose >= 3):
print('========= Line Model =========')
V_all = V_offs[0::len(self.lam_0)]
V_all = [('%.2f' % item) for item in V_all]
print('V_off: ', V_all)
b_all = bs[0::len(self.lam_0)]
b_all = [('%.2f' % item) for item in b_all]
print('b: ', b_all)
N_all = Ns[0::len(self.lam_0)]
N_mag = math.floor(np.log10(np.min(N_all)))
N_all = [(item / (10 ** N_mag)) for item in N_all]
N_all = [('%.2f' % item) for item in N_all]
print(('N (X10^%i): ' % N_mag), N_all)
if (self.n_components > 0):
flux = voigt_absorption_line(x, lambda0=lambda0, b=bs, N=Ns, f=f, v_rad=V_offs, gamma=gamma, v_resolution=v_resolution, n_step=self.n_setp)
elif (self.n_components == 0):
flux = np.ones_like(x)
return flux
sig = inspect.signature(calcISLineModel)
base_b = inspect.signature(calcISLineModel).parameters['b_Cloud0']
base_N = inspect.signature(calcISLineModel).parameters['N_Cloud0']
base_V_off = inspect.signature(calcISLineModel).parameters['V_off_Cloud0']
d = {'x': sig.parameters['x']}
for i in range(n_components):
b_key = ('b_Cloud' + str(i))
b_val = base_b.replace(name=b_key)
d[b_key] = b_val
N_key = ('N_Cloud' + str(i))
N_val = base_N.replace(name=N_key)
d[N_key] = N_val
V_off_key = ('V_off_Cloud' + str(i))
V_off_val = base_V_off.replace(name=V_off_key)
d[V_off_key] = V_off_val
d = collections.OrderedDict(d)
calcISLineModel.__signature__ = sig.replace(parameters=tuple(d.values()))
super().__init__(calcISLineModel, **kwargs)<|docstring|>:param n_components: int, number of velocity components
:param lam_0: list, air wavelength of target line. lam_0, fjj and gamma should have same length
:param fjj: list, oscillator strengths
:param gamma: list, Gamma parameter related to broadening.
:param v_res: float, resolution in km/s
:param independent_vars: from lmfit and Klay's code
:param prefix: from lmfit and Klay's code
:param nan_policy: from lmfit and Klay's code
:param n_step: int, no. of points in 1*FWHM during calculation. Under-sample losses information
but over-sample losses efficiency.
:param verbose: int, if verbose=2, print V_off; if verbos>=3, print all parameter
:param kwargs: ???<|endoftext|>
|
c9a3f7a528fa89b926710d981fff428712a072fb07085913750100cc416fe7fb
|
def _processEntities(self):
'Handle asym ids, etc'
if (self._model is None):
return
b0 = self._model[0]
aCat = DataCategory('pdbx_vrpt_entity')
aCat.appendAttribute('id')
aCat.appendAttribute('type')
aCat.appendAttribute('description')
c0 = b0.getObj('entity')
for idx in range(c0.getRowCount()):
eid = c0.getValue('id', idx)
etype = c0.getValue('type', idx)
edesc = c0.getValue('pdbx_description', idx)
rd = {'id': eid, 'type': etype, 'description': edesc}
aCat.append(rd)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_asym')
aCat.appendAttribute('label_asym_id')
aCat.appendAttribute('entity_id')
c0 = b0.getObj('struct_asym')
for idx in range(c0.getRowCount()):
asym = c0.getValue('id', idx)
entity = c0.getValue('entity_id', idx)
rd = {'label_asym_id': asym, 'entity_id': entity}
aCat.append(rd)
self._curContainer.append(aCat)
|
Handle asym ids, etc
|
ConvertCal.py
|
_processEntities
|
rcsb/py-rcsb_utils_validation
| 0
|
python
|
def _processEntities(self):
if (self._model is None):
return
b0 = self._model[0]
aCat = DataCategory('pdbx_vrpt_entity')
aCat.appendAttribute('id')
aCat.appendAttribute('type')
aCat.appendAttribute('description')
c0 = b0.getObj('entity')
for idx in range(c0.getRowCount()):
eid = c0.getValue('id', idx)
etype = c0.getValue('type', idx)
edesc = c0.getValue('pdbx_description', idx)
rd = {'id': eid, 'type': etype, 'description': edesc}
aCat.append(rd)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_asym')
aCat.appendAttribute('label_asym_id')
aCat.appendAttribute('entity_id')
c0 = b0.getObj('struct_asym')
for idx in range(c0.getRowCount()):
asym = c0.getValue('id', idx)
entity = c0.getValue('entity_id', idx)
rd = {'label_asym_id': asym, 'entity_id': entity}
aCat.append(rd)
self._curContainer.append(aCat)
|
def _processEntities(self):
if (self._model is None):
return
b0 = self._model[0]
aCat = DataCategory('pdbx_vrpt_entity')
aCat.appendAttribute('id')
aCat.appendAttribute('type')
aCat.appendAttribute('description')
c0 = b0.getObj('entity')
for idx in range(c0.getRowCount()):
eid = c0.getValue('id', idx)
etype = c0.getValue('type', idx)
edesc = c0.getValue('pdbx_description', idx)
rd = {'id': eid, 'type': etype, 'description': edesc}
aCat.append(rd)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_asym')
aCat.appendAttribute('label_asym_id')
aCat.appendAttribute('entity_id')
c0 = b0.getObj('struct_asym')
for idx in range(c0.getRowCount()):
asym = c0.getValue('id', idx)
entity = c0.getValue('entity_id', idx)
rd = {'label_asym_id': asym, 'entity_id': entity}
aCat.append(rd)
self._curContainer.append(aCat)<|docstring|>Handle asym ids, etc<|endoftext|>
|
80a3f9c63b91a18900ab9cbc651f2aaf1264398f923db02cf27eaeff01c5d8fc
|
def _addentries(self, ents):
'Adds dictionary of key and id'
aCat = DataCategory('pdbx_vrpt_database')
aCat.appendAttribute('id')
aCat.appendAttribute('code')
for e in ents:
rd = {'id': e['id'], 'code': e['code']}
aCat.append(rd)
self._curContainer.append(aCat)
|
Adds dictionary of key and id
|
ConvertCal.py
|
_addentries
|
rcsb/py-rcsb_utils_validation
| 0
|
python
|
def _addentries(self, ents):
aCat = DataCategory('pdbx_vrpt_database')
aCat.appendAttribute('id')
aCat.appendAttribute('code')
for e in ents:
rd = {'id': e['id'], 'code': e['code']}
aCat.append(rd)
self._curContainer.append(aCat)
|
def _addentries(self, ents):
aCat = DataCategory('pdbx_vrpt_database')
aCat.appendAttribute('id')
aCat.appendAttribute('code')
for e in ents:
rd = {'id': e['id'], 'code': e['code']}
aCat.append(rd)
self._curContainer.append(aCat)<|docstring|>Adds dictionary of key and id<|endoftext|>
|
3719cb0d6ea9db1a943c5be524d262b151942e7787a3cb30852bfefbda742b26
|
def _processEntry(self):
'Process attributes from Entry section'
entry = self._root.find('Entry')
attr = entry.attrib
aCat = DataCategory('pdbx_vrpt_summary')
atMap = [['entry_id', 'pdbid'], ['PDB_deposition_date', 'PDB-deposition-date'], ['PDB_revision_number', 'PDB-revision-number'], ['PDB_revision_date', 'PDB-revision-date'], ['RNA_suiteness', 'RNAsuiteness'], ['protein_DNA_RNA_entities', 'protein-DNA-RNA-entities'], ['model_CA_only', 'CA_ONLY'], ['EMDB_deposition_date', 'EMDB-deposition-date'], ['report_creation_date', 'XMLcreationDate'], ['attempted_validation_steps', 'attemptedValidationSteps'], ['no_ligands_for_mogul', 'no-ligands-for-mogul'], ['no_ligands_for_buster_report', 'no-ligands-for-buster-report'], ['ligands_for_buster_report', 'ligands-for-buster-report'], ['no_percentile_property', 'no-percentile-property']]
rd = {}
for a in atMap:
att = a[0]
if (a[1] != '?'):
val = attr.get(a[1], '?')
else:
val = atlookup[att]
if (att in ['ligands_for_buster_report', 'no_ligands_for_mogul', 'no_ligands_for_buster_report']):
if (val == 'yes'):
val = 'Y'
elif (val == 'no'):
val = 'N'
if (val != '?'):
aCat.appendAttribute(att)
rd[att] = val
aCat.append(rd)
self._curContainer.append(aCat)
self._processChemicalShiftLists(entry)
pbins = attr['percentilebins']
aCat = DataCategory('pdbx_vrpt_percentile_list')
aCat.appendAttribute('id')
aCat.appendAttribute('range')
aCat.appendAttribute('exp_method')
sbins = pbins.split(',')
exp = sbins[(- 1):][0]
print(('EXP is %s' % exp))
if (exp == 'xray'):
self.__absid = '1'
self.__relid = '2'
aCat.append([self.__absid, sbins[0], 'x-ray'])
aCat.append([self.__relid, sbins[1], 'x-ray'])
emeth = 'x-ray'
elif (exp == 'em'):
self.__absid = '1'
self.__relid = '2'
aCat.append([self.__absid, 'all', 'pdb'])
aCat.append([self.__relid, 'all', 'electron microscopy'])
emeth = 'electron microscopy'
elif (exp == 'nmr'):
self.__absid = '1'
self.__relid = '2'
aCat.append([self.__absid, 'all', 'pdb'])
aCat.append([self.__relid, 'all', 'nmr'])
emeth = 'nmr'
else:
print('PBINS', pbins, exp)
sys.exit(1)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_exptl')
aCat.appendAttribute('ordinal')
aCat.appendAttribute('method')
aCat.append(['1', emeth])
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_percentile_conditions')
aCat.appendAttribute('id')
aCat.appendAttribute('type_id')
aCat.appendAttribute('rank')
aCat.appendAttribute('res_high')
aCat.appendAttribute('res_low')
aCat.appendAttribute('number_entries_total')
aCat.appendAttribute('percentile_list_id')
self._pvpc = {}
pmap = [['all_atom_clashscore', self.__absid, 'numPDBids-absolute-percentile-clashscore', '?', '?'], ['all_atom_clashscore', self.__relid, 'numPDBids-relative-percentile-clashscore', 'high-resol-relative-percentile-clashscore', 'low-resol-relative-percentile-clashscore'], ['Ramachandran_outlier_percent', self.__absid, 'numPDBids-absolute-percentile-percent-rama-outliers', '?', '?'], ['Ramachandran_outlier_percent', self.__relid, 'numPDBids-relative-percentile-percent-rama-outliers', 'high-resol-relative-percentile-percent-rama-outliers', 'low-resol-relative-percentile-percent-rama-outliers'], ['rotamer_outliers_percent', self.__absid, 'numPDBids-absolute-percentile-percent-rota-outliers', '?', '?'], ['rotamer_outliers_percent', self.__relid, 'numPDBids-relative-percentile-percent-rota-outliers', 'high-resol-relative-percentile-percent-rota-outliers', 'low-resol-relative-percentile-percent-rota-outliers'], ['R_value_R_free', self.__absid, 'numPDBids-absolute-percentile-DCC_Rfree', '?', '?'], ['R_value_R_free', self.__relid, 'numPDBids-relative-percentile-DCC_Rfree', 'high-resol-relative-percentile-DCC_Rfree', 'low-resol-relative-percentile-DCC_Rfree'], ['RSRZ_outliers_percent', self.__absid, 'numPDBids-absolute-percentile-percent-RSRZ-outliers', '?', '?'], ['RSRZ_outliers_percent', self.__relid, 'numPDBids-relative-percentile-percent-RSRZ-outliers', 'high-resol-relative-percentile-percent-RSRZ-outliers', 'low-resol-relative-percentile-percent-RSRZ-outliers'], ['RNAsuiteness_percent', self.__absid, 'numPDBids-absolute-percentile-RNAsuiteness', '?', '?'], ['RNAsuiteness_percent', self.__relid, 'numPDBids-relative-percentile-RNAsuiteness', 'high-resol-relative-percentile-RNAsuiteness', 'low-resol-relative-percentile-RNAsuiteness']]
cid = 1
for p in pmap:
ptype = p[0]
if (ptype not in self._type_map):
self._type_id += 1
self._type_map[ptype] = self._type_id
ptype_id = self._type_map[ptype]
plist = p[1]
num = attr.get(p[2], '?')
if (num == '?'):
continue
if (p[3] == '?'):
res_high = '?'
else:
res_high = attr.get(p[3], '?')
if (p[4] == '?'):
res_low = '?'
else:
res_low = attr.get(p[4], '?')
aCat.append([str(cid), '?', '?', res_high, res_low, num, plist])
self._pvpc[((str(ptype_id) + '_') + plist)] = str(cid)
cid += 1
vmap = [['all_atom_clashscore', 'absolute-percentile-clashscore', self.__absid], ['all_atom_clashscore', 'relative-percentile-clashscore', self.__relid], ['Ramachandran_outlier_percent', 'absolute-percentile-percent-rama-outliers', self.__absid], ['Ramachandran_outlier_percent', 'relative-percentile-percent-rama-outliers', self.__relid], ['rotamer_outliers_percent', 'absolute-percentile-percent-rota-outliers', self.__absid], ['rotamer_outliers_percent', 'relative-percentile-percent-rota-outliers', self.__relid], ['R_value_R_free', 'absolute-percentile-DCC_Rfree', self.__absid], ['R_value_R_free', 'relative-percentile-DCC_Rfree', self.__relid], ['RSRZ_outliers_percent', 'absolute-percentile-percent-RSRZ-outliers', self.__absid], ['RSRZ_outliers_percent', 'relative-percentile-percent-RSRZ-outliers', self.__relid], ['RNAsuiteness_percent', 'absolute-percentile-RNAsuiteness', self.__absid], ['RNAsuiteness_percent', 'relative-percentile-RNAsuiteness', self.__relid]]
for v in vmap:
ptype = v[0]
if (ptype not in self._type_map):
self._type_id += 1
self._type_map[ptype] = self._type_id
ptype_id = self._type_map[ptype]
rank = attr.get(v[1], '?')
if (rank == '?'):
continue
lid = v[2]
cid = self._pvpc.get(((str(ptype_id) + '_') + lid), '?')
row = (int(cid) - 1)
aCat.setValue(ptype_id, 'type_id', row)
aCat.setValue(rank, 'rank', row)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_diffraction')
atMap = [['exp_method', '?'], ['ordinal', '?'], ['Babinet_b', 'babinet_b'], ['bulk_solvent_b', 'bulk_solvent_b'], ['Wilson_B_estimate', 'WilsonBestimate'], ['I_over_sigma', 'IoverSigma'], ['num_miller_indices', 'numMillerIndices'], ['Babinet_k', 'babinet_k'], ['bulk_solvent_k', 'bulk_solvent_k'], ['Padilla_Yeates_L_mean', 'TwinL'], ['Padilla_Yeates_L2_mean', 'TwinL2'], ['DCC_R', 'DCC_R'], ['DCC_Rfree', 'DCC_Rfree'], ['EDS_R', 'EDS_R'], ['EDS_res_high', 'EDS_resolution'], ['EDS_res_low', 'EDS_resolution_low'], ['Wilson_B_aniso', 'WilsonBaniso'], ['data_anisotropy', 'DataAnisotropy'], ['trans_NCS_details', 'TransNCS'], ['B_factor_type', 'B_factor_type'], ['acentric_outliers', 'acentric_outliers'], ['centric_outliers', 'centric_outliers'], ['data_completeness', 'DataCompleteness'], ['number_reflns_R_free', 'num-free-reflections'], ['percent_free_reflections', 'percent-free-reflections'], ['percent_RSRZ_outliers', 'percent-RSRZ-outliers'], ['PDB_resolution_high', 'PDB-resolution'], ['PDB_resolution_low', 'PDB-resolution-low'], ['PDB_R', 'PDB-R'], ['PDB_Rfree', 'PDB-Rfree']]
atlookup = {'exp_method': emeth, 'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] != '?'):
val = attr.get(a[1], '?')
if (val == 'NotAvailable'):
val = '?'
else:
val = atlookup[att]
rd[att] = val
aCat.append(rd)
if (emeth not in ['electron microscopy']):
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_nmr')
atMap = [['exp_method', '?'], ['ordinal', '?'], ['nmr_models_consistency_flag', 'nmr_models_consistency_flag'], ['nmrclust_representative_model', 'nmrclust_representative_model'], ['medoid_model', 'medoid_model'], ['nmrclust_number_of_outliers', 'nmrclust_number_of_outliers'], ['nmrclust_number_of_models', 'nmrclust_number_of_models'], ['nmrclust_number_of_clusters', 'nmrclust_number_of_clusters'], ['cyrange_number_of_domains', 'cyrange_number_of_domains'], ['chemical_shift_completeness', 'chemical_shift_completeness'], ['chemical_shift_completeness_full_length', 'chemical_shift_completeness_full_length']]
atlookup = {'exp_method': emeth, 'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] != '?'):
val = attr.get(a[1], '?')
else:
val = atlookup[att]
rd[att] = val
aCat.append(rd)
if (emeth in ['nmr']):
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_em')
atMap = [['exp_method', '?'], ['ordinal', '?'], ['contour_level_primary_map', 'contour_level_primary_map'], ['atom_inclusion_all_atoms', 'atom_inclusion_all_atoms'], ['atom_inclusion_backbone', 'atom_inclusion_backbone'], ['author_provided_fsc_resolution_by_cutoff_pt_143', 'author_provided_fsc_resolution_by_cutoff_0.143'], ['author_provided_fsc_resolution_by_cutoff_pt_333', 'author_provided_fsc_resolution_by_cutoff_0.133'], ['author_provided_fsc_resolution_by_cutoff_pt_5', 'author_provided_fsc_resolution_by_cutoff_0.5'], ['author_provided_fsc_resolution_by_cutoff_halfbit', 'author_provided_fsc_resolution_by_cutoff_halfbit'], ['author_provided_fsc_resolution_by_cutoff_onebit', 'author_provided_fsc_resolution_by_cutoff_onebit'], ['author_provided_fsc_resolution_by_cutoff_threesigma', 'author_provided_fsc_resolution_by_cutoff_threesigma'], ['calculated_fsc_resolution_by_cutoff_pt_143', 'calculated_fsc_resolution_by_cutoff_0.143'], ['calculated_fsc_resolution_by_cutoff_pt_333', 'calculated_fsc_resolution_by_cutoff_0.133'], ['calculated_fsc_resolution_by_cutoff_pt_5', 'calculated_fsc_resolution_by_cutoff_0.5'], ['calculated_fsc_resolution_by_cutoff_halfbit', 'calculated_fsc_resolution_by_cutoff_halfbit'], ['calculated_fsc_resolution_by_cutoff_onebit', 'calculated_fsc_resolution_by_cutoff_onebit'], ['calculated_fsc_resolution_by_cutoff_threesigma', 'calculated_fsc_resolution_by_cutoff_threesigma'], ['EMDB_resolution', 'EMDB-resolution']]
atlookup = {'exp_method': emeth, 'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] != '?'):
val = attr.get(a[1], '?')
else:
val = atlookup[att]
rd[att] = val
aCat.append(rd)
if (emeth in ['electron microscopy']):
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_geometry')
atMap = [['ordinal', '?'], ['percent_ramachandran_outliers', 'percent-rama-outliers'], ['clashscore', 'clashscore'], ['angles_RMSZ', 'angles_rmsz'], ['bonds_RMSZ', 'bonds_rmsz'], ['num_angles_RMSZ', 'num_angles_rmsz'], ['num_bonds_RMSZ', 'num_bonds_rmsz'], ['percent_rotamer_outliers', 'percent-rota-outliers']]
atlookup = {'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] == '?'):
val = atlookup[att]
else:
val = attr.get(a[1], '?')
rd[att] = val
aCat.append(rd)
self._curContainer.append(aCat)
|
Process attributes from Entry section
|
ConvertCal.py
|
_processEntry
|
rcsb/py-rcsb_utils_validation
| 0
|
python
|
def _processEntry(self):
    """Convert the validation-report <Entry> XML element into mmCIF categories.

    Reads attributes from the single ``Entry`` node of ``self._root`` and emits,
    into ``self._curContainer``:

    * ``pdbx_vrpt_summary`` -- entry-level summary attributes ('?' values dropped),
    * ``pdbx_vrpt_percentile_list`` / ``pdbx_vrpt_exptl`` -- derived from the
      ``percentilebins`` attribute (xray / em / nmr),
    * ``pdbx_vrpt_percentile_conditions`` -- percentile counts and ranks,
    * one method-specific summary category (diffraction, nmr or em) plus
      ``pdbx_vrpt_summary_geometry``.

    Side effects: sets ``self.__absid`` / ``self.__relid``, populates
    ``self._type_map`` / ``self._type_id`` and ``self._pvpc``, and calls
    ``self._processChemicalShiftLists``.  Exits the process via ``sys.exit(1)``
    on an unrecognized percentile-bin method.
    """
    entry = self._root.find('Entry')
    attr = entry.attrib
    # --- pdbx_vrpt_summary: map XML attribute names -> CIF attribute names ---
    aCat = DataCategory('pdbx_vrpt_summary')
    atMap = [['entry_id', 'pdbid'], ['PDB_deposition_date', 'PDB-deposition-date'], ['PDB_revision_number', 'PDB-revision-number'], ['PDB_revision_date', 'PDB-revision-date'], ['RNA_suiteness', 'RNAsuiteness'], ['protein_DNA_RNA_entities', 'protein-DNA-RNA-entities'], ['model_CA_only', 'CA_ONLY'], ['EMDB_deposition_date', 'EMDB-deposition-date'], ['report_creation_date', 'XMLcreationDate'], ['attempted_validation_steps', 'attemptedValidationSteps'], ['no_ligands_for_mogul', 'no-ligands-for-mogul'], ['no_ligands_for_buster_report', 'no-ligands-for-buster-report'], ['ligands_for_buster_report', 'ligands-for-buster-report'], ['no_percentile_property', 'no-percentile-property']]
    rd = {}
    for a in atMap:
        att = a[0]
        if (a[1] != '?'):
            val = attr.get(a[1], '?')
        else:
            # NOTE(review): dead branch -- no atMap entry above uses '?', and
            # `atlookup` is not defined until later in this function, so this
            # would raise NameError if ever reached.  Confirm intent.
            val = atlookup[att]
        # Normalize XML yes/no flags to CIF Y/N for the boolean-ish columns.
        if (att in ['ligands_for_buster_report', 'no_ligands_for_mogul', 'no_ligands_for_buster_report']):
            if (val == 'yes'):
                val = 'Y'
            elif (val == 'no'):
                val = 'N'
        # Only emit attributes actually present in the XML ('?' means absent).
        if (val != '?'):
            aCat.appendAttribute(att)
            rd[att] = val
    aCat.append(rd)
    self._curContainer.append(aCat)
    self._processChemicalShiftLists(entry)
    # --- pdbx_vrpt_percentile_list: decode the comma-separated percentilebins;
    # the last token names the experimental method. ---
    pbins = attr['percentilebins']
    aCat = DataCategory('pdbx_vrpt_percentile_list')
    aCat.appendAttribute('id')
    aCat.appendAttribute('range')
    aCat.appendAttribute('exp_method')
    sbins = pbins.split(',')
    exp = sbins[(- 1):][0]
    print(('EXP is %s' % exp))
    if (exp == 'xray'):
        # x-ray entries carry explicit absolute/relative resolution ranges.
        self.__absid = '1'
        self.__relid = '2'
        aCat.append([self.__absid, sbins[0], 'x-ray'])
        aCat.append([self.__relid, sbins[1], 'x-ray'])
        emeth = 'x-ray'
    elif (exp == 'em'):
        self.__absid = '1'
        self.__relid = '2'
        aCat.append([self.__absid, 'all', 'pdb'])
        aCat.append([self.__relid, 'all', 'electron microscopy'])
        emeth = 'electron microscopy'
    elif (exp == 'nmr'):
        self.__absid = '1'
        self.__relid = '2'
        aCat.append([self.__absid, 'all', 'pdb'])
        aCat.append([self.__relid, 'all', 'nmr'])
        emeth = 'nmr'
    else:
        # Unknown method token: hard failure.
        print('PBINS', pbins, exp)
        sys.exit(1)
    self._curContainer.append(aCat)
    aCat = DataCategory('pdbx_vrpt_exptl')
    aCat.appendAttribute('ordinal')
    aCat.appendAttribute('method')
    aCat.append(['1', emeth])
    self._curContainer.append(aCat)
    # --- pdbx_vrpt_percentile_conditions: first pass (pmap) creates the rows
    # with entry counts and resolution windows; second pass (vmap) fills in
    # type_id and rank on the rows recorded in self._pvpc. ---
    aCat = DataCategory('pdbx_vrpt_percentile_conditions')
    aCat.appendAttribute('id')
    aCat.appendAttribute('type_id')
    aCat.appendAttribute('rank')
    aCat.appendAttribute('res_high')
    aCat.appendAttribute('res_low')
    aCat.appendAttribute('number_entries_total')
    aCat.appendAttribute('percentile_list_id')
    # _pvpc maps "<type_id>_<percentile_list_id>" -> condition row id.
    self._pvpc = {}
    pmap = [['all_atom_clashscore', self.__absid, 'numPDBids-absolute-percentile-clashscore', '?', '?'], ['all_atom_clashscore', self.__relid, 'numPDBids-relative-percentile-clashscore', 'high-resol-relative-percentile-clashscore', 'low-resol-relative-percentile-clashscore'], ['Ramachandran_outlier_percent', self.__absid, 'numPDBids-absolute-percentile-percent-rama-outliers', '?', '?'], ['Ramachandran_outlier_percent', self.__relid, 'numPDBids-relative-percentile-percent-rama-outliers', 'high-resol-relative-percentile-percent-rama-outliers', 'low-resol-relative-percentile-percent-rama-outliers'], ['rotamer_outliers_percent', self.__absid, 'numPDBids-absolute-percentile-percent-rota-outliers', '?', '?'], ['rotamer_outliers_percent', self.__relid, 'numPDBids-relative-percentile-percent-rota-outliers', 'high-resol-relative-percentile-percent-rota-outliers', 'low-resol-relative-percentile-percent-rota-outliers'], ['R_value_R_free', self.__absid, 'numPDBids-absolute-percentile-DCC_Rfree', '?', '?'], ['R_value_R_free', self.__relid, 'numPDBids-relative-percentile-DCC_Rfree', 'high-resol-relative-percentile-DCC_Rfree', 'low-resol-relative-percentile-DCC_Rfree'], ['RSRZ_outliers_percent', self.__absid, 'numPDBids-absolute-percentile-percent-RSRZ-outliers', '?', '?'], ['RSRZ_outliers_percent', self.__relid, 'numPDBids-relative-percentile-percent-RSRZ-outliers', 'high-resol-relative-percentile-percent-RSRZ-outliers', 'low-resol-relative-percentile-percent-RSRZ-outliers'], ['RNAsuiteness_percent', self.__absid, 'numPDBids-absolute-percentile-RNAsuiteness', '?', '?'], ['RNAsuiteness_percent', self.__relid, 'numPDBids-relative-percentile-RNAsuiteness', 'high-resol-relative-percentile-RNAsuiteness', 'low-resol-relative-percentile-RNAsuiteness']]
    cid = 1
    for p in pmap:
        ptype = p[0]
        # Register new percentile types in the shared type registry.
        if (ptype not in self._type_map):
            self._type_id += 1
            self._type_map[ptype] = self._type_id
        ptype_id = self._type_map[ptype]
        plist = p[1]
        num = attr.get(p[2], '?')
        if (num == '?'):
            continue
        if (p[3] == '?'):
            res_high = '?'
        else:
            res_high = attr.get(p[3], '?')
        if (p[4] == '?'):
            res_low = '?'
        else:
            res_low = attr.get(p[4], '?')
        # type_id and rank are placeholders here; filled by the vmap pass below.
        aCat.append([str(cid), '?', '?', res_high, res_low, num, plist])
        self._pvpc[((str(ptype_id) + '_') + plist)] = str(cid)
        cid += 1
    vmap = [['all_atom_clashscore', 'absolute-percentile-clashscore', self.__absid], ['all_atom_clashscore', 'relative-percentile-clashscore', self.__relid], ['Ramachandran_outlier_percent', 'absolute-percentile-percent-rama-outliers', self.__absid], ['Ramachandran_outlier_percent', 'relative-percentile-percent-rama-outliers', self.__relid], ['rotamer_outliers_percent', 'absolute-percentile-percent-rota-outliers', self.__absid], ['rotamer_outliers_percent', 'relative-percentile-percent-rota-outliers', self.__relid], ['R_value_R_free', 'absolute-percentile-DCC_Rfree', self.__absid], ['R_value_R_free', 'relative-percentile-DCC_Rfree', self.__relid], ['RSRZ_outliers_percent', 'absolute-percentile-percent-RSRZ-outliers', self.__absid], ['RSRZ_outliers_percent', 'relative-percentile-percent-RSRZ-outliers', self.__relid], ['RNAsuiteness_percent', 'absolute-percentile-RNAsuiteness', self.__absid], ['RNAsuiteness_percent', 'relative-percentile-RNAsuiteness', self.__relid]]
    for v in vmap:
        ptype = v[0]
        if (ptype not in self._type_map):
            self._type_id += 1
            self._type_map[ptype] = self._type_id
        ptype_id = self._type_map[ptype]
        rank = attr.get(v[1], '?')
        if (rank == '?'):
            continue
        lid = v[2]
        cid = self._pvpc.get(((str(ptype_id) + '_') + lid), '?')
        # Row index is the 0-based position of the condition created above.
        row = (int(cid) - 1)
        aCat.setValue(ptype_id, 'type_id', row)
        aCat.setValue(rank, 'rank', row)
    self._curContainer.append(aCat)
    # --- Method-specific summary categories; only the one matching `emeth`
    # is appended to the container. ---
    aCat = DataCategory('pdbx_vrpt_summary_diffraction')
    atMap = [['exp_method', '?'], ['ordinal', '?'], ['Babinet_b', 'babinet_b'], ['bulk_solvent_b', 'bulk_solvent_b'], ['Wilson_B_estimate', 'WilsonBestimate'], ['I_over_sigma', 'IoverSigma'], ['num_miller_indices', 'numMillerIndices'], ['Babinet_k', 'babinet_k'], ['bulk_solvent_k', 'bulk_solvent_k'], ['Padilla_Yeates_L_mean', 'TwinL'], ['Padilla_Yeates_L2_mean', 'TwinL2'], ['DCC_R', 'DCC_R'], ['DCC_Rfree', 'DCC_Rfree'], ['EDS_R', 'EDS_R'], ['EDS_res_high', 'EDS_resolution'], ['EDS_res_low', 'EDS_resolution_low'], ['Wilson_B_aniso', 'WilsonBaniso'], ['data_anisotropy', 'DataAnisotropy'], ['trans_NCS_details', 'TransNCS'], ['B_factor_type', 'B_factor_type'], ['acentric_outliers', 'acentric_outliers'], ['centric_outliers', 'centric_outliers'], ['data_completeness', 'DataCompleteness'], ['number_reflns_R_free', 'num-free-reflections'], ['percent_free_reflections', 'percent-free-reflections'], ['percent_RSRZ_outliers', 'percent-RSRZ-outliers'], ['PDB_resolution_high', 'PDB-resolution'], ['PDB_resolution_low', 'PDB-resolution-low'], ['PDB_R', 'PDB-R'], ['PDB_Rfree', 'PDB-Rfree']]
    atlookup = {'exp_method': emeth, 'ordinal': 1}
    rd = {}
    for a in atMap:
        att = a[0]
        aCat.appendAttribute(att)
        if (a[1] != '?'):
            val = attr.get(a[1], '?')
            # 'NotAvailable' sentinel in the XML maps to the CIF unknown '?'.
            if (val == 'NotAvailable'):
                val = '?'
        else:
            val = atlookup[att]
        rd[att] = val
    aCat.append(rd)
    # Diffraction summary applies to all non-EM methods here.
    if (emeth not in ['electron microscopy']):
        self._curContainer.append(aCat)
    aCat = DataCategory('pdbx_vrpt_summary_nmr')
    atMap = [['exp_method', '?'], ['ordinal', '?'], ['nmr_models_consistency_flag', 'nmr_models_consistency_flag'], ['nmrclust_representative_model', 'nmrclust_representative_model'], ['medoid_model', 'medoid_model'], ['nmrclust_number_of_outliers', 'nmrclust_number_of_outliers'], ['nmrclust_number_of_models', 'nmrclust_number_of_models'], ['nmrclust_number_of_clusters', 'nmrclust_number_of_clusters'], ['cyrange_number_of_domains', 'cyrange_number_of_domains'], ['chemical_shift_completeness', 'chemical_shift_completeness'], ['chemical_shift_completeness_full_length', 'chemical_shift_completeness_full_length']]
    atlookup = {'exp_method': emeth, 'ordinal': 1}
    rd = {}
    for a in atMap:
        att = a[0]
        aCat.appendAttribute(att)
        if (a[1] != '?'):
            val = attr.get(a[1], '?')
        else:
            val = atlookup[att]
        rd[att] = val
    aCat.append(rd)
    if (emeth in ['nmr']):
        self._curContainer.append(aCat)
    aCat = DataCategory('pdbx_vrpt_summary_em')
    # NOTE(review): the 'pt_333' CIF names map to '..._0.133' XML attributes --
    # looks like a 0.333 vs 0.133 mismatch; verify against the report schema.
    atMap = [['exp_method', '?'], ['ordinal', '?'], ['contour_level_primary_map', 'contour_level_primary_map'], ['atom_inclusion_all_atoms', 'atom_inclusion_all_atoms'], ['atom_inclusion_backbone', 'atom_inclusion_backbone'], ['author_provided_fsc_resolution_by_cutoff_pt_143', 'author_provided_fsc_resolution_by_cutoff_0.143'], ['author_provided_fsc_resolution_by_cutoff_pt_333', 'author_provided_fsc_resolution_by_cutoff_0.133'], ['author_provided_fsc_resolution_by_cutoff_pt_5', 'author_provided_fsc_resolution_by_cutoff_0.5'], ['author_provided_fsc_resolution_by_cutoff_halfbit', 'author_provided_fsc_resolution_by_cutoff_halfbit'], ['author_provided_fsc_resolution_by_cutoff_onebit', 'author_provided_fsc_resolution_by_cutoff_onebit'], ['author_provided_fsc_resolution_by_cutoff_threesigma', 'author_provided_fsc_resolution_by_cutoff_threesigma'], ['calculated_fsc_resolution_by_cutoff_pt_143', 'calculated_fsc_resolution_by_cutoff_0.143'], ['calculated_fsc_resolution_by_cutoff_pt_333', 'calculated_fsc_resolution_by_cutoff_0.133'], ['calculated_fsc_resolution_by_cutoff_pt_5', 'calculated_fsc_resolution_by_cutoff_0.5'], ['calculated_fsc_resolution_by_cutoff_halfbit', 'calculated_fsc_resolution_by_cutoff_halfbit'], ['calculated_fsc_resolution_by_cutoff_onebit', 'calculated_fsc_resolution_by_cutoff_onebit'], ['calculated_fsc_resolution_by_cutoff_threesigma', 'calculated_fsc_resolution_by_cutoff_threesigma'], ['EMDB_resolution', 'EMDB-resolution']]
    atlookup = {'exp_method': emeth, 'ordinal': 1}
    rd = {}
    for a in atMap:
        att = a[0]
        aCat.appendAttribute(att)
        if (a[1] != '?'):
            val = attr.get(a[1], '?')
        else:
            val = atlookup[att]
        rd[att] = val
    aCat.append(rd)
    if (emeth in ['electron microscopy']):
        self._curContainer.append(aCat)
    # Geometry summary is emitted unconditionally for every method.
    aCat = DataCategory('pdbx_vrpt_summary_geometry')
    atMap = [['ordinal', '?'], ['percent_ramachandran_outliers', 'percent-rama-outliers'], ['clashscore', 'clashscore'], ['angles_RMSZ', 'angles_rmsz'], ['bonds_RMSZ', 'bonds_rmsz'], ['num_angles_RMSZ', 'num_angles_rmsz'], ['num_bonds_RMSZ', 'num_bonds_rmsz'], ['percent_rotamer_outliers', 'percent-rota-outliers']]
    atlookup = {'ordinal': 1}
    rd = {}
    for a in atMap:
        att = a[0]
        aCat.appendAttribute(att)
        if (a[1] == '?'):
            val = atlookup[att]
        else:
            val = attr.get(a[1], '?')
        rd[att] = val
    aCat.append(rd)
    self._curContainer.append(aCat)
|
def _processEntry(self):
entry = self._root.find('Entry')
attr = entry.attrib
aCat = DataCategory('pdbx_vrpt_summary')
atMap = [['entry_id', 'pdbid'], ['PDB_deposition_date', 'PDB-deposition-date'], ['PDB_revision_number', 'PDB-revision-number'], ['PDB_revision_date', 'PDB-revision-date'], ['RNA_suiteness', 'RNAsuiteness'], ['protein_DNA_RNA_entities', 'protein-DNA-RNA-entities'], ['model_CA_only', 'CA_ONLY'], ['EMDB_deposition_date', 'EMDB-deposition-date'], ['report_creation_date', 'XMLcreationDate'], ['attempted_validation_steps', 'attemptedValidationSteps'], ['no_ligands_for_mogul', 'no-ligands-for-mogul'], ['no_ligands_for_buster_report', 'no-ligands-for-buster-report'], ['ligands_for_buster_report', 'ligands-for-buster-report'], ['no_percentile_property', 'no-percentile-property']]
rd = {}
for a in atMap:
att = a[0]
if (a[1] != '?'):
val = attr.get(a[1], '?')
else:
val = atlookup[att]
if (att in ['ligands_for_buster_report', 'no_ligands_for_mogul', 'no_ligands_for_buster_report']):
if (val == 'yes'):
val = 'Y'
elif (val == 'no'):
val = 'N'
if (val != '?'):
aCat.appendAttribute(att)
rd[att] = val
aCat.append(rd)
self._curContainer.append(aCat)
self._processChemicalShiftLists(entry)
pbins = attr['percentilebins']
aCat = DataCategory('pdbx_vrpt_percentile_list')
aCat.appendAttribute('id')
aCat.appendAttribute('range')
aCat.appendAttribute('exp_method')
sbins = pbins.split(',')
exp = sbins[(- 1):][0]
print(('EXP is %s' % exp))
if (exp == 'xray'):
self.__absid = '1'
self.__relid = '2'
aCat.append([self.__absid, sbins[0], 'x-ray'])
aCat.append([self.__relid, sbins[1], 'x-ray'])
emeth = 'x-ray'
elif (exp == 'em'):
self.__absid = '1'
self.__relid = '2'
aCat.append([self.__absid, 'all', 'pdb'])
aCat.append([self.__relid, 'all', 'electron microscopy'])
emeth = 'electron microscopy'
elif (exp == 'nmr'):
self.__absid = '1'
self.__relid = '2'
aCat.append([self.__absid, 'all', 'pdb'])
aCat.append([self.__relid, 'all', 'nmr'])
emeth = 'nmr'
else:
print('PBINS', pbins, exp)
sys.exit(1)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_exptl')
aCat.appendAttribute('ordinal')
aCat.appendAttribute('method')
aCat.append(['1', emeth])
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_percentile_conditions')
aCat.appendAttribute('id')
aCat.appendAttribute('type_id')
aCat.appendAttribute('rank')
aCat.appendAttribute('res_high')
aCat.appendAttribute('res_low')
aCat.appendAttribute('number_entries_total')
aCat.appendAttribute('percentile_list_id')
self._pvpc = {}
pmap = [['all_atom_clashscore', self.__absid, 'numPDBids-absolute-percentile-clashscore', '?', '?'], ['all_atom_clashscore', self.__relid, 'numPDBids-relative-percentile-clashscore', 'high-resol-relative-percentile-clashscore', 'low-resol-relative-percentile-clashscore'], ['Ramachandran_outlier_percent', self.__absid, 'numPDBids-absolute-percentile-percent-rama-outliers', '?', '?'], ['Ramachandran_outlier_percent', self.__relid, 'numPDBids-relative-percentile-percent-rama-outliers', 'high-resol-relative-percentile-percent-rama-outliers', 'low-resol-relative-percentile-percent-rama-outliers'], ['rotamer_outliers_percent', self.__absid, 'numPDBids-absolute-percentile-percent-rota-outliers', '?', '?'], ['rotamer_outliers_percent', self.__relid, 'numPDBids-relative-percentile-percent-rota-outliers', 'high-resol-relative-percentile-percent-rota-outliers', 'low-resol-relative-percentile-percent-rota-outliers'], ['R_value_R_free', self.__absid, 'numPDBids-absolute-percentile-DCC_Rfree', '?', '?'], ['R_value_R_free', self.__relid, 'numPDBids-relative-percentile-DCC_Rfree', 'high-resol-relative-percentile-DCC_Rfree', 'low-resol-relative-percentile-DCC_Rfree'], ['RSRZ_outliers_percent', self.__absid, 'numPDBids-absolute-percentile-percent-RSRZ-outliers', '?', '?'], ['RSRZ_outliers_percent', self.__relid, 'numPDBids-relative-percentile-percent-RSRZ-outliers', 'high-resol-relative-percentile-percent-RSRZ-outliers', 'low-resol-relative-percentile-percent-RSRZ-outliers'], ['RNAsuiteness_percent', self.__absid, 'numPDBids-absolute-percentile-RNAsuiteness', '?', '?'], ['RNAsuiteness_percent', self.__relid, 'numPDBids-relative-percentile-RNAsuiteness', 'high-resol-relative-percentile-RNAsuiteness', 'low-resol-relative-percentile-RNAsuiteness']]
cid = 1
for p in pmap:
ptype = p[0]
if (ptype not in self._type_map):
self._type_id += 1
self._type_map[ptype] = self._type_id
ptype_id = self._type_map[ptype]
plist = p[1]
num = attr.get(p[2], '?')
if (num == '?'):
continue
if (p[3] == '?'):
res_high = '?'
else:
res_high = attr.get(p[3], '?')
if (p[4] == '?'):
res_low = '?'
else:
res_low = attr.get(p[4], '?')
aCat.append([str(cid), '?', '?', res_high, res_low, num, plist])
self._pvpc[((str(ptype_id) + '_') + plist)] = str(cid)
cid += 1
vmap = [['all_atom_clashscore', 'absolute-percentile-clashscore', self.__absid], ['all_atom_clashscore', 'relative-percentile-clashscore', self.__relid], ['Ramachandran_outlier_percent', 'absolute-percentile-percent-rama-outliers', self.__absid], ['Ramachandran_outlier_percent', 'relative-percentile-percent-rama-outliers', self.__relid], ['rotamer_outliers_percent', 'absolute-percentile-percent-rota-outliers', self.__absid], ['rotamer_outliers_percent', 'relative-percentile-percent-rota-outliers', self.__relid], ['R_value_R_free', 'absolute-percentile-DCC_Rfree', self.__absid], ['R_value_R_free', 'relative-percentile-DCC_Rfree', self.__relid], ['RSRZ_outliers_percent', 'absolute-percentile-percent-RSRZ-outliers', self.__absid], ['RSRZ_outliers_percent', 'relative-percentile-percent-RSRZ-outliers', self.__relid], ['RNAsuiteness_percent', 'absolute-percentile-RNAsuiteness', self.__absid], ['RNAsuiteness_percent', 'relative-percentile-RNAsuiteness', self.__relid]]
for v in vmap:
ptype = v[0]
if (ptype not in self._type_map):
self._type_id += 1
self._type_map[ptype] = self._type_id
ptype_id = self._type_map[ptype]
rank = attr.get(v[1], '?')
if (rank == '?'):
continue
lid = v[2]
cid = self._pvpc.get(((str(ptype_id) + '_') + lid), '?')
row = (int(cid) - 1)
aCat.setValue(ptype_id, 'type_id', row)
aCat.setValue(rank, 'rank', row)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_diffraction')
atMap = [['exp_method', '?'], ['ordinal', '?'], ['Babinet_b', 'babinet_b'], ['bulk_solvent_b', 'bulk_solvent_b'], ['Wilson_B_estimate', 'WilsonBestimate'], ['I_over_sigma', 'IoverSigma'], ['num_miller_indices', 'numMillerIndices'], ['Babinet_k', 'babinet_k'], ['bulk_solvent_k', 'bulk_solvent_k'], ['Padilla_Yeates_L_mean', 'TwinL'], ['Padilla_Yeates_L2_mean', 'TwinL2'], ['DCC_R', 'DCC_R'], ['DCC_Rfree', 'DCC_Rfree'], ['EDS_R', 'EDS_R'], ['EDS_res_high', 'EDS_resolution'], ['EDS_res_low', 'EDS_resolution_low'], ['Wilson_B_aniso', 'WilsonBaniso'], ['data_anisotropy', 'DataAnisotropy'], ['trans_NCS_details', 'TransNCS'], ['B_factor_type', 'B_factor_type'], ['acentric_outliers', 'acentric_outliers'], ['centric_outliers', 'centric_outliers'], ['data_completeness', 'DataCompleteness'], ['number_reflns_R_free', 'num-free-reflections'], ['percent_free_reflections', 'percent-free-reflections'], ['percent_RSRZ_outliers', 'percent-RSRZ-outliers'], ['PDB_resolution_high', 'PDB-resolution'], ['PDB_resolution_low', 'PDB-resolution-low'], ['PDB_R', 'PDB-R'], ['PDB_Rfree', 'PDB-Rfree']]
atlookup = {'exp_method': emeth, 'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] != '?'):
val = attr.get(a[1], '?')
if (val == 'NotAvailable'):
val = '?'
else:
val = atlookup[att]
rd[att] = val
aCat.append(rd)
if (emeth not in ['electron microscopy']):
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_nmr')
atMap = [['exp_method', '?'], ['ordinal', '?'], ['nmr_models_consistency_flag', 'nmr_models_consistency_flag'], ['nmrclust_representative_model', 'nmrclust_representative_model'], ['medoid_model', 'medoid_model'], ['nmrclust_number_of_outliers', 'nmrclust_number_of_outliers'], ['nmrclust_number_of_models', 'nmrclust_number_of_models'], ['nmrclust_number_of_clusters', 'nmrclust_number_of_clusters'], ['cyrange_number_of_domains', 'cyrange_number_of_domains'], ['chemical_shift_completeness', 'chemical_shift_completeness'], ['chemical_shift_completeness_full_length', 'chemical_shift_completeness_full_length']]
atlookup = {'exp_method': emeth, 'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] != '?'):
val = attr.get(a[1], '?')
else:
val = atlookup[att]
rd[att] = val
aCat.append(rd)
if (emeth in ['nmr']):
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_em')
atMap = [['exp_method', '?'], ['ordinal', '?'], ['contour_level_primary_map', 'contour_level_primary_map'], ['atom_inclusion_all_atoms', 'atom_inclusion_all_atoms'], ['atom_inclusion_backbone', 'atom_inclusion_backbone'], ['author_provided_fsc_resolution_by_cutoff_pt_143', 'author_provided_fsc_resolution_by_cutoff_0.143'], ['author_provided_fsc_resolution_by_cutoff_pt_333', 'author_provided_fsc_resolution_by_cutoff_0.133'], ['author_provided_fsc_resolution_by_cutoff_pt_5', 'author_provided_fsc_resolution_by_cutoff_0.5'], ['author_provided_fsc_resolution_by_cutoff_halfbit', 'author_provided_fsc_resolution_by_cutoff_halfbit'], ['author_provided_fsc_resolution_by_cutoff_onebit', 'author_provided_fsc_resolution_by_cutoff_onebit'], ['author_provided_fsc_resolution_by_cutoff_threesigma', 'author_provided_fsc_resolution_by_cutoff_threesigma'], ['calculated_fsc_resolution_by_cutoff_pt_143', 'calculated_fsc_resolution_by_cutoff_0.143'], ['calculated_fsc_resolution_by_cutoff_pt_333', 'calculated_fsc_resolution_by_cutoff_0.133'], ['calculated_fsc_resolution_by_cutoff_pt_5', 'calculated_fsc_resolution_by_cutoff_0.5'], ['calculated_fsc_resolution_by_cutoff_halfbit', 'calculated_fsc_resolution_by_cutoff_halfbit'], ['calculated_fsc_resolution_by_cutoff_onebit', 'calculated_fsc_resolution_by_cutoff_onebit'], ['calculated_fsc_resolution_by_cutoff_threesigma', 'calculated_fsc_resolution_by_cutoff_threesigma'], ['EMDB_resolution', 'EMDB-resolution']]
atlookup = {'exp_method': emeth, 'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] != '?'):
val = attr.get(a[1], '?')
else:
val = atlookup[att]
rd[att] = val
aCat.append(rd)
if (emeth in ['electron microscopy']):
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_summary_geometry')
atMap = [['ordinal', '?'], ['percent_ramachandran_outliers', 'percent-rama-outliers'], ['clashscore', 'clashscore'], ['angles_RMSZ', 'angles_rmsz'], ['bonds_RMSZ', 'bonds_rmsz'], ['num_angles_RMSZ', 'num_angles_rmsz'], ['num_bonds_RMSZ', 'num_bonds_rmsz'], ['percent_rotamer_outliers', 'percent-rota-outliers']]
atlookup = {'ordinal': 1}
rd = {}
for a in atMap:
att = a[0]
aCat.appendAttribute(att)
if (a[1] == '?'):
val = atlookup[att]
else:
val = attr.get(a[1], '?')
rd[att] = val
aCat.append(rd)
self._curContainer.append(aCat)<|docstring|>Process attributes from Entry section<|endoftext|>
|
41551f9b3f365c5895248027df0bc656a7d08be390b90193808b2d5a8cede7aa
|
def reslookup(self, model, auth_chain, auth_seq_id, auth_comp_id, auth_ins_code, altcode):
    """Build the '-'-joined residue key used for instance lookups.

    A blank insertion code (' ') is normalized to '?' before joining so
    that keys are stable across residues with and without insertion codes.
    """
    ins_code = '?' if auth_ins_code == ' ' else auth_ins_code
    parts = (model, auth_chain, auth_seq_id, auth_comp_id, ins_code, altcode)
    return '-'.join(parts)
|
Returns lookup string for mapping
|
ConvertCal.py
|
reslookup
|
rcsb/py-rcsb_utils_validation
| 0
|
python
|
def reslookup(self, model, auth_chain, auth_seq_id, auth_comp_id, auth_ins_code, altcode):
    """Return the residue lookup key: the six identifier fields joined by '-'.

    Maps a blank insertion code (' ') to '?' so the key matches CIF-style
    unknown markers.
    """
    fields = [model, auth_chain, auth_seq_id, auth_comp_id, auth_ins_code, altcode]
    if fields[4] == ' ':
        fields[4] = '?'
    return '-'.join(fields)
|
def reslookup(self, model, auth_chain, auth_seq_id, auth_comp_id, auth_ins_code, altcode):
if (auth_ins_code == ' '):
auth_ins_code = '?'
return '-'.join([model, auth_chain, auth_seq_id, auth_comp_id, auth_ins_code, altcode])<|docstring|>Returns lookup string for mapping<|endoftext|>
|
b435eda3b2c2185d690775e049236e91db2533d326be15b91be9e03dd5e3f4d7
|
def _processInstance(self):
    """Convert per-residue ``ModelledSubgroup`` XML elements into mmCIF categories.

    Builds ``pdbx_vrpt_model_instance`` (one row per residue instance, keyed by
    ``self._resparent``), the density/geometry sub-categories that reference
    instance ids, and one category per outlier class (angles, bonds, chirality,
    planes, clashes, mogul metrics).  Finally, per-instance outlier counts are
    appended as extra columns on the instance category.

    Side effects: appends categories to ``self._curContainer`` and populates
    ``self._resparent`` (residue key -> instance id string).
    """
    allreskeys = []
    modelledSubGroups = self._root.findall('ModelledSubgroup')
    # --- pdbx_vrpt_model_instance: one row per ModelledSubgroup element ---
    aMICat = DataCategory('pdbx_vrpt_model_instance')
    mapping = [['id', '?'], ['PDB_model_num', 'model'], ['entity_id', 'ent'], ['label_asym_id', 'said'], ['label_seq_id', 'seq'], ['label_comp_id', 'resname'], ['auth_asym_id', 'chain'], ['auth_seq_id', 'resnum'], ['label_alt_id', 'altcode'], ['PDB_ins_code', 'icode']]
    for m in mapping:
        aMICat.appendAttribute(m[0])
    # NOTE: `ord` shadows the builtin; it is the running 1-based instance id.
    ord = 0
    for msg in modelledSubGroups:
        attr = msg.attrib
        ord = (ord + 1)
        rd = {}
        for m in mapping:
            key = m[0]
            lookup = m[1]
            if (key == 'id'):
                continue
            val = attr[lookup]
            # Blank XML attribute values map to the CIF unknown marker.
            if (val == ' '):
                val = '?'
            rd[key] = val
        altcode = attr['altcode']
        if (altcode == ' '):
            altcode = '?'
        # Register this residue so outlier rows below can find its instance id.
        reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
        self._resparent[reskey] = str(ord)
        allreskeys.append(str(ord))
        rd['id'] = str(ord)
        aMICat.append(rd)
    # --- pdbx_vrpt_model_instance_density: only residues with an 'rsrz' value ---
    aCat = DataCategory('pdbx_vrpt_model_instance_density')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', 'altcode'], ['natoms_eds', 'NatomsEDS'], ['RSRCC', 'rscc'], ['RSR', 'rsr'], ['RSRZ', 'rsrz'], ['lig_RSRZ_nbr_id', 'lig_rsrz_nbr_id'], ['lig_RSR_nbr_mean', 'ligRSRnbrMean'], ['lig_RSR_nbr_stdev', 'ligRSRnbrStdev'], ['lig_RSR_numnbrs', 'ligRSRnumnbrs'], ['lig_RSRZ', 'ligRSRZ']]
    for m in mapping:
        aCat.appendAttribute(m[0])
    ord = 0
    keep = False
    rsrkeys = {}
    for msg in modelledSubGroups:
        attr = msg.attrib
        if ('rsrz' not in attr):
            continue
        ord = (ord + 1)
        rd = {}
        for m in mapping:
            key = m[0]
            lookup = m[1]
            if (lookup == '?'):
                # '?' lookups are synthesized rather than read from the XML.
                if (key == 'ordinal'):
                    val = str(ord)
                elif (key == 'instance_id'):
                    altcode = attr['altcode']
                    if (altcode == ' '):
                        altcode = '?'
                    reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
                    val = self._resparent[reskey]
                    rsrkeys[val] = str(ord)
                else:
                    print(('UNKNOWN KEY %s' % key))
                    sys.exit(1)
            else:
                val = attr.get(lookup, '?')
            if (val == ' '):
                val = '?'
            rd[key] = val
        keep = True
        aCat.append(rd)
    # Only emit the category if at least one row was produced.
    if keep:
        self._trimContainer(aCat)
        self._curContainer.append(aCat)
    # --- pdbx_vrpt_model_instance_geometry: residues with geometry metrics ---
    aCat = DataCategory('pdbx_vrpt_model_instance_geometry')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', 'altcode'], ['OWAB', 'owab'], ['average_occupancy', 'avgoccu'], ['rotamer_class', 'rota'], ['phi', 'phi'], ['psi', 'psi'], ['ramachandran_class', 'rama'], ['flippable_sidechain', 'flippable-sidechain'], ['RNA_score', 'RNAscore'], ['RNA_suite', 'RNAsuite'], ['RNA_pucker', 'RNApucker'], ['ligand_chirality_outlier', 'ligand_chirality_outlier'], ['cis_peptide', 'cis_peptide'], ['cyrange_domain_id', 'cyrange_domain_id'], ['validate', 'validate'], ['num_H_reduce', 'num-H-reduce'], ['mogul_ignore', 'mogul-ignore'], ['mogul_angles_RMSZ', 'mogul_angles_rmsz'], ['mogul_bonds_RMSZ', 'mogul_bonds_rmsz'], ['mogul_RMSZ_num_angles', 'mogul_rmsz_numangles'], ['mogul_RMSZ_num_bonds', 'mogul_rmsz_numbonds'], ['ligand_density_outlier', 'ligand_density_outlier'], ['residue_inclusion', 'residue_inclusion']]
    for m in mapping:
        aCat.appendAttribute(m[0])
    ord = 0
    keep = False
    geomkeys = {}
    for msg in modelledSubGroups:
        attr = msg.attrib
        # Skip residues carrying none of the geometry-related attributes.
        if (('avgoccu' not in attr) and ('RNAscore' not in attr) and ('phi' not in attr) and ('rama' not in attr)):
            continue
        ord = (ord + 1)
        rd = {}
        for m in mapping:
            key = m[0]
            lookup = m[1]
            if (lookup == '?'):
                if (key == 'ordinal'):
                    val = str(ord)
                elif (key == 'instance_id'):
                    altcode = attr['altcode']
                    if (altcode == ' '):
                        altcode = '?'
                    reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
                    val = self._resparent[reskey]
                    geomkeys[val] = str(ord)
                else:
                    print(('UNKNOWN KEY %s' % key))
                    sys.exit(1)
            else:
                val = attr.get(lookup, '?')
                # yes/no flags are normalized to Y/N.
                if (lookup in ['ligand_chirality_outlier', 'cis_peptide']):
                    if (val == 'yes'):
                        val = 'Y'
                    elif (val == 'no'):
                        val = 'N'
            if (val == ' '):
                val = '?'
            rd[key] = val
        keep = True
        aCat.append(rd)
    if keep:
        self._trimContainer(aCat)
        self._curContainer.append(aCat)
    def instance_subcat(aCat, mapping, name, splitatoms=False):
        """Emit one outlier category from the child elements named `name`.

        Walks every ModelledSubgroup, appending one row per matching child
        element.  Returns a dict of instance_id -> outlier count (or {} when
        no outliers were found).  `splitatoms` expands a comma-separated
        'atoms' attribute into atom0..atom3 / atom_1..atom_4 columns.
        """
        for m in mapping:
            aCat.appendAttribute(m[0])
        ord = 0
        keep = False
        outliervar = {}
        for msg in modelledSubGroups:
            pattr = msg.attrib
            altcode = pattr['altcode']
            if (altcode == ' '):
                altcode = '?'
            reskey = self.reslookup(pattr['model'], pattr['chain'], pattr['resnum'], pattr['resname'], pattr['icode'], altcode)
            aoutlier = msg.findall(name)
            for ao in aoutlier:
                attr = ao.attrib
                ord = (ord + 1)
                rd = {}
                for m in mapping:
                    key = m[0]
                    lookup = m[1]
                    if (lookup == '?'):
                        if (key == 'ordinal'):
                            val = str(ord)
                        elif (key == 'instance_id'):
                            val = self._resparent[reskey]
                            # Tally outliers per instance for the summary columns.
                            outliervar[val] = (outliervar.get(val, 0) + 1)
                        elif (key == 'label_alt_id'):
                            val = pattr['altcode']
                        elif ((key == 'atom0') and splitatoms):
                            val = attr['atoms'].split(',')[0]
                        elif ((key == 'atom1') and splitatoms):
                            val = attr['atoms'].split(',')[1]
                        elif ((key == 'atom2') and splitatoms):
                            val = attr['atoms'].split(',')[2]
                        elif ((key == 'atom3') and splitatoms):
                            val = attr['atoms'].split(',')[3]
                        elif ((key == 'atom_1') and splitatoms):
                            val = attr['atoms'].split(',')[0]
                        elif ((key == 'atom_2') and splitatoms):
                            val = attr['atoms'].split(',')[1]
                        elif ((key == 'atom_3') and splitatoms):
                            val = attr['atoms'].split(',')[2]
                        elif ((key == 'atom_4') and splitatoms):
                            val = attr['atoms'].split(',')[3]
                        else:
                            print(('UNKNOWN KEY %s' % key))
                            sys.exit(1)
                    else:
                        val = attr.get(lookup, '?')
                        if (lookup == 'link'):
                            if (val == 'yes'):
                                val = 'Y'
                            elif (val == 'no'):
                                val = 'N'
                    if (val == ' '):
                        val = '?'
                    rd[key] = val
                keep = True
                aCat.append(rd)
        if keep:
            self._curContainer.append(aCat)
            return outliervar
        return {}
    aCat = DataCategory('pdbx_vrpt_instance_intra_angle_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', 'atom0'], ['atom_2', 'atom1'], ['atom_3', 'atom2'], ['obs', 'obs'], ['mean', 'mean'], ['stdev', 'stdev'], ['Z', 'z'], ['link', 'link']]
    angleoutliers = instance_subcat(aCat, mapping, 'angle-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_mogul_angle_outliers')
    # NOTE(review): 'Sscore' as the XML lookup for 'Zscore' looks like a typo
    # ('Zscore' in the bond-outlier mapping below) -- confirm against the XML.
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['atom_3', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['stdev', 'stdev'], ['numobs', 'numobs'], ['Zscore', 'Sscore'], ['mindiff', 'mindiff']]
    mogangleoutliers = instance_subcat(aCat, mapping, 'mog-angle-outlier', splitatoms=True)
    aCat = DataCategory('pdbx_vrpt_instance_stereo_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['label_atom_id', 'atom'], ['problem', 'problem']]
    chiraloutliers = instance_subcat(aCat, mapping, 'chiral-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_intra_bond_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', 'atom0'], ['atom_2', 'atom1'], ['obs', 'obs'], ['mean', 'mean'], ['stdev', 'stdev'], ['Z', 'z'], ['link', 'link']]
    bondoutliers = instance_subcat(aCat, mapping, 'bond-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_mogul_bond_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['numobs', 'numobs'], ['stdev', 'stdev'], ['Zscore', 'Zscore'], ['mindiff', 'mindiff']]
    mogbondoutliers = instance_subcat(aCat, mapping, 'mog-bond-outlier', splitatoms=True)
    aCat = DataCategory('pdbx_vrpt_instance_mogul_torsion_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['atom_3', '?'], ['atom_4', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['mindiff', 'mindiff'], ['numobs', 'numobs'], ['stdev', 'stdev'], ['local_density', 'local_density']]
    mogtorsoutliers = instance_subcat(aCat, mapping, 'mog-torsion-outlier', splitatoms=True)
    aCat = DataCategory('pdbx_vrpt_instance_mogul_ring_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atoms', 'atoms'], ['mean', 'mean'], ['mindiff', 'mindiff'], ['numobs', 'numobs'], ['stdev', 'stdev']]
    mogringoutliers = instance_subcat(aCat, mapping, 'mog-ring-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_intra_plane_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['type', 'type'], ['improper', 'improper'], ['omega', 'omega'], ['plane_rmsd', 'planeRMSD']]
    planeoutliers = instance_subcat(aCat, mapping, 'plane-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_clashes')
    mapping = [['ordinal', '?'], ['label_alt_id', '?'], ['instance_id', '?'], ['label_atom_id', 'atom'], ['cid', 'cid'], ['clashmag', 'clashmag'], ['dist', 'dist']]
    clashoutliers = instance_subcat(aCat, mapping, 'clash')
    aCat = DataCategory('pdbx_vrpt_instance_symm_clashes')
    mapping = [['ordinal', '?'], ['label_alt_id', '?'], ['instance_id', '?'], ['label_atom_id', 'atom'], ['symop', 'symop'], ['scid', 'scid'], ['clashmag', 'clashmag'], ['dist', 'dist']]
    symclashoutliers = instance_subcat(aCat, mapping, 'symm-clash')
    # --- Append per-instance outlier counts as extra columns on the
    # model_instance category ('?' when the instance had none). ---
    aMICat.appendAttribute('count_angle_outliers')
    aMICat.appendAttribute('count_bond_outliers')
    aMICat.appendAttribute('count_clashes')
    aMICat.appendAttribute('count_symm_clashes')
    aMICat.appendAttribute('count_chiral_outliers')
    aMICat.appendAttribute('count_plane_outliers')
    aMICat.appendAttribute('count_mogul_angle_outliers')
    aMICat.appendAttribute('count_mogul_bond_outliers')
    aMICat.appendAttribute('count_mogul_torsion_outliers')
    aMICat.appendAttribute('count_mogul_ring_outliers')
    for k in allreskeys:
        # NOTE(review): `inst_id` is assigned but never used.
        inst_id = k
        row = (int(k) - 1)
        aMICat.setValue(angleoutliers.get(k, '?'), 'count_angle_outliers', row)
        aMICat.setValue(bondoutliers.get(k, '?'), 'count_bond_outliers', row)
        aMICat.setValue(clashoutliers.get(k, '?'), 'count_clashes', row)
        aMICat.setValue(symclashoutliers.get(k, '?'), 'count_symm_clashes', row)
        aMICat.setValue(chiraloutliers.get(k, '?'), 'count_chiral_outliers', row)
        aMICat.setValue(planeoutliers.get(k, '?'), 'count_plane_outliers', row)
        aMICat.setValue(mogangleoutliers.get(k, '?'), 'count_mogul_angle_outliers', row)
        aMICat.setValue(mogbondoutliers.get(k, '?'), 'count_mogul_bond_outliers', row)
        aMICat.setValue(mogtorsoutliers.get(k, '?'), 'count_mogul_torsion_outliers', row)
        aMICat.setValue(mogringoutliers.get(k, '?'), 'count_mogul_ring_outliers', row)
    # NOTE(review): 'label_altid' in nodel does not match the attribute name
    # 'label_alt_id' declared above -- possible typo; confirm _trimContainer use.
    self._trimContainer(aMICat, nodel=['label_altid', 'PDB_ins_code'])
    self._curContainer.append(aMICat)
|
Converts instance level info
|
ConvertCal.py
|
_processInstance
|
rcsb/py-rcsb_utils_validation
| 0
|
python
|
def _processInstance(self):
    """Converts instance level info.

    Reads every ``ModelledSubgroup`` element of the validation-report XML
    (one element per residue instance) and builds:

    * ``pdbx_vrpt_model_instance`` -- the master per-instance table,
    * density and geometry sub-tables keyed back to the master table,
    * one category per outlier type (angle, bond, chirality, plane,
      clash, symmetry clash, and the Mogul variants),
    * per-instance outlier counts appended to the master table.
    """
    allreskeys = []
    modelledSubGroups = self._root.findall('ModelledSubgroup')
    # ---- master instance table: one row per ModelledSubgroup ----
    aMICat = DataCategory('pdbx_vrpt_model_instance')
    # Each mapping entry is [cif_attribute, xml_attribute]; a lookup of
    # '?' marks attributes whose values are synthesized in code below.
    mapping = [['id', '?'], ['PDB_model_num', 'model'], ['entity_id', 'ent'], ['label_asym_id', 'said'], ['label_seq_id', 'seq'], ['label_comp_id', 'resname'], ['auth_asym_id', 'chain'], ['auth_seq_id', 'resnum'], ['label_alt_id', 'altcode'], ['PDB_ins_code', 'icode']]
    for m in mapping:
        aMICat.appendAttribute(m[0])
    ord = 0  # 1-based instance id / row counter (NOTE: shadows builtin ord())
    for msg in modelledSubGroups:
        attr = msg.attrib
        ord = (ord + 1)
        rd = {}
        for m in mapping:
            key = m[0]
            lookup = m[1]
            if (key == 'id'):
                continue
            val = attr[lookup]
            if (val == ' '):
                # A single blank in the XML means "no value" -> CIF '?'.
                val = '?'
            rd[key] = val
        altcode = attr['altcode']
        if (altcode == ' '):
            altcode = '?'
        # Record which instance id this residue maps to so the outlier
        # sub-tables can refer back to the master row.
        reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
        self._resparent[reskey] = str(ord)
        allreskeys.append(str(ord))
        rd['id'] = str(ord)
        aMICat.append(rd)
    # ---- electron-density (RSR/RSRZ) sub-table ----
    aCat = DataCategory('pdbx_vrpt_model_instance_density')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', 'altcode'], ['natoms_eds', 'NatomsEDS'], ['RSRCC', 'rscc'], ['RSR', 'rsr'], ['RSRZ', 'rsrz'], ['lig_RSRZ_nbr_id', 'lig_rsrz_nbr_id'], ['lig_RSR_nbr_mean', 'ligRSRnbrMean'], ['lig_RSR_nbr_stdev', 'ligRSRnbrStdev'], ['lig_RSR_numnbrs', 'ligRSRnumnbrs'], ['lig_RSRZ', 'ligRSRZ']]
    for m in mapping:
        aCat.appendAttribute(m[0])
    ord = 0
    keep = False
    rsrkeys = {}  # NOTE(review): populated but never read afterwards
    for msg in modelledSubGroups:
        attr = msg.attrib
        if ('rsrz' not in attr):
            # Only residues carrying density statistics get a row here.
            continue
        ord = (ord + 1)
        rd = {}
        for m in mapping:
            key = m[0]
            lookup = m[1]
            if (lookup == '?'):
                if (key == 'ordinal'):
                    val = str(ord)
                elif (key == 'instance_id'):
                    altcode = attr['altcode']
                    if (altcode == ' '):
                        altcode = '?'
                    reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
                    val = self._resparent[reskey]
                    rsrkeys[val] = str(ord)
                else:
                    print(('UNKNOWN KEY %s' % key))
                    sys.exit(1)
            else:
                val = attr.get(lookup, '?')
            if (val == ' '):
                val = '?'
            rd[key] = val
            keep = True
        aCat.append(rd)
    if keep:
        self._trimContainer(aCat)
        self._curContainer.append(aCat)
    # ---- geometry / conformation sub-table ----
    aCat = DataCategory('pdbx_vrpt_model_instance_geometry')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', 'altcode'], ['OWAB', 'owab'], ['average_occupancy', 'avgoccu'], ['rotamer_class', 'rota'], ['phi', 'phi'], ['psi', 'psi'], ['ramachandran_class', 'rama'], ['flippable_sidechain', 'flippable-sidechain'], ['RNA_score', 'RNAscore'], ['RNA_suite', 'RNAsuite'], ['RNA_pucker', 'RNApucker'], ['ligand_chirality_outlier', 'ligand_chirality_outlier'], ['cis_peptide', 'cis_peptide'], ['cyrange_domain_id', 'cyrange_domain_id'], ['validate', 'validate'], ['num_H_reduce', 'num-H-reduce'], ['mogul_ignore', 'mogul-ignore'], ['mogul_angles_RMSZ', 'mogul_angles_rmsz'], ['mogul_bonds_RMSZ', 'mogul_bonds_rmsz'], ['mogul_RMSZ_num_angles', 'mogul_rmsz_numangles'], ['mogul_RMSZ_num_bonds', 'mogul_rmsz_numbonds'], ['ligand_density_outlier', 'ligand_density_outlier'], ['residue_inclusion', 'residue_inclusion']]
    for m in mapping:
        aCat.appendAttribute(m[0])
    ord = 0
    keep = False
    geomkeys = {}  # NOTE(review): populated but never read afterwards
    for msg in modelledSubGroups:
        attr = msg.attrib
        if (('avgoccu' not in attr) and ('RNAscore' not in attr) and ('phi' not in attr) and ('rama' not in attr)):
            # Skip residues with no geometry information at all.
            continue
        ord = (ord + 1)
        rd = {}
        for m in mapping:
            key = m[0]
            lookup = m[1]
            if (lookup == '?'):
                if (key == 'ordinal'):
                    val = str(ord)
                elif (key == 'instance_id'):
                    altcode = attr['altcode']
                    if (altcode == ' '):
                        altcode = '?'
                    reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
                    val = self._resparent[reskey]
                    geomkeys[val] = str(ord)
                else:
                    print(('UNKNOWN KEY %s' % key))
                    sys.exit(1)
            else:
                val = attr.get(lookup, '?')
                # Normalize XML yes/no flags to CIF Y/N.
                if (lookup in ['ligand_chirality_outlier', 'cis_peptide']):
                    if (val == 'yes'):
                        val = 'Y'
                    elif (val == 'no'):
                        val = 'N'
            if (val == ' '):
                val = '?'
            rd[key] = val
            keep = True
        aCat.append(rd)
    if keep:
        self._trimContainer(aCat)
        self._curContainer.append(aCat)

    def instance_subcat(aCat, mapping, name, splitatoms=False):
        """Fill *aCat* from the child elements called *name* of every
        ModelledSubgroup, using the [cif_attr, xml_attr] *mapping*.

        When *splitatoms* is True, atom_1..atom_4 are taken from the
        comma-separated XML 'atoms' attribute.  Returns a dict mapping
        instance id -> number of outliers of this type (empty dict when
        no outlier of this type exists).
        """
        for m in mapping:
            aCat.appendAttribute(m[0])
        ord = 0
        keep = False
        outliervar = {}
        for msg in modelledSubGroups:
            pattr = msg.attrib
            altcode = pattr['altcode']
            if (altcode == ' '):
                altcode = '?'
            reskey = self.reslookup(pattr['model'], pattr['chain'], pattr['resnum'], pattr['resname'], pattr['icode'], altcode)
            aoutlier = msg.findall(name)
            for ao in aoutlier:
                attr = ao.attrib
                ord = (ord + 1)
                rd = {}
                for m in mapping:
                    key = m[0]
                    lookup = m[1]
                    if (lookup == '?'):
                        if (key == 'ordinal'):
                            val = str(ord)
                        elif (key == 'instance_id'):
                            val = self._resparent[reskey]
                            # Count outliers per instance for the summary.
                            outliervar[val] = (outliervar.get(val, 0) + 1)
                        elif (key == 'label_alt_id'):
                            val = pattr['altcode']
                        # NOTE(review): the 'atom0'..'atom3' keys below are not
                        # used by any current mapping (they use atom_1..atom_4);
                        # presumably legacy key names -- confirm before removing.
                        elif ((key == 'atom0') and splitatoms):
                            val = attr['atoms'].split(',')[0]
                        elif ((key == 'atom1') and splitatoms):
                            val = attr['atoms'].split(',')[1]
                        elif ((key == 'atom2') and splitatoms):
                            val = attr['atoms'].split(',')[2]
                        elif ((key == 'atom3') and splitatoms):
                            val = attr['atoms'].split(',')[3]
                        elif ((key == 'atom_1') and splitatoms):
                            val = attr['atoms'].split(',')[0]
                        elif ((key == 'atom_2') and splitatoms):
                            val = attr['atoms'].split(',')[1]
                        elif ((key == 'atom_3') and splitatoms):
                            val = attr['atoms'].split(',')[2]
                        elif ((key == 'atom_4') and splitatoms):
                            val = attr['atoms'].split(',')[3]
                        else:
                            print(('UNKNOWN KEY %s' % key))
                            sys.exit(1)
                    else:
                        val = attr.get(lookup, '?')
                        # Normalize yes/no link flags to Y/N.
                        if (lookup == 'link'):
                            if (val == 'yes'):
                                val = 'Y'
                            elif (val == 'no'):
                                val = 'N'
                    if (val == ' '):
                        val = '?'
                    rd[key] = val
                    keep = True
                aCat.append(rd)
        if keep:
            self._curContainer.append(aCat)
            return outliervar
        return {}
    # ---- one category per outlier type ----
    aCat = DataCategory('pdbx_vrpt_instance_intra_angle_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', 'atom0'], ['atom_2', 'atom1'], ['atom_3', 'atom2'], ['obs', 'obs'], ['mean', 'mean'], ['stdev', 'stdev'], ['Z', 'z'], ['link', 'link']]
    angleoutliers = instance_subcat(aCat, mapping, 'angle-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_mogul_angle_outliers')
    # NOTE(review): the 'Sscore' lookup below looks like a typo for 'Zscore'
    # (compare the mogul bond mapping); verify against the validation XML schema.
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['atom_3', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['stdev', 'stdev'], ['numobs', 'numobs'], ['Zscore', 'Sscore'], ['mindiff', 'mindiff']]
    mogangleoutliers = instance_subcat(aCat, mapping, 'mog-angle-outlier', splitatoms=True)
    aCat = DataCategory('pdbx_vrpt_instance_stereo_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['label_atom_id', 'atom'], ['problem', 'problem']]
    chiraloutliers = instance_subcat(aCat, mapping, 'chiral-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_intra_bond_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', 'atom0'], ['atom_2', 'atom1'], ['obs', 'obs'], ['mean', 'mean'], ['stdev', 'stdev'], ['Z', 'z'], ['link', 'link']]
    bondoutliers = instance_subcat(aCat, mapping, 'bond-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_mogul_bond_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['numobs', 'numobs'], ['stdev', 'stdev'], ['Zscore', 'Zscore'], ['mindiff', 'mindiff']]
    mogbondoutliers = instance_subcat(aCat, mapping, 'mog-bond-outlier', splitatoms=True)
    aCat = DataCategory('pdbx_vrpt_instance_mogul_torsion_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['atom_3', '?'], ['atom_4', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['mindiff', 'mindiff'], ['numobs', 'numobs'], ['stdev', 'stdev'], ['local_density', 'local_density']]
    mogtorsoutliers = instance_subcat(aCat, mapping, 'mog-torsion-outlier', splitatoms=True)
    aCat = DataCategory('pdbx_vrpt_instance_mogul_ring_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atoms', 'atoms'], ['mean', 'mean'], ['mindiff', 'mindiff'], ['numobs', 'numobs'], ['stdev', 'stdev']]
    mogringoutliers = instance_subcat(aCat, mapping, 'mog-ring-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_intra_plane_outliers')
    mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['type', 'type'], ['improper', 'improper'], ['omega', 'omega'], ['plane_rmsd', 'planeRMSD']]
    planeoutliers = instance_subcat(aCat, mapping, 'plane-outlier')
    aCat = DataCategory('pdbx_vrpt_instance_clashes')
    mapping = [['ordinal', '?'], ['label_alt_id', '?'], ['instance_id', '?'], ['label_atom_id', 'atom'], ['cid', 'cid'], ['clashmag', 'clashmag'], ['dist', 'dist']]
    clashoutliers = instance_subcat(aCat, mapping, 'clash')
    aCat = DataCategory('pdbx_vrpt_instance_symm_clashes')
    mapping = [['ordinal', '?'], ['label_alt_id', '?'], ['instance_id', '?'], ['label_atom_id', 'atom'], ['symop', 'symop'], ['scid', 'scid'], ['clashmag', 'clashmag'], ['dist', 'dist']]
    symclashoutliers = instance_subcat(aCat, mapping, 'symm-clash')
    # ---- append per-instance outlier counts to the master table ----
    aMICat.appendAttribute('count_angle_outliers')
    aMICat.appendAttribute('count_bond_outliers')
    aMICat.appendAttribute('count_clashes')
    aMICat.appendAttribute('count_symm_clashes')
    aMICat.appendAttribute('count_chiral_outliers')
    aMICat.appendAttribute('count_plane_outliers')
    aMICat.appendAttribute('count_mogul_angle_outliers')
    aMICat.appendAttribute('count_mogul_bond_outliers')
    aMICat.appendAttribute('count_mogul_torsion_outliers')
    aMICat.appendAttribute('count_mogul_ring_outliers')
    for k in allreskeys:
        inst_id = k  # NOTE(review): unused local
        # Instance ids are 1-based and assigned in row order, so the
        # master-table row index is id - 1.
        row = (int(k) - 1)
        aMICat.setValue(angleoutliers.get(k, '?'), 'count_angle_outliers', row)
        aMICat.setValue(bondoutliers.get(k, '?'), 'count_bond_outliers', row)
        aMICat.setValue(clashoutliers.get(k, '?'), 'count_clashes', row)
        aMICat.setValue(symclashoutliers.get(k, '?'), 'count_symm_clashes', row)
        aMICat.setValue(chiraloutliers.get(k, '?'), 'count_chiral_outliers', row)
        aMICat.setValue(planeoutliers.get(k, '?'), 'count_plane_outliers', row)
        aMICat.setValue(mogangleoutliers.get(k, '?'), 'count_mogul_angle_outliers', row)
        aMICat.setValue(mogbondoutliers.get(k, '?'), 'count_mogul_bond_outliers', row)
        aMICat.setValue(mogtorsoutliers.get(k, '?'), 'count_mogul_torsion_outliers', row)
        aMICat.setValue(mogringoutliers.get(k, '?'), 'count_mogul_ring_outliers', row)
    # NOTE(review): 'label_altid' does not match the attribute name
    # 'label_alt_id' declared above, so that protection entry presumably
    # never matches -- confirm intent.
    self._trimContainer(aMICat, nodel=['label_altid', 'PDB_ins_code'])
    self._curContainer.append(aMICat)
|
def _processInstance(self):
allreskeys = []
modelledSubGroups = self._root.findall('ModelledSubgroup')
aMICat = DataCategory('pdbx_vrpt_model_instance')
mapping = [['id', '?'], ['PDB_model_num', 'model'], ['entity_id', 'ent'], ['label_asym_id', 'said'], ['label_seq_id', 'seq'], ['label_comp_id', 'resname'], ['auth_asym_id', 'chain'], ['auth_seq_id', 'resnum'], ['label_alt_id', 'altcode'], ['PDB_ins_code', 'icode']]
for m in mapping:
aMICat.appendAttribute(m[0])
ord = 0
for msg in modelledSubGroups:
attr = msg.attrib
ord = (ord + 1)
rd = {}
for m in mapping:
key = m[0]
lookup = m[1]
if (key == 'id'):
continue
val = attr[lookup]
if (val == ' '):
val = '?'
rd[key] = val
altcode = attr['altcode']
if (altcode == ' '):
altcode = '?'
reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
self._resparent[reskey] = str(ord)
allreskeys.append(str(ord))
rd['id'] = str(ord)
aMICat.append(rd)
aCat = DataCategory('pdbx_vrpt_model_instance_density')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', 'altcode'], ['natoms_eds', 'NatomsEDS'], ['RSRCC', 'rscc'], ['RSR', 'rsr'], ['RSRZ', 'rsrz'], ['lig_RSRZ_nbr_id', 'lig_rsrz_nbr_id'], ['lig_RSR_nbr_mean', 'ligRSRnbrMean'], ['lig_RSR_nbr_stdev', 'ligRSRnbrStdev'], ['lig_RSR_numnbrs', 'ligRSRnumnbrs'], ['lig_RSRZ', 'ligRSRZ']]
for m in mapping:
aCat.appendAttribute(m[0])
ord = 0
keep = False
rsrkeys = {}
for msg in modelledSubGroups:
attr = msg.attrib
if ('rsrz' not in attr):
continue
ord = (ord + 1)
rd = {}
for m in mapping:
key = m[0]
lookup = m[1]
if (lookup == '?'):
if (key == 'ordinal'):
val = str(ord)
elif (key == 'instance_id'):
altcode = attr['altcode']
if (altcode == ' '):
altcode = '?'
reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
val = self._resparent[reskey]
rsrkeys[val] = str(ord)
else:
print(('UNKNOWN KEY %s' % key))
sys.exit(1)
else:
val = attr.get(lookup, '?')
if (val == ' '):
val = '?'
rd[key] = val
keep = True
aCat.append(rd)
if keep:
self._trimContainer(aCat)
self._curContainer.append(aCat)
aCat = DataCategory('pdbx_vrpt_model_instance_geometry')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', 'altcode'], ['OWAB', 'owab'], ['average_occupancy', 'avgoccu'], ['rotamer_class', 'rota'], ['phi', 'phi'], ['psi', 'psi'], ['ramachandran_class', 'rama'], ['flippable_sidechain', 'flippable-sidechain'], ['RNA_score', 'RNAscore'], ['RNA_suite', 'RNAsuite'], ['RNA_pucker', 'RNApucker'], ['ligand_chirality_outlier', 'ligand_chirality_outlier'], ['cis_peptide', 'cis_peptide'], ['cyrange_domain_id', 'cyrange_domain_id'], ['validate', 'validate'], ['num_H_reduce', 'num-H-reduce'], ['mogul_ignore', 'mogul-ignore'], ['mogul_angles_RMSZ', 'mogul_angles_rmsz'], ['mogul_bonds_RMSZ', 'mogul_bonds_rmsz'], ['mogul_RMSZ_num_angles', 'mogul_rmsz_numangles'], ['mogul_RMSZ_num_bonds', 'mogul_rmsz_numbonds'], ['ligand_density_outlier', 'ligand_density_outlier'], ['residue_inclusion', 'residue_inclusion']]
for m in mapping:
aCat.appendAttribute(m[0])
ord = 0
keep = False
geomkeys = {}
for msg in modelledSubGroups:
attr = msg.attrib
if (('avgoccu' not in attr) and ('RNAscore' not in attr) and ('phi' not in attr) and ('rama' not in attr)):
continue
ord = (ord + 1)
rd = {}
for m in mapping:
key = m[0]
lookup = m[1]
if (lookup == '?'):
if (key == 'ordinal'):
val = str(ord)
elif (key == 'instance_id'):
altcode = attr['altcode']
if (altcode == ' '):
altcode = '?'
reskey = self.reslookup(attr['model'], attr['chain'], attr['resnum'], attr['resname'], attr['icode'], altcode)
val = self._resparent[reskey]
geomkeys[val] = str(ord)
else:
print(('UNKNOWN KEY %s' % key))
sys.exit(1)
else:
val = attr.get(lookup, '?')
if (lookup in ['ligand_chirality_outlier', 'cis_peptide']):
if (val == 'yes'):
val = 'Y'
elif (val == 'no'):
val = 'N'
if (val == ' '):
val = '?'
rd[key] = val
keep = True
aCat.append(rd)
if keep:
self._trimContainer(aCat)
self._curContainer.append(aCat)
def instance_subcat(aCat, mapping, name, splitatoms=False):
for m in mapping:
aCat.appendAttribute(m[0])
ord = 0
keep = False
outliervar = {}
for msg in modelledSubGroups:
pattr = msg.attrib
altcode = pattr['altcode']
if (altcode == ' '):
altcode = '?'
reskey = self.reslookup(pattr['model'], pattr['chain'], pattr['resnum'], pattr['resname'], pattr['icode'], altcode)
aoutlier = msg.findall(name)
for ao in aoutlier:
attr = ao.attrib
ord = (ord + 1)
rd = {}
for m in mapping:
key = m[0]
lookup = m[1]
if (lookup == '?'):
if (key == 'ordinal'):
val = str(ord)
elif (key == 'instance_id'):
val = self._resparent[reskey]
outliervar[val] = (outliervar.get(val, 0) + 1)
elif (key == 'label_alt_id'):
val = pattr['altcode']
elif ((key == 'atom0') and splitatoms):
val = attr['atoms'].split(',')[0]
elif ((key == 'atom1') and splitatoms):
val = attr['atoms'].split(',')[1]
elif ((key == 'atom2') and splitatoms):
val = attr['atoms'].split(',')[2]
elif ((key == 'atom3') and splitatoms):
val = attr['atoms'].split(',')[3]
elif ((key == 'atom_1') and splitatoms):
val = attr['atoms'].split(',')[0]
elif ((key == 'atom_2') and splitatoms):
val = attr['atoms'].split(',')[1]
elif ((key == 'atom_3') and splitatoms):
val = attr['atoms'].split(',')[2]
elif ((key == 'atom_4') and splitatoms):
val = attr['atoms'].split(',')[3]
else:
print(('UNKNOWN KEY %s' % key))
sys.exit(1)
else:
val = attr.get(lookup, '?')
if (lookup == 'link'):
if (val == 'yes'):
val = 'Y'
elif (val == 'no'):
val = 'N'
if (val == ' '):
val = '?'
rd[key] = val
keep = True
aCat.append(rd)
if keep:
self._curContainer.append(aCat)
return outliervar
return {}
aCat = DataCategory('pdbx_vrpt_instance_intra_angle_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', 'atom0'], ['atom_2', 'atom1'], ['atom_3', 'atom2'], ['obs', 'obs'], ['mean', 'mean'], ['stdev', 'stdev'], ['Z', 'z'], ['link', 'link']]
angleoutliers = instance_subcat(aCat, mapping, 'angle-outlier')
aCat = DataCategory('pdbx_vrpt_instance_mogul_angle_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['atom_3', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['stdev', 'stdev'], ['numobs', 'numobs'], ['Zscore', 'Sscore'], ['mindiff', 'mindiff']]
mogangleoutliers = instance_subcat(aCat, mapping, 'mog-angle-outlier', splitatoms=True)
aCat = DataCategory('pdbx_vrpt_instance_stereo_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['label_atom_id', 'atom'], ['problem', 'problem']]
chiraloutliers = instance_subcat(aCat, mapping, 'chiral-outlier')
aCat = DataCategory('pdbx_vrpt_instance_intra_bond_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', 'atom0'], ['atom_2', 'atom1'], ['obs', 'obs'], ['mean', 'mean'], ['stdev', 'stdev'], ['Z', 'z'], ['link', 'link']]
bondoutliers = instance_subcat(aCat, mapping, 'bond-outlier')
aCat = DataCategory('pdbx_vrpt_instance_mogul_bond_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['numobs', 'numobs'], ['stdev', 'stdev'], ['Zscore', 'Zscore'], ['mindiff', 'mindiff']]
mogbondoutliers = instance_subcat(aCat, mapping, 'mog-bond-outlier', splitatoms=True)
aCat = DataCategory('pdbx_vrpt_instance_mogul_torsion_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atom_1', '?'], ['atom_2', '?'], ['atom_3', '?'], ['atom_4', '?'], ['obsval', 'obsval'], ['mean', 'mean'], ['mindiff', 'mindiff'], ['numobs', 'numobs'], ['stdev', 'stdev'], ['local_density', 'local_density']]
mogtorsoutliers = instance_subcat(aCat, mapping, 'mog-torsion-outlier', splitatoms=True)
aCat = DataCategory('pdbx_vrpt_instance_mogul_ring_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['atoms', 'atoms'], ['mean', 'mean'], ['mindiff', 'mindiff'], ['numobs', 'numobs'], ['stdev', 'stdev']]
mogringoutliers = instance_subcat(aCat, mapping, 'mog-ring-outlier')
aCat = DataCategory('pdbx_vrpt_instance_intra_plane_outliers')
mapping = [['ordinal', '?'], ['instance_id', '?'], ['label_alt_id', '?'], ['type', 'type'], ['improper', 'improper'], ['omega', 'omega'], ['plane_rmsd', 'planeRMSD']]
planeoutliers = instance_subcat(aCat, mapping, 'plane-outlier')
aCat = DataCategory('pdbx_vrpt_instance_clashes')
mapping = [['ordinal', '?'], ['label_alt_id', '?'], ['instance_id', '?'], ['label_atom_id', 'atom'], ['cid', 'cid'], ['clashmag', 'clashmag'], ['dist', 'dist']]
clashoutliers = instance_subcat(aCat, mapping, 'clash')
aCat = DataCategory('pdbx_vrpt_instance_symm_clashes')
mapping = [['ordinal', '?'], ['label_alt_id', '?'], ['instance_id', '?'], ['label_atom_id', 'atom'], ['symop', 'symop'], ['scid', 'scid'], ['clashmag', 'clashmag'], ['dist', 'dist']]
symclashoutliers = instance_subcat(aCat, mapping, 'symm-clash')
aMICat.appendAttribute('count_angle_outliers')
aMICat.appendAttribute('count_bond_outliers')
aMICat.appendAttribute('count_clashes')
aMICat.appendAttribute('count_symm_clashes')
aMICat.appendAttribute('count_chiral_outliers')
aMICat.appendAttribute('count_plane_outliers')
aMICat.appendAttribute('count_mogul_angle_outliers')
aMICat.appendAttribute('count_mogul_bond_outliers')
aMICat.appendAttribute('count_mogul_torsion_outliers')
aMICat.appendAttribute('count_mogul_ring_outliers')
for k in allreskeys:
inst_id = k
row = (int(k) - 1)
aMICat.setValue(angleoutliers.get(k, '?'), 'count_angle_outliers', row)
aMICat.setValue(bondoutliers.get(k, '?'), 'count_bond_outliers', row)
aMICat.setValue(clashoutliers.get(k, '?'), 'count_clashes', row)
aMICat.setValue(symclashoutliers.get(k, '?'), 'count_symm_clashes', row)
aMICat.setValue(chiraloutliers.get(k, '?'), 'count_chiral_outliers', row)
aMICat.setValue(planeoutliers.get(k, '?'), 'count_plane_outliers', row)
aMICat.setValue(mogangleoutliers.get(k, '?'), 'count_mogul_angle_outliers', row)
aMICat.setValue(mogbondoutliers.get(k, '?'), 'count_mogul_bond_outliers', row)
aMICat.setValue(mogtorsoutliers.get(k, '?'), 'count_mogul_torsion_outliers', row)
aMICat.setValue(mogringoutliers.get(k, '?'), 'count_mogul_ring_outliers', row)
self._trimContainer(aMICat, nodel=['label_altid', 'PDB_ins_code'])
self._curContainer.append(aMICat)<|docstring|>Converts instance level info<|endoftext|>
|
1dfbae1a3d097ecf1d4c9785af399ef6c51d91ea101e08228ee999b0f90986d8
|
def _trimContainer(self, aCat, nodel=None):
'Remove columns that have not data'
if (nodel is None):
nodel = []
aset = set(aCat.getAttributeList())
keep = set()
for row in range(aCat.getRowCount()):
rd = aCat.getRowAttributeDict(row)
for (key, item) in rd.items():
if (item != '?'):
keep.add(key)
delist = list((aset - keep))
print(('Deleting %s' % delist))
for d in delist:
if (d not in nodel):
aCat.removeAttribute(d)
|
Remove columns that have not data
|
ConvertCal.py
|
_trimContainer
|
rcsb/py-rcsb_utils_validation
| 0
|
python
|
def _trimContainer(self, aCat, nodel=None):
if (nodel is None):
nodel = []
aset = set(aCat.getAttributeList())
keep = set()
for row in range(aCat.getRowCount()):
rd = aCat.getRowAttributeDict(row)
for (key, item) in rd.items():
if (item != '?'):
keep.add(key)
delist = list((aset - keep))
print(('Deleting %s' % delist))
for d in delist:
if (d not in nodel):
aCat.removeAttribute(d)
|
def _trimContainer(self, aCat, nodel=None):
if (nodel is None):
nodel = []
aset = set(aCat.getAttributeList())
keep = set()
for row in range(aCat.getRowCount()):
rd = aCat.getRowAttributeDict(row)
for (key, item) in rd.items():
if (item != '?'):
keep.add(key)
delist = list((aset - keep))
print(('Deleting %s' % delist))
for d in delist:
if (d not in nodel):
aCat.removeAttribute(d)<|docstring|>Remove columns that have not data<|endoftext|>
|
9eaea3153a8900b56b36cfcf94012b4ca882c57fab3cdfb21c95bb2554d7d8c4
|
@staticmethod
def weighted_choice(items):
'items is a list of tuples in the form (item, weight)'
weight_total = sum((item[1] for item in items))
n = random.uniform(0, weight_total)
for (item, weight) in items:
if (n < weight):
return item
n = (n - weight)
return item
|
items is a list of tuples in the form (item, weight)
|
generators.py
|
weighted_choice
|
rjw245/movr
| 24
|
python
|
@staticmethod
def weighted_choice(items):
weight_total = sum((item[1] for item in items))
n = random.uniform(0, weight_total)
for (item, weight) in items:
if (n < weight):
return item
n = (n - weight)
return item
|
@staticmethod
def weighted_choice(items):
weight_total = sum((item[1] for item in items))
n = random.uniform(0, weight_total)
for (item, weight) in items:
if (n < weight):
return item
n = (n - weight)
return item<|docstring|>items is a list of tuples in the form (item, weight)<|endoftext|>
|
b6d9f8d9447a4b5676e95e1e6dd747c564f0fb175b349c664826e97e73adce97
|
@property
def merk(self):
    """Brand name ("merknaam") of the custom dynamic board (dynamisch bord op maat)."""
    return self._merk.get_waarde()
|
Merknaam van het dynamisch bord op maat.
|
src/OTLMOW/OTLModel/Classes/DynBordOpMaat.py
|
merk
|
davidvlaminck/OTLClassPython
| 2
|
python
|
@property
def merk(self):
return self._merk.get_waarde()
|
@property
def merk(self):
return self._merk.get_waarde()<|docstring|>Merknaam van het dynamisch bord op maat.<|endoftext|>
|
ebfa9d48bcee8841608595e684a083f12396906fbb49f516e56346d6c50cf5dd
|
@property
def modelnaam(self):
    """Model name ("modelnaam") of the custom dynamic board (dynamisch bord op maat)."""
    return self._modelnaam.get_waarde()
|
Modelnaam van het dynamisch bord op maat.
|
src/OTLMOW/OTLModel/Classes/DynBordOpMaat.py
|
modelnaam
|
davidvlaminck/OTLClassPython
| 2
|
python
|
@property
def modelnaam(self):
return self._modelnaam.get_waarde()
|
@property
def modelnaam(self):
return self._modelnaam.get_waarde()<|docstring|>Modelnaam van het dynamisch bord op maat.<|endoftext|>
|
556a0e4a46e9f4d0a102dbad82fe831e81e4b4642ef3855eed96e8868b626aa4
|
def factory(model, **kwargs):
    """Creates a factory boy factory class.

    Args:
        model: model class the generated factory will instantiate.
        **kwargs: explicit field declarations; each value is converted to a
            declaration via :func:`explicit_declaration`.

    Returns:
        A new ``DjangoModelFactory`` subclass named ``<Model>Factory``.
    """
    ns = {'Meta': type('Meta', (), {'model': model})}
    # Bug fix: iterating a dict directly yields only keys; .items() is
    # required to unpack the (name, value) pairs.
    for (k, v) in kwargs.items():
        ns[k] = explicit_declaration(model, k, v)
    mommy = Mommy(model)
    for field in model._meta.fields:
        if (not requires_declaration(model, field.name, ns)):
            continue
        ns[field.name] = implicit_declaration(model, field.name, ns, mommy)
    return type((model.__name__ + 'Factory'), (DjangoModelFactory,), ns)
|
Creates a factory boy factory class
|
src/boogie/testing/factories.py
|
factory
|
pencil-labs/django-boogie
| 0
|
python
|
def factory(model, **kwargs):
'\n \n '
ns = {'Meta': type('Meta', (), {'model': model})}
for (k, v) in kwargs:
ns[k] = explicit_declaration(model, k, v)
mommy = Mommy(model)
for field in model._meta.fields:
if (not requires_declaration(model, field.name, ns)):
continue
ns[field.name] = implicit_declaration(model, field.name, ns, mommy)
return type((model.__name__ + 'Factory'), (DjangoModelFactory,), ns)
|
def factory(model, **kwargs):
'\n \n '
ns = {'Meta': type('Meta', (), {'model': model})}
for (k, v) in kwargs:
ns[k] = explicit_declaration(model, k, v)
mommy = Mommy(model)
for field in model._meta.fields:
if (not requires_declaration(model, field.name, ns)):
continue
ns[field.name] = implicit_declaration(model, field.name, ns, mommy)
return type((model.__name__ + 'Factory'), (DjangoModelFactory,), ns)<|docstring|>Creates a factory boy factory class<|endoftext|>
|
7be16ecff9efb6a877690f7d27b3bff3d702409c5b1f756a29b780cc84916f6c
|
def explicit_declaration(model, name, value):
    """Return a Declaration instance that implements an explicitly defined field
    for a model.

    Args:
        model:
            Model class
        name:
            Name of the field in the model
        value:
            Value explicitly passed by the user
    """
    # Bug fix: bare issubclass() raises TypeError when *value* is not a
    # class (e.g. a plain string or a declaration *instance*), so guard
    # the subclass test with isinstance checks.
    if isinstance(value, BaseDeclaration) or (isinstance(value, type) and issubclass(value, BaseDeclaration)):
        return value
    elif (isinstance(value, type) and issubclass(value, Factory)):
        return _factory.SubFactory(value)
    elif callable(value):
        # Zero-arg callables become LazyFunction; one-arg callables take
        # the object being built and become LazyAttribute.
        if has_no_args(value):
            return _factory.LazyFunction(value)
        else:
            return _factory.LazyAttribute(value)
    elif isinstance(value, str):
        # Strings are treated as format templates over the instance.
        return _factory.LazyAttribute((lambda x: value.format(model=x)))
    else:
        # Any other value is used verbatim as a default.
        return value
|
Return a Declaration instance that implements an explicitly defined field
for a model.
Args:
model:
Model class
name:
Name of the field in the model
value:
Value explicitly passed by the user
|
src/boogie/testing/factories.py
|
explicit_declaration
|
pencil-labs/django-boogie
| 0
|
python
|
def explicit_declaration(model, name, value):
'\n Return a Declaration instance that implements an explicitly defined field\n for a model.\n\n Args:\n model:\n Model class\n name:\n Name of the field in the model\n value:\n Value explicitly passed by the user\n '
if issubclass(value, BaseDeclaration):
return value
elif (isinstance(value, type) and issubclass(value, Factory)):
return _factory.SubFactory(value)
elif callable(value):
if has_no_args(value):
return _factory.LazyFunction(value)
else:
return _factory.LazyAttribute(value)
elif isinstance(value, str):
return _factory.LazyAttribute((lambda x: value.format(model=x)))
else:
return value
|
def explicit_declaration(model, name, value):
'\n Return a Declaration instance that implements an explicitly defined field\n for a model.\n\n Args:\n model:\n Model class\n name:\n Name of the field in the model\n value:\n Value explicitly passed by the user\n '
if issubclass(value, BaseDeclaration):
return value
elif (isinstance(value, type) and issubclass(value, Factory)):
return _factory.SubFactory(value)
elif callable(value):
if has_no_args(value):
return _factory.LazyFunction(value)
else:
return _factory.LazyAttribute(value)
elif isinstance(value, str):
return _factory.LazyAttribute((lambda x: value.format(model=x)))
else:
return value<|docstring|>Return a Declaration instance that implements an explicitly defined field
for a model.
Args:
model:
Model class
name:
Name of the field in the model
value:
Value explicitly passed by the user<|endoftext|>
|
0befb410f64b85e1887855f1519ea385a71db12f4dd6f2c3bdfd72a400cd96e1
|
def requires_declaration(model, name, definitions):
    """Return True if explicit generation of given field is required during model
    instantiation.

    Args:
        model:
            Model class
        name:
            Model field name
        definitions:
            A map of names of all explicitly defined fields to their
            corresponding defined values.
    """
    field = model._meta.get_field(name)
    # Fields with defaults or auto-generated values (pk, auto_now, ...)
    # never need an explicit declaration.
    if (field.has_default() or field.auto_created):
        return False
    # Bug fix: *definitions* is keyed by field *name*; testing the field
    # object itself could never match, so explicitly declared fields were
    # redundantly given implicit declarations as well.
    if (field.name in definitions):
        return False
    return True
|
Return True if explicit generation of given field is required during model
instantiation.
Args:
model:
Model class
name:
Model field name
definitions:
A map of names of all explicitly defined fields to their
corresponding defined values.
|
src/boogie/testing/factories.py
|
requires_declaration
|
pencil-labs/django-boogie
| 0
|
python
|
def requires_declaration(model, name, definitions):
'\n Return True if explicit generation of given field is required during model\n instantiation.\n\n Args:\n model:\n Model class\n name:\n Model field name\n definitions:\n A map of names of all explicitly defined fields to their\n corresponding defined values.\n '
field = model._meta.get_field(name)
if (field.has_default() or field.auto_created):
return False
if (field in definitions):
return False
return True
|
def requires_declaration(model, name, definitions):
'\n Return True if explicit generation of given field is required during model\n instantiation.\n\n Args:\n model:\n Model class\n name:\n Model field name\n definitions:\n A map of names of all explicitly defined fields to their\n corresponding defined values.\n '
field = model._meta.get_field(name)
if (field.has_default() or field.auto_created):
return False
if (field in definitions):
return False
return True<|docstring|>Return True if explicit generation of given field is required during model
instantiation.
Args:
model:
Model class
name:
Model field name
definitions:
A map of names of all explicitly defined fields to their
corresponding defined values.<|endoftext|>
|
fa06df64dff93e9431920bbd83181fa87fc47dfbf7b2bfd5ae01a96c6b9d6ea6
|
def implicit_declaration(model, name, definitions, mommy):
    """Creates an implicit declaration for the field.

    Receives the same arguments as :func:`requires_declaration`, but returns
    a declaration instance.
    """
    field = model._meta.get_field(name)
    # Prefer a faker provider matching the field name; otherwise fall back
    # to model-mommy's value generation for the field type.
    _missing = object()
    faker = getattr(fake, name, _missing)
    if faker is not _missing:
        return declarations.LazyFunction(faker)
    return declarations.LazyFunction(lambda: mommy.generate_value(field, commit=False))
|
Creates an implicit declaration for the field.
Receives the same arguments as :func:`requires_declaration`, but returns
a declaration instance.
|
src/boogie/testing/factories.py
|
implicit_declaration
|
pencil-labs/django-boogie
| 0
|
python
|
def implicit_declaration(model, name, definitions, mommy):
'\n Creates an implicit declaration for the field.\n\n Receives the same arguments as :func:`requires_declaration`, but returns\n a declaration instance.\n '
field = model._meta.get_field(name)
try:
faker = getattr(fake, name)
return declarations.LazyFunction(faker)
except AttributeError:
generator = (lambda : mommy.generate_value(field, commit=False))
return declarations.LazyFunction(generator)
|
def implicit_declaration(model, name, definitions, mommy):
'\n Creates an implicit declaration for the field.\n\n Receives the same arguments as :func:`requires_declaration`, but returns\n a declaration instance.\n '
field = model._meta.get_field(name)
try:
faker = getattr(fake, name)
return declarations.LazyFunction(faker)
except AttributeError:
generator = (lambda : mommy.generate_value(field, commit=False))
return declarations.LazyFunction(generator)<|docstring|>Creates an implicit declaration for the field.
Receives the same arguments as :func:`requires_declaration`, but returns
a declaration instance.<|endoftext|>
|
73beb3dbaaf412f4ea6252b2ad9d1a2f0f8bcba0d2b7ad80bbfcd9f1ec9cfa4b
|
def has_no_args(func):
'\n Return True if function is called with no positional args.\n '
try:
spec = inspect.getfullargspec(func)
except TypeError:
return has_no_args(func.__call__)
return bool(spec.args)
|
Return True if function is called with no positional args.
|
src/boogie/testing/factories.py
|
has_no_args
|
pencil-labs/django-boogie
| 0
|
python
|
def has_no_args(func):
'\n \n '
try:
spec = inspect.getfullargspec(func)
except TypeError:
return has_no_args(func.__call__)
return bool(spec.args)
|
def has_no_args(func):
'\n \n '
try:
spec = inspect.getfullargspec(func)
except TypeError:
return has_no_args(func.__call__)
return bool(spec.args)<|docstring|>Return True if function is called with no positional args.<|endoftext|>
|
11b27c6524ae5fd5937137d868ed0ffb66855ef02459ba93438a69a05bd66405
|
def _ensure_section_existence(config_parser, section_name):
'Add a section to the config_parser if not present.'
if (not config_parser.has_section(section_name)):
config_parser.add_section(section_name)
|
Add a section to the config_parser if not present.
|
cli/src/pcluster/config/param_types.py
|
_ensure_section_existence
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _ensure_section_existence(config_parser, section_name):
if (not config_parser.has_section(section_name)):
config_parser.add_section(section_name)
|
def _ensure_section_existence(config_parser, section_name):
if (not config_parser.has_section(section_name)):
config_parser.add_section(section_name)<|docstring|>Add a section to the config_parser if not present.<|endoftext|>
|
2a8612960e19e6229d06a1f1dda72f7a6ab12ff659d44ec8e56ee4b9766f5123
|
def get_value_from_string(self, string_value):
'Return internal representation starting from CFN/user-input value.'
param_value = self.get_default_value()
string_value = (str(string_value).strip() if string_value else None)
if (string_value and (string_value != 'NONE')):
param_value = string_value
return param_value
|
Return internal representation starting from CFN/user-input value.
|
cli/src/pcluster/config/param_types.py
|
get_value_from_string
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_value_from_string(self, string_value):
param_value = self.get_default_value()
string_value = (str(string_value).strip() if string_value else None)
if (string_value and (string_value != 'NONE')):
param_value = string_value
return param_value
|
def get_value_from_string(self, string_value):
param_value = self.get_default_value()
string_value = (str(string_value).strip() if string_value else None)
if (string_value and (string_value != 'NONE')):
param_value = string_value
return param_value<|docstring|>Return internal representation starting from CFN/user-input value.<|endoftext|>
|
8d3317f4e8251655d1cd03506b0161376063b00e8bcfab955feae0c69e4bbbbc
|
@abc.abstractmethod
def from_storage(self, storage_params):
'Load the param from the related storage data structure.'
pass
|
Load the param from the related storage data structure.
|
cli/src/pcluster/config/param_types.py
|
from_storage
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@abc.abstractmethod
def from_storage(self, storage_params):
pass
|
@abc.abstractmethod
def from_storage(self, storage_params):
pass<|docstring|>Load the param from the related storage data structure.<|endoftext|>
|
1930b77ea03c4df15212539f8935ddc91bc9f857c763866f7492c1621c6a6464
|
def _validate_section_label(self):
'\n Validate the section label.\n\n Verifies that the section label begins by a letter, contains only alphanumeric characters and hyphens\n and if its length is at most 30.\n '
if ((self.section_label != '') and (not re.match('^[a-zA-Z][a-zA-Z0-9-\\\\_]{0,29}$', self.section_label))):
LOGGER.error('Failed validation for section {0} {1}. Section names can be at most 30 chars long, must begin with a letter and only contain alphanumeric characters, hyphens and underscores.'.format(self.section_key, self.section_label))
sys.exit(1)
|
Validate the section label.
Verifies that the section label begins by a letter, contains only alphanumeric characters and hyphens
and if its length is at most 30.
|
cli/src/pcluster/config/param_types.py
|
_validate_section_label
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _validate_section_label(self):
'\n Validate the section label.\n\n Verifies that the section label begins by a letter, contains only alphanumeric characters and hyphens\n and if its length is at most 30.\n '
if ((self.section_label != ) and (not re.match('^[a-zA-Z][a-zA-Z0-9-\\\\_]{0,29}$', self.section_label))):
LOGGER.error('Failed validation for section {0} {1}. Section names can be at most 30 chars long, must begin with a letter and only contain alphanumeric characters, hyphens and underscores.'.format(self.section_key, self.section_label))
sys.exit(1)
|
def _validate_section_label(self):
'\n Validate the section label.\n\n Verifies that the section label begins by a letter, contains only alphanumeric characters and hyphens\n and if its length is at most 30.\n '
if ((self.section_label != ) and (not re.match('^[a-zA-Z][a-zA-Z0-9-\\\\_]{0,29}$', self.section_label))):
LOGGER.error('Failed validation for section {0} {1}. Section names can be at most 30 chars long, must begin with a letter and only contain alphanumeric characters, hyphens and underscores.'.format(self.section_key, self.section_label))
sys.exit(1)<|docstring|>Validate the section label.
Verifies that the section label begins by a letter, contains only alphanumeric characters and hyphens
and if its length is at most 30.<|endoftext|>
|
075d78eb61c1bd446a82e265d7903a8ba55a2e7b37af4378442c879a3767c078
|
def from_file(self, config_parser):
'\n Initialize parameter value from config_parser.\n\n :param config_parser: the configparser object from which get the parameter\n '
section_name = get_file_section_name(self.section_key, self.section_label)
if config_parser.has_option(section_name, self.key):
if (self.section_key not in self.pcluster_config.get_global_section_keys()):
self._validate_section_label()
self.value = config_parser.get(section_name, self.key)
self._check_allowed_values()
return self
|
Initialize parameter value from config_parser.
:param config_parser: the configparser object from which get the parameter
|
cli/src/pcluster/config/param_types.py
|
from_file
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def from_file(self, config_parser):
'\n Initialize parameter value from config_parser.\n\n :param config_parser: the configparser object from which get the parameter\n '
section_name = get_file_section_name(self.section_key, self.section_label)
if config_parser.has_option(section_name, self.key):
if (self.section_key not in self.pcluster_config.get_global_section_keys()):
self._validate_section_label()
self.value = config_parser.get(section_name, self.key)
self._check_allowed_values()
return self
|
def from_file(self, config_parser):
'\n Initialize parameter value from config_parser.\n\n :param config_parser: the configparser object from which get the parameter\n '
section_name = get_file_section_name(self.section_key, self.section_label)
if config_parser.has_option(section_name, self.key):
if (self.section_key not in self.pcluster_config.get_global_section_keys()):
self._validate_section_label()
self.value = config_parser.get(section_name, self.key)
self._check_allowed_values()
return self<|docstring|>Initialize parameter value from config_parser.
:param config_parser: the configparser object from which get the parameter<|endoftext|>
|
3cefc4dc267438634519b4e3a62588a23aa72e0a2eb1800026d0a5df3db4e6e4
|
@abc.abstractmethod
def to_storage(self, storage_params):
'Write the param to the related storage data structure.'
pass
|
Write the param to the related storage data structure.
|
cli/src/pcluster/config/param_types.py
|
to_storage
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@abc.abstractmethod
def to_storage(self, storage_params):
pass
|
@abc.abstractmethod
def to_storage(self, storage_params):
pass<|docstring|>Write the param to the related storage data structure.<|endoftext|>
|
7779c9773eeec7a00999f8c7f8670e7aaddef905c0adf4385bd63ee028523898
|
def _from_definition(self):
'Initialize parameter value by using default specified in the mapping file.'
self.value = self.get_default_value()
if self.value:
LOGGER.debug("Setting default value '%s' for key '%s'", self.value, self.key)
|
Initialize parameter value by using default specified in the mapping file.
|
cli/src/pcluster/config/param_types.py
|
_from_definition
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _from_definition(self):
self.value = self.get_default_value()
if self.value:
LOGGER.debug("Setting default value '%s' for key '%s'", self.value, self.key)
|
def _from_definition(self):
self.value = self.get_default_value()
if self.value:
LOGGER.debug("Setting default value '%s' for key '%s'", self.value, self.key)<|docstring|>Initialize parameter value by using default specified in the mapping file.<|endoftext|>
|
bd6406b096b352557ad43c37d2a482fe0e64f6c877e80f1337fa4afb5f26e636
|
def _check_allowed_values(self):
'Verify if the parameter value is one of the allowed values specified in the mapping file.'
allowed_values = self.definition.get('allowed_values', None)
if allowed_values:
if isinstance(allowed_values, list):
if (self.value not in allowed_values):
self.pcluster_config.error("The configuration parameter '{0}' has an invalid value '{1}'\nAllowed values are: {2}".format(self.key, self.value, allowed_values))
elif (not re.compile(allowed_values).match(str(self.value))):
self.pcluster_config.error("The configuration parameter '{0}' has an invalid value '{1}'\nAllowed values are: {2}".format(self.key, self.value, allowed_values))
|
Verify if the parameter value is one of the allowed values specified in the mapping file.
|
cli/src/pcluster/config/param_types.py
|
_check_allowed_values
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _check_allowed_values(self):
allowed_values = self.definition.get('allowed_values', None)
if allowed_values:
if isinstance(allowed_values, list):
if (self.value not in allowed_values):
self.pcluster_config.error("The configuration parameter '{0}' has an invalid value '{1}'\nAllowed values are: {2}".format(self.key, self.value, allowed_values))
elif (not re.compile(allowed_values).match(str(self.value))):
self.pcluster_config.error("The configuration parameter '{0}' has an invalid value '{1}'\nAllowed values are: {2}".format(self.key, self.value, allowed_values))
|
def _check_allowed_values(self):
allowed_values = self.definition.get('allowed_values', None)
if allowed_values:
if isinstance(allowed_values, list):
if (self.value not in allowed_values):
self.pcluster_config.error("The configuration parameter '{0}' has an invalid value '{1}'\nAllowed values are: {2}".format(self.key, self.value, allowed_values))
elif (not re.compile(allowed_values).match(str(self.value))):
self.pcluster_config.error("The configuration parameter '{0}' has an invalid value '{1}'\nAllowed values are: {2}".format(self.key, self.value, allowed_values))<|docstring|>Verify if the parameter value is one of the allowed values specified in the mapping file.<|endoftext|>
|
74f602cf3d26e0adcd83dddd54d4d3717d87df2a3275e63bfc015f4c9f96009a
|
def validate(self):
'Call validation functions for the parameter, if there.'
if (self.definition.get('required') and (self.value is None)):
sys.exit("Configuration parameter '{0}' must have a value".format(self.key))
for validation_func in self.definition.get('validators', []):
if (self.value is None):
LOGGER.debug("Configuration parameter '%s' has no value", self.key)
else:
(errors, warnings) = validation_func(self.key, self.value, self.pcluster_config)
if errors:
self.pcluster_config.error("The configuration parameter '{0}' generated the following errors:\n{1}".format(self.key, '\n'.join(errors)))
elif warnings:
self.pcluster_config.warn("The configuration parameter '{0}' generated the following warnings:\n{1}".format(self.key, '\n'.join(warnings)))
else:
LOGGER.debug("Configuration parameter '%s' is valid", self.key)
|
Call validation functions for the parameter, if there.
|
cli/src/pcluster/config/param_types.py
|
validate
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def validate(self):
if (self.definition.get('required') and (self.value is None)):
sys.exit("Configuration parameter '{0}' must have a value".format(self.key))
for validation_func in self.definition.get('validators', []):
if (self.value is None):
LOGGER.debug("Configuration parameter '%s' has no value", self.key)
else:
(errors, warnings) = validation_func(self.key, self.value, self.pcluster_config)
if errors:
self.pcluster_config.error("The configuration parameter '{0}' generated the following errors:\n{1}".format(self.key, '\n'.join(errors)))
elif warnings:
self.pcluster_config.warn("The configuration parameter '{0}' generated the following warnings:\n{1}".format(self.key, '\n'.join(warnings)))
else:
LOGGER.debug("Configuration parameter '%s' is valid", self.key)
|
def validate(self):
if (self.definition.get('required') and (self.value is None)):
sys.exit("Configuration parameter '{0}' must have a value".format(self.key))
for validation_func in self.definition.get('validators', []):
if (self.value is None):
LOGGER.debug("Configuration parameter '%s' has no value", self.key)
else:
(errors, warnings) = validation_func(self.key, self.value, self.pcluster_config)
if errors:
self.pcluster_config.error("The configuration parameter '{0}' generated the following errors:\n{1}".format(self.key, '\n'.join(errors)))
elif warnings:
self.pcluster_config.warn("The configuration parameter '{0}' generated the following warnings:\n{1}".format(self.key, '\n'.join(warnings)))
else:
LOGGER.debug("Configuration parameter '%s' is valid", self.key)<|docstring|>Call validation functions for the parameter, if there.<|endoftext|>
|
f4977ab5bbfa1abe1d63b1a60c9cf359ec589f8633cbf74a844df55c2499e0d3
|
def to_file(self, config_parser, write_defaults=False):
'Set parameter in the config_parser in the right section.'
section_name = get_file_section_name(self.section_key, self.section_label)
if ((self.value is not None) and (write_defaults or (self.value != self.get_default_value())) and self.get_string_value()):
_ensure_section_existence(config_parser, section_name)
config_parser.set(section_name, self.key, self.get_string_value())
else:
try:
config_parser.remove_option(section_name, self.key)
except NoSectionError:
pass
|
Set parameter in the config_parser in the right section.
|
cli/src/pcluster/config/param_types.py
|
to_file
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def to_file(self, config_parser, write_defaults=False):
section_name = get_file_section_name(self.section_key, self.section_label)
if ((self.value is not None) and (write_defaults or (self.value != self.get_default_value())) and self.get_string_value()):
_ensure_section_existence(config_parser, section_name)
config_parser.set(section_name, self.key, self.get_string_value())
else:
try:
config_parser.remove_option(section_name, self.key)
except NoSectionError:
pass
|
def to_file(self, config_parser, write_defaults=False):
section_name = get_file_section_name(self.section_key, self.section_label)
if ((self.value is not None) and (write_defaults or (self.value != self.get_default_value())) and self.get_string_value()):
_ensure_section_existence(config_parser, section_name)
config_parser.set(section_name, self.key, self.get_string_value())
else:
try:
config_parser.remove_option(section_name, self.key)
except NoSectionError:
pass<|docstring|>Set parameter in the config_parser in the right section.<|endoftext|>
|
dd5490c5ab8b296f5afc8dc9b637b2f94c01755edcf4f36d17a03bc580072e2d
|
def get_string_value(self):
'Convert internal representation into string.'
return str(self.value)
|
Convert internal representation into string.
|
cli/src/pcluster/config/param_types.py
|
get_string_value
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_string_value(self):
return str(self.value)
|
def get_string_value(self):
return str(self.value)<|docstring|>Convert internal representation into string.<|endoftext|>
|
79c82d6ee937b05cdd7d9266e76977ece29553415e81a8ee37bee922473fa0a9
|
def get_default_value(self):
'\n Get default value from the Param definition.\n\n If the default value is a function, pass it the Section this parameter\n is contained within. Otherwise, pass the literal value, defaulting to\n None if not specified.\n '
default = self.definition.get('default', None)
if callable(default):
section = self.pcluster_config.get_section(self.section_key, self.section_label)
return default(section)
return default
|
Get default value from the Param definition.
If the default value is a function, pass it the Section this parameter
is contained within. Otherwise, pass the literal value, defaulting to
None if not specified.
|
cli/src/pcluster/config/param_types.py
|
get_default_value
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_default_value(self):
'\n Get default value from the Param definition.\n\n If the default value is a function, pass it the Section this parameter\n is contained within. Otherwise, pass the literal value, defaulting to\n None if not specified.\n '
default = self.definition.get('default', None)
if callable(default):
section = self.pcluster_config.get_section(self.section_key, self.section_label)
return default(section)
return default
|
def get_default_value(self):
'\n Get default value from the Param definition.\n\n If the default value is a function, pass it the Section this parameter\n is contained within. Otherwise, pass the literal value, defaulting to\n None if not specified.\n '
default = self.definition.get('default', None)
if callable(default):
section = self.pcluster_config.get_section(self.section_key, self.section_label)
return default(section)
return default<|docstring|>Get default value from the Param definition.
If the default value is a function, pass it the Section this parameter
is contained within. Otherwise, pass the literal value, defaulting to
None if not specified.<|endoftext|>
|
31eb86510775505d1fc812fcd5e3ff805f1518d63311dd5dd87de9052a2175e9
|
def reset_value(self):
'Reset parameter to default value.'
self.value = self.get_default_value()
|
Reset parameter to default value.
|
cli/src/pcluster/config/param_types.py
|
reset_value
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def reset_value(self):
self.value = self.get_default_value()
|
def reset_value(self):
self.value = self.get_default_value()<|docstring|>Reset parameter to default value.<|endoftext|>
|
8887cd417bc51b0b82a1d67b68fa2ee011914072270777b1db84be5f011f460d
|
def refresh(self):
"\n Refresh the parameter's value.\n\n Does nothing by default. Subclasses can implement this method by updating parameter's value based on\n PClusterConfig status.\n "
pass
|
Refresh the parameter's value.
Does nothing by default. Subclasses can implement this method by updating parameter's value based on
PClusterConfig status.
|
cli/src/pcluster/config/param_types.py
|
refresh
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def refresh(self):
"\n Refresh the parameter's value.\n\n Does nothing by default. Subclasses can implement this method by updating parameter's value based on\n PClusterConfig status.\n "
pass
|
def refresh(self):
"\n Refresh the parameter's value.\n\n Does nothing by default. Subclasses can implement this method by updating parameter's value based on\n PClusterConfig status.\n "
pass<|docstring|>Refresh the parameter's value.
Does nothing by default. Subclasses can implement this method by updating parameter's value based on
PClusterConfig status.<|endoftext|>
|
54028389f7ff71ce57e74c65fa131fc42af0e9475263c4d628bcd7143f67c977
|
def get_update_policy(self):
'Get the update policy of the parameter.'
return self.definition.get('update_policy', UpdatePolicy.UNKNOWN)
|
Get the update policy of the parameter.
|
cli/src/pcluster/config/param_types.py
|
get_update_policy
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_update_policy(self):
return self.definition.get('update_policy', UpdatePolicy.UNKNOWN)
|
def get_update_policy(self):
return self.definition.get('update_policy', UpdatePolicy.UNKNOWN)<|docstring|>Get the update policy of the parameter.<|endoftext|>
|
935f9c34d4b063200f8a996699167e1457c49494dea4725c32fb0440f1ec3f68
|
def get_storage_key(self):
'\n Return the key by which the current param must be stored in the JSON.\n\n By default the param key is used as storage key.\n '
return self.key
|
Return the key by which the current param must be stored in the JSON.
By default the param key is used as storage key.
|
cli/src/pcluster/config/param_types.py
|
get_storage_key
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_storage_key(self):
'\n Return the key by which the current param must be stored in the JSON.\n\n By default the param key is used as storage key.\n '
return self.key
|
def get_storage_key(self):
'\n Return the key by which the current param must be stored in the JSON.\n\n By default the param key is used as storage key.\n '
return self.key<|docstring|>Return the key by which the current param must be stored in the JSON.
By default the param key is used as storage key.<|endoftext|>
|
48e8365d23a28dd4f39b3a165af3ba7f2663af79520b2e53a23f1e8891271edf
|
def __init__(self, section_key, section_label, param_key, param_definition, pcluster_config, owner_section=None):
'Extend Param by adding info regarding the section referred by the settings.'
self.referred_section_definition = param_definition.get('referred_section')
self.referred_section_key = self.referred_section_definition.get('key')
self.referred_section_type = self.referred_section_definition.get('type')
param_definition.get('validators', []).append(settings_validator)
super(SettingsParam, self).__init__(section_key, section_label, param_key, param_definition, pcluster_config, owner_section)
|
Extend Param by adding info regarding the section referred by the settings.
|
cli/src/pcluster/config/param_types.py
|
__init__
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def __init__(self, section_key, section_label, param_key, param_definition, pcluster_config, owner_section=None):
self.referred_section_definition = param_definition.get('referred_section')
self.referred_section_key = self.referred_section_definition.get('key')
self.referred_section_type = self.referred_section_definition.get('type')
param_definition.get('validators', []).append(settings_validator)
super(SettingsParam, self).__init__(section_key, section_label, param_key, param_definition, pcluster_config, owner_section)
|
def __init__(self, section_key, section_label, param_key, param_definition, pcluster_config, owner_section=None):
self.referred_section_definition = param_definition.get('referred_section')
self.referred_section_key = self.referred_section_definition.get('key')
self.referred_section_type = self.referred_section_definition.get('type')
param_definition.get('validators', []).append(settings_validator)
super(SettingsParam, self).__init__(section_key, section_label, param_key, param_definition, pcluster_config, owner_section)<|docstring|>Extend Param by adding info regarding the section referred by the settings.<|endoftext|>
|
41aeb17b8a7c3ee677e41e55681cdff35a9c8e0f8776aaec2d50fd55cb89a20b
|
def get_default_value(self):
'\n Get default value.\n\n If the referred section has the "autocreate" attribute, it means that it is required to initialize\n the settings param and the related section with default values (i.e. vpc, scaling).\n '
return ('default' if self.referred_section_definition.get('autocreate', False) else None)
|
Get default value.
If the referred section has the "autocreate" attribute, it means that it is required to initialize
the settings param and the related section with default values (i.e. vpc, scaling).
|
cli/src/pcluster/config/param_types.py
|
get_default_value
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_default_value(self):
'\n Get default value.\n\n If the referred section has the "autocreate" attribute, it means that it is required to initialize\n the settings param and the related section with default values (i.e. vpc, scaling).\n '
return ('default' if self.referred_section_definition.get('autocreate', False) else None)
|
def get_default_value(self):
'\n Get default value.\n\n If the referred section has the "autocreate" attribute, it means that it is required to initialize\n the settings param and the related section with default values (i.e. vpc, scaling).\n '
return ('default' if self.referred_section_definition.get('autocreate', False) else None)<|docstring|>Get default value.
If the referred section has the "autocreate" attribute, it means that it is required to initialize
the settings param and the related section with default values (i.e. vpc, scaling).<|endoftext|>
|
e83e3aa3e073f659094f24807b89044453fa29b59adf64f5d7e7fc10e24ca1df
|
def from_file(self, config_parser):
'\n Initialize parameter value from config_parser.\n\n :param config_parser: the configparser object from which get the parameter\n '
section_name = get_file_section_name(self.section_key, self.section_label)
if config_parser.has_option(section_name, self.key):
self.value = config_parser.get(section_name, self.key)
if self.value:
self._check_allowed_values()
sections = []
for section_label in self.value.split(','):
sections.append(self.referred_section_type(self.referred_section_definition, self.pcluster_config, section_label=section_label.strip(), parent_section=self.owner_section).from_file(config_parser=config_parser, fail_on_absence=True))
self._add_sections(sections)
return self
|
Initialize parameter value from config_parser.
:param config_parser: the configparser object from which get the parameter
|
cli/src/pcluster/config/param_types.py
|
from_file
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def from_file(self, config_parser):
'\n Initialize parameter value from config_parser.\n\n :param config_parser: the configparser object from which get the parameter\n '
section_name = get_file_section_name(self.section_key, self.section_label)
if config_parser.has_option(section_name, self.key):
self.value = config_parser.get(section_name, self.key)
if self.value:
self._check_allowed_values()
sections = []
for section_label in self.value.split(','):
sections.append(self.referred_section_type(self.referred_section_definition, self.pcluster_config, section_label=section_label.strip(), parent_section=self.owner_section).from_file(config_parser=config_parser, fail_on_absence=True))
self._add_sections(sections)
return self
|
def from_file(self, config_parser):
'\n Initialize parameter value from config_parser.\n\n :param config_parser: the configparser object from which get the parameter\n '
section_name = get_file_section_name(self.section_key, self.section_label)
if config_parser.has_option(section_name, self.key):
self.value = config_parser.get(section_name, self.key)
if self.value:
self._check_allowed_values()
sections = []
for section_label in self.value.split(','):
sections.append(self.referred_section_type(self.referred_section_definition, self.pcluster_config, section_label=section_label.strip(), parent_section=self.owner_section).from_file(config_parser=config_parser, fail_on_absence=True))
self._add_sections(sections)
return self<|docstring|>Initialize parameter value from config_parser.
:param config_parser: the configparser object from which get the parameter<|endoftext|>
|
83bb1b4cd56299c0b0f192f6917e9e0137e55640de53d1110e684df60ddc289b
|
def validate(self):
'\n Validate the Settings Parameter.\n\n Overrides the default params validation mechanism by adding a default validation based on the number of expected\n sections. The implementation takes into account nested settings params so that the number of resources is\n validated per parent section rather than globally. So, for instance, for compute_resource_settings we check that\n no more than 3 compute resources are activated per queue, while the total number can be up to 15 (3 per queue\n section).\n '
labels = (None if (not self.value) else self.value.split(','))
max_resources = self.referred_section_definition.get('max_resources', 1)
if (labels and (len(labels) > max_resources)):
self.pcluster_config.error("Invalid number of '{0}' sections specified. Max {1} expected.".format(self.referred_section_key, max_resources))
super(SettingsParam, self).validate()
|
Validate the Settings Parameter.
Overrides the default params validation mechanism by adding a default validation based on the number of expected
sections. The implementation takes into account nested settings params so that the number of resources is
validated per parent section rather than globally. So, for instance, for compute_resource_settings we check that
no more than 3 compute resources are activated per queue, while the total number can be up to 15 (3 per queue
section).
|
cli/src/pcluster/config/param_types.py
|
validate
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def validate(self):
'\n Validate the Settings Parameter.\n\n Overrides the default params validation mechanism by adding a default validation based on the number of expected\n sections. The implementation takes into account nested settings params so that the number of resources is\n validated per parent section rather than globally. So, for instance, for compute_resource_settings we check that\n no more than 3 compute resources are activated per queue, while the total number can be up to 15 (3 per queue\n section).\n '
labels = (None if (not self.value) else self.value.split(','))
max_resources = self.referred_section_definition.get('max_resources', 1)
if (labels and (len(labels) > max_resources)):
self.pcluster_config.error("Invalid number of '{0}' sections specified. Max {1} expected.".format(self.referred_section_key, max_resources))
super(SettingsParam, self).validate()
|
def validate(self):
'\n Validate the Settings Parameter.\n\n Overrides the default params validation mechanism by adding a default validation based on the number of expected\n sections. The implementation takes into account nested settings params so that the number of resources is\n validated per parent section rather than globally. So, for instance, for compute_resource_settings we check that\n no more than 3 compute resources are activated per queue, while the total number can be up to 15 (3 per queue\n section).\n '
labels = (None if (not self.value) else self.value.split(','))
max_resources = self.referred_section_definition.get('max_resources', 1)
if (labels and (len(labels) > max_resources)):
self.pcluster_config.error("Invalid number of '{0}' sections specified. Max {1} expected.".format(self.referred_section_key, max_resources))
super(SettingsParam, self).validate()<|docstring|>Validate the Settings Parameter.
Overrides the default params validation mechanism by adding a default validation based on the number of expected
sections. The implementation takes into account nested settings params so that the number of resources is
validated per parent section rather than globally. So, for instance, for compute_resource_settings we check that
no more than 3 compute resources are activated per queue, while the total number can be up to 15 (3 per queue
section).<|endoftext|>
|
2d8fe8a295fc81a4a42e166d7380e52d15a780796185d732dc8c2c2ab1bc116f
|
def _value_eq(self, other):
'Compare settings labels ignoring positions and extra spaces.'
value1 = self.value
value2 = (other.value if other else None)
if value1:
value1 = ','.join(sorted([x.strip() for x in value1.split(',')]))
if value2:
value2 = ','.join(sorted([x.strip() for x in value2.split(',')]))
return (value1 == value2)
|
Compare settings labels ignoring positions and extra spaces.
|
cli/src/pcluster/config/param_types.py
|
_value_eq
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _value_eq(self, other):
value1 = self.value
value2 = (other.value if other else None)
if value1:
value1 = ','.join(sorted([x.strip() for x in value1.split(',')]))
if value2:
value2 = ','.join(sorted([x.strip() for x in value2.split(',')]))
return (value1 == value2)
|
def _value_eq(self, other):
value1 = self.value
value2 = (other.value if other else None)
if value1:
value1 = ','.join(sorted([x.strip() for x in value1.split(',')]))
if value2:
value2 = ','.join(sorted([x.strip() for x in value2.split(',')]))
return (value1 == value2)<|docstring|>Compare settings labels ignoring positions and extra spaces.<|endoftext|>
|
b17e6739ffb338f7344c7fbd432acf92ec81d910f1117c9e4c5e17085a9d553d
|
def _replace_default_section(self, section):
'\n Remove default section and replace with the new one.\n\n Apart from multiple sections, which are managed in a specific way, normally only one section per key is allowed.\n Since some sections are created by default to make sure they are always present, calling this method ensures\n that any existing default section of the same type will be removed from the configuration before adding the new\n one.\n '
self.pcluster_config.remove_section(self.referred_section_key, self.referred_section_definition.get('default_label'))
self.pcluster_config.add_section(section)
|
Remove default section and replace with the new one.
Apart from multiple sections, which are managed in a specific way, normally only one section per key is allowed.
Since some sections are created by default to make sure they are always present, calling this method ensures
that any existing default section of the same type will be removed from the configuration before adding the new
one.
|
cli/src/pcluster/config/param_types.py
|
_replace_default_section
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _replace_default_section(self, section):
'\n Remove default section and replace with the new one.\n\n Apart from multiple sections, which are managed in a specific way, normally only one section per key is allowed.\n Since some sections are created by default to make sure they are always present, calling this method ensures\n that any existing default section of the same type will be removed from the configuration before adding the new\n one.\n '
self.pcluster_config.remove_section(self.referred_section_key, self.referred_section_definition.get('default_label'))
self.pcluster_config.add_section(section)
|
def _replace_default_section(self, section):
'\n Remove default section and replace with the new one.\n\n Apart from multiple sections, which are managed in a specific way, normally only one section per key is allowed.\n Since some sections are created by default to make sure they are always present, calling this method ensures\n that any existing default section of the same type will be removed from the configuration before adding the new\n one.\n '
self.pcluster_config.remove_section(self.referred_section_key, self.referred_section_definition.get('default_label'))
self.pcluster_config.add_section(section)<|docstring|>Remove default section and replace with the new one.
Apart from multiple sections, which are managed in a specific way, normally only one section per key is allowed.
Since some sections are created by default to make sure they are always present, calling this method ensures
that any existing default section of the same type will be removed from the configuration before adding the new
one.<|endoftext|>
|
0aad87edb671550db1c89052e52776e08bc4868ad9804aeca3039789bd460654
|
def refresh(self):
'Update SettingsParam value to make it match actual sections in config.'
sections_labels = [section.label for (_, section) in self.pcluster_config.get_sections(self.referred_section_key).items() if ((section.parent_section == self.owner_section) or (section.parent_section is None))]
self.value = (','.join(sorted(sections_labels)) if sections_labels else None)
|
Update SettingsParam value to make it match actual sections in config.
|
cli/src/pcluster/config/param_types.py
|
refresh
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def refresh(self):
sections_labels = [section.label for (_, section) in self.pcluster_config.get_sections(self.referred_section_key).items() if ((section.parent_section == self.owner_section) or (section.parent_section is None))]
self.value = (','.join(sorted(sections_labels)) if sections_labels else None)
|
def refresh(self):
sections_labels = [section.label for (_, section) in self.pcluster_config.get_sections(self.referred_section_key).items() if ((section.parent_section == self.owner_section) or (section.parent_section is None))]
self.value = (','.join(sorted(sections_labels)) if sections_labels else None)<|docstring|>Update SettingsParam value to make it match actual sections in config.<|endoftext|>
|
8b45a49252f9236be3f174f0f91465df582874b0174f431c8c7110f4fa6fff09
|
def to_file(self, config_parser, write_defaults=False):
'Convert the param value into a section in the config_parser and initialize it.'
section = self.pcluster_config.get_section(self.referred_section_key, self.value)
if section:
for (param_key, param_definition) in self.referred_section_definition.get('params').items():
param_value = section.get_param_value(param_key)
section_name = get_file_section_name(self.section_key, self.section_label)
if ((not config_parser.has_option(section_name, self.key)) and (write_defaults or (param_value != param_definition.get('default', None)))):
_ensure_section_existence(config_parser, section_name)
config_parser.set(section_name, self.key, self.get_string_value())
section.to_file(config_parser)
|
Convert the param value into a section in the config_parser and initialize it.
|
cli/src/pcluster/config/param_types.py
|
to_file
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def to_file(self, config_parser, write_defaults=False):
section = self.pcluster_config.get_section(self.referred_section_key, self.value)
if section:
for (param_key, param_definition) in self.referred_section_definition.get('params').items():
param_value = section.get_param_value(param_key)
section_name = get_file_section_name(self.section_key, self.section_label)
if ((not config_parser.has_option(section_name, self.key)) and (write_defaults or (param_value != param_definition.get('default', None)))):
_ensure_section_existence(config_parser, section_name)
config_parser.set(section_name, self.key, self.get_string_value())
section.to_file(config_parser)
|
def to_file(self, config_parser, write_defaults=False):
section = self.pcluster_config.get_section(self.referred_section_key, self.value)
if section:
for (param_key, param_definition) in self.referred_section_definition.get('params').items():
param_value = section.get_param_value(param_key)
section_name = get_file_section_name(self.section_key, self.section_label)
if ((not config_parser.has_option(section_name, self.key)) and (write_defaults or (param_value != param_definition.get('default', None)))):
_ensure_section_existence(config_parser, section_name)
config_parser.set(section_name, self.key, self.get_string_value())
section.to_file(config_parser)<|docstring|>Convert the param value into a section in the config_parser and initialize it.<|endoftext|>
|
305d449030a3f60d60b0183f8490e21ed6e34ef8bbff731aa3917944e9e4c770
|
@property
def referred_section_labels(self):
'Return the referred section labels as a list of stripped element.'
return ([label.strip() for label in self.value.split(',')] if self.value else [])
|
Return the referred section labels as a list of stripped element.
|
cli/src/pcluster/config/param_types.py
|
referred_section_labels
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@property
def referred_section_labels(self):
return ([label.strip() for label in self.value.split(',')] if self.value else [])
|
@property
def referred_section_labels(self):
return ([label.strip() for label in self.value.split(',')] if self.value else [])<|docstring|>Return the referred section labels as a list of stripped element.<|endoftext|>
|
343109df854e0f8a945d2c4c9a0b88e4581ba94dc3b5f3754da456f6b9ad1b1f
|
@property
def label(self):
'Get the section label.'
return self._label
|
Get the section label.
|
cli/src/pcluster/config/param_types.py
|
label
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@property
def label(self):
return self._label
|
@property
def label(self):
return self._label<|docstring|>Get the section label.<|endoftext|>
|
5e556faa935db5284549393351618324fb9fb387bd2da86b39a585c0ef83f2e8
|
@label.setter
def label(self, label):
'Set the section label. Marks the PclusterConfig parent for refreshing if called.'
self._label = label
self.pcluster_config._config_updated()
|
Set the section label. Marks the PclusterConfig parent for refreshing if called.
|
cli/src/pcluster/config/param_types.py
|
label
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@label.setter
def label(self, label):
self._label = label
self.pcluster_config._config_updated()
|
@label.setter
def label(self, label):
self._label = label
self.pcluster_config._config_updated()<|docstring|>Set the section label. Marks the PclusterConfig parent for refreshing if called.<|endoftext|>
|
6156d2110dc69c6ff650723c7084c9ba48377bc0a9f664790e86d0e8b43fce49
|
def from_file(self, config_parser, fail_on_absence=False):
'Initialize section configuration parameters by parsing config file.'
params_definitions = self.definition.get('params')
section_name = get_file_section_name(self.key, self.label)
public_param_keys = set([key for (key, definition) in params_definitions.items() if (definition.get('visibility', Visibility.PUBLIC) == Visibility.PUBLIC)])
if config_parser.has_section(section_name):
for (param_key, param_definition) in params_definitions.items():
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, pcluster_config=self.pcluster_config, owner_section=self).from_file(config_parser)
self.add_param(param)
not_valid_keys = [key for (key, value) in config_parser.items(section_name) if (key not in public_param_keys)]
if not_valid_keys:
self.pcluster_config.error("The configuration parameter{0} '{1}' {2} not allowed in the [{3}] section".format(('s' if (len(not_valid_keys) > 1) else ''), ','.join(not_valid_keys), ('are' if (len(not_valid_keys) > 1) else 'is'), section_name))
elif fail_on_absence:
self.pcluster_config.error("Section '[{0}]' not found in the config file.".format(section_name))
return self
|
Initialize section configuration parameters by parsing config file.
|
cli/src/pcluster/config/param_types.py
|
from_file
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def from_file(self, config_parser, fail_on_absence=False):
params_definitions = self.definition.get('params')
section_name = get_file_section_name(self.key, self.label)
public_param_keys = set([key for (key, definition) in params_definitions.items() if (definition.get('visibility', Visibility.PUBLIC) == Visibility.PUBLIC)])
if config_parser.has_section(section_name):
for (param_key, param_definition) in params_definitions.items():
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, pcluster_config=self.pcluster_config, owner_section=self).from_file(config_parser)
self.add_param(param)
not_valid_keys = [key for (key, value) in config_parser.items(section_name) if (key not in public_param_keys)]
if not_valid_keys:
self.pcluster_config.error("The configuration parameter{0} '{1}' {2} not allowed in the [{3}] section".format(('s' if (len(not_valid_keys) > 1) else ), ','.join(not_valid_keys), ('are' if (len(not_valid_keys) > 1) else 'is'), section_name))
elif fail_on_absence:
self.pcluster_config.error("Section '[{0}]' not found in the config file.".format(section_name))
return self
|
def from_file(self, config_parser, fail_on_absence=False):
params_definitions = self.definition.get('params')
section_name = get_file_section_name(self.key, self.label)
public_param_keys = set([key for (key, definition) in params_definitions.items() if (definition.get('visibility', Visibility.PUBLIC) == Visibility.PUBLIC)])
if config_parser.has_section(section_name):
for (param_key, param_definition) in params_definitions.items():
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, pcluster_config=self.pcluster_config, owner_section=self).from_file(config_parser)
self.add_param(param)
not_valid_keys = [key for (key, value) in config_parser.items(section_name) if (key not in public_param_keys)]
if not_valid_keys:
self.pcluster_config.error("The configuration parameter{0} '{1}' {2} not allowed in the [{3}] section".format(('s' if (len(not_valid_keys) > 1) else ), ','.join(not_valid_keys), ('are' if (len(not_valid_keys) > 1) else 'is'), section_name))
elif fail_on_absence:
self.pcluster_config.error("Section '[{0}]' not found in the config file.".format(section_name))
return self<|docstring|>Initialize section configuration parameters by parsing config file.<|endoftext|>
|
6a3ccc50e729fa6ffb5b1a5ceb399d5246fc5a91884bedf5b9c1d6e55ebe153b
|
def _from_definition(self):
'Initialize parameters with default values.'
for (param_key, param_definition) in self.definition.get('params').items():
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config, owner_section=self)
self.add_param(param)
|
Initialize parameters with default values.
|
cli/src/pcluster/config/param_types.py
|
_from_definition
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def _from_definition(self):
for (param_key, param_definition) in self.definition.get('params').items():
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config, owner_section=self)
self.add_param(param)
|
def _from_definition(self):
for (param_key, param_definition) in self.definition.get('params').items():
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config, owner_section=self)
self.add_param(param)<|docstring|>Initialize parameters with default values.<|endoftext|>
|
1fcbcbc6d8fa9ce6343321d58df2e6e4a7464634e7b031373b75f0d8d34adc4a
|
def validate(self):
'Call the validator function of the section and of all the parameters.'
if self.params:
section_name = get_file_section_name(self.key, self.label)
LOGGER.debug("Validating section '[%s]'...", section_name)
for validation_func in self.definition.get('validators', []):
(errors, warnings) = validation_func(self.key, self.label, self.pcluster_config)
if errors:
self.pcluster_config.error('The section [{0}] is incorrectly configured\n{1}'.format(section_name, '\n'.join(errors)))
elif warnings:
self.pcluster_config.warn('The section [{0}] is incorrectly configured\n{1}'.format(section_name, '\n'.join(warnings)))
else:
LOGGER.debug("Section '[%s]' is valid", section_name)
LOGGER.debug("Validating parameters of section '[%s]'...", section_name)
for (param_key, param_definition) in self.definition.get('params').items():
param_type = param_definition.get('type', self.get_default_param_type())
param = self.get_param(param_key)
if param:
param.validate()
else:
param_type(self.key, self.label, param_key, param_definition, self.pcluster_config).validate()
LOGGER.debug("Parameters validation of section '[%s]' completed correctly.", section_name)
|
Call the validator function of the section and of all the parameters.
|
cli/src/pcluster/config/param_types.py
|
validate
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def validate(self):
if self.params:
section_name = get_file_section_name(self.key, self.label)
LOGGER.debug("Validating section '[%s]'...", section_name)
for validation_func in self.definition.get('validators', []):
(errors, warnings) = validation_func(self.key, self.label, self.pcluster_config)
if errors:
self.pcluster_config.error('The section [{0}] is incorrectly configured\n{1}'.format(section_name, '\n'.join(errors)))
elif warnings:
self.pcluster_config.warn('The section [{0}] is incorrectly configured\n{1}'.format(section_name, '\n'.join(warnings)))
else:
LOGGER.debug("Section '[%s]' is valid", section_name)
LOGGER.debug("Validating parameters of section '[%s]'...", section_name)
for (param_key, param_definition) in self.definition.get('params').items():
param_type = param_definition.get('type', self.get_default_param_type())
param = self.get_param(param_key)
if param:
param.validate()
else:
param_type(self.key, self.label, param_key, param_definition, self.pcluster_config).validate()
LOGGER.debug("Parameters validation of section '[%s]' completed correctly.", section_name)
|
def validate(self):
if self.params:
section_name = get_file_section_name(self.key, self.label)
LOGGER.debug("Validating section '[%s]'...", section_name)
for validation_func in self.definition.get('validators', []):
(errors, warnings) = validation_func(self.key, self.label, self.pcluster_config)
if errors:
self.pcluster_config.error('The section [{0}] is incorrectly configured\n{1}'.format(section_name, '\n'.join(errors)))
elif warnings:
self.pcluster_config.warn('The section [{0}] is incorrectly configured\n{1}'.format(section_name, '\n'.join(warnings)))
else:
LOGGER.debug("Section '[%s]' is valid", section_name)
LOGGER.debug("Validating parameters of section '[%s]'...", section_name)
for (param_key, param_definition) in self.definition.get('params').items():
param_type = param_definition.get('type', self.get_default_param_type())
param = self.get_param(param_key)
if param:
param.validate()
else:
param_type(self.key, self.label, param_key, param_definition, self.pcluster_config).validate()
LOGGER.debug("Parameters validation of section '[%s]' completed correctly.", section_name)<|docstring|>Call the validator function of the section and of all the parameters.<|endoftext|>
|
f0e38850898460553daff50bd42535fd37905587ebf4c3f65f97dce15eeddbd6
|
def to_file(self, config_parser, write_defaults=False):
'Create the section and add all the parameters in the config_parser.'
section_name = get_file_section_name(self.key, self.label)
for (param_key, param_definition) in self.definition.get('params').items():
if (param_definition.get('visibility', Visibility.PUBLIC) == Visibility.PUBLIC):
param = self.get_param(param_key)
if (not param):
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config)
if (write_defaults or (param.value != param_definition.get('default', None))):
_ensure_section_existence(config_parser, section_name)
param.to_file(config_parser, write_defaults)
|
Create the section and add all the parameters in the config_parser.
|
cli/src/pcluster/config/param_types.py
|
to_file
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def to_file(self, config_parser, write_defaults=False):
section_name = get_file_section_name(self.key, self.label)
for (param_key, param_definition) in self.definition.get('params').items():
if (param_definition.get('visibility', Visibility.PUBLIC) == Visibility.PUBLIC):
param = self.get_param(param_key)
if (not param):
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config)
if (write_defaults or (param.value != param_definition.get('default', None))):
_ensure_section_existence(config_parser, section_name)
param.to_file(config_parser, write_defaults)
|
def to_file(self, config_parser, write_defaults=False):
section_name = get_file_section_name(self.key, self.label)
for (param_key, param_definition) in self.definition.get('params').items():
if (param_definition.get('visibility', Visibility.PUBLIC) == Visibility.PUBLIC):
param = self.get_param(param_key)
if (not param):
param_type = param_definition.get('type', self.get_default_param_type())
param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config)
if (write_defaults or (param.value != param_definition.get('default', None))):
_ensure_section_existence(config_parser, section_name)
param.to_file(config_parser, write_defaults)<|docstring|>Create the section and add all the parameters in the config_parser.<|endoftext|>
|
fe2e7ba55ec402f2de138ea5ba45f85104ed49ab84a2365f337e66fe18ae462b
|
def add_param(self, param):
'\n Add a Param to the Section.\n\n The internal representation is a dictionary like:\n {\n "key_name": Param,\n "base_os": Param,\n "use_public_ips": BoolParam,\n ...\n }\n :param param: the Param object to add to the Section\n '
self.params[param.key] = param
|
Add a Param to the Section.
The internal representation is a dictionary like:
{
"key_name": Param,
"base_os": Param,
"use_public_ips": BoolParam,
...
}
:param param: the Param object to add to the Section
|
cli/src/pcluster/config/param_types.py
|
add_param
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def add_param(self, param):
'\n Add a Param to the Section.\n\n The internal representation is a dictionary like:\n {\n "key_name": Param,\n "base_os": Param,\n "use_public_ips": BoolParam,\n ...\n }\n :param param: the Param object to add to the Section\n '
self.params[param.key] = param
|
def add_param(self, param):
'\n Add a Param to the Section.\n\n The internal representation is a dictionary like:\n {\n "key_name": Param,\n "base_os": Param,\n "use_public_ips": BoolParam,\n ...\n }\n :param param: the Param object to add to the Section\n '
self.params[param.key] = param<|docstring|>Add a Param to the Section.
The internal representation is a dictionary like:
{
"key_name": Param,
"base_os": Param,
"use_public_ips": BoolParam,
...
}
:param param: the Param object to add to the Section<|endoftext|>
|
e3077ee830f2262df97df8f7bdbe3417b8fc9d0415f5faa3dd241fd00a2234db
|
def get_param(self, param_key):
'\n Return the Param object corresponding to the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :return: a Param object\n '
return self.params[param_key]
|
Return the Param object corresponding to the given key.
:param param_key: the key to identify the Param object in the internal dictionary
:return: a Param object
|
cli/src/pcluster/config/param_types.py
|
get_param
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_param(self, param_key):
'\n Return the Param object corresponding to the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :return: a Param object\n '
return self.params[param_key]
|
def get_param(self, param_key):
'\n Return the Param object corresponding to the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :return: a Param object\n '
return self.params[param_key]<|docstring|>Return the Param object corresponding to the given key.
:param param_key: the key to identify the Param object in the internal dictionary
:return: a Param object<|endoftext|>
|
64880326b1ad787ea0b4d20ea852655136d794d75ce98c37c4f3280db1413e72
|
def set_param(self, param_key, param_obj):
'\n Set a new Param object at the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :param param_obj: a Param object\n '
self.params[param_key] = param_obj
|
Set a new Param object at the given key.
:param param_key: the key to identify the Param object in the internal dictionary
:param param_obj: a Param object
|
cli/src/pcluster/config/param_types.py
|
set_param
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def set_param(self, param_key, param_obj):
'\n Set a new Param object at the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :param param_obj: a Param object\n '
self.params[param_key] = param_obj
|
def set_param(self, param_key, param_obj):
'\n Set a new Param object at the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :param param_obj: a Param object\n '
self.params[param_key] = param_obj<|docstring|>Set a new Param object at the given key.
:param param_key: the key to identify the Param object in the internal dictionary
:param param_obj: a Param object<|endoftext|>
|
b2a85ef2e657bf689990ecd80c8e06ec0782ead7641cd15ff0c1ff589c14d6df
|
def get_param_value(self, param_key):
'\n Return the value of the Param object corresponding to the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :return: the value of the Param object or None if the param is not present in the Section\n '
return (self.get_param(param_key).value if self.get_param(param_key) else None)
|
Return the value of the Param object corresponding to the given key.
:param param_key: the key to identify the Param object in the internal dictionary
:return: the value of the Param object or None if the param is not present in the Section
|
cli/src/pcluster/config/param_types.py
|
get_param_value
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def get_param_value(self, param_key):
'\n Return the value of the Param object corresponding to the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :return: the value of the Param object or None if the param is not present in the Section\n '
return (self.get_param(param_key).value if self.get_param(param_key) else None)
|
def get_param_value(self, param_key):
'\n Return the value of the Param object corresponding to the given key.\n\n :param param_key: the key to identify the Param object in the internal dictionary\n :return: the value of the Param object or None if the param is not present in the Section\n '
return (self.get_param(param_key).value if self.get_param(param_key) else None)<|docstring|>Return the value of the Param object corresponding to the given key.
:param param_key: the key to identify the Param object in the internal dictionary
:return: the value of the Param object or None if the param is not present in the Section<|endoftext|>
|
6455d38e548455c846d95a84376111801a6deed81e0431bd064825bf888c79f7
|
def refresh(self):
'Refresh all parameters.'
for (_, param) in self.params.items():
param.refresh()
|
Refresh all parameters.
|
cli/src/pcluster/config/param_types.py
|
refresh
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def refresh(self):
for (_, param) in self.params.items():
param.refresh()
|
def refresh(self):
for (_, param) in self.params.items():
param.refresh()<|docstring|>Refresh all parameters.<|endoftext|>
|
ccfe45ea72203ce28e182e8dad1713bddac8dbe1d685ccb89d8d0342356ae747
|
@abstractmethod
def from_storage(self, storage_params):
'Initialize section configuration parameters by parsing storage configuration.'
pass
|
Initialize section configuration parameters by parsing storage configuration.
|
cli/src/pcluster/config/param_types.py
|
from_storage
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@abstractmethod
def from_storage(self, storage_params):
pass
|
@abstractmethod
def from_storage(self, storage_params):
pass<|docstring|>Initialize section configuration parameters by parsing storage configuration.<|endoftext|>
|
aad2311624f2cf6bec6b15fb139b8eb7b94e73bc70571649359568ef01a31d4e
|
@abstractmethod
def to_storage(self, storage_params):
'Convert section to storage representation.'
pass
|
Convert section to storage representation.
|
cli/src/pcluster/config/param_types.py
|
to_storage
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@abstractmethod
def to_storage(self, storage_params):
pass
|
@abstractmethod
def to_storage(self, storage_params):
pass<|docstring|>Convert section to storage representation.<|endoftext|>
|
e67fb330e9af2155182dfc63a38950b213ff1b34abc6db417ff7cda10bc2ecad
|
@abstractmethod
def get_default_param_type(self):
'\n Get the default Param type managed by the Section type.\n\n If no "type" attribute is specified in mappings, parameters declared inside the current section will be assigned\n to this default type.\n '
|
Get the default Param type managed by the Section type.
If no "type" attribute is specified in mappings, parameters declared inside the current section will be assigned
to this default type.
|
cli/src/pcluster/config/param_types.py
|
get_default_param_type
|
ddeidda/aws-parallelcluster
| 1
|
python
|
@abstractmethod
def get_default_param_type(self):
'\n Get the default Param type managed by the Section type.\n\n If no "type" attribute is specified in mappings, parameters declared inside the current section will be assigned\n to this default type.\n '
|
@abstractmethod
def get_default_param_type(self):
'\n Get the default Param type managed by the Section type.\n\n If no "type" attribute is specified in mappings, parameters declared inside the current section will be assigned\n to this default type.\n '<|docstring|>Get the default Param type managed by the Section type.
If no "type" attribute is specified in mappings, parameters declared inside the current section will be assigned
to this default type.<|endoftext|>
|
6c34ac3c9daaf6daa4ee56e5d416385988d04fce1a24fea4bceca6ac8b05d6e0
|
def has_metadata(self):
'\n Tells if metadata information should be stored about the Section.\n\n By default metadata is stored for all cfn sections, while Json sections use their own mechanism.\n '
return True
|
Tells if metadata information should be stored about the Section.
By default metadata is stored for all cfn sections, while Json sections use their own mechanism.
|
cli/src/pcluster/config/param_types.py
|
has_metadata
|
ddeidda/aws-parallelcluster
| 1
|
python
|
def has_metadata(self):
'\n Tells if metadata information should be stored about the Section.\n\n By default metadata is stored for all cfn sections, while Json sections use their own mechanism.\n '
return True
|
def has_metadata(self):
'\n Tells if metadata information should be stored about the Section.\n\n By default metadata is stored for all cfn sections, while Json sections use their own mechanism.\n '
return True<|docstring|>Tells if metadata information should be stored about the Section.
By default metadata is stored for all cfn sections, while Json sections use their own mechanism.<|endoftext|>
|
71ecd8fce396d455523f593a4a588213ef327305bf6d3c0f94040ebd1d2d9b43
|
def __init__(self):
'Initializes the configuration object'
self.threshold_rsquared = 0.75
self.numsigfigs = 3
self.results_db_path = None
self.img_dir = None
self.performername = None
self.performerprototype = None
self.baselinename = None
self.tanum = None
self.perf_img_dir = None
|
Initializes the configuration object
|
spar_python/report_generation/common/config.py
|
__init__
|
nathanawmk/SPARTA
| 37
|
python
|
def __init__(self):
self.threshold_rsquared = 0.75
self.numsigfigs = 3
self.results_db_path = None
self.img_dir = None
self.performername = None
self.performerprototype = None
self.baselinename = None
self.tanum = None
self.perf_img_dir = None
|
def __init__(self):
self.threshold_rsquared = 0.75
self.numsigfigs = 3
self.results_db_path = None
self.img_dir = None
self.performername = None
self.performerprototype = None
self.baselinename = None
self.tanum = None
self.perf_img_dir = None<|docstring|>Initializes the configuration object<|endoftext|>
|
081bc731e104172f7b34f3728b97506d2399f4a751c3dcb8066cd4a48a7ab127
|
@classmethod
def normalize_features(self, features, mean_rgb, stddev_rgb, stats_shape):
'Normalize the image given the means and stddevs.'
features -= tf.constant(mean_rgb, shape=stats_shape, dtype=features.dtype)
features /= tf.constant(stddev_rgb, shape=stats_shape, dtype=features.dtype)
return features
|
Normalize the image given the means and stddevs.
|
run/estimator_model_maker.py
|
normalize_features
|
cap-lab/S3NAS
| 15
|
python
|
@classmethod
def normalize_features(self, features, mean_rgb, stddev_rgb, stats_shape):
features -= tf.constant(mean_rgb, shape=stats_shape, dtype=features.dtype)
features /= tf.constant(stddev_rgb, shape=stats_shape, dtype=features.dtype)
return features
|
@classmethod
def normalize_features(self, features, mean_rgb, stddev_rgb, stats_shape):
features -= tf.constant(mean_rgb, shape=stats_shape, dtype=features.dtype)
features /= tf.constant(stddev_rgb, shape=stats_shape, dtype=features.dtype)
return features<|docstring|>Normalize the image given the means and stddevs.<|endoftext|>
|
0a972bcf98a18e64c052c402d9b5f63ef22bf0359b9c336480fc6721098fe3f7
|
def build_EMAed_op_scaffold_fn(self, moving_average_decay, train_op):
'\n :return: ema_op=None or scaffold_fn=None if not used, otherwise returns some valid instances.\n '
(EMAed_train_op, scaffold_fn) = (train_op, None)
is_training = (train_op is not None)
has_moving_average_decay = (moving_average_decay > 0)
if has_moving_average_decay:
ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay, num_updates=self.global_step)
ema_vars = graph.graph_utils.get_ema_vars()
if is_training:
with tf.control_dependencies([train_op]):
EMAed_train_op = ema.apply(ema_vars)
else:
restore_vars_dict = ema.variables_to_restore(ema_vars)
def _scaffold_fn():
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
scaffold_fn = _scaffold_fn
return (EMAed_train_op, scaffold_fn)
|
:return: ema_op=None or scaffold_fn=None if not used, otherwise returns some valid instances.
|
run/estimator_model_maker.py
|
build_EMAed_op_scaffold_fn
|
cap-lab/S3NAS
| 15
|
python
|
def build_EMAed_op_scaffold_fn(self, moving_average_decay, train_op):
'\n \n '
(EMAed_train_op, scaffold_fn) = (train_op, None)
is_training = (train_op is not None)
has_moving_average_decay = (moving_average_decay > 0)
if has_moving_average_decay:
ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay, num_updates=self.global_step)
ema_vars = graph.graph_utils.get_ema_vars()
if is_training:
with tf.control_dependencies([train_op]):
EMAed_train_op = ema.apply(ema_vars)
else:
restore_vars_dict = ema.variables_to_restore(ema_vars)
def _scaffold_fn():
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
scaffold_fn = _scaffold_fn
return (EMAed_train_op, scaffold_fn)
|
def build_EMAed_op_scaffold_fn(self, moving_average_decay, train_op):
'\n \n '
(EMAed_train_op, scaffold_fn) = (train_op, None)
is_training = (train_op is not None)
has_moving_average_decay = (moving_average_decay > 0)
if has_moving_average_decay:
ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay, num_updates=self.global_step)
ema_vars = graph.graph_utils.get_ema_vars()
if is_training:
with tf.control_dependencies([train_op]):
EMAed_train_op = ema.apply(ema_vars)
else:
restore_vars_dict = ema.variables_to_restore(ema_vars)
def _scaffold_fn():
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
scaffold_fn = _scaffold_fn
return (EMAed_train_op, scaffold_fn)<|docstring|>:return: ema_op=None or scaffold_fn=None if not used, otherwise returns some valid instances.<|endoftext|>
|
bd1fb105ed6a8f269515f75a0b954b49fef438dd46211a3fe84e8b1a5f949966
|
def metric_fn(self, labels, logits):
'Evaluation metric function. Evaluates accuracy.\n\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the model\n to the `metric_fn`, provide as part of the `eval_metrics`. See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `eval_metrics`.\n\n Args:\n labels: `Tensor` with shape `[batch]`.\n logits: `Tensor` with shape `[batch, num_classes]`.\n\n Returns:\n A dict of the metrics to return from evaluation.\n '
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy}
|
Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
|
run/estimator_model_maker.py
|
metric_fn
|
cap-lab/S3NAS
| 15
|
python
|
def metric_fn(self, labels, logits):
'Evaluation metric function. Evaluates accuracy.\n\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the model\n to the `metric_fn`, provide as part of the `eval_metrics`. See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `eval_metrics`.\n\n Args:\n labels: `Tensor` with shape `[batch]`.\n logits: `Tensor` with shape `[batch, num_classes]`.\n\n Returns:\n A dict of the metrics to return from evaluation.\n '
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy}
|
def metric_fn(self, labels, logits):
'Evaluation metric function. Evaluates accuracy.\n\n This function is executed on the CPU and should not directly reference\n any Tensors in the rest of the `model_fn`. To pass Tensors from the model\n to the `metric_fn`, provide as part of the `eval_metrics`. See\n https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec\n for more information.\n\n Arguments should match the list of `Tensor` objects passed as the second\n element in the tuple passed to `eval_metrics`.\n\n Args:\n labels: `Tensor` with shape `[batch]`.\n logits: `Tensor` with shape `[batch, num_classes]`.\n\n Returns:\n A dict of the metrics to return from evaluation.\n '
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy}<|docstring|>Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.<|endoftext|>
|
ff15a656c5de1b221819e901a087cf7ea7eb9a45816de56cab86d67f142c02bb
|
def set_gs_and_td_and_get_model_fn(features, labels, mode, params):
'params are automatically built by tensorflow, and additionally added in train_eval.py build_estimator'
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tf.keras.backend.set_learning_phase(is_training)
self.global_step = tf.train.get_global_step()
preprocessed_features = self.preprocess_features(features)
(logits, latency_val, self.tensordict_to_write_on_tensorboard) = self.logits_latency_tensordict(preprocessed_features, mode, params, FLAGS.ignore_latency, FLAGS.log_searchableblock_tensor)
loss = self.build_losses(logits, labels, latency_val, params, FLAGS.ignore_latency)
train_op = None
if is_training:
lr = self.build_learning_rate(params)
optim = self.build_optimizer(lr)
train_op = self.build_train_op(optim, loss, clip_gradients=FLAGS.clip_gradients)
(train_op, ema_scaffold_fn) = self.build_EMAed_op_scaffold_fn(FLAGS.moving_average_decay, train_op)
eval_metrics = None
if (mode == tf.estimator.ModeKeys.EVAL):
eval_metrics = (self.metric_fn, [labels, logits])
host_call = None
if (is_training and (not FLAGS.skip_host_call)):
gs_t = tf.reshape(self.global_step, [1])
current_epoch = (tf.cast(self.global_step, tf.float32) / params['steps_per_epoch'])
summary_dict = OrderedDict(dict(gs=gs_t, current_epoch=current_epoch, lr=lr, total_loss=loss, latency=latency_val))
summary_dict.update(self.tensordict_to_write_on_tensorboard)
host_call = self.build_host_call_for_tensorboard(summary_dict)
return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op, host_call=host_call, eval_metrics=eval_metrics, scaffold_fn=ema_scaffold_fn)
|
params are automatically built by tensorflow, and additionally added in train_eval.py build_estimator
|
run/estimator_model_maker.py
|
set_gs_and_td_and_get_model_fn
|
cap-lab/S3NAS
| 15
|
python
|
def set_gs_and_td_and_get_model_fn(features, labels, mode, params):
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tf.keras.backend.set_learning_phase(is_training)
self.global_step = tf.train.get_global_step()
preprocessed_features = self.preprocess_features(features)
(logits, latency_val, self.tensordict_to_write_on_tensorboard) = self.logits_latency_tensordict(preprocessed_features, mode, params, FLAGS.ignore_latency, FLAGS.log_searchableblock_tensor)
loss = self.build_losses(logits, labels, latency_val, params, FLAGS.ignore_latency)
train_op = None
if is_training:
lr = self.build_learning_rate(params)
optim = self.build_optimizer(lr)
train_op = self.build_train_op(optim, loss, clip_gradients=FLAGS.clip_gradients)
(train_op, ema_scaffold_fn) = self.build_EMAed_op_scaffold_fn(FLAGS.moving_average_decay, train_op)
eval_metrics = None
if (mode == tf.estimator.ModeKeys.EVAL):
eval_metrics = (self.metric_fn, [labels, logits])
host_call = None
if (is_training and (not FLAGS.skip_host_call)):
gs_t = tf.reshape(self.global_step, [1])
current_epoch = (tf.cast(self.global_step, tf.float32) / params['steps_per_epoch'])
summary_dict = OrderedDict(dict(gs=gs_t, current_epoch=current_epoch, lr=lr, total_loss=loss, latency=latency_val))
summary_dict.update(self.tensordict_to_write_on_tensorboard)
host_call = self.build_host_call_for_tensorboard(summary_dict)
return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op, host_call=host_call, eval_metrics=eval_metrics, scaffold_fn=ema_scaffold_fn)
|
def set_gs_and_td_and_get_model_fn(features, labels, mode, params):
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tf.keras.backend.set_learning_phase(is_training)
self.global_step = tf.train.get_global_step()
preprocessed_features = self.preprocess_features(features)
(logits, latency_val, self.tensordict_to_write_on_tensorboard) = self.logits_latency_tensordict(preprocessed_features, mode, params, FLAGS.ignore_latency, FLAGS.log_searchableblock_tensor)
loss = self.build_losses(logits, labels, latency_val, params, FLAGS.ignore_latency)
train_op = None
if is_training:
lr = self.build_learning_rate(params)
optim = self.build_optimizer(lr)
train_op = self.build_train_op(optim, loss, clip_gradients=FLAGS.clip_gradients)
(train_op, ema_scaffold_fn) = self.build_EMAed_op_scaffold_fn(FLAGS.moving_average_decay, train_op)
eval_metrics = None
if (mode == tf.estimator.ModeKeys.EVAL):
eval_metrics = (self.metric_fn, [labels, logits])
host_call = None
if (is_training and (not FLAGS.skip_host_call)):
gs_t = tf.reshape(self.global_step, [1])
current_epoch = (tf.cast(self.global_step, tf.float32) / params['steps_per_epoch'])
summary_dict = OrderedDict(dict(gs=gs_t, current_epoch=current_epoch, lr=lr, total_loss=loss, latency=latency_val))
summary_dict.update(self.tensordict_to_write_on_tensorboard)
host_call = self.build_host_call_for_tensorboard(summary_dict)
return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op, host_call=host_call, eval_metrics=eval_metrics, scaffold_fn=ema_scaffold_fn)<|docstring|>params are automatically built by tensorflow, and additionally added in train_eval.py build_estimator<|endoftext|>
|
606f64b52fd18fc26363c4177c74f25fe8fd48a2276a341909b13b2fc80fdb50
|
def host_call_fn(**kwargs):
'\n writes the {"tag_name": tensor} dict to tensorboard. got idea from\n https://github.com/tensorflow/tensor2tensor/blob/bf33311314005528482ea50b098d1aca8da85d84/tensor2tensor/utils/t2t_model.py#L2157\n '
gs = kwargs.pop('gs')[0]
with tf.contrib.summary.create_file_writer(FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with tf.contrib.summary.always_record_summaries():
for (name, tensor) in sorted(six.iteritems(kwargs)):
half_tensor = tf.cast(tensor, tf.float16)
tf.contrib.summary.scalar(name, half_tensor[0], step=gs)
return tf.contrib.summary.all_summary_ops()
|
writes the {"tag_name": tensor} dict to tensorboard. got idea from
https://github.com/tensorflow/tensor2tensor/blob/bf33311314005528482ea50b098d1aca8da85d84/tensor2tensor/utils/t2t_model.py#L2157
|
run/estimator_model_maker.py
|
host_call_fn
|
cap-lab/S3NAS
| 15
|
python
|
def host_call_fn(**kwargs):
'\n writes the {"tag_name": tensor} dict to tensorboard. got idea from\n https://github.com/tensorflow/tensor2tensor/blob/bf33311314005528482ea50b098d1aca8da85d84/tensor2tensor/utils/t2t_model.py#L2157\n '
gs = kwargs.pop('gs')[0]
with tf.contrib.summary.create_file_writer(FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with tf.contrib.summary.always_record_summaries():
for (name, tensor) in sorted(six.iteritems(kwargs)):
half_tensor = tf.cast(tensor, tf.float16)
tf.contrib.summary.scalar(name, half_tensor[0], step=gs)
return tf.contrib.summary.all_summary_ops()
|
def host_call_fn(**kwargs):
'\n writes the {"tag_name": tensor} dict to tensorboard. got idea from\n https://github.com/tensorflow/tensor2tensor/blob/bf33311314005528482ea50b098d1aca8da85d84/tensor2tensor/utils/t2t_model.py#L2157\n '
gs = kwargs.pop('gs')[0]
with tf.contrib.summary.create_file_writer(FLAGS.model_dir, max_queue=FLAGS.iterations_per_loop).as_default():
with tf.contrib.summary.always_record_summaries():
for (name, tensor) in sorted(six.iteritems(kwargs)):
half_tensor = tf.cast(tensor, tf.float16)
tf.contrib.summary.scalar(name, half_tensor[0], step=gs)
return tf.contrib.summary.all_summary_ops()<|docstring|>writes the {"tag_name": tensor} dict to tensorboard. got idea from
https://github.com/tensorflow/tensor2tensor/blob/bf33311314005528482ea50b098d1aca8da85d84/tensor2tensor/utils/t2t_model.py#L2157<|endoftext|>
|
85c37339874ca243a6c51ace70b948cec938373608e9bf3bf696e6f9ae84a604
|
def backtr(df, vcol, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
'Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.\n :param df: the source DataFrame\n :param vcol: the column with the variable to transfrom\n :param vr: the transformation table, 1D ndarray with the original values\n :param vrg: the transformation table, 1D ndarray with the trasnformed variable\n :param zmin: lower trimming limits\n :param zmax: upper trimming limits\n :param ltail: lower tail value\n :param ltpar: lower tail extrapolation parameter\n :param utail: upper tail value\n :param utpar: upper tail extrapolation parameter\n :return: TODO\n '
EPSLON = 1e-20
nd = len(df)
nt = len(vr)
backtr = np.zeros(nd)
vrgs = df[vcol].values
for id in range(0, nd):
if (vrgs[id] <= vrg[0]):
backtr[id] = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs[id])
if (ltail == 1):
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif (ltail == 2):
cpow = (1.0 / ltpar)
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
elif (vrgs[id] >= vrg[(nt - 1)]):
backtr[id] = vr[(nt - 1)]
cdfhi = gcum(vrg[(nt - 1)])
cdfbt = gcum(vrgs[id])
if (utail == 1):
backtr[id] = powint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, 1.0)
elif (utail == 2):
cpow = (1.0 / utpar)
backtr[id] = powint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, cpow)
elif (utail == 4):
plambda = ((vr[(nt - 1)] ** utpar) * (1.0 - gcum(vrg[(nt - 1)])))
backtr[id] = ((plambda / (1.0 - gcum(vrgs))) ** (1.0 / utpar))
else:
j = locate(vrg, 1, nt, vrgs[id])
j = max(min((nt - 2), j), 1)
backtr[id] = powint(vrg[j], vrg[(j + 1)], vr[j], vr[(j + 1)], vrgs[id], 1.0)
return backtr
|
Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.
:param df: the source DataFrame
:param vcol: the column with the variable to transfrom
:param vr: the transformation table, 1D ndarray with the original values
:param vrg: the transformation table, 1D ndarray with the trasnformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
:return: TODO
|
geostatspy/geostats.py
|
backtr
|
shohirose/GeostatsPy
| 284
|
python
|
def backtr(df, vcol, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
'Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.\n :param df: the source DataFrame\n :param vcol: the column with the variable to transfrom\n :param vr: the transformation table, 1D ndarray with the original values\n :param vrg: the transformation table, 1D ndarray with the trasnformed variable\n :param zmin: lower trimming limits\n :param zmax: upper trimming limits\n :param ltail: lower tail value\n :param ltpar: lower tail extrapolation parameter\n :param utail: upper tail value\n :param utpar: upper tail extrapolation parameter\n :return: TODO\n '
EPSLON = 1e-20
nd = len(df)
nt = len(vr)
backtr = np.zeros(nd)
vrgs = df[vcol].values
for id in range(0, nd):
if (vrgs[id] <= vrg[0]):
backtr[id] = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs[id])
if (ltail == 1):
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif (ltail == 2):
cpow = (1.0 / ltpar)
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
elif (vrgs[id] >= vrg[(nt - 1)]):
backtr[id] = vr[(nt - 1)]
cdfhi = gcum(vrg[(nt - 1)])
cdfbt = gcum(vrgs[id])
if (utail == 1):
backtr[id] = powint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, 1.0)
elif (utail == 2):
cpow = (1.0 / utpar)
backtr[id] = powint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, cpow)
elif (utail == 4):
plambda = ((vr[(nt - 1)] ** utpar) * (1.0 - gcum(vrg[(nt - 1)])))
backtr[id] = ((plambda / (1.0 - gcum(vrgs))) ** (1.0 / utpar))
else:
j = locate(vrg, 1, nt, vrgs[id])
j = max(min((nt - 2), j), 1)
backtr[id] = powint(vrg[j], vrg[(j + 1)], vr[j], vr[(j + 1)], vrgs[id], 1.0)
return backtr
|
def backtr(df, vcol, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
'Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.\n :param df: the source DataFrame\n :param vcol: the column with the variable to transfrom\n :param vr: the transformation table, 1D ndarray with the original values\n :param vrg: the transformation table, 1D ndarray with the trasnformed variable\n :param zmin: lower trimming limits\n :param zmax: upper trimming limits\n :param ltail: lower tail value\n :param ltpar: lower tail extrapolation parameter\n :param utail: upper tail value\n :param utpar: upper tail extrapolation parameter\n :return: TODO\n '
EPSLON = 1e-20
nd = len(df)
nt = len(vr)
backtr = np.zeros(nd)
vrgs = df[vcol].values
for id in range(0, nd):
if (vrgs[id] <= vrg[0]):
backtr[id] = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs[id])
if (ltail == 1):
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif (ltail == 2):
cpow = (1.0 / ltpar)
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
elif (vrgs[id] >= vrg[(nt - 1)]):
backtr[id] = vr[(nt - 1)]
cdfhi = gcum(vrg[(nt - 1)])
cdfbt = gcum(vrgs[id])
if (utail == 1):
backtr[id] = powint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, 1.0)
elif (utail == 2):
cpow = (1.0 / utpar)
backtr[id] = powint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, cpow)
elif (utail == 4):
plambda = ((vr[(nt - 1)] ** utpar) * (1.0 - gcum(vrg[(nt - 1)])))
backtr[id] = ((plambda / (1.0 - gcum(vrgs))) ** (1.0 / utpar))
else:
j = locate(vrg, 1, nt, vrgs[id])
j = max(min((nt - 2), j), 1)
backtr[id] = powint(vrg[j], vrg[(j + 1)], vr[j], vr[(j + 1)], vrgs[id], 1.0)
return backtr<|docstring|>Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.
:param df: the source DataFrame
:param vcol: the column with the variable to transfrom
:param vr: the transformation table, 1D ndarray with the original values
:param vrg: the transformation table, 1D ndarray with the trasnformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
:return: TODO<|endoftext|>
|
14d4b24b69166a705698ed095330f63fab35712f2ec5fcbfb7915b5a7fded977
|
def backtr_value(vrgs, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
'Back transform a single value with a provided transformation table and tail extrapolation.\n :param vrgs: value to transform\n :param vr: the transformation table, 1D ndarray with the original values\n :param vrg: the transformation table, 1D ndarray with the trasnformed variable\n :param zmin: lower trimming limits\n :param zmax: upper trimming limits\n :param ltail: lower tail value\n :param ltpar: lower tail extrapolation parameter\n :param utail: upper tail value\n :param utpar: upper tail extrapolation parameter\n :return: TODO\n '
EPSLON = 1e-20
nt = len(vr)
if (vrgs <= vrg[0]):
backtr = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs)
if (ltail == 1):
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif (ltail == 2):
cpow = (1.0 / ltpar)
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
elif (vrgs >= vrg[(nt - 1)]):
backtr = vr[(nt - 1)]
cdfhi = gcum(vrg[(nt - 1)])
cdfbt = gcum(vrgs)
if (utail == 1):
backtr = dpowint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, 1.0)
elif (utail == 2):
cpow = (1.0 / utpar)
backtr = dpowint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, cpow)
elif (utail == 4):
plambda = ((vr[(nt - 1)] ** utpar) * (1.0 - gcum(vrg[(nt - 1)])))
backtr = ((plambda / (1.0 - gcum(vrgs))) ** (1.0 / utpar))
else:
j = dlocate(vrg, 1, nt, vrgs)
j = max(min((nt - 2), j), 1)
backtr = dpowint(vrg[j], vrg[(j + 1)], vr[j], vr[(j + 1)], vrgs, 1.0)
return backtr
|
Back transform a single value with a provided transformation table and tail extrapolation.
:param vrgs: value to transform
:param vr: the transformation table, 1D ndarray with the original values
:param vrg: the transformation table, 1D ndarray with the trasnformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
:return: TODO
|
geostatspy/geostats.py
|
backtr_value
|
shohirose/GeostatsPy
| 284
|
python
|
def backtr_value(vrgs, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
'Back transform a single value with a provided transformation table and tail extrapolation.\n :param vrgs: value to transform\n :param vr: the transformation table, 1D ndarray with the original values\n :param vrg: the transformation table, 1D ndarray with the trasnformed variable\n :param zmin: lower trimming limits\n :param zmax: upper trimming limits\n :param ltail: lower tail value\n :param ltpar: lower tail extrapolation parameter\n :param utail: upper tail value\n :param utpar: upper tail extrapolation parameter\n :return: TODO\n '
EPSLON = 1e-20
nt = len(vr)
if (vrgs <= vrg[0]):
backtr = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs)
if (ltail == 1):
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif (ltail == 2):
cpow = (1.0 / ltpar)
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
elif (vrgs >= vrg[(nt - 1)]):
backtr = vr[(nt - 1)]
cdfhi = gcum(vrg[(nt - 1)])
cdfbt = gcum(vrgs)
if (utail == 1):
backtr = dpowint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, 1.0)
elif (utail == 2):
cpow = (1.0 / utpar)
backtr = dpowint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, cpow)
elif (utail == 4):
plambda = ((vr[(nt - 1)] ** utpar) * (1.0 - gcum(vrg[(nt - 1)])))
backtr = ((plambda / (1.0 - gcum(vrgs))) ** (1.0 / utpar))
else:
j = dlocate(vrg, 1, nt, vrgs)
j = max(min((nt - 2), j), 1)
backtr = dpowint(vrg[j], vrg[(j + 1)], vr[j], vr[(j + 1)], vrgs, 1.0)
return backtr
|
def backtr_value(vrgs, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
'Back transform a single value with a provided transformation table and tail extrapolation.\n :param vrgs: value to transform\n :param vr: the transformation table, 1D ndarray with the original values\n :param vrg: the transformation table, 1D ndarray with the trasnformed variable\n :param zmin: lower trimming limits\n :param zmax: upper trimming limits\n :param ltail: lower tail value\n :param ltpar: lower tail extrapolation parameter\n :param utail: upper tail value\n :param utpar: upper tail extrapolation parameter\n :return: TODO\n '
EPSLON = 1e-20
nt = len(vr)
if (vrgs <= vrg[0]):
backtr = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs)
if (ltail == 1):
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif (ltail == 2):
cpow = (1.0 / ltpar)
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
elif (vrgs >= vrg[(nt - 1)]):
backtr = vr[(nt - 1)]
cdfhi = gcum(vrg[(nt - 1)])
cdfbt = gcum(vrgs)
if (utail == 1):
backtr = dpowint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, 1.0)
elif (utail == 2):
cpow = (1.0 / utpar)
backtr = dpowint(cdfhi, 1.0, vr[(nt - 1)], zmax, cdfbt, cpow)
elif (utail == 4):
plambda = ((vr[(nt - 1)] ** utpar) * (1.0 - gcum(vrg[(nt - 1)])))
backtr = ((plambda / (1.0 - gcum(vrgs))) ** (1.0 / utpar))
else:
j = dlocate(vrg, 1, nt, vrgs)
j = max(min((nt - 2), j), 1)
backtr = dpowint(vrg[j], vrg[(j + 1)], vr[j], vr[(j + 1)], vrgs, 1.0)
return backtr<|docstring|>Back transform a single value with a provided transformation table and tail extrapolation.
:param vrgs: value to transform
:param vr: the transformation table, 1D ndarray with the original values
:param vrg: the transformation table, 1D ndarray with the trasnformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
:return: TODO<|endoftext|>
|
c68b421a74129246e4098f4a9444b894f106dd75a9e1377cd8331b634100abb6
|
def gcum(x):
'Calculate the cumulative probability of the standard normal distribution.\n :param x: the value from the standard normal distribution \n :return: TODO\n '
z = x
if (z < 0):
z = (- z)
t = (1.0 / (1.0 + (0.2316419 * z)))
gcum = (t * (0.31938153 + (t * ((- 0.356563782) + (t * (1.781477937 + (t * ((- 1.821255978) + (t * 1.330274429)))))))))
e2 = 0.0
if (z <= 6.0):
e2 = (np.exp((((- z) * z) / 2.0)) * 0.3989422803)
gcum = (1.0 - (e2 * gcum))
if (x >= 0):
return gcum
gcum = (1.0 - gcum)
return gcum
|
Calculate the cumulative probability of the standard normal distribution.
:param x: the value from the standard normal distribution
:return: TODO
|
geostatspy/geostats.py
|
gcum
|
shohirose/GeostatsPy
| 284
|
python
|
def gcum(x):
'Calculate the cumulative probability of the standard normal distribution.\n :param x: the value from the standard normal distribution \n :return: TODO\n '
z = x
if (z < 0):
z = (- z)
t = (1.0 / (1.0 + (0.2316419 * z)))
gcum = (t * (0.31938153 + (t * ((- 0.356563782) + (t * (1.781477937 + (t * ((- 1.821255978) + (t * 1.330274429)))))))))
e2 = 0.0
if (z <= 6.0):
e2 = (np.exp((((- z) * z) / 2.0)) * 0.3989422803)
gcum = (1.0 - (e2 * gcum))
if (x >= 0):
return gcum
gcum = (1.0 - gcum)
return gcum
|
def gcum(x):
'Calculate the cumulative probability of the standard normal distribution.\n :param x: the value from the standard normal distribution \n :return: TODO\n '
z = x
if (z < 0):
z = (- z)
t = (1.0 / (1.0 + (0.2316419 * z)))
gcum = (t * (0.31938153 + (t * ((- 0.356563782) + (t * (1.781477937 + (t * ((- 1.821255978) + (t * 1.330274429)))))))))
e2 = 0.0
if (z <= 6.0):
e2 = (np.exp((((- z) * z) / 2.0)) * 0.3989422803)
gcum = (1.0 - (e2 * gcum))
if (x >= 0):
return gcum
gcum = (1.0 - gcum)
return gcum<|docstring|>Calculate the cumulative probability of the standard normal distribution.
:param x: the value from the standard normal distribution
:return: TODO<|endoftext|>
|
e76718533860c8b6e5c14a8f919706914ff6a768c7662f190f4fe749111e3585
|
def locate(xx, iis, iie, x):
'Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where\n `xx` is an array of length `n`, and `x` is a given value. `xx` must be\n monotonic, either increasing or decreasing (GSLIB version).\n :param xx: array\n :param iis: start point\n :param iie: end point\n :param x: given value\n :return: TODO\n '
n = len(xx)
if (iis <= 0):
iis = 0
if (iie >= n):
iie = (n - 1)
jl = (iis - 1)
ju = iie
if (xx[(n - 1)] <= x):
j = iie
return j
while ((ju - jl) > 1):
jm = int(((ju + jl) / 2))
if ((xx[iie] > xx[iis]) == (x > xx[jm])):
jl = jm
else:
ju = jm
j = jl
return j
|
Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (GSLIB version).
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
:return: TODO
|
geostatspy/geostats.py
|
locate
|
shohirose/GeostatsPy
| 284
|
python
|
def locate(xx, iis, iie, x):
'Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where\n `xx` is an array of length `n`, and `x` is a given value. `xx` must be\n monotonic, either increasing or decreasing (GSLIB version).\n :param xx: array\n :param iis: start point\n :param iie: end point\n :param x: given value\n :return: TODO\n '
n = len(xx)
if (iis <= 0):
iis = 0
if (iie >= n):
iie = (n - 1)
jl = (iis - 1)
ju = iie
if (xx[(n - 1)] <= x):
j = iie
return j
while ((ju - jl) > 1):
jm = int(((ju + jl) / 2))
if ((xx[iie] > xx[iis]) == (x > xx[jm])):
jl = jm
else:
ju = jm
j = jl
return j
|
def locate(xx, iis, iie, x):
'Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where\n `xx` is an array of length `n`, and `x` is a given value. `xx` must be\n monotonic, either increasing or decreasing (GSLIB version).\n :param xx: array\n :param iis: start point\n :param iie: end point\n :param x: given value\n :return: TODO\n '
n = len(xx)
if (iis <= 0):
iis = 0
if (iie >= n):
iie = (n - 1)
jl = (iis - 1)
ju = iie
if (xx[(n - 1)] <= x):
j = iie
return j
while ((ju - jl) > 1):
jm = int(((ju + jl) / 2))
if ((xx[iie] > xx[iis]) == (x > xx[jm])):
jl = jm
else:
ju = jm
j = jl
return j<|docstring|>Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (GSLIB version).
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
:return: TODO<|endoftext|>
|
f127042158912d5d862852abf504e2dbbaf8640f62d0818c60dd85107951b65a
|
def dlocate(xx, iis, iie, x):
'Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where\n `xx` is an array of length `n`, and `x` is a given value. `xx` must be\n monotonic, either increasing or decreasing (updated with Python bisect)\n :param xx: array\n :param iis: start point\n :param iie: end point\n :param x: given value\n :return: TODO\n '
n = len(xx)
if (iie <= iis):
iis = 0
iie = (n - 1)
array = xx[iis:(iie - 1)]
j = bisect(array, x)
return j
|
Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (updated with Python bisect)
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
:return: TODO
|
geostatspy/geostats.py
|
dlocate
|
shohirose/GeostatsPy
| 284
|
python
|
def dlocate(xx, iis, iie, x):
'Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where\n `xx` is an array of length `n`, and `x` is a given value. `xx` must be\n monotonic, either increasing or decreasing (updated with Python bisect)\n :param xx: array\n :param iis: start point\n :param iie: end point\n :param x: given value\n :return: TODO\n '
n = len(xx)
if (iie <= iis):
iis = 0
iie = (n - 1)
array = xx[iis:(iie - 1)]
j = bisect(array, x)
return j
|
def dlocate(xx, iis, iie, x):
'Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where\n `xx` is an array of length `n`, and `x` is a given value. `xx` must be\n monotonic, either increasing or decreasing (updated with Python bisect)\n :param xx: array\n :param iis: start point\n :param iie: end point\n :param x: given value\n :return: TODO\n '
n = len(xx)
if (iie <= iis):
iis = 0
iie = (n - 1)
array = xx[iis:(iie - 1)]
j = bisect(array, x)
return j<|docstring|>Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (updated with Python bisect)
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
:return: TODO<|endoftext|>
|
86621ca6280a88288ef1a3e2f27409a35b8402ac652c9c1915b6865413694954
|
def powint(xlow, xhigh, ylow, yhigh, xval, power):
'Power-based interpolator \n :param xlow: x lower interval\n :param xhigh: x upper interval\n :param ylow: y lower interval\n :param yhigh: y upper interval\n :param xval: value on x\n :param power: power for interpolation\n :return: TODO\n '
EPSLON = 1e-20
if ((xhigh - xlow) < EPSLON):
powint = ((yhigh + ylow) / 2.0)
else:
powint = (ylow + ((yhigh - ylow) * (((xval - xlow) / (xhigh - xlow)) ** power)))
return powint
|
Power-based interpolator
:param xlow: x lower interval
:param xhigh: x upper interval
:param ylow: y lower interval
:param yhigh: y upper interval
:param xval: value on x
:param power: power for interpolation
:return: TODO
|
geostatspy/geostats.py
|
powint
|
shohirose/GeostatsPy
| 284
|
python
|
def powint(xlow, xhigh, ylow, yhigh, xval, power):
'Power-based interpolator \n :param xlow: x lower interval\n :param xhigh: x upper interval\n :param ylow: y lower interval\n :param yhigh: y upper interval\n :param xval: value on x\n :param power: power for interpolation\n :return: TODO\n '
EPSLON = 1e-20
if ((xhigh - xlow) < EPSLON):
powint = ((yhigh + ylow) / 2.0)
else:
powint = (ylow + ((yhigh - ylow) * (((xval - xlow) / (xhigh - xlow)) ** power)))
return powint
|
def powint(xlow, xhigh, ylow, yhigh, xval, power):
'Power-based interpolator \n :param xlow: x lower interval\n :param xhigh: x upper interval\n :param ylow: y lower interval\n :param yhigh: y upper interval\n :param xval: value on x\n :param power: power for interpolation\n :return: TODO\n '
EPSLON = 1e-20
if ((xhigh - xlow) < EPSLON):
powint = ((yhigh + ylow) / 2.0)
else:
powint = (ylow + ((yhigh - ylow) * (((xval - xlow) / (xhigh - xlow)) ** power)))
return powint<|docstring|>Power-based interpolator
:param xlow: x lower interval
:param xhigh: x upper interval
:param ylow: y lower interval
:param yhigh: y upper interval
:param xval: value on x
:param power: power for interpolation
:return: TODO<|endoftext|>
|
153d79a597c0abfff02126e4db6080cc02ce9a147bccae0fc926c54b25fc6073
|
def dsortem(ib, ie, a, iperm, b=0, c=0, d=0, e=0, f=0, g=0, h=0):
'Sort array in ascending order.\n :param ib: start index\n :param ie: end index\n :param a: array\n :param iperm: 0 no other array is permuted.\n 1 array b is permuted according to array a.\n 2 arrays b, c are permuted.\n 3 arrays b, c, d are permuted.\n 4 arrays b, c, d, e are permuted.\n 5 arrays b, c, d, e, f are permuted.\n 6 arrays b, c, d, e, f, g are permuted.\n 7 arrays b, c, d, e, f, g, h are permuted.\n >7 no other array is permuted.\n :param b: array to be permuted according to array a.\n :param c: array to be permuted according to array a.\n :param d: array to be permuted according to array a.\n :param e: array to be permuted according to array a.\n :param f: array to be permuted according to array a.\n :param g: array to be permuted according to array a.\n :param h: array to be permuted according to array a.\n :return: a: the array, a portion of which has been sorted.\n b, c, d, e, f, g, h: arrays permuted according to array a (see\n iperm)\n '
a = a[ib:ie]
inds = a.argsort()
a = np.copy(a[inds])
if (iperm == 1):
return a
b_slice = b[ib:ie]
b = b_slice[inds]
if (iperm == 2):
return (a, b)
c_slice = c[ib:ie]
c = c_slice[inds]
if (iperm == 3):
return (a, b, c)
d_slice = d[ib:ie]
d = d_slice[inds]
if (iperm == 4):
return (a, b, c, d)
e_slice = e[ib:ie]
e = e_slice[inds]
if (iperm == 5):
return (a, b, c, d, e)
f_slice = f[ib:ie]
f = f_slice[inds]
if (iperm == 6):
return (a, b, c, d, e, f)
g_slice = g[ib:ie]
g = g_slice[inds]
if (iperm == 7):
return (a, b, c, d, e, f, g)
h_slice = h[ib:ie]
h = h_slice[inds]
return (a, b, c, d, e, f, g, h)
|
Sort array in ascending order.
:param ib: start index
:param ie: end index
:param a: array
:param iperm: 0 no other array is permuted.
1 array b is permuted according to array a.
2 arrays b, c are permuted.
3 arrays b, c, d are permuted.
4 arrays b, c, d, e are permuted.
5 arrays b, c, d, e, f are permuted.
6 arrays b, c, d, e, f, g are permuted.
7 arrays b, c, d, e, f, g, h are permuted.
>7 no other array is permuted.
:param b: array to be permuted according to array a.
:param c: array to be permuted according to array a.
:param d: array to be permuted according to array a.
:param e: array to be permuted according to array a.
:param f: array to be permuted according to array a.
:param g: array to be permuted according to array a.
:param h: array to be permuted according to array a.
:return: a: the array, a portion of which has been sorted.
b, c, d, e, f, g, h: arrays permuted according to array a (see
iperm)
|
geostatspy/geostats.py
|
dsortem
|
shohirose/GeostatsPy
| 284
|
python
|
def dsortem(ib, ie, a, iperm, b=0, c=0, d=0, e=0, f=0, g=0, h=0):
'Sort array in ascending order.\n :param ib: start index\n :param ie: end index\n :param a: array\n :param iperm: 0 no other array is permuted.\n 1 array b is permuted according to array a.\n 2 arrays b, c are permuted.\n 3 arrays b, c, d are permuted.\n 4 arrays b, c, d, e are permuted.\n 5 arrays b, c, d, e, f are permuted.\n 6 arrays b, c, d, e, f, g are permuted.\n 7 arrays b, c, d, e, f, g, h are permuted.\n >7 no other array is permuted.\n :param b: array to be permuted according to array a.\n :param c: array to be permuted according to array a.\n :param d: array to be permuted according to array a.\n :param e: array to be permuted according to array a.\n :param f: array to be permuted according to array a.\n :param g: array to be permuted according to array a.\n :param h: array to be permuted according to array a.\n :return: a: the array, a portion of which has been sorted.\n b, c, d, e, f, g, h: arrays permuted according to array a (see\n iperm)\n '
a = a[ib:ie]
inds = a.argsort()
a = np.copy(a[inds])
if (iperm == 1):
return a
b_slice = b[ib:ie]
b = b_slice[inds]
if (iperm == 2):
return (a, b)
c_slice = c[ib:ie]
c = c_slice[inds]
if (iperm == 3):
return (a, b, c)
d_slice = d[ib:ie]
d = d_slice[inds]
if (iperm == 4):
return (a, b, c, d)
e_slice = e[ib:ie]
e = e_slice[inds]
if (iperm == 5):
return (a, b, c, d, e)
f_slice = f[ib:ie]
f = f_slice[inds]
if (iperm == 6):
return (a, b, c, d, e, f)
g_slice = g[ib:ie]
g = g_slice[inds]
if (iperm == 7):
return (a, b, c, d, e, f, g)
h_slice = h[ib:ie]
h = h_slice[inds]
return (a, b, c, d, e, f, g, h)
|
def dsortem(ib, ie, a, iperm, b=0, c=0, d=0, e=0, f=0, g=0, h=0):
'Sort array in ascending order.\n :param ib: start index\n :param ie: end index\n :param a: array\n :param iperm: 0 no other array is permuted.\n 1 array b is permuted according to array a.\n 2 arrays b, c are permuted.\n 3 arrays b, c, d are permuted.\n 4 arrays b, c, d, e are permuted.\n 5 arrays b, c, d, e, f are permuted.\n 6 arrays b, c, d, e, f, g are permuted.\n 7 arrays b, c, d, e, f, g, h are permuted.\n >7 no other array is permuted.\n :param b: array to be permuted according to array a.\n :param c: array to be permuted according to array a.\n :param d: array to be permuted according to array a.\n :param e: array to be permuted according to array a.\n :param f: array to be permuted according to array a.\n :param g: array to be permuted according to array a.\n :param h: array to be permuted according to array a.\n :return: a: the array, a portion of which has been sorted.\n b, c, d, e, f, g, h: arrays permuted according to array a (see\n iperm)\n '
a = a[ib:ie]
inds = a.argsort()
a = np.copy(a[inds])
if (iperm == 1):
return a
b_slice = b[ib:ie]
b = b_slice[inds]
if (iperm == 2):
return (a, b)
c_slice = c[ib:ie]
c = c_slice[inds]
if (iperm == 3):
return (a, b, c)
d_slice = d[ib:ie]
d = d_slice[inds]
if (iperm == 4):
return (a, b, c, d)
e_slice = e[ib:ie]
e = e_slice[inds]
if (iperm == 5):
return (a, b, c, d, e)
f_slice = f[ib:ie]
f = f_slice[inds]
if (iperm == 6):
return (a, b, c, d, e, f)
g_slice = g[ib:ie]
g = g_slice[inds]
if (iperm == 7):
return (a, b, c, d, e, f, g)
h_slice = h[ib:ie]
h = h_slice[inds]
return (a, b, c, d, e, f, g, h)<|docstring|>Sort array in ascending order.
:param ib: start index
:param ie: end index
:param a: array
:param iperm: 0 no other array is permuted.
1 array b is permuted according to array a.
2 arrays b, c are permuted.
3 arrays b, c, d are permuted.
4 arrays b, c, d, e are permuted.
5 arrays b, c, d, e, f are permuted.
6 arrays b, c, d, e, f, g are permuted.
7 arrays b, c, d, e, f, g, h are permuted.
>7 no other array is permuted.
:param b: array to be permuted according to array a.
:param c: array to be permuted according to array a.
:param d: array to be permuted according to array a.
:param e: array to be permuted according to array a.
:param f: array to be permuted according to array a.
:param g: array to be permuted according to array a.
:param h: array to be permuted according to array a.
:return: a: the array, a portion of which has been sorted.
b, c, d, e, f, g, h: arrays permuted according to array a (see
iperm)<|endoftext|>
|
0f469c97976027448dcbeae178636565608b017bdd500f0100d9791023aa8a48
|
def gauinv(p):
'Compute the inverse of the standard normal cumulative distribution\n function.\n :param p: cumulative probability value\n :return: TODO\n '
lim = 1e-10
p0 = (- 0.322232431088)
p1 = (- 1.0)
p2 = (- 0.342242088547)
p3 = (- 0.0204231210245)
p4 = (- 4.53642210148e-05)
q0 = 0.099348462606
q1 = 0.588581570495
q2 = 0.531103462366
q3 = 0.10353775285
q4 = 0.0038560700634
if (p < lim):
xp = (- 10000000000.0)
return xp
if (p > (1.0 - lim)):
xp = 10000000000.0
return xp
pp = p
if (p > 0.5):
pp = (1 - pp)
xp = 0.0
if (p == 0.5):
return xp
y = np.sqrt(np.log((1.0 / (pp * pp))))
xp = float((y + (((((((((y * p4) + p3) * y) + p2) * y) + p1) * y) + p0) / ((((((((y * q4) + q3) * y) + q2) * y) + q1) * y) + q0))))
if (float(p) == float(pp)):
xp = (- xp)
return xp
|
Compute the inverse of the standard normal cumulative distribution
function.
:param p: cumulative probability value
:return: TODO
|
geostatspy/geostats.py
|
gauinv
|
shohirose/GeostatsPy
| 284
|
python
|
def gauinv(p):
'Compute the inverse of the standard normal cumulative distribution\n function.\n :param p: cumulative probability value\n :return: TODO\n '
lim = 1e-10
p0 = (- 0.322232431088)
p1 = (- 1.0)
p2 = (- 0.342242088547)
p3 = (- 0.0204231210245)
p4 = (- 4.53642210148e-05)
q0 = 0.099348462606
q1 = 0.588581570495
q2 = 0.531103462366
q3 = 0.10353775285
q4 = 0.0038560700634
if (p < lim):
xp = (- 10000000000.0)
return xp
if (p > (1.0 - lim)):
xp = 10000000000.0
return xp
pp = p
if (p > 0.5):
pp = (1 - pp)
xp = 0.0
if (p == 0.5):
return xp
y = np.sqrt(np.log((1.0 / (pp * pp))))
xp = float((y + (((((((((y * p4) + p3) * y) + p2) * y) + p1) * y) + p0) / ((((((((y * q4) + q3) * y) + q2) * y) + q1) * y) + q0))))
if (float(p) == float(pp)):
xp = (- xp)
return xp
|
def gauinv(p):
'Compute the inverse of the standard normal cumulative distribution\n function.\n :param p: cumulative probability value\n :return: TODO\n '
lim = 1e-10
p0 = (- 0.322232431088)
p1 = (- 1.0)
p2 = (- 0.342242088547)
p3 = (- 0.0204231210245)
p4 = (- 4.53642210148e-05)
q0 = 0.099348462606
q1 = 0.588581570495
q2 = 0.531103462366
q3 = 0.10353775285
q4 = 0.0038560700634
if (p < lim):
xp = (- 10000000000.0)
return xp
if (p > (1.0 - lim)):
xp = 10000000000.0
return xp
pp = p
if (p > 0.5):
pp = (1 - pp)
xp = 0.0
if (p == 0.5):
return xp
y = np.sqrt(np.log((1.0 / (pp * pp))))
xp = float((y + (((((((((y * p4) + p3) * y) + p2) * y) + p1) * y) + p0) / ((((((((y * q4) + q3) * y) + q2) * y) + q1) * y) + q0))))
if (float(p) == float(pp)):
xp = (- xp)
return xp<|docstring|>Compute the inverse of the standard normal cumulative distribution
function.
:param p: cumulative probability value
:return: TODO<|endoftext|>
|
2853bebc263c40f2271cfc475cbd1c51b4b5326588e24165230aae007ed5cab9
|
def gcum(x):
'Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is\n the area under a unit normal curve to the left of `x`. The results are\n accurate only to about 5 decimal places.\n :param x: TODO\n :return: TODO\n '
z = x
if (z < 0):
z = (- z)
t = (1.0 / (1.0 + (0.2316419 * z)))
gcum_ = (t * (0.31938153 + (t * ((- 0.356563782) + (t * (1.781477937 + (t * ((- 1.821255978) + (t * 1.330274429)))))))))
e2 = 0.0
if (z <= 6):
e2 = (np.exp((((- z) * z) / 2.0)) * 0.3989422803)
gcum_ = (1.0 - (e2 * gcum_))
if (x >= 0.0):
return gcum_
gcum_ = (1.0 - gcum_)
return gcum_
|
Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is
the area under a unit normal curve to the left of `x`. The results are
accurate only to about 5 decimal places.
:param x: TODO
:return: TODO
|
geostatspy/geostats.py
|
gcum
|
shohirose/GeostatsPy
| 284
|
python
|
def gcum(x):
'Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is\n the area under a unit normal curve to the left of `x`. The results are\n accurate only to about 5 decimal places.\n :param x: TODO\n :return: TODO\n '
z = x
if (z < 0):
z = (- z)
t = (1.0 / (1.0 + (0.2316419 * z)))
gcum_ = (t * (0.31938153 + (t * ((- 0.356563782) + (t * (1.781477937 + (t * ((- 1.821255978) + (t * 1.330274429)))))))))
e2 = 0.0
if (z <= 6):
e2 = (np.exp((((- z) * z) / 2.0)) * 0.3989422803)
gcum_ = (1.0 - (e2 * gcum_))
if (x >= 0.0):
return gcum_
gcum_ = (1.0 - gcum_)
return gcum_
|
def gcum(x):
'Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is\n the area under a unit normal curve to the left of `x`. The results are\n accurate only to about 5 decimal places.\n :param x: TODO\n :return: TODO\n '
z = x
if (z < 0):
z = (- z)
t = (1.0 / (1.0 + (0.2316419 * z)))
gcum_ = (t * (0.31938153 + (t * ((- 0.356563782) + (t * (1.781477937 + (t * ((- 1.821255978) + (t * 1.330274429)))))))))
e2 = 0.0
if (z <= 6):
e2 = (np.exp((((- z) * z) / 2.0)) * 0.3989422803)
gcum_ = (1.0 - (e2 * gcum_))
if (x >= 0.0):
return gcum_
gcum_ = (1.0 - gcum_)
return gcum_<|docstring|>Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is
the area under a unit normal curve to the left of `x`. The results are
accurate only to about 5 decimal places.
:param x: TODO
:return: TODO<|endoftext|>
|
7333815ca8f634a0dae16793e35d938ed0c70c184a8f5d9eadd49e1c73787ac2
|
def dpowint(xlow, xhigh, ylow, yhigh, xval, pwr):
'Power interpolate the value of `y` between (`xlow`, `ylow`) and\n (`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.\n :param xlow: TODO\n :param xhigh: TODO\n :param ylow: TODO\n :param yhigh: TODO\n :param xval: TODO\n :param pwr: power\n :return: TODO\n '
EPSLON = 1e-20
if ((xhigh - xlow) < EPSLON):
dpowint_ = ((yhigh + ylow) / 2.0)
else:
dpowint_ = (ylow + ((yhigh - ylow) * (((xval - xlow) / (xhigh - xlow)) ** pwr)))
return dpowint_
|
Power interpolate the value of `y` between (`xlow`, `ylow`) and
(`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.
:param xlow: TODO
:param xhigh: TODO
:param ylow: TODO
:param yhigh: TODO
:param xval: TODO
:param pwr: power
:return: TODO
|
geostatspy/geostats.py
|
dpowint
|
shohirose/GeostatsPy
| 284
|
python
|
def dpowint(xlow, xhigh, ylow, yhigh, xval, pwr):
'Power interpolate the value of `y` between (`xlow`, `ylow`) and\n (`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.\n :param xlow: TODO\n :param xhigh: TODO\n :param ylow: TODO\n :param yhigh: TODO\n :param xval: TODO\n :param pwr: power\n :return: TODO\n '
EPSLON = 1e-20
if ((xhigh - xlow) < EPSLON):
dpowint_ = ((yhigh + ylow) / 2.0)
else:
dpowint_ = (ylow + ((yhigh - ylow) * (((xval - xlow) / (xhigh - xlow)) ** pwr)))
return dpowint_
|
def dpowint(xlow, xhigh, ylow, yhigh, xval, pwr):
'Power interpolate the value of `y` between (`xlow`, `ylow`) and\n (`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.\n :param xlow: TODO\n :param xhigh: TODO\n :param ylow: TODO\n :param yhigh: TODO\n :param xval: TODO\n :param pwr: power\n :return: TODO\n '
EPSLON = 1e-20
if ((xhigh - xlow) < EPSLON):
dpowint_ = ((yhigh + ylow) / 2.0)
else:
dpowint_ = (ylow + ((yhigh - ylow) * (((xval - xlow) / (xhigh - xlow)) ** pwr)))
return dpowint_<|docstring|>Power interpolate the value of `y` between (`xlow`, `ylow`) and
(`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.
:param xlow: TODO
:param xhigh: TODO
:param ylow: TODO
:param yhigh: TODO
:param xval: TODO
:param pwr: power
:return: TODO<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.