code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def to_json(self):
"""Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
"""
# map ConflictType to json-patch operator
path = self.path
if self.conflict_type in ('REORDER', 'SET_FIELD'):
op = 'replace'
elif self.conflict_type in ('MANUAL_MERGE', 'ADD_BACK_TO_HEAD'):
op = 'add'
path += ('-',)
elif self.conflict_type == 'REMOVE_FIELD':
op = 'remove'
else:
raise ValueError(
'Conflict Type %s can not be mapped to a json-patch operation'
            % self.conflict_type
)
# stringify path array
json_pointer = '/' + '/'.join(str(el) for el in path)
conflict_values = force_list(self.body)
conflicts = []
for value in conflict_values:
if value is not None or self.conflict_type == 'REMOVE_FIELD':
conflicts.append({
'path': json_pointer,
'op': op,
'value': value,
'$type': self.conflict_type
})
return json.dumps(conflicts) | Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type" | Below is the instruction that describes the task:
### Input:
Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
### Response:
def to_json(self):
"""Deserializes conflict to a JSON object.
It returns list of:
`json-patch <https://tools.ietf.org/html/rfc6902>`_ format.
- REORDER, SET_FIELD become "op": "replace"
- MANUAL_MERGE, ADD_BACK_TO_HEAD become "op": "add"
- Path becomes `json-pointer <https://tools.ietf.org/html/rfc6901>`_
- Original conflict type is added to "$type"
"""
# map ConflictType to json-patch operator
path = self.path
if self.conflict_type in ('REORDER', 'SET_FIELD'):
op = 'replace'
elif self.conflict_type in ('MANUAL_MERGE', 'ADD_BACK_TO_HEAD'):
op = 'add'
path += ('-',)
elif self.conflict_type == 'REMOVE_FIELD':
op = 'remove'
else:
raise ValueError(
'Conflict Type %s can not be mapped to a json-patch operation'
            % self.conflict_type
)
# stringify path array
json_pointer = '/' + '/'.join(str(el) for el in path)
conflict_values = force_list(self.body)
conflicts = []
for value in conflict_values:
if value is not None or self.conflict_type == 'REMOVE_FIELD':
conflicts.append({
'path': json_pointer,
'op': op,
'value': value,
'$type': self.conflict_type
})
return json.dumps(conflicts) |
def _decompress(self, fp):
"""
Internal function for decompressing a backup file with the DEFLATE algorithm
:rtype: Proxy
"""
decompressor = zlib.decompressobj()
if self.stream:
return Proxy(decompressor.decompress, fp)
else:
out = io.BytesIO(decompressor.decompress(fp.read()))
out.write(decompressor.flush())
out.seek(0)
return out | Internal function for decompressing a backup file with the DEFLATE algorithm
:rtype: Proxy | Below is the instruction that describes the task:
### Input:
Internal function for decompressing a backup file with the DEFLATE algorithm
:rtype: Proxy
### Response:
def _decompress(self, fp):
"""
Internal function for decompressing a backup file with the DEFLATE algorithm
:rtype: Proxy
"""
decompressor = zlib.decompressobj()
if self.stream:
return Proxy(decompressor.decompress, fp)
else:
out = io.BytesIO(decompressor.decompress(fp.read()))
out.write(decompressor.flush())
out.seek(0)
return out |
def createqtconf():
"""Create a qt.conf file next to the current executable"""
template = """[Paths]
Prefix = {path}
Binaries = {path}
"""
import PyQt5
exedir = os.path.dirname(sys.executable)
qtpath = os.path.join(exedir, "qt.conf")
pyqt5path = os.path.abspath(PyQt5.__file__)
binpath = os.path.dirname(pyqt5path).replace("\\", "/")
try:
with open(qtpath, "w") as f:
f.write(template.format(path=binpath))
except:
        pass | Create a qt.conf file next to the current executable | Below is the instruction that describes the task:
### Input:
Create a qt.conf file next to the current executable
### Response:
def createqtconf():
"""Create a qt.conf file next to the current executable"""
template = """[Paths]
Prefix = {path}
Binaries = {path}
"""
import PyQt5
exedir = os.path.dirname(sys.executable)
qtpath = os.path.join(exedir, "qt.conf")
pyqt5path = os.path.abspath(PyQt5.__file__)
binpath = os.path.dirname(pyqt5path).replace("\\", "/")
try:
with open(qtpath, "w") as f:
f.write(template.format(path=binpath))
except:
pass |
def compare_bags(testbag, goldbag, count_only=True):
"""
Compare two bags of Xmrs objects, returning a triple of
(unique in test, shared, unique in gold).
Args:
testbag: An iterable of Xmrs objects to test.
goldbag: An iterable of Xmrs objects to compare against.
count_only: If True, the returned triple will only have the
counts of each; if False, a list of Xmrs objects will be
returned for each (using the ones from testbag for the
shared set)
Returns:
A triple of (unique in test, shared, unique in gold), where
each of the three items is an integer count if the count_only
parameter is True, or a list of Xmrs objects otherwise.
"""
gold_remaining = list(goldbag)
test_unique = []
shared = []
for test in testbag:
gold_match = None
for gold in gold_remaining:
if isomorphic(test, gold):
gold_match = gold
break
if gold_match is not None:
gold_remaining.remove(gold_match)
shared.append(test)
else:
test_unique.append(test)
if count_only:
return (len(test_unique), len(shared), len(gold_remaining))
else:
return (test_unique, shared, gold_remaining) | Compare two bags of Xmrs objects, returning a triple of
(unique in test, shared, unique in gold).
Args:
testbag: An iterable of Xmrs objects to test.
goldbag: An iterable of Xmrs objects to compare against.
count_only: If True, the returned triple will only have the
counts of each; if False, a list of Xmrs objects will be
returned for each (using the ones from testbag for the
shared set)
Returns:
A triple of (unique in test, shared, unique in gold), where
each of the three items is an integer count if the count_only
parameter is True, or a list of Xmrs objects otherwise. | Below is the instruction that describes the task:
### Input:
Compare two bags of Xmrs objects, returning a triple of
(unique in test, shared, unique in gold).
Args:
testbag: An iterable of Xmrs objects to test.
goldbag: An iterable of Xmrs objects to compare against.
count_only: If True, the returned triple will only have the
counts of each; if False, a list of Xmrs objects will be
returned for each (using the ones from testbag for the
shared set)
Returns:
A triple of (unique in test, shared, unique in gold), where
each of the three items is an integer count if the count_only
parameter is True, or a list of Xmrs objects otherwise.
### Response:
def compare_bags(testbag, goldbag, count_only=True):
"""
Compare two bags of Xmrs objects, returning a triple of
(unique in test, shared, unique in gold).
Args:
testbag: An iterable of Xmrs objects to test.
goldbag: An iterable of Xmrs objects to compare against.
count_only: If True, the returned triple will only have the
counts of each; if False, a list of Xmrs objects will be
returned for each (using the ones from testbag for the
shared set)
Returns:
A triple of (unique in test, shared, unique in gold), where
each of the three items is an integer count if the count_only
parameter is True, or a list of Xmrs objects otherwise.
"""
gold_remaining = list(goldbag)
test_unique = []
shared = []
for test in testbag:
gold_match = None
for gold in gold_remaining:
if isomorphic(test, gold):
gold_match = gold
break
if gold_match is not None:
gold_remaining.remove(gold_match)
shared.append(test)
else:
test_unique.append(test)
if count_only:
return (len(test_unique), len(shared), len(gold_remaining))
else:
return (test_unique, shared, gold_remaining) |
def parse_style_decl(style: str, owner: AbstractNode = None
) -> CSSStyleDeclaration:
"""Make CSSStyleDeclaration from style string.
:arg AbstractNode owner: Owner of the style.
"""
_style = CSSStyleDeclaration(style, owner=owner)
return _style | Make CSSStyleDeclaration from style string.
:arg AbstractNode owner: Owner of the style. | Below is the instruction that describes the task:
### Input:
Make CSSStyleDeclaration from style string.
:arg AbstractNode owner: Owner of the style.
### Response:
def parse_style_decl(style: str, owner: AbstractNode = None
) -> CSSStyleDeclaration:
"""Make CSSStyleDeclaration from style string.
:arg AbstractNode owner: Owner of the style.
"""
_style = CSSStyleDeclaration(style, owner=owner)
return _style |
def initAttrs(cls):
"""
Class decorator: automatically generate an ``__init__`` method that expects args from cls.attrs and stores them.
Args:
cls (class): class to decorate
Returns:
class: same, but modified, class
"""
def __init__(self, skype=None, raw=None, *args, **kwargs):
super(cls, self).__init__(skype, raw)
# Merge args into kwargs based on cls.attrs.
for i in range(len(args)):
kwargs[cls.attrs[i]] = args[i]
# Disallow any unknown kwargs.
unknown = set(kwargs) - set(cls.attrs)
if unknown:
unknownDesc = "an unexpected keyword argument" if len(unknown) == 1 else "unexpected keyword arguments"
unknownList = ", ".join("'{0}'".format(k) for k in sorted(unknown))
raise TypeError("__init__() got {0} {1}".format(unknownDesc, unknownList))
# Set each attribute from kwargs, or use the default if not specified.
for k in cls.attrs:
setattr(self, k, kwargs.get(k, cls.defaults.get(k)))
# Add the init method to the class.
setattr(cls, "__init__", __init__)
return cls | Class decorator: automatically generate an ``__init__`` method that expects args from cls.attrs and stores them.
Args:
cls (class): class to decorate
Returns:
class: same, but modified, class | Below is the instruction that describes the task:
### Input:
Class decorator: automatically generate an ``__init__`` method that expects args from cls.attrs and stores them.
Args:
cls (class): class to decorate
Returns:
class: same, but modified, class
### Response:
def initAttrs(cls):
"""
Class decorator: automatically generate an ``__init__`` method that expects args from cls.attrs and stores them.
Args:
cls (class): class to decorate
Returns:
class: same, but modified, class
"""
def __init__(self, skype=None, raw=None, *args, **kwargs):
super(cls, self).__init__(skype, raw)
# Merge args into kwargs based on cls.attrs.
for i in range(len(args)):
kwargs[cls.attrs[i]] = args[i]
# Disallow any unknown kwargs.
unknown = set(kwargs) - set(cls.attrs)
if unknown:
unknownDesc = "an unexpected keyword argument" if len(unknown) == 1 else "unexpected keyword arguments"
unknownList = ", ".join("'{0}'".format(k) for k in sorted(unknown))
raise TypeError("__init__() got {0} {1}".format(unknownDesc, unknownList))
# Set each attribute from kwargs, or use the default if not specified.
for k in cls.attrs:
setattr(self, k, kwargs.get(k, cls.defaults.get(k)))
# Add the init method to the class.
setattr(cls, "__init__", __init__)
return cls |
def register_workflow_type(domain=None, name=None, version=None, description=None, defaultTaskStartToCloseTimeout=None, defaultExecutionStartToCloseTimeout=None, defaultTaskList=None, defaultTaskPriority=None, defaultChildPolicy=None, defaultLambdaRole=None):
"""
Registers a new workflow type and its configuration settings in the specified domain.
The retention period for the workflow history is set by the RegisterDomain action.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.register_workflow_type(
domain='string',
name='string',
version='string',
description='string',
defaultTaskStartToCloseTimeout='string',
defaultExecutionStartToCloseTimeout='string',
defaultTaskList={
'name': 'string'
},
defaultTaskPriority='string',
defaultChildPolicy='TERMINATE'|'REQUEST_CANCEL'|'ABANDON',
defaultLambdaRole='string'
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain in which to register the workflow type.
:type name: string
:param name: [REQUIRED]
The name of the workflow type.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type version: string
:param version: [REQUIRED]
The version of the workflow type.
Note
The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type description: string
:param description: Textual description of the workflow type.
:type defaultTaskStartToCloseTimeout: string
:param defaultTaskStartToCloseTimeout: If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:type defaultExecutionStartToCloseTimeout: string
:param defaultExecutionStartToCloseTimeout: If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of 'NONE' for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
:type defaultTaskList: dict
:param defaultTaskList: If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
name (string) -- [REQUIRED]The name of the task list.
:type defaultTaskPriority: string
:param defaultTaskPriority: The default task priority to assign to the workflow type. If not assigned, then '0' will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
:type defaultChildPolicy: string
:param defaultChildPolicy: If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The supported child policies are:
TERMINATE: the child executions will be terminated.
REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
ABANDON: no action will be taken. The child executions will continue to run.
:type defaultLambdaRole: string
:param defaultLambdaRole: The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.
This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.
:returns:
domain (string) -- [REQUIRED]
The name of the domain in which to register the workflow type.
name (string) -- [REQUIRED]
The name of the workflow type.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
version (string) -- [REQUIRED]
The version of the workflow type.
Note
The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
description (string) -- Textual description of the workflow type.
defaultTaskStartToCloseTimeout (string) -- If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
defaultExecutionStartToCloseTimeout (string) -- If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
defaultTaskList (dict) -- If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
name (string) -- [REQUIRED]The name of the task list.
defaultTaskPriority (string) -- The default task priority to assign to the workflow type. If not assigned, then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
defaultChildPolicy (string) -- If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The supported child policies are:
TERMINATE: the child executions will be terminated.
REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
ABANDON: no action will be taken. The child executions will continue to run.
defaultLambdaRole (string) -- The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.
This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.
"""
pass | Registers a new workflow type and its configuration settings in the specified domain.
The retention period for the workflow history is set by the RegisterDomain action.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.register_workflow_type(
domain='string',
name='string',
version='string',
description='string',
defaultTaskStartToCloseTimeout='string',
defaultExecutionStartToCloseTimeout='string',
defaultTaskList={
'name': 'string'
},
defaultTaskPriority='string',
defaultChildPolicy='TERMINATE'|'REQUEST_CANCEL'|'ABANDON',
defaultLambdaRole='string'
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain in which to register the workflow type.
:type name: string
:param name: [REQUIRED]
The name of the workflow type.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type version: string
:param version: [REQUIRED]
The version of the workflow type.
Note
The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type description: string
:param description: Textual description of the workflow type.
:type defaultTaskStartToCloseTimeout: string
:param defaultTaskStartToCloseTimeout: If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:type defaultExecutionStartToCloseTimeout: string
:param defaultExecutionStartToCloseTimeout: If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of 'NONE' for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
:type defaultTaskList: dict
:param defaultTaskList: If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
name (string) -- [REQUIRED]The name of the task list.
:type defaultTaskPriority: string
:param defaultTaskPriority: The default task priority to assign to the workflow type. If not assigned, then '0' will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
:type defaultChildPolicy: string
:param defaultChildPolicy: If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The supported child policies are:
TERMINATE: the child executions will be terminated.
REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
ABANDON: no action will be taken. The child executions will continue to run.
:type defaultLambdaRole: string
:param defaultLambdaRole: The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.
This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.
:returns:
domain (string) -- [REQUIRED]
The name of the domain in which to register the workflow type.
name (string) -- [REQUIRED]
The name of the workflow type.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
version (string) -- [REQUIRED]
The version of the workflow type.
Note
The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
description (string) -- Textual description of the workflow type.
defaultTaskStartToCloseTimeout (string) -- If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
defaultExecutionStartToCloseTimeout (string) -- If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
defaultTaskList (dict) -- If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
name (string) -- [REQUIRED]The name of the task list.
defaultTaskPriority (string) -- The default task priority to assign to the workflow type. If not assigned, then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
defaultChildPolicy (string) -- If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The supported child policies are:
TERMINATE: the child executions will be terminated.
REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
ABANDON: no action will be taken. The child executions will continue to run.
defaultLambdaRole (string) -- The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.
This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision. | Below is the instruction that describes the task:
### Input:
Registers a new workflow type and its configuration settings in the specified domain.
The retention period for the workflow history is set by the RegisterDomain action.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.register_workflow_type(
domain='string',
name='string',
version='string',
description='string',
defaultTaskStartToCloseTimeout='string',
defaultExecutionStartToCloseTimeout='string',
defaultTaskList={
'name': 'string'
},
defaultTaskPriority='string',
defaultChildPolicy='TERMINATE'|'REQUEST_CANCEL'|'ABANDON',
defaultLambdaRole='string'
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain in which to register the workflow type.
:type name: string
:param name: [REQUIRED]
The name of the workflow type.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type version: string
:param version: [REQUIRED]
The version of the workflow type.
Note
The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type description: string
:param description: Textual description of the workflow type.
:type defaultTaskStartToCloseTimeout: string
:param defaultTaskStartToCloseTimeout: If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:type defaultExecutionStartToCloseTimeout: string
:param defaultExecutionStartToCloseTimeout: If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of 'NONE' for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
:type defaultTaskList: dict
:param defaultTaskList: If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
name (string) -- [REQUIRED]The name of the task list.
:type defaultTaskPriority: string
:param defaultTaskPriority: The default task priority to assign to the workflow type. If not assigned, then '0' will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
:type defaultChildPolicy: string
:param defaultChildPolicy: If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The supported child policies are:
TERMINATE: the child executions will be terminated.
REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
ABANDON: no action will be taken. The child executions will continue to run.
:type defaultLambdaRole: string
:param defaultLambdaRole: The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.
This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.
:returns:
domain (string) -- [REQUIRED]
The name of the domain in which to register the workflow type.
name (string) -- [REQUIRED]
The name of the workflow type.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
version (string) -- [REQUIRED]
The version of the workflow type.
Note
The workflow type consists of the name and version, the combination of which must be unique within the domain. To get a list of all currently registered workflow types, use the ListWorkflowTypes action.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
description (string) -- Textual description of the workflow type.
defaultTaskStartToCloseTimeout (string) -- If set, specifies the default maximum duration of decision tasks for this workflow type. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
defaultExecutionStartToCloseTimeout (string) -- If set, specifies the default maximum duration for executions of this workflow type. You can override this default when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
The duration is specified in seconds; an integer greater than or equal to 0. Unlike some of the other timeout parameters in Amazon SWF, you cannot specify a value of "NONE" for defaultExecutionStartToCloseTimeout ; there is a one-year max limit on the time that a workflow execution can run. Exceeding this limit will always cause the workflow execution to time out.
defaultTaskList (dict) -- If set, specifies the default task list to use for scheduling decision tasks for executions of this workflow type. This default is used only if a task list is not provided when starting the execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision.
name (string) -- [REQUIRED]The name of the task list.
defaultTaskPriority (string) -- The default task priority to assign to the workflow type. If not assigned, then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
defaultChildPolicy (string) -- If set, specifies the default policy to use for the child workflow executions when a workflow execution of this type is terminated, by calling the TerminateWorkflowExecution action explicitly or due to an expired timeout. This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution decision.
The supported child policies are:
TERMINATE: the child executions will be terminated.
REQUEST_CANCEL: a request to cancel will be attempted for each child execution by recording a WorkflowExecutionCancelRequested event in its history. It is up to the decider to take appropriate actions when it receives an execution history with this event.
ABANDON: no action will be taken. The child executions will continue to run.
defaultLambdaRole (string) -- The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions.
This default can be overridden when starting a workflow execution using the StartWorkflowExecution action or the StartChildWorkflowExecution and ContinueAsNewWorkflowExecution decision.
### Response:
def register_workflow_type(domain=None, name=None, version=None, description=None, defaultTaskStartToCloseTimeout=None, defaultExecutionStartToCloseTimeout=None, defaultTaskList=None, defaultTaskPriority=None, defaultChildPolicy=None, defaultLambdaRole=None):
    """Register a new workflow type and its configuration in a domain.

    Wraps the Amazon SWF ``RegisterWorkflowType`` API. A workflow type is
    identified by its (``name``, ``version``) pair, which must be unique
    within ``domain``; history retention is governed by the domain itself
    (see ``RegisterDomain``), and IAM policies control access to this
    action (a denied call fails with cause ``OPERATION_NOT_PERMITTED``).

    :type domain: string
    :param domain: [REQUIRED] Name of the domain to register the type in.
    :type name: string
    :param name: [REQUIRED] Name of the workflow type. Must not start or
        end with whitespace, and must not contain ``:``, ``/``, ``|``,
        control characters (u0000-u001f, u007f-u009f), or the literal
        string "arn".
    :type version: string
    :param version: [REQUIRED] Version of the workflow type; same
        character restrictions as ``name``.
    :type description: string
    :param description: Textual description of the workflow type.
    :type defaultTaskStartToCloseTimeout: string
    :param defaultTaskStartToCloseTimeout: Default maximum duration of
        decision tasks, in seconds (integer >= 0) or 'NONE' for unlimited.
        May be overridden when an execution is started.
    :type defaultExecutionStartToCloseTimeout: string
    :param defaultExecutionStartToCloseTimeout: Default maximum execution
        duration in seconds (integer >= 0). Unlike other timeouts, 'NONE'
        is not accepted here; executions are capped at one year.
    :type defaultTaskList: dict
    :param defaultTaskList: Default task list (``{'name': ...}``) used to
        schedule decision tasks when none is supplied at execution start.
        ``name`` (string) is [REQUIRED].
    :type defaultTaskPriority: string
    :param defaultTaskPriority: Default task priority; an integer string
        in the Java Integer range (-2147483648..2147483647), higher values
        indicating higher priority. Defaults to '0' when unset.
    :type defaultChildPolicy: string
    :param defaultChildPolicy: Default policy applied to child executions
        when an execution of this type is terminated (explicitly via
        ``TerminateWorkflowExecution`` or by timeout):
        'TERMINATE' - terminate the children;
        'REQUEST_CANCEL' - record a WorkflowExecutionCancelRequested event
        in each child's history and let its decider act on it;
        'ABANDON' - take no action, children keep running.
    :type defaultLambdaRole: string
    :param defaultLambdaRole: ARN of the default IAM role used when
        executions of this type invoke AWS Lambda functions; overridable
        at execution start.
    """
    pass
def create_checklist_item(self, card_id, checklist_id, checklistitem_json):
"""
Create a ChecklistItem object from JSON object
"""
return trolly.checklist.ChecklistItem(
trello_client=self,
card_id=card_id,
checklist_id=checklist_id,
checklistitem_id=checklistitem_json['id'].encode('utf-8'),
name=checklistitem_json['name'].encode('utf-8'),
state=checklistitem_json['state'].encode('utf-8')
) | Create a ChecklistItem object from JSON object | Below is the instruction that describes the task:
### Input:
Create a ChecklistItem object from JSON object
### Response:
def create_checklist_item(self, card_id, checklist_id, checklistitem_json):
    """
    Build a ChecklistItem object from its JSON representation.

    Reads the 'id', 'name' and 'state' fields of ``checklistitem_json``
    (UTF-8 encoding each of them, as the rest of the client does) and
    wires the resulting item to this client, card and checklist.
    """
    # Encode the JSON string fields up front, in the same order the
    # constructor consumes them.
    item_id = checklistitem_json['id'].encode('utf-8')
    item_name = checklistitem_json['name'].encode('utf-8')
    item_state = checklistitem_json['state'].encode('utf-8')
    return trolly.checklist.ChecklistItem(
        trello_client=self,
        card_id=card_id,
        checklist_id=checklist_id,
        checklistitem_id=item_id,
        name=item_name,
        state=item_state,
    )
def verify_address(self, addr1="", addr2="", city="", fname="", lname="", phone="", province="", postal="", country="", email="", recordID="", freeform= ""):
"""verify_address
Builds a JSON request to send to Melissa data. Takes in all needed address info.
Args:
addr1 (str):Contains info for Melissa data
addr2 (str):Contains info for Melissa data
city (str):Contains info for Melissa data
fname (str):Contains info for Melissa data
lname (str):Contains info for Melissa data
phone (str):Contains info for Melissa data
province (str):Contains info for Melissa data
postal (str):Contains info for Melissa data
country (str):Contains info for Melissa data
email (str):Contains info for Melissa data
recordID (str):Contains info for Melissa data
freeform (str):Contains info for Melissa data
Returns:
result, a string containing the result codes from MelissaData
"""
data = {
"TransmissionReference": "",
"CustomerID": self.custID,
"Actions": "Check",
"Options": "",
"Columns": "",
"Records": [{
"RecordID": recordID,
"CompanyName": "",
"FullName": fname + " " + lname,
"AddressLine1": addr1,
"AddressLine2": addr2,
"Suite": "",
"City": city,
"State": province,
"PostalCode": postal,
"Country": country,
"PhoneNumber": phone,
"EmailAddress": email,
"FreeForm": freeform,
}]
}
self.country = country
data = json.dumps(data)
result = requests.post("https://personator.melissadata.net/v3/WEB/ContactVerify/doContactVerify", data=data)
result = json.loads(result.text)
result = self.parse_results(result)
return result | verify_address
Builds a JSON request to send to Melissa data. Takes in all needed address info.
Args:
addr1 (str):Contains info for Melissa data
addr2 (str):Contains info for Melissa data
city (str):Contains info for Melissa data
fname (str):Contains info for Melissa data
lname (str):Contains info for Melissa data
phone (str):Contains info for Melissa data
province (str):Contains info for Melissa data
postal (str):Contains info for Melissa data
country (str):Contains info for Melissa data
email (str):Contains info for Melissa data
recordID (str):Contains info for Melissa data
freeform (str):Contains info for Melissa data
Returns:
result, a string containing the result codes from MelissaData | Below is the instruction that describes the task:
### Input:
verify_address
Builds a JSON request to send to Melissa data. Takes in all needed address info.
Args:
addr1 (str):Contains info for Melissa data
addr2 (str):Contains info for Melissa data
city (str):Contains info for Melissa data
fname (str):Contains info for Melissa data
lname (str):Contains info for Melissa data
phone (str):Contains info for Melissa data
province (str):Contains info for Melissa data
postal (str):Contains info for Melissa data
country (str):Contains info for Melissa data
email (str):Contains info for Melissa data
recordID (str):Contains info for Melissa data
freeform (str):Contains info for Melissa data
Returns:
result, a string containing the result codes from MelissaData
### Response:
def verify_address(self, addr1="", addr2="", city="", fname="", lname="", phone="", province="", postal="", country="", email="", recordID="", freeform=""):
    """verify_address
    Build and send a contact-verification request to the MelissaData
    Personator web service.

    Every argument is passed straight through into the single record of
    the request body; ``country`` is also stored on ``self.country`` for
    later result parsing.

    Args:
        addr1 (str): First address line.
        addr2 (str): Second address line.
        city (str): City name.
        fname (str): First name (joined with ``lname`` into FullName).
        lname (str): Last name.
        phone (str): Phone number.
        province (str): State/province.
        postal (str): Postal code.
        country (str): Country.
        email (str): Email address.
        recordID (str): Caller-chosen record identifier.
        freeform (str): Free-form address text.
    Returns:
        result, a string containing the result codes from MelissaData
    """
    record = {
        "RecordID": recordID,
        "CompanyName": "",
        "FullName": fname + " " + lname,
        "AddressLine1": addr1,
        "AddressLine2": addr2,
        "Suite": "",
        "City": city,
        "State": province,
        "PostalCode": postal,
        "Country": country,
        "PhoneNumber": phone,
        "EmailAddress": email,
        "FreeForm": freeform,
    }
    payload = {
        "TransmissionReference": "",
        "CustomerID": self.custID,
        "Actions": "Check",
        "Options": "",
        "Columns": "",
        "Records": [record],
    }
    # Remember the country so parse_results can interpret the codes.
    self.country = country
    response = requests.post(
        "https://personator.melissadata.net/v3/WEB/ContactVerify/doContactVerify",
        data=json.dumps(payload))
    parsed = json.loads(response.text)
    return self.parse_results(parsed)
def create_agreement(self, agreement_id, did, condition_ids, time_locks, time_outs,
consumer_address, account):
"""
Create the service agreement. Return true if it is created successfully.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param time_locks: is a list of uint time lock values associated to each Condition, int
:param time_outs: is a list of uint time out values associated to each Condition, int
:param consumer_address: ethereum account address of consumer, hex str
:param account: Account instance creating the agreement
:return: bool
"""
logger.info(
f'Creating agreement {agreement_id} with did={did}, consumer={consumer_address}.')
tx_hash = self.send_transaction(
'createAgreement',
(agreement_id,
did,
condition_ids,
time_locks,
time_outs,
consumer_address),
transact={'from': account.address,
'passphrase': account.password}
)
receipt = self.get_tx_receipt(tx_hash)
return receipt and receipt.status == 1 | Create the service agreement. Return true if it is created successfully.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param time_locks: is a list of uint time lock values associated to each Condition, int
:param time_outs: is a list of uint time out values associated to each Condition, int
:param consumer_address: ethereum account address of consumer, hex str
:param account: Account instance creating the agreement
:return: bool | Below is the instruction that describes the task:
### Input:
Create the service agreement. Return true if it is created successfully.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param time_locks: is a list of uint time lock values associated to each Condition, int
:param time_outs: is a list of uint time out values associated to each Condition, int
:param consumer_address: ethereum account address of consumer, hex str
:param account: Account instance creating the agreement
:return: bool
### Response:
def create_agreement(self, agreement_id, did, condition_ids, time_locks, time_outs,
                     consumer_address, account):
    """
    Create the on-chain service agreement and report success.

    Sends a `createAgreement` transaction signed by `account` and waits
    for its receipt.

    :param agreement_id: id of the agreement, hex str
    :param did: DID, str
    :param condition_ids: list of bytes32 content-addressed Condition IDs
    :param time_locks: list of uint time lock values, one per Condition
    :param time_outs: list of uint time out values, one per Condition
    :param consumer_address: ethereum account address of consumer, hex str
    :param account: Account instance creating the agreement
    :return: bool -- True when the transaction was mined with status 1
    """
    logger.info(
        f'Creating agreement {agreement_id} with did={did}, consumer={consumer_address}.')
    call_args = (
        agreement_id,
        did,
        condition_ids,
        time_locks,
        time_outs,
        consumer_address,
    )
    tx_options = {'from': account.address, 'passphrase': account.password}
    tx_hash = self.send_transaction('createAgreement', call_args, transact=tx_options)
    receipt = self.get_tx_receipt(tx_hash)
    # A missing receipt or a non-1 status both count as failure.
    return receipt and receipt.status == 1
def get_objective_bank_ids_by_objective(self, objective_id):
"""Gets the list of ``ObjectiveBank`` ``Ids`` mapped to an ``Objective``.
arg: objective_id (osid.id.Id): ``Id`` of an ``Objective``
return: (osid.id.IdList) - list of objective bank ``Ids``
raise: NotFound - ``objective_id`` is not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_lookup_session(proxy=self._proxy)
lookup_session.use_federated_objective_bank_view()
objective = lookup_session.get_objective(objective_id)
id_list = []
for idstr in objective._my_map['assignedObjectiveBankIds']:
id_list.append(Id(idstr))
return IdList(id_list) | Gets the list of ``ObjectiveBank`` ``Ids`` mapped to an ``Objective``.
arg: objective_id (osid.id.Id): ``Id`` of an ``Objective``
return: (osid.id.IdList) - list of objective bank ``Ids``
raise: NotFound - ``objective_id`` is not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the list of ``ObjectiveBank`` ``Ids`` mapped to an ``Objective``.
arg: objective_id (osid.id.Id): ``Id`` of an ``Objective``
return: (osid.id.IdList) - list of objective bank ``Ids``
raise: NotFound - ``objective_id`` is not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_objective_bank_ids_by_objective(self, objective_id):
    """Gets the list of ``ObjectiveBank`` ``Ids`` mapped to an ``Objective``.
    arg: objective_id (osid.id.Id): ``Id`` of an ``Objective``
    return: (osid.id.IdList) - list of objective bank ``Ids``
    raise: NotFound - ``objective_id`` is not found
    raise: NullArgument - ``objective_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_bin_ids_by_resource
    mgr = self._get_provider_manager('LEARNING', local=True)
    lookup_session = mgr.get_objective_lookup_session(proxy=self._proxy)
    # A federated view lets the lookup find the objective in any bank.
    lookup_session.use_federated_objective_bank_view()
    objective = lookup_session.get_objective(objective_id)
    bank_ids = [
        Id(idstr)
        for idstr in objective._my_map['assignedObjectiveBankIds']
    ]
    return IdList(bank_ids)
def runner(self, fun, **kwargs):
'''
Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`
'''
return self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) | Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>` | Below is the instruction that describes the task:
### Input:
Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`
### Response:
def runner(self, fun, **kwargs):
    '''
    Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`

    The runner function named by ``fun`` is dispatched asynchronously on
    this instance's thread pool, with ``kwargs`` forwarded as the low-data.
    '''
    low_fn = self.client_cache['runner'].low
    return self.pool.fire_async(low_fn, args=(fun, kwargs))
def on_menu_save_interpretation(self, event):
'''
save interpretations to a redo file
'''
thellier_gui_redo_file = open(
os.path.join(self.WD, "thellier_GUI.redo"), 'w')
#--------------------------------------------------
# write interpretations to thellier_GUI.redo
#--------------------------------------------------
spec_list = list(self.Data.keys())
spec_list.sort()
redo_specimens_list = []
for sp in spec_list:
if 'saved' not in self.Data[sp]['pars']:
continue
if not self.Data[sp]['pars']['saved']:
continue
redo_specimens_list.append(sp)
thellier_gui_redo_file.write("%s %.0f %.0f\n" % (
sp, self.Data[sp]['pars']['measurement_step_min'], self.Data[sp]['pars']['measurement_step_max']))
dlg1 = wx.MessageDialog(
self, caption="Saved:", message="File thellier_GUI.redo is saved in MagIC working folder", style=wx.OK)
result = self.show_dlg(dlg1)
if result == wx.ID_OK:
dlg1.Destroy()
thellier_gui_redo_file.close()
return
thellier_gui_redo_file.close()
self.close_warning = False | save interpretations to a redo file | Below is the instruction that describes the task:
### Input:
save interpretations to a redo file
### Response:
def on_menu_save_interpretation(self, event):
    '''
    Save the accepted specimen interpretations to a redo file
    (thellier_GUI.redo) in the MagIC working directory ``self.WD``,
    one line per saved specimen:
    "<specimen> <measurement_step_min> <measurement_step_max>".

    A confirmation dialog is shown afterwards; unless the user confirms
    with OK, ``self.close_warning`` is cleared.
    '''
    redo_path = os.path.join(self.WD, "thellier_GUI.redo")
    # Use a context manager so the file is closed even if a write fails
    # (the original explicit close() leaked the handle on exceptions).
    with open(redo_path, 'w') as redo_file:
        for specimen in sorted(self.Data.keys()):
            pars = self.Data[specimen]['pars']
            # Only export interpretations the user explicitly saved.
            if not pars.get('saved'):
                continue
            redo_file.write("%s %.0f %.0f\n" % (
                specimen,
                pars['measurement_step_min'],
                pars['measurement_step_max']))
    dlg1 = wx.MessageDialog(
        self, caption="Saved:",
        message="File thellier_GUI.redo is saved in MagIC working folder",
        style=wx.OK)
    result = self.show_dlg(dlg1)
    if result == wx.ID_OK:
        dlg1.Destroy()
        return
    # NOTE(review): as in the original, close_warning is cleared only when
    # the dialog result is not wx.ID_OK -- confirm this asymmetry is intended.
    self.close_warning = False
def getInterpretation(self):
"""
Get the value of the previously POSTed Tropo action.
"""
actions = self._actions
if (type (actions) is list):
dict = actions[0]
else:
dict = actions
return dict['interpretation'] | Get the value of the previously POSTed Tropo action. | Below is the instruction that describes the task:
### Input:
Get the value of the previously POSTed Tropo action.
### Response:
def getInterpretation(self):
    """
    Get the value of the previously POSTed Tropo action.

    ``self._actions`` holds either a single action dict or a list of
    action dicts; in the list case the first action is used.

    Returns:
        The action's 'interpretation' value.

    Raises:
        KeyError: if the action has no 'interpretation' key.
    """
    actions = self._actions
    # Fix: the original bound the action to a local named `dict`,
    # shadowing the builtin, and used `type(...) is list` instead of
    # the idiomatic isinstance check.
    if isinstance(actions, list):
        action = actions[0]
    else:
        action = actions
    return action['interpretation']
def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
) | Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time. | Below is the instruction that describes the task:
### Input:
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
### Response:
def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
) |
def read_properties(filename):
"""read properties file into bunch.
:param filename: string
:rtype: bunch (dict like and object like)
"""
s = path(filename).text()
dummy_section = 'xxx'
cfgparser = configparser.RawConfigParser()
# avoid converting options to lower case
cfgparser.optionxform = str
cfgparser.readfp(StringIO('[%s]\n' % dummy_section + s))
bunch = AutoBunch()
for x in cfgparser.options(dummy_section):
setattr(bunch, x, cfgparser.get(dummy_section, str(x)))
return bunch | read properties file into bunch.
:param filename: string
:rtype: bunch (dict like and object like) | Below is the instruction that describes the task:
### Input:
read properties file into bunch.
:param filename: string
:rtype: bunch (dict like and object like)
### Response:
def read_properties(filename):
"""read properties file into bunch.
:param filename: string
:rtype: bunch (dict like and object like)
"""
s = path(filename).text()
dummy_section = 'xxx'
cfgparser = configparser.RawConfigParser()
# avoid converting options to lower case
cfgparser.optionxform = str
cfgparser.readfp(StringIO('[%s]\n' % dummy_section + s))
bunch = AutoBunch()
for x in cfgparser.options(dummy_section):
setattr(bunch, x, cfgparser.get(dummy_section, str(x)))
return bunch |
def run_command(provider, context, command, capture=False, interactive=False,
ignore_status=False, quiet=False, stdin=None, env=None,
**kwargs):
"""Run a custom command as a hook
Keyword Arguments:
command (list or str):
Command to run
capture (bool, optional):
If enabled, capture the command's stdout and stderr, and return
them in the hook result. Default: false
interactive (bool, optional):
If enabled, allow the command to interact with stdin. Otherwise,
stdin will be set to the null device. Default: false
ignore_status (bool, optional):
Don't fail the hook if the command returns a non-zero status.
Default: false
quiet (bool, optional):
Redirect the command's stdout and stderr to the null device,
silencing all output. Should not be enaled if `capture` is also
enabled. Default: false
stdin (str, optional):
String to send to the stdin of the command. Implicitly disables
`interactive`.
env (dict, optional):
Dictionary of environment variable overrides for the command
context. Will be merged with the current environment.
**kwargs:
Any other arguments will be forwarded to the `subprocess.Popen`
function. Interesting ones include: `cwd` and `shell`.
Examples:
.. code-block:: yaml
pre_build:
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: copy_env
args:
command: ['cp', 'environment.template', 'environment']
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: get_git_commit
args:
command: ['git', 'rev-parse', 'HEAD']
cwd: ./my-git-repo
capture: true
- path: stacker.hooks.command.run_command
args:
command: `cd $PROJECT_DIR/project; npm install'
env:
PROJECT_DIR: ./my-project
shell: true
"""
if quiet and capture:
raise ImproperlyConfigured(
__name__ + '.run_command',
'Cannot enable `quiet` and `capture` options simultaneously')
if quiet:
out_err_type = _devnull()
elif capture:
out_err_type = PIPE
else:
out_err_type = None
if interactive:
in_type = None
elif stdin:
in_type = PIPE
else:
in_type = _devnull()
if env:
full_env = os.environ.copy()
full_env.update(env)
env = full_env
logger.info('Running command: %s', command)
proc = Popen(command, stdin=in_type, stdout=out_err_type,
stderr=out_err_type, env=env, **kwargs)
try:
out, err = proc.communicate(stdin)
status = proc.wait()
if status == 0 or ignore_status:
return {
'returncode': proc.returncode,
'stdout': out,
'stderr': err
}
# Don't print the command line again if we already did earlier
if logger.isEnabledFor(logging.INFO):
logger.warn('Command failed with returncode %d', status)
else:
logger.warn('Command failed with returncode %d: %s', status,
command)
return None
finally:
if proc.returncode is None:
proc.kill() | Run a custom command as a hook
Keyword Arguments:
command (list or str):
Command to run
capture (bool, optional):
If enabled, capture the command's stdout and stderr, and return
them in the hook result. Default: false
interactive (bool, optional):
If enabled, allow the command to interact with stdin. Otherwise,
stdin will be set to the null device. Default: false
ignore_status (bool, optional):
Don't fail the hook if the command returns a non-zero status.
Default: false
quiet (bool, optional):
Redirect the command's stdout and stderr to the null device,
silencing all output. Should not be enaled if `capture` is also
enabled. Default: false
stdin (str, optional):
String to send to the stdin of the command. Implicitly disables
`interactive`.
env (dict, optional):
Dictionary of environment variable overrides for the command
context. Will be merged with the current environment.
**kwargs:
Any other arguments will be forwarded to the `subprocess.Popen`
function. Interesting ones include: `cwd` and `shell`.
Examples:
.. code-block:: yaml
pre_build:
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: copy_env
args:
command: ['cp', 'environment.template', 'environment']
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: get_git_commit
args:
command: ['git', 'rev-parse', 'HEAD']
cwd: ./my-git-repo
capture: true
- path: stacker.hooks.command.run_command
args:
command: `cd $PROJECT_DIR/project; npm install'
env:
PROJECT_DIR: ./my-project
shell: true | Below is the instruction that describes the task:
### Input:
Run a custom command as a hook
Keyword Arguments:
command (list or str):
Command to run
capture (bool, optional):
If enabled, capture the command's stdout and stderr, and return
them in the hook result. Default: false
interactive (bool, optional):
If enabled, allow the command to interact with stdin. Otherwise,
stdin will be set to the null device. Default: false
ignore_status (bool, optional):
Don't fail the hook if the command returns a non-zero status.
Default: false
quiet (bool, optional):
Redirect the command's stdout and stderr to the null device,
silencing all output. Should not be enaled if `capture` is also
enabled. Default: false
stdin (str, optional):
String to send to the stdin of the command. Implicitly disables
`interactive`.
env (dict, optional):
Dictionary of environment variable overrides for the command
context. Will be merged with the current environment.
**kwargs:
Any other arguments will be forwarded to the `subprocess.Popen`
function. Interesting ones include: `cwd` and `shell`.
Examples:
.. code-block:: yaml
pre_build:
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: copy_env
args:
command: ['cp', 'environment.template', 'environment']
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: get_git_commit
args:
command: ['git', 'rev-parse', 'HEAD']
cwd: ./my-git-repo
capture: true
- path: stacker.hooks.command.run_command
args:
command: `cd $PROJECT_DIR/project; npm install'
env:
PROJECT_DIR: ./my-project
shell: true
### Response:
def run_command(provider, context, command, capture=False, interactive=False,
ignore_status=False, quiet=False, stdin=None, env=None,
**kwargs):
"""Run a custom command as a hook
Keyword Arguments:
command (list or str):
Command to run
capture (bool, optional):
If enabled, capture the command's stdout and stderr, and return
them in the hook result. Default: false
interactive (bool, optional):
If enabled, allow the command to interact with stdin. Otherwise,
stdin will be set to the null device. Default: false
ignore_status (bool, optional):
Don't fail the hook if the command returns a non-zero status.
Default: false
quiet (bool, optional):
Redirect the command's stdout and stderr to the null device,
silencing all output. Should not be enaled if `capture` is also
enabled. Default: false
stdin (str, optional):
String to send to the stdin of the command. Implicitly disables
`interactive`.
env (dict, optional):
Dictionary of environment variable overrides for the command
context. Will be merged with the current environment.
**kwargs:
Any other arguments will be forwarded to the `subprocess.Popen`
function. Interesting ones include: `cwd` and `shell`.
Examples:
.. code-block:: yaml
pre_build:
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: copy_env
args:
command: ['cp', 'environment.template', 'environment']
- path: stacker.hooks.command.run_command
required: true
enabled: true
data_key: get_git_commit
args:
command: ['git', 'rev-parse', 'HEAD']
cwd: ./my-git-repo
capture: true
- path: stacker.hooks.command.run_command
args:
command: `cd $PROJECT_DIR/project; npm install'
env:
PROJECT_DIR: ./my-project
shell: true
"""
if quiet and capture:
raise ImproperlyConfigured(
__name__ + '.run_command',
'Cannot enable `quiet` and `capture` options simultaneously')
if quiet:
out_err_type = _devnull()
elif capture:
out_err_type = PIPE
else:
out_err_type = None
if interactive:
in_type = None
elif stdin:
in_type = PIPE
else:
in_type = _devnull()
if env:
full_env = os.environ.copy()
full_env.update(env)
env = full_env
logger.info('Running command: %s', command)
proc = Popen(command, stdin=in_type, stdout=out_err_type,
stderr=out_err_type, env=env, **kwargs)
try:
out, err = proc.communicate(stdin)
status = proc.wait()
if status == 0 or ignore_status:
return {
'returncode': proc.returncode,
'stdout': out,
'stderr': err
}
# Don't print the command line again if we already did earlier
if logger.isEnabledFor(logging.INFO):
logger.warn('Command failed with returncode %d', status)
else:
logger.warn('Command failed with returncode %d: %s', status,
command)
return None
finally:
if proc.returncode is None:
proc.kill() |
def _read_color_images(self, num_images):
""" Reads color images from the device """
color_images = self._ros_read_images(self._color_image_buffer, num_images, self.staleness_limit)
for i in range(0, num_images):
if self._flip_images:
color_images[i] = np.flipud(color_images[i].astype(np.uint8))
color_images[i] = np.fliplr(color_images[i].astype(np.uint8))
color_images[i] = ColorImage(color_images[i], frame=self._frame)
return color_images | Reads color images from the device | Below is the instruction that describes the task:
### Input:
Reads color images from the device
### Response:
def _read_color_images(self, num_images):
""" Reads color images from the device """
color_images = self._ros_read_images(self._color_image_buffer, num_images, self.staleness_limit)
for i in range(0, num_images):
if self._flip_images:
color_images[i] = np.flipud(color_images[i].astype(np.uint8))
color_images[i] = np.fliplr(color_images[i].astype(np.uint8))
color_images[i] = ColorImage(color_images[i], frame=self._frame)
return color_images |
def deploy_axis_func(
cls, axis, func, num_splits, kwargs, maintain_partitioning, *partitions
):
"""Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
maintain_partitioning: If True, keep the old partitioning if possible.
If False, create a new partition layout.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames.
"""
# Pop these off first because they aren't expected by the function.
manual_partition = kwargs.pop("manual_partition", False)
lengths = kwargs.pop("_lengths", None)
dataframe = pandas.concat(partitions, axis=axis, copy=False)
result = func(dataframe, **kwargs)
if isinstance(result, pandas.Series):
if num_splits == 1:
return result
return [result] + [pandas.Series([]) for _ in range(num_splits - 1)]
if manual_partition:
# The split function is expecting a list
lengths = list(lengths)
# We set lengths to None so we don't use the old lengths for the resulting partition
# layout. This is done if the number of splits is changing or we are told not to
# keep the old partitioning.
elif num_splits != len(partitions) or not maintain_partitioning:
lengths = None
else:
if axis == 0:
lengths = [len(part) for part in partitions]
if sum(lengths) != len(result):
lengths = None
else:
lengths = [len(part.columns) for part in partitions]
if sum(lengths) != len(result.columns):
lengths = None
return split_result_of_axis_func_pandas(axis, num_splits, result, lengths) | Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
maintain_partitioning: If True, keep the old partitioning if possible.
If False, create a new partition layout.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames. | Below is the instruction that describes the task:
### Input:
Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
maintain_partitioning: If True, keep the old partitioning if possible.
If False, create a new partition layout.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames.
### Response:
def deploy_axis_func(
cls, axis, func, num_splits, kwargs, maintain_partitioning, *partitions
):
"""Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
maintain_partitioning: If True, keep the old partitioning if possible.
If False, create a new partition layout.
partitions: All partitions that make up the full axis (row or column)
Returns:
A list of Pandas DataFrames.
"""
# Pop these off first because they aren't expected by the function.
manual_partition = kwargs.pop("manual_partition", False)
lengths = kwargs.pop("_lengths", None)
dataframe = pandas.concat(partitions, axis=axis, copy=False)
result = func(dataframe, **kwargs)
if isinstance(result, pandas.Series):
if num_splits == 1:
return result
return [result] + [pandas.Series([]) for _ in range(num_splits - 1)]
if manual_partition:
# The split function is expecting a list
lengths = list(lengths)
# We set lengths to None so we don't use the old lengths for the resulting partition
# layout. This is done if the number of splits is changing or we are told not to
# keep the old partitioning.
elif num_splits != len(partitions) or not maintain_partitioning:
lengths = None
else:
if axis == 0:
lengths = [len(part) for part in partitions]
if sum(lengths) != len(result):
lengths = None
else:
lengths = [len(part.columns) for part in partitions]
if sum(lengths) != len(result.columns):
lengths = None
return split_result_of_axis_func_pandas(axis, num_splits, result, lengths) |
def set_index_edited(self, index, edited):
"""Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
"""
self.__edited[index.row()] = edited
self.dataChanged.emit(index, index) | Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None | Below is the instruction that describes the task:
### Input:
Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
### Response:
def set_index_edited(self, index, edited):
"""Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
"""
self.__edited[index.row()] = edited
self.dataChanged.emit(index, index) |
def populate_event_que(self, que_obj):
"""Populates the event queue object.
This is for sending router events to event handler.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_event_que(que_obj) | Populates the event queue object.
This is for sending router events to event handler. | Below is the instruction that describes the task:
### Input:
Populates the event queue object.
This is for sending router events to event handler.
### Response:
def populate_event_que(self, que_obj):
"""Populates the event queue object.
This is for sending router events to event handler.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_event_que(que_obj) |
def draw_panel(self, data, panel_params, coord, ax, **params):
"""
Plot all groups
For effeciency, geoms that do not need to partition
different groups before plotting should override this
method and avoid the groupby.
Parameters
----------
data : dataframe
Data to be plotted by this geom. This is the
dataframe created in the plot_build pipeline.
panel_params : dict
The scale information as may be required by the
axes. At this point, that information is about
ranges, ticks and labels. Keys of interest to
the geom are::
'x_range' # tuple
'y_range' # tuple
coord : coord
Coordinate (e.g. coord_cartesian) system of the
geom.
ax : axes
Axes on which to plot.
params : dict
Combined parameters for the geom and stat. Also
includes the 'zorder'.
"""
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
self.draw_group(gdata, panel_params, coord, ax, **params) | Plot all groups
For effeciency, geoms that do not need to partition
different groups before plotting should override this
method and avoid the groupby.
Parameters
----------
data : dataframe
Data to be plotted by this geom. This is the
dataframe created in the plot_build pipeline.
panel_params : dict
The scale information as may be required by the
axes. At this point, that information is about
ranges, ticks and labels. Keys of interest to
the geom are::
'x_range' # tuple
'y_range' # tuple
coord : coord
Coordinate (e.g. coord_cartesian) system of the
geom.
ax : axes
Axes on which to plot.
params : dict
Combined parameters for the geom and stat. Also
includes the 'zorder'. | Below is the instruction that describes the task:
### Input:
Plot all groups
For effeciency, geoms that do not need to partition
different groups before plotting should override this
method and avoid the groupby.
Parameters
----------
data : dataframe
Data to be plotted by this geom. This is the
dataframe created in the plot_build pipeline.
panel_params : dict
The scale information as may be required by the
axes. At this point, that information is about
ranges, ticks and labels. Keys of interest to
the geom are::
'x_range' # tuple
'y_range' # tuple
coord : coord
Coordinate (e.g. coord_cartesian) system of the
geom.
ax : axes
Axes on which to plot.
params : dict
Combined parameters for the geom and stat. Also
includes the 'zorder'.
### Response:
def draw_panel(self, data, panel_params, coord, ax, **params):
"""
Plot all groups
For effeciency, geoms that do not need to partition
different groups before plotting should override this
method and avoid the groupby.
Parameters
----------
data : dataframe
Data to be plotted by this geom. This is the
dataframe created in the plot_build pipeline.
panel_params : dict
The scale information as may be required by the
axes. At this point, that information is about
ranges, ticks and labels. Keys of interest to
the geom are::
'x_range' # tuple
'y_range' # tuple
coord : coord
Coordinate (e.g. coord_cartesian) system of the
geom.
ax : axes
Axes on which to plot.
params : dict
Combined parameters for the geom and stat. Also
includes the 'zorder'.
"""
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
self.draw_group(gdata, panel_params, coord, ax, **params) |
def run(self):
"""For each file in noseOfYeti/specs, output nodes to represent each spec file"""
tokens = []
section = nodes.section()
section['ids'].append("available-tasks")
title = nodes.title()
title += nodes.Text("Default tasks")
section += title
task_finder = TaskFinder(Collector())
for name, task in sorted(task_finder.default_tasks().items(), key=lambda x: len(x[0])):
lines = [name] + [" {0}".format(line.strip()) for line in task.description.split('\n')]
viewlist = ViewList()
for line in lines:
viewlist.append(line, name)
self.state.nested_parse(viewlist, self.content_offset, section)
return [section] | For each file in noseOfYeti/specs, output nodes to represent each spec file | Below is the instruction that describes the task:
### Input:
For each file in noseOfYeti/specs, output nodes to represent each spec file
### Response:
def run(self):
"""For each file in noseOfYeti/specs, output nodes to represent each spec file"""
tokens = []
section = nodes.section()
section['ids'].append("available-tasks")
title = nodes.title()
title += nodes.Text("Default tasks")
section += title
task_finder = TaskFinder(Collector())
for name, task in sorted(task_finder.default_tasks().items(), key=lambda x: len(x[0])):
lines = [name] + [" {0}".format(line.strip()) for line in task.description.split('\n')]
viewlist = ViewList()
for line in lines:
viewlist.append(line, name)
self.state.nested_parse(viewlist, self.content_offset, section)
return [section] |
def add_metrics(self, metrics: Iterable[float]) -> None:
"""
Helper to add multiple metrics at once.
"""
for metric in metrics:
self.add_metric(metric) | Helper to add multiple metrics at once. | Below is the instruction that describes the task:
### Input:
Helper to add multiple metrics at once.
### Response:
def add_metrics(self, metrics: Iterable[float]) -> None:
"""
Helper to add multiple metrics at once.
"""
for metric in metrics:
self.add_metric(metric) |
def run_mace_smothr(x, y, bass_enhancement=0.0): # pylint: disable=unused-argument
"""Run the FORTRAN SMOTHR."""
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
flags = numpy.zeros((N, 7))
mace.smothr(1, x, y, weight, results, flags)
return results | Run the FORTRAN SMOTHR. | Below is the instruction that describes the task:
### Input:
Run the FORTRAN SMOTHR.
### Response:
def run_mace_smothr(x, y, bass_enhancement=0.0): # pylint: disable=unused-argument
"""Run the FORTRAN SMOTHR."""
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
flags = numpy.zeros((N, 7))
mace.smothr(1, x, y, weight, results, flags)
return results |
def _draw(self, mode, vertex_list=None):
'''Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
for buffer, attributes in self.buffer_attributes:
buffer.bind()
for attribute in attributes:
attribute.enable()
attribute.set_pointer(attribute.buffer.ptr)
if vertexbuffer._workaround_vbo_finish:
glFinish()
if vertex_list is not None:
glDrawArrays(mode, vertex_list.start, vertex_list.count)
else:
starts, sizes = self.allocator.get_allocated_regions()
primcount = len(starts)
if primcount == 0:
pass
elif primcount == 1:
# Common case
glDrawArrays(mode, starts[0], int(sizes[0]))
elif gl_info.have_version(1, 4):
starts = (GLint * primcount)(*starts)
sizes = (GLsizei * primcount)(*sizes)
glMultiDrawArrays(mode, starts, sizes, primcount)
else:
for start, size in zip(starts, sizes):
glDrawArrays(mode, start, size)
for buffer, _ in self.buffer_attributes:
buffer.unbind()
glPopClientAttrib() | Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain. | Below is the instruction that describes the task:
### Input:
Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
### Response:
def _draw(self, mode, vertex_list=None):
'''Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
for buffer, attributes in self.buffer_attributes:
buffer.bind()
for attribute in attributes:
attribute.enable()
attribute.set_pointer(attribute.buffer.ptr)
if vertexbuffer._workaround_vbo_finish:
glFinish()
if vertex_list is not None:
glDrawArrays(mode, vertex_list.start, vertex_list.count)
else:
starts, sizes = self.allocator.get_allocated_regions()
primcount = len(starts)
if primcount == 0:
pass
elif primcount == 1:
# Common case
glDrawArrays(mode, starts[0], int(sizes[0]))
elif gl_info.have_version(1, 4):
starts = (GLint * primcount)(*starts)
sizes = (GLsizei * primcount)(*sizes)
glMultiDrawArrays(mode, starts, sizes, primcount)
else:
for start, size in zip(starts, sizes):
glDrawArrays(mode, start, size)
for buffer, _ in self.buffer_attributes:
buffer.unbind()
glPopClientAttrib() |
def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
encoded_image = image_f.read()
else:
encoded_image = image_or_path_or_fobj.read()
return encoded_image | Convert the given image into a dict convertible to tf example. | Below is the instruction that describes the task:
### Input:
Convert the given image into a dict convertible to tf example.
### Response:
def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinstance(image_or_path_or_fobj, six.string_types):
with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f:
encoded_image = image_f.read()
else:
encoded_image = image_or_path_or_fobj.read()
return encoded_image |
def _get_asn1_time(timestamp):
"""
Retrieve the time value of an ASN1 time object.
@param timestamp: An ASN1_GENERALIZEDTIME* (or an object safely castable to
that type) from which the time value will be retrieved.
@return: The time value from C{timestamp} as a L{bytes} string in a certain
format. Or C{None} if the object contains no time value.
"""
string_timestamp = _ffi.cast('ASN1_STRING*', timestamp)
if _lib.ASN1_STRING_length(string_timestamp) == 0:
return None
elif (
_lib.ASN1_STRING_type(string_timestamp) == _lib.V_ASN1_GENERALIZEDTIME
):
return _ffi.string(_lib.ASN1_STRING_data(string_timestamp))
else:
generalized_timestamp = _ffi.new("ASN1_GENERALIZEDTIME**")
_lib.ASN1_TIME_to_generalizedtime(timestamp, generalized_timestamp)
if generalized_timestamp[0] == _ffi.NULL:
# This may happen:
# - if timestamp was not an ASN1_TIME
# - if allocating memory for the ASN1_GENERALIZEDTIME failed
# - if a copy of the time data from timestamp cannot be made for
# the newly allocated ASN1_GENERALIZEDTIME
#
# These are difficult to test. cffi enforces the ASN1_TIME type.
# Memory allocation failures are a pain to trigger
# deterministically.
_untested_error("ASN1_TIME_to_generalizedtime")
else:
string_timestamp = _ffi.cast(
"ASN1_STRING*", generalized_timestamp[0])
string_data = _lib.ASN1_STRING_data(string_timestamp)
string_result = _ffi.string(string_data)
_lib.ASN1_GENERALIZEDTIME_free(generalized_timestamp[0])
return string_result | Retrieve the time value of an ASN1 time object.
@param timestamp: An ASN1_GENERALIZEDTIME* (or an object safely castable to
that type) from which the time value will be retrieved.
@return: The time value from C{timestamp} as a L{bytes} string in a certain
format. Or C{None} if the object contains no time value. | Below is the instruction that describes the task:
### Input:
Retrieve the time value of an ASN1 time object.
@param timestamp: An ASN1_GENERALIZEDTIME* (or an object safely castable to
that type) from which the time value will be retrieved.
@return: The time value from C{timestamp} as a L{bytes} string in a certain
format. Or C{None} if the object contains no time value.
### Response:
def _get_asn1_time(timestamp):
    """
    Retrieve the time value of an ASN1 time object.
    @param timestamp: An ASN1_GENERALIZEDTIME* (or an object safely castable to
        that type) from which the time value will be retrieved.
    @return: The time value from C{timestamp} as a L{bytes} string in a certain
        format. Or C{None} if the object contains no time value.
    """
    # An ASN1 time object can be inspected through the ASN1_STRING interface.
    string_timestamp = _ffi.cast('ASN1_STRING*', timestamp)
    if _lib.ASN1_STRING_length(string_timestamp) == 0:
        # Zero length means the object carries no time value.
        return None
    elif (
        _lib.ASN1_STRING_type(string_timestamp) == _lib.V_ASN1_GENERALIZEDTIME
    ):
        # Already GENERALIZEDTIME: return its raw byte data directly.
        return _ffi.string(_lib.ASN1_STRING_data(string_timestamp))
    else:
        # Some other ASN1 time encoding: convert into a newly allocated
        # ASN1_GENERALIZEDTIME, which we own and must free below.
        generalized_timestamp = _ffi.new("ASN1_GENERALIZEDTIME**")
        _lib.ASN1_TIME_to_generalizedtime(timestamp, generalized_timestamp)
        if generalized_timestamp[0] == _ffi.NULL:
            # This may happen:
            # - if timestamp was not an ASN1_TIME
            # - if allocating memory for the ASN1_GENERALIZEDTIME failed
            # - if a copy of the time data from timestamp cannot be made for
            # the newly allocated ASN1_GENERALIZEDTIME
            #
            # These are difficult to test. cffi enforces the ASN1_TIME type.
            # Memory allocation failures are a pain to trigger
            # deterministically.
            _untested_error("ASN1_TIME_to_generalizedtime")
        else:
            string_timestamp = _ffi.cast(
                "ASN1_STRING*", generalized_timestamp[0])
            string_data = _lib.ASN1_STRING_data(string_timestamp)
            # Copy the bytes out before freeing the converted object.
            string_result = _ffi.string(string_data)
            _lib.ASN1_GENERALIZEDTIME_free(generalized_timestamp[0])
            return string_result
def dumps(self):
"""Represent the multicolumn as a string in LaTeX syntax.
Returns
-------
str
"""
args = [self.size, self.align, self.dumps_content()]
string = Command(self.latex_name, args).dumps()
return string | Represent the multicolumn as a string in LaTeX syntax.
Returns
-------
str | Below is the instruction that describes the task:
### Input:
Represent the multicolumn as a string in LaTeX syntax.
Returns
-------
str
### Response:
def dumps(self):
    """Render this multicolumn as LaTeX source.

    Returns
    -------
    str
        The LaTeX representation of the multicolumn.
    """
    # Delegate serialization to a Command built from this node's pieces.
    command_args = [self.size, self.align, self.dumps_content()]
    return Command(self.latex_name, command_args).dumps()
def get_file_type(variant_source):
"""Check what kind of file variant source is
Args:
variant_source (str): Path to variant source
Returns:
file_type (str): 'vcf', 'gemini' or 'unknown'
"""
file_type = 'unknown'
valid_vcf_suffixes = ('.vcf', '.vcf.gz')
if variant_source:
logger.debug("Check file type with file: {0}".format(variant_source))
if variant_source.endswith('.db'):
file_type = 'gemini'
logger.debug("File {0} is a gemini database".format(variant_source))
elif variant_source.endswith(valid_vcf_suffixes):
file_type = 'vcf'
logger.debug("File {0} is a vcf".format(variant_source))
else:
logger.debug("File is in a unknown format")
return file_type | Check what kind of file variant source is
Args:
variant_source (str): Path to variant source
Returns:
file_type (str): 'vcf', 'gemini' or 'unknown' | Below is the instruction that describes the task:
### Input:
Check what kind of file variant source is
Args:
variant_source (str): Path to variant source
Returns:
file_type (str): 'vcf', 'gemini' or 'unknown'
### Response:
def get_file_type(variant_source):
    """Classify the format of a variant source path.

    Args:
        variant_source (str): Path to variant source

    Returns:
        file_type (str): 'vcf', 'gemini' or 'unknown'
    """
    # Falsy input (None, empty string) cannot be classified.
    if not variant_source:
        return 'unknown'
    logger.debug("Check file type with file: {0}".format(variant_source))
    if variant_source.endswith('.db'):
        logger.debug("File {0} is a gemini database".format(variant_source))
        return 'gemini'
    if variant_source.endswith(('.vcf', '.vcf.gz')):
        logger.debug("File {0} is a vcf".format(variant_source))
        return 'vcf'
    logger.debug("File is in a unknown format")
    return 'unknown'
def _process_sentences(self, sents_list):
"""
Divide an input string to a list of substrings based on window_length and curse_forward values
:param sents_list: list [str]
:return: list [object]
"""
processed_sents = []
for sent in sents_list:
processed_sent = {
'text' : sent
}
# If the class is set to santize input before comparison, do so
if self.sanitize_input:
processed_sent['sanitized'] = self._sanitize(sent),
processed_sents.append(processed_sent)
return processed_sents | Divide an input string to a list of substrings based on window_length and curse_forward values
:param sents_list: list [str]
:return: list [object] | Below is the instruction that describes the task:
### Input:
Divide an input string to a list of substrings based on window_length and curse_forward values
:param sents_list: list [str]
:return: list [object]
### Response:
def _process_sentences(self, sents_list):
"""
Divide an input string to a list of substrings based on window_length and curse_forward values
:param sents_list: list [str]
:return: list [object]
"""
processed_sents = []
for sent in sents_list:
processed_sent = {
'text' : sent
}
# If the class is set to santize input before comparison, do so
if self.sanitize_input:
processed_sent['sanitized'] = self._sanitize(sent),
processed_sents.append(processed_sent)
return processed_sents |
def _descriptor_changed(self, descriptor):
"""Called when the specified descriptor has changed its value."""
# Tell the descriptor it has a new value to read.
desc = descriptor_list().get(descriptor)
if desc is not None:
        desc._value_read.set() | Called when the specified descriptor has changed its value. | Below is the instruction that describes the task:
### Input:
Called when the specified descriptor has changed its value.
### Response:
def _descriptor_changed(self, descriptor):
    """Called when the specified descriptor has changed its value."""
    # Look up our wrapper for this descriptor; if one is registered,
    # signal that a fresh value is available to read.
    wrapper = descriptor_list().get(descriptor)
    if wrapper is None:
        return
    wrapper._value_read.set()
def rdf_source(self, format="turtle"):
"""
Wrapper for rdflib serializer method.
Valid options are: xml, n3, turtle, nt, pretty-xml, json-ld [trix not working out of the box]
"""
s = self.rdflib_graph.serialize(format=format)
if isinstance(s, bytes):
s = s.decode('utf-8')
return s | Wrapper for rdflib serializer method.
Valid options are: xml, n3, turtle, nt, pretty-xml, json-ld [trix not working out of the box] | Below is the instruction that describes the task:
### Input:
Wrapper for rdflib serializer method.
Valid options are: xml, n3, turtle, nt, pretty-xml, json-ld [trix not working out of the box]
### Response:
def rdf_source(self, format="turtle"):
    """
    Wrapper for rdflib serializer method.
    Valid options are: xml, n3, turtle, nt, pretty-xml, json-ld [trix not working out of the box]
    """
    # rdflib may return bytes depending on version/format; normalize to str.
    serialized = self.rdflib_graph.serialize(format=format)
    if isinstance(serialized, bytes):
        return serialized.decode('utf-8')
    return serialized
def pack(array, sub_field_array, mask, inplace=False):
""" Packs a sub field's array into another array using a mask
Parameters:
----------
array : numpy.ndarray
The array in which the sub field array will be packed into
array_in : numpy.ndarray
sub field array to pack
mask : mask (ie: 0b00001111)
Mask of the sub field
inplace : {bool}, optional
If true a new array is returned. (the default is False, which modifies the array in place)
Raises
------
OverflowError
If the values contained in the sub field array are greater than its mask's number of bits
allows
"""
lsb = least_significant_bit(mask)
max_value = int(mask >> lsb)
if sub_field_array.max() > max_value:
raise OverflowError(
"value ({}) is greater than allowed (max: {})".format(
sub_field_array.max(), max_value
)
)
if inplace:
array[:] = array & ~mask
array[:] = array | ((sub_field_array << lsb) & mask).astype(array.dtype)
else:
array = array & ~mask
return array | ((sub_field_array << lsb) & mask).astype(array.dtype) | Packs a sub field's array into another array using a mask
Parameters:
----------
array : numpy.ndarray
The array in which the sub field array will be packed into
array_in : numpy.ndarray
sub field array to pack
mask : mask (ie: 0b00001111)
Mask of the sub field
inplace : {bool}, optional
If true a new array is returned. (the default is False, which modifies the array in place)
Raises
------
OverflowError
If the values contained in the sub field array are greater than its mask's number of bits
allows | Below is the the instruction that describes the task:
### Input:
Packs a sub field's array into another array using a mask
Parameters:
----------
array : numpy.ndarray
The array in which the sub field array will be packed into
array_in : numpy.ndarray
sub field array to pack
mask : mask (ie: 0b00001111)
Mask of the sub field
inplace : {bool}, optional
If true a new array is returned. (the default is False, which modifies the array in place)
Raises
------
OverflowError
If the values contained in the sub field array are greater than its mask's number of bits
allows
### Response:
def pack(array, sub_field_array, mask, inplace=False):
    """ Packs a sub field's array into another array using a mask

    Parameters:
    ----------
    array : numpy.ndarray
        The array in which the sub field array will be packed into
    sub_field_array : numpy.ndarray
        sub field array to pack
    mask : mask (ie: 0b00001111)
        Mask of the sub field
    inplace : {bool}, optional
        If True, ``array`` is modified in place and nothing is returned.
        The default is False, which leaves ``array`` untouched and returns
        a new array with the sub field packed in.
        (The original docstring stated the opposite of what the code does.)

    Raises
    ------
    OverflowError
        If the values contained in the sub field array are greater than its mask's number of bits
        allows
    """
    lsb = least_significant_bit(mask)
    # Largest value representable inside the masked bit range.
    max_value = int(mask >> lsb)
    if sub_field_array.max() > max_value:
        raise OverflowError(
            "value ({}) is greater than allowed (max: {})".format(
                sub_field_array.max(), max_value
            )
        )
    if inplace:
        # Clear the masked bits, then OR in the shifted sub-field values.
        array[:] = array & ~mask
        array[:] = array | ((sub_field_array << lsb) & mask).astype(array.dtype)
    else:
        array = array & ~mask
        return array | ((sub_field_array << lsb) & mask).astype(array.dtype)
def watchdog_pid():
"""Get watchdog PID via ``netstat``."""
result = sh('netstat -tulpn 2>/dev/null | grep 127.0.0.1:{:d}'
.format(SPHINX_AUTOBUILD_PORT), capture=True, ignore_error=True)
pid = result.strip()
pid = pid.split()[-1] if pid else None
pid = pid.split('/', 1)[0] if pid and pid != '-' else None
return pid | Get watchdog PID via ``netstat``. | Below is the the instruction that describes the task:
### Input:
Get watchdog PID via ``netstat``.
### Response:
def watchdog_pid():
    """Get watchdog PID via ``netstat``."""
    # Ask netstat which process is bound to the autobuild port.
    output = sh('netstat -tulpn 2>/dev/null | grep 127.0.0.1:{:d}'
                .format(SPHINX_AUTOBUILD_PORT), capture=True, ignore_error=True)
    stripped = output.strip()
    if not stripped:
        return None
    # Last column is "pid/program"; netstat prints '-' when unknown.
    pid_field = stripped.split()[-1]
    if pid_field == '-':
        return None
    return pid_field.split('/', 1)[0]
def format_data(self, data, scale=True):
"""
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`.
"""
if len(self.analytes) == 1:
# if single analyte
d = nominal_values(data[self.analytes[0]])
ds = np.array(list(zip(d, np.zeros(len(d)))))
else:
# package multiple analytes
d = [nominal_values(data[a]) for a in self.analytes]
ds = np.vstack(d).T
# identify all nan values
finite = np.isfinite(ds).sum(1) == ds.shape[1]
# remember which values are sampled
sampled = np.arange(data[self.analytes[0]].size)[finite]
# remove all nan values
ds = ds[finite]
if scale:
ds = self.scaler.transform(ds)
return ds, sampled | Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`. | Below is the the instruction that describes the task:
### Input:
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`.
### Response:
def format_data(self, data, scale=True):
    """
    Convert a dict of analyte data into an array suitable for sklearn.

    Parameters
    ----------
    data : dict
        A dict of data, containing all elements of
        `analytes` as items.
    scale : bool
        Whether or not to scale the data. Should always be
        `True`, unless used by `classifier.fitting_data`
        where a scaler hasn't been created yet.

    Returns
    -------
    tuple
        (array suitable for `sklearn.cluster`, indices of rows kept)
    """
    if len(self.analytes) == 1:
        # Single analyte: pair values with a zero column so the array is 2-D.
        values = nominal_values(data[self.analytes[0]])
        matrix = np.array(list(zip(values, np.zeros(len(values)))))
    else:
        # One row per analyte, transposed to samples-by-analytes.
        matrix = np.vstack([nominal_values(data[a]) for a in self.analytes]).T
    # Keep only rows where every analyte value is finite (drops nan/inf).
    keep = np.isfinite(matrix).sum(1) == matrix.shape[1]
    # Remember the original sample indices of the surviving rows.
    sampled = np.arange(data[self.analytes[0]].size)[keep]
    matrix = matrix[keep]
    if scale:
        matrix = self.scaler.transform(matrix)
    return matrix, sampled
def _request_activity_data(self, athlete, filename):
"""Actually do the request for activity filename
This call is slow and therefore this method is memory cached.
Keyword arguments:
athlete -- Full name of athlete
filename -- filename of request activity (e.g. \'2015_04_29_09_03_16.json\')
"""
response = self._get_request(self._activity_endpoint(athlete, filename)).json()
activity = pd.DataFrame(response['RIDE']['SAMPLES'])
activity = activity.rename(columns=ACTIVITY_COLUMN_TRANSLATION)
activity.index = pd.to_timedelta(activity.time, unit='s')
activity.drop('time', axis=1, inplace=True)
return activity[[i for i in ACTIVITY_COLUMN_ORDER if i in activity.columns]] | Actually do the request for activity filename
This call is slow and therefore this method is memory cached.
Keyword arguments:
athlete -- Full name of athlete
filename -- filename of request activity (e.g. \'2015_04_29_09_03_16.json\') | Below is the the instruction that describes the task:
### Input:
Actually do the request for activity filename
This call is slow and therefore this method is memory cached.
Keyword arguments:
athlete -- Full name of athlete
filename -- filename of request activity (e.g. \'2015_04_29_09_03_16.json\')
### Response:
def _request_activity_data(self, athlete, filename):
    """Actually do the request for activity filename

    This call is slow and therefore this method is memory cached.

    Keyword arguments:
    athlete -- Full name of athlete
    filename -- filename of request activity (e.g. \'2015_04_29_09_03_16.json\')
    """
    payload = self._get_request(self._activity_endpoint(athlete, filename)).json()
    frame = pd.DataFrame(payload['RIDE']['SAMPLES'])
    frame = frame.rename(columns=ACTIVITY_COLUMN_TRANSLATION)
    # Index by elapsed time instead of keeping it as a column.
    frame.index = pd.to_timedelta(frame.time, unit='s')
    frame.drop('time', axis=1, inplace=True)
    # Keep only the known columns, in canonical order.
    ordered = [c for c in ACTIVITY_COLUMN_ORDER if c in frame.columns]
    return frame[ordered]
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method) | Add all generated methods to result class. | Below is the the instruction that describes the task:
### Input:
Add all generated methods to result class.
### Response:
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method) |
def pull(src, dest):
"""
Pull object from target to host
:param src: string path of object on target
:param dest: string destination path on host
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PULL, src, dest]
return _exec_command(adb_full_cmd) | Pull object from target to host
:param src: string path of object on target
:param dest: string destination path on host
:return: result of _exec_command() execution | Below is the the instruction that describes the task:
### Input:
Pull object from target to host
:param src: string path of object on target
:param dest: string destination path on host
:return: result of _exec_command() execution
### Response:
def pull(src, dest):
    """
    Pull object from target to host
    :param src: string path of object on target
    :param dest: string destination path on host
    :return: result of _exec_command() execution
    """
    # Assemble "adb pull <src> <dest>" and hand it to the shared executor.
    command = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PULL, src, dest]
    return _exec_command(command)
def create(self, datastore_id=None):
"""POST /datastores/{datastore_id}/datasources: Create a new
datasource by file upload."""
datastore = self._get_ogrdatastore_by_id(datastore_id)
if not datastore:
abort(404)
# only "directory" datastores are supported at this point
if datastore.connection_type != 'directory':
abort(400)
# use mkstemp to store the file to avoid issues with
# NamedTemporaryFile that can't be open a second time
# on Windows. See:
# http://docs.python.org/library/tempfile.html#tempfile.NamedTemporaryFile
# "Whether the name can be used to open the file a second time, while the
# named temporary file is still open, varies across platforms (it can be so
# used on Unix; it cannot on Windows NT or later).
fd, fn = mkstemp()
tmpfile = os.fdopen(fd, 'wb')
fieldstorage = request.POST['datasources']
shutil.copyfileobj(fieldstorage.file, tmpfile)
fieldstorage.file.close()
tmpfile.close()
extractall(fn,
request.POST['datasources'].filename,
datastore.datastore_str)
    # we are responsible for removing the temporary file
os.remove(fn)
response.status = 201 | POST /datastores/{datastore_id}/datasources: Create a new
datasource by file upload. | Below is the the instruction that describes the task:
### Input:
POST /datastores/{datastore_id}/datasources: Create a new
datasource by file upload.
### Response:
def create(self, datastore_id=None):
    """POST /datastores/{datastore_id}/datasources: Create a new
    datasource by file upload.

    Aborts with 404 when the datastore does not exist and 400 when it is
    not a "directory" datastore.  On success the uploaded archive is
    extracted into the datastore and the response status is set to 201.
    """
    datastore = self._get_ogrdatastore_by_id(datastore_id)
    if not datastore:
        abort(404)
    # only "directory" datastores are supported at this point
    if datastore.connection_type != 'directory':
        abort(400)
    # use mkstemp to store the file to avoid issues with
    # NamedTemporaryFile that can't be open a second time
    # on Windows. See:
    # http://docs.python.org/library/tempfile.html#tempfile.NamedTemporaryFile
    # "Whether the name can be used to open the file a second time, while the
    # named temporary file is still open, varies across platforms (it can be so
    # used on Unix; it cannot on Windows NT or later).
    fd, fn = mkstemp()
    tmpfile = os.fdopen(fd, 'wb')
    # Spool the uploaded field storage to the temporary file on disk.
    fieldstorage = request.POST['datasources']
    shutil.copyfileobj(fieldstorage.file, tmpfile)
    fieldstorage.file.close()
    tmpfile.close()
    # Unpack the uploaded archive into the datastore's directory.
    # NOTE(review): archive format handled by extractall is not visible
    # here -- confirm against its definition.
    extractall(fn,
               request.POST['datasources'].filename,
               datastore.datastore_str)
    # we are responsible for removing the temporary file
    os.remove(fn)
    response.status = 201
response.status = 201 |
def check_for_file(self, share_name, directory_name, file_name, **kwargs):
"""
Check if a file exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.exists()` takes.
:type kwargs: object
:return: True if the file exists, False otherwise.
:rtype: bool
"""
return self.connection.exists(share_name, directory_name,
file_name, **kwargs) | Check if a file exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.exists()` takes.
:type kwargs: object
:return: True if the file exists, False otherwise.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Check if a file exists on Azure File Share.
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.exists()` takes.
:type kwargs: object
:return: True if the file exists, False otherwise.
:rtype: bool
### Response:
def check_for_file(self, share_name, directory_name, file_name, **kwargs):
    """
    Check if a file exists on Azure File Share.

    :param share_name: Name of the share.
    :type share_name: str
    :param directory_name: Name of the directory.
    :type directory_name: str
    :param file_name: Name of the file.
    :type file_name: str
    :param kwargs: Optional keyword arguments forwarded to
        `FileService.exists()`.
    :type kwargs: object
    :return: True if the file exists, False otherwise.
    :rtype: bool
    """
    # Thin delegation to the underlying FileService connection.
    return self.connection.exists(
        share_name, directory_name, file_name, **kwargs)
def main():
"""Main entry point for CLI commands."""
options = docopt(__doc__, version=__version__)
if options['segment']:
segment(
options['<file>'],
options['--output'],
options['--target-duration'],
options['--mpegts'],
) | Main entry point for CLI commands. | Below is the the instruction that describes the task:
### Input:
Main entry point for CLI commands.
### Response:
def main():
    """Main entry point for CLI commands."""
    # Parse argv against the module docstring (docopt usage spec).
    args = docopt(__doc__, version=__version__)
    if not args['segment']:
        return
    segment(
        args['<file>'],
        args['--output'],
        args['--target-duration'],
        args['--mpegts'],
    )
def dataSetMissingValue(h5Dataset):
""" Returns the missingData given a HDF-5 dataset
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found.
HDF-EOS and NetCDF files seem to put the attributes in 1-element arrays. So if the
attribute contains an array of one element, that first element is returned here.
"""
attributes = h5Dataset.attrs
if not attributes:
return None # a premature optimization :-)
for key in ('missing_value', 'MissingValue', 'missingValue', 'FillValue', '_FillValue'):
if key in attributes:
missingDataValue = attributes[key]
if is_an_array(missingDataValue) and len(missingDataValue) == 1:
return missingDataValue[0] # In case of HDF-EOS and NetCDF files
else:
return missingDataValue
return None | Returns the missingData given a HDF-5 dataset
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found.
HDF-EOS and NetCDF files seem to put the attributes in 1-element arrays. So if the
attribute contains an array of one element, that first element is returned here. | Below is the the instruction that describes the task:
### Input:
Returns the missingData given a HDF-5 dataset
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found.
HDF-EOS and NetCDF files seem to put the attributes in 1-element arrays. So if the
attribute contains an array of one element, that first element is returned here.
### Response:
def dataSetMissingValue(h5Dataset):
    """ Returns the missingData given a HDF-5 dataset

    Looks for one of the following attributes: _FillValue, missing_value,
    MissingValue, missingValue. Returns None if these attributes are not found.

    HDF-EOS and NetCDF files seem to put the attributes in 1-element arrays,
    in which case the single element is returned instead.
    """
    attrs = h5Dataset.attrs
    if not attrs:
        return None
    candidate_keys = ('missing_value', 'MissingValue', 'missingValue',
                      'FillValue', '_FillValue')
    for key in candidate_keys:
        if key not in attrs:
            continue
        value = attrs[key]
        # Unwrap 1-element arrays (HDF-EOS / NetCDF convention).
        if is_an_array(value) and len(value) == 1:
            return value[0]
        return value
    return None
def hoverLeaveEvent(self, event):
"""
Processes the hovering information for this node.
:param event | <QHoverEvent>
"""
if self._hoverSpot:
if self._hoverSpot.hoverLeaveEvent(event):
self.update()
self._hoverSpot = None
self._hovered = False
super(XNode, self).setToolTip(self._toolTip)
super(XNode, self).hoverLeaveEvent(event) | Processes the hovering information for this node.
:param event | <QHoverEvent> | Below is the the instruction that describes the task:
### Input:
Processes the hovering information for this node.
:param event | <QHoverEvent>
### Response:
def hoverLeaveEvent(self, event):
    """
    Processes the hovering information for this node.

    :param event | <QHoverEvent>
    """
    # Give the active hover spot a chance to react before clearing it;
    # a truthy return means it changed visually and we need a repaint.
    spot = self._hoverSpot
    if spot:
        if spot.hoverLeaveEvent(event):
            self.update()
        self._hoverSpot = None
    self._hovered = False
    # Restore the node-level tooltip and let Qt finish default handling.
    super(XNode, self).setToolTip(self._toolTip)
    super(XNode, self).hoverLeaveEvent(event)
def tox_get_python_executable(envconfig):
"""Return a python executable for the given python base name.
The first plugin/hook which returns an executable path will determine it.
``envconfig`` is the testenv configuration which contains
per-testenv configuration, notably the ``.envname`` and ``.basepython``
setting.
"""
try:
# pylint: disable=no-member
pyenv = (getattr(py.path.local.sysfind('pyenv'), 'strpath', 'pyenv')
or 'pyenv')
cmd = [pyenv, 'which', envconfig.basepython]
pipe = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
out, err = pipe.communicate()
except OSError:
err = '\'pyenv\': command not found'
LOG.warning(
"pyenv doesn't seem to be installed, you probably "
"don't want this plugin installed either."
)
else:
if pipe.poll() == 0:
return out.strip()
else:
if not envconfig.tox_pyenv_fallback:
raise PyenvWhichFailed(err)
LOG.debug("`%s` failed thru tox-pyenv plugin, falling back. "
"STDERR: \"%s\" | To disable this behavior, set "
"tox_pyenv_fallback=False in your tox.ini or use "
" --tox-pyenv-no-fallback on the command line.",
' '.join([str(x) for x in cmd]), err) | Return a python executable for the given python base name.
The first plugin/hook which returns an executable path will determine it.
``envconfig`` is the testenv configuration which contains
per-testenv configuration, notably the ``.envname`` and ``.basepython``
setting. | Below is the the instruction that describes the task:
### Input:
Return a python executable for the given python base name.
The first plugin/hook which returns an executable path will determine it.
``envconfig`` is the testenv configuration which contains
per-testenv configuration, notably the ``.envname`` and ``.basepython``
setting.
### Response:
def tox_get_python_executable(envconfig):
    """Return a python executable for the given python base name.
    The first plugin/hook which returns an executable path will determine it.
    ``envconfig`` is the testenv configuration which contains
    per-testenv configuration, notably the ``.envname`` and ``.basepython``
    setting.

    Returns the path printed by ``pyenv which`` on success; implicitly
    returns None (so tox falls back to its own lookup) when pyenv is not
    installed or fails and ``tox_pyenv_fallback`` is enabled.
    """
    try:
        # Prefer an absolute path to pyenv if py can locate it on PATH;
        # otherwise fall back to the bare command name 'pyenv'.
        # pylint: disable=no-member
        pyenv = (getattr(py.path.local.sysfind('pyenv'), 'strpath', 'pyenv')
                 or 'pyenv')
        cmd = [pyenv, 'which', envconfig.basepython]
        pipe = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True
        )
        out, err = pipe.communicate()
    except OSError:
        # The pyenv binary itself could not be executed.
        err = '\'pyenv\': command not found'
        LOG.warning(
            "pyenv doesn't seem to be installed, you probably "
            "don't want this plugin installed either."
        )
    else:
        if pipe.poll() == 0:
            # `pyenv which` succeeded; stdout holds the interpreter path.
            return out.strip()
        else:
            # Non-zero exit: either raise or log and fall through to None.
            if not envconfig.tox_pyenv_fallback:
                raise PyenvWhichFailed(err)
            LOG.debug("`%s` failed thru tox-pyenv plugin, falling back. "
                      "STDERR: \"%s\" | To disable this behavior, set "
                      "tox_pyenv_fallback=False in your tox.ini or use "
                      " --tox-pyenv-no-fallback on the command line.",
                      ' '.join([str(x) for x in cmd]), err)
def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None,
width=8, height=None, dpi=300):
"""
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot
"""
import palettable.colorbrewer.diverging
colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
c1 = colors[0]
c2 = colors[-1]
golden_ratio = (math.sqrt(5) - 1) / 2
if not height:
height = int(width * golden_ratio)
import matplotlib.pyplot as plt
width = 12
labelsize = int(width * 3)
ticksize = int(width * 2.5)
styles = ["-", "--", "-.", "."]
fig, ax1 = plt.subplots()
fig.set_size_inches((width, height))
if dpi:
fig.set_dpi(dpi)
if isinstance(y1, dict):
for i, (k, v) in enumerate(y1.items()):
ax1.plot(x, v, c=c1, marker='s', ls=styles[i % len(styles)],
label=k)
ax1.legend(fontsize=labelsize)
else:
ax1.plot(x, y1, c=c1, marker='s', ls='-')
if xlabel:
ax1.set_xlabel(xlabel, fontsize=labelsize)
if y1label:
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)
ax1.tick_params('x', labelsize=ticksize)
ax1.tick_params('y', colors=c1, labelsize=ticksize)
ax2 = ax1.twinx()
if isinstance(y2, dict):
for i, (k, v) in enumerate(y2.items()):
ax2.plot(x, v, c=c2, marker='o', ls=styles[i % len(styles)],
label=k)
ax2.legend(fontsize=labelsize)
else:
ax2.plot(x, y2, c=c2, marker='o', ls='-')
if y2label:
# Make the y-axis label, ticks and tick labels match the line color.
ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)
ax2.tick_params('y', colors=c2, labelsize=ticksize)
return plt | Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot | Below is the the instruction that describes the task:
### Input:
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot
### Response:
def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None,
                         width=8, height=None, dpi=300):
    """
    Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
    examples. Makes it easier to create plots with different axes.
    Args:
        x (np.ndarray/list): Data for x-axis.
        y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
            be interpreted as a {label: sequence}.
        y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
            be interpreted as a {label: sequence}.
        xlabel (str): If not None, this will be the label for the x-axis.
        y1label (str): If not None, this will be the label for the y1-axis.
        y2label (str): If not None, this will be the label for the y2-axis.
        width (float): Width of plot in inches. Defaults to 8in.
        height (float): Height of plot in inches. Defaults to width * golden
            ratio.
        dpi (int): Sets dot per inch for figure. Defaults to 300.
    Returns:
        matplotlib.pyplot
    """
    import palettable.colorbrewer.diverging
    colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
    c1 = colors[0]
    c2 = colors[-1]
    golden_ratio = (math.sqrt(5) - 1) / 2
    if not height:
        height = int(width * golden_ratio)
    import matplotlib.pyplot as plt
    # BUG FIX: a hard-coded ``width = 12`` previously overrode the caller's
    # ``width`` argument at this point; removed so the parameter is honored
    # for the figure and font sizes below.
    labelsize = int(width * 3)
    ticksize = int(width * 2.5)
    # BUG FIX: the last entry was ".", which is a marker, not a valid
    # linestyle (a fourth data series would raise); ":" (dotted) is the
    # intended dotted linestyle.
    styles = ["-", "--", "-.", ":"]
    fig, ax1 = plt.subplots()
    fig.set_size_inches((width, height))
    if dpi:
        fig.set_dpi(dpi)
    if isinstance(y1, dict):
        for i, (k, v) in enumerate(y1.items()):
            ax1.plot(x, v, c=c1, marker='s', ls=styles[i % len(styles)],
                     label=k)
        ax1.legend(fontsize=labelsize)
    else:
        ax1.plot(x, y1, c=c1, marker='s', ls='-')
    if xlabel:
        ax1.set_xlabel(xlabel, fontsize=labelsize)
    if y1label:
        # Make the y-axis label, ticks and tick labels match the line color.
        ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)
    ax1.tick_params('x', labelsize=ticksize)
    ax1.tick_params('y', colors=c1, labelsize=ticksize)
    # Second y-axis sharing the same x-axis.
    ax2 = ax1.twinx()
    if isinstance(y2, dict):
        for i, (k, v) in enumerate(y2.items()):
            ax2.plot(x, v, c=c2, marker='o', ls=styles[i % len(styles)],
                     label=k)
        ax2.legend(fontsize=labelsize)
    else:
        ax2.plot(x, y2, c=c2, marker='o', ls='-')
    if y2label:
        # Make the y-axis label, ticks and tick labels match the line color.
        ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)
    ax2.tick_params('y', colors=c2, labelsize=ticksize)
    return plt
def Emulation_setCPUThrottlingRate(self, rate):
    """
    Function path: Emulation.setCPUThrottlingRate
    Domain: Emulation
    Method name: setCPUThrottlingRate
    WARNING: This function is marked 'Experimental'!
    Parameters:
        Required arguments:
            'rate' (type: number) -> Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
    No return value.
    Description: Enables CPU throttling to emulate slow CPUs.
    """
    # Runtime type validation mirroring the generated-protocol convention.
    assert isinstance(rate, (float, int)), (
        "Argument 'rate' must be of type '['float', 'int']'. Received type: '%s'"
        % type(rate))
    # Delegate to the underlying synchronous CDP transport.
    return self.synchronous_command('Emulation.setCPUThrottlingRate',
        rate=rate)
Domain: Emulation
Method name: setCPUThrottlingRate
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'rate' (type: number) -> Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
No return value.
Description: Enables CPU throttling to emulate slow CPUs. | Below is the the instruction that describes the task:
### Input:
Function path: Emulation.setCPUThrottlingRate
Domain: Emulation
Method name: setCPUThrottlingRate
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'rate' (type: number) -> Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
No return value.
Description: Enables CPU throttling to emulate slow CPUs.
### Response:
def Emulation_setCPUThrottlingRate(self, rate):
"""
Function path: Emulation.setCPUThrottlingRate
Domain: Emulation
Method name: setCPUThrottlingRate
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'rate' (type: number) -> Throttling rate as a slowdown factor (1 is no throttle, 2 is 2x slowdown, etc).
No return value.
Description: Enables CPU throttling to emulate slow CPUs.
"""
assert isinstance(rate, (float, int)
), "Argument 'rate' must be of type '['float', 'int']'. Received type: '%s'" % type(
rate)
subdom_funcs = self.synchronous_command('Emulation.setCPUThrottlingRate',
rate=rate)
return subdom_funcs |
def create_socket(self):
        """Create a socket for the daemon, depending on the directory location.

        Binds a unix domain socket at ``<config_dir>/pueue.sock``, removing
        any stale socket file from a previous run, and restricts access to
        the owning user.

        Args:
            config_dir (str): The absolute path to the config directory used by the daemon.

        Returns:
            socket.socket: The daemon socket. Clients connect to this socket.
        """
        socket_path = os.path.join(self.config_dir, 'pueue.sock')
        # Create Socket and exit with 1, if socket can't be created
        try:
            # Remove a stale socket file left over from a previous run.
            if os.path.exists(socket_path):
                os.remove(socket_path)
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.bind(socket_path)
            self.socket.setblocking(0)
            self.socket.listen(0)
            # Set file permissions: owner-only read/write/execute.
            os.chmod(socket_path, stat.S_IRWXU)
        except Exception:
            # BUG FIX: logger.exception() was called without its required
            # message argument, which raises TypeError and masked the real
            # error; exception() logs at error level with the traceback.
            self.logger.exception("Daemon couldn't create socket. Aborting")
            sys.exit(1)
        return self.socket
Args:
config_dir (str): The absolute path to the config directory used by the daemon.
Returns:
socket.socket: The daemon socket. Clients connect to this socket. | Below is the the instruction that describes the task:
### Input:
Create a socket for the daemon, depending on the directory location.
Args:
config_dir (str): The absolute path to the config directory used by the daemon.
Returns:
socket.socket: The daemon socket. Clients connect to this socket.
### Response:
def create_socket(self):
"""Create a socket for the daemon, depending on the directory location.
Args:
config_dir (str): The absolute path to the config directory used by the daemon.
Returns:
socket.socket: The daemon socket. Clients connect to this socket.
"""
socket_path = os.path.join(self.config_dir, 'pueue.sock')
# Create Socket and exit with 1, if socket can't be created
try:
if os.path.exists(socket_path):
os.remove(socket_path)
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(socket_path)
self.socket.setblocking(0)
self.socket.listen(0)
# Set file permissions
os.chmod(socket_path, stat.S_IRWXU)
except Exception:
self.logger.error("Daemon couldn't socket. Aborting")
self.logger.exception()
sys.exit(1)
return self.socket |
def stdout_encode(u, default='utf-8'):
    """ Encode a string with the standard-out encoding.

    If sys.stdout.encoding isn't specified, this falls back to @default.
    @default: default encoding
    -> #str with standard out encoding
    """
    # Round-trip through the target encoding so unrepresentable characters
    # are substituted rather than raising.
    target = sys.stdout.encoding or default
    safe_bytes = u.encode(target, "replace")
    return safe_bytes.decode(target, "replace")
If sys.stdout.encoding isn't specified, it this defaults to @default
@default: default encoding
-> #str with standard out encoding | Below is the the instruction that describes the task:
### Input:
Encodes a given string with the proper standard out encoding
If sys.stdout.encoding isn't specified, it this defaults to @default
@default: default encoding
-> #str with standard out encoding
### Response:
def stdout_encode(u, default='utf-8'):
""" Encodes a given string with the proper standard out encoding
If sys.stdout.encoding isn't specified, it this defaults to @default
@default: default encoding
-> #str with standard out encoding
"""
# from http://stackoverflow.com/questions/3627793/best-output-type-and-
# encoding-practices-for-repr-functions
encoding = sys.stdout.encoding or default
return u.encode(encoding, "replace").decode(encoding, "replace") |
def _print_table_ontologies():
    """
    Print a table of all locally stored ontology files.
    2015-10-18: removed 'cached' from report
    2016-06-17: made a subroutine of action_listlocal()
    """
    ontologies = get_localontologies()
    ONTOSPY_LOCAL_MODELS = get_home_location()
    if not ontologies:
        return
    from collections import namedtuple
    Row = namedtuple('Row', ['N', 'Added', 'File'])
    print("")
    rows = []
    for position, fname in enumerate(ontologies, start=1):
        styled_name = click.style(fname, fg='green')
        try:
            modified = os.path.getmtime(ONTOSPY_LOCAL_MODELS + "/" + fname)
        except OSError:
            # Missing/unreadable file: show the epoch instead of failing.
            modified = 0
        stamp = str(datetime.datetime.fromtimestamp(modified))
        rows.append(Row(str(position), stamp, styled_name))
    pprinttable(rows)
    print("")
    return
2015-10-18: removed 'cached' from report
2016-06-17: made a subroutine of action_listlocal() | Below is the the instruction that describes the task:
### Input:
list all local files
2015-10-18: removed 'cached' from report
2016-06-17: made a subroutine of action_listlocal()
### Response:
def _print_table_ontologies():
"""
list all local files
2015-10-18: removed 'cached' from report
2016-06-17: made a subroutine of action_listlocal()
"""
ontologies = get_localontologies()
ONTOSPY_LOCAL_MODELS = get_home_location()
if ontologies:
print("")
temp = []
from collections import namedtuple
Row = namedtuple('Row', ['N', 'Added', 'File'])
# Row = namedtuple('Row',['N','Added','Cached', 'File'])
counter = 0
for file in ontologies:
counter += 1
_counter = str(counter)
# name = Style.BRIGHT + file + Style.RESET_ALL
name = click.style(file, fg='green')
try:
mtime = os.path.getmtime(ONTOSPY_LOCAL_MODELS + "/" + file)
except OSError:
mtime = 0
last_modified_date = str(datetime.datetime.fromtimestamp(mtime))
# cached = str(os.path.exists(ONTOSPY_LOCAL_CACHE + "/" + file + ".pickle"))
temp += [Row(_counter, last_modified_date, name)]
pprinttable(temp)
print("")
return |
def enabled_service_owners():
    '''
    Return which packages own each of the services that are currently enabled.
    CLI Example:
        salt myminion introspect.enabled_service_owners
    '''
    # Verify both required execution-module functions exist before doing any
    # work; report all missing capabilities at once.
    error = {}
    if 'pkg.owner' not in __salt__:
        error['Unsupported Package Manager'] = (
            'The module for the package manager on this system does not '
            'support looking up which package(s) owns which file(s)'
        )
    if 'service.show' not in __salt__:
        error['Unsupported Service Manager'] = (
            'The module for the service manager on this system does not '
            'support showing descriptive service data'
        )
    if error:
        return {'Error': error}
    ret = {}
    services = __salt__['service.get_enabled']()
    for service in services:
        data = __salt__['service.show'](service)
        # Skip units without an ExecStart entry -- there is no binary path
        # to map back to an owning package.
        if 'ExecStart' not in data:
            continue
        start_cmd = data['ExecStart']['path']
        pkg = __salt__['pkg.owner'](start_cmd)
        # pkg.owner maps path -> package; take the first (presumably only)
        # value as the owning package.
        ret[service] = next(six.itervalues(pkg))
    return ret
CLI Example:
salt myminion introspect.enabled_service_owners | Below is the the instruction that describes the task:
### Input:
Return which packages own each of the services that are currently enabled.
CLI Example:
salt myminion introspect.enabled_service_owners
### Response:
def enabled_service_owners():
'''
Return which packages own each of the services that are currently enabled.
CLI Example:
salt myminion introspect.enabled_service_owners
'''
error = {}
if 'pkg.owner' not in __salt__:
error['Unsupported Package Manager'] = (
'The module for the package manager on this system does not '
'support looking up which package(s) owns which file(s)'
)
if 'service.show' not in __salt__:
error['Unsupported Service Manager'] = (
'The module for the service manager on this system does not '
'support showing descriptive service data'
)
if error:
return {'Error': error}
ret = {}
services = __salt__['service.get_enabled']()
for service in services:
data = __salt__['service.show'](service)
if 'ExecStart' not in data:
continue
start_cmd = data['ExecStart']['path']
pkg = __salt__['pkg.owner'](start_cmd)
ret[service] = next(six.itervalues(pkg))
return ret |
def dAbr_dV(dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St):
    """ Partial derivatives of squared flow magnitudes w.r.t voltage.
    Computes partial derivatives of apparent power w.r.t active and
    reactive power flows. Partial derivative must equal 1 for lines
    with zero flow to avoid division by zero errors (1 comes from
    L'Hopital).
    """
    # d(|S|^2)/dP = 2P and d(|S|^2)/dQ = 2Q, placed on diagonals so the
    # chain rule below becomes a matrix product per branch end.
    dAf_dPf = spdiag(2 * Sf.real())
    dAf_dQf = spdiag(2 * Sf.imag())
    dAt_dPt = spdiag(2 * St.real())
    dAt_dQt = spdiag(2 * St.imag())
    # Partial derivative of apparent power magnitude w.r.t voltage
    # phase angle.
    dAf_dVa = dAf_dPf * dSf_dVa.real() + dAf_dQf * dSf_dVa.imag()
    dAt_dVa = dAt_dPt * dSt_dVa.real() + dAt_dQt * dSt_dVa.imag()
    # Partial derivative of apparent power magnitude w.r.t. voltage
    # amplitude.
    dAf_dVm = dAf_dPf * dSf_dVm.real() + dAf_dQf * dSf_dVm.imag()
    dAt_dVm = dAt_dPt * dSt_dVm.real() + dAt_dQt * dSt_dVm.imag()
    return dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm
Computes partial derivatives of apparent power w.r.t active and
reactive power flows. Partial derivative must equal 1 for lines
with zero flow to avoid division by zero errors (1 comes from
L'Hopital). | Below is the the instruction that describes the task:
### Input:
Partial derivatives of squared flow magnitudes w.r.t voltage.
Computes partial derivatives of apparent power w.r.t active and
reactive power flows. Partial derivative must equal 1 for lines
with zero flow to avoid division by zero errors (1 comes from
L'Hopital).
### Response:
def dAbr_dV(dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St):
""" Partial derivatives of squared flow magnitudes w.r.t voltage.
Computes partial derivatives of apparent power w.r.t active and
reactive power flows. Partial derivative must equal 1 for lines
with zero flow to avoid division by zero errors (1 comes from
L'Hopital).
"""
dAf_dPf = spdiag(2 * Sf.real())
dAf_dQf = spdiag(2 * Sf.imag())
dAt_dPt = spdiag(2 * St.real())
dAt_dQt = spdiag(2 * St.imag())
# Partial derivative of apparent power magnitude w.r.t voltage
# phase angle.
dAf_dVa = dAf_dPf * dSf_dVa.real() + dAf_dQf * dSf_dVa.imag()
dAt_dVa = dAt_dPt * dSt_dVa.real() + dAt_dQt * dSt_dVa.imag()
# Partial derivative of apparent power magnitude w.r.t. voltage
# amplitude.
dAf_dVm = dAf_dPf * dSf_dVm.real() + dAf_dQf * dSf_dVm.imag()
dAt_dVm = dAt_dPt * dSt_dVm.real() + dAt_dQt * dSt_dVm.imag()
return dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm |
def _signed_sub_overflow(state, a, b):
    """
    Sign extend the value to 512 bits and check the result can be represented
    in 256. Following there is a 32 bit excerpt of this condition:
    a - b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
    +80000000 False False False False True True True
    +c0000001 False False False False False False True
    +ffffffff False False False False False False False
    +00000000 True False False False False False False
    +00000001 True False False False False False False
    +3fffffff True False False False False False False
    +7fffffff True True True False False False False
    """
    # Widen both operands to 512 bits so the subtraction itself cannot wrap.
    sub = Operators.SEXTEND(a, 256, 512) - Operators.SEXTEND(b, 256, 512)
    # Overflow iff the exact difference falls outside the signed 256-bit
    # range [-2**255, 2**255 - 1].
    cond = Operators.OR(sub < -(1 << 255), sub >= (1 << 255))
    return cond
in 256. Following there is a 32 bit excerpt of this condition:
a - b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 False False False False True True True
+c0000001 False False False False False False True
+ffffffff False False False False False False False
+00000000 True False False False False False False
+00000001 True False False False False False False
+3fffffff True False False False False False False
+7fffffff True True True False False False False | Below is the the instruction that describes the task:
### Input:
Sign extend the value to 512 bits and check the result can be represented
in 256. Following there is a 32 bit excerpt of this condition:
a - b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 False False False False True True True
+c0000001 False False False False False False True
+ffffffff False False False False False False False
+00000000 True False False False False False False
+00000001 True False False False False False False
+3fffffff True False False False False False False
+7fffffff True True True False False False False
### Response:
def _signed_sub_overflow(state, a, b):
"""
Sign extend the value to 512 bits and check the result can be represented
in 256. Following there is a 32 bit excerpt of this condition:
a - b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 False False False False True True True
+c0000001 False False False False False False True
+ffffffff False False False False False False False
+00000000 True False False False False False False
+00000001 True False False False False False False
+3fffffff True False False False False False False
+7fffffff True True True False False False False
"""
sub = Operators.SEXTEND(a, 256, 512) - Operators.SEXTEND(b, 256, 512)
cond = Operators.OR(sub < -(1 << 255), sub >= (1 << 255))
return cond |
def issue_and_listen_to_command_history():
    """Listen to command history updates of a single issued command."""
    def tc_callback(rec):
        # Invoked for every command-history update of the issued command.
        print('TC:', rec)
    # NOTE(review): relies on a module-level ``processor`` client being
    # connected -- confirm before calling.
    command = processor.issue_command('/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF', args={
        'voltage_num': 1,
    }, comment='im a comment')
    command.create_command_history_subscription(on_data=tc_callback)
### Input:
Listen to command history updates of a single issued command.
### Response:
def issue_and_listen_to_command_history():
"""Listen to command history updates of a single issued command."""
def tc_callback(rec):
print('TC:', rec)
command = processor.issue_command('/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF', args={
'voltage_num': 1,
}, comment='im a comment')
command.create_command_history_subscription(on_data=tc_callback) |
def _decode(cls, value):
"""Decode the given value, reverting '%'-encoded groups."""
value = cls._DEC_RE.sub(lambda x: '%c' % int(x.group(1), 16), value)
return json.loads(value) | Decode the given value, reverting '%'-encoded groups. | Below is the the instruction that describes the task:
### Input:
Decode the given value, reverting '%'-encoded groups.
### Response:
def _decode(cls, value):
"""Decode the given value, reverting '%'-encoded groups."""
value = cls._DEC_RE.sub(lambda x: '%c' % int(x.group(1), 16), value)
return json.loads(value) |
def _update_rotation(self, event):
"""Update rotation parmeters based on mouse movement"""
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
if self._event_value is None:
self._event_value = self.azimuth, self.elevation
self.azimuth = self._event_value[0] - (p2 - p1)[0] * 0.5
self.elevation = self._event_value[1] + (p2 - p1)[1] * 0.5 | Update rotation parmeters based on mouse movement | Below is the the instruction that describes the task:
### Input:
Update rotation parmeters based on mouse movement
### Response:
def _update_rotation(self, event):
"""Update rotation parmeters based on mouse movement"""
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
if self._event_value is None:
self._event_value = self.azimuth, self.elevation
self.azimuth = self._event_value[0] - (p2 - p1)[0] * 0.5
self.elevation = self._event_value[1] + (p2 - p1)[1] * 0.5 |
def t_UPPERCASE_IDENTIFIER(self, t):
        r'[A-Z][-a-zA-z0-9]*'
        # The raw string above IS this rule's PLY token regex; editing it
        # changes what the lexer matches.
        # NOTE(review): the range "A-z" (lowercase z) also admits the
        # characters [ \ ] ^ _ ` -- possibly intended to be "A-Z"; confirm
        # against the SMI grammar before changing.
        if t.value in self.forbidden_words:
            raise error.PySmiLexerError("%s is forbidden" % t.value, lineno=t.lineno)
        if t.value[-1] == '-':
            raise error.PySmiLexerError("Identifier should not end with '-': %s" % t.value, lineno=t.lineno)
        # Reclassify the token as a reserved keyword when applicable.
        t.type = self.reserved.get(t.value, 'UPPERCASE_IDENTIFIER')
        return t
### Input:
r'[A-Z][-a-zA-z0-9]*
### Response:
def t_UPPERCASE_IDENTIFIER(self, t):
r'[A-Z][-a-zA-z0-9]*'
if t.value in self.forbidden_words:
raise error.PySmiLexerError("%s is forbidden" % t.value, lineno=t.lineno)
if t.value[-1] == '-':
raise error.PySmiLexerError("Identifier should not end with '-': %s" % t.value, lineno=t.lineno)
t.type = self.reserved.get(t.value, 'UPPERCASE_IDENTIFIER')
return t |
def iter_paths(src_dir):
    """
    Recursively locate files within a folder.
    Note: scandir does not guarantee ordering
    :param src_dir: string for directory to be parsed through
    :return: an iterable of DirEntry objects for all files within src_dir
    """
    # FIX: the original reused the loop variable ``x`` for both the outer
    # entry and the recursive results (shadowing), and wrapped ``src_dir``
    # in a pointless single-argument os.path.join.
    for entry in scandir(src_dir):
        if entry.is_dir(follow_symlinks=False):
            # Recurse into subdirectories and re-yield their files.
            yield from iter_paths(entry.path)
        else:
            yield entry
Note: scandir does not guarantee ordering
:param src_dir: string for directory to be parsed through
:return an iterable of DirEntry objects all files within the src_dir | Below is the the instruction that describes the task:
### Input:
Function that recursively locates files within folder
Note: scandir does not guarantee ordering
:param src_dir: string for directory to be parsed through
:return an iterable of DirEntry objects all files within the src_dir
### Response:
def iter_paths(src_dir):
"""
Function that recursively locates files within folder
Note: scandir does not guarantee ordering
:param src_dir: string for directory to be parsed through
:return an iterable of DirEntry objects all files within the src_dir
"""
for x in scandir(os.path.join(src_dir)):
if x.is_dir(follow_symlinks=False):
for x in iter_paths(x.path):
yield x
else:
yield x |
def start_transmit(self, blocking=False, start_packet_groups=True, *ports):
        """ Start transmit on ports.
        :param blocking: True - wait for traffic end, False - return after traffic start.
        :param start_packet_groups: True - clear time stamps and start collecting packet groups stats, False - don't.
        :param ports: list of ports to start traffic on, if empty start on all ports.
        """
        port_list = self.set_ports_list(*ports)
        if start_packet_groups:
            # Packet-group statistics are cleared and started on *all*
            # session ports, not just the ones that will transmit.
            port_list_for_packet_groups = self.ports.values()
            port_list_for_packet_groups = self.set_ports_list(*port_list_for_packet_groups)
            self.api.call_rc('ixClearTimeStamp {}'.format(port_list_for_packet_groups))
            self.api.call_rc('ixStartPacketGroups {}'.format(port_list_for_packet_groups))
        self.api.call_rc('ixStartTransmit {}'.format(port_list))
        # Brief pause so transmission has actually begun before returning
        # (or before polling for completion below).
        time.sleep(0.2)
        if blocking:
            self.wait_transmit(*ports)
:param blocking: True - wait for traffic end, False - return after traffic start.
:param start_packet_groups: True - clear time stamps and start collecting packet groups stats, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports. | Below is the the instruction that describes the task:
### Input:
Start transmit on ports.
:param blocking: True - wait for traffic end, False - return after traffic start.
:param start_packet_groups: True - clear time stamps and start collecting packet groups stats, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
### Response:
def start_transmit(self, blocking=False, start_packet_groups=True, *ports):
""" Start transmit on ports.
:param blocking: True - wait for traffic end, False - return after traffic start.
:param start_packet_groups: True - clear time stamps and start collecting packet groups stats, False - don't.
:param ports: list of ports to start traffic on, if empty start on all ports.
"""
port_list = self.set_ports_list(*ports)
if start_packet_groups:
port_list_for_packet_groups = self.ports.values()
port_list_for_packet_groups = self.set_ports_list(*port_list_for_packet_groups)
self.api.call_rc('ixClearTimeStamp {}'.format(port_list_for_packet_groups))
self.api.call_rc('ixStartPacketGroups {}'.format(port_list_for_packet_groups))
self.api.call_rc('ixStartTransmit {}'.format(port_list))
time.sleep(0.2)
if blocking:
self.wait_transmit(*ports) |
async def execute(self, dc=None, token=None):
        """Execute stored operations
        Parameters:
            dc (str): Specify datacenter that will be used.
                      Defaults to the agent's local datacenter.
            token (ObjectID): Token ID
        Returns:
            Collection: Results of operations.
        Raises:
            TransactionError: Transaction failed
        """
        token_id = extract_attr(token, keys=["ID"])
        try:
            response = await self._api.put(
                "/v1/txn",
                data=self.operations,
                params={
                    "dc": dc,
                    "token": token_id
                })
        except ConflictError as error:
            # A conflict means the transaction was rejected; surface the
            # per-operation failures keyed by their index in the request.
            errors = {elt["OpIndex"]: elt for elt in error.value["Errors"]}
            operations = [op["KV"] for op in self.operations]
            meta = error.meta
            raise TransactionError(errors, operations, meta) from error
        except Exception as error:
            # NOTE(review): this clause only re-raises unchanged and could
            # be removed without altering behavior.
            raise error
        else:
            # Success: clear the queued operations in place.
            self.operations[:] = []
        results = []
        for _ in response.body["Results"]:
            data = _["KV"]
            if data["Value"] is not None:
                # Decode the wire-encoded value before returning it.
                data["Value"] = decode_value(data["Value"], data["Flags"])
            results.append(data)
        return results
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
token (ObjectID): Token ID
Returns:
Collection: Results of operations.
Raises:
TransactionError: Transaction failed | Below is the the instruction that describes the task:
### Input:
Execute stored operations
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
token (ObjectID): Token ID
Returns:
Collection: Results of operations.
Raises:
TransactionError: Transaction failed
### Response:
async def execute(self, dc=None, token=None):
"""Execute stored operations
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
token (ObjectID): Token ID
Returns:
Collection: Results of operations.
Raises:
TransactionError: Transaction failed
"""
token_id = extract_attr(token, keys=["ID"])
try:
response = await self._api.put(
"/v1/txn",
data=self.operations,
params={
"dc": dc,
"token": token_id
})
except ConflictError as error:
errors = {elt["OpIndex"]: elt for elt in error.value["Errors"]}
operations = [op["KV"] for op in self.operations]
meta = error.meta
raise TransactionError(errors, operations, meta) from error
except Exception as error:
raise error
else:
self.operations[:] = []
results = []
for _ in response.body["Results"]:
data = _["KV"]
if data["Value"] is not None:
data["Value"] = decode_value(data["Value"], data["Flags"])
results.append(data)
return results |
def process_climis_crop_production_data(data_dir: str):
    """ Process CliMIS crop production data.

    Reads every ``Crops_EstimatedProductionConsumptionBalance*.csv`` under
    ``<data_dir>/Climis South Sudan Crop Production Data/`` and returns a
    DataFrame with one net-cereal-production record per region and year.

    :param data_dir: directory containing the CliMIS csv folder and
        ``ipc_data.csv`` (used to map county names to states).
    :return: pandas.DataFrame of combined records.
    """
    # BUG FIX: this pattern was a plain string, so "{data_dir}" was searched
    # for literally and no files were ever found; it must be an f-string.
    climis_crop_production_csvs = glob(
        f"{data_dir}/Climis South Sudan Crop Production Data/"
        "Crops_EstimatedProductionConsumptionBalance*.csv"
    )
    state_county_df = pd.read_csv(
        f"{data_dir}/ipc_data.csv", skipinitialspace=True
    )
    combined_records = []
    for f in climis_crop_production_csvs:
        # Filename convention: Crops_<...>_<year>.csv
        year = int(f.split("/")[-1].split("_")[2].split(".")[0])
        df = pd.read_csv(f).dropna()
        for _, r in df.iterrows():
            record = {
                "Year": year,
                "Month": None,
                "Source": "CliMIS",
                "Country": "South Sudan",
            }
            region = r["State/County"].strip()
            if region.lower() in state_county_df["State"].str.lower().values:
                # The region is itself a state.
                record["State"] = region
                record["County"] = None
            else:
                # Treat the region as a county and look up its state.
                potential_states = state_county_df.loc[
                    state_county_df["County"] == region
                ]["State"]
                record["State"] = (
                    potential_states.iloc[0]
                    if len(potential_states) != 0
                    else None
                )
                record["County"] = region
            for field in r.index:
                if field != "State/County":
                    if "Net Cereal production" in field:
                        record["Variable"] = "Net Cereal Production"
                        record["Value"] = r[field]
                        # Units appear parenthesized at the end of the
                        # column header, e.g. "(tonnes)".
                        if field.split()[-1].startswith("("):
                            record["Unit"] = field.split()[-1][1:-1].lower()
                        else:
                            record["Unit"] = None
            combined_records.append(record)
    df = pd.DataFrame(combined_records)
    return df
### Input:
Process CliMIS crop production data
### Response:
def process_climis_crop_production_data(data_dir: str):
""" Process CliMIS crop production data """
climis_crop_production_csvs = glob(
"{data_dir}/Climis South Sudan Crop Production Data/"
"Crops_EstimatedProductionConsumptionBalance*.csv"
)
state_county_df = pd.read_csv(
f"{data_dir}/ipc_data.csv", skipinitialspace=True
)
combined_records = []
for f in climis_crop_production_csvs:
year = int(f.split("/")[-1].split("_")[2].split(".")[0])
df = pd.read_csv(f).dropna()
for i, r in df.iterrows():
record = {
"Year": year,
"Month": None,
"Source": "CliMIS",
"Country": "South Sudan",
}
region = r["State/County"].strip()
if region.lower() in state_county_df["State"].str.lower().values:
record["State"] = region
record["County"] = None
else:
potential_states = state_county_df.loc[
state_county_df["County"] == region
]["State"]
record["State"] = (
potential_states.iloc[0]
if len(potential_states) != 0
else None
)
record["County"] = region
for field in r.index:
if field != "State/County":
if "Net Cereal production" in field:
record["Variable"] = "Net Cereal Production"
record["Value"] = r[field]
if field.split()[-1].startswith("("):
record["Unit"] = field.split()[-1][1:-1].lower()
else:
record["Unit"] = None
combined_records.append(record)
df = pd.DataFrame(combined_records)
return df |
def delete_user(self, user):
        """
        ADMIN ONLY. Removes the user from the system. There is no 'undo'
        available, so you should be certain that the user specified is the user
        you wish to delete.
        """
        uri = "users/%s" % utils.get_id(user)
        resp, resp_body = self.method_delete(uri)
        status = resp.status_code
        # Map the interesting HTTP failures onto library exceptions.
        if status == 404:
            raise exc.UserNotFound("User '%s' does not exist." % user)
        if status in (401, 403):
            raise exc.AuthorizationFailure("You are not authorized to delete "
                    "users.")
available, so you should be certain that the user specified is the user
you wish to delete. | Below is the the instruction that describes the task:
### Input:
ADMIN ONLY. Removes the user from the system. There is no 'undo'
available, so you should be certain that the user specified is the user
you wish to delete.
### Response:
def delete_user(self, user):
"""
ADMIN ONLY. Removes the user from the system. There is no 'undo'
available, so you should be certain that the user specified is the user
you wish to delete.
"""
user_id = utils.get_id(user)
uri = "users/%s" % user_id
resp, resp_body = self.method_delete(uri)
if resp.status_code == 404:
raise exc.UserNotFound("User '%s' does not exist." % user)
elif resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You are not authorized to delete "
"users.") |
def set_data(self, data_ap, metadata=None):
        """Use this method to SHARE (not copy) the incoming table.
        """
        # Keep a reference to the caller's table; mutations are visible to
        # both sides.
        self._data = data_ap
        if metadata:
            self.update_metadata(metadata)
        # Notify observers that the table contents changed.
        self.make_callback('modified')
### Input:
Use this method to SHARE (not copy) the incoming table.
### Response:
def set_data(self, data_ap, metadata=None):
"""Use this method to SHARE (not copy) the incoming table.
"""
self._data = data_ap
if metadata:
self.update_metadata(metadata)
self.make_callback('modified') |
def xrefs_from(self):
        """Xrefs from the function.

        Yields `Xref` objects collected from every line in the function.
        Plain flow xrefs are dropped, as are code references that stay
        inside the function itself (references to the function's data are
        still yielded). To obtain the internal code xrefs as well, iterate
        the function's lines directly.
        """
        for line in self.lines:
            for xref in line.xrefs_from:
                # Skip ordinary flow transfers and function-internal code
                # references; everything else is an interesting xref.
                internal = xref.type.is_flow or (xref.to in self and xref.iscode)
                if not internal:
                    yield xref
This includes the xrefs from every line in the function, as `Xref` objects.
Xrefs are filtered to exclude code references that are internal to the function. This
means that every xrefs to the function's code will NOT be returned (yet, references
to the function's data will be returnd). To get those extra xrefs, you need to iterate
the function's lines yourself. | Below is the the instruction that describes the task:
### Input:
Xrefs from the function.
This includes the xrefs from every line in the function, as `Xref` objects.
Xrefs are filtered to exclude code references that are internal to the function. This
means that every xrefs to the function's code will NOT be returned (yet, references
to the function's data will be returnd). To get those extra xrefs, you need to iterate
the function's lines yourself.
### Response:
def xrefs_from(self):
"""Xrefs from the function.
This includes the xrefs from every line in the function, as `Xref` objects.
Xrefs are filtered to exclude code references that are internal to the function. This
means that every xrefs to the function's code will NOT be returned (yet, references
to the function's data will be returnd). To get those extra xrefs, you need to iterate
the function's lines yourself.
"""
for line in self.lines:
for xref in line.xrefs_from:
if xref.type.is_flow:
continue
if xref.to in self and xref.iscode:
continue
yield xref |
def poa_components(aoi, dni, poa_sky_diffuse, poa_ground_diffuse):
r'''
Determine in-plane irradiance components.
Combines DNI with sky diffuse and ground-reflected irradiance to calculate
total, direct and diffuse irradiance components in the plane of array.
Parameters
----------
aoi : numeric
Angle of incidence of solar rays with respect to the module
surface, from :func:`aoi`.
dni : numeric
Direct normal irradiance (W/m^2), as measured from a TMY file or
calculated with a clearsky model.
poa_sky_diffuse : numeric
Diffuse irradiance (W/m^2) in the plane of the modules, as
calculated by a diffuse irradiance translation function
poa_ground_diffuse : numeric
Ground reflected irradiance (W/m^2) in the plane of the modules,
as calculated by an albedo model (eg. :func:`grounddiffuse`)
Returns
-------
irrads : OrderedDict or DataFrame
Contains the following keys:
* ``poa_global`` : Total in-plane irradiance (W/m^2)
* ``poa_direct`` : Total in-plane beam irradiance (W/m^2)
* ``poa_diffuse`` : Total in-plane diffuse irradiance (W/m^2)
* ``poa_sky_diffuse`` : In-plane diffuse irradiance from sky (W/m^2)
* ``poa_ground_diffuse`` : In-plane diffuse irradiance from ground
(W/m^2)
Notes
------
Negative beam irradiation due to aoi :math:`> 90^{\circ}` or AOI
:math:`< 0^{\circ}` is set to zero.
'''
poa_direct = np.maximum(dni * np.cos(np.radians(aoi)), 0)
poa_diffuse = poa_sky_diffuse + poa_ground_diffuse
poa_global = poa_direct + poa_diffuse
irrads = OrderedDict()
irrads['poa_global'] = poa_global
irrads['poa_direct'] = poa_direct
irrads['poa_diffuse'] = poa_diffuse
irrads['poa_sky_diffuse'] = poa_sky_diffuse
irrads['poa_ground_diffuse'] = poa_ground_diffuse
if isinstance(poa_direct, pd.Series):
irrads = pd.DataFrame(irrads)
return irrads | r'''
Determine in-plane irradiance components.
Combines DNI with sky diffuse and ground-reflected irradiance to calculate
total, direct and diffuse irradiance components in the plane of array.
Parameters
----------
aoi : numeric
Angle of incidence of solar rays with respect to the module
surface, from :func:`aoi`.
dni : numeric
Direct normal irradiance (W/m^2), as measured from a TMY file or
calculated with a clearsky model.
poa_sky_diffuse : numeric
Diffuse irradiance (W/m^2) in the plane of the modules, as
calculated by a diffuse irradiance translation function
poa_ground_diffuse : numeric
Ground reflected irradiance (W/m^2) in the plane of the modules,
as calculated by an albedo model (eg. :func:`grounddiffuse`)
Returns
-------
irrads : OrderedDict or DataFrame
Contains the following keys:
* ``poa_global`` : Total in-plane irradiance (W/m^2)
* ``poa_direct`` : Total in-plane beam irradiance (W/m^2)
* ``poa_diffuse`` : Total in-plane diffuse irradiance (W/m^2)
* ``poa_sky_diffuse`` : In-plane diffuse irradiance from sky (W/m^2)
* ``poa_ground_diffuse`` : In-plane diffuse irradiance from ground
(W/m^2)
Notes
------
Negative beam irradiation due to aoi :math:`> 90^{\circ}` or AOI
:math:`< 0^{\circ}` is set to zero. | Below is the the instruction that describes the task:
### Input:
r'''
Determine in-plane irradiance components.
Combines DNI with sky diffuse and ground-reflected irradiance to calculate
total, direct and diffuse irradiance components in the plane of array.
Parameters
----------
aoi : numeric
Angle of incidence of solar rays with respect to the module
surface, from :func:`aoi`.
dni : numeric
Direct normal irradiance (W/m^2), as measured from a TMY file or
calculated with a clearsky model.
poa_sky_diffuse : numeric
Diffuse irradiance (W/m^2) in the plane of the modules, as
calculated by a diffuse irradiance translation function
poa_ground_diffuse : numeric
Ground reflected irradiance (W/m^2) in the plane of the modules,
as calculated by an albedo model (eg. :func:`grounddiffuse`)
Returns
-------
irrads : OrderedDict or DataFrame
Contains the following keys:
* ``poa_global`` : Total in-plane irradiance (W/m^2)
* ``poa_direct`` : Total in-plane beam irradiance (W/m^2)
* ``poa_diffuse`` : Total in-plane diffuse irradiance (W/m^2)
* ``poa_sky_diffuse`` : In-plane diffuse irradiance from sky (W/m^2)
* ``poa_ground_diffuse`` : In-plane diffuse irradiance from ground
(W/m^2)
Notes
------
Negative beam irradiation due to aoi :math:`> 90^{\circ}` or AOI
:math:`< 0^{\circ}` is set to zero.
### Response:
def poa_components(aoi, dni, poa_sky_diffuse, poa_ground_diffuse):
r'''
Determine in-plane irradiance components.
Combines DNI with sky diffuse and ground-reflected irradiance to calculate
total, direct and diffuse irradiance components in the plane of array.
Parameters
----------
aoi : numeric
Angle of incidence of solar rays with respect to the module
surface, from :func:`aoi`.
dni : numeric
Direct normal irradiance (W/m^2), as measured from a TMY file or
calculated with a clearsky model.
poa_sky_diffuse : numeric
Diffuse irradiance (W/m^2) in the plane of the modules, as
calculated by a diffuse irradiance translation function
poa_ground_diffuse : numeric
Ground reflected irradiance (W/m^2) in the plane of the modules,
as calculated by an albedo model (eg. :func:`grounddiffuse`)
Returns
-------
irrads : OrderedDict or DataFrame
Contains the following keys:
* ``poa_global`` : Total in-plane irradiance (W/m^2)
* ``poa_direct`` : Total in-plane beam irradiance (W/m^2)
* ``poa_diffuse`` : Total in-plane diffuse irradiance (W/m^2)
* ``poa_sky_diffuse`` : In-plane diffuse irradiance from sky (W/m^2)
* ``poa_ground_diffuse`` : In-plane diffuse irradiance from ground
(W/m^2)
Notes
------
Negative beam irradiation due to aoi :math:`> 90^{\circ}` or AOI
:math:`< 0^{\circ}` is set to zero.
'''
poa_direct = np.maximum(dni * np.cos(np.radians(aoi)), 0)
poa_diffuse = poa_sky_diffuse + poa_ground_diffuse
poa_global = poa_direct + poa_diffuse
irrads = OrderedDict()
irrads['poa_global'] = poa_global
irrads['poa_direct'] = poa_direct
irrads['poa_diffuse'] = poa_diffuse
irrads['poa_sky_diffuse'] = poa_sky_diffuse
irrads['poa_ground_diffuse'] = poa_ground_diffuse
if isinstance(poa_direct, pd.Series):
irrads = pd.DataFrame(irrads)
return irrads |
def tab_pressed(self, event):
"""Method called when a tab from a QTabBar has been pressed."""
self.from_index = self.dock_tabbar.tabAt(event.pos())
self.dock_tabbar.setCurrentIndex(self.from_index)
if event.button() == Qt.RightButton:
if self.from_index == -1:
self.show_nontab_menu(event)
else:
self.show_tab_menu(event) | Method called when a tab from a QTabBar has been pressed. | Below is the the instruction that describes the task:
### Input:
Method called when a tab from a QTabBar has been pressed.
### Response:
def tab_pressed(self, event):
"""Method called when a tab from a QTabBar has been pressed."""
self.from_index = self.dock_tabbar.tabAt(event.pos())
self.dock_tabbar.setCurrentIndex(self.from_index)
if event.button() == Qt.RightButton:
if self.from_index == -1:
self.show_nontab_menu(event)
else:
self.show_tab_menu(event) |
def _uint2farray(ftype, num, length=None):
"""Convert an unsigned integer to an farray."""
if num < 0:
raise ValueError("expected num >= 0")
else:
objs = _uint2objs(ftype, num, length)
return farray(objs) | Convert an unsigned integer to an farray. | Below is the the instruction that describes the task:
### Input:
Convert an unsigned integer to an farray.
### Response:
def _uint2farray(ftype, num, length=None):
"""Convert an unsigned integer to an farray."""
if num < 0:
raise ValueError("expected num >= 0")
else:
objs = _uint2objs(ftype, num, length)
return farray(objs) |
def numeric_range(*args):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = numeric_range(start, stop, step)
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
argc = len(args)
if argc == 1:
stop, = args
start = type(stop)(0)
step = 1
elif argc == 2:
start, stop = args
step = 1
elif argc == 3:
start, stop, step = args
else:
err_msg = 'numeric_range takes at most 3 arguments, got {}'
raise TypeError(err_msg.format(argc))
values = (start + (step * n) for n in count())
zero = type(step)(0)
if step > zero:
return takewhile(partial(gt, stop), values)
elif step < zero:
return takewhile(partial(lt, stop), values)
else:
raise ValueError('numeric_range arg 3 must not be zero') | An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = numeric_range(start, stop, step)
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0) | Below is the the instruction that describes the task:
### Input:
An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = numeric_range(start, stop, step)
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
### Response:
def numeric_range(*args):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = numeric_range(start, stop, step)
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
argc = len(args)
if argc == 1:
stop, = args
start = type(stop)(0)
step = 1
elif argc == 2:
start, stop = args
step = 1
elif argc == 3:
start, stop, step = args
else:
err_msg = 'numeric_range takes at most 3 arguments, got {}'
raise TypeError(err_msg.format(argc))
values = (start + (step * n) for n in count())
zero = type(step)(0)
if step > zero:
return takewhile(partial(gt, stop), values)
elif step < zero:
return takewhile(partial(lt, stop), values)
else:
raise ValueError('numeric_range arg 3 must not be zero') |
def register_handler(alias: str, handler: callable):
"""
Used to register handler at the dispatcher.
:param alias: Signal alias to match handler to.
:param handler: Handler. Some callable.
:return:
"""
if SignalDispatcher.handlers.get(alias) is None:
SignalDispatcher.handlers[alias] = [handler]
else:
SignalDispatcher.handlers.get(alias).append(handler) | Used to register handler at the dispatcher.
:param alias: Signal alias to match handler to.
:param handler: Handler. Some callable.
:return: | Below is the the instruction that describes the task:
### Input:
Used to register handler at the dispatcher.
:param alias: Signal alias to match handler to.
:param handler: Handler. Some callable.
:return:
### Response:
def register_handler(alias: str, handler: callable):
"""
Used to register handler at the dispatcher.
:param alias: Signal alias to match handler to.
:param handler: Handler. Some callable.
:return:
"""
if SignalDispatcher.handlers.get(alias) is None:
SignalDispatcher.handlers[alias] = [handler]
else:
SignalDispatcher.handlers.get(alias).append(handler) |
def rgb2hsl(rgb):
"""Convert RGB representation towards HSL
:param r: Red amount (float between 0 and 1)
:param g: Green amount (float between 0 and 1)
:param b: Blue amount (float between 0 and 1)
:rtype: 3-uple for HSL values in float between 0 and 1
This algorithm came from:
http://www.easyrgb.com/index.php?X=MATH&H=19#text19
Here are some quick notion of RGB to HSL conversion:
>>> from colour import rgb2hsl
Note that if red amount is equal to green and blue, then you
should have a gray value (from black to white).
>>> rgb2hsl((1.0, 1.0, 1.0)) # doctest: +ELLIPSIS
(..., 0.0, 1.0)
>>> rgb2hsl((0.5, 0.5, 0.5)) # doctest: +ELLIPSIS
(..., 0.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 0.0)) # doctest: +ELLIPSIS
(..., 0.0, 0.0)
If only one color is different from the others, it defines the
direct Hue:
>>> rgb2hsl((0.5, 0.5, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.75)
>>> rgb2hsl((0.2, 0.1, 0.1)) # doctest: +ELLIPSIS
(0.0, 0.33..., 0.15...)
Having only one value set, you can check that:
>>> rgb2hsl((1.0, 0.0, 0.0))
(0.0, 1.0, 0.5)
>>> rgb2hsl((0.0, 1.0, 0.0)) # doctest: +ELLIPSIS
(0.33..., 1.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.5)
Regression check upon very close values in every component of
red, green and blue:
>>> rgb2hsl((0.9999999999999999, 1.0, 0.9999999999999994))
(0.0, 0.0, 0.999...)
Of course:
>>> rgb2hsl((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Green must be between 0 and 1. You provided 2.0.
And:
>>> rgb2hsl((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Blue must be between 0 and 1. You provided 1.5.
"""
r, g, b = [float(v) for v in rgb]
for name, v in {'Red': r, 'Green': g, 'Blue': b}.items():
if not (0 - FLOAT_ERROR <= v <= 1 + FLOAT_ERROR):
raise ValueError("%s must be between 0 and 1. You provided %r."
% (name, v))
vmin = min(r, g, b) ## Min. value of RGB
vmax = max(r, g, b) ## Max. value of RGB
diff = vmax - vmin ## Delta RGB value
vsum = vmin + vmax
l = vsum / 2
if diff < FLOAT_ERROR: ## This is a gray, no chroma...
return (0.0, 0.0, l)
##
## Chromatic data...
##
## Saturation
if l < 0.5:
s = diff / vsum
else:
s = diff / (2.0 - vsum)
dr = (((vmax - r) / 6) + (diff / 2)) / diff
dg = (((vmax - g) / 6) + (diff / 2)) / diff
db = (((vmax - b) / 6) + (diff / 2)) / diff
if r == vmax:
h = db - dg
elif g == vmax:
h = (1.0 / 3) + dr - db
elif b == vmax:
h = (2.0 / 3) + dg - dr
if h < 0: h += 1
if h > 1: h -= 1
return (h, s, l) | Convert RGB representation towards HSL
:param r: Red amount (float between 0 and 1)
:param g: Green amount (float between 0 and 1)
:param b: Blue amount (float between 0 and 1)
:rtype: 3-uple for HSL values in float between 0 and 1
This algorithm came from:
http://www.easyrgb.com/index.php?X=MATH&H=19#text19
Here are some quick notion of RGB to HSL conversion:
>>> from colour import rgb2hsl
Note that if red amount is equal to green and blue, then you
should have a gray value (from black to white).
>>> rgb2hsl((1.0, 1.0, 1.0)) # doctest: +ELLIPSIS
(..., 0.0, 1.0)
>>> rgb2hsl((0.5, 0.5, 0.5)) # doctest: +ELLIPSIS
(..., 0.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 0.0)) # doctest: +ELLIPSIS
(..., 0.0, 0.0)
If only one color is different from the others, it defines the
direct Hue:
>>> rgb2hsl((0.5, 0.5, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.75)
>>> rgb2hsl((0.2, 0.1, 0.1)) # doctest: +ELLIPSIS
(0.0, 0.33..., 0.15...)
Having only one value set, you can check that:
>>> rgb2hsl((1.0, 0.0, 0.0))
(0.0, 1.0, 0.5)
>>> rgb2hsl((0.0, 1.0, 0.0)) # doctest: +ELLIPSIS
(0.33..., 1.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.5)
Regression check upon very close values in every component of
red, green and blue:
>>> rgb2hsl((0.9999999999999999, 1.0, 0.9999999999999994))
(0.0, 0.0, 0.999...)
Of course:
>>> rgb2hsl((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Green must be between 0 and 1. You provided 2.0.
And:
>>> rgb2hsl((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Blue must be between 0 and 1. You provided 1.5. | Below is the the instruction that describes the task:
### Input:
Convert RGB representation towards HSL
:param r: Red amount (float between 0 and 1)
:param g: Green amount (float between 0 and 1)
:param b: Blue amount (float between 0 and 1)
:rtype: 3-uple for HSL values in float between 0 and 1
This algorithm came from:
http://www.easyrgb.com/index.php?X=MATH&H=19#text19
Here are some quick notion of RGB to HSL conversion:
>>> from colour import rgb2hsl
Note that if red amount is equal to green and blue, then you
should have a gray value (from black to white).
>>> rgb2hsl((1.0, 1.0, 1.0)) # doctest: +ELLIPSIS
(..., 0.0, 1.0)
>>> rgb2hsl((0.5, 0.5, 0.5)) # doctest: +ELLIPSIS
(..., 0.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 0.0)) # doctest: +ELLIPSIS
(..., 0.0, 0.0)
If only one color is different from the others, it defines the
direct Hue:
>>> rgb2hsl((0.5, 0.5, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.75)
>>> rgb2hsl((0.2, 0.1, 0.1)) # doctest: +ELLIPSIS
(0.0, 0.33..., 0.15...)
Having only one value set, you can check that:
>>> rgb2hsl((1.0, 0.0, 0.0))
(0.0, 1.0, 0.5)
>>> rgb2hsl((0.0, 1.0, 0.0)) # doctest: +ELLIPSIS
(0.33..., 1.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.5)
Regression check upon very close values in every component of
red, green and blue:
>>> rgb2hsl((0.9999999999999999, 1.0, 0.9999999999999994))
(0.0, 0.0, 0.999...)
Of course:
>>> rgb2hsl((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Green must be between 0 and 1. You provided 2.0.
And:
>>> rgb2hsl((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Blue must be between 0 and 1. You provided 1.5.
### Response:
def rgb2hsl(rgb):
"""Convert RGB representation towards HSL
:param r: Red amount (float between 0 and 1)
:param g: Green amount (float between 0 and 1)
:param b: Blue amount (float between 0 and 1)
:rtype: 3-uple for HSL values in float between 0 and 1
This algorithm came from:
http://www.easyrgb.com/index.php?X=MATH&H=19#text19
Here are some quick notion of RGB to HSL conversion:
>>> from colour import rgb2hsl
Note that if red amount is equal to green and blue, then you
should have a gray value (from black to white).
>>> rgb2hsl((1.0, 1.0, 1.0)) # doctest: +ELLIPSIS
(..., 0.0, 1.0)
>>> rgb2hsl((0.5, 0.5, 0.5)) # doctest: +ELLIPSIS
(..., 0.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 0.0)) # doctest: +ELLIPSIS
(..., 0.0, 0.0)
If only one color is different from the others, it defines the
direct Hue:
>>> rgb2hsl((0.5, 0.5, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.75)
>>> rgb2hsl((0.2, 0.1, 0.1)) # doctest: +ELLIPSIS
(0.0, 0.33..., 0.15...)
Having only one value set, you can check that:
>>> rgb2hsl((1.0, 0.0, 0.0))
(0.0, 1.0, 0.5)
>>> rgb2hsl((0.0, 1.0, 0.0)) # doctest: +ELLIPSIS
(0.33..., 1.0, 0.5)
>>> rgb2hsl((0.0, 0.0, 1.0)) # doctest: +ELLIPSIS
(0.66..., 1.0, 0.5)
Regression check upon very close values in every component of
red, green and blue:
>>> rgb2hsl((0.9999999999999999, 1.0, 0.9999999999999994))
(0.0, 0.0, 0.999...)
Of course:
>>> rgb2hsl((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Green must be between 0 and 1. You provided 2.0.
And:
>>> rgb2hsl((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Blue must be between 0 and 1. You provided 1.5.
"""
r, g, b = [float(v) for v in rgb]
for name, v in {'Red': r, 'Green': g, 'Blue': b}.items():
if not (0 - FLOAT_ERROR <= v <= 1 + FLOAT_ERROR):
raise ValueError("%s must be between 0 and 1. You provided %r."
% (name, v))
vmin = min(r, g, b) ## Min. value of RGB
vmax = max(r, g, b) ## Max. value of RGB
diff = vmax - vmin ## Delta RGB value
vsum = vmin + vmax
l = vsum / 2
if diff < FLOAT_ERROR: ## This is a gray, no chroma...
return (0.0, 0.0, l)
##
## Chromatic data...
##
## Saturation
if l < 0.5:
s = diff / vsum
else:
s = diff / (2.0 - vsum)
dr = (((vmax - r) / 6) + (diff / 2)) / diff
dg = (((vmax - g) / 6) + (diff / 2)) / diff
db = (((vmax - b) / 6) + (diff / 2)) / diff
if r == vmax:
h = db - dg
elif g == vmax:
h = (1.0 / 3) + dr - db
elif b == vmax:
h = (2.0 / 3) + dg - dr
if h < 0: h += 1
if h > 1: h -= 1
return (h, s, l) |
def get_serializable_data_for_fields(model):
"""
Return a serialised version of the model's fields which exist as local database
columns (i.e. excluding m2m and incoming foreign key relations)
"""
pk_field = model._meta.pk
# If model is a child via multitable inheritance, use parent's pk
while pk_field.remote_field and pk_field.remote_field.parent_link:
pk_field = pk_field.remote_field.model._meta.pk
obj = {'pk': get_field_value(pk_field, model)}
for field in model._meta.fields:
if field.serialize:
obj[field.name] = get_field_value(field, model)
return obj | Return a serialised version of the model's fields which exist as local database
columns (i.e. excluding m2m and incoming foreign key relations) | Below is the the instruction that describes the task:
### Input:
Return a serialised version of the model's fields which exist as local database
columns (i.e. excluding m2m and incoming foreign key relations)
### Response:
def get_serializable_data_for_fields(model):
"""
Return a serialised version of the model's fields which exist as local database
columns (i.e. excluding m2m and incoming foreign key relations)
"""
pk_field = model._meta.pk
# If model is a child via multitable inheritance, use parent's pk
while pk_field.remote_field and pk_field.remote_field.parent_link:
pk_field = pk_field.remote_field.model._meta.pk
obj = {'pk': get_field_value(pk_field, model)}
for field in model._meta.fields:
if field.serialize:
obj[field.name] = get_field_value(field, model)
return obj |
def get_data():
"""Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists.
"""
output = []
tables = get_tables()
for table in tables:
columns = get_columns(table)
for column in columns:
if column['name'] == 'tenant_id':
output.append((table, column))
return output | Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists. | Below is the the instruction that describes the task:
### Input:
Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists.
### Response:
def get_data():
"""Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists.
"""
output = []
tables = get_tables()
for table in tables:
columns = get_columns(table)
for column in columns:
if column['name'] == 'tenant_id':
output.append((table, column))
return output |
def _stat(self, path):
'''IMPORTANT: expects `path`'s parent to already be deref()'erenced.'''
if path not in self.entries:
return OverlayStat(*self.originals['os:stat'](path)[:10], st_overlay=0)
st = self.entries[path].stat
if stat.S_ISLNK(st.st_mode):
return self._stat(self.deref(path))
return st | IMPORTANT: expects `path`'s parent to already be deref()'erenced. | Below is the the instruction that describes the task:
### Input:
IMPORTANT: expects `path`'s parent to already be deref()'erenced.
### Response:
def _stat(self, path):
'''IMPORTANT: expects `path`'s parent to already be deref()'erenced.'''
if path not in self.entries:
return OverlayStat(*self.originals['os:stat'](path)[:10], st_overlay=0)
st = self.entries[path].stat
if stat.S_ISLNK(st.st_mode):
return self._stat(self.deref(path))
return st |
def range(self, channels=None):
"""
Get the range of the specified channel(s).
The range is a two-element list specifying the smallest and largest
values that an event in a channel should have. Note that with
floating point data, some events could have values outside the
range in either direction due to instrument compensation.
The range should be transformed along with the data when passed
through a transformation function.
The range of channel "n" is extracted from the $PnR parameter as
``[0, $PnR - 1]``.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the range. If None, return a list
with the range of all channels, in the order of
``FCSData.channels``.
Return
------
array or list of arrays
The range of the specified channel(s).
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Get the range of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._range[ch] for ch in channels]
else:
return self._range[channels] | Get the range of the specified channel(s).
The range is a two-element list specifying the smallest and largest
values that an event in a channel should have. Note that with
floating point data, some events could have values outside the
range in either direction due to instrument compensation.
The range should be transformed along with the data when passed
through a transformation function.
The range of channel "n" is extracted from the $PnR parameter as
``[0, $PnR - 1]``.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the range. If None, return a list
with the range of all channels, in the order of
``FCSData.channels``.
Return
------
array or list of arrays
The range of the specified channel(s). | Below is the the instruction that describes the task:
### Input:
Get the range of the specified channel(s).
The range is a two-element list specifying the smallest and largest
values that an event in a channel should have. Note that with
floating point data, some events could have values outside the
range in either direction due to instrument compensation.
The range should be transformed along with the data when passed
through a transformation function.
The range of channel "n" is extracted from the $PnR parameter as
``[0, $PnR - 1]``.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the range. If None, return a list
with the range of all channels, in the order of
``FCSData.channels``.
Return
------
array or list of arrays
The range of the specified channel(s).
### Response:
def range(self, channels=None):
"""
Get the range of the specified channel(s).
The range is a two-element list specifying the smallest and largest
values that an event in a channel should have. Note that with
floating point data, some events could have values outside the
range in either direction due to instrument compensation.
The range should be transformed along with the data when passed
through a transformation function.
The range of channel "n" is extracted from the $PnR parameter as
``[0, $PnR - 1]``.
Parameters
----------
channels : int, str, list of int, list of str
Channel(s) for which to get the range. If None, return a list
with the range of all channels, in the order of
``FCSData.channels``.
Return
------
array or list of arrays
The range of the specified channel(s).
"""
# Check default
if channels is None:
channels = self._channels
# Get numerical indices of channels
channels = self._name_to_index(channels)
# Get the range of the specified channels
if hasattr(channels, '__iter__') \
and not isinstance(channels, six.string_types):
return [self._range[ch] for ch in channels]
else:
return self._range[channels] |
def _truncate(p_str, p_repl):
"""
Returns p_str with truncated and ended with '...' version of p_repl.
Place of the truncation is calculated depending on p_max_width.
"""
# 4 is for '...' and an extra space at the end
text_lim = _columns() - len(escape_ansi(p_str)) - 4
truncated_str = re.sub(re.escape(p_repl), p_repl[:text_lim] + '...', p_str)
return truncated_str | Returns p_str with truncated and ended with '...' version of p_repl.
Place of the truncation is calculated depending on p_max_width. | Below is the the instruction that describes the task:
### Input:
Returns p_str with truncated and ended with '...' version of p_repl.
Place of the truncation is calculated depending on p_max_width.
### Response:
def _truncate(p_str, p_repl):
"""
Returns p_str with truncated and ended with '...' version of p_repl.
Place of the truncation is calculated depending on p_max_width.
"""
# 4 is for '...' and an extra space at the end
text_lim = _columns() - len(escape_ansi(p_str)) - 4
truncated_str = re.sub(re.escape(p_repl), p_repl[:text_lim] + '...', p_str)
return truncated_str |
def url2tmp(self, root, url):
""" convert url path to filename """
filename = url.rsplit('/', 1)[-1]
return os.path.join(root, filename) | convert url path to filename | Below is the the instruction that describes the task:
### Input:
convert url path to filename
### Response:
def url2tmp(self, root, url):
""" convert url path to filename """
filename = url.rsplit('/', 1)[-1]
return os.path.join(root, filename) |
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params={},\
working_dir='/tmp'):
"""Returns a tree from Alignment object aln.
aln: an cogent.core.alignment.Alignment object, or data that can be used
to build one.
- Clearcut only accepts aligned sequences. Alignment object used to
handle unaligned sequences.
moltype: a cogent.core.moltype object.
- NOTE: If moltype = RNA, we must convert to DNA since Clearcut v1.0.8
gives incorrect results if RNA is passed in. 'U' is treated as an
incorrect character and is excluded from distance calculations.
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Clearcut app controller.
The result will be an cogent.core.tree.PhyloNode object, or None if tree
fails.
"""
params['--out'] = get_tmp_filename(working_dir)
# Create instance of app controller, enable tree, disable alignment
app = Clearcut(InputHandler='_input_as_multiline_string', params=params, \
WorkingDir=working_dir, SuppressStdout=True,\
SuppressStderr=True)
#Input is an alignment
app.Parameters['-a'].on()
#Turn off input as distance matrix
app.Parameters['-d'].off()
#If moltype = RNA, we must convert to DNA.
if moltype == RNA:
moltype = DNA
if best_tree:
app.Parameters['-N'].on()
#Turn on correct moltype
moltype_string = moltype.label.upper()
app.Parameters[MOLTYPE_MAP[moltype_string]].on()
# Setup mapping. Clearcut clips identifiers. We will need to remap them.
# Clearcut only accepts aligned sequences. Let Alignment object handle
# unaligned sequences.
seq_aln = Alignment(aln,MolType=moltype)
#get int mapping
int_map, int_keys = seq_aln.getIntMap()
#create new Alignment object with int_map
int_map = Alignment(int_map)
# Collect result
result = app(int_map.toFasta())
# Build tree
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
for node in tree.tips():
node.Name = int_keys[node.Name]
# Clean up
result.cleanUp()
del(seq_aln, app, result, int_map, int_keys, params)
return tree | Returns a tree from Alignment object aln.
aln: an cogent.core.alignment.Alignment object, or data that can be used
to build one.
- Clearcut only accepts aligned sequences. Alignment object used to
handle unaligned sequences.
moltype: a cogent.core.moltype object.
- NOTE: If moltype = RNA, we must convert to DNA since Clearcut v1.0.8
gives incorrect results if RNA is passed in. 'U' is treated as an
incorrect character and is excluded from distance calculations.
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Clearcut app controller.
The result will be an cogent.core.tree.PhyloNode object, or None if tree
fails. | Below is the the instruction that describes the task:
### Input:
Returns a tree from Alignment object aln.
aln: an cogent.core.alignment.Alignment object, or data that can be used
to build one.
- Clearcut only accepts aligned sequences. Alignment object used to
handle unaligned sequences.
moltype: a cogent.core.moltype object.
- NOTE: If moltype = RNA, we must convert to DNA since Clearcut v1.0.8
gives incorrect results if RNA is passed in. 'U' is treated as an
incorrect character and is excluded from distance calculations.
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Clearcut app controller.
The result will be an cogent.core.tree.PhyloNode object, or None if tree
fails.
### Response:
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params={},\
working_dir='/tmp'):
"""Returns a tree from Alignment object aln.
aln: an cogent.core.alignment.Alignment object, or data that can be used
to build one.
- Clearcut only accepts aligned sequences. Alignment object used to
handle unaligned sequences.
moltype: a cogent.core.moltype object.
- NOTE: If moltype = RNA, we must convert to DNA since Clearcut v1.0.8
gives incorrect results if RNA is passed in. 'U' is treated as an
incorrect character and is excluded from distance calculations.
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Clearcut app controller.
The result will be an cogent.core.tree.PhyloNode object, or None if tree
fails.
"""
params['--out'] = get_tmp_filename(working_dir)
# Create instance of app controller, enable tree, disable alignment
app = Clearcut(InputHandler='_input_as_multiline_string', params=params, \
WorkingDir=working_dir, SuppressStdout=True,\
SuppressStderr=True)
#Input is an alignment
app.Parameters['-a'].on()
#Turn off input as distance matrix
app.Parameters['-d'].off()
#If moltype = RNA, we must convert to DNA.
if moltype == RNA:
moltype = DNA
if best_tree:
app.Parameters['-N'].on()
#Turn on correct moltype
moltype_string = moltype.label.upper()
app.Parameters[MOLTYPE_MAP[moltype_string]].on()
# Setup mapping. Clearcut clips identifiers. We will need to remap them.
# Clearcut only accepts aligned sequences. Let Alignment object handle
# unaligned sequences.
seq_aln = Alignment(aln,MolType=moltype)
#get int mapping
int_map, int_keys = seq_aln.getIntMap()
#create new Alignment object with int_map
int_map = Alignment(int_map)
# Collect result
result = app(int_map.toFasta())
# Build tree
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
for node in tree.tips():
node.Name = int_keys[node.Name]
# Clean up
result.cleanUp()
del(seq_aln, app, result, int_map, int_keys, params)
return tree |
def read_flash(self, addr=0xFF, page=0x00):
"""Read back a flash page from the Crazyflie and return it"""
buff = bytearray()
page_size = self.targets[addr].page_size
for i in range(0, int(math.ceil(page_size / 25.0))):
pk = None
retry_counter = 5
while ((not pk or pk.header != 0xFF or
struct.unpack('<BB', pk.data[0:2]) != (addr, 0x1C)) and
retry_counter >= 0):
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack('<BBHH', addr, 0x1C, page, (i * 25))
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1
if (retry_counter < 0):
return None
else:
buff += pk.data[6:]
# For some reason we get one byte extra here...
return buff[0:page_size] | Read back a flash page from the Crazyflie and return it | Below is the the instruction that describes the task:
### Input:
Read back a flash page from the Crazyflie and return it
### Response:
def read_flash(self, addr=0xFF, page=0x00):
"""Read back a flash page from the Crazyflie and return it"""
buff = bytearray()
page_size = self.targets[addr].page_size
for i in range(0, int(math.ceil(page_size / 25.0))):
pk = None
retry_counter = 5
while ((not pk or pk.header != 0xFF or
struct.unpack('<BB', pk.data[0:2]) != (addr, 0x1C)) and
retry_counter >= 0):
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack('<BBHH', addr, 0x1C, page, (i * 25))
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1
if (retry_counter < 0):
return None
else:
buff += pk.data[6:]
# For some reason we get one byte extra here...
return buff[0:page_size] |
def _parse_title_url(html_chunk):
"""
Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings.
"""
title = html_chunk.find("div", {"class": "comment"})
if not title:
return _parse_alt_title(html_chunk), None
title = title[0].find("h2")
if not title:
return _parse_alt_title(html_chunk), None
# look for the url of the book if present
url = None
url_tag = title[0].find("a")
if url_tag:
url = url_tag[0].params.get("href", None)
title = url_tag
return title[0].getContent(), normalize_url(BASE_URL, url) | Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings. | Below is the the instruction that describes the task:
### Input:
Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings.
### Response:
def _parse_title_url(html_chunk):
"""
Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings.
"""
title = html_chunk.find("div", {"class": "comment"})
if not title:
return _parse_alt_title(html_chunk), None
title = title[0].find("h2")
if not title:
return _parse_alt_title(html_chunk), None
# look for the url of the book if present
url = None
url_tag = title[0].find("a")
if url_tag:
url = url_tag[0].params.get("href", None)
title = url_tag
return title[0].getContent(), normalize_url(BASE_URL, url) |
def _get_candidates(self, v):
""" Collect candidates from all buckets from all hashes """
candidates = []
for lshash in self.lshashes:
for bucket_key in lshash.hash_vector(v, querying=True):
bucket_content = self.storage.get_bucket(
lshash.hash_name,
bucket_key,
)
#print 'Bucket %s size %d' % (bucket_key, len(bucket_content))
candidates.extend(bucket_content)
return candidates | Collect candidates from all buckets from all hashes | Below is the the instruction that describes the task:
### Input:
Collect candidates from all buckets from all hashes
### Response:
def _get_candidates(self, v):
""" Collect candidates from all buckets from all hashes """
candidates = []
for lshash in self.lshashes:
for bucket_key in lshash.hash_vector(v, querying=True):
bucket_content = self.storage.get_bucket(
lshash.hash_name,
bucket_key,
)
#print 'Bucket %s size %d' % (bucket_key, len(bucket_content))
candidates.extend(bucket_content)
return candidates |
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class | Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method. | Below is the the instruction that describes the task:
### Input:
Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
### Response:
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class |
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength | Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength. | Below is the the instruction that describes the task:
### Input:
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
### Response:
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength |
def Yoon_Thodos(T, Tc, Pc, MW):
r'''Calculates the viscosity of a gas using an emperical formula
developed in [1]_.
.. math::
\eta \xi \times 10^8 = 46.10 T_r^{0.618} - 20.40 \exp(-0.449T_r) + 1
9.40\exp(-4.058T_r)+1
\xi = 2173.424 T_c^{1/6} MW^{-1/2} P_c^{-2/3}
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
Molwcular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*S]
Notes
-----
This equation has been tested. The equation uses SI units only internally.
The constant 2173.424 is an adjustment factor for units.
Average deviation within 3% for most compounds.
Greatest accuracy with dipole moments close to 0.
Hydrogen and helium have different coefficients, not implemented.
This is DIPPR Procedure 8B: Method for the Viscosity of Pure,
non hydrocarbon, nonpolar gases at low pressures
Examples
--------
>>> Yoon_Thodos(300., 556.35, 4.5596E6, 153.8)
1.0194885727776819e-05
References
----------
.. [1] Yoon, Poong, and George Thodos. "Viscosity of Nonpolar Gaseous
Mixtures at Normal Pressures." AIChE Journal 16, no. 2 (1970): 300-304.
doi:10.1002/aic.690160225.
'''
Tr = T/Tc
xi = 2173.4241*Tc**(1/6.)/(MW**0.5*Pc**(2/3.))
a = 46.1
b = 0.618
c = 20.4
d = -0.449
e = 19.4
f = -4.058
return (1. + a*Tr**b - c * exp(d*Tr) + e*exp(f*Tr))/(1E8*xi) | r'''Calculates the viscosity of a gas using an emperical formula
developed in [1]_.
.. math::
\eta \xi \times 10^8 = 46.10 T_r^{0.618} - 20.40 \exp(-0.449T_r) + 1
9.40\exp(-4.058T_r)+1
\xi = 2173.424 T_c^{1/6} MW^{-1/2} P_c^{-2/3}
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
Molwcular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*S]
Notes
-----
This equation has been tested. The equation uses SI units only internally.
The constant 2173.424 is an adjustment factor for units.
Average deviation within 3% for most compounds.
Greatest accuracy with dipole moments close to 0.
Hydrogen and helium have different coefficients, not implemented.
This is DIPPR Procedure 8B: Method for the Viscosity of Pure,
non hydrocarbon, nonpolar gases at low pressures
Examples
--------
>>> Yoon_Thodos(300., 556.35, 4.5596E6, 153.8)
1.0194885727776819e-05
References
----------
.. [1] Yoon, Poong, and George Thodos. "Viscosity of Nonpolar Gaseous
Mixtures at Normal Pressures." AIChE Journal 16, no. 2 (1970): 300-304.
doi:10.1002/aic.690160225. | Below is the the instruction that describes the task:
### Input:
r'''Calculates the viscosity of a gas using an emperical formula
developed in [1]_.
.. math::
\eta \xi \times 10^8 = 46.10 T_r^{0.618} - 20.40 \exp(-0.449T_r) + 1
9.40\exp(-4.058T_r)+1
\xi = 2173.424 T_c^{1/6} MW^{-1/2} P_c^{-2/3}
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
Molwcular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*S]
Notes
-----
This equation has been tested. The equation uses SI units only internally.
The constant 2173.424 is an adjustment factor for units.
Average deviation within 3% for most compounds.
Greatest accuracy with dipole moments close to 0.
Hydrogen and helium have different coefficients, not implemented.
This is DIPPR Procedure 8B: Method for the Viscosity of Pure,
non hydrocarbon, nonpolar gases at low pressures
Examples
--------
>>> Yoon_Thodos(300., 556.35, 4.5596E6, 153.8)
1.0194885727776819e-05
References
----------
.. [1] Yoon, Poong, and George Thodos. "Viscosity of Nonpolar Gaseous
Mixtures at Normal Pressures." AIChE Journal 16, no. 2 (1970): 300-304.
doi:10.1002/aic.690160225.
### Response:
def Yoon_Thodos(T, Tc, Pc, MW):
r'''Calculates the viscosity of a gas using an emperical formula
developed in [1]_.
.. math::
\eta \xi \times 10^8 = 46.10 T_r^{0.618} - 20.40 \exp(-0.449T_r) + 1
9.40\exp(-4.058T_r)+1
\xi = 2173.424 T_c^{1/6} MW^{-1/2} P_c^{-2/3}
Parameters
----------
T : float
Temperature of the fluid [K]
Tc : float
Critical temperature of the fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
MW : float
Molwcular weight of fluid [g/mol]
Returns
-------
mu_g : float
Viscosity of gas, [Pa*S]
Notes
-----
This equation has been tested. The equation uses SI units only internally.
The constant 2173.424 is an adjustment factor for units.
Average deviation within 3% for most compounds.
Greatest accuracy with dipole moments close to 0.
Hydrogen and helium have different coefficients, not implemented.
This is DIPPR Procedure 8B: Method for the Viscosity of Pure,
non hydrocarbon, nonpolar gases at low pressures
Examples
--------
>>> Yoon_Thodos(300., 556.35, 4.5596E6, 153.8)
1.0194885727776819e-05
References
----------
.. [1] Yoon, Poong, and George Thodos. "Viscosity of Nonpolar Gaseous
Mixtures at Normal Pressures." AIChE Journal 16, no. 2 (1970): 300-304.
doi:10.1002/aic.690160225.
'''
Tr = T/Tc
xi = 2173.4241*Tc**(1/6.)/(MW**0.5*Pc**(2/3.))
a = 46.1
b = 0.618
c = 20.4
d = -0.449
e = 19.4
f = -4.058
return (1. + a*Tr**b - c * exp(d*Tr) + e*exp(f*Tr))/(1E8*xi) |
def factory(
description="",
codes=[200],
response_example=None,
response_ctor=None,
):
"""
desc: Describes a response to an API call
args:
- name: description
type: str
desc: A description of the condition that causes this response
required: false
default: ""
- name: codes
type: int
desc: >
One or more HTTP status codes associated with
this response
required: false
default: [200]
- name: response_example
type: dict
desc: An example JSON response body
required: false
default: null
- name: response_help
type: DocString
desc: Help for @response_example
required: false
default: null
"""
return RouteMethodResponse(
description,
codes,
response_example,
DocString.from_ctor(response_ctor) if response_ctor else None,
) | desc: Describes a response to an API call
args:
- name: description
type: str
desc: A description of the condition that causes this response
required: false
default: ""
- name: codes
type: int
desc: >
One or more HTTP status codes associated with
this response
required: false
default: [200]
- name: response_example
type: dict
desc: An example JSON response body
required: false
default: null
- name: response_help
type: DocString
desc: Help for @response_example
required: false
default: null | Below is the the instruction that describes the task:
### Input:
desc: Describes a response to an API call
args:
- name: description
type: str
desc: A description of the condition that causes this response
required: false
default: ""
- name: codes
type: int
desc: >
One or more HTTP status codes associated with
this response
required: false
default: [200]
- name: response_example
type: dict
desc: An example JSON response body
required: false
default: null
- name: response_help
type: DocString
desc: Help for @response_example
required: false
default: null
### Response:
def factory(
description="",
codes=[200],
response_example=None,
response_ctor=None,
):
"""
desc: Describes a response to an API call
args:
- name: description
type: str
desc: A description of the condition that causes this response
required: false
default: ""
- name: codes
type: int
desc: >
One or more HTTP status codes associated with
this response
required: false
default: [200]
- name: response_example
type: dict
desc: An example JSON response body
required: false
default: null
- name: response_help
type: DocString
desc: Help for @response_example
required: false
default: null
"""
return RouteMethodResponse(
description,
codes,
response_example,
DocString.from_ctor(response_ctor) if response_ctor else None,
) |
def _make_record(self,row):
"""Make a record dictionary from the result of a fetch_"""
res = dict(zip(self.all_fields,row))
for k in self.types:
res[k] = self.types[k](res[k])
return res | Make a record dictionary from the result of a fetch_ | Below is the the instruction that describes the task:
### Input:
Make a record dictionary from the result of a fetch_
### Response:
def _make_record(self,row):
"""Make a record dictionary from the result of a fetch_"""
res = dict(zip(self.all_fields,row))
for k in self.types:
res[k] = self.types[k](res[k])
return res |
def _pdf(self, xloc, dist, cache):
"""Probability density function."""
return evaluation.evaluate_density(dist, -xloc, cache=cache) | Probability density function. | Below is the the instruction that describes the task:
### Input:
Probability density function.
### Response:
def _pdf(self, xloc, dist, cache):
"""Probability density function."""
return evaluation.evaluate_density(dist, -xloc, cache=cache) |
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1):
"""
generate number partition driver and cost functions
:param asset_list: list to binary partition
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution.
"""
cost_operators = []
ref_operators = []
for ii in range(len(asset_list)):
for jj in range(ii + 1, len(asset_list)):
cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) *
PauliTerm("Z", jj, A*asset_list[jj])]))
ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)]))
cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))]))
if minimizer_kwargs is None:
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2,
'xtol': 1.0e-2,
'disp': True}}
qc = get_qc(f"{len(asset_list)}q-qvm")
qaoa_inst = QAOA(qc, list(range(len(asset_list))), steps=steps, cost_ham=cost_operators,
ref_ham=ref_operators, store_basis=True,
minimizer=minimize, minimizer_kwargs=minimizer_kwargs,
vqe_options={'disp': print})
return qaoa_inst | generate number partition driver and cost functions
:param asset_list: list to binary partition
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution. | Below is the the instruction that describes the task:
### Input:
generate number partition driver and cost functions
:param asset_list: list to binary partition
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution.
### Response:
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1):
"""
generate number partition driver and cost functions
:param asset_list: list to binary partition
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution.
"""
cost_operators = []
ref_operators = []
for ii in range(len(asset_list)):
for jj in range(ii + 1, len(asset_list)):
cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) *
PauliTerm("Z", jj, A*asset_list[jj])]))
ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)]))
cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))]))
if minimizer_kwargs is None:
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2,
'xtol': 1.0e-2,
'disp': True}}
qc = get_qc(f"{len(asset_list)}q-qvm")
qaoa_inst = QAOA(qc, list(range(len(asset_list))), steps=steps, cost_ham=cost_operators,
ref_ham=ref_operators, store_basis=True,
minimizer=minimize, minimizer_kwargs=minimizer_kwargs,
vqe_options={'disp': print})
return qaoa_inst |
def Title(self):
"""Return the name of the Organisation
"""
field = self.getField("Name")
field = field and field.get(self) or ""
return safe_unicode(field).encode("utf-8") | Return the name of the Organisation | Below is the the instruction that describes the task:
### Input:
Return the name of the Organisation
### Response:
def Title(self):
"""Return the name of the Organisation
"""
field = self.getField("Name")
field = field and field.get(self) or ""
return safe_unicode(field).encode("utf-8") |
def import_obj(self, obj, data, dry_run):
"""
Traverses every field in this Resource and calls
:meth:`~import_export.resources.Resource.import_field`. If
``import_field()`` results in a ``ValueError`` being raised for
one of more fields, those errors are captured and reraised as a single,
multi-field ValidationError."""
errors = {}
for field in self.get_import_fields():
if isinstance(field.widget, widgets.ManyToManyWidget):
continue
try:
self.import_field(field, obj, data)
except ValueError as e:
errors[field.attribute] = ValidationError(
force_text(e), code="invalid")
if errors:
raise ValidationError(errors) | Traverses every field in this Resource and calls
:meth:`~import_export.resources.Resource.import_field`. If
``import_field()`` results in a ``ValueError`` being raised for
one of more fields, those errors are captured and reraised as a single,
multi-field ValidationError. | Below is the the instruction that describes the task:
### Input:
Traverses every field in this Resource and calls
:meth:`~import_export.resources.Resource.import_field`. If
``import_field()`` results in a ``ValueError`` being raised for
one of more fields, those errors are captured and reraised as a single,
multi-field ValidationError.
### Response:
def import_obj(self, obj, data, dry_run):
"""
Traverses every field in this Resource and calls
:meth:`~import_export.resources.Resource.import_field`. If
``import_field()`` results in a ``ValueError`` being raised for
one of more fields, those errors are captured and reraised as a single,
multi-field ValidationError."""
errors = {}
for field in self.get_import_fields():
if isinstance(field.widget, widgets.ManyToManyWidget):
continue
try:
self.import_field(field, obj, data)
except ValueError as e:
errors[field.attribute] = ValidationError(
force_text(e), code="invalid")
if errors:
raise ValidationError(errors) |
def collect(self, file_paths):
"""
Takes in a list of string file_paths, and parses through them using
the converter, strategies, and switches defined at object
initialization.
It returns two dictionaries- the first maps from
file_path strings to inner dictionaries, and those inner dictionaries
map from AnchorHub tag to converted anchors.
The second dictionary maps file_paths to lists. Each entry on the
list corresponds to a duplicate tag found in the file. The entries
are lists with the following information: [tag, line_number,
previous-anchor-used]
:param file_paths:
:return: Two dictionaries. The first maps string file paths to
dictionaries. These inner dictionaries map AnchorHub tags to
generated anchors. The second dictionary maps file paths to lists
containing information about duplicate tags found on each page.
"""
for file_path in file_paths:
self._anchors[file_path], d = self.collect_single_file(file_path)
if len(d) > 0:
# There were duplicates found in the file
self._duplicate_tags[file_path] = d
self._reset_switches()
return self._anchors, self._duplicate_tags | Takes in a list of string file_paths, and parses through them using
the converter, strategies, and switches defined at object
initialization.
It returns two dictionaries- the first maps from
file_path strings to inner dictionaries, and those inner dictionaries
map from AnchorHub tag to converted anchors.
The second dictionary maps file_paths to lists. Each entry on the
list corresponds to a duplicate tag found in the file. The entries
are lists with the following information: [tag, line_number,
previous-anchor-used]
:param file_paths:
:return: Two dictionaries. The first maps string file paths to
dictionaries. These inner dictionaries map AnchorHub tags to
generated anchors. The second dictionary maps file paths to lists
containing information about duplicate tags found on each page. | Below is the the instruction that describes the task:
### Input:
Takes in a list of string file_paths, and parses through them using
the converter, strategies, and switches defined at object
initialization.
It returns two dictionaries- the first maps from
file_path strings to inner dictionaries, and those inner dictionaries
map from AnchorHub tag to converted anchors.
The second dictionary maps file_paths to lists. Each entry on the
list corresponds to a duplicate tag found in the file. The entries
are lists with the following information: [tag, line_number,
previous-anchor-used]
:param file_paths:
:return: Two dictionaries. The first maps string file paths to
dictionaries. These inner dictionaries map AnchorHub tags to
generated anchors. The second dictionary maps file paths to lists
containing information about duplicate tags found on each page.
### Response:
def collect(self, file_paths):
"""
Takes in a list of string file_paths, and parses through them using
the converter, strategies, and switches defined at object
initialization.
It returns two dictionaries- the first maps from
file_path strings to inner dictionaries, and those inner dictionaries
map from AnchorHub tag to converted anchors.
The second dictionary maps file_paths to lists. Each entry on the
list corresponds to a duplicate tag found in the file. The entries
are lists with the following information: [tag, line_number,
previous-anchor-used]
:param file_paths:
:return: Two dictionaries. The first maps string file paths to
dictionaries. These inner dictionaries map AnchorHub tags to
generated anchors. The second dictionary maps file paths to lists
containing information about duplicate tags found on each page.
"""
for file_path in file_paths:
self._anchors[file_path], d = self.collect_single_file(file_path)
if len(d) > 0:
# There were duplicates found in the file
self._duplicate_tags[file_path] = d
self._reset_switches()
return self._anchors, self._duplicate_tags |
def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=set()):
"""
Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return:
"""
if not dimensions:
dimensions = self.primary_dimensions
key = p_dim.name
if s_dim:
key += '/' + s_dim.name
# Ignore if the key already exists or the primary and secondary dims are the same
if key in extant or p_dim == s_dim:
return
# Don't allow geography to be a secondary dimension. It must either be a primary dimension
# ( to make a map ) or a filter, or a small-multiple
if s_dim and s_dim.valuetype_class.is_geo():
return
extant.add(key)
filtered = {}
for d in dimensions:
if d != p_dim and d != s_dim:
filtered[d.name] = d.pstats.uvalues.keys()
if p_dim.valuetype_class.is_time():
value_type = 'time'
chart_type = 'line'
elif p_dim.valuetype_class.is_geo():
value_type = 'geo'
chart_type = 'map'
else:
value_type = 'general'
chart_type = 'bar'
return dict(
key=key,
p_dim=p_dim.name,
p_dim_type=value_type,
p_label=p_dim.label_or_self.name,
s_dim=s_dim.name if s_dim else None,
s_label=s_dim.label_or_self.name if s_dim else None,
filters=filtered,
chart_type=chart_type
) | Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return: | Below is the the instruction that describes the task:
### Input:
Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return:
### Response:
def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=set()):
"""
Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return:
"""
if not dimensions:
dimensions = self.primary_dimensions
key = p_dim.name
if s_dim:
key += '/' + s_dim.name
# Ignore if the key already exists or the primary and secondary dims are the same
if key in extant or p_dim == s_dim:
return
# Don't allow geography to be a secondary dimension. It must either be a primary dimension
# ( to make a map ) or a filter, or a small-multiple
if s_dim and s_dim.valuetype_class.is_geo():
return
extant.add(key)
filtered = {}
for d in dimensions:
if d != p_dim and d != s_dim:
filtered[d.name] = d.pstats.uvalues.keys()
if p_dim.valuetype_class.is_time():
value_type = 'time'
chart_type = 'line'
elif p_dim.valuetype_class.is_geo():
value_type = 'geo'
chart_type = 'map'
else:
value_type = 'general'
chart_type = 'bar'
return dict(
key=key,
p_dim=p_dim.name,
p_dim_type=value_type,
p_label=p_dim.label_or_self.name,
s_dim=s_dim.name if s_dim else None,
s_label=s_dim.label_or_self.name if s_dim else None,
filters=filtered,
chart_type=chart_type
) |
def get_reqv(self):
"""
:returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set
"""
if 'reqv' not in self.inputs:
return
return {key: valid.RjbEquivalent(value)
for key, value in self.inputs['reqv'].items()} | :returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set | Below is the the instruction that describes the task:
### Input:
:returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set
### Response:
def get_reqv(self):
"""
:returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set
"""
if 'reqv' not in self.inputs:
return
return {key: valid.RjbEquivalent(value)
for key, value in self.inputs['reqv'].items()} |
def save_token(self, access_token):
"""
Stores the access token and additional data in redis.
See :class:`oauth2.store.AccessTokenStore`.
"""
self.write(access_token.token, access_token.__dict__)
unique_token_key = self._unique_token_key(access_token.client_id,
access_token.grant_type,
access_token.user_id)
self.write(unique_token_key, access_token.__dict__)
if access_token.refresh_token is not None:
self.write(access_token.refresh_token, access_token.__dict__) | Stores the access token and additional data in redis.
See :class:`oauth2.store.AccessTokenStore`. | Below is the the instruction that describes the task:
### Input:
Stores the access token and additional data in redis.
See :class:`oauth2.store.AccessTokenStore`.
### Response:
def save_token(self, access_token):
"""
Stores the access token and additional data in redis.
See :class:`oauth2.store.AccessTokenStore`.
"""
self.write(access_token.token, access_token.__dict__)
unique_token_key = self._unique_token_key(access_token.client_id,
access_token.grant_type,
access_token.user_id)
self.write(unique_token_key, access_token.__dict__)
if access_token.refresh_token is not None:
self.write(access_token.refresh_token, access_token.__dict__) |
def size(self):
""" -> #int number of keys in this instance """
return int(self._client.hget(self._bucket_key, self.key_prefix) or 0) | -> #int number of keys in this instance | Below is the the instruction that describes the task:
### Input:
-> #int number of keys in this instance
### Response:
def size(self):
""" -> #int number of keys in this instance """
return int(self._client.hget(self._bucket_key, self.key_prefix) or 0) |
def initialTrendSmoothingFactors(self, timeSeries):
""" Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0
"""
result = 0.0
seasonLength = self.get_parameter("seasonLength")
k = min(len(timeSeries) - seasonLength, seasonLength) #In case of only one full season, use average trend of the months that we have twice
for i in xrange(0, k):
result += (timeSeries[seasonLength + i][1] - timeSeries[i][1]) / seasonLength
return result / k | Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0 | Below is the the instruction that describes the task:
### Input:
Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0
### Response:
def initialTrendSmoothingFactors(self, timeSeries):
""" Calculate the initial Trend smoothing Factor b0.
Explanation:
http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
:return: Returns the initial trend smoothing factor b0
"""
result = 0.0
seasonLength = self.get_parameter("seasonLength")
k = min(len(timeSeries) - seasonLength, seasonLength) #In case of only one full season, use average trend of the months that we have twice
for i in xrange(0, k):
result += (timeSeries[seasonLength + i][1] - timeSeries[i][1]) / seasonLength
return result / k |
def update_user(self, id, **kwargs): # noqa: E501
"""Update user with given user groups and permissions. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param UserRequestDTO body: Example Body: <pre>{ \"identifier\": \"user@example.com\", \"groups\": [ \"user_management\" ], \"userGroups\": [ \"8b23136b-ecd2-4cb5-8c92-62477dcc4090\" ] }</pre>
:return: UserModel
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_user_with_http_info(id, **kwargs) # noqa: E501
return data | Update user with given user groups and permissions. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param UserRequestDTO body: Example Body: <pre>{ \"identifier\": \"user@example.com\", \"groups\": [ \"user_management\" ], \"userGroups\": [ \"8b23136b-ecd2-4cb5-8c92-62477dcc4090\" ] }</pre>
:return: UserModel
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Update user with given user groups and permissions. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param UserRequestDTO body: Example Body: <pre>{ \"identifier\": \"user@example.com\", \"groups\": [ \"user_management\" ], \"userGroups\": [ \"8b23136b-ecd2-4cb5-8c92-62477dcc4090\" ] }</pre>
:return: UserModel
If the method is called asynchronously,
returns the request thread.
### Response:
def update_user(self, id, **kwargs): # noqa: E501
"""Update user with given user groups and permissions. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param UserRequestDTO body: Example Body: <pre>{ \"identifier\": \"user@example.com\", \"groups\": [ \"user_management\" ], \"userGroups\": [ \"8b23136b-ecd2-4cb5-8c92-62477dcc4090\" ] }</pre>
:return: UserModel
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_user_with_http_info(id, **kwargs) # noqa: E501
return data |
def propagate_paths_and_modules(self, context, paths, modules):
"""
One size fits all method to ensure a target context has been preloaded
with a set of small files and Python modules.
"""
for path in paths:
self.propagate_to(context, mitogen.core.to_text(path))
self.router.responder.forward_modules(context, modules) | One size fits all method to ensure a target context has been preloaded
with a set of small files and Python modules. | Below is the the instruction that describes the task:
### Input:
One size fits all method to ensure a target context has been preloaded
with a set of small files and Python modules.
### Response:
def propagate_paths_and_modules(self, context, paths, modules):
"""
One size fits all method to ensure a target context has been preloaded
with a set of small files and Python modules.
"""
for path in paths:
self.propagate_to(context, mitogen.core.to_text(path))
self.router.responder.forward_modules(context, modules) |
def transform_series(series, force_list=False, buffers=None):
''' Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict
'''
# not checking for pd here, this function should only be called if it
# is already known that series is a Pandas Series type
if isinstance(series, pd.PeriodIndex):
vals = series.to_timestamp().values
else:
vals = series.values
return transform_array(vals, force_list=force_list, buffers=buffers) | Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict | Below is the the instruction that describes the task:
### Input:
Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict
### Response:
def transform_series(series, force_list=False, buffers=None):
''' Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict
'''
# not checking for pd here, this function should only be called if it
# is already known that series is a Pandas Series type
if isinstance(series, pd.PeriodIndex):
vals = series.to_timestamp().values
else:
vals = series.values
return transform_array(vals, force_list=force_list, buffers=buffers) |
def combine_first_two_dimensions(x):
"""Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
"""
ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))
old_shape = x.get_shape().dims
a, b = old_shape[:2]
new_shape = [a * b if a and b else None] + old_shape[2:]
ret.set_shape(new_shape)
return ret | Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...] | Below is the the instruction that describes the task:
### Input:
Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
### Response:
def combine_first_two_dimensions(x):
"""Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
"""
ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))
old_shape = x.get_shape().dims
a, b = old_shape[:2]
new_shape = [a * b if a and b else None] + old_shape[2:]
ret.set_shape(new_shape)
return ret |
def get_config(app, prefix='hive_'):
"""Conveniently get the security configuration for the specified
application without the annoying 'SECURITY_' prefix.
:param app: The application to inspect
"""
items = app.config.items()
prefix = prefix.upper()
def strip_prefix(tup):
return (tup[0].replace(prefix, ''), tup[1])
return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)]) | Conveniently get the security configuration for the specified
application without the annoying 'SECURITY_' prefix.
:param app: The application to inspect | Below is the the instruction that describes the task:
### Input:
Conveniently get the security configuration for the specified
application without the annoying 'SECURITY_' prefix.
:param app: The application to inspect
### Response:
def get_config(app, prefix='hive_'):
"""Conveniently get the security configuration for the specified
application without the annoying 'SECURITY_' prefix.
:param app: The application to inspect
"""
items = app.config.items()
prefix = prefix.upper()
def strip_prefix(tup):
return (tup[0].replace(prefix, ''), tup[1])
return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)]) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.