| body (stringlengths 26–98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1–16.8k) | path (stringlengths 5–230) | name (stringlengths 1–96) | repository_name (stringlengths 7–89) | lang (stringclasses, 1 value) | body_without_docstring (stringlengths 20–98.2k) |
|---|---|---|---|---|---|---|---|
@pytest.fixture
def replacer(self, how, from_key, to_key):
'\n Object we will pass to `Series.replace`\n '
if (how == 'dict'):
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif (how == 'series'):
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
return replacer
| -4,725,402,741,145,762,000
|
Object we will pass to `Series.replace`
|
pandas/tests/indexing/test_coercion.py
|
replacer
|
701KHK1915/8-PANDAS
|
python
|
@pytest.fixture
def replacer(self, how, from_key, to_key):
'\n \n '
if (how == 'dict'):
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif (how == 'series'):
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
return replacer
|
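For context on the fixture above: it builds the `replacer` argument for `Series.replace` either as a dict or as a Series whose index holds the values to replace. A minimal standalone sketch (not part of the pandas test suite; the literal values are illustrative) of how each form behaves:

```python
# Minimal sketch: the two replacer forms the fixture above can produce,
# passed to Series.replace. Values are illustrative, not from self.rep.
import pandas as pd

s = pd.Series([1, 2, 3, 2])

# dict form: {old_value: new_value}
dict_replacer = {2: 20, 3: 30}
print(s.replace(dict_replacer).tolist())      # [1, 20, 30, 20]

# Series form: the index holds values to replace, the data holds replacements
series_replacer = pd.Series([20, 30], index=[2, 3])
print(s.replace(series_replacer).tolist())    # [1, 20, 30, 20]
```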
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_594(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]:
'https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n '
required_fields = [('intent', 'intent__ext'), ('status', 'status__ext')]
_missing = object()
def _fallback():
return ''
errors: typing.List['ErrorWrapper'] = []
for (name, ext) in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if (value not in (_missing, None)):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if (ext_value not in (_missing, None)):
if isinstance(ext_value, dict):
missing_ext = (len(ext_value.get('extension', [])) == 0)
elif (getattr(ext_value.__class__, 'get_resource_type', _fallback)() == 'FHIRPrimitiveExtension'):
if (ext_value.extension and (len(ext_value.extension) > 0)):
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if (not validate_pass):
continue
if (ext_value.extension and (len(ext_value.extension) > 0)):
missing_ext = False
if missing_ext:
if (value is _missing):
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias))
if (len(errors) > 0):
raise ValidationError(errors, cls)
return values
| -6,753,809,868,066,772,000
|
https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
|
fhir/resources/task.py
|
validate_required_primitive_elements_594
|
chgl/fhir.resources
|
python
|
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_594(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]:
'https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n '
required_fields = [('intent', 'intent__ext'), ('status', 'status__ext')]
_missing = object()
def _fallback():
        return ''
errors: typing.List['ErrorWrapper'] = []
for (name, ext) in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if (value not in (_missing, None)):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if (ext_value not in (_missing, None)):
if isinstance(ext_value, dict):
missing_ext = (len(ext_value.get('extension', [])) == 0)
elif (getattr(ext_value.__class__, 'get_resource_type', _fallback)() == 'FHIRPrimitiveExtension'):
if (ext_value.extension and (len(ext_value.extension) > 0)):
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if (not validate_pass):
continue
if (ext_value.extension and (len(ext_value.extension) > 0)):
missing_ext = False
if missing_ext:
if (value is _missing):
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias))
if (len(errors) > 0):
raise ValidationError(errors, cls)
return values
|
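The validator above encodes the FHIR rule that a required primitive element (here `intent` and `status` on Task) may be absent only when its extension sibling (serialized with a leading underscore in FHIR JSON, e.g. `_status`) carries at least one extension explaining the absence. As a hedged illustration, here is a self-contained sketch of that presence check on plain dicts, independent of fhir.resources and pydantic; the payloads and helper name are made up:

```python
# Standalone sketch of the rule enforced above (not the fhir.resources code):
# a required primitive like "status" may be omitted only if its "_status"
# sibling carries at least one extension explaining why the value is absent.
def required_primitive_ok(payload: dict, name: str) -> bool:
    if payload.get(name) is not None:
        return True
    ext = payload.get(f"_{name}")
    return isinstance(ext, dict) and len(ext.get("extension", [])) > 0

task_with_value = {"status": "requested", "intent": "order"}
task_with_absent_reason = {
    "intent": "order",
    "_status": {"extension": [{
        "url": "http://hl7.org/fhir/StructureDefinition/data-absent-reason",
        "valueCode": "unknown",
    }]},
}

assert required_primitive_ok(task_with_value, "status")
assert required_primitive_ok(task_with_absent_reason, "status")
assert not required_primitive_ok({"intent": "order"}, "status")
```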
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1131(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]:
'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n '
one_of_many_fields = {'value': ['valueAddress', 'valueAge', 'valueAnnotation', 'valueAttachment', 'valueBase64Binary', 'valueBoolean', 'valueCanonical', 'valueCode', 'valueCodeableConcept', 'valueCoding', 'valueContactDetail', 'valueContactPoint', 'valueContributor', 'valueCount', 'valueDataRequirement', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueDistance', 'valueDosage', 'valueDuration', 'valueExpression', 'valueHumanName', 'valueId', 'valueIdentifier', 'valueInstant', 'valueInteger', 'valueMarkdown', 'valueMeta', 'valueMoney', 'valueOid', 'valueParameterDefinition', 'valuePeriod', 'valuePositiveInt', 'valueQuantity', 'valueRange', 'valueRatio', 'valueReference', 'valueRelatedArtifact', 'valueSampledData', 'valueSignature', 'valueString', 'valueTime', 'valueTiming', 'valueTriggerDefinition', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueUsageContext', 'valueUuid']}
for (prefix, fields) in one_of_many_fields.items():
assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix)
required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True)
found = False
for field in fields:
if ((field in values) and (values[field] is not None)):
if (found is True):
raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!')
else:
found = True
if ((required is True) and (found is False)):
raise ValueError(f'Expect any of field value from this list {fields}.')
return values
| 1,223,021,297,998,806,000
|
https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
|
fhir/resources/task.py
|
validate_one_of_many_1131
|
chgl/fhir.resources
|
python
|
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1131(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]:
'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n '
one_of_many_fields = {'value': ['valueAddress', 'valueAge', 'valueAnnotation', 'valueAttachment', 'valueBase64Binary', 'valueBoolean', 'valueCanonical', 'valueCode', 'valueCodeableConcept', 'valueCoding', 'valueContactDetail', 'valueContactPoint', 'valueContributor', 'valueCount', 'valueDataRequirement', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueDistance', 'valueDosage', 'valueDuration', 'valueExpression', 'valueHumanName', 'valueId', 'valueIdentifier', 'valueInstant', 'valueInteger', 'valueMarkdown', 'valueMeta', 'valueMoney', 'valueOid', 'valueParameterDefinition', 'valuePeriod', 'valuePositiveInt', 'valueQuantity', 'valueRange', 'valueRatio', 'valueReference', 'valueRelatedArtifact', 'valueSampledData', 'valueSignature', 'valueString', 'valueTime', 'valueTiming', 'valueTriggerDefinition', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueUsageContext', 'valueUuid']}
for (prefix, fields) in one_of_many_fields.items():
assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix)
required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True)
found = False
for field in fields:
if ((field in values) and (values[field] is not None)):
if (found is True):
raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!')
else:
found = True
if ((required is True) and (found is False)):
raise ValueError(f'Expect any of field value from this list {fields}.')
return values
|
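The validator above enforces the FHIR choice-type ("value[x]") rule: among the permitted `value*` fields, at most one may be populated, and at least one when the choice is required. A minimal standalone sketch of that check on a plain dict follows; it uses only an illustrative subset of the field list and is not the fhir.resources implementation:

```python
# Minimal sketch of the value[x] choice rule the validator above enforces:
# zero or one of the permitted value* fields may be set
# (exactly one when the choice is required).
def check_one_of_many(values: dict, fields: list, required: bool) -> None:
    present = [f for f in fields if values.get(f) is not None]
    if len(present) > 1:
        raise ValueError(f"Only one of {fields} may be set, got {present}")
    if required and not present:
        raise ValueError(f"One of {fields} must be set")

value_fields = ["valueString", "valueInteger", "valueBoolean"]  # subset for illustration

check_one_of_many({"valueString": "42"}, value_fields, required=True)   # passes
check_one_of_many({}, value_fields, required=False)                     # passes
try:
    check_one_of_many({"valueString": "x", "valueInteger": 1}, value_fields, required=True)
except ValueError as err:
    print(err)  # reports that multiple choice fields were set
```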
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1260(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]:
'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n '
one_of_many_fields = {'value': ['valueAddress', 'valueAge', 'valueAnnotation', 'valueAttachment', 'valueBase64Binary', 'valueBoolean', 'valueCanonical', 'valueCode', 'valueCodeableConcept', 'valueCoding', 'valueContactDetail', 'valueContactPoint', 'valueContributor', 'valueCount', 'valueDataRequirement', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueDistance', 'valueDosage', 'valueDuration', 'valueExpression', 'valueHumanName', 'valueId', 'valueIdentifier', 'valueInstant', 'valueInteger', 'valueMarkdown', 'valueMeta', 'valueMoney', 'valueOid', 'valueParameterDefinition', 'valuePeriod', 'valuePositiveInt', 'valueQuantity', 'valueRange', 'valueRatio', 'valueReference', 'valueRelatedArtifact', 'valueSampledData', 'valueSignature', 'valueString', 'valueTime', 'valueTiming', 'valueTriggerDefinition', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueUsageContext', 'valueUuid']}
for (prefix, fields) in one_of_many_fields.items():
assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix)
required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True)
found = False
for field in fields:
if ((field in values) and (values[field] is not None)):
if (found is True):
raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!')
else:
found = True
if ((required is True) and (found is False)):
raise ValueError(f'Expect any of field value from this list {fields}.')
return values
| 5,434,378,056,132,148,000
|
https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
|
fhir/resources/task.py
|
validate_one_of_many_1260
|
chgl/fhir.resources
|
python
|
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1260(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]:
'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n '
one_of_many_fields = {'value': ['valueAddress', 'valueAge', 'valueAnnotation', 'valueAttachment', 'valueBase64Binary', 'valueBoolean', 'valueCanonical', 'valueCode', 'valueCodeableConcept', 'valueCoding', 'valueContactDetail', 'valueContactPoint', 'valueContributor', 'valueCount', 'valueDataRequirement', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueDistance', 'valueDosage', 'valueDuration', 'valueExpression', 'valueHumanName', 'valueId', 'valueIdentifier', 'valueInstant', 'valueInteger', 'valueMarkdown', 'valueMeta', 'valueMoney', 'valueOid', 'valueParameterDefinition', 'valuePeriod', 'valuePositiveInt', 'valueQuantity', 'valueRange', 'valueRatio', 'valueReference', 'valueRelatedArtifact', 'valueSampledData', 'valueSignature', 'valueString', 'valueTime', 'valueTiming', 'valueTriggerDefinition', 'valueUnsignedInt', 'valueUri', 'valueUrl', 'valueUsageContext', 'valueUuid']}
for (prefix, fields) in one_of_many_fields.items():
assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix)
required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True)
found = False
for field in fields:
if ((field in values) and (values[field] is not None)):
if (found is True):
raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!')
else:
found = True
if ((required is True) and (found is False)):
raise ValueError(f'Expect any of field value from this list {fields}.')
return values
|
def safe_name(name):
'Make name safe for use in XML output.'
return XML_SAFE_TAGS.get(name, name)
| -5,112,435,986,885,607,000
|
Make name safe for use in XML output.
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
safe_name
|
ShuHuang/batterydatabase
|
python
|
def safe_name(name):
return XML_SAFE_TAGS.get(name, name)
|
def with_condition(self, condition):
'\n Add a condition to the parser element. The condition must be a function that takes\n a match and return True or False, i.e. a function which takes tuple(list(Element), int)\n and returns bool. If the function evaluates True, the match is kept, while if the function\n evaluates False, the match is discarded. The condition is executed after any other actions.\n '
self.condition = condition
return self
| 7,868,131,931,379,119,000
|
Add a condition to the parser element. The condition must be a function that takes
a match and return True or False, i.e. a function which takes tuple(list(Element), int)
and returns bool. If the function evaluates True, the match is kept, while if the function
evaluates False, the match is discarded. The condition is executed after any other actions.
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
with_condition
|
ShuHuang/batterydatabase
|
python
|
def with_condition(self, condition):
'\n Add a condition to the parser element. The condition must be a function that takes\n a match and return True or False, i.e. a function which takes tuple(list(Element), int)\n and returns bool. If the function evaluates True, the match is kept, while if the function\n evaluates False, the match is discarded. The condition is executed after any other actions.\n '
self.condition = condition
return self
|
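As a hedged illustration of the callable `with_condition` expects: in the `parse` method shown further below, the condition is invoked with the parsed result, so a stand-in condition only needs to inspect that result and return a bool. The parser element it would be attached to is hypothetical here:

```python
# Illustrative condition callable for with_condition(); parse() calls
# condition(result), so the callable inspects the parsed result and returns a bool.
def keeps_long_matches(result):
    # keep the match only if it produced at least two parsed elements
    return result is not None and len(result) >= 2

# hypothetical usage on some BaseParserElement instance `element`:
# element.with_condition(keeps_long_matches)
```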
def scan(self, tokens, max_matches=six.MAXSIZE, overlap=False):
'\n Scans for matches in given tokens.\n\n :param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.\n :param int max_matches: The maximum number of matches to look for. Default is the maximum size possible for a list.\n :param bool overlap: Whether the found results are allowed to overlap. Default False.\n :returns: A generator of the results found. Each result is a tuple with the first element being a list of elements found, and the second and third elements are the start and end indices representing the span of the result.\n :rtype: generator(tuple(list(lxml.etree.Element), int, int))\n '
if (not self.streamlined):
self.streamline()
matches = 0
i = 0
length = len(tokens)
while ((i < length) and (matches < max_matches)):
try:
(results, next_i) = self.parse(tokens, i)
except ParseException as err:
i += 1
else:
if (next_i > i):
matches += 1
if (len(results) == 1):
results = results[0]
(yield (results, i, next_i))
if overlap:
i += 1
else:
i = next_i
else:
i += 1
| 4,403,405,174,798,983,700
|
Scans for matches in given tokens.
:param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.
:param int max_matches: The maximum number of matches to look for. Default is the maximum size possible for a list.
:param bool overlap: Whether the found results are allowed to overlap. Default False.
:returns: A generator of the results found. Each result is a tuple with the first element being a list of elements found, and the second and third elements are the start and end indices representing the span of the result.
:rtype: generator(tuple(list(lxml.etree.Element), int, int))
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
scan
|
ShuHuang/batterydatabase
|
python
|
def scan(self, tokens, max_matches=six.MAXSIZE, overlap=False):
'\n Scans for matches in given tokens.\n\n :param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.\n :param int max_matches: The maximum number of matches to look for. Default is the maximum size possible for a list.\n :param bool overlap: Whether the found results are allowed to overlap. Default False.\n :returns: A generator of the results found. Each result is a tuple with the first element being a list of elements found, and the second and third elements are the start and end indices representing the span of the result.\n :rtype: generator(tuple(list(lxml.etree.Element), int, int))\n '
if (not self.streamlined):
self.streamline()
matches = 0
i = 0
length = len(tokens)
while ((i < length) and (matches < max_matches)):
try:
(results, next_i) = self.parse(tokens, i)
except ParseException as err:
i += 1
else:
if (next_i > i):
matches += 1
if (len(results) == 1):
results = results[0]
(yield (results, i, next_i))
if overlap:
i += 1
else:
i = next_i
else:
i += 1
|
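`scan` is the generator that drives matching over POS-tagged tokens. A hedged sketch of how its results are typically consumed, based only on the signature and return type documented above; `parser_element` stands in for any streamlined parser element and the token list is made up:

```python
# Illustrative driver for scan(), based only on the documented signature:
# it yields (results, start, end) tuples over (token, pos_tag) pairs.
tokens = [("LiFePO4", "NN"), ("cathode", "NN"), ("delivers", "VBZ"),
          ("170", "CD"), ("mAh/g", "NN")]

def consume_matches(parser_element, tokens):
    # parser_element is a placeholder for a concrete BaseParserElement subclass
    for results, start, end in parser_element.scan(tokens, overlap=False):
        matched_span = tokens[start:end]
        print(start, end, matched_span, results)
```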
def parse(self, tokens, i, actions=True):
'\n Parse given tokens and return results\n\n :param tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.\n :type tokens: list(tuple(string, string))\n :param int i: The index at which to start scanning from\n :param bool actions: Whether the actions attached to this element will be executed. Default True.\n :returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.\n :rtype: tuple(list(Element) or None, int)\n '
start = i
try:
(result, i) = self._parse_tokens(tokens, i, actions)
except IndexError:
raise ParseException(tokens, i, 'IndexError', self)
if actions:
for action in self.actions:
action_result = action(tokens, start, result)
if (action_result is not None):
result = action_result
if (self.condition is not None):
if (not self.condition(result)):
raise ParseException(tokens, i, 'Did not satisfy condition', self)
return (result, i)
| -8,761,858,440,767,062,000
|
Parse given tokens and return results
:param tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.
:type tokens: list(tuple(string, string))
:param int i: The index at which to start scanning from
:param bool actions: Whether the actions attached to this element will be executed. Default True.
:returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.
:rtype: tuple(list(Element) or None, int)
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
parse
|
ShuHuang/batterydatabase
|
python
|
def parse(self, tokens, i, actions=True):
'\n Parse given tokens and return results\n\n :param tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.\n :type tokens: list(tuple(string, string))\n :param int i: The index at which to start scanning from\n :param bool actions: Whether the actions attached to this element will be executed. Default True.\n :returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.\n :rtype: tuple(list(Element) or None, int)\n '
start = i
try:
(result, i) = self._parse_tokens(tokens, i, actions)
except IndexError:
raise ParseException(tokens, i, 'IndexError', self)
if actions:
for action in self.actions:
action_result = action(tokens, start, result)
if (action_result is not None):
result = action_result
if (self.condition is not None):
if (not self.condition(result)):
raise ParseException(tokens, i, 'Did not satisfy condition', self)
return (result, i)
|
def _parse_tokens(self, tokens, i, actions=True):
'\n Implemented by subclasses, parses given tokens and returns the results\n\n :param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.\n :param int i: The index at which to start scanning from\n :param bool actions: Whether the actions attached to this element will be executed. Default True.\n :returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.\n :rtype: tuple(list(Element) or None, int)\n '
return (None, i)
| -5,887,532,987,363,269,000
|
Implemented by subclasses, parses given tokens and returns the results
:param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.
:param int i: The index at which to start scanning from
:param bool actions: Whether the actions attached to this element will be executed. Default True.
:returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.
:rtype: tuple(list(Element) or None, int)
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
_parse_tokens
|
ShuHuang/batterydatabase
|
python
|
def _parse_tokens(self, tokens, i, actions=True):
'\n Implemented by subclasses, parses given tokens and returns the results\n\n :param list(tuple(string, string)) tokens: A tokenized representation of the text to scan. The first string in the tuple is the content, typically a word, and the second string is the part of speech tag.\n :param int i: The index at which to start scanning from\n :param bool actions: Whether the actions attached to this element will be executed. Default True.\n :returns: A tuple where the first element is a list of elements found (can be None if no results were found), and the last index investigated.\n :rtype: tuple(list(Element) or None, int)\n '
return (None, i)
|
def streamline(self):
'\n Streamlines internal representations. e.g., if we have something like And(And(And(And(a), b), c), d), streamline this to And(a, b, c, d)\n '
self.streamlined = True
return self
| 7,808,405,897,570,630,000
|
Streamlines internal representations. e.g., if we have something like And(And(And(And(a), b), c), d), streamline this to And(a, b, c, d)
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
streamline
|
ShuHuang/batterydatabase
|
python
|
def streamline(self):
'\n \n '
self.streamlined = True
return self
|
def __call__(self, name):
'\n If a BaseParserElement is called, returns the BaseParserElement with its name set to the argument. The name is used to identify the results parsed by this element.\n\n :param str name: Name to give BaseParserElement\n :returns: A BaseParserElement with the given name\n :rtype: BaseParserElement\n '
return self.set_name(name)
| -1,587,400,351,974,764,300
|
If a BaseParserElement is called, returns the BaseParserElement with its name set to the argument. The name is used to identify the results parsed by this element.
:param str name: Name to give BaseParserElement
:returns: A BaseParserElement with the given name
:rtype: BaseParserElement
|
chemdataextractor_batteries/chemdataextractor/parse/elements.py
|
__call__
|
ShuHuang/batterydatabase
|
python
|
def __call__(self, name):
'\n If a BaseParserElement is called, returns the BaseParserElement with its name set to the argument. The name is used to identify the results parsed by this element.\n\n :param str name: Name to give BaseParserElement\n :returns: A BaseParserElement with the given name\n :rtype: BaseParserElement\n '
return self.set_name(name)
|
def trace_helper(x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]) -> Tuple[(List[go.Scatter], List[str])]:
'\n this function gets results prepared by a plot-related function and\n outputs a tuple including plotly object and its corresponding legend.\n '
all_traces = []
num_chains = len(x)
num_indices = len(x[0])
for index in range(num_indices):
trace = []
for chain in range(num_chains):
trace.append(go.Scatter(x=x[chain][index], y=y[chain][index], mode='lines', name=('chain' + str(chain))))
all_traces.append(trace)
return (all_traces, labels)
| -2,237,043,114,065,933,000
|
this function gets results prepared by a plot-related function and
outputs a tuple including plotly object and its corresponding legend.
|
src/beanmachine/ppl/diagnostics/common_plots.py
|
trace_helper
|
facebookresearch/beanmachine
|
python
|
def trace_helper(x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]) -> Tuple[(List[go.Scatter], List[str])]:
'\n this function gets results prepared by a plot-related function and\n outputs a tuple including plotly object and its corresponding legend.\n '
all_traces = []
num_chains = len(x)
num_indices = len(x[0])
for index in range(num_indices):
trace = []
for chain in range(num_chains):
trace.append(go.Scatter(x=x[chain][index], y=y[chain][index], mode='lines', name=('chain' + str(chain))))
all_traces.append(trace)
return (all_traces, labels)
|
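`trace_helper` expects `x` and `y` indexed as `[chain][parameter_index][sample]` and returns one list of `go.Scatter` traces per parameter. A minimal sketch of calling it directly; the values are made up and the import path assumes the file above installs as `beanmachine.ppl.diagnostics.common_plots`:

```python
# Minimal sketch of calling trace_helper directly (values are made up; the
# import path is an assumption based on the file path shown above).
from beanmachine.ppl.diagnostics.common_plots import trace_helper

# x and y are indexed as [chain][parameter_index][sample]
x = [[[0, 1, 2, 3]],                 # chain 0, parameter 0: iteration numbers
     [[0, 1, 2, 3]]]                 # chain 1, parameter 0
y = [[[0.10, 0.12, 0.09, 0.11]],     # chain 0 samples
     [[0.20, 0.18, 0.22, 0.21]]]     # chain 1 samples
labels = ["theta"]

traces, legend = trace_helper(x, y, labels)
# traces[0] holds one go.Scatter per chain, named "chain0" and "chain1"
```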
def plot_helper(query_samples: Tensor, func: Callable) -> Tuple[(List[go.Scatter], List[str])]:
'\n this function executes a plot-related function, passed as input parameter func, and\n outputs a tuple including plotly object and its corresponding legend.\n '
(num_chain, num_samples, single_sample_sz) = _samples_info(query_samples)
(x_axis, y_axis, all_labels) = ([], [], [])
for chain in range(num_chain):
flattened_data = query_samples[chain].reshape(num_samples, (- 1))
numel = flattened_data[0].numel()
(x_axis_data, y_axis_data, labels) = ([], [], [])
for i in range(numel):
index = np.unravel_index(i, single_sample_sz)
data = flattened_data[:, i]
partial_label = f' for {list(index)}'
(x_data, y_data) = func(data.detach())
x_axis_data.append(x_data)
y_axis_data.append(y_data)
labels.append(partial_label)
x_axis.append(x_axis_data)
y_axis.append(y_axis_data)
all_labels.append(labels)
return trace_helper(x_axis, y_axis, all_labels[0])
| -1,935,371,377,583,544,300
|
this function executes a plot-related function, passed as input parameter func, and
outputs a tuple including plotly object and its corresponding legend.
|
src/beanmachine/ppl/diagnostics/common_plots.py
|
plot_helper
|
facebookresearch/beanmachine
|
python
|
def plot_helper(query_samples: Tensor, func: Callable) -> Tuple[(List[go.Scatter], List[str])]:
'\n this function executes a plot-related function, passed as input parameter func, and\n outputs a tuple including plotly object and its corresponding legend.\n '
(num_chain, num_samples, single_sample_sz) = _samples_info(query_samples)
(x_axis, y_axis, all_labels) = ([], [], [])
for chain in range(num_chain):
flattened_data = query_samples[chain].reshape(num_samples, (- 1))
numel = flattened_data[0].numel()
(x_axis_data, y_axis_data, labels) = ([], [], [])
for i in range(numel):
index = np.unravel_index(i, single_sample_sz)
data = flattened_data[:, i]
partial_label = f' for {list(index)}'
(x_data, y_data) = func(data.detach())
x_axis_data.append(x_data)
y_axis_data.append(y_data)
labels.append(partial_label)
x_axis.append(x_axis_data)
y_axis.append(y_axis_data)
all_labels.append(labels)
return trace_helper(x_axis, y_axis, all_labels[0])
|
def run(self):
' This defines the sequence of actions that are taken when the barrier concurrency state is executed\n\n :return:\n '
logger.debug('Starting execution of {0}{1}'.format(self, (' (backwards)' if self.backward_execution else '')))
self.setup_run()
child_errors = {}
final_outcomes_dict = {}
decider_state = self.states[UNIQUE_DECIDER_STATE_ID]
try:
concurrency_history_item = self.setup_forward_or_backward_execution()
self.start_child_states(concurrency_history_item, decider_state)
for (history_index, state) in enumerate(self.states.values()):
if (state is not decider_state):
self.join_state(state, history_index, concurrency_history_item)
self.add_state_execution_output_to_scoped_data(state.output_data, state)
self.update_scoped_variables_with_output_dictionary(state.output_data, state)
if ('error' in state.output_data):
child_errors[state.state_id] = (state.name, state.output_data['error'])
final_outcomes_dict[state.state_id] = (state.name, state.final_outcome)
if self.backward_execution:
return self.finalize_backward_execution()
else:
self.backward_execution = False
decider_state_error = self.run_decider_state(decider_state, child_errors, final_outcomes_dict)
transition = self.get_transition_for_outcome(decider_state, decider_state.final_outcome)
if (transition is None):
transition = self.handle_no_transition(decider_state)
decider_state.state_execution_status = StateExecutionStatus.INACTIVE
if (transition is None):
self.output_data['error'] = RuntimeError('state aborted')
else:
if decider_state_error:
self.output_data['error'] = decider_state_error
self.final_outcome = self.outcomes[transition.to_outcome]
return self.finalize_concurrency_state(self.final_outcome)
except Exception as e:
logger.error('{0} had an internal error: {1}\n{2}'.format(self, str(e), str(traceback.format_exc())))
self.output_data['error'] = e
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
return self.finalize(Outcome((- 1), 'aborted'))
| 4,579,615,161,753,706,000
|
This defines the sequence of actions that are taken when the barrier concurrency state is executed
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
run
|
LJMP/RAFCON
|
python
|
def run(self):
' This defines the sequence of actions that are taken when the barrier concurrency state is executed\n\n :return:\n '
    logger.debug('Starting execution of {0}{1}'.format(self, (' (backwards)' if self.backward_execution else '')))
self.setup_run()
child_errors = {}
final_outcomes_dict = {}
decider_state = self.states[UNIQUE_DECIDER_STATE_ID]
try:
concurrency_history_item = self.setup_forward_or_backward_execution()
self.start_child_states(concurrency_history_item, decider_state)
for (history_index, state) in enumerate(self.states.values()):
if (state is not decider_state):
self.join_state(state, history_index, concurrency_history_item)
self.add_state_execution_output_to_scoped_data(state.output_data, state)
self.update_scoped_variables_with_output_dictionary(state.output_data, state)
if ('error' in state.output_data):
child_errors[state.state_id] = (state.name, state.output_data['error'])
final_outcomes_dict[state.state_id] = (state.name, state.final_outcome)
if self.backward_execution:
return self.finalize_backward_execution()
else:
self.backward_execution = False
decider_state_error = self.run_decider_state(decider_state, child_errors, final_outcomes_dict)
transition = self.get_transition_for_outcome(decider_state, decider_state.final_outcome)
if (transition is None):
transition = self.handle_no_transition(decider_state)
decider_state.state_execution_status = StateExecutionStatus.INACTIVE
if (transition is None):
self.output_data['error'] = RuntimeError('state aborted')
else:
if decider_state_error:
self.output_data['error'] = decider_state_error
self.final_outcome = self.outcomes[transition.to_outcome]
return self.finalize_concurrency_state(self.final_outcome)
except Exception as e:
logger.error('{0} had an internal error: {1}\n{2}'.format(self, str(e), str(traceback.format_exc())))
self.output_data['error'] = e
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
return self.finalize(Outcome((- 1), 'aborted'))
|
def run_decider_state(self, decider_state, child_errors, final_outcomes_dict):
' Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the\n barrier concurrency is left.\n\n :param decider_state: the decider state of the barrier concurrency state\n :param child_errors: error of the concurrent branches\n :param final_outcomes_dict: dictionary of all outcomes of the concurrent branches\n :return:\n '
decider_state.state_execution_status = StateExecutionStatus.ACTIVE
decider_state.child_errors = child_errors
decider_state.final_outcomes_dict = final_outcomes_dict
decider_state.input_data = self.get_inputs_for_state(decider_state)
decider_state.output_data = self.create_output_dictionary_for_state(decider_state)
decider_state.start(self.execution_history, backward_execution=False)
decider_state.join()
decider_state_error = None
if (decider_state.final_outcome.outcome_id == (- 1)):
if ('error' in decider_state.output_data):
decider_state_error = decider_state.output_data['error']
self.add_state_execution_output_to_scoped_data(decider_state.output_data, decider_state)
self.update_scoped_variables_with_output_dictionary(decider_state.output_data, decider_state)
return decider_state_error
| -2,190,052,636,126,749,700
|
Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the
barrier concurrency is left.
:param decider_state: the decider state of the barrier concurrency state
:param child_errors: error of the concurrent branches
:param final_outcomes_dict: dictionary of all outcomes of the concurrent branches
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
run_decider_state
|
LJMP/RAFCON
|
python
|
def run_decider_state(self, decider_state, child_errors, final_outcomes_dict):
' Runs the decider state of the barrier concurrency state. The decider state decides on which outcome the\n barrier concurrency is left.\n\n :param decider_state: the decider state of the barrier concurrency state\n :param child_errors: error of the concurrent branches\n :param final_outcomes_dict: dictionary of all outcomes of the concurrent branches\n :return:\n '
decider_state.state_execution_status = StateExecutionStatus.ACTIVE
decider_state.child_errors = child_errors
decider_state.final_outcomes_dict = final_outcomes_dict
decider_state.input_data = self.get_inputs_for_state(decider_state)
decider_state.output_data = self.create_output_dictionary_for_state(decider_state)
decider_state.start(self.execution_history, backward_execution=False)
decider_state.join()
decider_state_error = None
if (decider_state.final_outcome.outcome_id == (- 1)):
if ('error' in decider_state.output_data):
decider_state_error = decider_state.output_data['error']
self.add_state_execution_output_to_scoped_data(decider_state.output_data, decider_state)
self.update_scoped_variables_with_output_dictionary(decider_state.output_data, decider_state)
return decider_state_error
|
def _check_transition_validity(self, check_transition):
' Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.\n Start transitions are forbidden in the ConcurrencyState.\n\n :param check_transition: the transition to check for validity\n :return:\n '
(valid, message) = super(BarrierConcurrencyState, self)._check_transition_validity(check_transition)
if (not valid):
return (False, message)
from_state_id = check_transition.from_state
to_state_id = check_transition.to_state
from_outcome_id = check_transition.from_outcome
to_outcome_id = check_transition.to_outcome
if (from_state_id == UNIQUE_DECIDER_STATE_ID):
if (to_state_id != self.state_id):
return (False, 'Transition from the decider state must go to the parent state')
elif (to_state_id != UNIQUE_DECIDER_STATE_ID):
if ((from_outcome_id not in [(- 2), (- 1)]) or (to_outcome_id not in [(- 2), (- 1)])):
return (False, 'Transition from this state must go to the decider state. The only exception are transition from aborted/preempted to the parent aborted/preempted outcomes')
return (True, message)
| -2,787,156,011,743,365,000
|
Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.
Start transitions are forbidden in the ConcurrencyState.
:param check_transition: the transition to check for validity
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
_check_transition_validity
|
LJMP/RAFCON
|
python
|
def _check_transition_validity(self, check_transition):
' Transition of BarrierConcurrencyStates must least fulfill the condition of a ContainerState.\n Start transitions are forbidden in the ConcurrencyState.\n\n :param check_transition: the transition to check for validity\n :return:\n '
(valid, message) = super(BarrierConcurrencyState, self)._check_transition_validity(check_transition)
if (not valid):
return (False, message)
from_state_id = check_transition.from_state
to_state_id = check_transition.to_state
from_outcome_id = check_transition.from_outcome
to_outcome_id = check_transition.to_outcome
if (from_state_id == UNIQUE_DECIDER_STATE_ID):
if (to_state_id != self.state_id):
return (False, 'Transition from the decider state must go to the parent state')
elif (to_state_id != UNIQUE_DECIDER_STATE_ID):
if ((from_outcome_id not in [(- 2), (- 1)]) or (to_outcome_id not in [(- 2), (- 1)])):
return (False, 'Transition from this state must go to the decider state. The only exception are transition from aborted/preempted to the parent aborted/preempted outcomes')
return (True, message)
|
@lock_state_machine
def add_state(self, state, storage_load=False):
'Overwrite the parent class add_state method\n\n Add automatic transition generation for the decider_state.\n\n :param state: The state to be added\n :return:\n '
state_id = super(BarrierConcurrencyState, self).add_state(state)
if ((not storage_load) and (not self.__init_running) and (not (state.state_id == UNIQUE_DECIDER_STATE_ID))):
for (o_id, o) in list(state.outcomes.items()):
if ((not (o_id == (- 1))) and (not (o_id == (- 2)))):
self.add_transition(state.state_id, o_id, self.states[UNIQUE_DECIDER_STATE_ID].state_id, None)
return state_id
| 450,937,260,812,671,040
|
Overwrite the parent class add_state method
Add automatic transition generation for the decider_state.
:param state: The state to be added
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
add_state
|
LJMP/RAFCON
|
python
|
@lock_state_machine
def add_state(self, state, storage_load=False):
'Overwrite the parent class add_state method\n\n Add automatic transition generation for the decider_state.\n\n :param state: The state to be added\n :return:\n '
state_id = super(BarrierConcurrencyState, self).add_state(state)
if ((not storage_load) and (not self.__init_running) and (not (state.state_id == UNIQUE_DECIDER_STATE_ID))):
for (o_id, o) in list(state.outcomes.items()):
if ((not (o_id == (- 1))) and (not (o_id == (- 2)))):
self.add_transition(state.state_id, o_id, self.states[UNIQUE_DECIDER_STATE_ID].state_id, None)
return state_id
|
@ContainerState.states.setter
@lock_state_machine
@Observable.observed
def states(self, states):
' Overwrite the setter of the container state base class as special handling for the decider state is needed.\n\n :param states: the dictionary of new states\n :raises exceptions.TypeError: if the states parameter is not of type dict\n '
state_ids = list(self.states.keys())
for state_id in state_ids:
if ((state_id == UNIQUE_DECIDER_STATE_ID) and (UNIQUE_DECIDER_STATE_ID not in states)):
continue
self.remove_state(state_id)
if (states is not None):
if (not isinstance(states, dict)):
raise TypeError('states must be of type dict')
decider_state = states.pop(UNIQUE_DECIDER_STATE_ID, None)
if (decider_state is not None):
self.add_state(decider_state)
for state in states.values():
self.add_state(state)
| 412,972,436,944,697,600
|
Overwrite the setter of the container state base class as special handling for the decider state is needed.
:param states: the dictionary of new states
:raises exceptions.TypeError: if the states parameter is not of type dict
|
source/rafcon/core/states/barrier_concurrency_state.py
|
states
|
LJMP/RAFCON
|
python
|
@ContainerState.states.setter
@lock_state_machine
@Observable.observed
def states(self, states):
' Overwrite the setter of the container state base class as special handling for the decider state is needed.\n\n :param states: the dictionary of new states\n :raises exceptions.TypeError: if the states parameter is not of type dict\n '
state_ids = list(self.states.keys())
for state_id in state_ids:
if ((state_id == UNIQUE_DECIDER_STATE_ID) and (UNIQUE_DECIDER_STATE_ID not in states)):
continue
self.remove_state(state_id)
if (states is not None):
if (not isinstance(states, dict)):
raise TypeError('states must be of type dict')
decider_state = states.pop(UNIQUE_DECIDER_STATE_ID, None)
if (decider_state is not None):
self.add_state(decider_state)
for state in states.values():
self.add_state(state)
|
def remove_state(self, state_id, recursive=True, force=False, destroy=True):
' Overwrite the parent class remove state method by checking if the user tries to delete the decider state\n\n :param state_id: the id of the state to remove\n :param recursive: a flag to indicate a recursive disassembling of all substates\n :param force: a flag to indicate forcefully deletion of all states (important of the decider state in the\n barrier concurrency state)\n :param destroy: a flag which indicates if the state should not only be disconnected from the state but also\n destroyed, including all its state elements\n :raises exceptions.AttributeError: if the state_id parameter is the decider state\n '
if ((state_id == UNIQUE_DECIDER_STATE_ID) and (force is False)):
raise AttributeError('You are not allowed to delete the decider state.')
else:
return ContainerState.remove_state(self, state_id, recursive=recursive, force=force, destroy=destroy)
| 884,845,709,618,582,000
|
Overwrite the parent class remove state method by checking if the user tries to delete the decider state
:param state_id: the id of the state to remove
:param recursive: a flag to indicate a recursive disassembling of all substates
:param force: a flag to indicate forcefully deletion of all states (important of the decider state in the
barrier concurrency state)
:param destroy: a flag which indicates if the state should not only be disconnected from the state but also
destroyed, including all its state elements
:raises exceptions.AttributeError: if the state_id parameter is the decider state
|
source/rafcon/core/states/barrier_concurrency_state.py
|
remove_state
|
LJMP/RAFCON
|
python
|
def remove_state(self, state_id, recursive=True, force=False, destroy=True):
' Overwrite the parent class remove state method by checking if the user tries to delete the decider state\n\n :param state_id: the id of the state to remove\n :param recursive: a flag to indicate a recursive disassembling of all substates\n :param force: a flag to indicate forcefully deletion of all states (important of the decider state in the\n barrier concurrency state)\n :param destroy: a flag which indicates if the state should not only be disconnected from the state but also\n destroyed, including all its state elements\n :raises exceptions.AttributeError: if the state_id parameter is the decider state\n '
if ((state_id == UNIQUE_DECIDER_STATE_ID) and (force is False)):
raise AttributeError('You are not allowed to delete the decider state.')
else:
return ContainerState.remove_state(self, state_id, recursive=recursive, force=force, destroy=destroy)
|
def get_outcome_for_state_name(self, name):
' Returns the final outcome of the child state specified by name.\n\n Note: This is utility function that is used by the programmer to make a decision based on the final outcome\n of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want\n to use state-ids in his code this utility function was defined.\n\n :param name: The name of the state to get the final outcome for.\n :return:\n '
return_value = None
for (state_id, name_outcome_tuple) in self.final_outcomes_dict.items():
if (name_outcome_tuple[0] == name):
return_value = name_outcome_tuple[1]
break
return return_value
| 5,817,289,023,625,837,000
|
Returns the final outcome of the child state specified by name.
Note: This is utility function that is used by the programmer to make a decision based on the final outcome
of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want
to use state-ids in his code this utility function was defined.
:param name: The name of the state to get the final outcome for.
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
get_outcome_for_state_name
|
LJMP/RAFCON
|
python
|
def get_outcome_for_state_name(self, name):
' Returns the final outcome of the child state specified by name.\n\n Note: This is utility function that is used by the programmer to make a decision based on the final outcome\n of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want\n to use state-ids in his code this utility function was defined.\n\n :param name: The name of the state to get the final outcome for.\n :return:\n '
return_value = None
for (state_id, name_outcome_tuple) in self.final_outcomes_dict.items():
if (name_outcome_tuple[0] == name):
return_value = name_outcome_tuple[1]
break
return return_value
|
def get_outcome_for_state_id(self, state_id):
' Returns the final outcome of the child state specified by the state_id.\n\n :param state_id: The id of the state to get the final outcome for.\n :return:\n '
return_value = None
for (s_id, name_outcome_tuple) in self.final_outcomes_dict.items():
if (s_id == state_id):
return_value = name_outcome_tuple[1]
break
return return_value
| 4,762,681,882,078,964,000
|
Returns the final outcome of the child state specified by the state_id.
:param state_id: The id of the state to get the final outcome for.
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
get_outcome_for_state_id
|
LJMP/RAFCON
|
python
|
def get_outcome_for_state_id(self, state_id):
' Returns the final outcome of the child state specified by the state_id.\n\n :param state_id: The id of the state to get the final outcome for.\n :return:\n '
return_value = None
for (s_id, name_outcome_tuple) in self.final_outcomes_dict.items():
if (s_id == state_id):
return_value = name_outcome_tuple[1]
break
return return_value
|
def get_errors_for_state_name(self, name):
' Returns the error message of the child state specified by name.\n\n Note: This is utility function that is used by the programmer to make a decision based on the final outcome\n of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want\n to use state-ids in his code this utility function was defined.\n\n :param name: The name of the state to get the error message for\n :return:\n '
return_value = None
for (state_id, name_outcome_tuple) in self.child_errors.items():
if (name_outcome_tuple[0] == name):
return_value = name_outcome_tuple[1]
break
return return_value
| -4,617,795,323,207,786,000
|
Returns the error message of the child state specified by name.
Note: This is utility function that is used by the programmer to make a decision based on the final outcome
of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want
to use state-ids in his code this utility function was defined.
:param name: The name of the state to get the error message for
:return:
|
source/rafcon/core/states/barrier_concurrency_state.py
|
get_errors_for_state_name
|
LJMP/RAFCON
|
python
|
def get_errors_for_state_name(self, name):
' Returns the error message of the child state specified by name.\n\n Note: This is utility function that is used by the programmer to make a decision based on the final outcome\n of its child states. A state is not uniquely specified by the name, but as the programmer normally does not want\n to use state-ids in his code this utility function was defined.\n\n :param name: The name of the state to get the error message for\n :return:\n '
return_value = None
for (state_id, name_outcome_tuple) in self.child_errors.items():
if (name_outcome_tuple[0] == name):
return_value = name_outcome_tuple[1]
break
return return_value
|
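For illustration, here is a standalone sketch (independent of RAFCON) of the data shapes the three lookup helpers above iterate over: `final_outcomes_dict` and `child_errors` map a state id to a `(state_name, payload)` tuple, as populated in the `run` method shown earlier. The state ids, names, and payloads below are made up:

```python
# Standalone sketch (not RAFCON itself) of the structures the lookup helpers
# above search: state_id -> (state_name, payload), as filled in run().
final_outcomes_dict = {
    "STATE_A1B2": ("move_arm", "<Outcome 'success'>"),
    "STATE_C3D4": ("grasp", "<Outcome 'aborted'>"),
}
child_errors = {
    "STATE_C3D4": ("grasp", RuntimeError("gripper timeout")),
}

def outcome_for_state_name(name):
    # same linear search by state name as get_outcome_for_state_name()
    for _state_id, (state_name, outcome) in final_outcomes_dict.items():
        if state_name == name:
            return outcome
    return None

print(outcome_for_state_name("grasp"))   # <Outcome 'aborted'>
print(child_errors["STATE_C3D4"][1])     # gripper timeout
```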
def wait_for_complete(queue_name, job_list=None, job_name_prefix=None, poll_interval=10, idle_log_timeout=None, kill_on_log_timeout=False, stash_log_method=None, tag_instances=False, result_record=None):
"Return when all jobs in the given list finished.\n\n If not job list is given, return when all jobs in queue finished.\n\n Parameters\n ----------\n queue_name : str\n The name of the queue to wait for completion.\n job_list : Optional[list(dict)]\n A list of jobID-s in a dict, as returned by the submit function.\n Example: [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}, ...]\n If not given, this function will return if all jobs completed.\n job_name_prefix : Optional[str]\n A prefix for the name of the jobs to wait for. This is useful if the\n explicit job list is not available but filtering is needed.\n poll_interval : Optional[int]\n The time delay between API calls to check the job statuses.\n idle_log_timeout : Optional[int] or None\n If not None, then track the logs of the active jobs, and if new output\n is not produced after `idle_log_timeout` seconds, a warning is printed.\n If `kill_on_log_timeout` is set to True, the job will also be\n terminated.\n kill_on_log_timeout : Optional[bool]\n If True, and if `idle_log_timeout` is set, jobs will be terminated\n after timeout. This has no effect if `idle_log_timeout` is None.\n Default is False.\n stash_log_method : Optional[str]\n Select a method to store the job logs, either 's3' or 'local'. If no\n method is specified, the logs will not be loaded off of AWS. If 's3' is\n specified, then `job_name_prefix` must also be given, as this will\n indicate where on s3 to store the logs.\n tag_instances : bool\n Default is False. If True, apply tags to the instances. This is toady\n typically done by each job, so in most cases this should not be needed.\n result_record : dict\n A dict which will be modified in place to record the results of the job.\n "
if ((stash_log_method == 's3') and (job_name_prefix is None)):
raise Exception('A job_name_prefix is required to post logs on s3.')
start_time = datetime.now()
if (job_list is None):
job_id_list = []
else:
job_id_list = [job['jobId'] for job in job_list]
def get_jobs_by_status(status, job_id_filter=None, job_name_prefix=None):
res = batch_client.list_jobs(jobQueue=queue_name, jobStatus=status, maxResults=10000)
jobs = res['jobSummaryList']
if job_name_prefix:
jobs = [job for job in jobs if job['jobName'].startswith(job_name_prefix)]
if job_id_filter:
jobs = [job_def for job_def in jobs if (job_def['jobId'] in job_id_filter)]
return jobs
job_log_dict = {}
def check_logs(job_defs):
'Updates teh job_log_dict.'
stalled_jobs = set()
for job_def in job_defs:
try:
log_lines = get_job_log(job_def, write_file=False)
jid = job_def['jobId']
now = datetime.now()
if (jid not in job_log_dict.keys()):
logger.info(('Adding job %s to the log tracker at %s.' % (jid, now)))
job_log_dict[jid] = {'log': log_lines, 'last change time': now}
elif (len(job_log_dict[jid]['log']) == len(log_lines)):
check_dt = (now - job_log_dict[jid]['last change time'])
logger.warning(("Job '%s' has not produced output for %d seconds." % (job_def['jobName'], check_dt.seconds)))
if (check_dt.seconds > idle_log_timeout):
logger.warning(("Job '%s' has stalled." % job_def['jobName']))
stalled_jobs.add(jid)
else:
old_log = job_log_dict[jid]['log']
old_log += log_lines[len(old_log):]
job_log_dict[jid]['last change time'] = now
except Exception as e:
logger.error(('Failed to check log for: %s' % str(job_def)))
logger.exception(e)
return stalled_jobs
observed_job_def_dict = {}
def get_dict_of_job_tuples(job_defs):
return {jdef['jobId']: [(k, jdef[k]) for k in ['jobName', 'jobId']] for jdef in job_defs}
batch_client = boto3.client('batch')
if tag_instances:
ecs_cluster_name = get_ecs_cluster_for_queue(queue_name, batch_client)
terminate_msg = 'Job log has stalled for at least %f minutes.'
terminated_jobs = set()
stashed_id_set = set()
while True:
pre_run = []
for status in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING'):
pre_run += get_jobs_by_status(status, job_id_list, job_name_prefix)
running = get_jobs_by_status('RUNNING', job_id_list, job_name_prefix)
failed = get_jobs_by_status('FAILED', job_id_list, job_name_prefix)
done = get_jobs_by_status('SUCCEEDED', job_id_list, job_name_prefix)
observed_job_def_dict.update(get_dict_of_job_tuples((pre_run + running)))
logger.info(('(%d s)=(pre: %d, running: %d, failed: %d, done: %d)' % ((datetime.now() - start_time).seconds, len(pre_run), len(running), len(failed), len(done))))
stalled_jobs = check_logs(running)
if (idle_log_timeout is not None):
if kill_on_log_timeout:
for jid in (stalled_jobs - terminated_jobs):
batch_client.terminate_job(jobId=jid, reason=(terminate_msg % (idle_log_timeout / 60.0)))
logger.info(('Terminating %s.' % jid))
terminated_jobs.add(jid)
if job_id_list:
if ((len(failed) + len(done)) == len(job_id_list)):
ret = 0
break
elif (((len(failed) + len(done)) > 0) and ((len(pre_run) + len(running)) == 0)):
ret = 0
break
if tag_instances:
tag_instances_on_cluster(ecs_cluster_name)
if stash_log_method:
stash_logs(observed_job_def_dict, done, failed, queue_name, stash_log_method, job_name_prefix, start_time.strftime('%Y%m%d_%H%M%S'), ids_stashed=stashed_id_set)
sleep(poll_interval)
if stash_log_method:
stash_logs(observed_job_def_dict, done, failed, queue_name, stash_log_method, job_name_prefix, start_time.strftime('%Y%m%d_%H%M%S'), ids_stashed=stashed_id_set)
result_record['terminated'] = terminated_jobs
result_record['failed'] = failed
result_record['succeeded'] = done
return ret
| -1,837,903,769,100,694,800
|
Return when all jobs in the given list finished.
If not job list is given, return when all jobs in queue finished.
Parameters
----------
queue_name : str
The name of the queue to wait for completion.
job_list : Optional[list(dict)]
A list of jobID-s in a dict, as returned by the submit function.
Example: [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}, ...]
If not given, this function will return if all jobs completed.
job_name_prefix : Optional[str]
A prefix for the name of the jobs to wait for. This is useful if the
explicit job list is not available but filtering is needed.
poll_interval : Optional[int]
The time delay between API calls to check the job statuses.
idle_log_timeout : Optional[int] or None
If not None, then track the logs of the active jobs, and if new output
is not produced after `idle_log_timeout` seconds, a warning is printed.
If `kill_on_log_timeout` is set to True, the job will also be
terminated.
kill_on_log_timeout : Optional[bool]
If True, and if `idle_log_timeout` is set, jobs will be terminated
after timeout. This has no effect if `idle_log_timeout` is None.
Default is False.
stash_log_method : Optional[str]
Select a method to store the job logs, either 's3' or 'local'. If no
method is specified, the logs will not be loaded off of AWS. If 's3' is
specified, then `job_name_prefix` must also be given, as this will
indicate where on s3 to store the logs.
tag_instances : bool
Default is False. If True, apply tags to the instances. This is today
typically done by each job, so in most cases this should not be needed.
result_record : dict
A dict which will be modified in place to record the results of the job.
|
indra/tools/reading/submit_reading_pipeline.py
|
wait_for_complete
|
budakn/INDRA
|
python
|
def wait_for_complete(queue_name, job_list=None, job_name_prefix=None, poll_interval=10, idle_log_timeout=None, kill_on_log_timeout=False, stash_log_method=None, tag_instances=False, result_record=None):
"Return when all jobs in the given list finished.\n\n If not job list is given, return when all jobs in queue finished.\n\n Parameters\n ----------\n queue_name : str\n The name of the queue to wait for completion.\n job_list : Optional[list(dict)]\n A list of jobID-s in a dict, as returned by the submit function.\n Example: [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}, ...]\n If not given, this function will return if all jobs completed.\n job_name_prefix : Optional[str]\n A prefix for the name of the jobs to wait for. This is useful if the\n explicit job list is not available but filtering is needed.\n poll_interval : Optional[int]\n The time delay between API calls to check the job statuses.\n idle_log_timeout : Optional[int] or None\n If not None, then track the logs of the active jobs, and if new output\n is not produced after `idle_log_timeout` seconds, a warning is printed.\n If `kill_on_log_timeout` is set to True, the job will also be\n terminated.\n kill_on_log_timeout : Optional[bool]\n If True, and if `idle_log_timeout` is set, jobs will be terminated\n after timeout. This has no effect if `idle_log_timeout` is None.\n Default is False.\n stash_log_method : Optional[str]\n Select a method to store the job logs, either 's3' or 'local'. If no\n method is specified, the logs will not be loaded off of AWS. If 's3' is\n specified, then `job_name_prefix` must also be given, as this will\n indicate where on s3 to store the logs.\n tag_instances : bool\n Default is False. If True, apply tags to the instances. This is toady\n typically done by each job, so in most cases this should not be needed.\n result_record : dict\n A dict which will be modified in place to record the results of the job.\n "
if ((stash_log_method == 's3') and (job_name_prefix is None)):
raise Exception('A job_name_prefix is required to post logs on s3.')
start_time = datetime.now()
if (job_list is None):
job_id_list = []
else:
job_id_list = [job['jobId'] for job in job_list]
def get_jobs_by_status(status, job_id_filter=None, job_name_prefix=None):
res = batch_client.list_jobs(jobQueue=queue_name, jobStatus=status, maxResults=10000)
jobs = res['jobSummaryList']
if job_name_prefix:
jobs = [job for job in jobs if job['jobName'].startswith(job_name_prefix)]
if job_id_filter:
jobs = [job_def for job_def in jobs if (job_def['jobId'] in job_id_filter)]
return jobs
job_log_dict = {}
def check_logs(job_defs):
'Updates the job_log_dict.'
stalled_jobs = set()
for job_def in job_defs:
try:
log_lines = get_job_log(job_def, write_file=False)
jid = job_def['jobId']
now = datetime.now()
if (jid not in job_log_dict.keys()):
logger.info(('Adding job %s to the log tracker at %s.' % (jid, now)))
job_log_dict[jid] = {'log': log_lines, 'last change time': now}
elif (len(job_log_dict[jid]['log']) == len(log_lines)):
check_dt = (now - job_log_dict[jid]['last change time'])
logger.warning(("Job '%s' has not produced output for %d seconds." % (job_def['jobName'], check_dt.seconds)))
if (check_dt.seconds > idle_log_timeout):
logger.warning(("Job '%s' has stalled." % job_def['jobName']))
stalled_jobs.add(jid)
else:
old_log = job_log_dict[jid]['log']
old_log += log_lines[len(old_log):]
job_log_dict[jid]['last change time'] = now
except Exception as e:
logger.error(('Failed to check log for: %s' % str(job_def)))
logger.exception(e)
return stalled_jobs
observed_job_def_dict = {}
def get_dict_of_job_tuples(job_defs):
return {jdef['jobId']: [(k, jdef[k]) for k in ['jobName', 'jobId']] for jdef in job_defs}
batch_client = boto3.client('batch')
if tag_instances:
ecs_cluster_name = get_ecs_cluster_for_queue(queue_name, batch_client)
terminate_msg = 'Job log has stalled for at least %f minutes.'
terminated_jobs = set()
stashed_id_set = set()
while True:
pre_run = []
for status in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING'):
pre_run += get_jobs_by_status(status, job_id_list, job_name_prefix)
running = get_jobs_by_status('RUNNING', job_id_list, job_name_prefix)
failed = get_jobs_by_status('FAILED', job_id_list, job_name_prefix)
done = get_jobs_by_status('SUCCEEDED', job_id_list, job_name_prefix)
observed_job_def_dict.update(get_dict_of_job_tuples((pre_run + running)))
logger.info(('(%d s)=(pre: %d, running: %d, failed: %d, done: %d)' % ((datetime.now() - start_time).seconds, len(pre_run), len(running), len(failed), len(done))))
stalled_jobs = check_logs(running)
if (idle_log_timeout is not None):
if kill_on_log_timeout:
for jid in (stalled_jobs - terminated_jobs):
batch_client.terminate_job(jobId=jid, reason=(terminate_msg % (idle_log_timeout / 60.0)))
logger.info(('Terminating %s.' % jid))
terminated_jobs.add(jid)
if job_id_list:
if ((len(failed) + len(done)) == len(job_id_list)):
ret = 0
break
elif (((len(failed) + len(done)) > 0) and ((len(pre_run) + len(running)) == 0)):
ret = 0
break
if tag_instances:
tag_instances_on_cluster(ecs_cluster_name)
if stash_log_method:
stash_logs(observed_job_def_dict, done, failed, queue_name, stash_log_method, job_name_prefix, start_time.strftime('%Y%m%d_%H%M%S'), ids_stashed=stashed_id_set)
sleep(poll_interval)
if stash_log_method:
stash_logs(observed_job_def_dict, done, failed, queue_name, stash_log_method, job_name_prefix, start_time.strftime('%Y%m%d_%H%M%S'), ids_stashed=stashed_id_set)
result_record['terminated'] = terminated_jobs
result_record['failed'] = failed
result_record['succeeded'] = done
return ret
|
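A minimal usage sketch for wait_for_complete, assuming it is importable from indra/tools/reading/submit_reading_pipeline.py as recorded in the path field; the queue name, job-name prefix and job id below are illustrative placeholders, not values from the source.

from indra.tools.reading.submit_reading_pipeline import wait_for_complete

# Job list as returned by a prior submit call (the id shown is a placeholder).
job_list = [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}]
result_record = {}
ret = wait_for_complete(
    'run_reading_queue',            # assumed queue name
    job_list=job_list,
    job_name_prefix='my_reading',   # required because stash_log_method='s3'
    poll_interval=30,
    idle_log_timeout=600,
    kill_on_log_timeout=True,
    stash_log_method='s3',
    result_record=result_record,
)
# On return, result_record maps 'terminated', 'failed' and 'succeeded' to job collections.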
def get_ecs_cluster_for_queue(queue_name, batch_client=None):
'Get the name of the ecs cluster using the batch client.'
if (batch_client is None):
batch_client = boto3.client('batch')
queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])
if (len(queue_resp['jobQueues']) == 1):
queue = queue_resp['jobQueues'][0]
else:
raise BatchReadingError(('Error finding queue with name %s.' % queue_name))
compute_env_names = queue['computeEnvironmentOrder']
if (len(compute_env_names) == 1):
compute_env_name = compute_env_names[0]['computeEnvironment']
else:
raise BatchReadingError(('Error finding the compute environment name for %s.' % queue_name))
compute_envs = batch_client.describe_compute_environments(computeEnvironments=[compute_env_name])['computeEnvironments']
if (len(compute_envs) == 1):
compute_env = compute_envs[0]
else:
raise BatchReadingError(('Error getting compute environment %s for %s. Got %d environments instead of 1.' % (compute_env_name, queue_name, len(compute_envs))))
ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn'])
return ecs_cluster_name
| 3,727,563,321,851,856,400
|
Get the name of the ecs cluster using the batch client.
|
indra/tools/reading/submit_reading_pipeline.py
|
get_ecs_cluster_for_queue
|
budakn/INDRA
|
python
|
def get_ecs_cluster_for_queue(queue_name, batch_client=None):
if (batch_client is None):
batch_client = boto3.client('batch')
queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name])
if (len(queue_resp['jobQueues']) == 1):
queue = queue_resp['jobQueues'][0]
else:
raise BatchReadingError(('Error finding queue with name %s.' % queue_name))
compute_env_names = queue['computeEnvironmentOrder']
if (len(compute_env_names) == 1):
compute_env_name = compute_env_names[0]['computeEnvironment']
else:
raise BatchReadingError(('Error finding the compute environment name for %s.' % queue_name))
compute_envs = batch_client.describe_compute_environments(computeEnvironments=[compute_env_name])['computeEnvironments']
if (len(compute_envs) == 1):
compute_env = compute_envs[0]
else:
raise BatchReadingError(('Error getting compute environment %s for %s. Got %d environments instead of 1.' % (compute_env_name, queue_name, len(compute_envs))))
ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn'])
return ecs_cluster_name
|
def tag_instances_on_cluster(cluster_name, project='cwc'):
'Adds project tag to untagged instances in a given cluster.\n\n Parameters\n ----------\n cluster_name : str\n The name of the AWS ECS cluster in which running instances\n should be tagged.\n project : str\n The name of the project to tag instances with.\n '
ecs = boto3.client('ecs')
task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
if (not task_arns):
return
tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
container_instances = ecs.describe_container_instances(cluster=cluster_name, containerInstances=[task['containerInstanceArn'] for task in tasks])['containerInstances']
ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]
for instance_id in ec2_instance_ids:
tag_instance(instance_id, project=project)
return
| 2,285,739,003,705,012,000
|
Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with.
|
indra/tools/reading/submit_reading_pipeline.py
|
tag_instances_on_cluster
|
budakn/INDRA
|
python
|
def tag_instances_on_cluster(cluster_name, project='cwc'):
'Adds project tag to untagged instances in a given cluster.\n\n Parameters\n ----------\n cluster_name : str\n The name of the AWS ECS cluster in which running instances\n should be tagged.\n project : str\n The name of the project to tag instances with.\n '
ecs = boto3.client('ecs')
task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
if (not task_arns):
return
tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
container_instances = ecs.describe_container_instances(cluster=cluster_name, containerInstances=[task['containerInstanceArn'] for task in tasks])['containerInstances']
ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]
for instance_id in ec2_instance_ids:
tag_instance(instance_id, project=project)
return
|
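A short sketch tying the two helpers above together; the queue name is a placeholder and both functions are assumed importable from the same submit_reading_pipeline module.

from indra.tools.reading.submit_reading_pipeline import (
    get_ecs_cluster_for_queue, tag_instances_on_cluster)

# Resolve the ECS cluster backing an AWS Batch queue, then tag its instances.
cluster_name = get_ecs_cluster_for_queue('run_reading_queue')  # assumed queue name
tag_instances_on_cluster(cluster_name, project='cwc')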
def submit_reading(basename, pmid_list_filename, readers, start_ix=None, end_ix=None, pmids_per_job=3000, num_tries=2, force_read=False, force_fulltext=False, project_name=None):
'Submit an old-style pmid-centered no-database s3 only reading job.\n\n This function is provided for the sake of backward compatibility. It is\n preferred that you use the object-oriented PmidSubmitter and the\n submit_reading job going forward.\n '
sub = PmidSubmitter(basename, readers, project_name)
sub.set_options(force_read, force_fulltext)
sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job, num_tries)
return sub.job_list
| -9,154,570,921,491,134,000
|
Submit an old-style pmid-centered no-database s3 only reading job.
This function is provided for the sake of backward compatibility. It is
preferred that you use the object-oriented PmidSubmitter and the
submit_reading job going forward.
|
indra/tools/reading/submit_reading_pipeline.py
|
submit_reading
|
budakn/INDRA
|
python
|
def submit_reading(basename, pmid_list_filename, readers, start_ix=None, end_ix=None, pmids_per_job=3000, num_tries=2, force_read=False, force_fulltext=False, project_name=None):
'Submit an old-style pmid-centered no-database s3 only reading job.\n\n This function is provided for the sake of backward compatibility. It is\n preferred that you use the object-oriented PmidSubmitter and the\n submit_reading job going forward.\n '
sub = PmidSubmitter(basename, readers, project_name)
sub.set_options(force_read, force_fulltext)
sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job, num_tries)
return sub.job_list
|
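The docstring above recommends the object-oriented interface; below is a hedged sketch of both forms, with the base name, file name and reader names chosen purely for illustration.

from indra.tools.reading.submit_reading_pipeline import submit_reading, PmidSubmitter

readers = ['reach', 'sparser']          # assumed reader names
# Legacy wrapper, kept for backward compatibility:
job_list = submit_reading('my_run', 'pmid_list.txt', readers, pmids_per_job=2000)

# Preferred object-oriented form:
sub = PmidSubmitter('my_run', readers, None)
sub.set_options(force_read=False, force_fulltext=False)
sub.submit_reading('pmid_list.txt', None, None, 2000, 2)
sub.watch_and_wait(poll_interval=30)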
def submit_combine(basename, readers, job_ids=None, project_name=None):
'Submit a batch job to combine the outputs of a reading job.\n\n This function is provided for backwards compatibility. You should use the\n PmidSubmitter and submit_combine methods.\n '
sub = PmidSubmitter(basename, readers, project_name)
sub.job_list = job_ids
sub.submit_combine()
return sub
| 9,031,977,390,302,815,000
|
Submit a batch job to combine the outputs of a reading job.
This function is provided for backwards compatibility. You should use the
PmidSubmitter and submit_combine methods.
|
indra/tools/reading/submit_reading_pipeline.py
|
submit_combine
|
budakn/INDRA
|
python
|
def submit_combine(basename, readers, job_ids=None, project_name=None):
'Submit a batch job to combine the outputs of a reading job.\n\n This function is provided for backwards compatibility. You should use the\n PmidSubmitter and submit_combine methods.\n '
sub = PmidSubmitter(basename, readers, project_name)
sub.job_list = job_ids
sub.submit_combine()
return sub
|
def submit_db_reading(basename, id_list_filename, readers, start_ix=None, end_ix=None, pmids_per_job=3000, num_tries=2, force_read=False, force_fulltext=False, read_all_fulltext=False, project_name=None, max_reach_input_len=None, max_reach_space_ratio=None, no_stmts=False):
'Submit batch reading jobs that uses the database for content and results.\n\n This function is provided for backwards compatibility, use DbReadingSubmitter\n and its submit_reading method instead.\n '
sub = DbReadingSubmitter(basename, readers, project_name)
sub.set_options(force_read, no_stmts, force_fulltext, read_all_fulltext, max_reach_input_len, max_reach_space_ratio)
sub.submit_reading(id_list_filename, start_ix, end_ix, pmids_per_job, num_tries)
return sub
| -3,004,520,482,277,667,300
|
Submit batch reading jobs that uses the database for content and results.
This function is provided for backwards compatibility, use DbReadingSubmitter
and its submit_reading method instead.
|
indra/tools/reading/submit_reading_pipeline.py
|
submit_db_reading
|
budakn/INDRA
|
python
|
def submit_db_reading(basename, id_list_filename, readers, start_ix=None, end_ix=None, pmids_per_job=3000, num_tries=2, force_read=False, force_fulltext=False, read_all_fulltext=False, project_name=None, max_reach_input_len=None, max_reach_space_ratio=None, no_stmts=False):
'Submit batch reading jobs that uses the database for content and results.\n\n This function is provided for backwards compatibility, use DbReadingSubmitter\n and its submit_reading method instead.\n '
sub = DbReadingSubmitter(basename, readers, project_name)
sub.set_options(force_read, no_stmts, force_fulltext, read_all_fulltext, max_reach_input_len, max_reach_space_ratio)
sub.submit_reading(id_list_filename, start_ix, end_ix, pmids_per_job, num_tries)
return sub
|
def check_logs(job_defs):
'Updates the job_log_dict.'
stalled_jobs = set()
for job_def in job_defs:
try:
log_lines = get_job_log(job_def, write_file=False)
jid = job_def['jobId']
now = datetime.now()
if (jid not in job_log_dict.keys()):
logger.info(('Adding job %s to the log tracker at %s.' % (jid, now)))
job_log_dict[jid] = {'log': log_lines, 'last change time': now}
elif (len(job_log_dict[jid]['log']) == len(log_lines)):
check_dt = (now - job_log_dict[jid]['last change time'])
logger.warning(("Job '%s' has not produced output for %d seconds." % (job_def['jobName'], check_dt.seconds)))
if (check_dt.seconds > idle_log_timeout):
logger.warning(("Job '%s' has stalled." % job_def['jobName']))
stalled_jobs.add(jid)
else:
old_log = job_log_dict[jid]['log']
old_log += log_lines[len(old_log):]
job_log_dict[jid]['last change time'] = now
except Exception as e:
logger.error(('Failed to check log for: %s' % str(job_def)))
logger.exception(e)
return stalled_jobs
| 6,554,997,909,822,204,000
|
Updates the job_log_dict.
|
indra/tools/reading/submit_reading_pipeline.py
|
check_logs
|
budakn/INDRA
|
python
|
def check_logs(job_defs):
stalled_jobs = set()
for job_def in job_defs:
try:
log_lines = get_job_log(job_def, write_file=False)
jid = job_def['jobId']
now = datetime.now()
if (jid not in job_log_dict.keys()):
logger.info(('Adding job %s to the log tracker at %s.' % (jid, now)))
job_log_dict[jid] = {'log': log_lines, 'last change time': now}
elif (len(job_log_dict[jid]['log']) == len(log_lines)):
check_dt = (now - job_log_dict[jid]['last change time'])
logger.warning(("Job '%s' has not produced output for %d seconds." % (job_def['jobName'], check_dt.seconds)))
if (check_dt.seconds > idle_log_timeout):
logger.warning(("Job '%s' has stalled." % job_def['jobName']))
stalled_jobs.add(jid)
else:
old_log = job_log_dict[jid]['log']
old_log += log_lines[len(old_log):]
job_log_dict[jid]['last change time'] = now
except Exception as e:
logger.error(('Failed to check log for: %s' % str(job_def)))
logger.exception(e)
return stalled_jobs
|
def set_options(self, **kwargs):
'Set the options of reading job.'
self.options = kwargs
return
| -7,237,647,657,749,120,000
|
Set the options of reading job.
|
indra/tools/reading/submit_reading_pipeline.py
|
set_options
|
budakn/INDRA
|
python
|
def set_options(self, **kwargs):
self.options = kwargs
return
|
def watch_and_wait(self, poll_interval=10, idle_log_timeout=None, kill_on_timeout=False, stash_log_method=None, tag_instances=False, **kwargs):
'This provides shortcut access to the wait_for_complete_function.'
return wait_for_complete(self._job_queue, job_list=self.job_list, job_name_prefix=self.basename, poll_interval=poll_interval, idle_log_timeout=idle_log_timeout, kill_on_log_timeout=kill_on_timeout, stash_log_method=stash_log_method, tag_instances=tag_instances, **kwargs)
| 1,382,470,957,625,140,000
|
This provides shortcut access to the wait_for_complete_function.
|
indra/tools/reading/submit_reading_pipeline.py
|
watch_and_wait
|
budakn/INDRA
|
python
|
def watch_and_wait(self, poll_interval=10, idle_log_timeout=None, kill_on_timeout=False, stash_log_method=None, tag_instances=False, **kwargs):
return wait_for_complete(self._job_queue, job_list=self.job_list, job_name_prefix=self.basename, poll_interval=poll_interval, idle_log_timeout=idle_log_timeout, kill_on_log_timeout=kill_on_timeout, stash_log_method=stash_log_method, tag_instances=tag_instances, **kwargs)
|
def set_options(self, force_read=False, force_fulltext=False):
'Set the options for this run.'
self.options['force_read'] = force_read
self.options['force_fulltext'] = force_fulltext
return
| -2,327,768,471,245,930,500
|
Set the options for this run.
|
indra/tools/reading/submit_reading_pipeline.py
|
set_options
|
budakn/INDRA
|
python
|
def set_options(self, force_read=False, force_fulltext=False):
self.options['force_read'] = force_read
self.options['force_fulltext'] = force_fulltext
return
|
@staticmethod
def _parse_time(time_str):
'Create a timedelta or datetime object from default string reprs.'
try:
if ('-' in time_str):
time_fmt = '%Y-%m-%d %H:%M:%S'
if ('.' in time_str):
(pre_dec, post_dec) = time_str.split('.')
dt = datetime.strptime(pre_dec, time_fmt)
dt = dt.replace(microsecond=int(post_dec))
else:
dt = datetime.strptime(time_str, time_fmt)
return dt
else:
if ('day' in time_str):
m = re.match('(?P<days>[-\\d]+) day[s]*, (?P<hours>\\d+):(?P<minutes>\\d+):(?P<seconds>\\d[\\.\\d+]*)', time_str)
else:
m = re.match('(?P<hours>\\d+):(?P<minutes>\\d+):(?P<seconds>\\d[\\.\\d+]*)', time_str)
return timedelta(**{key: float(val) for (key, val) in m.groupdict().items()})
except Exception as e:
logger.error(('Failed to parse "%s".' % time_str))
raise e
| 1,290,442,873,732,414,500
|
Create a timedelta or datetime object from default string reprs.
|
indra/tools/reading/submit_reading_pipeline.py
|
_parse_time
|
budakn/INDRA
|
python
|
@staticmethod
def _parse_time(time_str):
try:
if ('-' in time_str):
time_fmt = '%Y-%m-%d %H:%M:%S'
if ('.' in time_str):
(pre_dec, post_dec) = time_str.split('.')
dt = datetime.strptime(pre_dec, time_fmt)
dt = dt.replace(microsecond=int(post_dec))
else:
dt = datetime.strptime(time_str, time_fmt)
return dt
else:
if ('day' in time_str):
m = re.match('(?P<days>[-\\d]+) day[s]*, (?P<hours>\\d+):(?P<minutes>\\d+):(?P<seconds>\\d[\\.\\d+]*)', time_str)
else:
m = re.match('(?P<hours>\\d+):(?P<minutes>\\d+):(?P<seconds>\\d[\\.\\d+]*)', time_str)
return timedelta(**{key: float(val) for (key, val) in m.groupdict().items()})
except Exception as e:
logger.error(('Failed to parse "%s".' % time_str))
raise e
|
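Illustrative inputs the parser above is meant to handle; the enclosing class is assumed to be DbReadingSubmitter based on the surrounding records, not stated in this one.

DbReadingSubmitter._parse_time('2019-01-02 03:04:05.123456')   # -> datetime.datetime
DbReadingSubmitter._parse_time('1:02:03.5')                    # -> datetime.timedelta
DbReadingSubmitter._parse_time('2 days, 1:02:03.5')            # -> datetime.timedelta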
def produce_report(self):
'Produce a report of the batch jobs.'
s3_prefix = ('reading_results/%s/logs/%s/' % (self.basename, self._job_queue))
logger.info(('Producing batch report for %s, from prefix %s.' % (self.basename, s3_prefix)))
s3 = boto3.client('s3')
file_tree = self._get_results_file_tree(s3, s3_prefix)
logger.info(('Found %d relevant files.' % len(file_tree)))
stat_files = {'git_info.txt': (self._handle_git_info, self._report_git_info), 'timing.txt': (self._handle_timing, self._report_timing), 'raw_tuples.pkl': (None, None), 'hist_data.pkl': (self._handle_hist_data, self._report_hist_data), 'sum_data.pkl': (self._handle_sum_data, self._report_sum_data)}
stat_aggs = {}
for (stat_file, (handle_stats, report_stats)) in stat_files.items():
logger.info(('Aggregating %s...' % stat_file))
my_agg = {}
file_paths = file_tree.get_paths(stat_file)
logger.info(('Found %d files for %s.' % (len(file_paths), stat_file)))
for (sub_path, file_entry) in file_paths:
s3_key = file_entry['key']
ref = sub_path[0]
file = s3.get_object(Bucket=bucket_name, Key=s3_key)
file_bytes = file['Body'].read()
if (handle_stats is not None):
handle_stats(ref, my_agg, file_bytes)
if ((report_stats is not None) and len(my_agg)):
report_stats(my_agg)
stat_aggs[stat_file] = my_agg
for (end_type, jobs) in self.run_record.items():
self.reporter.add_text(('Jobs %s: %d' % (end_type, len(jobs))), section='Totals')
s3_prefix = ('reading_results/%s/' % self.basename)
fname = self.reporter.make_report()
with open(fname, 'rb') as f:
s3.put_object(Bucket=bucket_name, Key=(s3_prefix + fname), Body=f.read())
s3.put_object(Bucket=bucket_name, Key=(s3_prefix + ('stat_aggregates_%s.pkl' % self.time_tag)), Body=pickle.dumps(stat_aggs))
return (file_tree, stat_aggs)
| -5,124,152,994,296,084,000
|
Produce a report of the batch jobs.
|
indra/tools/reading/submit_reading_pipeline.py
|
produce_report
|
budakn/INDRA
|
python
|
def produce_report(self):
s3_prefix = ('reading_results/%s/logs/%s/' % (self.basename, self._job_queue))
logger.info(('Producing batch report for %s, from prefix %s.' % (self.basename, s3_prefix)))
s3 = boto3.client('s3')
file_tree = self._get_results_file_tree(s3, s3_prefix)
logger.info(('Found %d relevant files.' % len(file_tree)))
stat_files = {'git_info.txt': (self._handle_git_info, self._report_git_info), 'timing.txt': (self._handle_timing, self._report_timing), 'raw_tuples.pkl': (None, None), 'hist_data.pkl': (self._handle_hist_data, self._report_hist_data), 'sum_data.pkl': (self._handle_sum_data, self._report_sum_data)}
stat_aggs = {}
for (stat_file, (handle_stats, report_stats)) in stat_files.items():
logger.info(('Aggregating %s...' % stat_file))
my_agg = {}
file_paths = file_tree.get_paths(stat_file)
logger.info(('Found %d files for %s.' % (len(file_paths), stat_file)))
for (sub_path, file_entry) in file_paths:
s3_key = file_entry['key']
ref = sub_path[0]
file = s3.get_object(Bucket=bucket_name, Key=s3_key)
file_bytes = file['Body'].read()
if (handle_stats is not None):
handle_stats(ref, my_agg, file_bytes)
if ((report_stats is not None) and len(my_agg)):
report_stats(my_agg)
stat_aggs[stat_file] = my_agg
for (end_type, jobs) in self.run_record.items():
self.reporter.add_text(('Jobs %s: %d' % (end_type, len(jobs))), section='Totals')
s3_prefix = ('reading_results/%s/' % self.basename)
fname = self.reporter.make_report()
with open(fname, 'rb') as f:
s3.put_object(Bucket=bucket_name, Key=(s3_prefix + fname), Body=f.read())
s3.put_object(Bucket=bucket_name, Key=(s3_prefix + ('stat_aggregates_%s.pkl' % self.time_tag)), Body=pickle.dumps(stat_aggs))
return (file_tree, stat_aggs)
|
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
' Determine reserved, available, reserved but unconfirmed and used seats. '
for ticket in self:
ticket.seats_availability = ('unlimited' if (ticket.seats_max == 0) else 'limited')
ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
if self.ids:
state_field = {'draft': 'seats_unconfirmed', 'open': 'seats_reserved', 'done': 'seats_used'}
query = " SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n "
self.env.cr.execute(query, (tuple(self.ids),))
for (event_ticket_id, state, num) in self.env.cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
for ticket in self:
if (ticket.seats_max > 0):
ticket.seats_available = (ticket.seats_max - (ticket.seats_reserved + ticket.seats_used))
| -2,804,826,914,822,728,000
|
Determine reserved, available, reserved but unconfirmed and used seats.
|
addons/event_sale/models/event.py
|
_compute_seats
|
jjiege/odoo
|
python
|
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
' '
for ticket in self:
ticket.seats_availability = ('unlimited' if (ticket.seats_max == 0) else 'limited')
ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
if self.ids:
state_field = {'draft': 'seats_unconfirmed', 'open': 'seats_reserved', 'done': 'seats_used'}
query = " SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n "
self.env.cr.execute(query, (tuple(self.ids),))
for (event_ticket_id, state, num) in self.env.cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
for ticket in self:
if (ticket.seats_max > 0):
ticket.seats_available = (ticket.seats_max - (ticket.seats_reserved + ticket.seats_used))
|
def get_ticket_multiline_description_sale(self):
' Compute a multiline description of this ticket, in the context of sales.\n It will often be used as the default description of a sales order line referencing this ticket.\n\n 1. the first line is the ticket name\n 2. the second line is the event name (if it exists, which should be the case with a normal workflow) or the product name (if it exists)\n\n We decided to ignore entirely the product name and the product description_sale because they are considered to be replaced by the ticket name and event name.\n -> the workflow of creating a new event also does not lead to filling them correctly, as the product is created through the event interface\n '
name = self.display_name
if self.event_id:
name += ('\n' + self.event_id.display_name)
elif self.product_id:
name += ('\n' + self.product_id.display_name)
return name
| -3,671,193,765,973,056,000
|
Compute a multiline description of this ticket, in the context of sales.
It will often be used as the default description of a sales order line referencing this ticket.
1. the first line is the ticket name
2. the second line is the event name (if it exists, which should be the case with a normal workflow) or the product name (if it exists)
We decided to ignore entirely the product name and the product description_sale because they are considered to be replaced by the ticket name and event name.
-> the workflow of creating a new event also does not lead to filling them correctly, as the product is created through the event interface
|
addons/event_sale/models/event.py
|
get_ticket_multiline_description_sale
|
jjiege/odoo
|
python
|
def get_ticket_multiline_description_sale(self):
' Compute a multiline description of this ticket, in the context of sales.\n It will often be used as the default description of a sales order line referencing this ticket.\n\n 1. the first line is the ticket name\n 2. the second line is the event name (if it exists, which should be the case with a normal workflow) or the product name (if it exists)\n\n We decided to ignore entirely the product name and the product description_sale because they are considered to be replaced by the ticket name and event name.\n -> the workflow of creating a new event also does not lead to filling them correctly, as the product is created through the event interface\n '
name = self.display_name
if self.event_id:
name += ('\n' + self.event_id.display_name)
elif self.product_id:
name += ('\n' + self.product_id.display_name)
return name
|
@api.model
def _prepare_attendee_values(self, registration):
' Override to add sale related stuff '
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(EventRegistration, self)._prepare_attendee_values(registration)
if line_id:
att_data.update({'event_id': line_id.event_id.id, 'event_id': line_id.event_id.id, 'event_ticket_id': line_id.event_ticket_id.id, 'origin': line_id.order_id.name, 'sale_order_id': line_id.order_id.id, 'sale_order_line_id': line_id.id})
return att_data
| 8,489,737,217,404,569,000
|
Override to add sale related stuff
|
addons/event_sale/models/event.py
|
_prepare_attendee_values
|
jjiege/odoo
|
python
|
@api.model
def _prepare_attendee_values(self, registration):
' '
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(EventRegistration, self)._prepare_attendee_values(registration)
if line_id:
att_data.update({'event_id': line_id.event_id.id, 'event_id': line_id.event_id.id, 'event_ticket_id': line_id.event_ticket_id.id, 'origin': line_id.order_id.name, 'sale_order_id': line_id.order_id.id, 'sale_order_line_id': line_id.id})
return att_data
|
def __init__(self, **kwargs):
'CreateEntityResponse - a model defined in Swagger'
self.compute_parent_updates()
for k in kwargs:
if (k not in self.swagger_types):
raise ValueError(("CreateEntityResponse got unexpected argument '%s'" % k))
get_parent().__init__(self, **kwargs)
self._description = None
self._name = None
if ('description' in kwargs):
self.description = kwargs['description']
if ('name' in kwargs):
self.name = kwargs['name']
| 6,175,245,595,622,558,000
|
CreateEntityResponse - a model defined in Swagger
|
factern_client/com/factern/model/create_entity_response.py
|
__init__
|
Factern/factern-client-python
|
python
|
def __init__(self, **kwargs):
self.compute_parent_updates()
for k in kwargs:
if (k not in self.swagger_types):
raise ValueError(("CreateEntityResponse got unexpected argument '%s'" % k))
get_parent().__init__(self, **kwargs)
self._description = None
self._name = None
if ('description' in kwargs):
self.description = kwargs['description']
if ('name' in kwargs):
self.name = kwargs['name']
|
@property
def description(self):
'Gets the description of this CreateEntityResponse. # noqa: E501\n\n\n :return: The description of this CreateEntityResponse. # noqa: E501\n :rtype: str\n '
return self._description
| -6,438,640,782,097,517,000
|
Gets the description of this CreateEntityResponse. # noqa: E501
:return: The description of this CreateEntityResponse. # noqa: E501
:rtype: str
|
factern_client/com/factern/model/create_entity_response.py
|
description
|
Factern/factern-client-python
|
python
|
@property
def description(self):
'Gets the description of this CreateEntityResponse. # noqa: E501\n\n\n :return: The description of this CreateEntityResponse. # noqa: E501\n :rtype: str\n '
return self._description
|
@description.setter
def description(self, description):
'Sets the description of this CreateEntityResponse.\n\n\n :param description: The description of this CreateEntityResponse. # noqa: E501\n :type: str\n '
self._description = description
| -6,894,999,339,211,008,000
|
Sets the description of this CreateEntityResponse.
:param description: The description of this CreateEntityResponse. # noqa: E501
:type: str
|
factern_client/com/factern/model/create_entity_response.py
|
description
|
Factern/factern-client-python
|
python
|
@description.setter
def description(self, description):
'Sets the description of this CreateEntityResponse.\n\n\n :param description: The description of this CreateEntityResponse. # noqa: E501\n :type: str\n '
self._description = description
|
@property
def name(self):
'Gets the name of this CreateEntityResponse. # noqa: E501\n\n\n :return: The name of this CreateEntityResponse. # noqa: E501\n :rtype: str\n '
return self._name
| -3,667,751,320,017,525,000
|
Gets the name of this CreateEntityResponse. # noqa: E501
:return: The name of this CreateEntityResponse. # noqa: E501
:rtype: str
|
factern_client/com/factern/model/create_entity_response.py
|
name
|
Factern/factern-client-python
|
python
|
@property
def name(self):
'Gets the name of this CreateEntityResponse. # noqa: E501\n\n\n :return: The name of this CreateEntityResponse. # noqa: E501\n :rtype: str\n '
return self._name
|
@name.setter
def name(self, name):
'Sets the name of this CreateEntityResponse.\n\n\n :param name: The name of this CreateEntityResponse. # noqa: E501\n :type: str\n '
self._name = name
| 8,633,343,821,934,959,000
|
Sets the name of this CreateEntityResponse.
:param name: The name of this CreateEntityResponse. # noqa: E501
:type: str
|
factern_client/com/factern/model/create_entity_response.py
|
name
|
Factern/factern-client-python
|
python
|
@name.setter
def name(self, name):
'Sets the name of this CreateEntityResponse.\n\n\n :param name: The name of this CreateEntityResponse. # noqa: E501\n :type: str\n '
self._name = name
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
| -2,772,352,302,133,010,000
|
Returns the model properties as a dict
|
factern_client/com/factern/model/create_entity_response.py
|
to_dict
|
Factern/factern-client-python
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
factern_client/com/factern/model/create_entity_response.py
|
to_str
|
Factern/factern-client-python
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
factern_client/com/factern/model/create_entity_response.py
|
__repr__
|
Factern/factern-client-python
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, CreateEntityResponse)):
return False
return (self.__dict__ == other.__dict__)
| -8,022,951,761,105,448,000
|
Returns true if both objects are equal
|
factern_client/com/factern/model/create_entity_response.py
|
__eq__
|
Factern/factern-client-python
|
python
|
def __eq__(self, other):
if (not isinstance(other, CreateEntityResponse)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
factern_client/com/factern/model/create_entity_response.py
|
__ne__
|
Factern/factern-client-python
|
python
|
def __ne__(self, other):
return (not (self == other))
|
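A brief sketch of how these generated Swagger-model methods are typically used together; constructing the object with these keyword values is an assumption about its swagger_types, not taken from the records above.

resp = CreateEntityResponse(name='example-entity', description='demo entity')  # hypothetical values
payload = resp.to_dict()   # plain dict built recursively from the swagger_types attributes
print(resp)                # __repr__ delegates to to_str(), i.e. pprint.pformat(payload)
# __eq__/__ne__ compare the full __dict__ of two CreateEntityResponse instances.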
async def renew_expired_facts(self) -> None:
'\n Send out requests to renew expired facts\n '
LOGGER.info('Renewing expired parameters')
updated_before = (datetime.datetime.now().astimezone() - datetime.timedelta(0, (self._fact_expire - self._fact_renew)))
expired_params = (await data.Parameter.get_updated_before(updated_before))
LOGGER.debug(('Renewing %d expired parameters' % len(expired_params)))
for param in expired_params:
if (param.environment is None):
LOGGER.warning('Found parameter without environment (%s for resource %s). Deleting it.', param.name, param.resource_id)
(await param.delete())
else:
LOGGER.debug('Requesting new parameter value for %s of resource %s in env %s', param.name, param.resource_id, param.environment)
(await self.agentmanager.request_parameter(param.environment, param.resource_id))
unknown_parameters = (await data.UnknownParameter.get_list(resolved=False))
for u in unknown_parameters:
if (u.environment is None):
LOGGER.warning('Found unknown parameter without environment (%s for resource %s). Deleting it.', u.name, u.resource_id)
(await u.delete())
else:
LOGGER.debug('Requesting value for unknown parameter %s of resource %s in env %s', u.name, u.resource_id, u.id)
(await self.agentmanager.request_parameter(u.environment, u.resource_id))
LOGGER.info('Done renewing expired parameters')
| 3,964,860,801,135,793,700
|
Send out requests to renew expired facts
|
src/inmanta/server/services/paramservice.py
|
renew_expired_facts
|
inmanta/inmanta-core
|
python
|
async def renew_expired_facts(self) -> None:
'\n \n '
LOGGER.info('Renewing expired parameters')
updated_before = (datetime.datetime.now().astimezone() - datetime.timedelta(0, (self._fact_expire - self._fact_renew)))
expired_params = (await data.Parameter.get_updated_before(updated_before))
LOGGER.debug(('Renewing %d expired parameters' % len(expired_params)))
for param in expired_params:
if (param.environment is None):
LOGGER.warning('Found parameter without environment (%s for resource %s). Deleting it.', param.name, param.resource_id)
(await param.delete())
else:
LOGGER.debug('Requesting new parameter value for %s of resource %s in env %s', param.name, param.resource_id, param.environment)
(await self.agentmanager.request_parameter(param.environment, param.resource_id))
unknown_parameters = (await data.UnknownParameter.get_list(resolved=False))
for u in unknown_parameters:
if (u.environment is None):
LOGGER.warning('Found unknown parameter without environment (%s for resource %s). Deleting it.', u.name, u.resource_id)
(await u.delete())
else:
LOGGER.debug('Requesting value for unknown parameter %s of resource %s in env %s', u.name, u.resource_id, u.id)
(await self.agentmanager.request_parameter(u.environment, u.resource_id))
LOGGER.info('Done renewing expired parameters')
|
async def _update_param(self, env: data.Environment, name: str, value: str, source: str, resource_id: str, metadata: JsonType, recompile: bool=False) -> bool:
'\n Update or set a parameter.\n\n This method returns true if:\n - this update resolves an unknown\n - recompile is true and the parameter updates an existing parameter to a new value\n '
LOGGER.debug('Updating/setting parameter %s in env %s (for resource %s)', name, env.id, resource_id)
if (not isinstance(value, str)):
value = str(value)
if (resource_id is None):
resource_id = ''
params = (await data.Parameter.get_list(environment=env.id, name=name, resource_id=resource_id))
value_updated = True
if (len(params) == 0):
param = data.Parameter(environment=env.id, name=name, resource_id=resource_id, value=value, source=source, updated=datetime.datetime.now().astimezone(), metadata=metadata)
(await param.insert())
else:
param = params[0]
value_updated = (param.value != value)
(await param.update(source=source, value=value, updated=datetime.datetime.now().astimezone(), metadata=metadata))
unknown_params = (await data.UnknownParameter.get_list(environment=env.id, name=name, resource_id=resource_id, resolved=False))
if (len(unknown_params) > 0):
LOGGER.info('Received values for unknown parameters %s, triggering a recompile', ', '.join([x.name for x in unknown_params]))
for p in unknown_params:
(await p.update_fields(resolved=True))
return True
return (recompile and value_updated)
| 2,212,729,937,336,484,600
|
Update or set a parameter.
This method returns true if:
- this update resolves an unknown
- recompile is true and the parameter updates an existing parameter to a new value
|
src/inmanta/server/services/paramservice.py
|
_update_param
|
inmanta/inmanta-core
|
python
|
async def _update_param(self, env: data.Environment, name: str, value: str, source: str, resource_id: str, metadata: JsonType, recompile: bool=False) -> bool:
'\n Update or set a parameter.\n\n This method returns true if:\n - this update resolves an unknown\n - recompile is true and the parameter updates an existing parameter to a new value\n '
LOGGER.debug('Updating/setting parameter %s in env %s (for resource %s)', name, env.id, resource_id)
if (not isinstance(value, str)):
value = str(value)
if (resource_id is None):
resource_id = ''
params = (await data.Parameter.get_list(environment=env.id, name=name, resource_id=resource_id))
value_updated = True
if (len(params) == 0):
param = data.Parameter(environment=env.id, name=name, resource_id=resource_id, value=value, source=source, updated=datetime.datetime.now().astimezone(), metadata=metadata)
(await param.insert())
else:
param = params[0]
value_updated = (param.value != value)
(await param.update(source=source, value=value, updated=datetime.datetime.now().astimezone(), metadata=metadata))
unknown_params = (await data.UnknownParameter.get_list(environment=env.id, name=name, resource_id=resource_id, resolved=False))
if (len(unknown_params) > 0):
LOGGER.info('Received values for unknown parameters %s, triggering a recompile', ', '.join([x.name for x in unknown_params]))
for p in unknown_params:
(await p.update_fields(resolved=True))
return True
return (recompile and value_updated)
|
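A hedged sketch of the call pattern for _update_param; 'service' stands for the server slice that owns the method (its class name is not shown in the record), and the parameter name, value and metadata are placeholders.

async def store_fact(service, env, resource_id):
    needs_recompile = await service._update_param(
        env, name='fs_mounted', value='true', source='fact',
        resource_id=resource_id, metadata={}, recompile=True)
    # True when this value resolved an unknown parameter, or when recompile=True
    # and the stored value actually changed; callers may trigger a recompile then.
    return needs_recompile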
def get_queryset(self):
"\n Annotate queryset with an alias field 'name'.\n\n We want to test whether this annotation has been run after\n calling `baker.make()`.\n "
return super(MovieManager, self).get_queryset().annotate(name=models.F('title'))
| 1,360,840,873,464,003,300
|
Annotate queryset with an alias field 'name'.
We want to test whether this annotation has been run after
calling `baker.make()`.
|
tests/generic/models.py
|
get_queryset
|
atimilson/model_bakery
|
python
|
def get_queryset(self):
"\n Annotate queryset with an alias field 'name'.\n\n We want to test whether this annotation has been run after\n calling `baker.make()`.\n "
return super(MovieManager, self).get_queryset().annotate(name=models.F('title'))
|
def start(self) -> None:
'Starts the pushers off in a background process.'
if (not self._should_start_pushers):
logger.info('Not starting pushers because they are disabled in the config')
return
run_as_background_process('start_pushers', self._start_pushers)
| -2,023,674,344,118,949,400
|
Starts the pushers off in a background process.
|
synapse/push/pusherpool.py
|
start
|
3ayazaya/synapse
|
python
|
def start(self) -> None:
if (not self._should_start_pushers):
logger.info('Not starting pushers because they are disabled in the config')
return
run_as_background_process('start_pushers', self._start_pushers)
|
async def add_pusher(self, user_id: str, access_token: Optional[int], kind: str, app_id: str, app_display_name: str, device_display_name: str, pushkey: str, lang: Optional[str], data: JsonDict, profile_tag: str='') -> Optional[Pusher]:
'Creates a new pusher and adds it to the pool\n\n Returns:\n The newly created pusher.\n '
if (kind == 'email'):
email_owner = (await self.store.get_user_id_by_threepid('email', canonicalise_email(pushkey)))
if (email_owner != user_id):
raise SynapseError(400, 'Email not found', Codes.THREEPID_NOT_FOUND)
time_now_msec = self.clock.time_msec()
last_stream_ordering = self.store.get_room_max_stream_ordering()
self.pusher_factory.create_pusher(PusherConfig(id=None, user_name=user_id, access_token=access_token, profile_tag=profile_tag, kind=kind, app_id=app_id, app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, ts=time_now_msec, lang=lang, data=data, last_stream_ordering=last_stream_ordering, last_success=None, failing_since=None))
(await self.store.add_pusher(user_id=user_id, access_token=access_token, kind=kind, app_id=app_id, app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, pushkey_ts=time_now_msec, lang=lang, data=data, last_stream_ordering=last_stream_ordering, profile_tag=profile_tag))
pusher = (await self.start_pusher_by_id(app_id, pushkey, user_id))
return pusher
| -4,144,691,672,634,155,500
|
Creates a new pusher and adds it to the pool
Returns:
The newly created pusher.
|
synapse/push/pusherpool.py
|
add_pusher
|
3ayazaya/synapse
|
python
|
async def add_pusher(self, user_id: str, access_token: Optional[int], kind: str, app_id: str, app_display_name: str, device_display_name: str, pushkey: str, lang: Optional[str], data: JsonDict, profile_tag: str='') -> Optional[Pusher]:
'Creates a new pusher and adds it to the pool\n\n Returns:\n The newly created pusher.\n '
if (kind == 'email'):
email_owner = (await self.store.get_user_id_by_threepid('email', canonicalise_email(pushkey)))
if (email_owner != user_id):
raise SynapseError(400, 'Email not found', Codes.THREEPID_NOT_FOUND)
time_now_msec = self.clock.time_msec()
last_stream_ordering = self.store.get_room_max_stream_ordering()
self.pusher_factory.create_pusher(PusherConfig(id=None, user_name=user_id, access_token=access_token, profile_tag=profile_tag, kind=kind, app_id=app_id, app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, ts=time_now_msec, lang=lang, data=data, last_stream_ordering=last_stream_ordering, last_success=None, failing_since=None))
(await self.store.add_pusher(user_id=user_id, access_token=access_token, kind=kind, app_id=app_id, app_display_name=app_display_name, device_display_name=device_display_name, pushkey=pushkey, pushkey_ts=time_now_msec, lang=lang, data=data, last_stream_ordering=last_stream_ordering, profile_tag=profile_tag))
pusher = (await self.start_pusher_by_id(app_id, pushkey, user_id))
return pusher
|
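A hedged usage sketch for add_pusher; the pusher pool instance, user id, token id and email address are placeholders, not values from the source.

async def register_email_pusher(pusher_pool, user_id, token_id):
    pusher = await pusher_pool.add_pusher(
        user_id=user_id,
        access_token=token_id,
        kind='email',
        app_id='m.email',
        app_display_name='Email Notifications',
        device_display_name='bob@example.com',
        pushkey='bob@example.com',
        lang=None,
        data={},
    )
    return pusher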
async def remove_pushers_by_access_token(self, user_id: str, access_tokens: Iterable[int]) -> None:
'Remove the pushers for a given user corresponding to a set of\n access_tokens.\n\n Args:\n user_id: user to remove pushers for\n access_tokens: access token *ids* to remove pushers for\n '
tokens = set(access_tokens)
for p in (await self.store.get_pushers_by_user_id(user_id)):
if (p.access_token in tokens):
logger.info('Removing pusher for app id %s, pushkey %s, user %s', p.app_id, p.pushkey, p.user_name)
(await self.remove_pusher(p.app_id, p.pushkey, p.user_name))
| 8,722,035,526,725,559,000
|
Remove the pushers for a given user corresponding to a set of
access_tokens.
Args:
user_id: user to remove pushers for
access_tokens: access token *ids* to remove pushers for
|
synapse/push/pusherpool.py
|
remove_pushers_by_access_token
|
3ayazaya/synapse
|
python
|
async def remove_pushers_by_access_token(self, user_id: str, access_tokens: Iterable[int]) -> None:
'Remove the pushers for a given user corresponding to a set of\n access_tokens.\n\n Args:\n user_id: user to remove pushers for\n access_tokens: access token *ids* to remove pushers for\n '
tokens = set(access_tokens)
for p in (await self.store.get_pushers_by_user_id(user_id)):
if (p.access_token in tokens):
logger.info('Removing pusher for app id %s, pushkey %s, user %s', p.app_id, p.pushkey, p.user_name)
(await self.remove_pusher(p.app_id, p.pushkey, p.user_name))
|
async def start_pusher_by_id(self, app_id: str, pushkey: str, user_id: str) -> Optional[Pusher]:
'Look up the details for the given pusher, and start it\n\n Returns:\n The pusher started, if any\n '
if (not self._should_start_pushers):
return None
if (not self._pusher_shard_config.should_handle(self._instance_name, user_id)):
return None
resultlist = (await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey))
pusher_config = None
for r in resultlist:
if (r.user_name == user_id):
pusher_config = r
pusher = None
if pusher_config:
pusher = (await self._start_pusher(pusher_config))
return pusher
| 5,989,586,175,201,948,000
|
Look up the details for the given pusher, and start it
Returns:
The pusher started, if any
|
synapse/push/pusherpool.py
|
start_pusher_by_id
|
3ayazaya/synapse
|
python
|
async def start_pusher_by_id(self, app_id: str, pushkey: str, user_id: str) -> Optional[Pusher]:
'Look up the details for the given pusher, and start it\n\n Returns:\n The pusher started, if any\n '
if (not self._should_start_pushers):
return None
if (not self._pusher_shard_config.should_handle(self._instance_name, user_id)):
return None
resultlist = (await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey))
pusher_config = None
for r in resultlist:
if (r.user_name == user_id):
pusher_config = r
pusher = None
if pusher_config:
pusher = (await self._start_pusher(pusher_config))
return pusher
|
async def _start_pushers(self) -> None:
'Start all the pushers'
pushers = (await self.store.get_all_pushers())
(await concurrently_execute(self._start_pusher, pushers, 10))
logger.info('Started pushers')
| -6,608,109,463,484,806,000
|
Start all the pushers
|
synapse/push/pusherpool.py
|
_start_pushers
|
3ayazaya/synapse
|
python
|
async def _start_pushers(self) -> None:
pushers = (await self.store.get_all_pushers())
(await concurrently_execute(self._start_pusher, pushers, 10))
logger.info('Started pushers')
|
async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]:
'Start the given pusher\n\n Args:\n pusher_config: The pusher configuration with the values pulled from the db table\n\n Returns:\n The newly created pusher or None.\n '
if (not self._pusher_shard_config.should_handle(self._instance_name, pusher_config.user_name)):
return None
try:
p = self.pusher_factory.create_pusher(pusher_config)
except PusherConfigException as e:
logger.warning('Pusher incorrectly configured id=%i, user=%s, appid=%s, pushkey=%s: %s', pusher_config.id, pusher_config.user_name, pusher_config.app_id, pusher_config.pushkey, e)
return None
except Exception:
logger.exception("Couldn't start pusher id %i: caught Exception", pusher_config.id)
return None
if (not p):
return None
appid_pushkey = ('%s:%s' % (pusher_config.app_id, pusher_config.pushkey))
byuser = self.pushers.setdefault(pusher_config.user_name, {})
if (appid_pushkey in byuser):
byuser[appid_pushkey].on_stop()
byuser[appid_pushkey] = p
synapse_pushers.labels(type(p).__name__, p.app_id).inc()
user_id = pusher_config.user_name
last_stream_ordering = pusher_config.last_stream_ordering
if last_stream_ordering:
have_notifs = (await self.store.get_if_maybe_push_in_range_for_user(user_id, last_stream_ordering))
else:
have_notifs = True
p.on_started(have_notifs)
return p
| -4,503,619,665,891,171,300
|
Start the given pusher
Args:
pusher_config: The pusher configuration with the values pulled from the db table
Returns:
The newly created pusher or None.
|
synapse/push/pusherpool.py
|
_start_pusher
|
3ayazaya/synapse
|
python
|
async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]:
'Start the given pusher\n\n Args:\n pusher_config: The pusher configuration with the values pulled from the db table\n\n Returns:\n The newly created pusher or None.\n '
if (not self._pusher_shard_config.should_handle(self._instance_name, pusher_config.user_name)):
return None
try:
p = self.pusher_factory.create_pusher(pusher_config)
except PusherConfigException as e:
logger.warning('Pusher incorrectly configured id=%i, user=%s, appid=%s, pushkey=%s: %s', pusher_config.id, pusher_config.user_name, pusher_config.app_id, pusher_config.pushkey, e)
return None
except Exception:
logger.exception("Couldn't start pusher id %i: caught Exception", pusher_config.id)
return None
if (not p):
return None
appid_pushkey = ('%s:%s' % (pusher_config.app_id, pusher_config.pushkey))
byuser = self.pushers.setdefault(pusher_config.user_name, {})
if (appid_pushkey in byuser):
byuser[appid_pushkey].on_stop()
byuser[appid_pushkey] = p
synapse_pushers.labels(type(p).__name__, p.app_id).inc()
user_id = pusher_config.user_name
last_stream_ordering = pusher_config.last_stream_ordering
if last_stream_ordering:
have_notifs = (await self.store.get_if_maybe_push_in_range_for_user(user_id, last_stream_ordering))
else:
have_notifs = True
p.on_started(have_notifs)
return p
|
def get(self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
"Get a SQL pool's blob auditing policy.\n\n Get a SQL pool's blob auditing policy.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace\n :type workspace_name: str\n :param sql_pool_name: SQL pool name\n :type sql_pool_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n "
url = self.get.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str'), 'sqlPoolName': self._serialize.url('sql_pool_name', sql_pool_name, 'str'), 'blobAuditingPolicyName': self._serialize.url('self.blob_auditing_policy_name', self.blob_auditing_policy_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str', min_length=1)
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if (self.config.accept_language is not None):
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if (response.status_code not in [200]):
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if (response.status_code == 200):
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| -6,123,978,912,562,874,000
|
Get a SQL pool's blob auditing policy.
Get a SQL pool's blob auditing policy.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
|
venv/lib/python3.8/site-packages/azure/mgmt/synapse/operations/_sql_pool_blob_auditing_policies_operations.py
|
get
|
amcclead7336/Enterprise_Data_Science_Final
|
python
|
def get(self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
"Get a SQL pool's blob auditing policy.\n\n Get a SQL pool's blob auditing policy.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace\n :type workspace_name: str\n :param sql_pool_name: SQL pool name\n :type sql_pool_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n "
url = self.get.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str'), 'sqlPoolName': self._serialize.url('sql_pool_name', sql_pool_name, 'str'), 'blobAuditingPolicyName': self._serialize.url('self.blob_auditing_policy_name', self.blob_auditing_policy_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str', min_length=1)
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if (self.config.accept_language is not None):
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if (response.status_code not in [200]):
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if (response.status_code == 200):
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
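A hedged usage sketch for the operation above. The track-1 client construction and the operations attribute name (sql_pool_blob_auditing_policies) are inferred from this module's path and may differ across azure-mgmt-synapse releases; credentials and resource names are placeholders.

# Hypothetical call to get(); subscription and resource names are placeholders.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.synapse import SynapseManagementClient

credentials = ServicePrincipalCredentials(client_id='<app-id>', secret='<secret>', tenant='<tenant-id>')
client = SynapseManagementClient(credentials, '<subscription-id>')

policy = client.sql_pool_blob_auditing_policies.get(
    resource_group_name='my-rg',
    workspace_name='my-workspace',
    sql_pool_name='my-sql-pool',
)
print(policy.state)  # assumed attribute on SqlPoolBlobAuditingPolicy, e.g. 'Enabled'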
def create_or_update(self, resource_group_name, workspace_name, sql_pool_name, parameters, custom_headers=None, raw=False, **operation_config):
"Creates or updates a SQL pool's blob auditing policy.\n\n Creates or updates a SQL pool's blob auditing policy.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace\n :type workspace_name: str\n :param sql_pool_name: SQL pool name\n :type sql_pool_name: str\n :param parameters: The database blob auditing policy.\n :type parameters: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n "
url = self.create_or_update.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str'), 'sqlPoolName': self._serialize.url('sql_pool_name', sql_pool_name, 'str'), 'blobAuditingPolicyName': self._serialize.url('self.blob_auditing_policy_name', self.blob_auditing_policy_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str', min_length=1)
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if (self.config.accept_language is not None):
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'SqlPoolBlobAuditingPolicy')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if (response.status_code not in [200, 201]):
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if (response.status_code == 200):
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if (response.status_code == 201):
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| 4,094,808,656,080,038,400
|
Creates or updates a SQL pool's blob auditing policy.
Creates or updates a SQL pool's blob auditing policy.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param parameters: The database blob auditing policy.
:type parameters: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
|
venv/lib/python3.8/site-packages/azure/mgmt/synapse/operations/_sql_pool_blob_auditing_policies_operations.py
|
create_or_update
|
amcclead7336/Enterprise_Data_Science_Final
|
python
|
def create_or_update(self, resource_group_name, workspace_name, sql_pool_name, parameters, custom_headers=None, raw=False, **operation_config):
"Creates or updates a SQL pool's blob auditing policy.\n\n Creates or updates a SQL pool's blob auditing policy.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace\n :type workspace_name: str\n :param sql_pool_name: SQL pool name\n :type sql_pool_name: str\n :param parameters: The database blob auditing policy.\n :type parameters: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: SqlPoolBlobAuditingPolicy or ClientRawResponse if raw=true\n :rtype: ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy or\n ~msrest.pipeline.ClientRawResponse\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n "
url = self.create_or_update.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str'), 'sqlPoolName': self._serialize.url('sql_pool_name', sql_pool_name, 'str'), 'blobAuditingPolicyName': self._serialize.url('self.blob_auditing_policy_name', self.blob_auditing_policy_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str', min_length=1)
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if (self.config.accept_language is not None):
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str')
body_content = self._serialize.body(parameters, 'SqlPoolBlobAuditingPolicy')
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if (response.status_code not in [200, 201]):
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if (response.status_code == 200):
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if (response.status_code == 201):
deserialized = self._deserialize('SqlPoolBlobAuditingPolicy', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
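Continuing the sketch above, a hedged example of calling create_or_update. The model field names (state, storage_endpoint, storage_account_access_key, retention_days) follow the usual SQL blob-auditing policy shape and are assumptions about this SDK version.

# Hypothetical create_or_update() call, reusing the client from the previous sketch.
from azure.mgmt.synapse.models import SqlPoolBlobAuditingPolicy

parameters = SqlPoolBlobAuditingPolicy(
    state='Enabled',
    storage_endpoint='https://mystorage.blob.core.windows.net',
    storage_account_access_key='<storage-key>',
    retention_days=30,
)
result = client.sql_pool_blob_auditing_policies.create_or_update(
    resource_group_name='my-rg',
    workspace_name='my-workspace',
    sql_pool_name='my-sql-pool',
    parameters=parameters,
)
print(result.state)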
def list_by_sql_pool(self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
'Lists auditing settings of a Sql pool.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace\n :type workspace_name: str\n :param sql_pool_name: SQL pool name\n :type sql_pool_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: An iterator like instance of SqlPoolBlobAuditingPolicy\n :rtype:\n ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicyPaged[~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n '
def prepare_request(next_link=None):
if (not next_link):
url = self.list_by_sql_pool.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str'), 'sqlPoolName': self._serialize.url('sql_pool_name', sql_pool_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str', min_length=1)
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if (self.config.accept_language is not None):
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if (response.status_code not in [200]):
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.SqlPoolBlobAuditingPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
| 7,417,312,686,727,259,000
|
Lists auditing settings of a Sql pool.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace
:type workspace_name: str
:param sql_pool_name: SQL pool name
:type sql_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of SqlPoolBlobAuditingPolicy
:rtype:
~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicyPaged[~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
|
venv/lib/python3.8/site-packages/azure/mgmt/synapse/operations/_sql_pool_blob_auditing_policies_operations.py
|
list_by_sql_pool
|
amcclead7336/Enterprise_Data_Science_Final
|
python
|
def list_by_sql_pool(self, resource_group_name, workspace_name, sql_pool_name, custom_headers=None, raw=False, **operation_config):
'Lists auditing settings of a Sql pool.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param workspace_name: The name of the workspace\n :type workspace_name: str\n :param sql_pool_name: SQL pool name\n :type sql_pool_name: str\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :param operation_config: :ref:`Operation configuration\n overrides<msrest:optionsforoperations>`.\n :return: An iterator like instance of SqlPoolBlobAuditingPolicy\n :rtype:\n ~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicyPaged[~azure.mgmt.synapse.models.SqlPoolBlobAuditingPolicy]\n :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`\n '
def prepare_request(next_link=None):
if (not next_link):
url = self.list_by_sql_pool.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self.config.subscription_id', self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\\w\\._\\(\\)]+$'), 'workspaceName': self._serialize.url('workspace_name', workspace_name, 'str'), 'sqlPoolName': self._serialize.url('sql_pool_name', sql_pool_name, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str', min_length=1)
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if (self.config.accept_language is not None):
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if (response.status_code not in [200]):
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.SqlPoolBlobAuditingPolicyPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
|
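The list operation returns a lazily-paged iterator (SqlPoolBlobAuditingPolicyPaged) rather than a list; iterating it drives internal_paging to fetch follow-up pages. A hedged sketch, again reusing the client from the earlier example:

# Hypothetical iteration over the paged result; each item is a SqlPoolBlobAuditingPolicy.
policies = client.sql_pool_blob_auditing_policies.list_by_sql_pool(
    resource_group_name='my-rg',
    workspace_name='my-workspace',
    sql_pool_name='my-sql-pool',
)
for policy in policies:
    print(policy.name, policy.state)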
def choice(a, size=None, replace=True, p=None, random_state=None):
"\n choice(a, size=None, replace=True, p=None)\n\n Generates a random sample from a given 1-D array\n\n .. versionadded:: 1.7.0\n\n Parameters\n -----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n replace : boolean, optional\n Whether the sample is with or without replacement.\n\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribtion over all\n entries in a.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n\n Returns\n --------\n samples : 1-D ndarray, shape (size,)\n The generated random samples\n\n Raises\n -------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n ---------\n randint, shuffle, permutation\n\n Examples\n ---------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3) # doctest: +SKIP\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n replacement:\n\n >>> np.random.choice(5, 3, replace=False) # doctest: +SKIP\n array([3,1,0])\n >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]\n\n Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n ... # doctest: +SKIP\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n ... # doctest: +SKIP\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n "
random_state = check_random_state(random_state)
a = np.array(a, copy=False)
if (a.ndim == 0):
try:
pop_size = operator.index(a.item())
except TypeError:
raise ValueError('a must be 1-dimensional or an integer')
if (pop_size <= 0):
raise ValueError('a must be greater than 0')
elif (a.ndim != 1):
raise ValueError('a must be 1-dimensional')
else:
pop_size = a.shape[0]
if (pop_size is 0):
raise ValueError('a must be non-empty')
if (None != p):
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if (p.ndim != 1):
raise ValueError('p must be 1-dimensional')
if (p.size != pop_size):
raise ValueError('a and p must have same size')
if np.any((p < 0)):
raise ValueError('probabilities are not non-negative')
if (not np.allclose(p.sum(), 1)):
raise ValueError('probabilities do not sum to 1')
shape = size
if (shape is not None):
size = np.prod(shape, dtype=np.intp)
else:
size = 1
if replace:
if (None != p):
cdf = p.cumsum()
cdf /= cdf[(- 1)]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if (size > pop_size):
raise ValueError("Cannot take a larger sample than population when 'replace=False'")
if (None != p):
if (np.sum((p > 0)) < size):
raise ValueError('Fewer non-zero entries in p than size')
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while (n_uniq < size):
x = random_state.rand((size - n_uniq))
if (n_uniq > 0):
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[(- 1)]
new = cdf.searchsorted(x, side='right')
(_, unique_indices) = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:(n_uniq + new.size)] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if (shape is not None):
idx.shape = shape
if ((shape is None) and isinstance(idx, np.ndarray)):
idx = idx.item(0)
if (a.ndim == 0):
return idx
if ((shape is not None) and (idx.ndim == 0)):
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
| 1,923,530,375,439,504,600
|
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
|
venv/lib/python2.7/site-packages/sklearn/utils/random.py
|
choice
|
bopopescu/fbserver
|
python
|
def choice(a, size=None, replace=True, p=None, random_state=None):
"\n choice(a, size=None, replace=True, p=None)\n\n Generates a random sample from a given 1-D array\n\n .. versionadded:: 1.7.0\n\n Parameters\n -----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n replace : boolean, optional\n Whether the sample is with or without replacement.\n\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribtion over all\n entries in a.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n\n Returns\n --------\n samples : 1-D ndarray, shape (size,)\n The generated random samples\n\n Raises\n -------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n ---------\n randint, shuffle, permutation\n\n Examples\n ---------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3) # doctest: +SKIP\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n replacement:\n\n >>> np.random.choice(5, 3, replace=False) # doctest: +SKIP\n array([3,1,0])\n >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]\n\n Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n ... # doctest: +SKIP\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n ... # doctest: +SKIP\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n "
random_state = check_random_state(random_state)
a = np.array(a, copy=False)
if (a.ndim == 0):
try:
pop_size = operator.index(a.item())
except TypeError:
raise ValueError('a must be 1-dimensional or an integer')
if (pop_size <= 0):
raise ValueError('a must be greater than 0')
elif (a.ndim != 1):
raise ValueError('a must be 1-dimensional')
else:
pop_size = a.shape[0]
if (pop_size is 0):
raise ValueError('a must be non-empty')
if (None != p):
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if (p.ndim != 1):
raise ValueError('p must be 1-dimensional')
if (p.size != pop_size):
raise ValueError('a and p must have same size')
if np.any((p < 0)):
raise ValueError('probabilities are not non-negative')
if (not np.allclose(p.sum(), 1)):
raise ValueError('probabilities do not sum to 1')
shape = size
if (shape is not None):
size = np.prod(shape, dtype=np.intp)
else:
size = 1
if replace:
if (None != p):
cdf = p.cumsum()
cdf /= cdf[(- 1)]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if (size > pop_size):
raise ValueError("Cannot take a larger sample than population when 'replace=False'")
if (None != p):
if (np.sum((p > 0)) < size):
raise ValueError('Fewer non-zero entries in p than size')
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while (n_uniq < size):
x = random_state.rand((size - n_uniq))
if (n_uniq > 0):
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[(- 1)]
new = cdf.searchsorted(x, side='right')
(_, unique_indices) = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:(n_uniq + new.size)] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if (shape is not None):
idx.shape = shape
if ((shape is None) and isinstance(idx, np.ndarray)):
idx = idx.item(0)
if (a.ndim == 0):
return idx
if ((shape is not None) and (idx.ndim == 0)):
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
|
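The weighted replace=True branch above is inverse-CDF sampling: cumulate p, draw uniforms, and binary-search each draw into the cumulative distribution. A standalone sketch of just that step (array values are illustrative):

# Inverse-CDF weighted sampling, mirroring the replace=True branch of choice().
import numpy as np

rng = np.random.RandomState(0)
a = np.array(['pooh', 'rabbit', 'piglet', 'Christopher'])
p = np.array([0.5, 0.1, 0.1, 0.3])

cdf = p.cumsum()
cdf /= cdf[-1]                           # normalise so the last bin ends exactly at 1
u = rng.random_sample(5)                 # uniform draws in [0, 1)
idx = cdf.searchsorted(u, side='right')  # index of the bin each draw falls into
print(a[idx])                            # samples drawn with probabilities p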
def test_equality(self):
' Tests the equality operator.\n '
self.assertNotEqual(self.v1, self.v2)
expected_result = Vector2D((- 1), 1)
self.assertEqual(self.v2, expected_result)
| 5,486,552,652,260,747,000
|
Tests the equality operator.
|
Chapter3_CodeTesting/UnitTesting/test_vector.py
|
test_equality
|
franneck94/UdemyPythonProEng
|
python
|
def test_equality(self):
' \n '
self.assertNotEqual(self.v1, self.v2)
expected_result = Vector2D((- 1), 1)
self.assertEqual(self.v2, expected_result)
|
def test_add(self):
' Tests the addition operator.\n '
result = (self.v1 + self.v2)
expected_result = Vector2D((- 1), 1)
self.assertEqual(result, expected_result)
| -8,180,965,754,012,174,000
|
Tests the addition operator.
|
Chapter3_CodeTesting/UnitTesting/test_vector.py
|
test_add
|
franneck94/UdemyPythonProEng
|
python
|
def test_add(self):
' \n '
result = (self.v1 + self.v2)
expected_result = Vector2D((- 1), 1)
self.assertEqual(result, expected_result)
|
def test_sub(self):
' Tests the subtraction operator.\n '
result = (self.v2 - self.v3)
expected_result = Vector2D((- 3.5), 3.5)
self.assertEqual(result, expected_result)
| -3,483,669,794,062,639,600
|
Tests the subtraction operator.
|
Chapter3_CodeTesting/UnitTesting/test_vector.py
|
test_sub
|
franneck94/UdemyPythonProEng
|
python
|
def test_sub(self):
' \n '
result = (self.v2 - self.v3)
expected_result = Vector2D((- 3.5), 3.5)
self.assertEqual(result, expected_result)
|
def test_mul(self):
' Tests the multiplication operator.\n '
result1 = (self.v1 * 5)
expected_result1 = Vector2D(0.0, 0.0)
self.assertEqual(result1, expected_result1)
result2 = (self.v1 * self.v2)
expected_result2 = 0.0
self.assertEqual(result2, expected_result2)
| -6,555,608,015,718,426,000
|
Tests the multiplication operator.
|
Chapter3_CodeTesting/UnitTesting/test_vector.py
|
test_mul
|
franneck94/UdemyPythonProEng
|
python
|
def test_mul(self):
' \n '
result1 = (self.v1 * 5)
expected_result1 = Vector2D(0.0, 0.0)
self.assertEqual(result1, expected_result1)
result2 = (self.v1 * self.v2)
expected_result2 = 0.0
self.assertEqual(result2, expected_result2)
|
def test_div(self):
' Tests the multiplication operator.\n '
result = (self.v3 / 5)
expected_result = Vector2D(0.5, (- 0.5))
self.assertEqual(result, expected_result)
| 297,827,253,147,501,300
|
Tests the division operator.
|
Chapter3_CodeTesting/UnitTesting/test_vector.py
|
test_div
|
franneck94/UdemyPythonProEng
|
python
|
def test_div(self):
' \n '
result = (self.v3 / 5)
expected_result = Vector2D(0.5, (- 0.5))
self.assertEqual(result, expected_result)
|
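The five tests above fix the operator behaviour of Vector2D without showing the class itself. A minimal sketch that would satisfy them (the repository's real implementation may differ in details such as type checking or hashing); the fixtures implied by the assertions are v1 = (0, 0), v2 = (-1, 1) and v3 = (2.5, -2.5).

# Minimal Vector2D consistent with the assertions in the tests above (a sketch only).
class Vector2D:
    def __init__(self, x=0.0, y=0.0):
        self.x = x
        self.y = y

    def __eq__(self, other):
        return isinstance(other, Vector2D) and self.x == other.x and self.y == other.y

    def __add__(self, other):
        return Vector2D(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Vector2D(self.x - other.x, self.y - other.y)

    def __mul__(self, other):
        if isinstance(other, Vector2D):
            return self.x * other.x + self.y * other.y    # vector * vector -> dot product
        return Vector2D(self.x * other, self.y * other)   # vector * scalar -> scaling

    def __truediv__(self, scalar):
        return Vector2D(self.x / scalar, self.y / scalar)

assert Vector2D(0, 0) + Vector2D(-1, 1) == Vector2D(-1, 1)            # test_add
assert Vector2D(-1, 1) - Vector2D(2.5, -2.5) == Vector2D(-3.5, 3.5)   # test_sub
assert Vector2D(2.5, -2.5) / 5 == Vector2D(0.5, -0.5)                 # test_div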
def __init__(self, api_version=None, kind=None, name=None, preferred_version=None, server_address_by_client_cid_rs=None, versions=None):
'\n V1APIGroup - a model defined in Swagger\n '
self._api_version = None
self._kind = None
self._name = None
self._preferred_version = None
self._server_address_by_client_cid_rs = None
self._versions = None
self.discriminator = None
if (api_version is not None):
self.api_version = api_version
if (kind is not None):
self.kind = kind
self.name = name
if (preferred_version is not None):
self.preferred_version = preferred_version
if (server_address_by_client_cid_rs is not None):
self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
self.versions = versions
| -3,529,317,202,730,752,000
|
V1APIGroup - a model defined in Swagger
|
kubernetes/client/models/v1_api_group.py
|
__init__
|
Jamim/kubernetes-client-python
|
python
|
def __init__(self, api_version=None, kind=None, name=None, preferred_version=None, server_address_by_client_cid_rs=None, versions=None):
'\n \n '
self._api_version = None
self._kind = None
self._name = None
self._preferred_version = None
self._server_address_by_client_cid_rs = None
self._versions = None
self.discriminator = None
if (api_version is not None):
self.api_version = api_version
if (kind is not None):
self.kind = kind
self.name = name
if (preferred_version is not None):
self.preferred_version = preferred_version
if (server_address_by_client_cid_rs is not None):
self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
self.versions = versions
|
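A hedged construction example for the model above. name and versions are the two required fields (their setters raise on None); V1GroupVersionForDiscovery is the companion model referenced by versions and preferred_version, and the values here are illustrative.

# Building a V1APIGroup by hand; normally the API server returns these from discovery.
from kubernetes import client

group = client.V1APIGroup(
    name='apps',
    versions=[client.V1GroupVersionForDiscovery(group_version='apps/v1', version='v1')],
    preferred_version=client.V1GroupVersionForDiscovery(group_version='apps/v1', version='v1'),
)
print(group.name, [v.version for v in group.versions])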
@property
def api_version(self):
'\n Gets the api_version of this V1APIGroup.\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources\n\n :return: The api_version of this V1APIGroup.\n :rtype: str\n '
return self._api_version
| -677,712,337,667,077,900
|
Gets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1APIGroup.
:rtype: str
|
kubernetes/client/models/v1_api_group.py
|
api_version
|
Jamim/kubernetes-client-python
|
python
|
@property
def api_version(self):
'\n Gets the api_version of this V1APIGroup.\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources\n\n :return: The api_version of this V1APIGroup.\n :rtype: str\n '
return self._api_version
|
@api_version.setter
def api_version(self, api_version):
'\n Sets the api_version of this V1APIGroup.\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources\n\n :param api_version: The api_version of this V1APIGroup.\n :type: str\n '
self._api_version = api_version
| -7,464,931,077,191,557,000
|
Sets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1APIGroup.
:type: str
|
kubernetes/client/models/v1_api_group.py
|
api_version
|
Jamim/kubernetes-client-python
|
python
|
@api_version.setter
def api_version(self, api_version):
'\n Sets the api_version of this V1APIGroup.\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources\n\n :param api_version: The api_version of this V1APIGroup.\n :type: str\n '
self._api_version = api_version
|
@property
def kind(self):
'\n Gets the kind of this V1APIGroup.\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n\n :return: The kind of this V1APIGroup.\n :rtype: str\n '
return self._kind
| -5,332,471,152,682,945,000
|
Gets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1APIGroup.
:rtype: str
|
kubernetes/client/models/v1_api_group.py
|
kind
|
Jamim/kubernetes-client-python
|
python
|
@property
def kind(self):
'\n Gets the kind of this V1APIGroup.\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n\n :return: The kind of this V1APIGroup.\n :rtype: str\n '
return self._kind
|
@kind.setter
def kind(self, kind):
'\n Sets the kind of this V1APIGroup.\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n\n :param kind: The kind of this V1APIGroup.\n :type: str\n '
self._kind = kind
| 3,370,698,044,492,508,700
|
Sets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1APIGroup.
:type: str
|
kubernetes/client/models/v1_api_group.py
|
kind
|
Jamim/kubernetes-client-python
|
python
|
@kind.setter
def kind(self, kind):
'\n Sets the kind of this V1APIGroup.\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n\n :param kind: The kind of this V1APIGroup.\n :type: str\n '
self._kind = kind
|
@property
def name(self):
'\n Gets the name of this V1APIGroup.\n name is the name of the group.\n\n :return: The name of this V1APIGroup.\n :rtype: str\n '
return self._name
| -7,469,766,574,270,034,000
|
Gets the name of this V1APIGroup.
name is the name of the group.
:return: The name of this V1APIGroup.
:rtype: str
|
kubernetes/client/models/v1_api_group.py
|
name
|
Jamim/kubernetes-client-python
|
python
|
@property
def name(self):
'\n Gets the name of this V1APIGroup.\n name is the name of the group.\n\n :return: The name of this V1APIGroup.\n :rtype: str\n '
return self._name
|
@name.setter
def name(self, name):
'\n Sets the name of this V1APIGroup.\n name is the name of the group.\n\n :param name: The name of this V1APIGroup.\n :type: str\n '
if (name is None):
raise ValueError('Invalid value for `name`, must not be `None`')
self._name = name
| -5,055,398,109,243,604,000
|
Sets the name of this V1APIGroup.
name is the name of the group.
:param name: The name of this V1APIGroup.
:type: str
|
kubernetes/client/models/v1_api_group.py
|
name
|
Jamim/kubernetes-client-python
|
python
|
@name.setter
def name(self, name):
'\n Sets the name of this V1APIGroup.\n name is the name of the group.\n\n :param name: The name of this V1APIGroup.\n :type: str\n '
if (name is None):
raise ValueError('Invalid value for `name`, must not be `None`')
self._name = name
|
@property
def preferred_version(self):
'\n Gets the preferred_version of this V1APIGroup.\n preferredVersion is the version preferred by the API server, which probably is the storage version.\n\n :return: The preferred_version of this V1APIGroup.\n :rtype: V1GroupVersionForDiscovery\n '
return self._preferred_version
| -1,632,329,830,477,617,700
|
Gets the preferred_version of this V1APIGroup.
preferredVersion is the version preferred by the API server, which probably is the storage version.
:return: The preferred_version of this V1APIGroup.
:rtype: V1GroupVersionForDiscovery
|
kubernetes/client/models/v1_api_group.py
|
preferred_version
|
Jamim/kubernetes-client-python
|
python
|
@property
def preferred_version(self):
'\n Gets the preferred_version of this V1APIGroup.\n preferredVersion is the version preferred by the API server, which probably is the storage version.\n\n :return: The preferred_version of this V1APIGroup.\n :rtype: V1GroupVersionForDiscovery\n '
return self._preferred_version
|
@preferred_version.setter
def preferred_version(self, preferred_version):
'\n Sets the preferred_version of this V1APIGroup.\n preferredVersion is the version preferred by the API server, which probably is the storage version.\n\n :param preferred_version: The preferred_version of this V1APIGroup.\n :type: V1GroupVersionForDiscovery\n '
self._preferred_version = preferred_version
| 1,623,455,171,021,236,000
|
Sets the preferred_version of this V1APIGroup.
preferredVersion is the version preferred by the API server, which probably is the storage version.
:param preferred_version: The preferred_version of this V1APIGroup.
:type: V1GroupVersionForDiscovery
|
kubernetes/client/models/v1_api_group.py
|
preferred_version
|
Jamim/kubernetes-client-python
|
python
|
@preferred_version.setter
def preferred_version(self, preferred_version):
'\n Sets the preferred_version of this V1APIGroup.\n preferredVersion is the version preferred by the API server, which probably is the storage version.\n\n :param preferred_version: The preferred_version of this V1APIGroup.\n :type: V1GroupVersionForDiscovery\n '
self._preferred_version = preferred_version
|
@property
def server_address_by_client_cid_rs(self):
'\n Gets the server_address_by_client_cid_rs of this V1APIGroup.\n a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.\n\n :return: The server_address_by_client_cid_rs of this V1APIGroup.\n :rtype: list[V1ServerAddressByClientCIDR]\n '
return self._server_address_by_client_cid_rs
| -4,830,230,150,165,324,000
|
Gets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
:return: The server_address_by_client_cid_rs of this V1APIGroup.
:rtype: list[V1ServerAddressByClientCIDR]
|
kubernetes/client/models/v1_api_group.py
|
server_address_by_client_cid_rs
|
Jamim/kubernetes-client-python
|
python
|
@property
def server_address_by_client_cid_rs(self):
'\n Gets the server_address_by_client_cid_rs of this V1APIGroup.\n a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.\n\n :return: The server_address_by_client_cid_rs of this V1APIGroup.\n :rtype: list[V1ServerAddressByClientCIDR]\n '
return self._server_address_by_client_cid_rs
|
@server_address_by_client_cid_rs.setter
def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
'\n Sets the server_address_by_client_cid_rs of this V1APIGroup.\n a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.\n\n :param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup.\n :type: list[V1ServerAddressByClientCIDR]\n '
self._server_address_by_client_cid_rs = server_address_by_client_cid_rs
| -4,706,609,688,793,086,000
|
Sets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
:param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup.
:type: list[V1ServerAddressByClientCIDR]
|
kubernetes/client/models/v1_api_group.py
|
server_address_by_client_cid_rs
|
Jamim/kubernetes-client-python
|
python
|
@server_address_by_client_cid_rs.setter
def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
'\n Sets the server_address_by_client_cid_rs of this V1APIGroup.\n a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.\n\n :param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup.\n :type: list[V1ServerAddressByClientCIDR]\n '
self._server_address_by_client_cid_rs = server_address_by_client_cid_rs
|
@property
def versions(self):
'\n Gets the versions of this V1APIGroup.\n versions are the versions supported in this group.\n\n :return: The versions of this V1APIGroup.\n :rtype: list[V1GroupVersionForDiscovery]\n '
return self._versions
| 5,117,786,574,111,289,000
|
Gets the versions of this V1APIGroup.
versions are the versions supported in this group.
:return: The versions of this V1APIGroup.
:rtype: list[V1GroupVersionForDiscovery]
|
kubernetes/client/models/v1_api_group.py
|
versions
|
Jamim/kubernetes-client-python
|
python
|
@property
def versions(self):
'\n Gets the versions of this V1APIGroup.\n versions are the versions supported in this group.\n\n :return: The versions of this V1APIGroup.\n :rtype: list[V1GroupVersionForDiscovery]\n '
return self._versions
|
@versions.setter
def versions(self, versions):
'\n Sets the versions of this V1APIGroup.\n versions are the versions supported in this group.\n\n :param versions: The versions of this V1APIGroup.\n :type: list[V1GroupVersionForDiscovery]\n '
if (versions is None):
raise ValueError('Invalid value for `versions`, must not be `None`')
self._versions = versions
| 3,347,067,186,250,300,000
|
Sets the versions of this V1APIGroup.
versions are the versions supported in this group.
:param versions: The versions of this V1APIGroup.
:type: list[V1GroupVersionForDiscovery]
|
kubernetes/client/models/v1_api_group.py
|
versions
|
Jamim/kubernetes-client-python
|
python
|
@versions.setter
def versions(self, versions):
'\n Sets the versions of this V1APIGroup.\n versions are the versions supported in this group.\n\n :param versions: The versions of this V1APIGroup.\n :type: list[V1GroupVersionForDiscovery]\n '
if (versions is None):
raise ValueError('Invalid value for `versions`, must not be `None`')
self._versions = versions
|
def to_dict(self):
'\n Returns the model properties as a dict\n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
| 2,191,974,537,531,847,000
|
Returns the model properties as a dict
|
kubernetes/client/models/v1_api_group.py
|
to_dict
|
Jamim/kubernetes-client-python
|
python
|
def to_dict(self):
'\n \n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
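to_dict() walks swagger_types and recursively serialises nested models and lists of models, so a populated group flattens to plain dicts keyed by the snake_case attribute names. A short sketch, reusing the illustrative values from the construction example above:

# Round-tripping a small V1APIGroup through to_dict().
from kubernetes import client

group = client.V1APIGroup(
    name='apps',
    versions=[client.V1GroupVersionForDiscovery(group_version='apps/v1', version='v1')],
)
as_dict = group.to_dict()
print(as_dict['name'])                    # 'apps'
print(as_dict['versions'][0]['version'])  # 'v1'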
def to_str(self):
'\n Returns the string representation of the model\n '
return pformat(self.to_dict())
| -3,531,024,894,346,511,000
|
Returns the string representation of the model
|
kubernetes/client/models/v1_api_group.py
|
to_str
|
Jamim/kubernetes-client-python
|
python
|
def to_str(self):
'\n \n '
return pformat(self.to_dict())
|
def __repr__(self):
'\n For `print` and `pprint`\n '
return self.to_str()
| 5,853,962,500,611,353,000
|
For `print` and `pprint`
|
kubernetes/client/models/v1_api_group.py
|
__repr__
|
Jamim/kubernetes-client-python
|
python
|
def __repr__(self):
'\n \n '
return self.to_str()
|
def __eq__(self, other):
'\n Returns true if both objects are equal\n '
if (not isinstance(other, V1APIGroup)):
return False
return (self.__dict__ == other.__dict__)
| 2,386,882,772,361,960,400
|
Returns true if both objects are equal
|
kubernetes/client/models/v1_api_group.py
|
__eq__
|
Jamim/kubernetes-client-python
|
python
|
def __eq__(self, other):
'\n \n '
if (not isinstance(other, V1APIGroup)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'\n Returns true if both objects are not equal\n '
return (not (self == other))
| 3,600,423,175,817,510,400
|
Returns true if both objects are not equal
|
kubernetes/client/models/v1_api_group.py
|
__ne__
|
Jamim/kubernetes-client-python
|
python
|
def __ne__(self, other):
'\n \n '
return (not (self == other))
|
def __init__(self, seed: int, brain: BrainParameters, trainer_params: Dict[(str, Any)], is_training: bool, load: bool):
'\n Policy for Proximal Policy Optimization Networks.\n :param seed: Random seed.\n :param brain: Assigned Brain object.\n :param trainer_params: Defined training parameters.\n :param is_training: Whether the model should be trained.\n :param load: Whether a pre-trained model will be loaded or a new one created.\n '
super().__init__(seed, brain, trainer_params)
reward_signal_configs = trainer_params['reward_signals']
self.inference_dict: Dict[(str, tf.Tensor)] = {}
self.update_dict: Dict[(str, tf.Tensor)] = {}
self.stats_name_to_update_name = {'Losses/Value Loss': 'value_loss', 'Losses/Policy Loss': 'policy_loss'}
self.create_model(brain, trainer_params, reward_signal_configs, is_training, load, seed)
self.create_reward_signals(reward_signal_configs)
with self.graph.as_default():
self.bc_module: Optional[BCModule] = None
if ('pretraining' in trainer_params):
BCModule.check_config(trainer_params['pretraining'])
self.bc_module = BCModule(self, policy_learning_rate=trainer_params['learning_rate'], default_batch_size=trainer_params['batch_size'], default_num_epoch=trainer_params['num_epoch'], **trainer_params['pretraining'])
if load:
self._load_graph()
else:
self._initialize_graph()
| 3,863,017,292,052,306,000
|
Policy for Proximal Policy Optimization Networks.
:param seed: Random seed.
:param brain: Assigned Brain object.
:param trainer_params: Defined training parameters.
:param is_training: Whether the model should be trained.
:param load: Whether a pre-trained model will be loaded or a new one created.
|
ml-agents/mlagents/trainers/ppo/policy.py
|
__init__
|
DdATM/ML-FlappyBird
|
python
|
def __init__(self, seed: int, brain: BrainParameters, trainer_params: Dict[(str, Any)], is_training: bool, load: bool):
'\n Policy for Proximal Policy Optimization Networks.\n :param seed: Random seed.\n :param brain: Assigned Brain object.\n :param trainer_params: Defined training parameters.\n :param is_training: Whether the model should be trained.\n :param load: Whether a pre-trained model will be loaded or a new one created.\n '
super().__init__(seed, brain, trainer_params)
reward_signal_configs = trainer_params['reward_signals']
self.inference_dict: Dict[(str, tf.Tensor)] = {}
self.update_dict: Dict[(str, tf.Tensor)] = {}
self.stats_name_to_update_name = {'Losses/Value Loss': 'value_loss', 'Losses/Policy Loss': 'policy_loss'}
self.create_model(brain, trainer_params, reward_signal_configs, is_training, load, seed)
self.create_reward_signals(reward_signal_configs)
with self.graph.as_default():
self.bc_module: Optional[BCModule] = None
if ('pretraining' in trainer_params):
BCModule.check_config(trainer_params['pretraining'])
self.bc_module = BCModule(self, policy_learning_rate=trainer_params['learning_rate'], default_batch_size=trainer_params['batch_size'], default_num_epoch=trainer_params['num_epoch'], **trainer_params['pretraining'])
if load:
self._load_graph()
else:
self._initialize_graph()
|
def create_model(self, brain, trainer_params, reward_signal_configs, is_training, load, seed):
'\n Create PPO model\n :param brain: Assigned Brain object.\n :param trainer_params: Defined training parameters.\n :param reward_signal_configs: Reward signal config\n :param seed: Random seed.\n '
with self.graph.as_default():
self.model = PPOModel(brain=brain, lr=float(trainer_params['learning_rate']), lr_schedule=LearningRateSchedule(trainer_params.get('learning_rate_schedule', 'linear')), h_size=int(trainer_params['hidden_units']), epsilon=float(trainer_params['epsilon']), beta=float(trainer_params['beta']), max_step=float(trainer_params['max_steps']), normalize=trainer_params['normalize'], use_recurrent=trainer_params['use_recurrent'], num_layers=int(trainer_params['num_layers']), m_size=self.m_size, seed=seed, stream_names=list(reward_signal_configs.keys()), vis_encode_type=EncoderType(trainer_params.get('vis_encode_type', 'simple')))
self.model.create_ppo_optimizer()
self.inference_dict.update({'action': self.model.output, 'log_probs': self.model.all_log_probs, 'value_heads': self.model.value_heads, 'value': self.model.value, 'entropy': self.model.entropy, 'learning_rate': self.model.learning_rate})
if self.use_continuous_act:
self.inference_dict['pre_action'] = self.model.output_pre
if self.use_recurrent:
self.inference_dict['memory_out'] = self.model.memory_out
self.total_policy_loss = self.model.abs_policy_loss
self.update_dict.update({'value_loss': self.model.value_loss, 'policy_loss': self.total_policy_loss, 'update_batch': self.model.update_batch})
| -7,139,445,690,236,091,000
|
Create PPO model
:param brain: Assigned Brain object.
:param trainer_params: Defined training parameters.
:param reward_signal_configs: Reward signal config
:param seed: Random seed.
|
ml-agents/mlagents/trainers/ppo/policy.py
|
create_model
|
DdATM/ML-FlappyBird
|
python
|
def create_model(self, brain, trainer_params, reward_signal_configs, is_training, load, seed):
'\n Create PPO model\n :param brain: Assigned Brain object.\n :param trainer_params: Defined training parameters.\n :param reward_signal_configs: Reward signal config\n :param seed: Random seed.\n '
with self.graph.as_default():
self.model = PPOModel(brain=brain, lr=float(trainer_params['learning_rate']), lr_schedule=LearningRateSchedule(trainer_params.get('learning_rate_schedule', 'linear')), h_size=int(trainer_params['hidden_units']), epsilon=float(trainer_params['epsilon']), beta=float(trainer_params['beta']), max_step=float(trainer_params['max_steps']), normalize=trainer_params['normalize'], use_recurrent=trainer_params['use_recurrent'], num_layers=int(trainer_params['num_layers']), m_size=self.m_size, seed=seed, stream_names=list(reward_signal_configs.keys()), vis_encode_type=EncoderType(trainer_params.get('vis_encode_type', 'simple')))
self.model.create_ppo_optimizer()
self.inference_dict.update({'action': self.model.output, 'log_probs': self.model.all_log_probs, 'value_heads': self.model.value_heads, 'value': self.model.value, 'entropy': self.model.entropy, 'learning_rate': self.model.learning_rate})
if self.use_continuous_act:
self.inference_dict['pre_action'] = self.model.output_pre
if self.use_recurrent:
self.inference_dict['memory_out'] = self.model.memory_out
self.total_policy_loss = self.model.abs_policy_loss
self.update_dict.update({'value_loss': self.model.value_loss, 'policy_loss': self.total_policy_loss, 'update_batch': self.model.update_batch})
|
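
The __init__ and create_model methods above pull their configuration out of a single trainer_params dictionary. The dictionary below only illustrates the keys those two methods actually read; the values are placeholder numbers, not recommended settings.

trainer_params = {
    'learning_rate': 3.0e-4,
    'learning_rate_schedule': 'linear',   # optional; defaults to 'linear'
    'hidden_units': 128,
    'epsilon': 0.2,
    'beta': 5.0e-3,
    'max_steps': 5.0e5,
    'normalize': False,
    'use_recurrent': False,
    'num_layers': 2,
    'vis_encode_type': 'simple',          # optional; defaults to 'simple'
    'batch_size': 1024,
    'num_epoch': 3,
    'reward_signals': {'extrinsic': {'strength': 1.0, 'gamma': 0.99}},  # inner keys depend on the signal type
    # 'pretraining': {...},               # optional; enables the BCModule
}
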
def create_reward_signals(self, reward_signal_configs):
'\n Create reward signals\n :param reward_signal_configs: Reward signal config.\n '
self.reward_signals = {}
with self.graph.as_default():
for (reward_signal, config) in reward_signal_configs.items():
self.reward_signals[reward_signal] = create_reward_signal(self, self.model, reward_signal, config)
self.update_dict.update(self.reward_signals[reward_signal].update_dict)
| -167,883,442,589,641,500
|
Create reward signals
:param reward_signal_configs: Reward signal config.
|
ml-agents/mlagents/trainers/ppo/policy.py
|
create_reward_signals
|
DdATM/ML-FlappyBird
|
python
|
def create_reward_signals(self, reward_signal_configs):
'\n Create reward signals\n :param reward_signal_configs: Reward signal config.\n '
self.reward_signals = {}
with self.graph.as_default():
for (reward_signal, config) in reward_signal_configs.items():
self.reward_signals[reward_signal] = create_reward_signal(self, self.model, reward_signal, config)
self.update_dict.update(self.reward_signals[reward_signal].update_dict)
|
@timed
def evaluate(self, brain_info):
'\n Evaluates policy for the agent experiences provided.\n :param brain_info: BrainInfo object containing inputs.\n :return: Outputs from network as defined by self.inference_dict.\n '
feed_dict = {self.model.batch_size: len(brain_info.vector_observations), self.model.sequence_length: 1}
epsilon = None
if self.use_recurrent:
if (not self.use_continuous_act):
feed_dict[self.model.prev_action] = self.retrieve_previous_action(brain_info.agents)
feed_dict[self.model.memory_in] = self.retrieve_memories(brain_info.agents)
if self.use_continuous_act:
epsilon = np.random.normal(size=(len(brain_info.vector_observations), self.model.act_size[0]))
feed_dict[self.model.epsilon] = epsilon
feed_dict = self.fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out['random_normal_epsilon'] = epsilon
return run_out
| 3,825,865,838,681,638,000
|
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
|
ml-agents/mlagents/trainers/ppo/policy.py
|
evaluate
|
DdATM/ML-FlappyBird
|
python
|
@timed
def evaluate(self, brain_info):
'\n Evaluates policy for the agent experiences provided.\n :param brain_info: BrainInfo object containing inputs.\n :return: Outputs from network as defined by self.inference_dict.\n '
feed_dict = {self.model.batch_size: len(brain_info.vector_observations), self.model.sequence_length: 1}
epsilon = None
if self.use_recurrent:
if (not self.use_continuous_act):
feed_dict[self.model.prev_action] = self.retrieve_previous_action(brain_info.agents)
feed_dict[self.model.memory_in] = self.retrieve_memories(brain_info.agents)
if self.use_continuous_act:
epsilon = np.random.normal(size=(len(brain_info.vector_observations), self.model.act_size[0]))
feed_dict[self.model.epsilon] = epsilon
feed_dict = self.fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out['random_normal_epsilon'] = epsilon
return run_out
|
@timed
def update(self, mini_batch, num_sequences):
'\n Performs update on model.\n :param mini_batch: Batch of experiences.\n :param num_sequences: Number of sequences to process.\n :return: Results of update.\n '
feed_dict = self.construct_feed_dict(self.model, mini_batch, num_sequences)
stats_needed = self.stats_name_to_update_name
update_stats = {}
for (_, reward_signal) in self.reward_signals.items():
feed_dict.update(reward_signal.prepare_update(self.model, mini_batch, num_sequences))
stats_needed.update(reward_signal.stats_name_to_update_name)
update_vals = self._execute_model(feed_dict, self.update_dict)
for (stat_name, update_name) in stats_needed.items():
update_stats[stat_name] = update_vals[update_name]
return update_stats
| -1,626,452,048,094,340,600
|
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
|
ml-agents/mlagents/trainers/ppo/policy.py
|
update
|
DdATM/ML-FlappyBird
|
python
|
@timed
def update(self, mini_batch, num_sequences):
'\n Performs update on model.\n :param mini_batch: Batch of experiences.\n :param num_sequences: Number of sequences to process.\n :return: Results of update.\n '
feed_dict = self.construct_feed_dict(self.model, mini_batch, num_sequences)
stats_needed = self.stats_name_to_update_name
update_stats = {}
for (_, reward_signal) in self.reward_signals.items():
feed_dict.update(reward_signal.prepare_update(self.model, mini_batch, num_sequences))
stats_needed.update(reward_signal.stats_name_to_update_name)
update_vals = self._execute_model(feed_dict, self.update_dict)
for (stat_name, update_name) in stats_needed.items():
update_stats[stat_name] = update_vals[update_name]
return update_stats
|
def get_value_estimates(self, brain_info: BrainInfo, idx: int, done: bool) -> Dict[(str, float)]:
'\n Generates value estimates for bootstrapping.\n :param brain_info: BrainInfo to be used for bootstrapping.\n :param idx: Index in BrainInfo of agent.\n :param done: Whether or not this is the last element of the episode, in which case the value estimate will be 0.\n :return: The value estimate dictionary with key being the name of the reward signal and the value the\n corresponding value estimate.\n '
feed_dict: Dict[(tf.Tensor, Any)] = {self.model.batch_size: 1, self.model.sequence_length: 1}
for i in range(len(brain_info.visual_observations)):
feed_dict[self.model.visual_in[i]] = [brain_info.visual_observations[i][idx]]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = [brain_info.vector_observations[idx]]
agent_id = brain_info.agents[idx]
if self.use_recurrent:
feed_dict[self.model.memory_in] = self.retrieve_memories([agent_id])
if ((not self.use_continuous_act) and self.use_recurrent):
feed_dict[self.model.prev_action] = self.retrieve_previous_action([agent_id])
value_estimates = self.sess.run(self.model.value_heads, feed_dict)
value_estimates = {k: float(v) for (k, v) in value_estimates.items()}
if done:
for k in value_estimates:
if self.reward_signals[k].use_terminal_states:
value_estimates[k] = 0.0
return value_estimates
| -2,508,050,501,292,764,700
|
Generates value estimates for bootstrapping.
:param brain_info: BrainInfo to be used for bootstrapping.
:param idx: Index in BrainInfo of agent.
:param done: Whether or not this is the last element of the episode, in which case the value estimate will be 0.
:return: The value estimate dictionary with key being the name of the reward signal and the value the
corresponding value estimate.
|
ml-agents/mlagents/trainers/ppo/policy.py
|
get_value_estimates
|
DdATM/ML-FlappyBird
|
python
|
def get_value_estimates(self, brain_info: BrainInfo, idx: int, done: bool) -> Dict[(str, float)]:
'\n Generates value estimates for bootstrapping.\n :param brain_info: BrainInfo to be used for bootstrapping.\n :param idx: Index in BrainInfo of agent.\n :param done: Whether or not this is the last element of the episode, in which case the value estimate will be 0.\n :return: The value estimate dictionary with key being the name of the reward signal and the value the\n corresponding value estimate.\n '
feed_dict: Dict[(tf.Tensor, Any)] = {self.model.batch_size: 1, self.model.sequence_length: 1}
for i in range(len(brain_info.visual_observations)):
feed_dict[self.model.visual_in[i]] = [brain_info.visual_observations[i][idx]]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = [brain_info.vector_observations[idx]]
agent_id = brain_info.agents[idx]
if self.use_recurrent:
feed_dict[self.model.memory_in] = self.retrieve_memories([agent_id])
if ((not self.use_continuous_act) and self.use_recurrent):
feed_dict[self.model.prev_action] = self.retrieve_previous_action([agent_id])
value_estimates = self.sess.run(self.model.value_heads, feed_dict)
value_estimates = {k: float(v) for (k, v) in value_estimates.items()}
if done:
for k in value_estimates:
if self.reward_signals[k].use_terminal_states:
value_estimates[k] = 0.0
return value_estimates
|
def mpi_split_evaluator_run(target_evaluator, termination_params=None, core_params=None, exception_handling_params=None, log=None, gradient_only=False, line_search=True):
'The supported scenario is that each MPI worker rank has a target evaluator\n that has part of the data. Each rank calculates a bit of the functional and\n gradients, but then mpi reduce is used to sum them all up. There has been\n no low-level redesign to support MPI. In particular, the ext.minimizer is\n run (wastefully) by every worker rank, using the same data. It is assumed that\n the calculation of compute_functional_and_gradients() is overwhelmingly the rate\n limiting step, and that is what MPI parallelism is intended to distribute here.'
from libtbx.mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if (termination_params is None):
termination_params = termination_parameters()
if (core_params is None):
core_params = core_parameters()
if (exception_handling_params is None):
exception_handling_params = exception_handling_parameters()
x = target_evaluator.x
if (log is not None):
print('lbfgs minimizer():', file=log)
print(' x.size():', x.size(), file=log)
print(' m:', core_params.m, file=log)
print(' maxfev:', core_params.maxfev, file=log)
print(' gtol:', core_params.gtol, file=log)
print(' xtol:', core_params.xtol, file=log)
print(' stpmin:', core_params.stpmin, file=log)
print(' stpmax:', core_params.stpmax, file=log)
print('lbfgs traditional_convergence_test:', termination_params.traditional_convergence_test, file=log)
minimizer = ext.minimizer(x.size(), core_params.m, core_params.maxfev, core_params.gtol, core_params.xtol, core_params.stpmin, core_params.stpmax)
if termination_params.traditional_convergence_test:
is_converged = ext.traditional_convergence_test(x.size(), termination_params.traditional_convergence_test_eps)
else:
is_converged = ext.drop_convergence_test(n_test_points=termination_params.drop_convergence_test_n_test_points, max_drop_eps=termination_params.drop_convergence_test_max_drop_eps, iteration_coefficient=termination_params.drop_convergence_test_iteration_coefficient)
callback_after_step = getattr(target_evaluator, 'callback_after_step', None)
diag_mode = getattr(target_evaluator, 'diag_mode', None)
if (diag_mode is not None):
assert (diag_mode in ['once', 'always'])
(f_min, x_min) = (None, None)
(f, g) = (None, None)
try:
while 1:
if (diag_mode is None):
(f_term, g_term) = target_evaluator.compute_functional_and_gradients()
f_total = comm.reduce(f_term, MPI.SUM, 0)
g_total = comm.reduce(g_term, MPI.SUM, 0)
if (rank == 0):
transmit = (f_total, g_total)
else:
transmit = None
(f, g) = comm.bcast(transmit, root=0)
if (False and (rank == 0)):
print(('%s %10.4f' % ('MPI stp', f)), '[', ' '.join([('%10.4f' % a) for a in x]), ']')
d = None
else:
(f, g, d) = target_evaluator.compute_functional_gradients_diag()
if (diag_mode == 'once'):
diag_mode = None
if (f_min is None):
if (not termination_params.traditional_convergence_test):
is_converged(f)
(f_min, x_min) = (f, x.deep_copy())
elif (f_min > f):
(f_min, x_min) = (f, x.deep_copy())
if (log is not None):
print(('lbfgs minimizer.run(): f=%.6g, |g|=%.6g, x_min=%.6g, x_mean=%.6g, x_max=%.6g' % (f, g.norm(), flex.min(x), flex.mean(x), flex.max(x))), file=log)
if (d is None):
if minimizer.run(x, f, g, gradient_only, line_search):
continue
elif minimizer.run(x, f, g, d, gradient_only, line_search):
continue
if (log is not None):
print('lbfgs minimizer step', file=log)
if (callback_after_step is not None):
if (callback_after_step(minimizer) is True):
if (log is not None):
print('lbfgs minimizer stop: callback_after_step is True', file=log)
break
if termination_params.traditional_convergence_test:
if ((minimizer.iter() >= termination_params.min_iterations) and is_converged(x, g)):
if (log is not None):
print('lbfgs minimizer stop: traditional_convergence_test', file=log)
break
elif is_converged(f):
if (log is not None):
print('lbfgs minimizer stop: drop_convergence_test', file=log)
break
if ((termination_params.max_iterations is not None) and (minimizer.iter() >= termination_params.max_iterations)):
if (log is not None):
print('lbfgs minimizer stop: max_iterations', file=log)
break
if ((termination_params.max_calls is not None) and (minimizer.nfun() > termination_params.max_calls)):
if (log is not None):
print('lbfgs minimizer stop: max_calls', file=log)
break
if (d is None):
if (not minimizer.run(x, f, g, gradient_only, line_search)):
break
elif (not minimizer.run(x, f, g, d, gradient_only, line_search)):
break
except RuntimeError as e:
minimizer.error = str(e)
if (log is not None):
print('lbfgs minimizer exception:', str(e), file=log)
if (x_min is not None):
x.clear()
x.extend(x_min)
error_classification = exception_handling_params.filter(minimizer.error, x.size(), x, g)
if (error_classification > 0):
raise
elif (error_classification < 0):
minimizer.is_unusual_error = True
else:
minimizer.is_unusual_error = False
else:
minimizer.error = None
minimizer.is_unusual_error = None
if (log is not None):
print('lbfgs minimizer done.', file=log)
return minimizer
| 2,306,094,089,549,778,000
|
The supported scenario is that each MPI worker rank has a target evaluator
that has part of the data. Each rank calculates a bit of the functional and
gradients, but then mpi reduce is used to sum them all up. There has been
no low-level redesign to support MPI. In particular, the ext.minimizer is
run (wastefully) by every worker rank, using the same data. It is assumed that
the calculation of compute_functional_and_gradients() is overwhelmingly the rate
limiting step, and that is what MPI parallelism is intended to distribute here.
|
modules/cctbx_project/scitbx/lbfgs/tst_mpi_split_evaluator.py
|
mpi_split_evaluator_run
|
jorgediazjr/dials-dev20191018
|
python
|
def mpi_split_evaluator_run(target_evaluator, termination_params=None, core_params=None, exception_handling_params=None, log=None, gradient_only=False, line_search=True):
'The supported scenario is that each MPI worker rank has a target evaluator\n that has part of the data. Each rank calculates a bit of the functional and\n gradients, but then mpi reduce is used to sum them all up. There has been\n no low-level redesign to support MPI. In particular, the ext.minimizer is\n run (wastefully) by every worker rank, using the same data. It is assumed that\n the calculation of compute_functional_and_gradients() is overwhelmingly the rate\n limiting step, and that is what MPI parallelism is intended to distribute here.'
from libtbx.mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if (termination_params is None):
termination_params = termination_parameters()
if (core_params is None):
core_params = core_parameters()
if (exception_handling_params is None):
exception_handling_params = exception_handling_parameters()
x = target_evaluator.x
if (log is not None):
print('lbfgs minimizer():', file=log)
print(' x.size():', x.size(), file=log)
print(' m:', core_params.m, file=log)
print(' maxfev:', core_params.maxfev, file=log)
print(' gtol:', core_params.gtol, file=log)
print(' xtol:', core_params.xtol, file=log)
print(' stpmin:', core_params.stpmin, file=log)
print(' stpmax:', core_params.stpmax, file=log)
print('lbfgs traditional_convergence_test:', termination_params.traditional_convergence_test, file=log)
minimizer = ext.minimizer(x.size(), core_params.m, core_params.maxfev, core_params.gtol, core_params.xtol, core_params.stpmin, core_params.stpmax)
if termination_params.traditional_convergence_test:
is_converged = ext.traditional_convergence_test(x.size(), termination_params.traditional_convergence_test_eps)
else:
is_converged = ext.drop_convergence_test(n_test_points=termination_params.drop_convergence_test_n_test_points, max_drop_eps=termination_params.drop_convergence_test_max_drop_eps, iteration_coefficient=termination_params.drop_convergence_test_iteration_coefficient)
callback_after_step = getattr(target_evaluator, 'callback_after_step', None)
diag_mode = getattr(target_evaluator, 'diag_mode', None)
if (diag_mode is not None):
assert (diag_mode in ['once', 'always'])
(f_min, x_min) = (None, None)
(f, g) = (None, None)
try:
while 1:
if (diag_mode is None):
(f_term, g_term) = target_evaluator.compute_functional_and_gradients()
f_total = comm.reduce(f_term, MPI.SUM, 0)
g_total = comm.reduce(g_term, MPI.SUM, 0)
if (rank == 0):
transmit = (f_total, g_total)
else:
transmit = None
(f, g) = comm.bcast(transmit, root=0)
if (False and (rank == 0)):
print(('%s %10.4f' % ('MPI stp', f)), '[', ' '.join([('%10.4f' % a) for a in x]), ']')
d = None
else:
(f, g, d) = target_evaluator.compute_functional_gradients_diag()
if (diag_mode == 'once'):
diag_mode = None
if (f_min is None):
if (not termination_params.traditional_convergence_test):
is_converged(f)
(f_min, x_min) = (f, x.deep_copy())
elif (f_min > f):
(f_min, x_min) = (f, x.deep_copy())
if (log is not None):
print(('lbfgs minimizer.run(): f=%.6g, |g|=%.6g, x_min=%.6g, x_mean=%.6g, x_max=%.6g' % (f, g.norm(), flex.min(x), flex.mean(x), flex.max(x))), file=log)
if (d is None):
if minimizer.run(x, f, g, gradient_only, line_search):
continue
elif minimizer.run(x, f, g, d, gradient_only, line_search):
continue
if (log is not None):
print('lbfgs minimizer step', file=log)
if (callback_after_step is not None):
if (callback_after_step(minimizer) is True):
if (log is not None):
print('lbfgs minimizer stop: callback_after_step is True', file=log)
break
if termination_params.traditional_convergence_test:
if ((minimizer.iter() >= termination_params.min_iterations) and is_converged(x, g)):
if (log is not None):
print('lbfgs minimizer stop: traditional_convergence_test', file=log)
break
elif is_converged(f):
if (log is not None):
print('lbfgs minimizer stop: drop_convergence_test', file=log)
break
if ((termination_params.max_iterations is not None) and (minimizer.iter() >= termination_params.max_iterations)):
if (log is not None):
print('lbfgs minimizer stop: max_iterations', file=log)
break
if ((termination_params.max_calls is not None) and (minimizer.nfun() > termination_params.max_calls)):
if (log is not None):
print('lbfgs minimizer stop: max_calls', file=log)
break
if (d is None):
if (not minimizer.run(x, f, g, gradient_only, line_search)):
break
elif (not minimizer.run(x, f, g, d, gradient_only, line_search)):
break
except RuntimeError as e:
minimizer.error = str(e)
if (log is not None):
print('lbfgs minimizer exception:', str(e), file=log)
if (x_min is not None):
x.clear()
x.extend(x_min)
error_classification = exception_handling_params.filter(minimizer.error, x.size(), x, g)
if (error_classification > 0):
raise
elif (error_classification < 0):
minimizer.is_unusual_error = True
else:
minimizer.is_unusual_error = False
else:
minimizer.error = None
minimizer.is_unusual_error = None
if (log is not None):
print('lbfgs minimizer done.', file=log)
return minimizer
|
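
The heart of mpi_split_evaluator_run is a reduce-then-broadcast step: every rank computes a partial functional and gradient from its share of the data, rank 0 sums them, and the sums are broadcast back so each rank drives an identical minimizer. Below is a stripped-down, standalone sketch of just that communication pattern, using mpi4py and NumPy in place of the scitbx flex arrays and ext.minimizer; the names and the toy objective are illustrative only.

# Run with e.g.: mpirun -n 4 python mpi_sketch.py
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Pretend each rank owns a disjoint slice of the data.
local_data = np.arange(rank * 4, (rank + 1) * 4, dtype=float)
x = np.array([0.5])  # every rank holds the same parameter vector

def local_functional_and_gradient(x):
    # Each rank computes its share of a least-squares objective.
    residuals = local_data - x[0]
    return float(residuals @ residuals), np.array([-2.0 * residuals.sum()])

f_term, g_term = local_functional_and_gradient(x)
f_total = comm.reduce(f_term, op=MPI.SUM, root=0)
g_total = comm.reduce(g_term, op=MPI.SUM, root=0)

# Rank 0 holds the summed values; broadcast them so every rank sees the
# same (f, g) before taking an (identical) optimizer step.
f, g = comm.bcast((f_total, g_total) if rank == 0 else None, root=0)
if rank == 0:
    print('f =', f, 'g =', g)
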
def patient_shutdown(self):
'\n Stops the serve_forever loop.\n\n Blocks until the loop has finished. This must be called while\n serve_forever() is running in another thread, or it will\n deadlock.\n '
self._StoppableWSGIServer__serving = False
if (not self._StoppableWSGIServer__is_shut_down.wait(30)):
        raise RuntimeError('Failed to shutdown the live test server in 30 seconds. The server might be stuck or generating a slow response.')
| -3,871,733,391,667,745,300
|
Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
|
autocomplete_light/tests/test_widget.py
|
patient_shutdown
|
kimetrica/django-autocomplete-light
|
python
|
def patient_shutdown(self):
'\n Stops the serve_forever loop.\n\n Blocks until the loop has finished. This must be called while\n serve_forever() is running in another thread, or it will\n deadlock.\n '
self._StoppableWSGIServer__serving = False
if (not self._StoppableWSGIServer__is_shut_down.wait(30)):
        raise RuntimeError('Failed to shutdown the live test server in 30 seconds. The server might be stuck or generating a slow response.')
|
def testDecompressLimited(self):
'Decompressed data buffering should be limited'
bomb = bz2.compress((b'\x00' * int(2000000.0)), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\x00')
max_decomp = (1 + DEFAULT_BUFFER_SIZE)
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp, 'Excessive amount of data was decompressed')
| -5,949,346,707,476,452,000
|
Decompressed data buffering should be limited
|
www/src/Lib/test/test_bz2.py
|
testDecompressLimited
|
Froggo8311/brython
|
python
|
def testDecompressLimited(self):
bomb = bz2.compress((b'\x00' * int(2000000.0)), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\x00')
max_decomp = (1 + DEFAULT_BUFFER_SIZE)
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp, 'Excessive amount of data was decompressed')
|
def message(err_number):
'Return the error message associated with the error code. Positive\n error codes are interpreted as system error numbers, and\n negative error codes are interpreted as GEOPM error numbers.\n\n Args:\n err_number (int): Error code to be interpreted.\n\n Returns:\n str: Error message associated with error code.\n\n '
global _ffi
global _dl
name_max = 1024
result_cstr = _ffi.new('char[]', name_max)
_dl.geopm_error_message(err_number, result_cstr, name_max)
return _ffi.string(result_cstr).decode()
| 534,698,338,327,494,500
|
Return the error message associated with the error code. Positive
error codes are interpreted as system error numbers, and
negative error codes are interpreted as GEOPM error numbers.
Args:
err_number (int): Error code to be interpreted.
Returns:
str: Error message associated with error code.
|
scripts/geopmpy/error.py
|
message
|
RyoTTa/geopm
|
python
|
def message(err_number):
'Return the error message associated with the error code. Positive\n error codes are interpreted as system error numbers, and\n negative error codes are interpreted as GEOPM error numbers.\n\n Args:\n err_number (int): Error code to be interpreted.\n\n Returns:\n str: Error message associated with error code.\n\n '
global _ffi
global _dl
name_max = 1024
result_cstr = _ffi.new('char[]', name_max)
_dl.geopm_error_message(err_number, result_cstr, name_max)
return _ffi.string(result_cstr).decode()
|
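
Assuming the package is importable as geopmpy (the file above lives at scripts/geopmpy/error.py), translating an error code is a single call; positive codes are treated as system errno values and negative codes as GEOPM-specific errors. The codes below are only examples.

from geopmpy import error  # assumed import path

print(error.message(2))    # positive: a system errno, e.g. 'No such file or directory'
print(error.message(-1))   # negative: a GEOPM runtime error string
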
def get_samples_from_trace(trace, with_intermediates=False):
' Extracts all sample values from a numpyro trace.\n\n :param trace: trace object obtained from `numpyro.handlers.trace().get_trace()`\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
samples = {k: ((v['value'], v['intermediates']) if with_intermediates else v['value']) for (k, v) in trace.items() if (v['type'] == 'sample')}
return samples
| 2,113,946,218,417,119,200
|
Extracts all sample values from a numpyro trace.
:param trace: trace object obtained from `numpyro.handlers.trace().get_trace()`
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
|
d3p/modelling.py
|
get_samples_from_trace
|
DPBayes/d3p
|
python
|
def get_samples_from_trace(trace, with_intermediates=False):
' Extracts all sample values from a numpyro trace.\n\n :param trace: trace object obtained from `numpyro.handlers.trace().get_trace()`\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
samples = {k: ((v['value'], v['intermediates']) if with_intermediates else v['value']) for (k, v) in trace.items() if (v['type'] == 'sample')}
return samples
|
def sample_prior_predictive(rng_key, model, model_args, substitutes=None, with_intermediates=False, **kwargs):
' Samples once from the prior predictive distribution.\n\n Individual sample sites, as designated by `sample`, can be frozen to\n pre-determined values given in `substitutes`. In that case, values for these\n sites are not actually sampled but the value provided in `substitutes` is\n returned as the sample. This facilitates conditional sampling.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations from a single prior draw, the same is true for the\n values returned by this function.\n\n :param rng_key: Jax PRNG key\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param substitutes: An optional dictionary of frozen substitutes for\n sample sites.\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model function.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
if (substitutes is None):
substitutes = dict()
model = seed(substitute(model, data=substitutes), rng_key)
t = trace(model).get_trace(*model_args, **kwargs)
return get_samples_from_trace(t, with_intermediates)
| 4,661,393,722,677,076,000
|
Samples once from the prior predictive distribution.
Individual sample sites, as designated by `sample`, can be frozen to
pre-determined values given in `substitutes`. In that case, values for these
sites are not actually sampled but the value provided in `substitutes` is
returned as the sample. This facilitates conditional sampling.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations from a single prior draw, the same is true for the
values returned by this function.
:param rng_key: Jax PRNG key
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param substitutes: An optional dictionary of frozen substitutes for
sample sites.
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model function.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
|
d3p/modelling.py
|
sample_prior_predictive
|
DPBayes/d3p
|
python
|
def sample_prior_predictive(rng_key, model, model_args, substitutes=None, with_intermediates=False, **kwargs):
' Samples once from the prior predictive distribution.\n\n Individual sample sites, as designated by `sample`, can be frozen to\n pre-determined values given in `substitutes`. In that case, values for these\n sites are not actually sampled but the value provided in `substitutes` is\n returned as the sample. This facilitates conditional sampling.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations from a single prior draw, the same is true for the\n values returned by this function.\n\n :param rng_key: Jax PRNG key\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param substitutes: An optional dictionary of frozen substitutes for\n sample sites.\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model function.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
if (substitutes is None):
substitutes = dict()
model = seed(substitute(model, data=substitutes), rng_key)
t = trace(model).get_trace(*model_args, **kwargs)
return get_samples_from_trace(t, with_intermediates)
|
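
A small end-to-end illustration of the two helpers above (get_samples_from_trace is what sample_prior_predictive uses internally). The model is hypothetical, and the import assumes the functions are exposed from d3p.modelling as in the path shown above.

import jax
import numpyro
import numpyro.distributions as dist
from d3p.modelling import sample_prior_predictive  # assumed import path

def model(n):
    mu = numpyro.sample('mu', dist.Normal(0., 1.))
    with numpyro.plate('batch', n):
        numpyro.sample('x', dist.Normal(mu, 1.))

samples = sample_prior_predictive(jax.random.PRNGKey(0), model, (10,))
# samples['mu'] is a single prior draw; samples['x'] holds 10 observations
# generated from that draw. Passing substitutes={'mu': 0.0} would pin mu.
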
def sample_posterior_predictive(rng_key, model, model_args, guide, guide_args, params, with_intermediates=False, **kwargs):
' Samples once from the posterior predictive distribution.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations from a single posterior draw, the same is true for the\n values returned by this function.\n\n :param rng_key: Jax PRNG key\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param guide: Function representing the variational distribution (the guide)\n using numpyro distributions as well as the `sample` and `param` primitives\n :param guide_args: Arguments to the guide function\n :param params: A dictionary providing values for the parameters\n designated by call to `param` in the guide\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model and guide functions.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
(model_rng_key, guide_rng_key) = jax.random.split(rng_key)
guide = seed(substitute(guide, data=params), guide_rng_key)
guide_samples = get_samples_from_trace(trace(guide).get_trace(*guide_args, **kwargs), with_intermediates)
model_params = dict(**params)
if with_intermediates:
model_params.update({k: v[0] for (k, v) in guide_samples.items()})
else:
model_params.update({k: v for (k, v) in guide_samples.items()})
model = seed(substitute(model, data=model_params), model_rng_key)
model_samples = get_samples_from_trace(trace(model).get_trace(*model_args, **kwargs), with_intermediates)
guide_samples.update(model_samples)
return guide_samples
| -7,343,252,411,584,260,000
|
Samples once from the posterior predictive distribution.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations from a single posterior draw, the same is true for the
values returned by this function.
:param rng_key: Jax PRNG key
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param guide: Function representing the variational distribution (the guide)
using numpyro distributions as well as the `sample` and `param` primitives
:param guide_args: Arguments to the guide function
:param params: A dictionary providing values for the parameters
designated by call to `param` in the guide
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model and guide functions.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
|
d3p/modelling.py
|
sample_posterior_predictive
|
DPBayes/d3p
|
python
|
def sample_posterior_predictive(rng_key, model, model_args, guide, guide_args, params, with_intermediates=False, **kwargs):
' Samples once from the posterior predictive distribution.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations from a single posterior draw, the same is true for the\n values returned by this function.\n\n :param rng_key: Jax PRNG key\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param guide: Function representing the variational distribution (the guide)\n using numpyro distributions as well as the `sample` and `param` primitives\n :param guide_args: Arguments to the guide function\n :param params: A dictionary providing values for the parameters\n designated by call to `param` in the guide\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model and guide functions.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
(model_rng_key, guide_rng_key) = jax.random.split(rng_key)
guide = seed(substitute(guide, data=params), guide_rng_key)
guide_samples = get_samples_from_trace(trace(guide).get_trace(*guide_args, **kwargs), with_intermediates)
model_params = dict(**params)
if with_intermediates:
model_params.update({k: v[0] for (k, v) in guide_samples.items()})
else:
model_params.update({k: v for (k, v) in guide_samples.items()})
model = seed(substitute(model, data=model_params), model_rng_key)
model_samples = get_samples_from_trace(trace(model).get_trace(*model_args, **kwargs), with_intermediates)
guide_samples.update(model_samples)
return guide_samples
|
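
Continuing the hypothetical model from the previous sketch, a guide with param-parameterised sites plus a params dictionary is all sample_posterior_predictive needs; the parameter values below are made up rather than taken from a real SVI fit.

from d3p.modelling import sample_posterior_predictive  # assumed import path

def guide(n):
    loc = numpyro.param('mu_loc', 0.)
    scale = numpyro.param('mu_scale', 1., constraint=dist.constraints.positive)
    numpyro.sample('mu', dist.Normal(loc, scale))

params = {'mu_loc': 0.3, 'mu_scale': 0.05}  # hypothetical fitted values
post_samples = sample_posterior_predictive(
    jax.random.PRNGKey(1), model, (10,), guide, (10,), params)
# post_samples['mu'] comes from the guide; post_samples['x'] comes from the
# model conditioned on that 'mu'.
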
def sample_multi_prior_predictive(rng_key, n, model, model_args, substitutes=None, with_intermediates=False, **kwargs):
' Samples n times from the prior predictive distribution.\n\n Individual sample sites, as designated by `sample`, can be frozen to\n pre-determined values given in `substitutes`. In that case, values for these\n sites are not actually sampled but the value provided in `substitutes` is\n returned as the sample. This facilitates conditional sampling.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations, say n_model many, from a single prior draw, the same is\n true for the values returned by this function, i.e., this function will\n output n x n_model observations.\n\n :param rng_key: Jax PRNG key\n :param n: Number of draws from the prior predictive.\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param substitutes: An optional dictionary of frozen substitutes for\n sample sites.\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model function.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
def single_sample_fn(rng):
return sample_prior_predictive(rng, model, model_args, substitutes=substitutes, with_intermediates=with_intermediates, **kwargs)
return _sample_a_lot(rng_key, n, single_sample_fn)
| 2,363,878,908,160,044,500
|
Samples n times from the prior predictive distribution.
Individual sample sites, as designated by `sample`, can be frozen to
pre-determined values given in `substitutes`. In that case, values for these
sites are not actually sampled but the value provided in `substitutes` is
returned as the sample. This facilitates conditional sampling.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations, say n_model many, from a single prior draw, the same is
true for the values returned by this function, i.e., this function will
output n x n_model observations.
:param rng_key: Jax PRNG key
:param n: Number of draws from the prior predictive.
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param substitutes: An optional dictionary of frozen substitutes for
sample sites.
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model function.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
|
d3p/modelling.py
|
sample_multi_prior_predictive
|
DPBayes/d3p
|
python
|
def sample_multi_prior_predictive(rng_key, n, model, model_args, substitutes=None, with_intermediates=False, **kwargs):
' Samples n times from the prior predictive distribution.\n\n Individual sample sites, as designated by `sample`, can be frozen to\n pre-determined values given in `substitutes`. In that case, values for these\n sites are not actually sampled but the value provided in `substitutes` is\n returned as the sample. This facilitates conditional sampling.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations, say n_model many, from a single prior draw, the same is\n true for the values returned by this function, i.e., this function will\n output n x n_model observations.\n\n :param rng_key: Jax PRNG key\n :param n: Number of draws from the prior predictive.\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param substitutes: An optional dictionary of frozen substitutes for\n sample sites.\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model function.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
def single_sample_fn(rng):
return sample_prior_predictive(rng, model, model_args, substitutes=substitutes, with_intermediates=with_intermediates, **kwargs)
return _sample_a_lot(rng_key, n, single_sample_fn)
|
def sample_multi_posterior_predictive(rng_key, n, model, model_args, guide, guide_args, params, with_intermediates=False, **kwargs):
' Samples n times from the posterior predictive distribution.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations, say n_model many, from a single posterior draw, the same is\n true for the values returned by this function, i.e., this function will\n output n x n_model observations.\n\n :param rng_key: Jax PRNG key\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param guide: Function representing the variational distribution (the guide)\n using numpyro distributions as well as the `sample` and `param` primitives\n :param guide_args: Arguments to the guide function\n :param params: A dictionary providing values for the parameters\n designated by call to `param` in the guide\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model and guide functions.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
def single_sample_fn(rng):
return sample_posterior_predictive(rng, model, model_args, guide, guide_args, params, with_intermediates=with_intermediates, **kwargs)
return _sample_a_lot(rng_key, n, single_sample_fn)
| -2,692,090,732,426,950,000
|
Samples n times from the posterior predictive distribution.
Note that if the model function is written in such a way that it returns, e.g.,
multiple observations, say n_model many, from a single posterior draw, the same is
true for the values returned by this function, i.e., this function will
output n x n_model observations.
:param rng_key: Jax PRNG key
:param model: Function representing the model using numpyro distributions
and the `sample` primitive
:param model_args: Arguments to the model function
:param guide: Function representing the variational distribution (the guide)
using numpyro distributions as well as the `sample` and `param` primitives
:param guide_args: Arguments to the guide function
:param params: A dictionary providing values for the parameters
designated by call to `param` in the guide
:param with_intermediates: If True, intermediate(/latent) samples from
sample site distributions are included in the result.
:param **kwargs: Keyword arguments passed to the model and guide functions.
:return: Dictionary of sampled values associated with the names given
via `sample()` in the model. If with_intermediates is True,
dictionary values are tuples where the first element is the final
sample values and the second element is a list of intermediate values.
|
d3p/modelling.py
|
sample_multi_posterior_predictive
|
DPBayes/d3p
|
python
|
def sample_multi_posterior_predictive(rng_key, n, model, model_args, guide, guide_args, params, with_intermediates=False, **kwargs):
' Samples n times from the posterior predictive distribution.\n\n Note that if the model function is written in such a way that it returns, e.g.,\n multiple observations, say n_model many, from a single posterior draw, the same is\n true for the values returned by this function, i.e., this function will\n output n x n_model observations.\n\n :param rng_key: Jax PRNG key\n :param model: Function representing the model using numpyro distributions\n and the `sample` primitive\n :param model_args: Arguments to the model function\n :param guide: Function representing the variational distribution (the guide)\n using numpyro distributions as well as the `sample` and `param` primitives\n :param guide_args: Arguments to the guide function\n :param params: A dictionary providing values for the parameters\n designated by call to `param` in the guide\n :param with_intermediates: If True, intermediate(/latent) samples from\n sample site distributions are included in the result.\n :param **kwargs: Keyword arguments passed to the model and guide functions.\n :return: Dictionary of sampled values associated with the names given\n via `sample()` in the model. If with_intermediates is True,\n dictionary values are tuples where the first element is the final\n sample values and the second element is a list of intermediate values.\n '
def single_sample_fn(rng):
return sample_posterior_predictive(rng, model, model_args, guide, guide_args, params, with_intermediates=with_intermediates, **kwargs)
return _sample_a_lot(rng_key, n, single_sample_fn)
|
def make_observed_model(model, obs_to_model_args_fn):
' Transforms a generative model function into one with fixed observations\n for likelihood evaluation in the SVI algorithm.\n\n :param model: Any generative model function using the numpyro `sample`\n primitive.\n :param obs_to_model_args_fn: A function mapping from an argument list compatible\n with SVI (i.e., accepting a batch of observations) to that of `model`. The\n mapping function can take arbitrary arguments and must return a tuple\n (args, kwargs, observations), where args and kwargs are passed to `model`\n as argument and keyword arguments and observations is a dictionary of\n observations for sample sites in `model` that will be fixed using the\n `observe` handler.\n '
def transformed_model_fn(*args, **kwargs):
(mapped_args, mapped_kwargs, fixed_obs) = obs_to_model_args_fn(*args, **kwargs)
return condition(model, data=fixed_obs)(*mapped_args, **mapped_kwargs)
return transformed_model_fn
| 3,910,599,582,833,376,000
|
Transforms a generative model function into one with fixed observations
for likelihood evaluation in the SVI algorithm.
:param model: Any generative model function using the numpyro `sample`
primitive.
:param obs_to_model_args_fn: A function mapping from an argument list compatible
with SVI (i.e., accepting a batch of observations) to that of `model`. The
mapping function can take arbitrary arguments and must return a tuple
(args, kwargs, observations), where args and kwargs are passed to `model`
as argument and keyword arguments and observations is a dictionary of
observations for sample sites in `model` that will be fixed using the
`observe` handler.
|
d3p/modelling.py
|
make_observed_model
|
DPBayes/d3p
|
python
|
def make_observed_model(model, obs_to_model_args_fn):
' Transforms a generative model function into one with fixed observations\n for likelihood evaluation in the SVI algorithm.\n\n :param model: Any generative model function using the numpyro `sample`\n primitive.\n :param obs_to_model_args_fn: A function mapping from an argument list compatible\n with SVI (i.e., accepting a batch of observations) to that of `model`. The\n mapping function can take arbitrary arguments and must return a tuple\n (args, kwargs, observations), where args and kwargs are passed to `model`\n as argument and keyword arguments and observations is a dictionary of\n observations for sample sites in `model` that will be fixed using the\n `observe` handler.\n '
def transformed_model_fn(*args, **kwargs):
(mapped_args, mapped_kwargs, fixed_obs) = obs_to_model_args_fn(*args, **kwargs)
return condition(model, data=fixed_obs)(*mapped_args, **mapped_kwargs)
return transformed_model_fn
|
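
The mapping function expected by make_observed_model only has to translate an SVI-style batch into the model's own calling convention plus a dictionary of observed sites. Reusing the hypothetical model(n) from the earlier sketch, whose observed site is 'x':

from d3p.modelling import make_observed_model  # assumed import path

def obs_to_model_args(x_batch):
    n = x_batch.shape[0]
    # (positional args, keyword args, observations fixed via `condition`)
    return (n,), {}, {'x': x_batch}

observed_model = make_observed_model(model, obs_to_model_args)
# observed_model(x_batch) now runs model(n) with the 'x' site fixed to the
# given batch, which is what the SVI ELBO needs for the likelihood term.
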
def filter_detections(boxes, scores, is_training, gpu_id):
'\n :param boxes: [-1, 4]\n :param scores: [-1, ]\n :param labels: [-1, ]\n :return:\n '
if is_training:
indices = tf.reshape(tf.where(tf.greater(scores, cfgs.VIS_SCORE)), [(- 1)])
else:
indices = tf.reshape(tf.where(tf.greater(scores, cfgs.FILTERED_SCORE)), [(- 1)])
if cfgs.NMS:
filtered_boxes = tf.gather(boxes, indices)
filtered_scores = tf.gather(scores, indices)
if (cfgs.ANGLE_RANGE == 180):
filtered_boxes = tf.py_func(coordinate_present_convert, inp=[filtered_boxes, 1], Tout=[tf.float32])
filtered_boxes = tf.reshape(filtered_boxes, [(- 1), 5])
max_output_size = (4000 if ('DOTA' in cfgs.NET_NAME) else 200)
nms_indices = nms_rotate.nms_rotate(decode_boxes=filtered_boxes, scores=filtered_scores, iou_threshold=cfgs.NMS_IOU_THRESHOLD, max_output_size=(100 if is_training else max_output_size), use_angle_condition=False, angle_threshold=15, use_gpu=True, gpu_id=gpu_id)
indices = tf.gather(indices, nms_indices)
return indices
| 4,244,747,397,604,209,000
|
:param boxes: [-1, 4]
:param scores: [-1, ]
:param labels: [-1, ]
:return:
|
libs/detection_oprations/refine_proposal_opr_csl.py
|
filter_detections
|
DLPerf/R3Det_Tensorflow
|
python
|
def filter_detections(boxes, scores, is_training, gpu_id):
'\n :param boxes: [-1, 4]\n :param scores: [-1, ]\n :param labels: [-1, ]\n :return:\n '
if is_training:
indices = tf.reshape(tf.where(tf.greater(scores, cfgs.VIS_SCORE)), [(- 1)])
else:
indices = tf.reshape(tf.where(tf.greater(scores, cfgs.FILTERED_SCORE)), [(- 1)])
if cfgs.NMS:
filtered_boxes = tf.gather(boxes, indices)
filtered_scores = tf.gather(scores, indices)
if (cfgs.ANGLE_RANGE == 180):
filtered_boxes = tf.py_func(coordinate_present_convert, inp=[filtered_boxes, 1], Tout=[tf.float32])
filtered_boxes = tf.reshape(filtered_boxes, [(- 1), 5])
max_output_size = (4000 if ('DOTA' in cfgs.NET_NAME) else 200)
nms_indices = nms_rotate.nms_rotate(decode_boxes=filtered_boxes, scores=filtered_scores, iou_threshold=cfgs.NMS_IOU_THRESHOLD, max_output_size=(100 if is_training else max_output_size), use_angle_condition=False, angle_threshold=15, use_gpu=True, gpu_id=gpu_id)
indices = tf.gather(indices, nms_indices)
return indices
|