| code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k) |
|---|---|---|---|
def convert_to_feature_collection(self):
"""Convert data into a FeatureCollection if it is not already."""
if self.data['type'] == 'FeatureCollection':
return
if not self.embed:
raise ValueError(
'Data is not a FeatureCollection, but it should be to apply '
'style or highlight. Because `embed=False` it cannot be '
'converted into one.\nEither change your geojson data to a '
'FeatureCollection, set `embed=True` or disable styling.')
# Catch case when GeoJSON is just a single Feature or a geometry.
if 'geometry' not in self.data.keys():
# Catch case when GeoJSON is just a geometry.
self.data = {'type': 'Feature', 'geometry': self.data}
self.data = {'type': 'FeatureCollection', 'features': [self.data]}
|
def function[convert_to_feature_collection, parameter[self]]:
constant[Convert data into a FeatureCollection if it is not already.]
if compare[call[name[self].data][constant[type]] equal[==] constant[FeatureCollection]] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da2049630d0> begin[:]
<ast.Raise object at 0x7da204962500>
if compare[constant[geometry] <ast.NotIn object at 0x7da2590d7190> call[name[self].data.keys, parameter[]]] begin[:]
name[self].data assign[=] dictionary[[<ast.Constant object at 0x7da20cabedd0>, <ast.Constant object at 0x7da20cabdb10>], [<ast.Constant object at 0x7da20cabe200>, <ast.Attribute object at 0x7da20cabe410>]]
name[self].data assign[=] dictionary[[<ast.Constant object at 0x7da20cabf400>, <ast.Constant object at 0x7da20cabfeb0>], [<ast.Constant object at 0x7da20cabc3a0>, <ast.List object at 0x7da20cabc4f0>]]
|
keyword[def] identifier[convert_to_feature_collection] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[data] [ literal[string] ]== literal[string] :
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[embed] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[data] . identifier[keys] ():
identifier[self] . identifier[data] ={ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[data] }
identifier[self] . identifier[data] ={ literal[string] : literal[string] , literal[string] :[ identifier[self] . identifier[data] ]}
|
def convert_to_feature_collection(self):
"""Convert data into a FeatureCollection if it is not already."""
if self.data['type'] == 'FeatureCollection':
return # depends on [control=['if'], data=[]]
if not self.embed:
raise ValueError('Data is not a FeatureCollection, but it should be to apply style or highlight. Because `embed=False` it cannot be converted into one.\nEither change your geojson data to a FeatureCollection, set `embed=True` or disable styling.') # depends on [control=['if'], data=[]]
# Catch case when GeoJSON is just a single Feature or a geometry.
if 'geometry' not in self.data.keys():
# Catch case when GeoJSON is just a geometry.
self.data = {'type': 'Feature', 'geometry': self.data} # depends on [control=['if'], data=[]]
self.data = {'type': 'FeatureCollection', 'features': [self.data]}
|
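A minimal runnable sketch of the conversion in the `code` column above; `GeoJsonLike` is a hypothetical stand-in for the class the method belongs to.

```python
class GeoJsonLike:
    def __init__(self, data, embed=True):
        self.data = data
        self.embed = embed

    def convert_to_feature_collection(self):
        if self.data['type'] == 'FeatureCollection':
            return
        if not self.embed:
            raise ValueError('cannot convert to a FeatureCollection with embed=False')
        if 'geometry' not in self.data:
            # Bare geometry: wrap it into a Feature first.
            self.data = {'type': 'Feature', 'geometry': self.data}
        self.data = {'type': 'FeatureCollection', 'features': [self.data]}

g = GeoJsonLike({'type': 'Point', 'coordinates': [0.0, 0.0]})
g.convert_to_feature_collection()
assert g.data == {
    'type': 'FeatureCollection',
    'features': [{'type': 'Feature',
                  'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]}}],
}
```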
def get_image_format(filename):
"""Get the image format."""
image = None
bad_image = 1
image_format = NONE_FORMAT
sequenced = False
try:
bad_image = Image.open(filename).verify()
image = Image.open(filename)
image_format = image.format
sequenced = _is_image_sequenced(image)
except (OSError, IOError, AttributeError):
pass
if sequenced:
image_format = gif.SEQUENCED_TEMPLATE.format(image_format)
elif image is None or bad_image or image_format == NONE_FORMAT:
image_format = ERROR_FORMAT
comic_format = comic.get_comic_format(filename)
if comic_format:
image_format = comic_format
if (Settings.verbose > 1) and image_format == ERROR_FORMAT and \
(not Settings.list_only):
print(filename, "doesn't look like an image or comic archive.")
return image_format
|
def function[get_image_format, parameter[filename]]:
constant[Get the image format.]
variable[image] assign[=] constant[None]
variable[bad_image] assign[=] constant[1]
variable[image_format] assign[=] name[NONE_FORMAT]
variable[sequenced] assign[=] constant[False]
<ast.Try object at 0x7da207f01bd0>
if name[sequenced] begin[:]
variable[image_format] assign[=] call[name[gif].SEQUENCED_TEMPLATE.format, parameter[name[image_format]]]
return[name[image_format]]
|
keyword[def] identifier[get_image_format] ( identifier[filename] ):
literal[string]
identifier[image] = keyword[None]
identifier[bad_image] = literal[int]
identifier[image_format] = identifier[NONE_FORMAT]
identifier[sequenced] = keyword[False]
keyword[try] :
identifier[bad_image] = identifier[Image] . identifier[open] ( identifier[filename] ). identifier[verify] ()
identifier[image] = identifier[Image] . identifier[open] ( identifier[filename] )
identifier[image_format] = identifier[image] . identifier[format]
identifier[sequenced] = identifier[_is_image_sequenced] ( identifier[image] )
keyword[except] ( identifier[OSError] , identifier[IOError] , identifier[AttributeError] ):
keyword[pass]
keyword[if] identifier[sequenced] :
identifier[image_format] = identifier[gif] . identifier[SEQUENCED_TEMPLATE] . identifier[format] ( identifier[image_format] )
keyword[elif] identifier[image] keyword[is] keyword[None] keyword[or] identifier[bad_image] keyword[or] identifier[image_format] == identifier[NONE_FORMAT] :
identifier[image_format] = identifier[ERROR_FORMAT]
identifier[comic_format] = identifier[comic] . identifier[get_comic_format] ( identifier[filename] )
keyword[if] identifier[comic_format] :
identifier[image_format] = identifier[comic_format]
keyword[if] ( identifier[Settings] . identifier[verbose] > literal[int] ) keyword[and] identifier[image_format] == identifier[ERROR_FORMAT] keyword[and] ( keyword[not] identifier[Settings] . identifier[list_only] ):
identifier[print] ( identifier[filename] , literal[string] )
keyword[return] identifier[image_format]
|
def get_image_format(filename):
"""Get the image format."""
image = None
bad_image = 1
image_format = NONE_FORMAT
sequenced = False
try:
bad_image = Image.open(filename).verify()
image = Image.open(filename)
image_format = image.format
sequenced = _is_image_sequenced(image) # depends on [control=['try'], data=[]]
except (OSError, IOError, AttributeError):
pass # depends on [control=['except'], data=[]]
if sequenced:
image_format = gif.SEQUENCED_TEMPLATE.format(image_format) # depends on [control=['if'], data=[]]
elif image is None or bad_image or image_format == NONE_FORMAT:
image_format = ERROR_FORMAT
comic_format = comic.get_comic_format(filename)
if comic_format:
image_format = comic_format # depends on [control=['if'], data=[]]
if Settings.verbose > 1 and image_format == ERROR_FORMAT and (not Settings.list_only):
print(filename, "doesn't look like an image or comic archive.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return image_format
|
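One subtlety in the row above: PIL's `Image.verify()` returns `None` on success and raises on corrupt data, so `bad_image` is falsy for a good file and stays truthy (`1`) otherwise, and the image must be reopened after verification. A minimal runnable sketch of that pattern:

```python
from PIL import Image

def probe_format(path):
    try:
        with Image.open(path) as im:
            im.verify()               # raises on corrupt data, returns None
        with Image.open(path) as im:  # reopen: verify() leaves the file unusable
            return im.format          # e.g. 'PNG', 'JPEG', 'GIF'
    except (OSError, AttributeError):
        return None
```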
def symlink(parser, cmd, args):
"""
Set up symlinks for (a subset of) the pwny apps.
"""
parser.add_argument(
'apps',
nargs=argparse.REMAINDER,
help='Which apps to create symlinks for.'
)
args = parser.parse_args(args)
base_dir, pwny_main = os.path.split(sys.argv[0])
for app_name, config in MAIN_FUNCTIONS.items():
if not config['symlink'] or (args.apps and app_name not in args.apps):
continue
dest = os.path.join(base_dir, app_name)
if not os.path.exists(dest):
print('Creating symlink %s' % dest)
os.symlink(pwny_main, dest)
else:
print('Not creating symlink %s (file already exists)' % dest)
|
def function[symlink, parameter[parser, cmd, args]]:
constant[
Set up symlinks for (a subset of) the pwny apps.
]
call[name[parser].add_argument, parameter[constant[apps]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[args]]]
<ast.Tuple object at 0x7da18f09dd50> assign[=] call[name[os].path.split, parameter[call[name[sys].argv][constant[0]]]]
for taget[tuple[[<ast.Name object at 0x7da18f09c820>, <ast.Name object at 0x7da18f09d1e0>]]] in starred[call[name[MAIN_FUNCTIONS].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18f09c5b0> begin[:]
continue
variable[dest] assign[=] call[name[os].path.join, parameter[name[base_dir], name[app_name]]]
if <ast.UnaryOp object at 0x7da18f09cf40> begin[:]
call[name[print], parameter[binary_operation[constant[Creating symlink %s] <ast.Mod object at 0x7da2590d6920> name[dest]]]]
call[name[os].symlink, parameter[name[pwny_main], name[dest]]]
|
keyword[def] identifier[symlink] ( identifier[parser] , identifier[cmd] , identifier[args] ):
literal[string]
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[nargs] = identifier[argparse] . identifier[REMAINDER] ,
identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[args] )
identifier[base_dir] , identifier[pwny_main] = identifier[os] . identifier[path] . identifier[split] ( identifier[sys] . identifier[argv] [ literal[int] ])
keyword[for] identifier[app_name] , identifier[config] keyword[in] identifier[MAIN_FUNCTIONS] . identifier[items] ():
keyword[if] keyword[not] identifier[config] [ literal[string] ] keyword[or] ( identifier[args] . identifier[apps] keyword[and] identifier[app_name] keyword[not] keyword[in] identifier[args] . identifier[apps] ):
keyword[continue]
identifier[dest] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , identifier[app_name] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dest] ):
identifier[print] ( literal[string] % identifier[dest] )
identifier[os] . identifier[symlink] ( identifier[pwny_main] , identifier[dest] )
keyword[else] :
identifier[print] ( literal[string] % identifier[dest] )
|
def symlink(parser, cmd, args):
"""
Set up symlinks for (a subset of) the pwny apps.
"""
parser.add_argument('apps', nargs=argparse.REMAINDER, help='Which apps to create symlinks for.')
args = parser.parse_args(args)
(base_dir, pwny_main) = os.path.split(sys.argv[0])
for (app_name, config) in MAIN_FUNCTIONS.items():
if not config['symlink'] or (args.apps and app_name not in args.apps):
continue # depends on [control=['if'], data=[]]
dest = os.path.join(base_dir, app_name)
if not os.path.exists(dest):
print('Creating symlink %s' % dest)
os.symlink(pwny_main, dest) # depends on [control=['if'], data=[]]
else:
print('Not creating symlink %s (file already exists)' % dest) # depends on [control=['for'], data=[]]
|
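The non-obvious piece above is `argparse.REMAINDER`: every trailing positional is collected into `args.apps`, and an empty list means all symlinkable apps are processed (the `args.apps and ...` guard short-circuits). A runnable sketch, with an illustrative prog name:

```python
import argparse

parser = argparse.ArgumentParser(prog='pwny')
parser.add_argument('apps', nargs=argparse.REMAINDER,
                    help='Which apps to create symlinks for.')

print(parser.parse_args(['asm', 'disasm']).apps)  # ['asm', 'disasm']
print(parser.parse_args([]).apps)                 # [] -> every app qualifies
```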
def _on_dynamodb_exception(self, error):
"""Dynamically handle DynamoDB exceptions, returning HTTP error
responses.
:param exceptions.DynamoDBException error:
"""
if isinstance(error, exceptions.ConditionalCheckFailedException):
raise web.HTTPError(409, reason='Condition Check Failure')
elif isinstance(error, exceptions.NoCredentialsError):
if _no_creds_should_return_429():
raise web.HTTPError(429, reason='Instance Credentials Failure')
elif isinstance(error, (exceptions.ThroughputExceeded,
exceptions.ThrottlingException)):
raise web.HTTPError(429, reason='Too Many Requests')
if hasattr(self, 'logger'):
self.logger.error('DynamoDB Error: %s', error)
raise web.HTTPError(500, reason=str(error))
|
def function[_on_dynamodb_exception, parameter[self, error]]:
constant[Dynamically handle DynamoDB exceptions, returning HTTP error
responses.
:param exceptions.DynamoDBException error:
]
if call[name[isinstance], parameter[name[error], name[exceptions].ConditionalCheckFailedException]] begin[:]
<ast.Raise object at 0x7da1b23469b0>
if call[name[hasattr], parameter[name[self], constant[logger]]] begin[:]
call[name[self].logger.error, parameter[constant[DynamoDB Error: %s], name[error]]]
<ast.Raise object at 0x7da1b2347010>
|
keyword[def] identifier[_on_dynamodb_exception] ( identifier[self] , identifier[error] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[error] , identifier[exceptions] . identifier[ConditionalCheckFailedException] ):
keyword[raise] identifier[web] . identifier[HTTPError] ( literal[int] , identifier[reason] = literal[string] )
keyword[elif] identifier[isinstance] ( identifier[error] , identifier[exceptions] . identifier[NoCredentialsError] ):
keyword[if] identifier[_no_creds_should_return_429] ():
keyword[raise] identifier[web] . identifier[HTTPError] ( literal[int] , identifier[reason] = literal[string] )
keyword[elif] identifier[isinstance] ( identifier[error] ,( identifier[exceptions] . identifier[ThroughputExceeded] ,
identifier[exceptions] . identifier[ThrottlingException] )):
keyword[raise] identifier[web] . identifier[HTTPError] ( literal[int] , identifier[reason] = literal[string] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[error] )
keyword[raise] identifier[web] . identifier[HTTPError] ( literal[int] , identifier[reason] = identifier[str] ( identifier[error] ))
|
def _on_dynamodb_exception(self, error):
"""Dynamically handle DynamoDB exceptions, returning HTTP error
responses.
:param exceptions.DynamoDBException error:
"""
if isinstance(error, exceptions.ConditionalCheckFailedException):
raise web.HTTPError(409, reason='Condition Check Failure') # depends on [control=['if'], data=[]]
elif isinstance(error, exceptions.NoCredentialsError):
if _no_creds_should_return_429():
raise web.HTTPError(429, reason='Instance Credentials Failure') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(error, (exceptions.ThroughputExceeded, exceptions.ThrottlingException)):
raise web.HTTPError(429, reason='Too Many Requests') # depends on [control=['if'], data=[]]
if hasattr(self, 'logger'):
self.logger.error('DynamoDB Error: %s', error) # depends on [control=['if'], data=[]]
raise web.HTTPError(500, reason=str(error))
|
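Summarized, the method above translates DynamoDB failures into HTTP responses. A hedged usage sketch; the handler context and the DynamoDB call are illustrative, not taken from the row:

```python
try:
    do_dynamodb_operation()  # hypothetical call that may raise
except exceptions.DynamoDBException as error:
    self._on_dynamodb_exception(error)
# ConditionalCheckFailedException          -> 409 Condition Check Failure
# NoCredentialsError (when configured to)  -> 429 Instance Credentials Failure
# ThroughputExceeded / ThrottlingException -> 429 Too Many Requests
# anything else                            -> 500 with str(error)
```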
def b_rgb(self, r, g, b, text=None, fore=None, style=None):
""" A chained method that sets the back color to an RGB value.
Arguments:
r : Red value.
g : Green value.
b : Blue value.
text : Text to style if not building up color codes.
fore : Fore color for the text.
style : Style for the text.
"""
return self.chained(text=text, fore=fore, back=(r, g, b), style=style)
|
def function[b_rgb, parameter[self, r, g, b, text, fore, style]]:
constant[ A chained method that sets the back color to an RGB value.
Arguments:
r : Red value.
g : Green value.
b : Blue value.
text : Text to style if not building up color codes.
fore : Fore color for the text.
style : Style for the text.
]
return[call[name[self].chained, parameter[]]]
|
keyword[def] identifier[b_rgb] ( identifier[self] , identifier[r] , identifier[g] , identifier[b] , identifier[text] = keyword[None] , identifier[fore] = keyword[None] , identifier[style] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[chained] ( identifier[text] = identifier[text] , identifier[fore] = identifier[fore] , identifier[back] =( identifier[r] , identifier[g] , identifier[b] ), identifier[style] = identifier[style] )
|
def b_rgb(self, r, g, b, text=None, fore=None, style=None):
""" A chained method that sets the back color to an RGB value.
Arguments:
r : Red value.
g : Green value.
b : Blue value.
text : Text to style if not building up color codes.
fore : Fore color for the text.
style : Style for the text.
"""
return self.chained(text=text, fore=fore, back=(r, g, b), style=style)
|
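A hedged usage sketch; the chaining style matches the `colr` package's `Colr` class, which is assumed here rather than confirmed by the row:

```python
from colr import Colr  # assumption: the method lives on colr's Colr class

# White text on a midnight-blue RGB background.
print(Colr().b_rgb(25, 25, 112, text='hello', fore='white'))
```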
def _get_mean(self, C, mag, rake, dip, rrup, rjb):
"""
Return mean value (eq. 1, page 319).
"""
f1 = self._compute_magnitude_scaling(C, mag)
f2 = self._compute_distance_scaling(C, mag, rrup)
f3 = self._compute_faulting_mechanism(C, rake, dip)
f4 = self._compute_far_source_soil_effect(C)
f5 = self._compute_hanging_wall_effect(C, rjb, rrup, dip, mag)
mean = (
C['c1'] + f1 + C['c4'] * np.log(np.sqrt(f2)) + f3 + f4 + f5
)
return mean
|
def function[_get_mean, parameter[self, C, mag, rake, dip, rrup, rjb]]:
constant[
Return mean value (eq. 1, page 319).
]
variable[f1] assign[=] call[name[self]._compute_magnitude_scaling, parameter[name[C], name[mag]]]
variable[f2] assign[=] call[name[self]._compute_distance_scaling, parameter[name[C], name[mag], name[rrup]]]
variable[f3] assign[=] call[name[self]._compute_faulting_mechanism, parameter[name[C], name[rake], name[dip]]]
variable[f4] assign[=] call[name[self]._compute_far_source_soil_effect, parameter[name[C]]]
variable[f5] assign[=] call[name[self]._compute_hanging_wall_effect, parameter[name[C], name[rjb], name[rrup], name[dip], name[mag]]]
variable[mean] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[C]][constant[c1]] + name[f1]] + binary_operation[call[name[C]][constant[c4]] * call[name[np].log, parameter[call[name[np].sqrt, parameter[name[f2]]]]]]] + name[f3]] + name[f4]] + name[f5]]
return[name[mean]]
|
keyword[def] identifier[_get_mean] ( identifier[self] , identifier[C] , identifier[mag] , identifier[rake] , identifier[dip] , identifier[rrup] , identifier[rjb] ):
literal[string]
identifier[f1] = identifier[self] . identifier[_compute_magnitude_scaling] ( identifier[C] , identifier[mag] )
identifier[f2] = identifier[self] . identifier[_compute_distance_scaling] ( identifier[C] , identifier[mag] , identifier[rrup] )
identifier[f3] = identifier[self] . identifier[_compute_faulting_mechanism] ( identifier[C] , identifier[rake] , identifier[dip] )
identifier[f4] = identifier[self] . identifier[_compute_far_source_soil_effect] ( identifier[C] )
identifier[f5] = identifier[self] . identifier[_compute_hanging_wall_effect] ( identifier[C] , identifier[rjb] , identifier[rrup] , identifier[dip] , identifier[mag] )
identifier[mean] =(
identifier[C] [ literal[string] ]+ identifier[f1] + identifier[C] [ literal[string] ]* identifier[np] . identifier[log] ( identifier[np] . identifier[sqrt] ( identifier[f2] ))+ identifier[f3] + identifier[f4] + identifier[f5]
)
keyword[return] identifier[mean]
|
def _get_mean(self, C, mag, rake, dip, rrup, rjb):
"""
Return mean value (eq. 1, page 319).
"""
f1 = self._compute_magnitude_scaling(C, mag)
f2 = self._compute_distance_scaling(C, mag, rrup)
f3 = self._compute_faulting_mechanism(C, rake, dip)
f4 = self._compute_far_source_soil_effect(C)
f5 = self._compute_hanging_wall_effect(C, rjb, rrup, dip, mag)
mean = C['c1'] + f1 + C['c4'] * np.log(np.sqrt(f2)) + f3 + f4 + f5
return mean
|
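Reconstructed from the expression assembled above, the mean ground-motion equation (eq. 1 of the cited source) reads as follows; argument lists are taken from the helper calls, and the symbols are a hedged reading of the code rather than of the cited page:

```latex
\bar{Y} = c_1 + f_1(M) + c_4 \ln\!\sqrt{f_2(M, r_{rup})}
        + f_3(\mathrm{rake}, \mathrm{dip}) + f_4
        + f_5(r_{jb}, r_{rup}, \mathrm{dip}, M)
```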
def diff(self, dt=None, abs=True):
"""
Returns the difference between two Date objects as a Period.
:type dt: Date or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
"""
if dt is None:
dt = self.today()
return Period(self, Date(dt.year, dt.month, dt.day), absolute=abs)
|
def function[diff, parameter[self, dt, abs]]:
constant[
Returns the difference between two Date objects as a Period.
:type dt: Date or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
]
if compare[name[dt] is constant[None]] begin[:]
variable[dt] assign[=] call[name[self].today, parameter[]]
return[call[name[Period], parameter[name[self], call[name[Date], parameter[name[dt].year, name[dt].month, name[dt].day]]]]]
|
keyword[def] identifier[diff] ( identifier[self] , identifier[dt] = keyword[None] , identifier[abs] = keyword[True] ):
literal[string]
keyword[if] identifier[dt] keyword[is] keyword[None] :
identifier[dt] = identifier[self] . identifier[today] ()
keyword[return] identifier[Period] ( identifier[self] , identifier[Date] ( identifier[dt] . identifier[year] , identifier[dt] . identifier[month] , identifier[dt] . identifier[day] ), identifier[absolute] = identifier[abs] )
|
def diff(self, dt=None, abs=True):
"""
Returns the difference between two Date objects as a Period.
:type dt: Date or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Period
"""
if dt is None:
dt = self.today() # depends on [control=['if'], data=['dt']]
return Period(self, Date(dt.year, dt.month, dt.day), absolute=abs)
|
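A hedged usage sketch, assuming the pendulum-style `Date`/`Period` API the row appears to follow:

```python
import pendulum  # assumption: the row matches pendulum's Date API

d = pendulum.Date(2024, 1, 1)
print(d.diff(pendulum.Date(2024, 3, 1)).in_days())  # 60 (2024 is a leap year)
print(d.diff().in_days())                           # dt=None: compares against today()
```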
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
"""
children = CodeObjects(self.code)
return [ByteParser(code=c, text=self.text) for c in children]
|
def function[child_parsers, parameter[self]]:
constant[Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
]
variable[children] assign[=] call[name[CodeObjects], parameter[name[self].code]]
return[<ast.ListComp object at 0x7da18ede7c40>]
|
keyword[def] identifier[child_parsers] ( identifier[self] ):
literal[string]
identifier[children] = identifier[CodeObjects] ( identifier[self] . identifier[code] )
keyword[return] [ identifier[ByteParser] ( identifier[code] = identifier[c] , identifier[text] = identifier[self] . identifier[text] ) keyword[for] identifier[c] keyword[in] identifier[children] ]
|
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
"""
children = CodeObjects(self.code)
return [ByteParser(code=c, text=self.text) for c in children]
|
def create_with_zero_body(self, uri=None, timeout=-1, custom_headers=None):
"""
Makes a POST request to create a resource when no request body is required.
Args:
uri:
Can be either the resource ID or the resource URI.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers:
Allows setting specific HTTP headers.
Returns:
Created resource.
"""
if not uri:
uri = self._uri
logger.debug('Create with zero body (uri = %s)' % uri)
return self.__do_post(uri, {}, timeout, custom_headers)
|
def function[create_with_zero_body, parameter[self, uri, timeout, custom_headers]]:
constant[
Makes a POST request to create a resource when no request body is required.
Args:
uri:
Can be either the resource ID or the resource URI.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers:
Allows setting specific HTTP headers.
Returns:
Created resource.
]
if <ast.UnaryOp object at 0x7da207f98940> begin[:]
variable[uri] assign[=] name[self]._uri
call[name[logger].debug, parameter[binary_operation[constant[Create with zero body (uri = %s)] <ast.Mod object at 0x7da2590d6920> name[uri]]]]
return[call[name[self].__do_post, parameter[name[uri], dictionary[[], []], name[timeout], name[custom_headers]]]]
|
keyword[def] identifier[create_with_zero_body] ( identifier[self] , identifier[uri] = keyword[None] , identifier[timeout] =- literal[int] , identifier[custom_headers] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[uri] :
identifier[uri] = identifier[self] . identifier[_uri]
identifier[logger] . identifier[debug] ( literal[string] % identifier[uri] )
keyword[return] identifier[self] . identifier[__do_post] ( identifier[uri] ,{}, identifier[timeout] , identifier[custom_headers] )
|
def create_with_zero_body(self, uri=None, timeout=-1, custom_headers=None):
"""
Makes a POST request to create a resource when no request body is required.
Args:
uri:
Can be either the resource ID or the resource URI.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers:
Allows setting specific HTTP headers.
Returns:
Created resource.
"""
if not uri:
uri = self._uri # depends on [control=['if'], data=[]]
logger.debug('Create with zero body (uri = %s)' % uri)
return self.__do_post(uri, {}, timeout, custom_headers)
|
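Two behaviours worth noting above: a falsy `uri` falls back to `self._uri`, and the POST body is always an empty dict. A hedged usage sketch, where `resource_client` and the URI are illustrative, not taken from the row:

```python
created = resource_client.create_with_zero_body(timeout=60)  # POSTs {} to self._uri
created = resource_client.create_with_zero_body(uri='/rest/some-resource/123')
```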
def overlay(self, matchers, force=False):
"""
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.
"""
for m in matchers:
if m in self._ran_matchers:
continue
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self))
self.overlays.sort(key=lambda o: o.start, reverse=True)
|
def function[overlay, parameter[self, matchers, force]]:
constant[
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.
]
for taget[name[m]] in starred[name[matchers]] begin[:]
if compare[name[m] in name[self]._ran_matchers] begin[:]
continue
call[name[self]._ran_matchers.append, parameter[name[m]]]
<ast.AugAssign object at 0x7da2043468c0>
call[name[self].overlays.sort, parameter[]]
|
keyword[def] identifier[overlay] ( identifier[self] , identifier[matchers] , identifier[force] = keyword[False] ):
literal[string]
keyword[for] identifier[m] keyword[in] identifier[matchers] :
keyword[if] identifier[m] keyword[in] identifier[self] . identifier[_ran_matchers] :
keyword[continue]
identifier[self] . identifier[_ran_matchers] . identifier[append] ( identifier[m] )
identifier[self] . identifier[overlays] += identifier[list] ( identifier[m] . identifier[offset_overlays] ( identifier[self] ))
identifier[self] . identifier[overlays] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[o] : identifier[o] . identifier[start] , identifier[reverse] = keyword[True] )
|
def overlay(self, matchers, force=False):
"""
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.
"""
for m in matchers:
if m in self._ran_matchers:
continue # depends on [control=['if'], data=[]]
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self)) # depends on [control=['for'], data=['m']]
self.overlays.sort(key=lambda o: o.start, reverse=True)
|
def _fact_to_tuple(self, fact):
"""
Convert a ``Fact`` to its normalized tuple.
This is where all type conversion of ``Fact`` attributes to strings, as
well as any normalization, happens.
Note:
Because different writers may require different types, we need to
do this individually.
Args:
fact (hamster_lib.Fact): Fact to be converted.
Returns:
FactTuple: Tuple representing the original ``Fact``.
"""
# Fields that may have ``None`` value will be represented by ''
if fact.category:
category = fact.category.name
else:
category = ''
description = fact.description or ''
return FactTuple(
start=fact.start.strftime(self.datetime_format),
end=fact.end.strftime(self.datetime_format),
activity=text_type(fact.activity.name),
duration=fact.get_string_delta(format='%M'),
category=text_type(category),
description=text_type(description),
)
|
def function[_fact_to_tuple, parameter[self, fact]]:
constant[
Convert a ``Fact`` to its normalized tuple.
This is where all type conversion of ``Fact`` attributes to strings, as
well as any normalization, happens.
Note:
Because different writers may require different types, we need to
do this individually.
Args:
fact (hamster_lib.Fact): Fact to be converted.
Returns:
FactTuple: Tuple representing the original ``Fact``.
]
if name[fact].category begin[:]
variable[category] assign[=] name[fact].category.name
variable[description] assign[=] <ast.BoolOp object at 0x7da1b0cc08b0>
return[call[name[FactTuple], parameter[]]]
|
keyword[def] identifier[_fact_to_tuple] ( identifier[self] , identifier[fact] ):
literal[string]
keyword[if] identifier[fact] . identifier[category] :
identifier[category] = identifier[fact] . identifier[category] . identifier[name]
keyword[else] :
identifier[category] = literal[string]
identifier[description] = identifier[fact] . identifier[description] keyword[or] literal[string]
keyword[return] identifier[FactTuple] (
identifier[start] = identifier[fact] . identifier[start] . identifier[strftime] ( identifier[self] . identifier[datetime_format] ),
identifier[end] = identifier[fact] . identifier[end] . identifier[strftime] ( identifier[self] . identifier[datetime_format] ),
identifier[activity] = identifier[text_type] ( identifier[fact] . identifier[activity] . identifier[name] ),
identifier[duration] = identifier[fact] . identifier[get_string_delta] ( identifier[format] = literal[string] ),
identifier[category] = identifier[text_type] ( identifier[category] ),
identifier[description] = identifier[text_type] ( identifier[description] ),
)
|
def _fact_to_tuple(self, fact):
"""
Convert a ``Fact`` to its normalized tuple.
This is where all type conversion of ``Fact`` attributes to strings, as
well as any normalization, happens.
Note:
Because different writers may require different types, we need to
do this individually.
Args:
fact (hamster_lib.Fact): Fact to be converted.
Returns:
FactTuple: Tuple representing the original ``Fact``.
"""
# Fields that may have ``None`` value will be represented by ''
if fact.category:
category = fact.category.name # depends on [control=['if'], data=[]]
else:
category = ''
description = fact.description or ''
return FactTuple(start=fact.start.strftime(self.datetime_format), end=fact.end.strftime(self.datetime_format), activity=text_type(fact.activity.name), duration=fact.get_string_delta(format='%M'), category=text_type(category), description=text_type(description))
|
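The method fills six fields by keyword, so `FactTuple` is presumably a namedtuple along these lines; a sketch under that assumption, as the real hamster_lib definition may differ:

```python
from collections import namedtuple

# Assumed shape, inferred from the keyword arguments used above.
FactTuple = namedtuple(
    'FactTuple',
    ('start', 'end', 'activity', 'duration', 'category', 'description'),
)
```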
def present(self, results):
"Present the results as a list."
for (score, d) in results:
doc = self.documents[d]
print ("%5.2f|%25s | %s"
% (100 * score, doc.url, doc.title[:45].expandtabs()))
|
def function[present, parameter[self, results]]:
constant[Present the results as a list.]
for taget[tuple[[<ast.Name object at 0x7da18bccaf50>, <ast.Name object at 0x7da18bccb9d0>]]] in starred[name[results]] begin[:]
variable[doc] assign[=] call[name[self].documents][name[d]]
call[name[print], parameter[binary_operation[constant[%5.2f|%25s | %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da20c6a8730>, <ast.Attribute object at 0x7da20c6a89a0>, <ast.Call object at 0x7da20c6ab820>]]]]]
|
keyword[def] identifier[present] ( identifier[self] , identifier[results] ):
literal[string]
keyword[for] ( identifier[score] , identifier[d] ) keyword[in] identifier[results] :
identifier[doc] = identifier[self] . identifier[documents] [ identifier[d] ]
identifier[print] ( literal[string]
%( literal[int] * identifier[score] , identifier[doc] . identifier[url] , identifier[doc] . identifier[title] [: literal[int] ]. identifier[expandtabs] ()))
|
def present(self, results):
"""Present the results as a list."""
for (score, d) in results:
doc = self.documents[d]
print('%5.2f|%25s | %s' % (100 * score, doc.url, doc.title[:45].expandtabs())) # depends on [control=['for'], data=[]]
|
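A runnable sketch of the output format, with a stub document type standing in for `self.documents` entries:

```python
class Doc:  # stub exposing the two attributes the formatter reads
    def __init__(self, url, title):
        self.url, self.title = url, title

documents = [Doc('http://example.com/a', 'An example page title')]
for (score, d) in [(0.874, 0)]:
    doc = documents[d]
    print('%5.2f|%25s | %s'
          % (100 * score, doc.url, doc.title[:45].expandtabs()))
# -> 87.40|     http://example.com/a | An example page title
```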
def _updateParamsFrom(self, otherObj, updater=_default_param_updater,
exclude=None, prefix=""):
"""
:note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`
"""
PropDeclrCollector._updateParamsFrom(self, otherObj, updater, exclude, prefix)
|
def function[_updateParamsFrom, parameter[self, otherObj, updater, exclude, prefix]]:
constant[
:note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`
]
call[name[PropDeclrCollector]._updateParamsFrom, parameter[name[self], name[otherObj], name[updater], name[exclude], name[prefix]]]
|
keyword[def] identifier[_updateParamsFrom] ( identifier[self] , identifier[otherObj] , identifier[updater] = identifier[_default_param_updater] ,
identifier[exclude] = keyword[None] , identifier[prefix] = literal[string] ):
literal[string]
identifier[PropDeclrCollector] . identifier[_updateParamsFrom] ( identifier[self] , identifier[otherObj] , identifier[updater] , identifier[exclude] , identifier[prefix] )
|
def _updateParamsFrom(self, otherObj, updater=_default_param_updater, exclude=None, prefix=''):
"""
:note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`
"""
PropDeclrCollector._updateParamsFrom(self, otherObj, updater, exclude, prefix)
|
def track_execution(cmd, project, experiment, **kwargs):
"""Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
"""
runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)
yield runner
runner.commit()
|
def function[track_execution, parameter[cmd, project, experiment]]:
constant[Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
]
variable[runner] assign[=] call[name[RunInfo], parameter[]]
<ast.Yield object at 0x7da2046219f0>
call[name[runner].commit, parameter[]]
|
keyword[def] identifier[track_execution] ( identifier[cmd] , identifier[project] , identifier[experiment] ,** identifier[kwargs] ):
literal[string]
identifier[runner] = identifier[RunInfo] ( identifier[cmd] = identifier[cmd] , identifier[project] = identifier[project] , identifier[experiment] = identifier[experiment] ,** identifier[kwargs] )
keyword[yield] identifier[runner]
identifier[runner] . identifier[commit] ()
|
def track_execution(cmd, project, experiment, **kwargs):
"""Guard the execution of the given command.
The given command (`cmd`) will be executed inside a database context.
As soon as you leave the context we will commit the transaction.
Any necessary modifications to the database can be identified inside
the context with the RunInfo object.
Args:
cmd: The command we guard.
project: The project we track for.
experiment: The experiment we track for.
Yields:
RunInfo: A context object that carries the necessary
database transaction.
"""
runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)
yield runner
runner.commit()
|
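The body yields once and commits on resumption, so the original source presumably wraps this function with `contextlib.contextmanager` (the decorator does not survive into the row). A runnable sketch under that assumption, with `RunInfo` stubbed:

```python
from contextlib import contextmanager

class RunInfo:  # stub; the real class carries the database transaction
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def commit(self):
        print('committed run of', self.cmd)

@contextmanager
def track_execution(cmd, project, experiment, **kwargs):
    runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)
    yield runner
    runner.commit()

with track_execution('make', 'my-project', 'exp-1') as run:
    run.retcode = 0  # modifications land before the final commit
```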
async def set_update_cb(self, cb):
"""Register the update callback."""
if self._report_task is not None and not self._report_task.cancelled():
self.loop.create_task(self._report_task.cancel())
self._update_cb = cb
if cb is not None:
self._report_task = self.loop.create_task(self._report())
|
<ast.AsyncFunctionDef object at 0x7da18f720880>
|
keyword[async] keyword[def] identifier[set_update_cb] ( identifier[self] , identifier[cb] ):
literal[string]
keyword[if] identifier[self] . identifier[_report_task] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[_report_task] . identifier[cancelled] ():
identifier[self] . identifier[loop] . identifier[create_task] ( identifier[self] . identifier[_report_task] . identifier[cancel] ())
identifier[self] . identifier[_update_cb] = identifier[cb]
keyword[if] identifier[cb] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_report_task] = identifier[self] . identifier[loop] . identifier[create_task] ( identifier[self] . identifier[_report] ())
|
async def set_update_cb(self, cb):
"""Register the update callback."""
if self._report_task is not None and (not self._report_task.cancelled()):
self.loop.create_task(self._report_task.cancel()) # depends on [control=['if'], data=[]]
self._update_cb = cb
if cb is not None:
self._report_task = self.loop.create_task(self._report()) # depends on [control=['if'], data=[]]
|
def read(*p):
"""Build a file path from paths and return the contents."""
with open(os.path.join(*p), 'r') as fi:
return fi.read()
|
def function[read, parameter[]]:
constant[Build a file path from paths and return the contents.]
with call[name[open], parameter[call[name[os].path.join, parameter[<ast.Starred object at 0x7da20c7cbf70>]], constant[r]]] begin[:]
return[call[name[fi].read, parameter[]]]
|
keyword[def] identifier[read] (* identifier[p] ):
literal[string]
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] (* identifier[p] ), literal[string] ) keyword[as] identifier[fi] :
keyword[return] identifier[fi] . identifier[read] ()
|
def read(*p):
"""Build a file path from paths and return the contents."""
with open(os.path.join(*p), 'r') as fi:
return fi.read() # depends on [control=['with'], data=['fi']]
|
def patch():
"""
patch the built-in `urllib/httplib/httplib.client` methods for tracing.
"""
if getattr(httplib, PATCH_FLAG, False):
return
# we set an attribute to avoid multiple wrapping
setattr(httplib, PATCH_FLAG, True)
wrapt.wrap_function_wrapper(
httplib_client_module,
'HTTPConnection._send_request',
_send_request
)
wrapt.wrap_function_wrapper(
httplib_client_module,
'HTTPConnection.getresponse',
_xray_traced_http_getresponse
)
wrapt.wrap_function_wrapper(
httplib_client_module,
'HTTPResponse.read',
_xray_traced_http_client_read
)
|
def function[patch, parameter[]]:
constant[
patch the built-in `urllib/httplib/httplib.client` methods for tracing.
]
if call[name[getattr], parameter[name[httplib], name[PATCH_FLAG], constant[False]]] begin[:]
return[None]
call[name[setattr], parameter[name[httplib], name[PATCH_FLAG], constant[True]]]
call[name[wrapt].wrap_function_wrapper, parameter[name[httplib_client_module], constant[HTTPConnection._send_request], name[_send_request]]]
call[name[wrapt].wrap_function_wrapper, parameter[name[httplib_client_module], constant[HTTPConnection.getresponse], name[_xray_traced_http_getresponse]]]
call[name[wrapt].wrap_function_wrapper, parameter[name[httplib_client_module], constant[HTTPResponse.read], name[_xray_traced_http_client_read]]]
|
keyword[def] identifier[patch] ():
literal[string]
keyword[if] identifier[getattr] ( identifier[httplib] , identifier[PATCH_FLAG] , keyword[False] ):
keyword[return]
identifier[setattr] ( identifier[httplib] , identifier[PATCH_FLAG] , keyword[True] )
identifier[wrapt] . identifier[wrap_function_wrapper] (
identifier[httplib_client_module] ,
literal[string] ,
identifier[_send_request]
)
identifier[wrapt] . identifier[wrap_function_wrapper] (
identifier[httplib_client_module] ,
literal[string] ,
identifier[_xray_traced_http_getresponse]
)
identifier[wrapt] . identifier[wrap_function_wrapper] (
identifier[httplib_client_module] ,
literal[string] ,
identifier[_xray_traced_http_client_read]
)
|
def patch():
"""
patch the built-in `urllib/httplib/httplib.client` methods for tracing.
"""
if getattr(httplib, PATCH_FLAG, False):
return # depends on [control=['if'], data=[]]
# we set an attribute to avoid multiple wrapping
setattr(httplib, PATCH_FLAG, True)
wrapt.wrap_function_wrapper(httplib_client_module, 'HTTPConnection._send_request', _send_request)
wrapt.wrap_function_wrapper(httplib_client_module, 'HTTPConnection.getresponse', _xray_traced_http_getresponse)
wrapt.wrap_function_wrapper(httplib_client_module, 'HTTPResponse.read', _xray_traced_http_client_read)
|
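The mechanism above is wrapt's attribute-path patching: every wrapper passed to `wrapt.wrap_function_wrapper` receives `(wrapped, instance, args, kwargs)` and decides when to call through. A runnable sketch against an illustrative target, not the httplib internals patched above:

```python
import json

import wrapt

def _traced(wrapped, instance, args, kwargs):
    # wrapt wrapper signature: the wrapped callable, the bound instance
    # (None for plain functions), the positional args, the keyword args.
    print('entering', wrapped.__name__)
    try:
        return wrapped(*args, **kwargs)
    finally:
        print('leaving', wrapped.__name__)

wrapt.wrap_function_wrapper(json, 'dumps', _traced)
json.dumps({'a': 1})  # prints entering/leaving around the real call
```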
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
:obj:`all_methods` and :obj:`all_methods_P` as a set of methods for
which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods, methods_P = [], []
Tmins, Tmaxs = [], []
if self.CASRN in _VDISaturationDict:
methods.append(VDI_TABULAR)
Ts, props = VDI_tabular_data(self.CASRN, 'Mu (g)')
self.VDI_Tmin = Ts[0]
self.VDI_Tmax = Ts[-1]
self.tabular_data[VDI_TABULAR] = (Ts, props)
Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)
if has_CoolProp and self.CASRN in coolprop_dict:
methods.append(COOLPROP); methods_P.append(COOLPROP)
self.CP_f = coolprop_fluids[self.CASRN]
Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tmax)
if self.CASRN in Perrys2_312.index:
methods.append(DIPPR_PERRY_8E)
_, C1, C2, C3, C4, self.Perrys2_312_Tmin, self.Perrys2_312_Tmax = _Perrys2_312_values[Perrys2_312.index.get_loc(self.CASRN)].tolist()
self.Perrys2_312_coeffs = [C1, C2, C3, C4]
Tmins.append(self.Perrys2_312_Tmin); Tmaxs.append(self.Perrys2_312_Tmax)
if self.CASRN in VDI_PPDS_8.index:
methods.append(VDI_PPDS)
self.VDI_PPDS_coeffs = _VDI_PPDS_8_values[VDI_PPDS_8.index.get_loc(self.CASRN)].tolist()[1:]
self.VDI_PPDS_coeffs.reverse() # in format for Horner's scheme
if all([self.Tc, self.Pc, self.MW]):
methods.append(GHARAGHEIZI)
methods.append(YOON_THODOS)
methods.append(STIEL_THODOS)
Tmins.append(0); Tmaxs.append(5E3) # Intelligently set limit
# GHARAGHEIZI turns nonsensical at ~15 K, YOON_THODOS fine to 0 K,
# same as STIEL_THODOS
if all([self.Tc, self.Pc, self.Zc, self.MW]):
methods.append(LUCAS_GAS)
Tmins.append(0); Tmaxs.append(1E3)
self.all_methods = set(methods)
self.all_methods_P = set(methods_P)
if Tmins and Tmaxs:
self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
|
def function[load_all_methods, parameter[self]]:
constant[Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
:obj:`all_methods` and :obj:`all_methods_P` as a set of methods for
which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
]
<ast.Tuple object at 0x7da20c795ed0> assign[=] tuple[[<ast.List object at 0x7da20c796830>, <ast.List object at 0x7da20c7966b0>]]
<ast.Tuple object at 0x7da20c794eb0> assign[=] tuple[[<ast.List object at 0x7da20c7940d0>, <ast.List object at 0x7da20c794e20>]]
if compare[name[self].CASRN in name[_VDISaturationDict]] begin[:]
call[name[methods].append, parameter[name[VDI_TABULAR]]]
<ast.Tuple object at 0x7da20c794be0> assign[=] call[name[VDI_tabular_data], parameter[name[self].CASRN, constant[Mu (g)]]]
name[self].VDI_Tmin assign[=] call[name[Ts]][constant[0]]
name[self].VDI_Tmax assign[=] call[name[Ts]][<ast.UnaryOp object at 0x7da20c795060>]
call[name[self].tabular_data][name[VDI_TABULAR]] assign[=] tuple[[<ast.Name object at 0x7da20c7960e0>, <ast.Name object at 0x7da20c795c60>]]
call[name[Tmins].append, parameter[name[self].VDI_Tmin]]
call[name[Tmaxs].append, parameter[name[self].VDI_Tmax]]
if <ast.BoolOp object at 0x7da20c796740> begin[:]
call[name[methods].append, parameter[name[COOLPROP]]]
call[name[methods_P].append, parameter[name[COOLPROP]]]
name[self].CP_f assign[=] call[name[coolprop_fluids]][name[self].CASRN]
call[name[Tmins].append, parameter[name[self].CP_f.Tmin]]
call[name[Tmaxs].append, parameter[name[self].CP_f.Tmax]]
if compare[name[self].CASRN in name[Perrys2_312].index] begin[:]
call[name[methods].append, parameter[name[DIPPR_PERRY_8E]]]
<ast.Tuple object at 0x7da20c794550> assign[=] call[call[name[_Perrys2_312_values]][call[name[Perrys2_312].index.get_loc, parameter[name[self].CASRN]]].tolist, parameter[]]
name[self].Perrys2_312_coeffs assign[=] list[[<ast.Name object at 0x7da20c7949d0>, <ast.Name object at 0x7da20c795660>, <ast.Name object at 0x7da20c795690>, <ast.Name object at 0x7da20c794dc0>]]
call[name[Tmins].append, parameter[name[self].Perrys2_312_Tmin]]
call[name[Tmaxs].append, parameter[name[self].Perrys2_312_Tmax]]
if compare[name[self].CASRN in name[VDI_PPDS_8].index] begin[:]
call[name[methods].append, parameter[name[VDI_PPDS]]]
name[self].VDI_PPDS_coeffs assign[=] call[call[call[name[_VDI_PPDS_8_values]][call[name[VDI_PPDS_8].index.get_loc, parameter[name[self].CASRN]]].tolist, parameter[]]][<ast.Slice object at 0x7da20c992230>]
call[name[self].VDI_PPDS_coeffs.reverse, parameter[]]
if call[name[all], parameter[list[[<ast.Attribute object at 0x7da20c990040>, <ast.Attribute object at 0x7da20c9909a0>, <ast.Attribute object at 0x7da20c992b30>]]]] begin[:]
call[name[methods].append, parameter[name[GHARAGHEIZI]]]
call[name[methods].append, parameter[name[YOON_THODOS]]]
call[name[methods].append, parameter[name[STIEL_THODOS]]]
call[name[Tmins].append, parameter[constant[0]]]
call[name[Tmaxs].append, parameter[constant[5000.0]]]
if call[name[all], parameter[list[[<ast.Attribute object at 0x7da20c9914e0>, <ast.Attribute object at 0x7da20c992e30>, <ast.Attribute object at 0x7da20c992080>, <ast.Attribute object at 0x7da20c990910>]]]] begin[:]
call[name[methods].append, parameter[name[LUCAS_GAS]]]
call[name[Tmins].append, parameter[constant[0]]]
call[name[Tmaxs].append, parameter[constant[1000.0]]]
name[self].all_methods assign[=] call[name[set], parameter[name[methods]]]
name[self].all_methods_P assign[=] call[name[set], parameter[name[methods_P]]]
if <ast.BoolOp object at 0x7da20c991a80> begin[:]
<ast.Tuple object at 0x7da20c991e40> assign[=] tuple[[<ast.Call object at 0x7da20c9902e0>, <ast.Call object at 0x7da20c992c20>]]
|
keyword[def] identifier[load_all_methods] ( identifier[self] ):
literal[string]
identifier[methods] , identifier[methods_P] =[],[]
identifier[Tmins] , identifier[Tmaxs] =[],[]
keyword[if] identifier[self] . identifier[CASRN] keyword[in] identifier[_VDISaturationDict] :
identifier[methods] . identifier[append] ( identifier[VDI_TABULAR] )
identifier[Ts] , identifier[props] = identifier[VDI_tabular_data] ( identifier[self] . identifier[CASRN] , literal[string] )
identifier[self] . identifier[VDI_Tmin] = identifier[Ts] [ literal[int] ]
identifier[self] . identifier[VDI_Tmax] = identifier[Ts] [- literal[int] ]
identifier[self] . identifier[tabular_data] [ identifier[VDI_TABULAR] ]=( identifier[Ts] , identifier[props] )
identifier[Tmins] . identifier[append] ( identifier[self] . identifier[VDI_Tmin] ); identifier[Tmaxs] . identifier[append] ( identifier[self] . identifier[VDI_Tmax] )
keyword[if] identifier[has_CoolProp] keyword[and] identifier[self] . identifier[CASRN] keyword[in] identifier[coolprop_dict] :
identifier[methods] . identifier[append] ( identifier[COOLPROP] ); identifier[methods_P] . identifier[append] ( identifier[COOLPROP] )
identifier[self] . identifier[CP_f] = identifier[coolprop_fluids] [ identifier[self] . identifier[CASRN] ]
identifier[Tmins] . identifier[append] ( identifier[self] . identifier[CP_f] . identifier[Tmin] ); identifier[Tmaxs] . identifier[append] ( identifier[self] . identifier[CP_f] . identifier[Tmax] )
keyword[if] identifier[self] . identifier[CASRN] keyword[in] identifier[Perrys2_312] . identifier[index] :
identifier[methods] . identifier[append] ( identifier[DIPPR_PERRY_8E] )
identifier[_] , identifier[C1] , identifier[C2] , identifier[C3] , identifier[C4] , identifier[self] . identifier[Perrys2_312_Tmin] , identifier[self] . identifier[Perrys2_312_Tmax] = identifier[_Perrys2_312_values] [ identifier[Perrys2_312] . identifier[index] . identifier[get_loc] ( identifier[self] . identifier[CASRN] )]. identifier[tolist] ()
identifier[self] . identifier[Perrys2_312_coeffs] =[ identifier[C1] , identifier[C2] , identifier[C3] , identifier[C4] ]
identifier[Tmins] . identifier[append] ( identifier[self] . identifier[Perrys2_312_Tmin] ); identifier[Tmaxs] . identifier[append] ( identifier[self] . identifier[Perrys2_312_Tmax] )
keyword[if] identifier[self] . identifier[CASRN] keyword[in] identifier[VDI_PPDS_8] . identifier[index] :
identifier[methods] . identifier[append] ( identifier[VDI_PPDS] )
identifier[self] . identifier[VDI_PPDS_coeffs] = identifier[_VDI_PPDS_8_values] [ identifier[VDI_PPDS_8] . identifier[index] . identifier[get_loc] ( identifier[self] . identifier[CASRN] )]. identifier[tolist] ()[ literal[int] :]
identifier[self] . identifier[VDI_PPDS_coeffs] . identifier[reverse] ()
keyword[if] identifier[all] ([ identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[MW] ]):
identifier[methods] . identifier[append] ( identifier[GHARAGHEIZI] )
identifier[methods] . identifier[append] ( identifier[YOON_THODOS] )
identifier[methods] . identifier[append] ( identifier[STIEL_THODOS] )
identifier[Tmins] . identifier[append] ( literal[int] ); identifier[Tmaxs] . identifier[append] ( literal[int] )
keyword[if] identifier[all] ([ identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[Zc] , identifier[self] . identifier[MW] ]):
identifier[methods] . identifier[append] ( identifier[LUCAS_GAS] )
identifier[Tmins] . identifier[append] ( literal[int] ); identifier[Tmaxs] . identifier[append] ( literal[int] )
identifier[self] . identifier[all_methods] = identifier[set] ( identifier[methods] )
identifier[self] . identifier[all_methods_P] = identifier[set] ( identifier[methods_P] )
keyword[if] identifier[Tmins] keyword[and] identifier[Tmaxs] :
identifier[self] . identifier[Tmin] , identifier[self] . identifier[Tmax] = identifier[min] ( identifier[Tmins] ), identifier[max] ( identifier[Tmaxs] )
|
def load_all_methods(self):
"""Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
:obj:`all_methods` and :obj:`all_methods_P` as a set of methods for
which the data exists.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
"""
(methods, methods_P) = ([], [])
(Tmins, Tmaxs) = ([], [])
if self.CASRN in _VDISaturationDict:
methods.append(VDI_TABULAR)
(Ts, props) = VDI_tabular_data(self.CASRN, 'Mu (g)')
self.VDI_Tmin = Ts[0]
self.VDI_Tmax = Ts[-1]
self.tabular_data[VDI_TABULAR] = (Ts, props)
Tmins.append(self.VDI_Tmin)
Tmaxs.append(self.VDI_Tmax) # depends on [control=['if'], data=[]]
if has_CoolProp and self.CASRN in coolprop_dict:
methods.append(COOLPROP)
methods_P.append(COOLPROP)
self.CP_f = coolprop_fluids[self.CASRN]
Tmins.append(self.CP_f.Tmin)
Tmaxs.append(self.CP_f.Tmax) # depends on [control=['if'], data=[]]
if self.CASRN in Perrys2_312.index:
methods.append(DIPPR_PERRY_8E)
(_, C1, C2, C3, C4, self.Perrys2_312_Tmin, self.Perrys2_312_Tmax) = _Perrys2_312_values[Perrys2_312.index.get_loc(self.CASRN)].tolist()
self.Perrys2_312_coeffs = [C1, C2, C3, C4]
Tmins.append(self.Perrys2_312_Tmin)
Tmaxs.append(self.Perrys2_312_Tmax) # depends on [control=['if'], data=[]]
if self.CASRN in VDI_PPDS_8.index:
methods.append(VDI_PPDS)
self.VDI_PPDS_coeffs = _VDI_PPDS_8_values[VDI_PPDS_8.index.get_loc(self.CASRN)].tolist()[1:]
self.VDI_PPDS_coeffs.reverse() # in format for Horner's scheme # depends on [control=['if'], data=[]]
if all([self.Tc, self.Pc, self.MW]):
methods.append(GHARAGHEIZI)
methods.append(YOON_THODOS)
methods.append(STIEL_THODOS)
Tmins.append(0)
Tmaxs.append(5000.0) # Intelligently set limit # depends on [control=['if'], data=[]]
# GHARAGHEIZI turns nonsensical at ~15 K, YOON_THODOS fine to 0 K,
# same as STIEL_THODOS
if all([self.Tc, self.Pc, self.Zc, self.MW]):
methods.append(LUCAS_GAS)
Tmins.append(0)
Tmaxs.append(1000.0) # depends on [control=['if'], data=[]]
self.all_methods = set(methods)
self.all_methods_P = set(methods_P)
if Tmins and Tmaxs:
(self.Tmin, self.Tmax) = (min(Tmins), max(Tmaxs)) # depends on [control=['if'], data=[]]
|
def update_source(self, id, **kwargs): # noqa: E501
"""Update metadata (description or tags) for a specific source. # noqa: E501
The \"hidden\" property is stored as a tag. To set the value, add \"hidden\": <value> to the list of tags. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_source(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Source body: Example Body: <pre>{ \"sourceName\": \"source.name\", \"tags\": {\"sourceTag1\": true}, \"description\": \"Source Description\" }</pre>
:return: ResponseContainerSource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_source_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_source_with_http_info(id, **kwargs) # noqa: E501
return data
|
def function[update_source, parameter[self, id]]:
constant[Update metadata (description or tags) for a specific source. # noqa: E501
The "hidden" property is stored as a tag. To set the value, add "hidden": <value> to the list of tags. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_source(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Source body: Example Body: <pre>{ "sourceName": "source.name", "tags": {"sourceTag1": true}, "description": "Source Description" }</pre>
:return: ResponseContainerSource
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].update_source_with_http_info, parameter[name[id]]]]
|
keyword[def] identifier[update_source] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[update_source_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[update_source_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def update_source(self, id, **kwargs): # noqa: E501
'Update metadata (description or tags) for a specific source. # noqa: E501\n\n The "hidden" property is stored as a tag. To set the value, add "hidden": <value> to the list of tags. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_source(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: (required)\n :param Source body: Example Body: <pre>{ "sourceName": "source.name", "tags": {"sourceTag1": true}, "description": "Source Description" }</pre>\n :return: ResponseContainerSource\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_source_with_http_info(id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.update_source_with_http_info(id, **kwargs) # noqa: E501
return data
|
def chunks(event_list, chunk_size):
"""Yield successive n-sized chunks from the event list."""
for i in range(0, len(event_list), chunk_size):
yield event_list[i:i + chunk_size]
|
def function[chunks, parameter[event_list, chunk_size]]:
constant[Yield successive n-sized chunks from the event list.]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[event_list]]], name[chunk_size]]]] begin[:]
<ast.Yield object at 0x7da1b1252a40>
|
keyword[def] identifier[chunks] ( identifier[event_list] , identifier[chunk_size] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[event_list] ), identifier[chunk_size] ):
keyword[yield] identifier[event_list] [ identifier[i] : identifier[i] + identifier[chunk_size] ]
|
def chunks(event_list, chunk_size):
"""Yield successive n-sized chunks from the event list."""
for i in range(0, len(event_list), chunk_size):
yield event_list[i:i + chunk_size] # depends on [control=['for'], data=['i']]
|
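A runnable sketch of the chunking generator above:

```python
def chunks(event_list, chunk_size):
    """Yield successive n-sized chunks from the event list."""
    for i in range(0, len(event_list), chunk_size):
        yield event_list[i:i + chunk_size]

print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]
```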
def _read_certificates(self):
"""
Reads end-entity and intermediate certificate information from the
TLS session
"""
cert_context_pointer_pointer = new(crypt32, 'CERT_CONTEXT **')
result = secur32.QueryContextAttributesW(
self._context_handle_pointer,
Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT,
cert_context_pointer_pointer
)
handle_error(result, TLSError)
cert_context_pointer = unwrap(cert_context_pointer_pointer)
cert_context_pointer = cast(crypt32, 'CERT_CONTEXT *', cert_context_pointer)
cert_context = unwrap(cert_context_pointer)
cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))
self._certificate = x509.Certificate.load(cert_data)
self._intermediates = []
store_handle = None
try:
store_handle = cert_context.hCertStore
context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, null())
while not is_null(context_pointer):
context = unwrap(context_pointer)
data = bytes_from_buffer(context.pbCertEncoded, native(int, context.cbCertEncoded))
# The cert store seems to include the end-entity certificate as
# the last entry, but we already have that from the struct.
if data != cert_data:
self._intermediates.append(x509.Certificate.load(data))
context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, context_pointer)
finally:
if store_handle:
crypt32.CertCloseStore(store_handle, 0)
|
def function[_read_certificates, parameter[self]]:
constant[
Reads end-entity and intermediate certificate information from the
TLS session
]
variable[cert_context_pointer_pointer] assign[=] call[name[new], parameter[name[crypt32], constant[CERT_CONTEXT **]]]
variable[result] assign[=] call[name[secur32].QueryContextAttributesW, parameter[name[self]._context_handle_pointer, name[Secur32Const].SECPKG_ATTR_REMOTE_CERT_CONTEXT, name[cert_context_pointer_pointer]]]
call[name[handle_error], parameter[name[result], name[TLSError]]]
variable[cert_context_pointer] assign[=] call[name[unwrap], parameter[name[cert_context_pointer_pointer]]]
variable[cert_context_pointer] assign[=] call[name[cast], parameter[name[crypt32], constant[CERT_CONTEXT *], name[cert_context_pointer]]]
variable[cert_context] assign[=] call[name[unwrap], parameter[name[cert_context_pointer]]]
variable[cert_data] assign[=] call[name[bytes_from_buffer], parameter[name[cert_context].pbCertEncoded, call[name[native], parameter[name[int], name[cert_context].cbCertEncoded]]]]
name[self]._certificate assign[=] call[name[x509].Certificate.load, parameter[name[cert_data]]]
name[self]._intermediates assign[=] list[[]]
variable[store_handle] assign[=] constant[None]
<ast.Try object at 0x7da1b00da9b0>
|
keyword[def] identifier[_read_certificates] ( identifier[self] ):
literal[string]
identifier[cert_context_pointer_pointer] = identifier[new] ( identifier[crypt32] , literal[string] )
identifier[result] = identifier[secur32] . identifier[QueryContextAttributesW] (
identifier[self] . identifier[_context_handle_pointer] ,
identifier[Secur32Const] . identifier[SECPKG_ATTR_REMOTE_CERT_CONTEXT] ,
identifier[cert_context_pointer_pointer]
)
identifier[handle_error] ( identifier[result] , identifier[TLSError] )
identifier[cert_context_pointer] = identifier[unwrap] ( identifier[cert_context_pointer_pointer] )
identifier[cert_context_pointer] = identifier[cast] ( identifier[crypt32] , literal[string] , identifier[cert_context_pointer] )
identifier[cert_context] = identifier[unwrap] ( identifier[cert_context_pointer] )
identifier[cert_data] = identifier[bytes_from_buffer] ( identifier[cert_context] . identifier[pbCertEncoded] , identifier[native] ( identifier[int] , identifier[cert_context] . identifier[cbCertEncoded] ))
identifier[self] . identifier[_certificate] = identifier[x509] . identifier[Certificate] . identifier[load] ( identifier[cert_data] )
identifier[self] . identifier[_intermediates] =[]
identifier[store_handle] = keyword[None]
keyword[try] :
identifier[store_handle] = identifier[cert_context] . identifier[hCertStore]
identifier[context_pointer] = identifier[crypt32] . identifier[CertEnumCertificatesInStore] ( identifier[store_handle] , identifier[null] ())
keyword[while] keyword[not] identifier[is_null] ( identifier[context_pointer] ):
identifier[context] = identifier[unwrap] ( identifier[context_pointer] )
identifier[data] = identifier[bytes_from_buffer] ( identifier[context] . identifier[pbCertEncoded] , identifier[native] ( identifier[int] , identifier[context] . identifier[cbCertEncoded] ))
keyword[if] identifier[data] != identifier[cert_data] :
identifier[self] . identifier[_intermediates] . identifier[append] ( identifier[x509] . identifier[Certificate] . identifier[load] ( identifier[data] ))
identifier[context_pointer] = identifier[crypt32] . identifier[CertEnumCertificatesInStore] ( identifier[store_handle] , identifier[context_pointer] )
keyword[finally] :
keyword[if] identifier[store_handle] :
identifier[crypt32] . identifier[CertCloseStore] ( identifier[store_handle] , literal[int] )
|
def _read_certificates(self):
"""
Reads end-entity and intermediate certificate information from the
TLS session
"""
cert_context_pointer_pointer = new(crypt32, 'CERT_CONTEXT **')
result = secur32.QueryContextAttributesW(self._context_handle_pointer, Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT, cert_context_pointer_pointer)
handle_error(result, TLSError)
cert_context_pointer = unwrap(cert_context_pointer_pointer)
cert_context_pointer = cast(crypt32, 'CERT_CONTEXT *', cert_context_pointer)
cert_context = unwrap(cert_context_pointer)
cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))
self._certificate = x509.Certificate.load(cert_data)
self._intermediates = []
store_handle = None
try:
store_handle = cert_context.hCertStore
context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, null())
while not is_null(context_pointer):
context = unwrap(context_pointer)
data = bytes_from_buffer(context.pbCertEncoded, native(int, context.cbCertEncoded))
# The cert store seems to include the end-entity certificate as
# the last entry, but we already have that from the struct.
if data != cert_data:
self._intermediates.append(x509.Certificate.load(data)) # depends on [control=['if'], data=['data']]
context_pointer = crypt32.CertEnumCertificatesInStore(store_handle, context_pointer) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
finally:
if store_handle:
crypt32.CertCloseStore(store_handle, 0) # depends on [control=['if'], data=[]]
|
def crc16(cmd, use_byte=False):
"""
    CRC16 checksum.
    - If ``use_byte`` is enabled, the result is returned as bytes.
    :param cmd: the command without its CRC check
    :type cmd:
    :param use_byte: whether to return a bytes value
    :type use_byte:
    :return: the CRC value
    :rtype:
"""
crc = 0xFFFF
    # the CRC16 calculation works on bytes
if hasattr(cmd, 'encode'):
cmd = bytes.fromhex(cmd)
for _ in cmd:
c = _ & 0x00FF
crc ^= c
for i in range(8):
if crc & 0x0001 > 0:
crc >>= 1
crc ^= 0xA001
else:
crc >>= 1
    # Modbus CRC16 transmits the low byte before the high byte
t = [(crc & 0x00FF), (crc >> 8 & 0xFF)]
crc = '%02X%02X' % (t[0], t[1])
if use_byte:
crc = bytes.fromhex(crc)
return crc
|
def function[crc16, parameter[cmd, use_byte]]:
constant[
    CRC16 checksum.
    - If ``use_byte`` is enabled, the result is returned as bytes.
    :param cmd: the command without its CRC check
    :type cmd:
    :param use_byte: whether to return a bytes value
    :type use_byte:
    :return: the CRC value
    :rtype:
]
variable[crc] assign[=] constant[65535]
if call[name[hasattr], parameter[name[cmd], constant[encode]]] begin[:]
variable[cmd] assign[=] call[name[bytes].fromhex, parameter[name[cmd]]]
for taget[name[_]] in starred[name[cmd]] begin[:]
variable[c] assign[=] binary_operation[name[_] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]
<ast.AugAssign object at 0x7da1b1589420>
for taget[name[i]] in starred[call[name[range], parameter[constant[8]]]] begin[:]
if compare[binary_operation[name[crc] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b1589c00>
<ast.AugAssign object at 0x7da1b15883d0>
variable[t] assign[=] list[[<ast.BinOp object at 0x7da1b158a3e0>, <ast.BinOp object at 0x7da1b15887f0>]]
variable[crc] assign[=] binary_operation[constant[%02X%02X] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b158a320>, <ast.Subscript object at 0x7da1b15890f0>]]]
if name[use_byte] begin[:]
variable[crc] assign[=] call[name[bytes].fromhex, parameter[name[crc]]]
return[name[crc]]
|
keyword[def] identifier[crc16] ( identifier[cmd] , identifier[use_byte] = keyword[False] ):
literal[string]
identifier[crc] = literal[int]
keyword[if] identifier[hasattr] ( identifier[cmd] , literal[string] ):
identifier[cmd] = identifier[bytes] . identifier[fromhex] ( identifier[cmd] )
keyword[for] identifier[_] keyword[in] identifier[cmd] :
identifier[c] = identifier[_] & literal[int]
identifier[crc] ^= identifier[c]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
keyword[if] identifier[crc] & literal[int] > literal[int] :
identifier[crc] >>= literal[int]
identifier[crc] ^= literal[int]
keyword[else] :
identifier[crc] >>= literal[int]
identifier[t] =[( identifier[crc] & literal[int] ),( identifier[crc] >> literal[int] & literal[int] )]
identifier[crc] = literal[string] %( identifier[t] [ literal[int] ], identifier[t] [ literal[int] ])
keyword[if] identifier[use_byte] :
identifier[crc] = identifier[bytes] . identifier[fromhex] ( identifier[crc] )
keyword[return] identifier[crc]
|
def crc16(cmd, use_byte=False):
"""
    CRC16 checksum.
    - If ``use_byte`` is enabled, the result is returned as bytes.
    :param cmd: the command without its CRC check
    :type cmd:
    :param use_byte: whether to return a bytes value
    :type use_byte:
    :return: the CRC value
    :rtype:
"""
crc = 65535
    # the CRC16 calculation works on bytes
if hasattr(cmd, 'encode'):
cmd = bytes.fromhex(cmd) # depends on [control=['if'], data=[]]
for _ in cmd:
c = _ & 255
crc ^= c
for i in range(8):
if crc & 1 > 0:
crc >>= 1
crc ^= 40961 # depends on [control=['if'], data=[]]
else:
crc >>= 1 # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['_']]
    # Modbus CRC16 transmits the low byte before the high byte
t = [crc & 255, crc >> 8 & 255]
crc = '%02X%02X' % (t[0], t[1])
if use_byte:
crc = bytes.fromhex(crc) # depends on [control=['if'], data=[]]
return crc
|
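A hedged usage sketch for crc16(), built around the classic Modbus "read holding registers" request; the CRC noted in the comment is the value commonly quoted for this frame.

cmd = '010300000001'          # Modbus request without its CRC, as a hex string
crc = crc16(cmd)              # hex string, low byte first; commonly quoted as '840A'
frame_hex = cmd + crc         # complete frame as hex text
frame_raw = bytes.fromhex(cmd) + crc16(cmd, use_byte=True)  # same frame as raw bytes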
def do_cmd_output_sub(self, cmd, regexp, subst):
'''Apply a regexp substitution to command output archived by sosreport.
cmd is the command name from which output is collected (i.e. excluding
parameters). The regexp can be a string or a compiled re object. The
substitution string, subst, is a string that replaces each occurrence
of regexp in each file collected from cmd. Internally 'cmd' is treated
as a glob with a leading and trailing '*' and each matching file from
the current module's command list is subjected to the replacement.
This function returns the number of replacements made.
'''
globstr = '*' + cmd + '*'
self._log_debug("substituting '%s' for '%s' in commands matching '%s'"
% (subst, regexp, globstr))
if not self.executed_commands:
return 0
replacements = None
try:
for called in self.executed_commands:
# was anything collected?
if called['file'] is None:
continue
if called['binary'] == 'yes':
self._log_warn("Cannot apply regex substitution to binary"
" output: '%s'" % called['exe'])
continue
if fnmatch.fnmatch(called['exe'], globstr):
path = os.path.join(self.commons['cmddir'], called['file'])
self._log_debug("applying substitution to '%s'" % path)
readable = self.archive.open_file(path)
result, replacements = re.subn(
regexp, subst, readable.read())
if replacements:
self.archive.add_string(result, path)
except Exception as e:
msg = "regex substitution failed for '%s' with: '%s'"
self._log_error(msg % (called['exe'], e))
replacements = None
return replacements
|
def function[do_cmd_output_sub, parameter[self, cmd, regexp, subst]]:
constant[Apply a regexp substitution to command output archived by sosreport.
cmd is the command name from which output is collected (i.e. excluding
parameters). The regexp can be a string or a compiled re object. The
substitution string, subst, is a string that replaces each occurrence
of regexp in each file collected from cmd. Internally 'cmd' is treated
as a glob with a leading and trailing '*' and each matching file from
the current module's command list is subjected to the replacement.
This function returns the number of replacements made.
]
variable[globstr] assign[=] binary_operation[binary_operation[constant[*] + name[cmd]] + constant[*]]
call[name[self]._log_debug, parameter[binary_operation[constant[substituting '%s' for '%s' in commands matching '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00f7c0>, <ast.Name object at 0x7da18f00c2e0>, <ast.Name object at 0x7da18f00d2d0>]]]]]
if <ast.UnaryOp object at 0x7da18f00e2c0> begin[:]
return[constant[0]]
variable[replacements] assign[=] constant[None]
<ast.Try object at 0x7da18f00ef50>
return[name[replacements]]
|
keyword[def] identifier[do_cmd_output_sub] ( identifier[self] , identifier[cmd] , identifier[regexp] , identifier[subst] ):
literal[string]
identifier[globstr] = literal[string] + identifier[cmd] + literal[string]
identifier[self] . identifier[_log_debug] ( literal[string]
%( identifier[subst] , identifier[regexp] , identifier[globstr] ))
keyword[if] keyword[not] identifier[self] . identifier[executed_commands] :
keyword[return] literal[int]
identifier[replacements] = keyword[None]
keyword[try] :
keyword[for] identifier[called] keyword[in] identifier[self] . identifier[executed_commands] :
keyword[if] identifier[called] [ literal[string] ] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[called] [ literal[string] ]== literal[string] :
identifier[self] . identifier[_log_warn] ( literal[string]
literal[string] % identifier[called] [ literal[string] ])
keyword[continue]
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[called] [ literal[string] ], identifier[globstr] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[commons] [ literal[string] ], identifier[called] [ literal[string] ])
identifier[self] . identifier[_log_debug] ( literal[string] % identifier[path] )
identifier[readable] = identifier[self] . identifier[archive] . identifier[open_file] ( identifier[path] )
identifier[result] , identifier[replacements] = identifier[re] . identifier[subn] (
identifier[regexp] , identifier[subst] , identifier[readable] . identifier[read] ())
keyword[if] identifier[replacements] :
identifier[self] . identifier[archive] . identifier[add_string] ( identifier[result] , identifier[path] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[msg] = literal[string]
identifier[self] . identifier[_log_error] ( identifier[msg] %( identifier[called] [ literal[string] ], identifier[e] ))
identifier[replacements] = keyword[None]
keyword[return] identifier[replacements]
|
def do_cmd_output_sub(self, cmd, regexp, subst):
"""Apply a regexp substitution to command output archived by sosreport.
cmd is the command name from which output is collected (i.e. excluding
parameters). The regexp can be a string or a compiled re object. The
substitution string, subst, is a string that replaces each occurrence
of regexp in each file collected from cmd. Internally 'cmd' is treated
as a glob with a leading and trailing '*' and each matching file from
the current module's command list is subjected to the replacement.
This function returns the number of replacements made.
"""
globstr = '*' + cmd + '*'
self._log_debug("substituting '%s' for '%s' in commands matching '%s'" % (subst, regexp, globstr))
if not self.executed_commands:
return 0 # depends on [control=['if'], data=[]]
replacements = None
try:
for called in self.executed_commands:
# was anything collected?
if called['file'] is None:
continue # depends on [control=['if'], data=[]]
if called['binary'] == 'yes':
self._log_warn("Cannot apply regex substitution to binary output: '%s'" % called['exe'])
continue # depends on [control=['if'], data=[]]
if fnmatch.fnmatch(called['exe'], globstr):
path = os.path.join(self.commons['cmddir'], called['file'])
self._log_debug("applying substitution to '%s'" % path)
readable = self.archive.open_file(path)
(result, replacements) = re.subn(regexp, subst, readable.read())
if replacements:
self.archive.add_string(result, path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['called']] # depends on [control=['try'], data=[]]
except Exception as e:
msg = "regex substitution failed for '%s' with: '%s'"
self._log_error(msg % (called['exe'], e))
replacements = None # depends on [control=['except'], data=['e']]
return replacements
|
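The core of do_cmd_output_sub() is re.subn over each matching command's output. A standalone sketch of that step, using illustrative data:

import re

output = 'password=hunter2 user=root password=hunter2'
result, count = re.subn(r'password=\S+', 'password=********', output)
# count == 2; both password values in result are masked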
def _run_and_log(cmd, ostree_repo_path, error_msg, wd=None):
""" run provided command and log all of its output; set path to ostree repo """
logger.debug("running command %s", cmd)
kwargs = {
"stderr": subprocess.STDOUT,
"env": os.environ.copy(),
}
if ostree_repo_path:
# must not exist, ostree will create it
kwargs["env"]["ATOMIC_OSTREE_REPO"] = ostree_repo_path
if wd:
kwargs["cwd"] = wd
try:
out = subprocess.check_output(cmd, **kwargs)
except subprocess.CalledProcessError as ex:
logger.error(ex.output)
logger.error(error_msg)
raise
logger.debug("%s", out)
|
def function[_run_and_log, parameter[cmd, ostree_repo_path, error_msg, wd]]:
constant[ run provided command and log all of its output; set path to ostree repo ]
call[name[logger].debug, parameter[constant[running command %s], name[cmd]]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ff8ca0>, <ast.Constant object at 0x7da1b0ffb340>], [<ast.Attribute object at 0x7da1b0ff8310>, <ast.Call object at 0x7da1b0ffa560>]]
if name[ostree_repo_path] begin[:]
call[call[name[kwargs]][constant[env]]][constant[ATOMIC_OSTREE_REPO]] assign[=] name[ostree_repo_path]
if name[wd] begin[:]
call[name[kwargs]][constant[cwd]] assign[=] name[wd]
<ast.Try object at 0x7da1b0ff9300>
call[name[logger].debug, parameter[constant[%s], name[out]]]
|
keyword[def] identifier[_run_and_log] ( identifier[cmd] , identifier[ostree_repo_path] , identifier[error_msg] , identifier[wd] = keyword[None] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[cmd] )
identifier[kwargs] ={
literal[string] : identifier[subprocess] . identifier[STDOUT] ,
literal[string] : identifier[os] . identifier[environ] . identifier[copy] (),
}
keyword[if] identifier[ostree_repo_path] :
identifier[kwargs] [ literal[string] ][ literal[string] ]= identifier[ostree_repo_path]
keyword[if] identifier[wd] :
identifier[kwargs] [ literal[string] ]= identifier[wd]
keyword[try] :
identifier[out] = identifier[subprocess] . identifier[check_output] ( identifier[cmd] ,** identifier[kwargs] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[ex] :
identifier[logger] . identifier[error] ( identifier[ex] . identifier[output] )
identifier[logger] . identifier[error] ( identifier[error_msg] )
keyword[raise]
identifier[logger] . identifier[debug] ( literal[string] , identifier[out] )
|
def _run_and_log(cmd, ostree_repo_path, error_msg, wd=None):
""" run provided command and log all of its output; set path to ostree repo """
logger.debug('running command %s', cmd)
kwargs = {'stderr': subprocess.STDOUT, 'env': os.environ.copy()}
if ostree_repo_path:
# must not exist, ostree will create it
kwargs['env']['ATOMIC_OSTREE_REPO'] = ostree_repo_path # depends on [control=['if'], data=[]]
if wd:
kwargs['cwd'] = wd # depends on [control=['if'], data=[]]
try:
out = subprocess.check_output(cmd, **kwargs) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as ex:
logger.error(ex.output)
logger.error(error_msg)
raise # depends on [control=['except'], data=['ex']]
logger.debug('%s', out)
|
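A hypothetical call to _run_and_log(); the command, repo path, and error message are placeholders.

_run_and_log(
    ['ostree', '--version'],    # any command whose output should be logged
    '/tmp/ostree-repo',         # exported as ATOMIC_OSTREE_REPO; must not exist yet
    'ostree invocation failed',
    wd='/tmp',
)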
def SMGetJobDictionaries(self, domain='kSMDomainSystemLaunchd'):
"""Copy all Job Dictionaries from the ServiceManagement.
Args:
domain: The name of a constant in Foundation referencing the domain.
Will copy all launchd services by default.
Returns:
A marshalled python list of dicts containing the job dictionaries.
"""
cfstring_launchd = ctypes.c_void_p.in_dll(self.dll, domain)
return CFArray(self.dll.SMCopyAllJobDictionaries(cfstring_launchd))
|
def function[SMGetJobDictionaries, parameter[self, domain]]:
constant[Copy all Job Dictionaries from the ServiceManagement.
Args:
domain: The name of a constant in Foundation referencing the domain.
Will copy all launchd services by default.
Returns:
A marshalled python list of dicts containing the job dictionaries.
]
variable[cfstring_launchd] assign[=] call[name[ctypes].c_void_p.in_dll, parameter[name[self].dll, name[domain]]]
return[call[name[CFArray], parameter[call[name[self].dll.SMCopyAllJobDictionaries, parameter[name[cfstring_launchd]]]]]]
|
keyword[def] identifier[SMGetJobDictionaries] ( identifier[self] , identifier[domain] = literal[string] ):
literal[string]
identifier[cfstring_launchd] = identifier[ctypes] . identifier[c_void_p] . identifier[in_dll] ( identifier[self] . identifier[dll] , identifier[domain] )
keyword[return] identifier[CFArray] ( identifier[self] . identifier[dll] . identifier[SMCopyAllJobDictionaries] ( identifier[cfstring_launchd] ))
|
def SMGetJobDictionaries(self, domain='kSMDomainSystemLaunchd'):
"""Copy all Job Dictionaries from the ServiceManagement.
Args:
domain: The name of a constant in Foundation referencing the domain.
Will copy all launchd services by default.
Returns:
A marshalled python list of dicts containing the job dictionaries.
"""
cfstring_launchd = ctypes.c_void_p.in_dll(self.dll, domain)
return CFArray(self.dll.SMCopyAllJobDictionaries(cfstring_launchd))
|
def add_vertices(self, vertices, vid_field=None):
"""
Add vertices to the SGraph. Vertices should be input as a list of
:class:`~turicreate.Vertex` objects, an :class:`~turicreate.SFrame`, or a
pandas DataFrame. If vertices are specified by SFrame or DataFrame,
``vid_field`` specifies which column contains the vertex ID. Remaining
columns are assumed to hold additional vertex attributes. If these
attributes are not already present in the graph's vertex data, they are
added, with existing vertices acquiring the value ``None``.
Parameters
----------
vertices : Vertex | list [Vertex] | pandas.DataFrame | SFrame
Vertex data. If the vertices are in an SFrame or DataFrame, then
``vid_field`` specifies the column containing the vertex IDs.
Additional columns are treated as vertex attributes.
vid_field : string, optional
Column in the DataFrame or SFrame to use as vertex ID. Required if
vertices is an SFrame. If ``vertices`` is a DataFrame and
``vid_field`` is not specified, the row index is used as vertex ID.
Returns
-------
out : SGraph
A new SGraph with vertices added.
See Also
--------
vertices, SFrame, add_edges
Notes
-----
- If vertices are added with indices that already exist in the graph,
they are overwritten completely. All attributes for these vertices
will conform to the specification in this method.
Examples
--------
>>> from turicreate import SGraph, Vertex, SFrame
>>> g = SGraph()
Add a single vertex.
>>> g = g.add_vertices(Vertex(0, attr={'breed': 'labrador'}))
Add a list of vertices.
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add vertices from an SFrame.
>>> sf_vert = SFrame({'id': [0, 1, 2], 'breed':['lab', 'lab', 'vizsla']})
>>> g = g.add_vertices(sf_vert, vid_field='id')
"""
sf = _vertex_data_to_sframe(vertices, vid_field)
with cython_context():
proxy = self.__proxy__.add_vertices(sf.__proxy__, _VID_COLUMN)
return SGraph(_proxy=proxy)
|
def function[add_vertices, parameter[self, vertices, vid_field]]:
constant[
Add vertices to the SGraph. Vertices should be input as a list of
:class:`~turicreate.Vertex` objects, an :class:`~turicreate.SFrame`, or a
pandas DataFrame. If vertices are specified by SFrame or DataFrame,
``vid_field`` specifies which column contains the vertex ID. Remaining
columns are assumed to hold additional vertex attributes. If these
attributes are not already present in the graph's vertex data, they are
added, with existing vertices acquiring the value ``None``.
Parameters
----------
vertices : Vertex | list [Vertex] | pandas.DataFrame | SFrame
Vertex data. If the vertices are in an SFrame or DataFrame, then
``vid_field`` specifies the column containing the vertex IDs.
Additional columns are treated as vertex attributes.
vid_field : string, optional
Column in the DataFrame or SFrame to use as vertex ID. Required if
vertices is an SFrame. If ``vertices`` is a DataFrame and
``vid_field`` is not specified, the row index is used as vertex ID.
Returns
-------
out : SGraph
A new SGraph with vertices added.
See Also
--------
vertices, SFrame, add_edges
Notes
-----
- If vertices are added with indices that already exist in the graph,
they are overwritten completely. All attributes for these vertices
will conform to the specification in this method.
Examples
--------
>>> from turicreate import SGraph, Vertex, SFrame
>>> g = SGraph()
Add a single vertex.
>>> g = g.add_vertices(Vertex(0, attr={'breed': 'labrador'}))
Add a list of vertices.
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add vertices from an SFrame.
>>> sf_vert = SFrame({'id': [0, 1, 2], 'breed':['lab', 'lab', 'vizsla']})
>>> g = g.add_vertices(sf_vert, vid_field='id')
]
variable[sf] assign[=] call[name[_vertex_data_to_sframe], parameter[name[vertices], name[vid_field]]]
with call[name[cython_context], parameter[]] begin[:]
variable[proxy] assign[=] call[name[self].__proxy__.add_vertices, parameter[name[sf].__proxy__, name[_VID_COLUMN]]]
return[call[name[SGraph], parameter[]]]
|
keyword[def] identifier[add_vertices] ( identifier[self] , identifier[vertices] , identifier[vid_field] = keyword[None] ):
literal[string]
identifier[sf] = identifier[_vertex_data_to_sframe] ( identifier[vertices] , identifier[vid_field] )
keyword[with] identifier[cython_context] ():
identifier[proxy] = identifier[self] . identifier[__proxy__] . identifier[add_vertices] ( identifier[sf] . identifier[__proxy__] , identifier[_VID_COLUMN] )
keyword[return] identifier[SGraph] ( identifier[_proxy] = identifier[proxy] )
|
def add_vertices(self, vertices, vid_field=None):
"""
Add vertices to the SGraph. Vertices should be input as a list of
:class:`~turicreate.Vertex` objects, an :class:`~turicreate.SFrame`, or a
pandas DataFrame. If vertices are specified by SFrame or DataFrame,
``vid_field`` specifies which column contains the vertex ID. Remaining
columns are assumed to hold additional vertex attributes. If these
attributes are not already present in the graph's vertex data, they are
added, with existing vertices acquiring the value ``None``.
Parameters
----------
vertices : Vertex | list [Vertex] | pandas.DataFrame | SFrame
Vertex data. If the vertices are in an SFrame or DataFrame, then
``vid_field`` specifies the column containing the vertex IDs.
Additional columns are treated as vertex attributes.
vid_field : string, optional
Column in the DataFrame or SFrame to use as vertex ID. Required if
vertices is an SFrame. If ``vertices`` is a DataFrame and
``vid_field`` is not specified, the row index is used as vertex ID.
Returns
-------
out : SGraph
A new SGraph with vertices added.
See Also
--------
vertices, SFrame, add_edges
Notes
-----
- If vertices are added with indices that already exist in the graph,
they are overwritten completely. All attributes for these vertices
will conform to the specification in this method.
Examples
--------
>>> from turicreate import SGraph, Vertex, SFrame
>>> g = SGraph()
Add a single vertex.
>>> g = g.add_vertices(Vertex(0, attr={'breed': 'labrador'}))
Add a list of vertices.
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add vertices from an SFrame.
>>> sf_vert = SFrame({'id': [0, 1, 2], 'breed':['lab', 'lab', 'vizsla']})
>>> g = g.add_vertices(sf_vert, vid_field='id')
"""
sf = _vertex_data_to_sframe(vertices, vid_field)
with cython_context():
proxy = self.__proxy__.add_vertices(sf.__proxy__, _VID_COLUMN)
return SGraph(_proxy=proxy) # depends on [control=['with'], data=[]]
|
def indices(db=None):
'''
Show all indices in the database
CLI Example:
.. code-block:: bash
salt '*' sqlite3.indices /root/test.db
'''
cur = _connect(db)
if not cur:
return False
cur.execute(
"SELECT name FROM sqlite_master WHERE type='index' ORDER BY name;"
)
rows = cur.fetchall()
return rows
|
def function[indices, parameter[db]]:
constant[
Show all indices in the database
CLI Example:
.. code-block:: bash
salt '*' sqlite3.indices /root/test.db
]
variable[cur] assign[=] call[name[_connect], parameter[name[db]]]
if <ast.UnaryOp object at 0x7da1b2045540> begin[:]
return[constant[False]]
call[name[cur].execute, parameter[constant[SELECT name FROM sqlite_master WHERE type='index' ORDER BY name;]]]
variable[rows] assign[=] call[name[cur].fetchall, parameter[]]
return[name[rows]]
|
keyword[def] identifier[indices] ( identifier[db] = keyword[None] ):
literal[string]
identifier[cur] = identifier[_connect] ( identifier[db] )
keyword[if] keyword[not] identifier[cur] :
keyword[return] keyword[False]
identifier[cur] . identifier[execute] (
literal[string]
)
identifier[rows] = identifier[cur] . identifier[fetchall] ()
keyword[return] identifier[rows]
|
def indices(db=None):
"""
Show all indices in the database
CLI Example:
.. code-block:: bash
salt '*' sqlite3.indices /root/test.db
"""
cur = _connect(db)
if not cur:
return False # depends on [control=['if'], data=[]]
cur.execute("SELECT name FROM sqlite_master WHERE type='index' ORDER BY name;")
rows = cur.fetchall()
return rows
|
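The query that indices() issues can be reproduced with the stdlib sqlite3 driver; a self-contained sketch:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (x INTEGER)')
conn.execute('CREATE INDEX idx_x ON t (x)')
cur = conn.execute("SELECT name FROM sqlite_master WHERE type='index' ORDER BY name;")
print(cur.fetchall())  # [('idx_x',)]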
def install_setuptools(python_cmd='python', use_sudo=True):
"""
Install the latest version of `setuptools`_.
::
import burlap
burlap.python_setuptools.install_setuptools()
"""
setuptools_version = package_version('setuptools', python_cmd)
distribute_version = package_version('distribute', python_cmd)
if setuptools_version is None:
_install_from_scratch(python_cmd, use_sudo)
else:
if distribute_version is None:
_upgrade_from_setuptools(python_cmd, use_sudo)
else:
_upgrade_from_distribute(python_cmd, use_sudo)
|
def function[install_setuptools, parameter[python_cmd, use_sudo]]:
constant[
Install the latest version of `setuptools`_.
::
import burlap
burlap.python_setuptools.install_setuptools()
]
variable[setuptools_version] assign[=] call[name[package_version], parameter[constant[setuptools], name[python_cmd]]]
variable[distribute_version] assign[=] call[name[package_version], parameter[constant[distribute], name[python_cmd]]]
if compare[name[setuptools_version] is constant[None]] begin[:]
call[name[_install_from_scratch], parameter[name[python_cmd], name[use_sudo]]]
|
keyword[def] identifier[install_setuptools] ( identifier[python_cmd] = literal[string] , identifier[use_sudo] = keyword[True] ):
literal[string]
identifier[setuptools_version] = identifier[package_version] ( literal[string] , identifier[python_cmd] )
identifier[distribute_version] = identifier[package_version] ( literal[string] , identifier[python_cmd] )
keyword[if] identifier[setuptools_version] keyword[is] keyword[None] :
identifier[_install_from_scratch] ( identifier[python_cmd] , identifier[use_sudo] )
keyword[else] :
keyword[if] identifier[distribute_version] keyword[is] keyword[None] :
identifier[_upgrade_from_setuptools] ( identifier[python_cmd] , identifier[use_sudo] )
keyword[else] :
identifier[_upgrade_from_distribute] ( identifier[python_cmd] , identifier[use_sudo] )
|
def install_setuptools(python_cmd='python', use_sudo=True):
"""
Install the latest version of `setuptools`_.
::
import burlap
burlap.python_setuptools.install_setuptools()
"""
setuptools_version = package_version('setuptools', python_cmd)
distribute_version = package_version('distribute', python_cmd)
if setuptools_version is None:
_install_from_scratch(python_cmd, use_sudo) # depends on [control=['if'], data=[]]
elif distribute_version is None:
_upgrade_from_setuptools(python_cmd, use_sudo) # depends on [control=['if'], data=[]]
else:
_upgrade_from_distribute(python_cmd, use_sudo)
|
def _trigger_events(view_obj, events_map, additional_kw=None):
""" Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
    :returns: Instance of the triggered event.
"""
if additional_kw is None:
additional_kw = {}
event_kwargs = _get_event_kwargs(view_obj)
if event_kwargs is None:
return
event_kwargs.update(additional_kw)
event_cls = _get_event_cls(view_obj, events_map)
event = event_cls(**event_kwargs)
view_obj.request.registry.notify(event)
return event
|
def function[_trigger_events, parameter[view_obj, events_map, additional_kw]]:
constant[ Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
    :returns: Instance of the triggered event.
]
if compare[name[additional_kw] is constant[None]] begin[:]
variable[additional_kw] assign[=] dictionary[[], []]
variable[event_kwargs] assign[=] call[name[_get_event_kwargs], parameter[name[view_obj]]]
if compare[name[event_kwargs] is constant[None]] begin[:]
return[None]
call[name[event_kwargs].update, parameter[name[additional_kw]]]
variable[event_cls] assign[=] call[name[_get_event_cls], parameter[name[view_obj], name[events_map]]]
variable[event] assign[=] call[name[event_cls], parameter[]]
call[name[view_obj].request.registry.notify, parameter[name[event]]]
return[name[event]]
|
keyword[def] identifier[_trigger_events] ( identifier[view_obj] , identifier[events_map] , identifier[additional_kw] = keyword[None] ):
literal[string]
keyword[if] identifier[additional_kw] keyword[is] keyword[None] :
identifier[additional_kw] ={}
identifier[event_kwargs] = identifier[_get_event_kwargs] ( identifier[view_obj] )
keyword[if] identifier[event_kwargs] keyword[is] keyword[None] :
keyword[return]
identifier[event_kwargs] . identifier[update] ( identifier[additional_kw] )
identifier[event_cls] = identifier[_get_event_cls] ( identifier[view_obj] , identifier[events_map] )
identifier[event] = identifier[event_cls] (** identifier[event_kwargs] )
identifier[view_obj] . identifier[request] . identifier[registry] . identifier[notify] ( identifier[event] )
keyword[return] identifier[event]
|
def _trigger_events(view_obj, events_map, additional_kw=None):
""" Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
    :returns: Instance of the triggered event.
"""
if additional_kw is None:
additional_kw = {} # depends on [control=['if'], data=['additional_kw']]
event_kwargs = _get_event_kwargs(view_obj)
if event_kwargs is None:
return # depends on [control=['if'], data=[]]
event_kwargs.update(additional_kw)
event_cls = _get_event_cls(view_obj, events_map)
event = event_cls(**event_kwargs)
view_obj.request.registry.notify(event)
return event
|
def run_freidman_supsmu(x, y, bass_enhancement=0.0):
"""Run the FORTRAN supersmoother."""
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
flags = numpy.zeros((N, 7))
mace.supsmu(x, y, weight, 1, 0.0, bass_enhancement, results, flags)
return results
|
def function[run_freidman_supsmu, parameter[x, y, bass_enhancement]]:
constant[Run the FORTRAN supersmoother.]
variable[N] assign[=] call[name[len], parameter[name[x]]]
variable[weight] assign[=] call[name[numpy].ones, parameter[name[N]]]
variable[results] assign[=] call[name[numpy].zeros, parameter[name[N]]]
variable[flags] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da1b1a6ef50>, <ast.Constant object at 0x7da1b1a6ecb0>]]]]
call[name[mace].supsmu, parameter[name[x], name[y], name[weight], constant[1], constant[0.0], name[bass_enhancement], name[results], name[flags]]]
return[name[results]]
|
keyword[def] identifier[run_freidman_supsmu] ( identifier[x] , identifier[y] , identifier[bass_enhancement] = literal[int] ):
literal[string]
identifier[N] = identifier[len] ( identifier[x] )
identifier[weight] = identifier[numpy] . identifier[ones] ( identifier[N] )
identifier[results] = identifier[numpy] . identifier[zeros] ( identifier[N] )
identifier[flags] = identifier[numpy] . identifier[zeros] (( identifier[N] , literal[int] ))
identifier[mace] . identifier[supsmu] ( identifier[x] , identifier[y] , identifier[weight] , literal[int] , literal[int] , identifier[bass_enhancement] , identifier[results] , identifier[flags] )
keyword[return] identifier[results]
|
def run_freidman_supsmu(x, y, bass_enhancement=0.0):
"""Run the FORTRAN supersmoother."""
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
flags = numpy.zeros((N, 7))
mace.supsmu(x, y, weight, 1, 0.0, bass_enhancement, results, flags)
return results
|
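A usage sketch for run_freidman_supsmu(), assuming the FORTRAN `mace` extension module is importable (which this sketch cannot verify); the noisy signal is illustrative.

import numpy

x = numpy.linspace(0.0, 1.0, 200)
y = numpy.sin(4 * numpy.pi * x) + 0.3 * numpy.random.randn(200)
smoothed = run_freidman_supsmu(x, y, bass_enhancement=0.5)  # array of the same length as x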
def num_cpus():
"""Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
# Many thanks to the Parallel Python project (http://www.parallelpython.com)
# for the names of the keys we needed to look up for this function. This
# code was inspired by their equivalent function.
ncpufuncs = {'Linux':_num_cpus_unix,
'Darwin':_num_cpus_darwin,
'Windows':_num_cpus_windows,
# On Vista, python < 2.5.2 has a bug and returns 'Microsoft'
# See http://bugs.python.org/issue1082 for details.
'Microsoft':_num_cpus_windows,
}
ncpufunc = ncpufuncs.get(platform.system(),
# default to unix version (Solaris, AIX, etc)
_num_cpus_unix)
try:
ncpus = max(1,int(ncpufunc()))
except:
ncpus = 1
return ncpus
|
def function[num_cpus, parameter[]]:
constant[Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
]
variable[ncpufuncs] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c5150>, <ast.Constant object at 0x7da20c6c6710>, <ast.Constant object at 0x7da20c6c40d0>, <ast.Constant object at 0x7da20e9b0760>], [<ast.Name object at 0x7da20e9b0d30>, <ast.Name object at 0x7da20e9b3490>, <ast.Name object at 0x7da20e9b2800>, <ast.Name object at 0x7da20e9b04c0>]]
variable[ncpufunc] assign[=] call[name[ncpufuncs].get, parameter[call[name[platform].system, parameter[]], name[_num_cpus_unix]]]
<ast.Try object at 0x7da20e9b07c0>
return[name[ncpus]]
|
keyword[def] identifier[num_cpus] ():
literal[string]
identifier[ncpufuncs] ={ literal[string] : identifier[_num_cpus_unix] ,
literal[string] : identifier[_num_cpus_darwin] ,
literal[string] : identifier[_num_cpus_windows] ,
literal[string] : identifier[_num_cpus_windows] ,
}
identifier[ncpufunc] = identifier[ncpufuncs] . identifier[get] ( identifier[platform] . identifier[system] (),
identifier[_num_cpus_unix] )
keyword[try] :
identifier[ncpus] = identifier[max] ( literal[int] , identifier[int] ( identifier[ncpufunc] ()))
keyword[except] :
identifier[ncpus] = literal[int]
keyword[return] identifier[ncpus]
|
def num_cpus():
"""Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
# Many thanks to the Parallel Python project (http://www.parallelpython.com)
# for the names of the keys we needed to look up for this function. This
# code was inspired by their equivalent function.
# On Vista, python < 2.5.2 has a bug and returns 'Microsoft'
# See http://bugs.python.org/issue1082 for details.
ncpufuncs = {'Linux': _num_cpus_unix, 'Darwin': _num_cpus_darwin, 'Windows': _num_cpus_windows, 'Microsoft': _num_cpus_windows}
# default to unix version (Solaris, AIX, etc)
ncpufunc = ncpufuncs.get(platform.system(), _num_cpus_unix)
try:
ncpus = max(1, int(ncpufunc())) # depends on [control=['try'], data=[]]
except:
ncpus = 1 # depends on [control=['except'], data=[]]
return ncpus
|
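Typical use of num_cpus() is sizing a worker pool; a small sketch:

from multiprocessing import Pool

workers = num_cpus()          # falls back to 1 if detection fails
pool = Pool(processes=workers)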
def get_withdrawal(self, withdrawal_id, **params):
"""https://developers.coinbase.com/api/v2#show-a-withdrawal"""
return self.api_client.get_withdrawal(self.id, withdrawal_id, **params)
|
def function[get_withdrawal, parameter[self, withdrawal_id]]:
constant[https://developers.coinbase.com/api/v2#show-a-withdrawal]
return[call[name[self].api_client.get_withdrawal, parameter[name[self].id, name[withdrawal_id]]]]
|
keyword[def] identifier[get_withdrawal] ( identifier[self] , identifier[withdrawal_id] ,** identifier[params] ):
literal[string]
keyword[return] identifier[self] . identifier[api_client] . identifier[get_withdrawal] ( identifier[self] . identifier[id] , identifier[withdrawal_id] ,** identifier[params] )
|
def get_withdrawal(self, withdrawal_id, **params):
"""https://developers.coinbase.com/api/v2#show-a-withdrawal"""
return self.api_client.get_withdrawal(self.id, withdrawal_id, **params)
|
def _sanitize_string(unsanitized, itemsep, kvsep):
"""
sanitizes a string that contains multiple key/value items
:param unsanitized: the unsanitized string
:param itemsep: string that separates items
:param kvsep: string that separates key from value
:return: a sanitized string
"""
sanitized = []
kvs = unsanitized.split(itemsep)
for kv in kvs:
kv = kv.split(kvsep)
if len(kv) == 2:
sanitized.append((kv[0], _sanitize(kv[0], kv[1])))
else:
sanitized.append(kv)
return itemsep.join(kvsep.join(kv) for kv in sanitized)
|
def function[_sanitize_string, parameter[unsanitized, itemsep, kvsep]]:
constant[
sanitizes a string that contains multiple key/value items
:param unsanitized: the unsanitized string
:param itemsep: string that separates items
:param kvsep: string that separates key from value
:return: a sanitized string
]
variable[sanitized] assign[=] list[[]]
variable[kvs] assign[=] call[name[unsanitized].split, parameter[name[itemsep]]]
for taget[name[kv]] in starred[name[kvs]] begin[:]
variable[kv] assign[=] call[name[kv].split, parameter[name[kvsep]]]
if compare[call[name[len], parameter[name[kv]]] equal[==] constant[2]] begin[:]
call[name[sanitized].append, parameter[tuple[[<ast.Subscript object at 0x7da1b1bed150>, <ast.Call object at 0x7da1b1bef4f0>]]]]
return[call[name[itemsep].join, parameter[<ast.GeneratorExp object at 0x7da1b1bee440>]]]
|
keyword[def] identifier[_sanitize_string] ( identifier[unsanitized] , identifier[itemsep] , identifier[kvsep] ):
literal[string]
identifier[sanitized] =[]
identifier[kvs] = identifier[unsanitized] . identifier[split] ( identifier[itemsep] )
keyword[for] identifier[kv] keyword[in] identifier[kvs] :
identifier[kv] = identifier[kv] . identifier[split] ( identifier[kvsep] )
keyword[if] identifier[len] ( identifier[kv] )== literal[int] :
identifier[sanitized] . identifier[append] (( identifier[kv] [ literal[int] ], identifier[_sanitize] ( identifier[kv] [ literal[int] ], identifier[kv] [ literal[int] ])))
keyword[else] :
identifier[sanitized] . identifier[append] ( identifier[kv] )
keyword[return] identifier[itemsep] . identifier[join] ( identifier[kvsep] . identifier[join] ( identifier[kv] ) keyword[for] identifier[kv] keyword[in] identifier[sanitized] )
|
def _sanitize_string(unsanitized, itemsep, kvsep):
"""
sanitizes a string that contains multiple key/value items
:param unsanitized: the unsanitized string
:param itemsep: string that separates items
:param kvsep: string that separates key from value
:return: a sanitized string
"""
sanitized = []
kvs = unsanitized.split(itemsep)
for kv in kvs:
kv = kv.split(kvsep)
if len(kv) == 2:
sanitized.append((kv[0], _sanitize(kv[0], kv[1]))) # depends on [control=['if'], data=[]]
else:
sanitized.append(kv) # depends on [control=['for'], data=['kv']]
return itemsep.join((kvsep.join(kv) for kv in sanitized))
|
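A hypothetical call to _sanitize_string(), assuming the unseen _sanitize(key, value) helper masks sensitive values:

raw = 'user=root&password=hunter2'
clean = _sanitize_string(raw, '&', '=')
# e.g. 'user=root&password=********', depending on what _sanitize() does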
def _merge_doc(original, to_merge):
# type: (str, str) -> str
"""Merge two usage strings together.
Args:
original: The source of headers and initial section lines.
to_merge: The source for the additional section lines to append.
Returns:
A new usage string that contains information from both usage strings.
"""
if not original:
return to_merge or ''
if not to_merge:
return original or ''
sections = []
for name in ('usage', 'arguments', 'options'):
sections.append(_merge_section(
_get_section(name, original),
_get_section(name, to_merge)
))
return format_usage('\n\n'.join(s for s in sections).rstrip())
|
def function[_merge_doc, parameter[original, to_merge]]:
constant[Merge two usage strings together.
Args:
original: The source of headers and initial section lines.
to_merge: The source for the additional section lines to append.
Returns:
A new usage string that contains information from both usage strings.
]
if <ast.UnaryOp object at 0x7da1b28bf640> begin[:]
return[<ast.BoolOp object at 0x7da1b28bd630>]
if <ast.UnaryOp object at 0x7da1b28bd4e0> begin[:]
return[<ast.BoolOp object at 0x7da1b28bd120>]
variable[sections] assign[=] list[[]]
for taget[name[name]] in starred[tuple[[<ast.Constant object at 0x7da1b28bd6c0>, <ast.Constant object at 0x7da1b28bd450>, <ast.Constant object at 0x7da1b28bd000>]]] begin[:]
call[name[sections].append, parameter[call[name[_merge_section], parameter[call[name[_get_section], parameter[name[name], name[original]]], call[name[_get_section], parameter[name[name], name[to_merge]]]]]]]
return[call[name[format_usage], parameter[call[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da1b28adae0>]].rstrip, parameter[]]]]]
|
keyword[def] identifier[_merge_doc] ( identifier[original] , identifier[to_merge] ):
literal[string]
keyword[if] keyword[not] identifier[original] :
keyword[return] identifier[to_merge] keyword[or] literal[string]
keyword[if] keyword[not] identifier[to_merge] :
keyword[return] identifier[original] keyword[or] literal[string]
identifier[sections] =[]
keyword[for] identifier[name] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[sections] . identifier[append] ( identifier[_merge_section] (
identifier[_get_section] ( identifier[name] , identifier[original] ),
identifier[_get_section] ( identifier[name] , identifier[to_merge] )
))
keyword[return] identifier[format_usage] ( literal[string] . identifier[join] ( identifier[s] keyword[for] identifier[s] keyword[in] identifier[sections] ). identifier[rstrip] ())
|
def _merge_doc(original, to_merge):
# type: (str, str) -> str
'Merge two usage strings together.\n\n Args:\n original: The source of headers and initial section lines.\n to_merge: The source for the additional section lines to append.\n\n Returns:\n A new usage string that contains information from both usage strings.\n '
if not original:
return to_merge or '' # depends on [control=['if'], data=[]]
if not to_merge:
return original or '' # depends on [control=['if'], data=[]]
sections = []
for name in ('usage', 'arguments', 'options'):
sections.append(_merge_section(_get_section(name, original), _get_section(name, to_merge))) # depends on [control=['for'], data=['name']]
return format_usage('\n\n'.join((s for s in sections)).rstrip())
|
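A hedged sketch of merging two docopt-style usage strings; the exact result depends on the unseen _get_section/_merge_section/format_usage helpers.

original = 'Usage:\n    tool run\n\nOptions:\n    -v  Verbose output'
extra = 'Usage:\n    tool stop\n\nOptions:\n    -q  Quiet output'
merged = _merge_doc(original, extra)
# merged combines the usage, arguments, and options sections of both inputs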
def comment_issue(self, issue_id, body):
"""
Comment to an issue.
        :param issue_id: the id of the issue
:param body: the comment body
:return:
"""
request_url = "{}issue/{}/comment".format(self.create_basic_url(),
issue_id)
payload = {'comment': body}
return_value = self._call_api(request_url,
method='POST', data=payload)
LOG.debug(return_value)
|
def function[comment_issue, parameter[self, issue_id, body]]:
constant[
Comment to an issue.
    :param issue_id: the id of the issue
:param body: the comment body
:return:
]
variable[request_url] assign[=] call[constant[{}issue/{}/comment].format, parameter[call[name[self].create_basic_url, parameter[]], name[issue_id]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b2346170>], [<ast.Name object at 0x7da1b2345e40>]]
variable[return_value] assign[=] call[name[self]._call_api, parameter[name[request_url]]]
call[name[LOG].debug, parameter[name[return_value]]]
|
keyword[def] identifier[comment_issue] ( identifier[self] , identifier[issue_id] , identifier[body] ):
literal[string]
identifier[request_url] = literal[string] . identifier[format] ( identifier[self] . identifier[create_basic_url] (),
identifier[issue_id] )
identifier[payload] ={ literal[string] : identifier[body] }
identifier[return_value] = identifier[self] . identifier[_call_api] ( identifier[request_url] ,
identifier[method] = literal[string] , identifier[data] = identifier[payload] )
identifier[LOG] . identifier[debug] ( identifier[return_value] )
|
def comment_issue(self, issue_id, body):
"""
Comment to an issue.
    :param issue_id: the id of the issue
:param body: the comment body
:return:
"""
request_url = '{}issue/{}/comment'.format(self.create_basic_url(), issue_id)
payload = {'comment': body}
return_value = self._call_api(request_url, method='POST', data=payload)
LOG.debug(return_value)
|
def kick(self, channel, nick, message=None):
"""Attempt to kick a user from a channel.
If a message is not provided, defaults to own nick.
"""
self.send("KICK", channel, nick, ":%s" % (message or self.user.nick))
|
def function[kick, parameter[self, channel, nick, message]]:
constant[Attempt to kick a user from a channel.
If a message is not provided, defaults to own nick.
]
call[name[self].send, parameter[constant[KICK], name[channel], name[nick], binary_operation[constant[:%s] <ast.Mod object at 0x7da2590d6920> <ast.BoolOp object at 0x7da20c6e5b70>]]]
|
keyword[def] identifier[kick] ( identifier[self] , identifier[channel] , identifier[nick] , identifier[message] = keyword[None] ):
literal[string]
identifier[self] . identifier[send] ( literal[string] , identifier[channel] , identifier[nick] , literal[string] %( identifier[message] keyword[or] identifier[self] . identifier[user] . identifier[nick] ))
|
def kick(self, channel, nick, message=None):
"""Attempt to kick a user from a channel.
If a message is not provided, defaults to own nick.
"""
self.send('KICK', channel, nick, ':%s' % (message or self.user.nick))
|
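A usage sketch from inside a bot built on this client class; the bot, channel, and nick are illustrative.

bot.kick('#example', 'spammer', 'flooding')   # sends roughly: KICK #example spammer :flooding
bot.kick('#example', 'spammer')               # message defaults to the bot's own nick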
def create_event_object(self,
event_type,
code,
value,
timeval=None):
"""Create an evdev style structure."""
if not timeval:
self.update_timeval()
timeval = self.timeval
try:
event_code = self.type_codes[event_type]
except KeyError:
raise UnknownEventType(
"We don't know what kind of event a %s is." % event_type)
event = struct.pack(EVENT_FORMAT,
timeval[0],
timeval[1],
event_code,
code,
value)
return event
|
def function[create_event_object, parameter[self, event_type, code, value, timeval]]:
constant[Create an evdev style structure.]
if <ast.UnaryOp object at 0x7da18ede6230> begin[:]
call[name[self].update_timeval, parameter[]]
variable[timeval] assign[=] name[self].timeval
<ast.Try object at 0x7da20cabda50>
variable[event] assign[=] call[name[struct].pack, parameter[name[EVENT_FORMAT], call[name[timeval]][constant[0]], call[name[timeval]][constant[1]], name[event_code], name[code], name[value]]]
return[name[event]]
|
keyword[def] identifier[create_event_object] ( identifier[self] ,
identifier[event_type] ,
identifier[code] ,
identifier[value] ,
identifier[timeval] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[timeval] :
identifier[self] . identifier[update_timeval] ()
identifier[timeval] = identifier[self] . identifier[timeval]
keyword[try] :
identifier[event_code] = identifier[self] . identifier[type_codes] [ identifier[event_type] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[UnknownEventType] (
literal[string] % identifier[event_type] )
identifier[event] = identifier[struct] . identifier[pack] ( identifier[EVENT_FORMAT] ,
identifier[timeval] [ literal[int] ],
identifier[timeval] [ literal[int] ],
identifier[event_code] ,
identifier[code] ,
identifier[value] )
keyword[return] identifier[event]
|
def create_event_object(self, event_type, code, value, timeval=None):
"""Create an evdev style structure."""
if not timeval:
self.update_timeval()
timeval = self.timeval # depends on [control=['if'], data=[]]
try:
event_code = self.type_codes[event_type] # depends on [control=['try'], data=[]]
except KeyError:
raise UnknownEventType("We don't know what kind of event a %s is." % event_type) # depends on [control=['except'], data=[]]
event = struct.pack(EVENT_FORMAT, timeval[0], timeval[1], event_code, code, value)
return event
|
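The packing step can be shown standalone. EVENT_FORMAT is assumed here to be 'llHHi' (the usual struct layout for a Linux input_event on 64-bit platforms) and may differ from the module's actual constant.

import struct
import time

EVENT_FORMAT = 'llHHi'  # tv_sec, tv_usec, type, code, value (assumed layout)
secs, usecs = int(time.time()), 0
packed = struct.pack(EVENT_FORMAT, secs, usecs, 1, 30, 1)  # EV_KEY, KEY_A, key press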
def queryset_iterator(queryset, chunksize=1000):
"""
    The queryset iterator helps keep memory consumption down,
    making it easier to process large result sets on weaker machines.
"""
if queryset.exists():
primary_key = 0
last_pk = queryset.order_by('-pk')[0].pk
queryset = queryset.order_by('pk')
while primary_key < last_pk:
for row in queryset.filter(pk__gt=primary_key)[:chunksize]:
primary_key = row.pk
yield row
gc.collect()
|
def function[queryset_iterator, parameter[queryset, chunksize]]:
constant[
    The queryset iterator helps keep memory consumption down,
    making it easier to process large result sets on weaker machines.
]
if call[name[queryset].exists, parameter[]] begin[:]
variable[primary_key] assign[=] constant[0]
variable[last_pk] assign[=] call[call[name[queryset].order_by, parameter[constant[-pk]]]][constant[0]].pk
variable[queryset] assign[=] call[name[queryset].order_by, parameter[constant[pk]]]
while compare[name[primary_key] less[<] name[last_pk]] begin[:]
for taget[name[row]] in starred[call[call[name[queryset].filter, parameter[]]][<ast.Slice object at 0x7da20cabf370>]] begin[:]
variable[primary_key] assign[=] name[row].pk
<ast.Yield object at 0x7da20cabfbe0>
call[name[gc].collect, parameter[]]
|
keyword[def] identifier[queryset_iterator] ( identifier[queryset] , identifier[chunksize] = literal[int] ):
literal[string]
keyword[if] identifier[queryset] . identifier[exists] ():
identifier[primary_key] = literal[int]
identifier[last_pk] = identifier[queryset] . identifier[order_by] ( literal[string] )[ literal[int] ]. identifier[pk]
identifier[queryset] = identifier[queryset] . identifier[order_by] ( literal[string] )
keyword[while] identifier[primary_key] < identifier[last_pk] :
keyword[for] identifier[row] keyword[in] identifier[queryset] . identifier[filter] ( identifier[pk__gt] = identifier[primary_key] )[: identifier[chunksize] ]:
identifier[primary_key] = identifier[row] . identifier[pk]
keyword[yield] identifier[row]
identifier[gc] . identifier[collect] ()
|
def queryset_iterator(queryset, chunksize=1000):
"""
    The queryset iterator helps keep memory consumption down,
    making it easier to process large result sets on weaker machines.
"""
if queryset.exists():
primary_key = 0
last_pk = queryset.order_by('-pk')[0].pk
queryset = queryset.order_by('pk')
while primary_key < last_pk:
for row in queryset.filter(pk__gt=primary_key)[:chunksize]:
primary_key = row.pk
yield row # depends on [control=['for'], data=['row']]
gc.collect() # depends on [control=['while'], data=['primary_key']] # depends on [control=['if'], data=[]]
|
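A usage sketch for queryset_iterator() with a Django queryset; the model and the process() helper are placeholders.

for article in queryset_iterator(Article.objects.all(), chunksize=500):
    process(article)  # rows arrive in primary-key order, 500 at a time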
def encode_params(self, data=None, **kwargs):
"""
Build the body for a text/plain request.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
charset = kwargs.get("charset", self.charset)
collection_format = kwargs.get("collection_format", self.collection_format)
output_str = kwargs.get("output_str", self.output_str)
if data is None:
return "", self.get_content_type(charset)
elif isinstance(data, (str, bytes)):
return data, self.get_content_type(charset)
elif hasattr(data, 'read'):
return data, self.get_content_type(charset)
elif collection_format == 'multi' and hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
result.append(b"=".join([self._encode(k, charset), self._encode(v, charset, output_str)]))
return b'\n'.join(result), self.get_content_type(charset)
elif collection_format == 'plain' and hasattr(data, '__iter__'):
results = []
for k, vs in to_key_val_dict(data).items():
results.append(b"=".join([self._encode(k, charset), self._encode(vs, charset, output_str)]))
return b'\n'.join(results), self.get_content_type(charset)
elif hasattr(data, '__iter__'):
results = []
for k, vs in to_key_val_dict(data).items():
if isinstance(vs, list):
v = self.COLLECTION_SEPARATORS[collection_format].join(e for e in vs)
key = k + '[]'
else:
v = vs
key = k
results.append(b"=".join([self._encode(key, charset), self._encode(v, charset, output_str)]))
return b"\n".join(results), self.get_content_type(charset)
else:
return str(data).encode(charset) if charset else str(data), self.get_content_type(charset)
|
def function[encode_params, parameter[self, data]]:
constant[
Build the body for a text/plain request.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
]
variable[charset] assign[=] call[name[kwargs].get, parameter[constant[charset], name[self].charset]]
variable[collection_format] assign[=] call[name[kwargs].get, parameter[constant[collection_format], name[self].collection_format]]
variable[output_str] assign[=] call[name[kwargs].get, parameter[constant[output_str], name[self].output_str]]
if compare[name[data] is constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b1848b20>, <ast.Call object at 0x7da1b184a770>]]]
|
keyword[def] identifier[encode_params] ( identifier[self] , identifier[data] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[charset] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[charset] )
identifier[collection_format] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[collection_format] )
identifier[output_str] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[output_str] )
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] literal[string] , identifier[self] . identifier[get_content_type] ( identifier[charset] )
keyword[elif] identifier[isinstance] ( identifier[data] ,( identifier[str] , identifier[bytes] )):
keyword[return] identifier[data] , identifier[self] . identifier[get_content_type] ( identifier[charset] )
keyword[elif] identifier[hasattr] ( identifier[data] , literal[string] ):
keyword[return] identifier[data] , identifier[self] . identifier[get_content_type] ( identifier[charset] )
keyword[elif] identifier[collection_format] == literal[string] keyword[and] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[result] =[]
keyword[for] identifier[k] , identifier[vs] keyword[in] identifier[to_key_val_list] ( identifier[data] ):
keyword[if] identifier[isinstance] ( identifier[vs] , identifier[basestring] ) keyword[or] keyword[not] identifier[hasattr] ( identifier[vs] , literal[string] ):
identifier[vs] =[ identifier[vs] ]
keyword[for] identifier[v] keyword[in] identifier[vs] :
identifier[result] . identifier[append] ( literal[string] . identifier[join] ([ identifier[self] . identifier[_encode] ( identifier[k] , identifier[charset] ), identifier[self] . identifier[_encode] ( identifier[v] , identifier[charset] , identifier[output_str] )]))
keyword[return] literal[string] . identifier[join] ( identifier[result] ), identifier[self] . identifier[get_content_type] ( identifier[charset] )
keyword[elif] identifier[collection_format] == literal[string] keyword[and] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[results] =[]
keyword[for] identifier[k] , identifier[vs] keyword[in] identifier[to_key_val_dict] ( identifier[data] ). identifier[items] ():
identifier[results] . identifier[append] ( literal[string] . identifier[join] ([ identifier[self] . identifier[_encode] ( identifier[k] , identifier[charset] ), identifier[self] . identifier[_encode] ( identifier[vs] , identifier[charset] , identifier[output_str] )]))
keyword[return] literal[string] . identifier[join] ( identifier[results] ), identifier[self] . identifier[get_content_type] ( identifier[charset] )
keyword[elif] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[results] =[]
keyword[for] identifier[k] , identifier[vs] keyword[in] identifier[to_key_val_dict] ( identifier[data] ). identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[vs] , identifier[list] ):
identifier[v] = identifier[self] . identifier[COLLECTION_SEPARATORS] [ identifier[collection_format] ]. identifier[join] ( identifier[e] keyword[for] identifier[e] keyword[in] identifier[vs] )
identifier[key] = identifier[k] + literal[string]
keyword[else] :
identifier[v] = identifier[vs]
identifier[key] = identifier[k]
identifier[results] . identifier[append] ( literal[string] . identifier[join] ([ identifier[self] . identifier[_encode] ( identifier[key] , identifier[charset] ), identifier[self] . identifier[_encode] ( identifier[v] , identifier[charset] , identifier[output_str] )]))
keyword[return] literal[string] . identifier[join] ( identifier[results] ), identifier[self] . identifier[get_content_type] ( identifier[charset] )
keyword[else] :
keyword[return] identifier[str] ( identifier[data] ). identifier[encode] ( identifier[charset] ) keyword[if] identifier[charset] keyword[else] identifier[str] ( identifier[data] ), identifier[self] . identifier[get_content_type] ( identifier[charset] )
|
def encode_params(self, data=None, **kwargs):
"""
Build the body for a text/plain request.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
charset = kwargs.get('charset', self.charset)
collection_format = kwargs.get('collection_format', self.collection_format)
output_str = kwargs.get('output_str', self.output_str)
if data is None:
return ('', self.get_content_type(charset)) # depends on [control=['if'], data=[]]
elif isinstance(data, (str, bytes)):
return (data, self.get_content_type(charset)) # depends on [control=['if'], data=[]]
elif hasattr(data, 'read'):
return (data, self.get_content_type(charset)) # depends on [control=['if'], data=[]]
elif collection_format == 'multi' and hasattr(data, '__iter__'):
result = []
for (k, vs) in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs] # depends on [control=['if'], data=[]]
for v in vs:
result.append(b'='.join([self._encode(k, charset), self._encode(v, charset, output_str)])) # depends on [control=['for'], data=['v']] # depends on [control=['for'], data=[]]
return (b'\n'.join(result), self.get_content_type(charset)) # depends on [control=['if'], data=[]]
elif collection_format == 'plain' and hasattr(data, '__iter__'):
results = []
for (k, vs) in to_key_val_dict(data).items():
results.append(b'='.join([self._encode(k, charset), self._encode(vs, charset, output_str)])) # depends on [control=['for'], data=[]]
return (b'\n'.join(results), self.get_content_type(charset)) # depends on [control=['if'], data=[]]
elif hasattr(data, '__iter__'):
results = []
for (k, vs) in to_key_val_dict(data).items():
if isinstance(vs, list):
v = self.COLLECTION_SEPARATORS[collection_format].join((e for e in vs))
key = k + '[]' # depends on [control=['if'], data=[]]
else:
v = vs
key = k
results.append(b'='.join([self._encode(key, charset), self._encode(v, charset, output_str)])) # depends on [control=['for'], data=[]]
return (b'\n'.join(results), self.get_content_type(charset)) # depends on [control=['if'], data=[]]
else:
return (str(data).encode(charset) if charset else str(data), self.get_content_type(charset))
|
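A dependency-free sketch of the 'multi' branch above: every (key, value) pair becomes one key=value line, and iterable values expand to one line per element. encode_multi is a hypothetical standalone helper, not the original method.

def encode_multi(pairs, charset="utf-8"):
    lines = []
    for k, vs in pairs:
        if isinstance(vs, str) or not hasattr(vs, "__iter__"):
            vs = [vs]  # promote scalars to a one-element list
        for v in vs:
            lines.append(str(k).encode(charset) + b"=" + str(v).encode(charset))
    return b"\n".join(lines)

assert encode_multi([("tag", ["a", "b"]), ("page", 2)]) == b"tag=a\ntag=b\npage=2"
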
def from_pandas(cls, df):
"""Create baloo DataFrame from pandas DataFrame.
Parameters
----------
df : pandas.frame.DataFrame
Returns
-------
DataFrame
"""
from pandas import DataFrame as PandasDataFrame, Index as PandasIndex, MultiIndex as PandasMultiIndex
check_type(df, PandasDataFrame)
    if isinstance(df.index, PandasMultiIndex):
        # Check MultiIndex before Index: pandas MultiIndex subclasses Index,
        # so the broader isinstance check would otherwise shadow this branch.
        baloo_index = MultiIndex.from_pandas(df.index)
    elif isinstance(df.index, PandasIndex):
        baloo_index = Index.from_pandas(df.index)
else:
raise TypeError('Cannot convert pandas index of type={} to baloo'.format(type(df.index)))
baloo_data = OrderedDict((column_name, _series_from_pandas(df[column_name], baloo_index))
for column_name in df)
return DataFrame(baloo_data, baloo_index)
|
def function[from_pandas, parameter[cls, df]]:
constant[Create baloo DataFrame from pandas DataFrame.
Parameters
----------
df : pandas.frame.DataFrame
Returns
-------
DataFrame
]
from relative_module[pandas] import module[DataFrame], module[Index], module[MultiIndex]
call[name[check_type], parameter[name[df], name[PandasDataFrame]]]
if call[name[isinstance], parameter[name[df].index, name[PandasIndex]]] begin[:]
variable[baloo_index] assign[=] call[name[Index].from_pandas, parameter[name[df].index]]
variable[baloo_data] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b09da110>]]
return[call[name[DataFrame], parameter[name[baloo_data], name[baloo_index]]]]
|
keyword[def] identifier[from_pandas] ( identifier[cls] , identifier[df] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[DataFrame] keyword[as] identifier[PandasDataFrame] , identifier[Index] keyword[as] identifier[PandasIndex] , identifier[MultiIndex] keyword[as] identifier[PandasMultiIndex]
identifier[check_type] ( identifier[df] , identifier[PandasDataFrame] )
keyword[if] identifier[isinstance] ( identifier[df] . identifier[index] , identifier[PandasMultiIndex] ):
identifier[baloo_index] = identifier[MultiIndex] . identifier[from_pandas] ( identifier[df] . identifier[index] )
keyword[elif] identifier[isinstance] ( identifier[df] . identifier[index] , identifier[PandasIndex] ):
identifier[baloo_index] = identifier[Index] . identifier[from_pandas] ( identifier[df] . identifier[index] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[df] . identifier[index] )))
identifier[baloo_data] = identifier[OrderedDict] (( identifier[column_name] , identifier[_series_from_pandas] ( identifier[df] [ identifier[column_name] ], identifier[baloo_index] ))
keyword[for] identifier[column_name] keyword[in] identifier[df] )
keyword[return] identifier[DataFrame] ( identifier[baloo_data] , identifier[baloo_index] )
|
def from_pandas(cls, df):
"""Create baloo DataFrame from pandas DataFrame.
Parameters
----------
df : pandas.frame.DataFrame
Returns
-------
DataFrame
"""
from pandas import DataFrame as PandasDataFrame, Index as PandasIndex, MultiIndex as PandasMultiIndex
check_type(df, PandasDataFrame)
    if isinstance(df.index, PandasMultiIndex):
        # MultiIndex before Index: MultiIndex subclasses Index
        baloo_index = MultiIndex.from_pandas(df.index) # depends on [control=['if'], data=[]]
    elif isinstance(df.index, PandasIndex):
        baloo_index = Index.from_pandas(df.index) # depends on [control=['if'], data=[]]
else:
raise TypeError('Cannot convert pandas index of type={} to baloo'.format(type(df.index)))
baloo_data = OrderedDict(((column_name, _series_from_pandas(df[column_name], baloo_index)) for column_name in df))
return DataFrame(baloo_data, baloo_index)
|
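A small runnable check of the index-dispatch pattern above, testing the MultiIndex branch first per the fix. It only requires pandas; describe_index is a hypothetical helper standing in for the real converters.

import pandas as pd

def describe_index(df):
    if isinstance(df.index, pd.MultiIndex):  # MultiIndex subclasses Index
        return "multi"
    elif isinstance(df.index, pd.Index):
        return "flat"
    raise TypeError("Cannot convert index of type={}".format(type(df.index)))

flat = pd.DataFrame({"a": [1, 2]}, index=pd.Index([10, 20]))
nested = pd.DataFrame({"a": [1, 2]},
                      index=pd.MultiIndex.from_tuples([(0, 1), (0, 2)]))
assert (describe_index(flat), describe_index(nested)) == ("flat", "multi")
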
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > REGISTRATION_TIMEOUT_DAYS:
return False
return True
|
def function[check_token, parameter[self, user, token]]:
constant[
Check that a password reset token is correct for a given user.
]
<ast.Try object at 0x7da20c6c7790>
<ast.Try object at 0x7da20c6c75b0>
if <ast.UnaryOp object at 0x7da20c6c4d60> begin[:]
return[constant[False]]
if compare[binary_operation[call[name[self]._num_days, parameter[call[name[self]._today, parameter[]]]] - name[ts]] greater[>] name[REGISTRATION_TIMEOUT_DAYS]] begin[:]
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[check_token] ( identifier[self] , identifier[user] , identifier[token] ):
literal[string]
keyword[try] :
identifier[ts_b36] , identifier[hash] = identifier[token] . identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[return] keyword[False]
keyword[try] :
identifier[ts] = identifier[base36_to_int] ( identifier[ts_b36] )
keyword[except] identifier[ValueError] :
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[constant_time_compare] ( identifier[self] . identifier[_make_token_with_timestamp] ( identifier[user] , identifier[ts] ), identifier[token] ):
keyword[return] keyword[False]
keyword[if] ( identifier[self] . identifier[_num_days] ( identifier[self] . identifier[_today] ())- identifier[ts] )> identifier[REGISTRATION_TIMEOUT_DAYS] :
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
(ts_b36, hash) = token.split('-') # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]]
try:
ts = base36_to_int(ts_b36) # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]]
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False # depends on [control=['if'], data=[]]
# Check the timestamp is within limit
if self._num_days(self._today()) - ts > REGISTRATION_TIMEOUT_DAYS:
return False # depends on [control=['if'], data=[]]
return True
|
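A stdlib-only sketch of the two parsing steps above. Django's base36_to_int is approximated with int(..., 36) and constant_time_compare with hmac.compare_digest; the token value itself is made up for illustration.

import hmac

token = "4ab-9f2c1d"                 # hypothetical "<ts_b36>-<hash>" token
ts_b36, _hash = token.split("-")
ts = int(ts_b36, 36)                 # days-since-epoch per the scheme above
assert ts == 4 * 36 * 36 + 10 * 36 + 11   # == 5555
assert hmac.compare_digest("abc", "abc")  # timing-safe comparison
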
def feed_forward_layers(inputs, outputs, connections):
"""
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output.
"""
required = required_for_output(inputs, outputs, connections)
layers = []
s = set(inputs)
while 1:
# Find candidate nodes c for the next layer. These nodes should connect
# a node in s to a node not in s.
c = set(b for (a, b) in connections if a in s and b not in s)
# Keep only the used nodes whose entire input set is contained in s.
t = set()
for n in c:
if n in required and all(a in s for (a, b) in connections if b == n):
t.add(n)
if not t:
break
layers.append(t)
s = s.union(t)
return layers
|
def function[feed_forward_layers, parameter[inputs, outputs, connections]]:
constant[
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output.
]
variable[required] assign[=] call[name[required_for_output], parameter[name[inputs], name[outputs], name[connections]]]
variable[layers] assign[=] list[[]]
variable[s] assign[=] call[name[set], parameter[name[inputs]]]
while constant[1] begin[:]
variable[c] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b18ac0d0>]]
variable[t] assign[=] call[name[set], parameter[]]
for taget[name[n]] in starred[name[c]] begin[:]
if <ast.BoolOp object at 0x7da1b18af1f0> begin[:]
call[name[t].add, parameter[name[n]]]
if <ast.UnaryOp object at 0x7da1b18affd0> begin[:]
break
call[name[layers].append, parameter[name[t]]]
variable[s] assign[=] call[name[s].union, parameter[name[t]]]
return[name[layers]]
|
keyword[def] identifier[feed_forward_layers] ( identifier[inputs] , identifier[outputs] , identifier[connections] ):
literal[string]
identifier[required] = identifier[required_for_output] ( identifier[inputs] , identifier[outputs] , identifier[connections] )
identifier[layers] =[]
identifier[s] = identifier[set] ( identifier[inputs] )
keyword[while] literal[int] :
identifier[c] = identifier[set] ( identifier[b] keyword[for] ( identifier[a] , identifier[b] ) keyword[in] identifier[connections] keyword[if] identifier[a] keyword[in] identifier[s] keyword[and] identifier[b] keyword[not] keyword[in] identifier[s] )
identifier[t] = identifier[set] ()
keyword[for] identifier[n] keyword[in] identifier[c] :
keyword[if] identifier[n] keyword[in] identifier[required] keyword[and] identifier[all] ( identifier[a] keyword[in] identifier[s] keyword[for] ( identifier[a] , identifier[b] ) keyword[in] identifier[connections] keyword[if] identifier[b] == identifier[n] ):
identifier[t] . identifier[add] ( identifier[n] )
keyword[if] keyword[not] identifier[t] :
keyword[break]
identifier[layers] . identifier[append] ( identifier[t] )
identifier[s] = identifier[s] . identifier[union] ( identifier[t] )
keyword[return] identifier[layers]
|
def feed_forward_layers(inputs, outputs, connections):
"""
Collect the layers whose members can be evaluated in parallel in a feed-forward network.
:param inputs: list of the network input nodes
:param outputs: list of the output node identifiers
:param connections: list of (input, output) connections in the network.
Returns a list of layers, with each layer consisting of a set of node identifiers.
Note that the returned layers do not contain nodes whose output is ultimately
never used to compute the final network output.
"""
required = required_for_output(inputs, outputs, connections)
layers = []
s = set(inputs)
while 1:
# Find candidate nodes c for the next layer. These nodes should connect
# a node in s to a node not in s.
c = set((b for (a, b) in connections if a in s and b not in s))
# Keep only the used nodes whose entire input set is contained in s.
t = set()
for n in c:
if n in required and all((a in s for (a, b) in connections if b == n)):
t.add(n) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
if not t:
break # depends on [control=['if'], data=[]]
layers.append(t)
s = s.union(t) # depends on [control=['while'], data=[]]
return layers
|
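A worked example for the layering algorithm above, runnable against the function as defined. required_for_output is assumed to return the set of nodes that can reach an output; for this tiny fully-used network it is stubbed with a constant set.

def required_for_output(inputs, outputs, connections):
    return {3, 4, 5}  # stub: all non-input nodes feed the output here

inputs, outputs = [1, 2], [5]
connections = [(1, 3), (2, 3), (2, 4), (3, 5), (4, 5)]
# First layer: {3, 4} (all their inputs are network inputs);
# second layer: {5} (its inputs 3 and 4 are now available).
assert feed_forward_layers(inputs, outputs, connections) == [{3, 4}, {5}]
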
def handle_json_GET_routes(self, params):
"""Return a list of all routes."""
schedule = self.server.schedule
result = []
for r in schedule.GetRouteList():
      result.append((r.route_id, r.route_short_name, r.route_long_name))
    result.sort(key=lambda x: x[1:3])
return result
|
def function[handle_json_GET_routes, parameter[self, params]]:
constant[Return a list of all routes.]
variable[schedule] assign[=] name[self].server.schedule
variable[result] assign[=] list[[]]
for taget[name[r]] in starred[call[name[schedule].GetRouteList, parameter[]]] begin[:]
call[name[result].append, parameter[tuple[[<ast.Attribute object at 0x7da1b17b5b10>, <ast.Attribute object at 0x7da1b17b6d70>, <ast.Attribute object at 0x7da1b17b42b0>]]]]
call[name[result].sort, parameter[]]
return[name[result]]
|
keyword[def] identifier[handle_json_GET_routes] ( identifier[self] , identifier[params] ):
literal[string]
identifier[schedule] = identifier[self] . identifier[server] . identifier[schedule]
identifier[result] =[]
keyword[for] identifier[r] keyword[in] identifier[schedule] . identifier[GetRouteList] ():
identifier[result] . identifier[append] (( identifier[r] . identifier[route_id] , identifier[r] . identifier[route_short_name] , identifier[r] . identifier[route_long_name] ))
identifier[result] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] : literal[int] ])
keyword[return] identifier[result]
|
def handle_json_GET_routes(self, params):
"""Return a list of all routes."""
schedule = self.server.schedule
result = []
for r in schedule.GetRouteList():
result.append((r.route_id, r.route_short_name, r.route_long_name)) # depends on [control=['for'], data=['r']]
result.sort(key=lambda x: x[1:3])
return result
|
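A tiny check of the sort key above: slicing x[1:3] makes the rows compare by short name first, then long name. The sample routes here are invented.

routes = [("r2", "20", "Crosstown"),
          ("r1", "10", "Downtown"),
          ("r3", "10", "Airport")]
routes.sort(key=lambda x: x[1:3])
assert [r[0] for r in routes] == ["r3", "r1", "r2"]
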
def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
"""Gives the force of solution j on solution i.
Variable name in GSA paper given in ()
args:
grav: The gravitational constant. (G)
mass_i: The mass of solution i (derived from fitness). (M_i)
mass_j: The mass of solution j (derived from fitness). (M_j)
position_i: The position of solution i. (x_i)
position_j: The position of solution j. (x_j)
returns:
numpy.array; The force vector of solution j on solution i.
"""
position_diff = numpy.subtract(position_j, position_i)
distance = numpy.linalg.norm(position_diff)
# The first 3 terms give the magnitude of the force
# The last term is a vector that provides the direction
# Epsilon prevents divide by zero errors
return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
|
def function[_gsa_force, parameter[grav, mass_i, mass_j, position_i, position_j]]:
constant[Gives the force of solution j on solution i.
Variable name in GSA paper given in ()
args:
grav: The gravitational constant. (G)
mass_i: The mass of solution i (derived from fitness). (M_i)
mass_j: The mass of solution j (derived from fitness). (M_j)
position_i: The position of solution i. (x_i)
position_j: The position of solution j. (x_j)
returns:
numpy.array; The force vector of solution j on solution i.
]
variable[position_diff] assign[=] call[name[numpy].subtract, parameter[name[position_j], name[position_i]]]
variable[distance] assign[=] call[name[numpy].linalg.norm, parameter[name[position_diff]]]
return[binary_operation[binary_operation[binary_operation[name[grav] * binary_operation[name[mass_i] * name[mass_j]]] / binary_operation[name[distance] + name[EPSILON]]] * name[position_diff]]]
|
keyword[def] identifier[_gsa_force] ( identifier[grav] , identifier[mass_i] , identifier[mass_j] , identifier[position_i] , identifier[position_j] ):
literal[string]
identifier[position_diff] = identifier[numpy] . identifier[subtract] ( identifier[position_j] , identifier[position_i] )
identifier[distance] = identifier[numpy] . identifier[linalg] . identifier[norm] ( identifier[position_diff] )
keyword[return] identifier[grav] *( identifier[mass_i] * identifier[mass_j] )/( identifier[distance] + identifier[EPSILON] )* identifier[position_diff]
|
def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
"""Gives the force of solution j on solution i.
Variable name in GSA paper given in ()
args:
grav: The gravitational constant. (G)
mass_i: The mass of solution i (derived from fitness). (M_i)
mass_j: The mass of solution j (derived from fitness). (M_j)
position_i: The position of solution i. (x_i)
position_j: The position of solution j. (x_j)
returns:
numpy.array; The force vector of solution j on solution i.
"""
position_diff = numpy.subtract(position_j, position_i)
distance = numpy.linalg.norm(position_diff)
# The first 3 terms give the magnitude of the force
# The last term is a vector that provides the direction
# Epsilon prevents divide by zero errors
return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
|
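A numeric spot-check of the force formula above: with G = 1, unit masses, and unit separation along x, the force on solution i points toward j with magnitude 1/(1 + EPSILON). It assumes numpy is available and that EPSILON is the module-level constant, taken here as 1e-10.

import numpy

EPSILON = 1e-10  # assumed value of the module-level constant

force = _gsa_force(1.0, 1.0, 1.0,
                   numpy.array([0.0, 0.0]),   # position of solution i
                   numpy.array([1.0, 0.0]))   # position of solution j
assert numpy.allclose(force, [1.0 / (1.0 + EPSILON), 0.0])
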
def K(self):
"""Normalizing constant for wishart CDF."""
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
K1 /= (
np.float_power(2, 0.5 * self.n_min * self._n_max)
* self._mgamma(0.5 * self._n_max, self.n_min)
* self._mgamma(0.5 * self.n_min, self.n_min)
)
K2 = np.float_power(
2, self.alpha * self.size + 0.5 * self.size * (self.size + 1)
)
for i in xrange(self.size):
K2 *= gamma(self.alpha + i + 1)
return K1 * K2
|
def function[K, parameter[self]]:
    constant[Normalizing constant for the Wishart CDF.]
variable[K1] assign[=] call[name[np].float_power, parameter[name[pi], binary_operation[binary_operation[constant[0.5] * name[self].n_min] * name[self].n_min]]]
<ast.AugAssign object at 0x7da20c6e4370>
variable[K2] assign[=] call[name[np].float_power, parameter[constant[2], binary_operation[binary_operation[name[self].alpha * name[self].size] + binary_operation[binary_operation[constant[0.5] * name[self].size] * binary_operation[name[self].size + constant[1]]]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[self].size]]] begin[:]
<ast.AugAssign object at 0x7da18c4cf5b0>
return[binary_operation[name[K1] * name[K2]]]
|
keyword[def] identifier[K] ( identifier[self] ):
literal[string]
identifier[K1] = identifier[np] . identifier[float_power] ( identifier[pi] , literal[int] * identifier[self] . identifier[n_min] * identifier[self] . identifier[n_min] )
identifier[K1] /=(
identifier[np] . identifier[float_power] ( literal[int] , literal[int] * identifier[self] . identifier[n_min] * identifier[self] . identifier[_n_max] )
* identifier[self] . identifier[_mgamma] ( literal[int] * identifier[self] . identifier[_n_max] , identifier[self] . identifier[n_min] )
* identifier[self] . identifier[_mgamma] ( literal[int] * identifier[self] . identifier[n_min] , identifier[self] . identifier[n_min] )
)
identifier[K2] = identifier[np] . identifier[float_power] (
literal[int] , identifier[self] . identifier[alpha] * identifier[self] . identifier[size] + literal[int] * identifier[self] . identifier[size] *( identifier[self] . identifier[size] + literal[int] )
)
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[self] . identifier[size] ):
identifier[K2] *= identifier[gamma] ( identifier[self] . identifier[alpha] + identifier[i] + literal[int] )
keyword[return] identifier[K1] * identifier[K2]
|
def K(self):
"""Normalizing constant for wishart CDF."""
K1 = np.float_power(pi, 0.5 * self.n_min * self.n_min)
K1 /= np.float_power(2, 0.5 * self.n_min * self._n_max) * self._mgamma(0.5 * self._n_max, self.n_min) * self._mgamma(0.5 * self.n_min, self.n_min)
K2 = np.float_power(2, self.alpha * self.size + 0.5 * self.size * (self.size + 1))
for i in xrange(self.size):
K2 *= gamma(self.alpha + i + 1) # depends on [control=['for'], data=['i']]
return K1 * K2
|
def add_fast(self, filepath, hashfn=None, force=False):
"""
Bespoke function to add filepaths but set shortcircuit to True, which
means only the first calculable hash will be stored. In this way only
one "fast" hashing function need be called for each filepath.
"""
if hashfn is None:
hashfn = fast_hashes
self.add(filepath, hashfn, force, shortcircuit=True)
|
def function[add_fast, parameter[self, filepath, hashfn, force]]:
constant[
Bespoke function to add filepaths but set shortcircuit to True, which
means only the first calculable hash will be stored. In this way only
one "fast" hashing function need be called for each filepath.
]
if compare[name[hashfn] is constant[None]] begin[:]
variable[hashfn] assign[=] name[fast_hashes]
call[name[self].add, parameter[name[filepath], name[hashfn], name[force]]]
|
keyword[def] identifier[add_fast] ( identifier[self] , identifier[filepath] , identifier[hashfn] = keyword[None] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[hashfn] keyword[is] keyword[None] :
identifier[hashfn] = identifier[fast_hashes]
identifier[self] . identifier[add] ( identifier[filepath] , identifier[hashfn] , identifier[force] , identifier[shortcircuit] = keyword[True] )
|
def add_fast(self, filepath, hashfn=None, force=False):
"""
Bespoke function to add filepaths but set shortcircuit to True, which
means only the first calculable hash will be stored. In this way only
one "fast" hashing function need be called for each filepath.
"""
if hashfn is None:
hashfn = fast_hashes # depends on [control=['if'], data=['hashfn']]
self.add(filepath, hashfn, force, shortcircuit=True)
|
def print_serial_number_info(self, serial_number, print_to_screen=True):
"""Print information about the run.
Args:
serial_number: serial number.
print_to_screen: runs the print statement if True,
returns txt if not.
Returns:
txt if print_to_screen is False, else None.
"""
r = self.select_serial_number_row(serial_number)
if r.empty:
warnings.warn("missing serial number")
return
txt1 = 80 * "="
txt1 += "\n"
txt1 += f" serial number {serial_number}\n"
txt1 = 80 * "-"
txt1 += "\n"
txt2 = ""
for label, value in zip(r.columns, r.values[0]):
if label in self.headers:
txt1 += f"{label}: \t {value}\n"
else:
txt2 += f"({label}: \t {value})\n"
if print_to_screen:
print(txt1)
print(80 * "-")
print(txt2)
print(80 * "=")
return
else:
return txt1
|
def function[print_serial_number_info, parameter[self, serial_number, print_to_screen]]:
constant[Print information about the run.
Args:
serial_number: serial number.
print_to_screen: runs the print statement if True,
returns txt if not.
Returns:
txt if print_to_screen is False, else None.
]
variable[r] assign[=] call[name[self].select_serial_number_row, parameter[name[serial_number]]]
if name[r].empty begin[:]
call[name[warnings].warn, parameter[constant[missing serial number]]]
return[None]
variable[txt1] assign[=] binary_operation[constant[80] * constant[=]]
<ast.AugAssign object at 0x7da18eb54610>
<ast.AugAssign object at 0x7da18eb57550>
variable[txt1] assign[=] binary_operation[constant[80] * constant[-]]
<ast.AugAssign object at 0x7da18eb549d0>
variable[txt2] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da18eb57520>, <ast.Name object at 0x7da18eb54bb0>]]] in starred[call[name[zip], parameter[name[r].columns, call[name[r].values][constant[0]]]]] begin[:]
if compare[name[label] in name[self].headers] begin[:]
<ast.AugAssign object at 0x7da18eb56bf0>
if name[print_to_screen] begin[:]
call[name[print], parameter[name[txt1]]]
call[name[print], parameter[binary_operation[constant[80] * constant[-]]]]
call[name[print], parameter[name[txt2]]]
call[name[print], parameter[binary_operation[constant[80] * constant[=]]]]
return[None]
|
keyword[def] identifier[print_serial_number_info] ( identifier[self] , identifier[serial_number] , identifier[print_to_screen] = keyword[True] ):
literal[string]
identifier[r] = identifier[self] . identifier[select_serial_number_row] ( identifier[serial_number] )
keyword[if] identifier[r] . identifier[empty] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return]
identifier[txt1] = literal[int] * literal[string]
identifier[txt1] += literal[string]
identifier[txt1] += literal[string]
 identifier[txt1] += literal[int] * literal[string]
identifier[txt1] += literal[string]
identifier[txt2] = literal[string]
keyword[for] identifier[label] , identifier[value] keyword[in] identifier[zip] ( identifier[r] . identifier[columns] , identifier[r] . identifier[values] [ literal[int] ]):
keyword[if] identifier[label] keyword[in] identifier[self] . identifier[headers] :
identifier[txt1] += literal[string]
keyword[else] :
identifier[txt2] += literal[string]
keyword[if] identifier[print_to_screen] :
identifier[print] ( identifier[txt1] )
identifier[print] ( literal[int] * literal[string] )
identifier[print] ( identifier[txt2] )
identifier[print] ( literal[int] * literal[string] )
keyword[return]
keyword[else] :
keyword[return] identifier[txt1]
|
def print_serial_number_info(self, serial_number, print_to_screen=True):
"""Print information about the run.
Args:
serial_number: serial number.
print_to_screen: runs the print statement if True,
returns txt if not.
Returns:
txt if print_to_screen is False, else None.
"""
r = self.select_serial_number_row(serial_number)
if r.empty:
warnings.warn('missing serial number')
return # depends on [control=['if'], data=[]]
txt1 = 80 * '='
txt1 += '\n'
txt1 += f' serial number {serial_number}\n'
        txt1 += 80 * '-'
txt1 += '\n'
txt2 = ''
for (label, value) in zip(r.columns, r.values[0]):
if label in self.headers:
txt1 += f'{label}: \t {value}\n' # depends on [control=['if'], data=['label']]
else:
txt2 += f'({label}: \t {value})\n' # depends on [control=['for'], data=[]]
if print_to_screen:
print(txt1)
print(80 * '-')
print(txt2)
print(80 * '=')
return # depends on [control=['if'], data=[]]
else:
return txt1
|
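A standalone sketch of the banner layout the method builds once the += fix above is applied: an '=' rule, the serial-number line, then a '-' rule before the per-column values. banner is a hypothetical helper used only to make the layout assertable.

def banner(serial_number):
    txt = 80 * "=" + "\n"
    txt += " serial number %s\n" % serial_number
    txt += 80 * "-" + "\n"
    return txt

assert banner(42).splitlines()[1] == " serial number 42"
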
def serialize(self):
"""Convert the parameter into a dictionary.
:return: The parameter dictionary.
:rtype: dict
"""
pickle = super(ResourceParameter, self).serialize()
pickle['frequency'] = self.frequency
pickle['unit'] = self._unit.serialize()
return pickle
|
def function[serialize, parameter[self]]:
constant[Convert the parameter into a dictionary.
:return: The parameter dictionary.
:rtype: dict
]
variable[pickle] assign[=] call[call[name[super], parameter[name[ResourceParameter], name[self]]].serialize, parameter[]]
call[name[pickle]][constant[frequency]] assign[=] name[self].frequency
call[name[pickle]][constant[unit]] assign[=] call[name[self]._unit.serialize, parameter[]]
return[name[pickle]]
|
keyword[def] identifier[serialize] ( identifier[self] ):
literal[string]
identifier[pickle] = identifier[super] ( identifier[ResourceParameter] , identifier[self] ). identifier[serialize] ()
identifier[pickle] [ literal[string] ]= identifier[self] . identifier[frequency]
identifier[pickle] [ literal[string] ]= identifier[self] . identifier[_unit] . identifier[serialize] ()
keyword[return] identifier[pickle]
|
def serialize(self):
"""Convert the parameter into a dictionary.
:return: The parameter dictionary.
:rtype: dict
"""
pickle = super(ResourceParameter, self).serialize()
pickle['frequency'] = self.frequency
pickle['unit'] = self._unit.serialize()
return pickle
|
def execute(self, *args, **options):
"""
        Overridden in order to send emails on unhandled exceptions.
        If an unhandled exception occurs in ``def handle(self, *args, **options)``
        and `--email-exception` is set or `self.email_exception` is set to
        True, send an email to ADMINS with the traceback and then re-raise
        the exception.
"""
try:
super(EmailNotificationCommand, self).execute(*args, **options)
except Exception:
if options['email_exception'] or getattr(self, 'email_exception', False):
self.send_email_notification(include_traceback=True)
raise
|
def function[execute, parameter[self]]:
constant[
    Overridden in order to send emails on unhandled exceptions.
    If an unhandled exception occurs in ``def handle(self, *args, **options)``
    and `--email-exception` is set or `self.email_exception` is set to
    True, send an email to ADMINS with the traceback and then re-raise
    the exception.
]
<ast.Try object at 0x7da1b17d9f90>
|
keyword[def] identifier[execute] ( identifier[self] ,* identifier[args] ,** identifier[options] ):
literal[string]
keyword[try] :
identifier[super] ( identifier[EmailNotificationCommand] , identifier[self] ). identifier[execute] (* identifier[args] ,** identifier[options] )
keyword[except] identifier[Exception] :
keyword[if] identifier[options] [ literal[string] ] keyword[or] identifier[getattr] ( identifier[self] , literal[string] , keyword[False] ):
identifier[self] . identifier[send_email_notification] ( identifier[include_traceback] = keyword[True] )
keyword[raise]
|
def execute(self, *args, **options):
"""
        Overridden in order to send emails on unhandled exceptions.
        If an unhandled exception occurs in ``def handle(self, *args, **options)``
        and `--email-exception` is set or `self.email_exception` is set to
        True, send an email to ADMINS with the traceback and then re-raise
        the exception.
"""
try:
super(EmailNotificationCommand, self).execute(*args, **options) # depends on [control=['try'], data=[]]
except Exception:
if options['email_exception'] or getattr(self, 'email_exception', False):
self.send_email_notification(include_traceback=True) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=[]]
|
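A dependency-free sketch of the same "notify, then re-raise" pattern, with a list standing in for the outgoing email so the behavior can be asserted.

def run_with_notification(fn, notify, email_exception=False):
    try:
        return fn()
    except Exception:
        if email_exception:
            notify()  # e.g. email ADMINS with the traceback
        raise

sent = []
try:
    run_with_notification(lambda: 1 / 0, lambda: sent.append("email"),
                          email_exception=True)
except ZeroDivisionError:
    pass
assert sent == ["email"]
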
def tuple_roll(self, count=0):
'''One or more die rolls.
:param count: [0] Return list of ``count`` rolls
:return: (face, value) of roll or list of same
'''
if count:
return [self._faces[random.randint(1, self._sides) - 1] for i in range(count)]
else:
return self._faces[random.randint(1, self._sides) - 1]
|
def function[tuple_roll, parameter[self, count]]:
constant[One or more die rolls.
:param count: [0] Return list of ``count`` rolls
:return: (face, value) of roll or list of same
]
if name[count] begin[:]
return[<ast.ListComp object at 0x7da2045654e0>]
|
keyword[def] identifier[tuple_roll] ( identifier[self] , identifier[count] = literal[int] ):
literal[string]
keyword[if] identifier[count] :
keyword[return] [ identifier[self] . identifier[_faces] [ identifier[random] . identifier[randint] ( literal[int] , identifier[self] . identifier[_sides] )- literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[count] )]
keyword[else] :
keyword[return] identifier[self] . identifier[_faces] [ identifier[random] . identifier[randint] ( literal[int] , identifier[self] . identifier[_sides] )- literal[int] ]
|
def tuple_roll(self, count=0):
"""One or more die rolls.
:param count: [0] Return list of ``count`` rolls
:return: (face, value) of roll or list of same
"""
if count:
return [self._faces[random.randint(1, self._sides) - 1] for i in range(count)] # depends on [control=['if'], data=[]]
else:
return self._faces[random.randint(1, self._sides) - 1]
|
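A self-contained sketch of the indexing trick above: random.randint(1, sides) - 1 selects a uniform face index. The faces list stands in for the instance's _faces list of (face, value) pairs.

import random

faces = [("one", 1), ("two", 2), ("three", 3),
         ("four", 4), ("five", 5), ("six", 6)]
single = faces[random.randint(1, len(faces)) - 1]
many = [faces[random.randint(1, len(faces)) - 1] for _ in range(10)]
assert single in faces and len(many) == 10
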
def get_urls(session, name, data, find_changelogs_fn, **kwargs):
"""
Gets URLs to changelogs.
:param session: requests Session instance
:param name: str, package name
:param data: dict, meta data
:param find_changelogs_fn: function, find_changelogs
:return: tuple, (set(changelog URLs), set(repo URLs))
"""
# check if there's a changelog in ../custom/pypi/map.txt
map = get_url_map()
if name.lower().replace("_", "-") in map:
logger.info("Package {name}'s URL is in pypi/map.txt, returning".format(name=name))
return [map[name.lower().replace("_", "-")]], set()
# if this package has valid meta data, build up a list of URL candidates we can possibly
# search for changelogs on
if "info" in data:
# add all URLs in pypi's meta data:
# {
# "info": {
# "home_page":
# "docs_url":
# "bugtrack_url":
# }
# }
candidates = [
url for url in
[data["info"].get(attr) for attr in ("home_page", "docs_url", "bugtrack_url")]
if url
]
# the latest release page on pypi might also contain links, add it
candidates.append("https://pypi.python.org/pypi/{name}/{latest_release}".format(
name=name,
latest_release=next(iter(get_releases(data)))
))
# Check the download URL as well.
if "download_url" in data:
candidates.append(data["download_url"])
if data['info']['description']:
candidates.extend(changelogs.url_re.findall(data["info"]["description"]))
return find_changelogs_fn(session=session, name=name, candidates=candidates)
return set(), set()
|
def function[get_urls, parameter[session, name, data, find_changelogs_fn]]:
constant[
Gets URLs to changelogs.
:param session: requests Session instance
:param name: str, package name
:param data: dict, meta data
:param find_changelogs_fn: function, find_changelogs
:return: tuple, (set(changelog URLs), set(repo URLs))
]
variable[map] assign[=] call[name[get_url_map], parameter[]]
if compare[call[call[name[name].lower, parameter[]].replace, parameter[constant[_], constant[-]]] in name[map]] begin[:]
call[name[logger].info, parameter[call[constant[Package {name}'s URL is in pypi/map.txt, returning].format, parameter[]]]]
return[tuple[[<ast.List object at 0x7da1b2727100>, <ast.Call object at 0x7da1b27269e0>]]]
if compare[constant[info] in name[data]] begin[:]
variable[candidates] assign[=] <ast.ListComp object at 0x7da1b27273a0>
call[name[candidates].append, parameter[call[constant[https://pypi.python.org/pypi/{name}/{latest_release}].format, parameter[]]]]
if compare[constant[download_url] in name[data]] begin[:]
call[name[candidates].append, parameter[call[name[data]][constant[download_url]]]]
if call[call[name[data]][constant[info]]][constant[description]] begin[:]
call[name[candidates].extend, parameter[call[name[changelogs].url_re.findall, parameter[call[call[name[data]][constant[info]]][constant[description]]]]]]
return[call[name[find_changelogs_fn], parameter[]]]
return[tuple[[<ast.Call object at 0x7da1b2726b00>, <ast.Call object at 0x7da1b2726860>]]]
|
keyword[def] identifier[get_urls] ( identifier[session] , identifier[name] , identifier[data] , identifier[find_changelogs_fn] ,** identifier[kwargs] ):
literal[string]
identifier[map] = identifier[get_url_map] ()
keyword[if] identifier[name] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ) keyword[in] identifier[map] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[name] ))
keyword[return] [ identifier[map] [ identifier[name] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )]], identifier[set] ()
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[candidates] =[
identifier[url] keyword[for] identifier[url] keyword[in]
[ identifier[data] [ literal[string] ]. identifier[get] ( identifier[attr] ) keyword[for] identifier[attr] keyword[in] ( literal[string] , literal[string] , literal[string] )]
keyword[if] identifier[url]
]
identifier[candidates] . identifier[append] ( literal[string] . identifier[format] (
identifier[name] = identifier[name] ,
identifier[latest_release] = identifier[next] ( identifier[iter] ( identifier[get_releases] ( identifier[data] )))
))
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[candidates] . identifier[append] ( identifier[data] [ literal[string] ])
keyword[if] identifier[data] [ literal[string] ][ literal[string] ]:
identifier[candidates] . identifier[extend] ( identifier[changelogs] . identifier[url_re] . identifier[findall] ( identifier[data] [ literal[string] ][ literal[string] ]))
keyword[return] identifier[find_changelogs_fn] ( identifier[session] = identifier[session] , identifier[name] = identifier[name] , identifier[candidates] = identifier[candidates] )
keyword[return] identifier[set] (), identifier[set] ()
|
def get_urls(session, name, data, find_changelogs_fn, **kwargs):
"""
Gets URLs to changelogs.
:param session: requests Session instance
:param name: str, package name
:param data: dict, meta data
:param find_changelogs_fn: function, find_changelogs
:return: tuple, (set(changelog URLs), set(repo URLs))
"""
# check if there's a changelog in ../custom/pypi/map.txt
map = get_url_map()
if name.lower().replace('_', '-') in map:
logger.info("Package {name}'s URL is in pypi/map.txt, returning".format(name=name))
return ([map[name.lower().replace('_', '-')]], set()) # depends on [control=['if'], data=['map']]
# if this package has valid meta data, build up a list of URL candidates we can possibly
# search for changelogs on
if 'info' in data:
# add all URLs in pypi's meta data:
# {
# "info": {
# "home_page":
# "docs_url":
# "bugtrack_url":
# }
# }
candidates = [url for url in [data['info'].get(attr) for attr in ('home_page', 'docs_url', 'bugtrack_url')] if url]
# the latest release page on pypi might also contain links, add it
candidates.append('https://pypi.python.org/pypi/{name}/{latest_release}'.format(name=name, latest_release=next(iter(get_releases(data)))))
# Check the download URL as well.
if 'download_url' in data:
candidates.append(data['download_url']) # depends on [control=['if'], data=['data']]
if data['info']['description']:
candidates.extend(changelogs.url_re.findall(data['info']['description'])) # depends on [control=['if'], data=[]]
return find_changelogs_fn(session=session, name=name, candidates=candidates) # depends on [control=['if'], data=['data']]
return (set(), set())
|
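A runnable check of the candidate-building step above, using a toy metadata dict shaped like pypi's JSON API response; None-valued fields are filtered out.

data = {"info": {"home_page": "https://example.org/project",
                 "docs_url": None,
                 "bugtrack_url": "https://example.org/bugs"}}
candidates = [url for url in
              (data["info"].get(attr)
               for attr in ("home_page", "docs_url", "bugtrack_url"))
              if url]
assert candidates == ["https://example.org/project", "https://example.org/bugs"]
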
def full_clean(self, exclude, validate_unique=False):
"""
Validate node, on error raising ValidationErrors which can be handled by django forms
:param exclude:
:param validate_unique: Check if conflicting node exists in the labels indexes
:return:
"""
# validate against neomodel
try:
self.deflate(self.__properties__, self)
except DeflateError as e:
raise ValidationError({e.property_name: e.msg})
except RequiredProperty as e:
raise ValidationError({e.property_name: 'is required'})
|
def function[full_clean, parameter[self, exclude, validate_unique]]:
constant[
Validate node, on error raising ValidationErrors which can be handled by django forms
:param exclude:
:param validate_unique: Check if conflicting node exists in the labels indexes
:return:
]
<ast.Try object at 0x7da2047e8250>
|
keyword[def] identifier[full_clean] ( identifier[self] , identifier[exclude] , identifier[validate_unique] = keyword[False] ):
literal[string]
keyword[try] :
identifier[self] . identifier[deflate] ( identifier[self] . identifier[__properties__] , identifier[self] )
keyword[except] identifier[DeflateError] keyword[as] identifier[e] :
keyword[raise] identifier[ValidationError] ({ identifier[e] . identifier[property_name] : identifier[e] . identifier[msg] })
keyword[except] identifier[RequiredProperty] keyword[as] identifier[e] :
keyword[raise] identifier[ValidationError] ({ identifier[e] . identifier[property_name] : literal[string] })
|
def full_clean(self, exclude, validate_unique=False):
"""
Validate node, on error raising ValidationErrors which can be handled by django forms
:param exclude:
:param validate_unique: Check if conflicting node exists in the labels indexes
:return:
"""
# validate against neomodel
try:
self.deflate(self.__properties__, self) # depends on [control=['try'], data=[]]
except DeflateError as e:
raise ValidationError({e.property_name: e.msg}) # depends on [control=['except'], data=['e']]
except RequiredProperty as e:
raise ValidationError({e.property_name: 'is required'}) # depends on [control=['except'], data=['e']]
|
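A dependency-free sketch of the error-translation pattern above: a property-level exception is re-raised as a {field: message} mapping, the shape django forms expect. The DeflateError stub and as_form_errors helper here are illustrative only.

class DeflateError(Exception):
    def __init__(self, property_name, msg):
        self.property_name, self.msg = property_name, msg

def as_form_errors(exc):
    return {exc.property_name: exc.msg}

assert as_form_errors(DeflateError("age", "must be an int")) == {"age": "must be an int"}
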
def get_children(self):
"""Cache superclass result"""
key = self.CHILDREN_KEY % self.pk
#children = cache.get(key, None)
# if children is None:
children = super(Page, self).get_children()
#cache.set(key, children)
return children
|
def function[get_children, parameter[self]]:
constant[Cache superclass result]
variable[key] assign[=] binary_operation[name[self].CHILDREN_KEY <ast.Mod object at 0x7da2590d6920> name[self].pk]
variable[children] assign[=] call[call[name[super], parameter[name[Page], name[self]]].get_children, parameter[]]
return[name[children]]
|
keyword[def] identifier[get_children] ( identifier[self] ):
literal[string]
identifier[key] = identifier[self] . identifier[CHILDREN_KEY] % identifier[self] . identifier[pk]
identifier[children] = identifier[super] ( identifier[Page] , identifier[self] ). identifier[get_children] ()
keyword[return] identifier[children]
|
def get_children(self):
"""Cache superclass result"""
key = self.CHILDREN_KEY % self.pk
#children = cache.get(key, None)
# if children is None:
children = super(Page, self).get_children()
#cache.set(key, children)
return children
|
def get_mounts_by_path():
'''
Gets all mounted devices and paths
    :return: list of dicts, one per mounted device (path, device, type, options)
'''
mount_info = []
f = open('/proc/mounts', 'r')
for line in f:
_tmp = line.split(" ")
mount_info.append({'path': _tmp[1],
'device': _tmp[0],
'type': _tmp[2],
'options': _tmp[3]
}
)
return mount_info
|
def function[get_mounts_by_path, parameter[]]:
constant[
Gets all mounted devices and paths
    :return: list of dicts, one per mounted device (path, device, type, options)
]
variable[mount_info] assign[=] list[[]]
variable[f] assign[=] call[name[open], parameter[constant[/proc/mounts], constant[r]]]
for taget[name[line]] in starred[name[f]] begin[:]
variable[_tmp] assign[=] call[name[line].split, parameter[constant[ ]]]
call[name[mount_info].append, parameter[dictionary[[<ast.Constant object at 0x7da20cabc2e0>, <ast.Constant object at 0x7da20cabf790>, <ast.Constant object at 0x7da20cabeb30>, <ast.Constant object at 0x7da20cabe0b0>], [<ast.Subscript object at 0x7da20cabd270>, <ast.Subscript object at 0x7da20cabd180>, <ast.Subscript object at 0x7da20cabf2b0>, <ast.Subscript object at 0x7da20cabc610>]]]]
return[name[mount_info]]
|
keyword[def] identifier[get_mounts_by_path] ():
literal[string]
identifier[mount_info] =[]
identifier[f] = identifier[open] ( literal[string] , literal[string] )
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[_tmp] = identifier[line] . identifier[split] ( literal[string] )
identifier[mount_info] . identifier[append] ({ literal[string] : identifier[_tmp] [ literal[int] ],
literal[string] : identifier[_tmp] [ literal[int] ],
literal[string] : identifier[_tmp] [ literal[int] ],
literal[string] : identifier[_tmp] [ literal[int] ]
}
)
keyword[return] identifier[mount_info]
|
def get_mounts_by_path():
"""
Gets all mounted devices and paths
    :return: list of dicts, one per mounted device (path, device, type, options)
"""
mount_info = []
f = open('/proc/mounts', 'r')
for line in f:
_tmp = line.split(' ')
mount_info.append({'path': _tmp[1], 'device': _tmp[0], 'type': _tmp[2], 'options': _tmp[3]}) # depends on [control=['for'], data=['line']]
return mount_info
|
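A hedged usage sketch: /proc/mounts only exists on Linux, so the call is guarded; on other platforms the loop simply does not run.

import os

if os.path.exists("/proc/mounts"):
    for mount in get_mounts_by_path():
        print("%(device)s on %(path)s type %(type)s" % mount)
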
def _get_env_vars_value(filename):
"""
If the user provided a file containing values of environment variables, this method will read the file and
return its value
:param string filename: Path to file containing environment variable values
:return dict: Value of environment variables, if provided. None otherwise
:raises InvokeContextException: If the file was not found or not a valid JSON
"""
if not filename:
return None
# Try to read the file and parse it as JSON
try:
with open(filename, 'r') as fp:
return json.load(fp)
except Exception as ex:
raise InvokeContextException("Could not read environment variables overrides from file {}: {}".format(
filename,
str(ex)))
|
def function[_get_env_vars_value, parameter[filename]]:
constant[
If the user provided a file containing values of environment variables, this method will read the file and
return its value
:param string filename: Path to file containing environment variable values
:return dict: Value of environment variables, if provided. None otherwise
:raises InvokeContextException: If the file was not found or not a valid JSON
]
if <ast.UnaryOp object at 0x7da18f8101f0> begin[:]
return[constant[None]]
<ast.Try object at 0x7da18f812f20>
|
keyword[def] identifier[_get_env_vars_value] ( identifier[filename] ):
literal[string]
keyword[if] keyword[not] identifier[filename] :
keyword[return] keyword[None]
keyword[try] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fp] :
keyword[return] identifier[json] . identifier[load] ( identifier[fp] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[raise] identifier[InvokeContextException] ( literal[string] . identifier[format] (
identifier[filename] ,
identifier[str] ( identifier[ex] )))
|
def _get_env_vars_value(filename):
"""
If the user provided a file containing values of environment variables, this method will read the file and
return its value
:param string filename: Path to file containing environment variable values
:return dict: Value of environment variables, if provided. None otherwise
:raises InvokeContextException: If the file was not found or not a valid JSON
"""
if not filename:
return None # depends on [control=['if'], data=[]]
# Try to read the file and parse it as JSON
try:
with open(filename, 'r') as fp:
return json.load(fp) # depends on [control=['with'], data=['fp']] # depends on [control=['try'], data=[]]
except Exception as ex:
raise InvokeContextException('Could not read environment variables overrides from file {}: {}'.format(filename, str(ex))) # depends on [control=['except'], data=['ex']]
|
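A runnable usage sketch using a temporary JSON file; the {function: {VAR: value}} shape mirrors what environment-variable overrides typically look like, but the exact keys here are illustrative.

import json
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fp:
    json.dump({"MyFunction": {"TABLE_NAME": "dev-table"}}, fp)

assert _get_env_vars_value(fp.name) == {"MyFunction": {"TABLE_NAME": "dev-table"}}
assert _get_env_vars_value(None) is None
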
def url_report(self, scan_url, apikey):
"""
        Send URLs for a list of past malicious associations
"""
url = self.base_url + "url/report"
params = {"apikey": apikey, 'resource': scan_url}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params, headers=self.headers)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code)
time.sleep(self.public_api_sleep_time)
|
def function[url_report, parameter[self, scan_url, apikey]]:
constant[
    Send URLs for a list of past malicious associations
]
variable[url] assign[=] binary_operation[name[self].base_url + constant[url/report]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b196a9e0>, <ast.Constant object at 0x7da1b1968850>], [<ast.Name object at 0x7da1b1968a90>, <ast.Name object at 0x7da1b196a740>]]
variable[rate_limit_clear] assign[=] call[name[self].rate_limit, parameter[]]
if name[rate_limit_clear] begin[:]
variable[response] assign[=] call[name[requests].post, parameter[name[url]]]
if compare[name[response].status_code equal[==] name[self].HTTP_OK] begin[:]
variable[json_response] assign[=] call[name[response].json, parameter[]]
return[name[json_response]]
call[name[time].sleep, parameter[name[self].public_api_sleep_time]]
|
keyword[def] identifier[url_report] ( identifier[self] , identifier[scan_url] , identifier[apikey] ):
literal[string]
identifier[url] = identifier[self] . identifier[base_url] + literal[string]
identifier[params] ={ literal[string] : identifier[apikey] , literal[string] : identifier[scan_url] }
identifier[rate_limit_clear] = identifier[self] . identifier[rate_limit] ()
keyword[if] identifier[rate_limit_clear] :
identifier[response] = identifier[requests] . identifier[post] ( identifier[url] , identifier[params] = identifier[params] , identifier[headers] = identifier[self] . identifier[headers] )
keyword[if] identifier[response] . identifier[status_code] == identifier[self] . identifier[HTTP_OK] :
identifier[json_response] = identifier[response] . identifier[json] ()
keyword[return] identifier[json_response]
keyword[elif] identifier[response] . identifier[status_code] == identifier[self] . identifier[HTTP_RATE_EXCEEDED] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[scan_url] , identifier[response] . identifier[status_code] )
identifier[time] . identifier[sleep] ( identifier[self] . identifier[public_api_sleep_time] )
|
def url_report(self, scan_url, apikey):
"""
        Send URLs for a list of past malicious associations
"""
url = self.base_url + 'url/report'
params = {'apikey': apikey, 'resource': scan_url}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params, headers=self.headers)
if response.status_code == self.HTTP_OK:
json_response = response.json()
return json_response # depends on [control=['if'], data=[]]
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20) # depends on [control=['if'], data=[]]
else:
self.logger.error('sent: %s, HTTP: %d', scan_url, response.status_code)
time.sleep(self.public_api_sleep_time) # depends on [control=['if'], data=[]]
|
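A dependency-free sketch of the status handling above: 200 yields the JSON payload, the rate-limit status backs off, and anything else is logged. The public VirusTotal API has historically signalled rate limiting with HTTP 204, which is assumed here.

def classify(status_code, http_ok=200, http_rate_exceeded=204):
    if status_code == http_ok:
        return "ok"            # parse response.json()
    elif status_code == http_rate_exceeded:
        return "backoff"       # sleep and retry later
    return "error"             # log and move on

assert [classify(c) for c in (200, 204, 500)] == ["ok", "backoff", "error"]
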
def __AddEntryType(self, entry_type_name, entry_schema, parent_name):
"""Add a type for a list entry."""
entry_schema.pop('description', None)
description = 'Single entry in a %s.' % parent_name
schema = {
'id': entry_type_name,
'type': 'object',
'description': description,
'properties': {
'entry': {
'type': 'array',
'items': entry_schema,
},
},
}
self.AddDescriptorFromSchema(entry_type_name, schema)
return entry_type_name
|
def function[__AddEntryType, parameter[self, entry_type_name, entry_schema, parent_name]]:
constant[Add a type for a list entry.]
call[name[entry_schema].pop, parameter[constant[description], constant[None]]]
variable[description] assign[=] binary_operation[constant[Single entry in a %s.] <ast.Mod object at 0x7da2590d6920> name[parent_name]]
variable[schema] assign[=] dictionary[[<ast.Constant object at 0x7da1b0844cd0>, <ast.Constant object at 0x7da1b0846e30>, <ast.Constant object at 0x7da1b0847d00>, <ast.Constant object at 0x7da1b0846f50>], [<ast.Name object at 0x7da1b0846620>, <ast.Constant object at 0x7da1b0844d30>, <ast.Name object at 0x7da1b0844c40>, <ast.Dict object at 0x7da1b0847d60>]]
call[name[self].AddDescriptorFromSchema, parameter[name[entry_type_name], name[schema]]]
return[name[entry_type_name]]
|
keyword[def] identifier[__AddEntryType] ( identifier[self] , identifier[entry_type_name] , identifier[entry_schema] , identifier[parent_name] ):
literal[string]
identifier[entry_schema] . identifier[pop] ( literal[string] , keyword[None] )
identifier[description] = literal[string] % identifier[parent_name]
identifier[schema] ={
literal[string] : identifier[entry_type_name] ,
literal[string] : literal[string] ,
literal[string] : identifier[description] ,
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[entry_schema] ,
},
},
}
identifier[self] . identifier[AddDescriptorFromSchema] ( identifier[entry_type_name] , identifier[schema] )
keyword[return] identifier[entry_type_name]
|
def __AddEntryType(self, entry_type_name, entry_schema, parent_name):
"""Add a type for a list entry."""
entry_schema.pop('description', None)
description = 'Single entry in a %s.' % parent_name
schema = {'id': entry_type_name, 'type': 'object', 'description': description, 'properties': {'entry': {'type': 'array', 'items': entry_schema}}}
self.AddDescriptorFromSchema(entry_type_name, schema)
return entry_type_name
|
def addr(self):
"""
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
"""
ip = self.regs._ip
if isinstance(ip, SootAddressDescriptor):
return ip
return self.solver.eval_one(self.regs._ip)
|
def function[addr, parameter[self]]:
constant[
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
]
variable[ip] assign[=] name[self].regs._ip
if call[name[isinstance], parameter[name[ip], name[SootAddressDescriptor]]] begin[:]
return[name[ip]]
return[call[name[self].solver.eval_one, parameter[name[self].regs._ip]]]
|
keyword[def] identifier[addr] ( identifier[self] ):
literal[string]
identifier[ip] = identifier[self] . identifier[regs] . identifier[_ip]
keyword[if] identifier[isinstance] ( identifier[ip] , identifier[SootAddressDescriptor] ):
keyword[return] identifier[ip]
keyword[return] identifier[self] . identifier[solver] . identifier[eval_one] ( identifier[self] . identifier[regs] . identifier[_ip] )
|
def addr(self):
"""
Get the concrete address of the instruction pointer, without triggering SimInspect breakpoints or generating
SimActions. An integer is returned, or an exception is raised if the instruction pointer is symbolic.
:return: an int
"""
ip = self.regs._ip
if isinstance(ip, SootAddressDescriptor):
return ip # depends on [control=['if'], data=[]]
return self.solver.eval_one(self.regs._ip)
|
def local_path_export(at_start=True, env_cmd=None):
"""Retrieve paths to local install, also including environment paths if env_cmd included.
"""
paths = [get_bcbio_bin()]
if env_cmd:
env_path = os.path.dirname(get_program_python(env_cmd))
if env_path not in paths:
paths.insert(0, env_path)
if at_start:
return "export PATH=%s:\"$PATH\" && " % (":".join(paths))
else:
return "export PATH=\"$PATH\":%s && " % (":".join(paths))
|
def function[local_path_export, parameter[at_start, env_cmd]]:
constant[Retrieve paths to local install, also including environment paths if env_cmd included.
]
variable[paths] assign[=] list[[<ast.Call object at 0x7da1b18bedd0>]]
if name[env_cmd] begin[:]
variable[env_path] assign[=] call[name[os].path.dirname, parameter[call[name[get_program_python], parameter[name[env_cmd]]]]]
if compare[name[env_path] <ast.NotIn object at 0x7da2590d7190> name[paths]] begin[:]
call[name[paths].insert, parameter[constant[0], name[env_path]]]
if name[at_start] begin[:]
return[binary_operation[constant[export PATH=%s:"$PATH" && ] <ast.Mod object at 0x7da2590d6920> call[constant[:].join, parameter[name[paths]]]]]
|
keyword[def] identifier[local_path_export] ( identifier[at_start] = keyword[True] , identifier[env_cmd] = keyword[None] ):
literal[string]
identifier[paths] =[ identifier[get_bcbio_bin] ()]
keyword[if] identifier[env_cmd] :
identifier[env_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[get_program_python] ( identifier[env_cmd] ))
keyword[if] identifier[env_path] keyword[not] keyword[in] identifier[paths] :
identifier[paths] . identifier[insert] ( literal[int] , identifier[env_path] )
keyword[if] identifier[at_start] :
keyword[return] literal[string] %( literal[string] . identifier[join] ( identifier[paths] ))
keyword[else] :
keyword[return] literal[string] %( literal[string] . identifier[join] ( identifier[paths] ))
|
def local_path_export(at_start=True, env_cmd=None):
"""Retrieve paths to local install, also including environment paths if env_cmd included.
"""
paths = [get_bcbio_bin()]
if env_cmd:
env_path = os.path.dirname(get_program_python(env_cmd))
if env_path not in paths:
paths.insert(0, env_path) # depends on [control=['if'], data=['env_path', 'paths']] # depends on [control=['if'], data=[]]
if at_start:
return 'export PATH=%s:"$PATH" && ' % ':'.join(paths) # depends on [control=['if'], data=[]]
else:
return 'export PATH="$PATH":%s && ' % ':'.join(paths)
|
def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
"""
# For primitive values, this is only called when the value is part of an array,
# in which case we do not need to check for missing or omitted values.
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element
|
def function[serialize, parameter[self, value, state]]:
constant[
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
]
<ast.Tuple object at 0x7da1b25d34f0> assign[=] call[name[_element_path_create_new], parameter[name[self].element_path]]
call[name[self]._serialize, parameter[name[end_element], name[value], name[state]]]
return[name[start_element]]
|
keyword[def] identifier[serialize] (
identifier[self] ,
identifier[value] ,
identifier[state]
):
literal[string]
identifier[start_element] , identifier[end_element] = identifier[_element_path_create_new] ( identifier[self] . identifier[element_path] )
identifier[self] . identifier[_serialize] ( identifier[end_element] , identifier[value] , identifier[state] )
keyword[return] identifier[start_element]
|
def serialize(self, value, state):
    # type: (Any, _ProcessorState) -> ET.Element
    """
    Serialize the value into a new element object and return the element.

    If the omit_empty option was specified and the value is falsey, then this will return None.
    """
# For primitive values, this is only called when the value is part of an array,
# in which case we do not need to check for missing or omitted values.
(start_element, end_element) = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element
|
def query_positions(self, accounts):
"""查询现金和持仓
Arguments:
accounts {[type]} -- [description]
Returns:
dict-- {'cash_available':xxx,'hold_available':xxx}
"""
try:
data = self.call("positions", {'client': accounts})
if data is not None:
cash_part = data.get('subAccounts', {}).get('人民币', False)
if cash_part:
cash_available = cash_part.get('可用金额', cash_part.get('可用'))
position_part = data.get('dataTable', False)
if position_part:
res = data.get('dataTable', False)
if res:
hold_headers = res['columns']
hold_headers = [
cn_en_compare[item] for item in hold_headers
]
hold_available = pd.DataFrame(
res['rows'],
columns=hold_headers
)
if len(hold_available) == 1 and hold_available.amount[0] in [
None,
'',
0
]:
hold_available = pd.DataFrame(
data=None,
columns=hold_headers
)
return {
'cash_available':
cash_available,
'hold_available':
hold_available.assign(
amount=hold_available.amount.apply(float)
).loc[:,
['code',
'amount']].set_index('code').amount
}
else:
print(data)
return False, 'None ACCOUNT'
except:
return False
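
# A sketch of the shape returned on success, using a one-row pandas frame
# with hypothetical values; hold_available ends up as a float Series
# indexed by security code.
import pandas as pd

hold = pd.DataFrame([['000001', '100']], columns=['code', 'amount'])
result = {
    'cash_available': 10000.0,
    'hold_available': hold.assign(amount=hold.amount.apply(float))
                          .loc[:, ['code', 'amount']].set_index('code').amount,
}
assert result['hold_available']['000001'] == 100.0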
|
def function[query_positions, parameter[self, accounts]]:
    constant[Query cash and holdings
Arguments:
accounts {[type]} -- [description]
Returns:
dict-- {'cash_available':xxx,'hold_available':xxx}
]
<ast.Try object at 0x7da1b2006ad0>
|
keyword[def] identifier[query_positions] ( identifier[self] , identifier[accounts] ):
literal[string]
keyword[try] :
identifier[data] = identifier[self] . identifier[call] ( literal[string] ,{ literal[string] : identifier[accounts] })
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[cash_part] = identifier[data] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[cash_part] :
identifier[cash_available] = identifier[cash_part] . identifier[get] ( literal[string] , identifier[cash_part] . identifier[get] ( literal[string] ))
identifier[position_part] = identifier[data] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[position_part] :
identifier[res] = identifier[data] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[res] :
identifier[hold_headers] = identifier[res] [ literal[string] ]
identifier[hold_headers] =[
identifier[cn_en_compare] [ identifier[item] ] keyword[for] identifier[item] keyword[in] identifier[hold_headers]
]
identifier[hold_available] = identifier[pd] . identifier[DataFrame] (
identifier[res] [ literal[string] ],
identifier[columns] = identifier[hold_headers]
)
keyword[if] identifier[len] ( identifier[hold_available] )== literal[int] keyword[and] identifier[hold_available] . identifier[amount] [ literal[int] ] keyword[in] [
keyword[None] ,
literal[string] ,
literal[int]
]:
identifier[hold_available] = identifier[pd] . identifier[DataFrame] (
identifier[data] = keyword[None] ,
identifier[columns] = identifier[hold_headers]
)
keyword[return] {
literal[string] :
identifier[cash_available] ,
literal[string] :
identifier[hold_available] . identifier[assign] (
identifier[amount] = identifier[hold_available] . identifier[amount] . identifier[apply] ( identifier[float] )
). identifier[loc] [:,
[ literal[string] ,
literal[string] ]]. identifier[set_index] ( literal[string] ). identifier[amount]
}
keyword[else] :
identifier[print] ( identifier[data] )
keyword[return] keyword[False] , literal[string]
keyword[except] :
keyword[return] keyword[False]
|
def query_positions(self, accounts):
"""查询现金和持仓
Arguments:
accounts {[type]} -- [description]
Returns:
dict-- {'cash_available':xxx,'hold_available':xxx}
"""
try:
data = self.call('positions', {'client': accounts})
if data is not None:
cash_part = data.get('subAccounts', {}).get('人民币', False)
if cash_part:
cash_available = cash_part.get('可用金额', cash_part.get('可用')) # depends on [control=['if'], data=[]]
position_part = data.get('dataTable', False)
if position_part:
res = data.get('dataTable', False)
if res:
hold_headers = res['columns']
hold_headers = [cn_en_compare[item] for item in hold_headers]
hold_available = pd.DataFrame(res['rows'], columns=hold_headers) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if len(hold_available) == 1 and hold_available.amount[0] in [None, '', 0]:
hold_available = pd.DataFrame(data=None, columns=hold_headers) # depends on [control=['if'], data=[]]
return {'cash_available': cash_available, 'hold_available': hold_available.assign(amount=hold_available.amount.apply(float)).loc[:, ['code', 'amount']].set_index('code').amount} # depends on [control=['if'], data=['data']]
else:
print(data)
return (False, 'None ACCOUNT') # depends on [control=['try'], data=[]]
except:
return False # depends on [control=['except'], data=[]]
|
def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data)
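
# Note the row-based branch stores the data column-major; a small standalone
# check of that transposition with a hypothetical 2x3 input.
data = [[1, 2, 3],
        [4, 5, 6]]  # 2 rows x 3 columns, row-based
matrix = [[data[row][col] for row in range(2)] for col in range(3)]
assert matrix == [[1, 4], [2, 5], [3, 6]]  # one inner list per column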
|
def function[_initialize_with_array, parameter[self, data, rowBased]]:
constant[Set the matrix values from a two dimensional list.]
if name[rowBased] begin[:]
name[self].matrix assign[=] list[[]]
if compare[call[name[len], parameter[name[data]]] not_equal[!=] name[self]._rows] begin[:]
<ast.Raise object at 0x7da204345300>
for taget[name[col]] in starred[call[name[xrange], parameter[name[self]._columns]]] begin[:]
call[name[self].matrix.append, parameter[list[[]]]]
for taget[name[row]] in starred[call[name[xrange], parameter[name[self]._rows]]] begin[:]
if compare[call[name[len], parameter[call[name[data]][name[row]]]] not_equal[!=] name[self]._columns] begin[:]
<ast.Raise object at 0x7da204347df0>
call[call[name[self].matrix][name[col]].append, parameter[call[call[name[data]][name[row]]][name[col]]]]
|
keyword[def] identifier[_initialize_with_array] ( identifier[self] , identifier[data] , identifier[rowBased] = keyword[True] ):
literal[string]
keyword[if] identifier[rowBased] :
identifier[self] . identifier[matrix] =[]
keyword[if] identifier[len] ( identifier[data] )!= identifier[self] . identifier[_rows] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[col] keyword[in] identifier[xrange] ( identifier[self] . identifier[_columns] ):
identifier[self] . identifier[matrix] . identifier[append] ([])
keyword[for] identifier[row] keyword[in] identifier[xrange] ( identifier[self] . identifier[_rows] ):
keyword[if] identifier[len] ( identifier[data] [ identifier[row] ])!= identifier[self] . identifier[_columns] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[matrix] [ identifier[col] ]. identifier[append] ( identifier[data] [ identifier[row] ][ identifier[col] ])
keyword[else] :
keyword[if] identifier[len] ( identifier[data] )!= identifier[self] . identifier[_columns] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[col] keyword[in] identifier[data] :
keyword[if] identifier[len] ( identifier[col] )!= identifier[self] . identifier[_rows] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[matrix] = identifier[copy] . identifier[deepcopy] ( identifier[data] )
|
def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError('Size of Matrix does not match') # depends on [control=['if'], data=[]]
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError('Size of Matrix does not match') # depends on [control=['if'], data=[]]
self.matrix[col].append(data[row][col]) # depends on [control=['for'], data=['row']] # depends on [control=['for'], data=['col']] # depends on [control=['if'], data=[]]
else:
if len(data) != self._columns:
raise ValueError('Size of Matrix does not match') # depends on [control=['if'], data=[]]
for col in data:
if len(col) != self._rows:
raise ValueError('Size of Matrix does not match') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['col']]
self.matrix = copy.deepcopy(data)
|
def eqToEq2000(ra_deg, dec_deg, eq):
"""Convert Eq to Eq 2000."""
ra_rad = math.radians(ra_deg)
dec_rad = math.radians(dec_deg)
x = math.cos(dec_rad) * math.cos(ra_rad)
y = math.cos(dec_rad) * math.sin(ra_rad)
z = math.sin(dec_rad)
p11, p12, p13, p21, p22, p23, p31, p32, p33 = trans_coeff(eq, x, y, z)
x0 = p11 * x + p21 * y + p31 * z
y0 = p12 * x + p22 * y + p32 * z
z0 = p13 * x + p23 * y + p33 * z
new_dec = math.asin(z0)
if x0 == 0.0:
new_ra = math.pi / 2.0
else:
new_ra = math.atan(y0 / x0)
if ((y0 * math.cos(new_dec) > 0.0 and x0 * math.cos(new_dec) <= 0.0) or
(y0 * math.cos(new_dec) <= 0.0 and x0 * math.cos(new_dec) < 0.0)):
new_ra += math.pi
elif new_ra < 0.0:
new_ra += 2.0 * math.pi
#new_ra = new_ra * 12.0 * 3600.0 / math.pi
new_ra_deg = new_ra * 12.0 / math.pi * 15.0
#new_dec = new_dec * 180.0 * 3600.0 / math.pi
new_dec_deg = new_dec * 180.0 / math.pi
return (new_ra_deg, new_dec_deg)
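
# The final unit conversion goes through hours (12/pi) and hours-to-degrees
# (x15), which is equivalent to a plain radians-to-degrees conversion since
# 12/pi * 15 = 180/pi; a quick standalone check.
import math
new_ra = math.radians(30.0)  # stand-in for a result in radians
assert abs(new_ra * 12.0 / math.pi * 15.0 - math.degrees(new_ra)) < 1e-12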
|
def function[eqToEq2000, parameter[ra_deg, dec_deg, eq]]:
constant[Convert Eq to Eq 2000.]
variable[ra_rad] assign[=] call[name[math].radians, parameter[name[ra_deg]]]
variable[dec_rad] assign[=] call[name[math].radians, parameter[name[dec_deg]]]
variable[x] assign[=] binary_operation[call[name[math].cos, parameter[name[dec_rad]]] * call[name[math].cos, parameter[name[ra_rad]]]]
variable[y] assign[=] binary_operation[call[name[math].cos, parameter[name[dec_rad]]] * call[name[math].sin, parameter[name[ra_rad]]]]
variable[z] assign[=] call[name[math].sin, parameter[name[dec_rad]]]
<ast.Tuple object at 0x7da1b0dc1660> assign[=] call[name[trans_coeff], parameter[name[eq], name[x], name[y], name[z]]]
variable[x0] assign[=] binary_operation[binary_operation[binary_operation[name[p11] * name[x]] + binary_operation[name[p21] * name[y]]] + binary_operation[name[p31] * name[z]]]
variable[y0] assign[=] binary_operation[binary_operation[binary_operation[name[p12] * name[x]] + binary_operation[name[p22] * name[y]]] + binary_operation[name[p32] * name[z]]]
variable[z0] assign[=] binary_operation[binary_operation[binary_operation[name[p13] * name[x]] + binary_operation[name[p23] * name[y]]] + binary_operation[name[p33] * name[z]]]
variable[new_dec] assign[=] call[name[math].asin, parameter[name[z0]]]
if compare[name[x0] equal[==] constant[0.0]] begin[:]
variable[new_ra] assign[=] binary_operation[name[math].pi / constant[2.0]]
if <ast.BoolOp object at 0x7da2041da7a0> begin[:]
<ast.AugAssign object at 0x7da2041db520>
variable[new_ra_deg] assign[=] binary_operation[binary_operation[binary_operation[name[new_ra] * constant[12.0]] / name[math].pi] * constant[15.0]]
variable[new_dec_deg] assign[=] binary_operation[binary_operation[name[new_dec] * constant[180.0]] / name[math].pi]
return[tuple[[<ast.Name object at 0x7da2041da9b0>, <ast.Name object at 0x7da2041d9540>]]]
|
keyword[def] identifier[eqToEq2000] ( identifier[ra_deg] , identifier[dec_deg] , identifier[eq] ):
literal[string]
identifier[ra_rad] = identifier[math] . identifier[radians] ( identifier[ra_deg] )
identifier[dec_rad] = identifier[math] . identifier[radians] ( identifier[dec_deg] )
identifier[x] = identifier[math] . identifier[cos] ( identifier[dec_rad] )* identifier[math] . identifier[cos] ( identifier[ra_rad] )
identifier[y] = identifier[math] . identifier[cos] ( identifier[dec_rad] )* identifier[math] . identifier[sin] ( identifier[ra_rad] )
identifier[z] = identifier[math] . identifier[sin] ( identifier[dec_rad] )
identifier[p11] , identifier[p12] , identifier[p13] , identifier[p21] , identifier[p22] , identifier[p23] , identifier[p31] , identifier[p32] , identifier[p33] = identifier[trans_coeff] ( identifier[eq] , identifier[x] , identifier[y] , identifier[z] )
identifier[x0] = identifier[p11] * identifier[x] + identifier[p21] * identifier[y] + identifier[p31] * identifier[z]
identifier[y0] = identifier[p12] * identifier[x] + identifier[p22] * identifier[y] + identifier[p32] * identifier[z]
identifier[z0] = identifier[p13] * identifier[x] + identifier[p23] * identifier[y] + identifier[p33] * identifier[z]
identifier[new_dec] = identifier[math] . identifier[asin] ( identifier[z0] )
keyword[if] identifier[x0] == literal[int] :
identifier[new_ra] = identifier[math] . identifier[pi] / literal[int]
keyword[else] :
identifier[new_ra] = identifier[math] . identifier[atan] ( identifier[y0] / identifier[x0] )
keyword[if] (( identifier[y0] * identifier[math] . identifier[cos] ( identifier[new_dec] )> literal[int] keyword[and] identifier[x0] * identifier[math] . identifier[cos] ( identifier[new_dec] )<= literal[int] ) keyword[or]
( identifier[y0] * identifier[math] . identifier[cos] ( identifier[new_dec] )<= literal[int] keyword[and] identifier[x0] * identifier[math] . identifier[cos] ( identifier[new_dec] )< literal[int] )):
identifier[new_ra] += identifier[math] . identifier[pi]
keyword[elif] identifier[new_ra] < literal[int] :
identifier[new_ra] += literal[int] * identifier[math] . identifier[pi]
identifier[new_ra_deg] = identifier[new_ra] * literal[int] / identifier[math] . identifier[pi] * literal[int]
identifier[new_dec_deg] = identifier[new_dec] * literal[int] / identifier[math] . identifier[pi]
keyword[return] ( identifier[new_ra_deg] , identifier[new_dec_deg] )
|
def eqToEq2000(ra_deg, dec_deg, eq):
"""Convert Eq to Eq 2000."""
ra_rad = math.radians(ra_deg)
dec_rad = math.radians(dec_deg)
x = math.cos(dec_rad) * math.cos(ra_rad)
y = math.cos(dec_rad) * math.sin(ra_rad)
z = math.sin(dec_rad)
(p11, p12, p13, p21, p22, p23, p31, p32, p33) = trans_coeff(eq, x, y, z)
x0 = p11 * x + p21 * y + p31 * z
y0 = p12 * x + p22 * y + p32 * z
z0 = p13 * x + p23 * y + p33 * z
new_dec = math.asin(z0)
if x0 == 0.0:
new_ra = math.pi / 2.0 # depends on [control=['if'], data=[]]
else:
new_ra = math.atan(y0 / x0)
if y0 * math.cos(new_dec) > 0.0 and x0 * math.cos(new_dec) <= 0.0 or (y0 * math.cos(new_dec) <= 0.0 and x0 * math.cos(new_dec) < 0.0):
new_ra += math.pi # depends on [control=['if'], data=[]]
elif new_ra < 0.0:
new_ra += 2.0 * math.pi # depends on [control=['if'], data=['new_ra']]
#new_ra = new_ra * 12.0 * 3600.0 / math.pi
new_ra_deg = new_ra * 12.0 / math.pi * 15.0
#new_dec = new_dec * 180.0 * 3600.0 / math.pi
new_dec_deg = new_dec * 180.0 / math.pi
return (new_ra_deg, new_dec_deg)
|
def expect_no_raises(message=None, extras=None):
"""Expects no exception is raised in a context.
If the expectation is not met, the test is marked as fail after its
execution finishes.
A default message is added to the exception `details`.
Args:
message: string, custom message to add to exception's `details`.
extras: An optional field for extra information to be included in test
result.
"""
try:
yield
except Exception as e:
e_record = records.ExceptionRecord(e)
if extras:
e_record.extras = extras
msg = message or 'Got an unexpected exception'
details = '%s: %s' % (msg, e_record.details)
logging.exception(details)
e_record.details = details
recorder.add_error(e_record)
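
# The generator body suggests this is used as a context manager (presumably
# wrapped with contextlib.contextmanager in the full source); a simplified,
# self-contained analogue that logs instead of recording to a test result.
import contextlib
import logging

@contextlib.contextmanager
def demo_expect_no_raises(message=None):
    try:
        yield
    except Exception:
        # log the failure instead of adding it to a test recorder
        logging.exception(message or 'Got an unexpected exception')

with demo_expect_no_raises('parsing user input'):
    int('42')  # no exception, so nothing is logged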
|
def function[expect_no_raises, parameter[message, extras]]:
constant[Expects no exception is raised in a context.
If the expectation is not met, the test is marked as fail after its
execution finishes.
A default message is added to the exception `details`.
Args:
message: string, custom message to add to exception's `details`.
extras: An optional field for extra information to be included in test
result.
]
<ast.Try object at 0x7da1b0746ce0>
|
keyword[def] identifier[expect_no_raises] ( identifier[message] = keyword[None] , identifier[extras] = keyword[None] ):
literal[string]
keyword[try] :
keyword[yield]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[e_record] = identifier[records] . identifier[ExceptionRecord] ( identifier[e] )
keyword[if] identifier[extras] :
identifier[e_record] . identifier[extras] = identifier[extras]
identifier[msg] = identifier[message] keyword[or] literal[string]
identifier[details] = literal[string] %( identifier[msg] , identifier[e_record] . identifier[details] )
identifier[logging] . identifier[exception] ( identifier[details] )
identifier[e_record] . identifier[details] = identifier[details]
identifier[recorder] . identifier[add_error] ( identifier[e_record] )
|
def expect_no_raises(message=None, extras=None):
"""Expects no exception is raised in a context.
If the expectation is not met, the test is marked as fail after its
execution finishes.
A default message is added to the exception `details`.
Args:
message: string, custom message to add to exception's `details`.
extras: An optional field for extra information to be included in test
result.
"""
try:
yield # depends on [control=['try'], data=[]]
except Exception as e:
e_record = records.ExceptionRecord(e)
if extras:
e_record.extras = extras # depends on [control=['if'], data=[]]
msg = message or 'Got an unexpected exception'
details = '%s: %s' % (msg, e_record.details)
logging.exception(details)
e_record.details = details
recorder.add_error(e_record) # depends on [control=['except'], data=['e']]
|
def set_name(self, name):
"""
    RETURN NEW FILE WITH GIVEN NAME, KEEPING THE EXTENSION
"""
path = self._filename.split("/")
parts = path[-1].split(".")
if len(parts) == 1:
path[-1] = name
else:
path[-1] = name + "." + parts[-1]
return File("/".join(path))
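
# A standalone trace of the path logic: only the last extension survives,
# everything before it is replaced by the new name (paths are hypothetical).
path = '/data/report.final.txt'.split('/')
parts = path[-1].split('.')
path[-1] = 'summary' + '.' + parts[-1]
assert '/'.join(path) == '/data/summary.txt'  # 'final' is dropped with the old name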
|
def function[set_name, parameter[self, name]]:
constant[
    RETURN NEW FILE WITH GIVEN NAME, KEEPING THE EXTENSION
]
variable[path] assign[=] call[name[self]._filename.split, parameter[constant[/]]]
variable[parts] assign[=] call[call[name[path]][<ast.UnaryOp object at 0x7da18ede6350>].split, parameter[constant[.]]]
if compare[call[name[len], parameter[name[parts]]] equal[==] constant[1]] begin[:]
call[name[path]][<ast.UnaryOp object at 0x7da18ede5e10>] assign[=] name[name]
return[call[name[File], parameter[call[constant[/].join, parameter[name[path]]]]]]
|
keyword[def] identifier[set_name] ( identifier[self] , identifier[name] ):
literal[string]
identifier[path] = identifier[self] . identifier[_filename] . identifier[split] ( literal[string] )
identifier[parts] = identifier[path] [- literal[int] ]. identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )== literal[int] :
identifier[path] [- literal[int] ]= identifier[name]
keyword[else] :
identifier[path] [- literal[int] ]= identifier[name] + literal[string] + identifier[parts] [- literal[int] ]
keyword[return] identifier[File] ( literal[string] . identifier[join] ( identifier[path] ))
|
def set_name(self, name):
"""
    RETURN NEW FILE WITH GIVEN NAME, KEEPING THE EXTENSION
"""
path = self._filename.split('/')
parts = path[-1].split('.')
if len(parts) == 1:
path[-1] = name # depends on [control=['if'], data=[]]
else:
path[-1] = name + '.' + parts[-1]
return File('/'.join(path))
|
def train_agent_real_env(env, learner, hparams, epoch):
"""Train the PPO agent in the real environment."""
base_algo_str = hparams.base_algo
train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
rl_utils.update_hparams_from_hparams(
train_hparams, hparams, "real_" + base_algo_str + "_"
)
if hparams.wm_policy_param_sharing:
train_hparams.optimizer_zero_grads = True
env_fn = rl.make_real_env_fn(env)
num_env_steps = real_env_step_increment(hparams)
learner.train(
env_fn,
train_hparams,
simulated=False,
save_continuously=False,
epoch=epoch,
sampling_temp=hparams.real_sampling_temp,
num_env_steps=num_env_steps,
)
# Save unfinished rollouts to history.
env.reset()
|
def function[train_agent_real_env, parameter[env, learner, hparams, epoch]]:
constant[Train the PPO agent in the real environment.]
variable[base_algo_str] assign[=] name[hparams].base_algo
variable[train_hparams] assign[=] call[name[trainer_lib].create_hparams, parameter[name[hparams].base_algo_params]]
call[name[rl_utils].update_hparams_from_hparams, parameter[name[train_hparams], name[hparams], binary_operation[binary_operation[constant[real_] + name[base_algo_str]] + constant[_]]]]
if name[hparams].wm_policy_param_sharing begin[:]
name[train_hparams].optimizer_zero_grads assign[=] constant[True]
variable[env_fn] assign[=] call[name[rl].make_real_env_fn, parameter[name[env]]]
variable[num_env_steps] assign[=] call[name[real_env_step_increment], parameter[name[hparams]]]
call[name[learner].train, parameter[name[env_fn], name[train_hparams]]]
call[name[env].reset, parameter[]]
|
keyword[def] identifier[train_agent_real_env] ( identifier[env] , identifier[learner] , identifier[hparams] , identifier[epoch] ):
literal[string]
identifier[base_algo_str] = identifier[hparams] . identifier[base_algo]
identifier[train_hparams] = identifier[trainer_lib] . identifier[create_hparams] ( identifier[hparams] . identifier[base_algo_params] )
identifier[rl_utils] . identifier[update_hparams_from_hparams] (
identifier[train_hparams] , identifier[hparams] , literal[string] + identifier[base_algo_str] + literal[string]
)
keyword[if] identifier[hparams] . identifier[wm_policy_param_sharing] :
identifier[train_hparams] . identifier[optimizer_zero_grads] = keyword[True]
identifier[env_fn] = identifier[rl] . identifier[make_real_env_fn] ( identifier[env] )
identifier[num_env_steps] = identifier[real_env_step_increment] ( identifier[hparams] )
identifier[learner] . identifier[train] (
identifier[env_fn] ,
identifier[train_hparams] ,
identifier[simulated] = keyword[False] ,
identifier[save_continuously] = keyword[False] ,
identifier[epoch] = identifier[epoch] ,
identifier[sampling_temp] = identifier[hparams] . identifier[real_sampling_temp] ,
identifier[num_env_steps] = identifier[num_env_steps] ,
)
identifier[env] . identifier[reset] ()
|
def train_agent_real_env(env, learner, hparams, epoch):
"""Train the PPO agent in the real environment."""
base_algo_str = hparams.base_algo
train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
rl_utils.update_hparams_from_hparams(train_hparams, hparams, 'real_' + base_algo_str + '_')
if hparams.wm_policy_param_sharing:
train_hparams.optimizer_zero_grads = True # depends on [control=['if'], data=[]]
env_fn = rl.make_real_env_fn(env)
num_env_steps = real_env_step_increment(hparams)
learner.train(env_fn, train_hparams, simulated=False, save_continuously=False, epoch=epoch, sampling_temp=hparams.real_sampling_temp, num_env_steps=num_env_steps)
# Save unfinished rollouts to history.
env.reset()
|
def visualize(G, settings, filename="dependencies", no_graphviz=False):
"""
    Uses networkX to write a graphviz dot file and either (a) calls the
    graphviz command "dot" to turn it into an SVG and removes the
    dot file (default), or (b) if no_graphviz is True, just outputs
    the graphviz dot file
    Args:
        a NetworkX DiGraph
        the settings dictionary
        a filename (a default is provided)
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure
"""
error = settings["error"]
if no_graphviz:
write_dot_file(G, filename)
return 0
write_dot_file(G, "tempdot")
renderer = "svg"
if re.search("\.jpg$", filename, re.IGNORECASE):
renderer = "jpg"
elif re.search("\.jpeg$", filename, re.IGNORECASE):
renderer = "jpg"
elif re.search("\.svg$", filename, re.IGNORECASE):
renderer = "svg"
elif re.search("\.png$", filename, re.IGNORECASE):
renderer = "png"
elif re.search("\.gif$", filename, re.IGNORECASE):
renderer = "gif"
elif re.search("\.ps$", filename, re.IGNORECASE):
renderer = "ps"
elif re.search("\.pdf$", filename, re.IGNORECASE):
renderer = "pdf"
else:
renderer = "svg"
filename += ".svg"
command = "dot -T{} tempdot -o {}".format(renderer, filename)
p = Popen(command, shell=True)
p.communicate()
if p.returncode:
errmes = "Either graphviz is not installed, or its not on PATH"
os.remove("tempdot")
error(errmes)
sys.exit(1)
os.remove("tempdot")
return 0
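
# The renderer selection above is a regex ladder over the filename suffix;
# a compact standalone equivalent (pick_renderer is a hypothetical helper,
# not part of the source).
import re

def pick_renderer(filename):
    for ext, renderer in [('jpg', 'jpg'), ('jpeg', 'jpg'), ('svg', 'svg'),
                          ('png', 'png'), ('gif', 'gif'), ('ps', 'ps'),
                          ('pdf', 'pdf')]:
        if re.search(r'\.%s$' % ext, filename, re.IGNORECASE):
            return renderer
    return 'svg'  # default; the original also appends '.svg' to the filename

assert pick_renderer('deps.PNG') == 'png'
assert pick_renderer('deps') == 'svg'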
|
def function[visualize, parameter[G, settings, filename, no_graphviz]]:
constant[
Uses networkX to draw a graphviz dot file either (a) calls the
graphviz command "dot" to turn it into a SVG and remove the
dotfile (default), or (b) if no_graphviz is True, just output
the graphviz dot file
Args:
a NetworkX DiGraph
the settings dictionary
a filename (a default is provided
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure
]
variable[error] assign[=] call[name[settings]][constant[error]]
if name[no_graphviz] begin[:]
call[name[write_dot_file], parameter[name[G], name[filename]]]
return[constant[0]]
call[name[write_dot_file], parameter[name[G], constant[tempdot]]]
variable[renderer] assign[=] constant[svg]
if call[name[re].search, parameter[constant[\.jpg$], name[filename], name[re].IGNORECASE]] begin[:]
variable[renderer] assign[=] constant[jpg]
variable[command] assign[=] call[constant[dot -T{} tempdot -o {}].format, parameter[name[renderer], name[filename]]]
variable[p] assign[=] call[name[Popen], parameter[name[command]]]
call[name[p].communicate, parameter[]]
if name[p].returncode begin[:]
variable[errmes] assign[=] constant[Either graphviz is not installed, or its not on PATH]
call[name[os].remove, parameter[constant[tempdot]]]
call[name[error], parameter[name[errmes]]]
call[name[sys].exit, parameter[constant[1]]]
call[name[os].remove, parameter[constant[tempdot]]]
return[constant[0]]
|
keyword[def] identifier[visualize] ( identifier[G] , identifier[settings] , identifier[filename] = literal[string] , identifier[no_graphviz] = keyword[False] ):
literal[string]
identifier[error] = identifier[settings] [ literal[string] ]
keyword[if] identifier[no_graphviz] :
identifier[write_dot_file] ( identifier[G] , identifier[filename] )
keyword[return] literal[int]
identifier[write_dot_file] ( identifier[G] , literal[string] )
identifier[renderer] = literal[string]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[filename] , identifier[re] . identifier[IGNORECASE] ):
identifier[renderer] = literal[string]
keyword[else] :
identifier[renderer] = literal[string]
identifier[filename] += literal[string]
identifier[command] = literal[string] . identifier[format] ( identifier[renderer] , identifier[filename] )
identifier[p] = identifier[Popen] ( identifier[command] , identifier[shell] = keyword[True] )
identifier[p] . identifier[communicate] ()
keyword[if] identifier[p] . identifier[returncode] :
identifier[errmes] = literal[string]
identifier[os] . identifier[remove] ( literal[string] )
identifier[error] ( identifier[errmes] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[os] . identifier[remove] ( literal[string] )
keyword[return] literal[int]
|
def visualize(G, settings, filename='dependencies', no_graphviz=False):
"""
    Uses networkX to write a graphviz dot file and either (a) calls the
    graphviz command "dot" to turn it into an SVG and removes the
    dot file (default), or (b) if no_graphviz is True, just outputs
    the graphviz dot file
    Args:
        a NetworkX DiGraph
        the settings dictionary
        a filename (a default is provided)
a flag indicating whether graphviz should *not* be called
Returns:
0 if everything worked
will cause fatal error on failure
"""
error = settings['error']
if no_graphviz:
write_dot_file(G, filename)
return 0 # depends on [control=['if'], data=[]]
write_dot_file(G, 'tempdot')
renderer = 'svg'
if re.search('\\.jpg$', filename, re.IGNORECASE):
renderer = 'jpg' # depends on [control=['if'], data=[]]
elif re.search('\\.jpeg$', filename, re.IGNORECASE):
renderer = 'jpg' # depends on [control=['if'], data=[]]
elif re.search('\\.svg$', filename, re.IGNORECASE):
renderer = 'svg' # depends on [control=['if'], data=[]]
elif re.search('\\.png$', filename, re.IGNORECASE):
renderer = 'png' # depends on [control=['if'], data=[]]
elif re.search('\\.gif$', filename, re.IGNORECASE):
renderer = 'gif' # depends on [control=['if'], data=[]]
elif re.search('\\.ps$', filename, re.IGNORECASE):
renderer = 'ps' # depends on [control=['if'], data=[]]
elif re.search('\\.pdf$', filename, re.IGNORECASE):
renderer = 'pdf' # depends on [control=['if'], data=[]]
else:
renderer = 'svg'
filename += '.svg'
command = 'dot -T{} tempdot -o {}'.format(renderer, filename)
p = Popen(command, shell=True)
p.communicate()
if p.returncode:
        errmes = "Either graphviz is not installed, or it's not on PATH"
os.remove('tempdot')
error(errmes)
sys.exit(1) # depends on [control=['if'], data=[]]
os.remove('tempdot')
return 0
|
def _get_result_paths(self,data):
"""Return dict of {key: ResultPath}
"""
    #clustalw .aln is used when no or an unknown output type is specified
_treeinfo_formats = {'nj':'.nj',
'dist':'.dst',
'nexus':'.tre'}
result = {}
par = self.Parameters
abs = self._absolute
if par['-align'].isOn():
prefix = par['-infile'].Value.rsplit('.', 1)[0]
#prefix = par['-infile'].Value.split('.')[0]
aln_filename = self._aln_filename(prefix)
if par['-newtree'].isOn():
dnd_filename = abs(par['-newtree'].Value)
elif par['-usetree'].isOn():
dnd_filename = abs(par['-usetree'].Value)
else:
dnd_filename = abs(prefix + '.dnd')
result['Align'] = ResultPath(Path=aln_filename,IsWritten=True)
result['Dendro'] = ResultPath(Path=dnd_filename,IsWritten=True)
elif par['-profile'].isOn():
prefix1 = par['-profile1'].Value.rsplit('.', 1)[0]
prefix2 = par['-profile2'].Value.rsplit('.', 1)[0]
#prefix1 = par['-profile1'].Value.split('.')[0]
#prefix2 = par['-profile2'].Value.split('.')[0]
aln_filename = ''; aln_written = True
dnd1_filename = ''; tree1_written = True
dnd2_filename = ''; tree2_written = True
aln_filename = self._aln_filename(prefix1)
#usetree1
if par['-usetree1'].isOn():
tree1_written = False
#usetree2
if par['-usetree2'].isOn():
tree2_written = False
if par['-newtree1'].isOn():
dnd1_filename = abs(par['-newtree1'].Value)
aln_written=False
else:
dnd1_filename = abs(prefix1 + '.dnd')
if par['-newtree2'].isOn():
dnd2_filename = abs(par['-newtree2'].Value)
aln_written=False
else:
dnd2_filename = abs(prefix2 + '.dnd')
result['Align'] = ResultPath(Path=aln_filename,
IsWritten=aln_written)
result['Dendro1'] = ResultPath(Path=dnd1_filename,
IsWritten=tree1_written)
result['Dendro2'] = ResultPath(Path=dnd2_filename,
IsWritten=tree2_written)
elif par['-sequences'].isOn():
prefix1 = par['-profile1'].Value.rsplit('.', 1)[0]
prefix2 = par['-profile2'].Value.rsplit('.', 1)[0]
#prefix1 = par['-profile1'].Value.split('.')[0] #alignment
#prefix2 = par['-profile2'].Value.split('.')[0] #sequences
aln_filename = ''; aln_written = True
dnd_filename = ''; dnd_written = True
aln_filename = self._aln_filename(prefix2)
if par['-usetree'].isOn():
dnd_written = False
elif par['-newtree'].isOn():
aln_written = False
dnd_filename = abs(par['-newtree'].Value)
else:
dnd_filename = prefix2 + '.dnd'
result['Align'] = ResultPath(Path=aln_filename,\
IsWritten=aln_written)
result['Dendro'] = ResultPath(Path=dnd_filename,\
IsWritten=dnd_written)
elif par['-tree'].isOn():
prefix = par['-infile'].Value.rsplit('.', 1)[0]
#prefix = par['-infile'].Value.split('.')[0]
tree_filename = ''; tree_written = True
treeinfo_filename = ''; treeinfo_written = False
tree_filename = prefix + '.ph'
if par['-outputtree'].isOn() and\
par['-outputtree'].Value != 'phylip':
treeinfo_filename = prefix +\
_treeinfo_formats[par['-outputtree'].Value]
treeinfo_written = True
result['Tree'] = ResultPath(Path=tree_filename,\
IsWritten=tree_written)
result['TreeInfo'] = ResultPath(Path=treeinfo_filename,\
IsWritten=treeinfo_written)
elif par['-bootstrap'].isOn():
prefix = par['-infile'].Value.rsplit('.', 1)[0]
#prefix = par['-infile'].Value.split('.')[0]
boottree_filename = prefix + '.phb'
result['Tree'] = ResultPath(Path=boottree_filename,IsWritten=True)
return result
|
def function[_get_result_paths, parameter[self, data]]:
constant[Return dict of {key: ResultPath}
]
variable[_treeinfo_formats] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b725c0>, <ast.Constant object at 0x7da1b0b73be0>, <ast.Constant object at 0x7da1b0b721d0>], [<ast.Constant object at 0x7da1b0b73df0>, <ast.Constant object at 0x7da1b0b735b0>, <ast.Constant object at 0x7da1b0b70f70>]]
variable[result] assign[=] dictionary[[], []]
variable[par] assign[=] name[self].Parameters
variable[abs] assign[=] name[self]._absolute
if call[call[name[par]][constant[-align]].isOn, parameter[]] begin[:]
variable[prefix] assign[=] call[call[call[name[par]][constant[-infile]].Value.rsplit, parameter[constant[.], constant[1]]]][constant[0]]
variable[aln_filename] assign[=] call[name[self]._aln_filename, parameter[name[prefix]]]
if call[call[name[par]][constant[-newtree]].isOn, parameter[]] begin[:]
variable[dnd_filename] assign[=] call[name[abs], parameter[call[name[par]][constant[-newtree]].Value]]
call[name[result]][constant[Align]] assign[=] call[name[ResultPath], parameter[]]
call[name[result]][constant[Dendro]] assign[=] call[name[ResultPath], parameter[]]
return[name[result]]
|
keyword[def] identifier[_get_result_paths] ( identifier[self] , identifier[data] ):
literal[string]
identifier[_treeinfo_formats] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
identifier[result] ={}
identifier[par] = identifier[self] . identifier[Parameters]
identifier[abs] = identifier[self] . identifier[_absolute]
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[prefix] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[aln_filename] = identifier[self] . identifier[_aln_filename] ( identifier[prefix] )
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[dnd_filename] = identifier[abs] ( identifier[par] [ literal[string] ]. identifier[Value] )
keyword[elif] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[dnd_filename] = identifier[abs] ( identifier[par] [ literal[string] ]. identifier[Value] )
keyword[else] :
identifier[dnd_filename] = identifier[abs] ( identifier[prefix] + literal[string] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[aln_filename] , identifier[IsWritten] = keyword[True] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[dnd_filename] , identifier[IsWritten] = keyword[True] )
keyword[elif] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[prefix1] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[prefix2] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[aln_filename] = literal[string] ; identifier[aln_written] = keyword[True]
identifier[dnd1_filename] = literal[string] ; identifier[tree1_written] = keyword[True]
identifier[dnd2_filename] = literal[string] ; identifier[tree2_written] = keyword[True]
identifier[aln_filename] = identifier[self] . identifier[_aln_filename] ( identifier[prefix1] )
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[tree1_written] = keyword[False]
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[tree2_written] = keyword[False]
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[dnd1_filename] = identifier[abs] ( identifier[par] [ literal[string] ]. identifier[Value] )
identifier[aln_written] = keyword[False]
keyword[else] :
identifier[dnd1_filename] = identifier[abs] ( identifier[prefix1] + literal[string] )
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[dnd2_filename] = identifier[abs] ( identifier[par] [ literal[string] ]. identifier[Value] )
identifier[aln_written] = keyword[False]
keyword[else] :
identifier[dnd2_filename] = identifier[abs] ( identifier[prefix2] + literal[string] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[aln_filename] ,
identifier[IsWritten] = identifier[aln_written] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[dnd1_filename] ,
identifier[IsWritten] = identifier[tree1_written] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[dnd2_filename] ,
identifier[IsWritten] = identifier[tree2_written] )
keyword[elif] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[prefix1] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[prefix2] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[aln_filename] = literal[string] ; identifier[aln_written] = keyword[True]
identifier[dnd_filename] = literal[string] ; identifier[dnd_written] = keyword[True]
identifier[aln_filename] = identifier[self] . identifier[_aln_filename] ( identifier[prefix2] )
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[dnd_written] = keyword[False]
keyword[elif] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[aln_written] = keyword[False]
identifier[dnd_filename] = identifier[abs] ( identifier[par] [ literal[string] ]. identifier[Value] )
keyword[else] :
identifier[dnd_filename] = identifier[prefix2] + literal[string]
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[aln_filename] , identifier[IsWritten] = identifier[aln_written] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[dnd_filename] , identifier[IsWritten] = identifier[dnd_written] )
keyword[elif] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[prefix] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[tree_filename] = literal[string] ; identifier[tree_written] = keyword[True]
identifier[treeinfo_filename] = literal[string] ; identifier[treeinfo_written] = keyword[False]
identifier[tree_filename] = identifier[prefix] + literal[string]
keyword[if] identifier[par] [ literal[string] ]. identifier[isOn] () keyword[and] identifier[par] [ literal[string] ]. identifier[Value] != literal[string] :
identifier[treeinfo_filename] = identifier[prefix] + identifier[_treeinfo_formats] [ identifier[par] [ literal[string] ]. identifier[Value] ]
identifier[treeinfo_written] = keyword[True]
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[tree_filename] , identifier[IsWritten] = identifier[tree_written] )
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[treeinfo_filename] , identifier[IsWritten] = identifier[treeinfo_written] )
keyword[elif] identifier[par] [ literal[string] ]. identifier[isOn] ():
identifier[prefix] = identifier[par] [ literal[string] ]. identifier[Value] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[boottree_filename] = identifier[prefix] + literal[string]
identifier[result] [ literal[string] ]= identifier[ResultPath] ( identifier[Path] = identifier[boottree_filename] , identifier[IsWritten] = keyword[True] )
keyword[return] identifier[result]
|
def _get_result_paths(self, data):
"""Return dict of {key: ResultPath}
"""
    #clustalw .aln is used when no or an unknown output type is specified
_treeinfo_formats = {'nj': '.nj', 'dist': '.dst', 'nexus': '.tre'}
result = {}
par = self.Parameters
abs = self._absolute
if par['-align'].isOn():
prefix = par['-infile'].Value.rsplit('.', 1)[0]
#prefix = par['-infile'].Value.split('.')[0]
aln_filename = self._aln_filename(prefix)
if par['-newtree'].isOn():
dnd_filename = abs(par['-newtree'].Value) # depends on [control=['if'], data=[]]
elif par['-usetree'].isOn():
dnd_filename = abs(par['-usetree'].Value) # depends on [control=['if'], data=[]]
else:
dnd_filename = abs(prefix + '.dnd')
result['Align'] = ResultPath(Path=aln_filename, IsWritten=True)
result['Dendro'] = ResultPath(Path=dnd_filename, IsWritten=True) # depends on [control=['if'], data=[]]
elif par['-profile'].isOn():
prefix1 = par['-profile1'].Value.rsplit('.', 1)[0]
prefix2 = par['-profile2'].Value.rsplit('.', 1)[0]
#prefix1 = par['-profile1'].Value.split('.')[0]
#prefix2 = par['-profile2'].Value.split('.')[0]
aln_filename = ''
aln_written = True
dnd1_filename = ''
tree1_written = True
dnd2_filename = ''
tree2_written = True
aln_filename = self._aln_filename(prefix1)
#usetree1
if par['-usetree1'].isOn():
tree1_written = False # depends on [control=['if'], data=[]]
#usetree2
if par['-usetree2'].isOn():
tree2_written = False # depends on [control=['if'], data=[]]
if par['-newtree1'].isOn():
dnd1_filename = abs(par['-newtree1'].Value)
aln_written = False # depends on [control=['if'], data=[]]
else:
dnd1_filename = abs(prefix1 + '.dnd')
if par['-newtree2'].isOn():
dnd2_filename = abs(par['-newtree2'].Value)
aln_written = False # depends on [control=['if'], data=[]]
else:
dnd2_filename = abs(prefix2 + '.dnd')
result['Align'] = ResultPath(Path=aln_filename, IsWritten=aln_written)
result['Dendro1'] = ResultPath(Path=dnd1_filename, IsWritten=tree1_written)
result['Dendro2'] = ResultPath(Path=dnd2_filename, IsWritten=tree2_written) # depends on [control=['if'], data=[]]
elif par['-sequences'].isOn():
prefix1 = par['-profile1'].Value.rsplit('.', 1)[0]
prefix2 = par['-profile2'].Value.rsplit('.', 1)[0]
#prefix1 = par['-profile1'].Value.split('.')[0] #alignment
#prefix2 = par['-profile2'].Value.split('.')[0] #sequences
aln_filename = ''
aln_written = True
dnd_filename = ''
dnd_written = True
aln_filename = self._aln_filename(prefix2)
if par['-usetree'].isOn():
dnd_written = False # depends on [control=['if'], data=[]]
elif par['-newtree'].isOn():
aln_written = False
dnd_filename = abs(par['-newtree'].Value) # depends on [control=['if'], data=[]]
else:
dnd_filename = prefix2 + '.dnd'
result['Align'] = ResultPath(Path=aln_filename, IsWritten=aln_written)
result['Dendro'] = ResultPath(Path=dnd_filename, IsWritten=dnd_written) # depends on [control=['if'], data=[]]
elif par['-tree'].isOn():
prefix = par['-infile'].Value.rsplit('.', 1)[0]
#prefix = par['-infile'].Value.split('.')[0]
tree_filename = ''
tree_written = True
treeinfo_filename = ''
treeinfo_written = False
tree_filename = prefix + '.ph'
if par['-outputtree'].isOn() and par['-outputtree'].Value != 'phylip':
treeinfo_filename = prefix + _treeinfo_formats[par['-outputtree'].Value]
treeinfo_written = True # depends on [control=['if'], data=[]]
result['Tree'] = ResultPath(Path=tree_filename, IsWritten=tree_written)
result['TreeInfo'] = ResultPath(Path=treeinfo_filename, IsWritten=treeinfo_written) # depends on [control=['if'], data=[]]
elif par['-bootstrap'].isOn():
prefix = par['-infile'].Value.rsplit('.', 1)[0]
#prefix = par['-infile'].Value.split('.')[0]
boottree_filename = prefix + '.phb'
result['Tree'] = ResultPath(Path=boottree_filename, IsWritten=True) # depends on [control=['if'], data=[]]
return result
|
def merkleroot(merkletree: 'MerkleTreeState') -> Locksroot:
""" Return the root element of the merkle tree. """
assert merkletree.layers, 'the merkle tree layers are empty'
assert merkletree.layers[MERKLEROOT], 'the root layer is empty'
return Locksroot(merkletree.layers[MERKLEROOT][0])
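
# A minimal sketch of the layer layout the assertions expect, assuming
# MERKLEROOT indexes the top layer (e.g. -1, i.e. the last layer).
MERKLEROOT = -1  # assumption for illustration
layers = [[b'leaf-1', b'leaf-2'],  # leaves
          [b'root-hash']]          # root layer with a single element
assert layers and layers[MERKLEROOT]
assert layers[MERKLEROOT][0] == b'root-hash'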
|
def function[merkleroot, parameter[merkletree]]:
constant[ Return the root element of the merkle tree. ]
assert[name[merkletree].layers]
assert[call[name[merkletree].layers][name[MERKLEROOT]]]
return[call[name[Locksroot], parameter[call[call[name[merkletree].layers][name[MERKLEROOT]]][constant[0]]]]]
|
keyword[def] identifier[merkleroot] ( identifier[merkletree] : literal[string] )-> identifier[Locksroot] :
literal[string]
keyword[assert] identifier[merkletree] . identifier[layers] , literal[string]
keyword[assert] identifier[merkletree] . identifier[layers] [ identifier[MERKLEROOT] ], literal[string]
keyword[return] identifier[Locksroot] ( identifier[merkletree] . identifier[layers] [ identifier[MERKLEROOT] ][ literal[int] ])
|
def merkleroot(merkletree: 'MerkleTreeState') -> Locksroot:
""" Return the root element of the merkle tree. """
assert merkletree.layers, 'the merkle tree layers are empty'
assert merkletree.layers[MERKLEROOT], 'the root layer is empty'
return Locksroot(merkletree.layers[MERKLEROOT][0])
|
def get(self, url, parameters=None):
"""
Implement libgreader's interface for authenticated GET request
"""
if self._http == None:
self._setupHttp()
uri = url + "?" + self.getParameters(parameters)
response, content = self._http.request(uri, "GET")
return content
|
def function[get, parameter[self, url, parameters]]:
constant[
Implement libgreader's interface for authenticated GET request
]
if compare[name[self]._http equal[==] constant[None]] begin[:]
call[name[self]._setupHttp, parameter[]]
variable[uri] assign[=] binary_operation[binary_operation[name[url] + constant[?]] + call[name[self].getParameters, parameter[name[parameters]]]]
<ast.Tuple object at 0x7da204347d00> assign[=] call[name[self]._http.request, parameter[name[uri], constant[GET]]]
return[name[content]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[url] , identifier[parameters] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_http] == keyword[None] :
identifier[self] . identifier[_setupHttp] ()
identifier[uri] = identifier[url] + literal[string] + identifier[self] . identifier[getParameters] ( identifier[parameters] )
identifier[response] , identifier[content] = identifier[self] . identifier[_http] . identifier[request] ( identifier[uri] , literal[string] )
keyword[return] identifier[content]
|
def get(self, url, parameters=None):
"""
Implement libgreader's interface for authenticated GET request
"""
if self._http == None:
self._setupHttp() # depends on [control=['if'], data=[]]
uri = url + '?' + self.getParameters(parameters)
(response, content) = self._http.request(uri, 'GET')
return content
|
def set_repo_permission(self, repo, permission):
"""
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:param permission: string
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
put_parameters = {
"permission": permission,
}
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/repos/" + repo._identity,
input=put_parameters
)
|
def function[set_repo_permission, parameter[self, repo, permission]]:
constant[
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:param permission: string
:rtype: None
]
assert[call[name[isinstance], parameter[name[repo], name[github].Repository.Repository]]]
variable[put_parameters] assign[=] dictionary[[<ast.Constant object at 0x7da204961780>], [<ast.Name object at 0x7da204962950>]]
<ast.Tuple object at 0x7da204963c70> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[PUT], binary_operation[binary_operation[name[self].url + constant[/repos/]] + name[repo]._identity]]]
|
keyword[def] identifier[set_repo_permission] ( identifier[self] , identifier[repo] , identifier[permission] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[repo] , identifier[github] . identifier[Repository] . identifier[Repository] ), identifier[repo]
identifier[put_parameters] ={
literal[string] : identifier[permission] ,
}
identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] (
literal[string] ,
identifier[self] . identifier[url] + literal[string] + identifier[repo] . identifier[_identity] ,
identifier[input] = identifier[put_parameters]
)
|
def set_repo_permission(self, repo, permission):
"""
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:param permission: string
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
put_parameters = {'permission': permission}
(headers, data) = self._requester.requestJsonAndCheck('PUT', self.url + '/repos/' + repo._identity, input=put_parameters)
|
def get_readlock(pid, path):
"""Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
"""
timestamp = int(time.time() * 1e6)
lockdir_name = "%s.readlock.%i.%i" % (path, pid, timestamp)
os.mkdir(lockdir_name)
# Register function to release the readlock at the end of the script
atexit.register(release_readlock, lockdir_name=lockdir_name)
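
# A sketch of the lock-directory name this produces (no directory is created
# here); the microsecond timestamp keeps concurrent readers from colliding.
import os
import time

pid, path = os.getpid(), '/tmp/data.bin'
timestamp = int(time.time() * 1e6)
lockdir_name = "%s.readlock.%i.%i" % (path, pid, timestamp)
# e.g. '/tmp/data.bin.readlock.12345.1700000000000000'
assert lockdir_name.startswith(path + '.readlock.')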
|
def function[get_readlock, parameter[pid, path]]:
constant[Obtain a readlock on a file.
Parameters
----------
path : str
Name of the file on which to obtain a readlock
]
variable[timestamp] assign[=] call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000000.0]]]]
variable[lockdir_name] assign[=] binary_operation[constant[%s.readlock.%i.%i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a6dd0>, <ast.Name object at 0x7da2054a5cc0>, <ast.Name object at 0x7da2054a7400>]]]
call[name[os].mkdir, parameter[name[lockdir_name]]]
call[name[atexit].register, parameter[name[release_readlock]]]
|
keyword[def] identifier[get_readlock] ( identifier[pid] , identifier[path] ):
literal[string]
identifier[timestamp] = identifier[int] ( identifier[time] . identifier[time] ()* literal[int] )
identifier[lockdir_name] = literal[string] %( identifier[path] , identifier[pid] , identifier[timestamp] )
identifier[os] . identifier[mkdir] ( identifier[lockdir_name] )
identifier[atexit] . identifier[register] ( identifier[release_readlock] , identifier[lockdir_name] = identifier[lockdir_name] )
|
def get_readlock(pid, path):
"""Obtain a readlock on a file.
Parameters
----------
    pid : int
        Process id of the caller, used to name the lock directory
    path : str
        Name of the file on which to obtain a readlock
"""
timestamp = int(time.time() * 1000000.0)
lockdir_name = '%s.readlock.%i.%i' % (path, pid, timestamp)
os.mkdir(lockdir_name)
# Register function to release the readlock at the end of the script
atexit.register(release_readlock, lockdir_name=lockdir_name)
|
def calledWith(self, *args, **kwargs): #pylint: disable=invalid-name
"""
    Determine whether the given args/kwargs were used in a previous call
Eg.
f(1, 2, 3)
    spy.calledWith(1, 2) will return True, because they match partially
f(a=1, b=2, c=3)
    spy.calledWith(a=1, b=3) will return True, because they match partially
Return: Boolean
"""
self.__get_func = SinonSpy.__get_directly
return self.calledWithMatch(*args, **kwargs)
|
def function[calledWith, parameter[self]]:
constant[
    Determine whether the given args/kwargs were used in a previous call
Eg.
f(1, 2, 3)
    spy.calledWith(1, 2) will return True, because they match partially
f(a=1, b=2, c=3)
    spy.calledWith(a=1, b=3) will return True, because they match partially
Return: Boolean
]
name[self].__get_func assign[=] name[SinonSpy].__get_directly
return[call[name[self].calledWithMatch, parameter[<ast.Starred object at 0x7da18f00f0d0>]]]
|
keyword[def] identifier[calledWith] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[__get_func] = identifier[SinonSpy] . identifier[__get_directly]
keyword[return] identifier[self] . identifier[calledWithMatch] (* identifier[args] ,** identifier[kwargs] )
|
def calledWith(self, *args, **kwargs): #pylint: disable=invalid-name
    '\n        Determine whether the given args/kwargs were used in a previous call\n        Eg.\n        f(1, 2, 3)\n        spy.calledWith(1, 2) will return True, because they match partially\n        f(a=1, b=2, c=3)\n        spy.calledWith(a=1, b=3) will return True, because they match partially\n        Return: Boolean\n        '
self.__get_func = SinonSpy.__get_directly
return self.calledWithMatch(*args, **kwargs)
|
def clear_not_launched_queued_tasks(self, session=None):
"""
    If the airflow scheduler restarts with pending "Queued" tasks, the tasks
    may or may not have been launched. Thus, on starting up, the scheduler
    checks every "Queued" task to see if it has been launched (i.e. if there
    is a corresponding pod on kubernetes). If it has been launched, do
    nothing; otherwise reset the state to "None" so the task will be
    rescheduled.
    This will not be necessary in a future version of airflow in which there
    is proper support for State.LAUNCHED.
"""
queued_tasks = session\
.query(TaskInstance)\
.filter(TaskInstance.state == State.QUEUED).all()
self.log.info(
'When executor started up, found %s queued task instances',
len(queued_tasks)
)
for task in queued_tasks:
dict_string = (
"dag_id={},task_id={},execution_date={},airflow-worker={}".format(
AirflowKubernetesScheduler._make_safe_label_value(task.dag_id),
AirflowKubernetesScheduler._make_safe_label_value(task.task_id),
AirflowKubernetesScheduler._datetime_to_label_safe_datestring(
task.execution_date
),
self.worker_uuid
)
)
kwargs = dict(label_selector=dict_string)
pod_list = self.kube_client.list_namespaced_pod(
self.kube_config.kube_namespace, **kwargs)
if len(pod_list.items) == 0:
self.log.info(
'TaskInstance: %s found in queued state but was not launched, '
'rescheduling', task
)
session.query(TaskInstance).filter(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.execution_date == task.execution_date
).update({TaskInstance.state: State.NONE})
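# For illustration only, the label selector built above takes the form
# (values hypothetical; the execution date uses the scheduler's label-safe
# encoding):
#
#   dag_id=my_dag,task_id=my_task,execution_date=<label-safe date>,airflow-worker=<uuid>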
|
def function[clear_not_launched_queued_tasks, parameter[self, session]]:
constant[
    If the airflow scheduler restarts with pending "Queued" tasks, the tasks
    may or may not have been launched. Thus, on starting up, the scheduler
    checks every "Queued" task to see if it has been launched (i.e. if there
    is a corresponding pod on kubernetes). If it has been launched, do
    nothing; otherwise reset the state to "None" so the task will be
    rescheduled.
    This will not be necessary in a future version of airflow in which there
    is proper support for State.LAUNCHED.
]
variable[queued_tasks] assign[=] call[call[call[name[session].query, parameter[name[TaskInstance]]].filter, parameter[compare[name[TaskInstance].state equal[==] name[State].QUEUED]]].all, parameter[]]
call[name[self].log.info, parameter[constant[When executor started up, found %s queued task instances], call[name[len], parameter[name[queued_tasks]]]]]
for taget[name[task]] in starred[name[queued_tasks]] begin[:]
variable[dict_string] assign[=] call[constant[dag_id={},task_id={},execution_date={},airflow-worker={}].format, parameter[call[name[AirflowKubernetesScheduler]._make_safe_label_value, parameter[name[task].dag_id]], call[name[AirflowKubernetesScheduler]._make_safe_label_value, parameter[name[task].task_id]], call[name[AirflowKubernetesScheduler]._datetime_to_label_safe_datestring, parameter[name[task].execution_date]], name[self].worker_uuid]]
variable[kwargs] assign[=] call[name[dict], parameter[]]
variable[pod_list] assign[=] call[name[self].kube_client.list_namespaced_pod, parameter[name[self].kube_config.kube_namespace]]
if compare[call[name[len], parameter[name[pod_list].items]] equal[==] constant[0]] begin[:]
call[name[self].log.info, parameter[constant[TaskInstance: %s found in queued state but was not launched, rescheduling], name[task]]]
call[call[call[name[session].query, parameter[name[TaskInstance]]].filter, parameter[compare[name[TaskInstance].dag_id equal[==] name[task].dag_id], compare[name[TaskInstance].task_id equal[==] name[task].task_id], compare[name[TaskInstance].execution_date equal[==] name[task].execution_date]]].update, parameter[dictionary[[<ast.Attribute object at 0x7da1b052b010>], [<ast.Attribute object at 0x7da1b052aa40>]]]]
|
keyword[def] identifier[clear_not_launched_queued_tasks] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
identifier[queued_tasks] = identifier[session] . identifier[query] ( identifier[TaskInstance] ). identifier[filter] ( identifier[TaskInstance] . identifier[state] == identifier[State] . identifier[QUEUED] ). identifier[all] ()
identifier[self] . identifier[log] . identifier[info] (
literal[string] ,
identifier[len] ( identifier[queued_tasks] )
)
keyword[for] identifier[task] keyword[in] identifier[queued_tasks] :
identifier[dict_string] =(
literal[string] . identifier[format] (
identifier[AirflowKubernetesScheduler] . identifier[_make_safe_label_value] ( identifier[task] . identifier[dag_id] ),
identifier[AirflowKubernetesScheduler] . identifier[_make_safe_label_value] ( identifier[task] . identifier[task_id] ),
identifier[AirflowKubernetesScheduler] . identifier[_datetime_to_label_safe_datestring] (
identifier[task] . identifier[execution_date]
),
identifier[self] . identifier[worker_uuid]
)
)
identifier[kwargs] = identifier[dict] ( identifier[label_selector] = identifier[dict_string] )
identifier[pod_list] = identifier[self] . identifier[kube_client] . identifier[list_namespaced_pod] (
identifier[self] . identifier[kube_config] . identifier[kube_namespace] ,** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[pod_list] . identifier[items] )== literal[int] :
identifier[self] . identifier[log] . identifier[info] (
literal[string]
literal[string] , identifier[task]
)
identifier[session] . identifier[query] ( identifier[TaskInstance] ). identifier[filter] (
identifier[TaskInstance] . identifier[dag_id] == identifier[task] . identifier[dag_id] ,
identifier[TaskInstance] . identifier[task_id] == identifier[task] . identifier[task_id] ,
identifier[TaskInstance] . identifier[execution_date] == identifier[task] . identifier[execution_date]
). identifier[update] ({ identifier[TaskInstance] . identifier[state] : identifier[State] . identifier[NONE] })
|
def clear_not_launched_queued_tasks(self, session=None):
"""
    If the airflow scheduler restarts with pending "Queued" tasks, the tasks
    may or may not have been launched. Thus, on starting up, the scheduler
    checks every "Queued" task to see if it has been launched (i.e. if there
    is a corresponding pod on kubernetes). If it has been launched, do
    nothing; otherwise reset the state to "None" so the task will be
    rescheduled.
    This will not be necessary in a future version of airflow in which there
    is proper support for State.LAUNCHED.
"""
queued_tasks = session.query(TaskInstance).filter(TaskInstance.state == State.QUEUED).all()
self.log.info('When executor started up, found %s queued task instances', len(queued_tasks))
for task in queued_tasks:
dict_string = 'dag_id={},task_id={},execution_date={},airflow-worker={}'.format(AirflowKubernetesScheduler._make_safe_label_value(task.dag_id), AirflowKubernetesScheduler._make_safe_label_value(task.task_id), AirflowKubernetesScheduler._datetime_to_label_safe_datestring(task.execution_date), self.worker_uuid)
kwargs = dict(label_selector=dict_string)
pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs)
if len(pod_list.items) == 0:
self.log.info('TaskInstance: %s found in queued state but was not launched, rescheduling', task)
session.query(TaskInstance).filter(TaskInstance.dag_id == task.dag_id, TaskInstance.task_id == task.task_id, TaskInstance.execution_date == task.execution_date).update({TaskInstance.state: State.NONE}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']]
|
def chk_module_statements(ctx, module_stmt, canonical=False):
"""Validate the statement hierarchy according to the grammar.
Return True if module is valid, False otherwise.
"""
return chk_statement(ctx, module_stmt, top_stmts, canonical)
|
def function[chk_module_statements, parameter[ctx, module_stmt, canonical]]:
constant[Validate the statement hierarchy according to the grammar.
Return True if module is valid, False otherwise.
]
return[call[name[chk_statement], parameter[name[ctx], name[module_stmt], name[top_stmts], name[canonical]]]]
|
keyword[def] identifier[chk_module_statements] ( identifier[ctx] , identifier[module_stmt] , identifier[canonical] = keyword[False] ):
literal[string]
keyword[return] identifier[chk_statement] ( identifier[ctx] , identifier[module_stmt] , identifier[top_stmts] , identifier[canonical] )
|
def chk_module_statements(ctx, module_stmt, canonical=False):
"""Validate the statement hierarchy according to the grammar.
Return True if module is valid, False otherwise.
"""
return chk_statement(ctx, module_stmt, top_stmts, canonical)
|
def app_list(**kwargs):
"""
Show uploaded applications.
"""
ctx = Context(**kwargs)
ctx.execute_action('app:list', **{
'storage': ctx.repo.create_secure_service('storage'),
})
|
def function[app_list, parameter[]]:
constant[
Show uploaded applications.
]
variable[ctx] assign[=] call[name[Context], parameter[]]
call[name[ctx].execute_action, parameter[constant[app:list]]]
|
keyword[def] identifier[app_list] (** identifier[kwargs] ):
literal[string]
identifier[ctx] = identifier[Context] (** identifier[kwargs] )
identifier[ctx] . identifier[execute_action] ( literal[string] ,**{
literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ),
})
|
def app_list(**kwargs):
"""
Show uploaded applications.
"""
ctx = Context(**kwargs)
ctx.execute_action('app:list', **{'storage': ctx.repo.create_secure_service('storage')})
|
def _find_statement_by_line(node, line):
"""Extracts the statement on a specific line from an AST.
If the line number of node matches line, it will be returned;
otherwise its children are iterated and the function is called
recursively.
:param node: An astroid node.
:type node: astroid.bases.NodeNG
:param line: The line number of the statement to extract.
:type line: int
:returns: The statement on the line, or None if no statement for the line
can be found.
:rtype: astroid.bases.NodeNG or None
"""
if isinstance(node, (nodes.ClassDef, nodes.FunctionDef)):
        # This is an inaccuracy in the AST: the nodes that can be
        # decorated do not carry explicit information on which line
        # the actual definition (class/def) starts, but .fromlineno
        # seems to be close enough.
node_line = node.fromlineno
else:
node_line = node.lineno
if node_line == line:
return node
for child in node.get_children():
result = _find_statement_by_line(child, line)
if result:
return result
return None
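# A minimal usage sketch (assumes the `astroid` package is installed; the
# module source is a made-up example):
#
#   import astroid
#   tree = astroid.parse("x = 1\n\ndef f():\n    return x\n")
#   stmt = _find_statement_by_line(tree, 3)  # the `def f` FunctionDef node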
|
def function[_find_statement_by_line, parameter[node, line]]:
constant[Extracts the statement on a specific line from an AST.
If the line number of node matches line, it will be returned;
otherwise its children are iterated and the function is called
recursively.
:param node: An astroid node.
:type node: astroid.bases.NodeNG
:param line: The line number of the statement to extract.
:type line: int
:returns: The statement on the line, or None if no statement for the line
can be found.
:rtype: astroid.bases.NodeNG or None
]
if call[name[isinstance], parameter[name[node], tuple[[<ast.Attribute object at 0x7da1b1edb9a0>, <ast.Attribute object at 0x7da1b1edb7c0>]]]] begin[:]
variable[node_line] assign[=] name[node].fromlineno
if compare[name[node_line] equal[==] name[line]] begin[:]
return[name[node]]
for taget[name[child]] in starred[call[name[node].get_children, parameter[]]] begin[:]
variable[result] assign[=] call[name[_find_statement_by_line], parameter[name[child], name[line]]]
if name[result] begin[:]
return[name[result]]
return[constant[None]]
|
keyword[def] identifier[_find_statement_by_line] ( identifier[node] , identifier[line] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[node] ,( identifier[nodes] . identifier[ClassDef] , identifier[nodes] . identifier[FunctionDef] )):
identifier[node_line] = identifier[node] . identifier[fromlineno]
keyword[else] :
identifier[node_line] = identifier[node] . identifier[lineno]
keyword[if] identifier[node_line] == identifier[line] :
keyword[return] identifier[node]
keyword[for] identifier[child] keyword[in] identifier[node] . identifier[get_children] ():
identifier[result] = identifier[_find_statement_by_line] ( identifier[child] , identifier[line] )
keyword[if] identifier[result] :
keyword[return] identifier[result]
keyword[return] keyword[None]
|
def _find_statement_by_line(node, line):
"""Extracts the statement on a specific line from an AST.
If the line number of node matches line, it will be returned;
otherwise its children are iterated and the function is called
recursively.
:param node: An astroid node.
:type node: astroid.bases.NodeNG
:param line: The line number of the statement to extract.
:type line: int
:returns: The statement on the line, or None if no statement for the line
can be found.
:rtype: astroid.bases.NodeNG or None
"""
if isinstance(node, (nodes.ClassDef, nodes.FunctionDef)):
        # This is an inaccuracy in the AST: the nodes that can be
        # decorated do not carry explicit information on which line
        # the actual definition (class/def) starts, but .fromlineno
        # seems to be close enough.
node_line = node.fromlineno # depends on [control=['if'], data=[]]
else:
node_line = node.lineno
if node_line == line:
return node # depends on [control=['if'], data=[]]
for child in node.get_children():
result = _find_statement_by_line(child, line)
if result:
return result # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
return None
|
def random_val(index, tune_params):
"""return a random value for a parameter"""
key = list(tune_params.keys())[index]
return random.choice(tune_params[key])
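# A minimal usage sketch with made-up tunable parameters:
#
#   tune_params = {"block_size_x": [16, 32, 64], "block_size_y": [1, 2, 4]}
#   random_val(0, tune_params)  # e.g. 32, a random pick for "block_size_x"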
|
def function[random_val, parameter[index, tune_params]]:
constant[return a random value for a parameter]
variable[key] assign[=] call[call[name[list], parameter[call[name[tune_params].keys, parameter[]]]]][name[index]]
return[call[name[random].choice, parameter[call[name[tune_params]][name[key]]]]]
|
keyword[def] identifier[random_val] ( identifier[index] , identifier[tune_params] ):
literal[string]
identifier[key] = identifier[list] ( identifier[tune_params] . identifier[keys] ())[ identifier[index] ]
keyword[return] identifier[random] . identifier[choice] ( identifier[tune_params] [ identifier[key] ])
|
def random_val(index, tune_params):
"""return a random value for a parameter"""
key = list(tune_params.keys())[index]
return random.choice(tune_params[key])
|
def subontology(self, minimal=False):
"""
Generates a sub-ontology based on associations
"""
return self.ontology.subontology(self.objects, minimal=minimal)
|
def function[subontology, parameter[self, minimal]]:
constant[
Generates a sub-ontology based on associations
]
return[call[name[self].ontology.subontology, parameter[name[self].objects]]]
|
keyword[def] identifier[subontology] ( identifier[self] , identifier[minimal] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[ontology] . identifier[subontology] ( identifier[self] . identifier[objects] , identifier[minimal] = identifier[minimal] )
|
def subontology(self, minimal=False):
"""
Generates a sub-ontology based on associations
"""
return self.ontology.subontology(self.objects, minimal=minimal)
|
def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False):
""" Return a pandas DataFrame with the concat'ed
content of the `sheetnames` from the Excel file in
`xl_path`.
Parameters
----------
xl_path: str
Path to the Excel file
sheetnames: list of str
List of existing sheet names of `xl_path`.
If None, will use all sheets from `xl_path`.
add_tab_names: bool
        If True, adds a 'Tab' column that says which
        tab the row comes from.
Returns
-------
df: pandas.DataFrame
"""
xl_path, choice = _check_xl_path(xl_path)
if sheetnames is None:
sheetnames = get_sheet_list(xl_path)
sheets = pd.read_excel(xl_path, sheetname=sheetnames)
if add_tab_names:
for tab in sheets:
sheets[tab]['Tab'] = [tab] * len(sheets[tab])
return pd.concat([sheets[tab] for tab in sheets])
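# A minimal usage sketch; the workbook path and sheet names are hypothetical:
#
#   df = concat_sheets("data.xlsx", sheetnames=["2018", "2019"],
#                      add_tab_names=True)
#   df["Tab"].unique()  # -> the originating sheet for each row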
|
def function[concat_sheets, parameter[xl_path, sheetnames, add_tab_names]]:
constant[ Return a pandas DataFrame with the concat'ed
content of the `sheetnames` from the Excel file in
`xl_path`.
Parameters
----------
xl_path: str
Path to the Excel file
sheetnames: list of str
List of existing sheet names of `xl_path`.
If None, will use all sheets from `xl_path`.
add_tab_names: bool
        If True, adds a 'Tab' column that says which
        tab the row comes from.
Returns
-------
df: pandas.DataFrame
]
<ast.Tuple object at 0x7da1b008a650> assign[=] call[name[_check_xl_path], parameter[name[xl_path]]]
if compare[name[sheetnames] is constant[None]] begin[:]
variable[sheetnames] assign[=] call[name[get_sheet_list], parameter[name[xl_path]]]
variable[sheets] assign[=] call[name[pd].read_excel, parameter[name[xl_path]]]
if name[add_tab_names] begin[:]
for taget[name[tab]] in starred[name[sheets]] begin[:]
call[call[name[sheets]][name[tab]]][constant[Tab]] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b008aa40>]] * call[name[len], parameter[call[name[sheets]][name[tab]]]]]
return[call[name[pd].concat, parameter[<ast.ListComp object at 0x7da1b008ae90>]]]
|
keyword[def] identifier[concat_sheets] ( identifier[xl_path] : identifier[str] , identifier[sheetnames] = keyword[None] , identifier[add_tab_names] = keyword[False] ):
literal[string]
identifier[xl_path] , identifier[choice] = identifier[_check_xl_path] ( identifier[xl_path] )
keyword[if] identifier[sheetnames] keyword[is] keyword[None] :
identifier[sheetnames] = identifier[get_sheet_list] ( identifier[xl_path] )
identifier[sheets] = identifier[pd] . identifier[read_excel] ( identifier[xl_path] , identifier[sheetname] = identifier[sheetnames] )
keyword[if] identifier[add_tab_names] :
keyword[for] identifier[tab] keyword[in] identifier[sheets] :
identifier[sheets] [ identifier[tab] ][ literal[string] ]=[ identifier[tab] ]* identifier[len] ( identifier[sheets] [ identifier[tab] ])
keyword[return] identifier[pd] . identifier[concat] ([ identifier[sheets] [ identifier[tab] ] keyword[for] identifier[tab] keyword[in] identifier[sheets] ])
|
def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False):
""" Return a pandas DataFrame with the concat'ed
content of the `sheetnames` from the Excel file in
`xl_path`.
Parameters
----------
xl_path: str
Path to the Excel file
sheetnames: list of str
List of existing sheet names of `xl_path`.
If None, will use all sheets from `xl_path`.
add_tab_names: bool
        If True, adds a 'Tab' column that says which
        tab the row comes from.
Returns
-------
df: pandas.DataFrame
"""
(xl_path, choice) = _check_xl_path(xl_path)
if sheetnames is None:
sheetnames = get_sheet_list(xl_path) # depends on [control=['if'], data=['sheetnames']]
sheets = pd.read_excel(xl_path, sheetname=sheetnames)
if add_tab_names:
for tab in sheets:
sheets[tab]['Tab'] = [tab] * len(sheets[tab]) # depends on [control=['for'], data=['tab']] # depends on [control=['if'], data=[]]
return pd.concat([sheets[tab] for tab in sheets])
|
def create_source(self, datapusher=True):
"""
Populate ckan directory from preloaded image and copy
    who.ini and schema.xml into conf directory
"""
task.create_source(self.target, self._preload_image(), datapusher)
|
def function[create_source, parameter[self, datapusher]]:
constant[
Populate ckan directory from preloaded image and copy
    who.ini and schema.xml into conf directory
]
call[name[task].create_source, parameter[name[self].target, call[name[self]._preload_image, parameter[]], name[datapusher]]]
|
keyword[def] identifier[create_source] ( identifier[self] , identifier[datapusher] = keyword[True] ):
literal[string]
identifier[task] . identifier[create_source] ( identifier[self] . identifier[target] , identifier[self] . identifier[_preload_image] (), identifier[datapusher] )
|
def create_source(self, datapusher=True):
"""
Populate ckan directory from preloaded image and copy
    who.ini and schema.xml into conf directory
"""
task.create_source(self.target, self._preload_image(), datapusher)
|
def convert_simple_rnn(builder, layer, input_names, output_names, keras_layer):
"""Convert an SimpleRNN layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
hidden_size = keras_layer.output_dim
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
if keras_layer.consume_less not in ['cpu', 'gpu']:
raise ValueError('Cannot convert Keras layer with consume_less = %s' % keras_layer.consume_less)
W_h = np.zeros((hidden_size, hidden_size))
W_x = np.zeros((hidden_size, input_size))
b = np.zeros((hidden_size,))
if keras_layer.consume_less == 'cpu':
W_h = keras_layer.get_weights()[1].T
W_x = keras_layer.get_weights()[0].T
b = keras_layer.get_weights()[2]
else:
W_h = keras_layer.get_weights()[1].T
W_x = keras_layer.get_weights()[0].T
b = keras_layer.get_weights()[2]
    # Set activation type
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_simple_rnn(
name = layer,
W_h = W_h, W_x = W_x, b = b,
hidden_size = hidden_size,
input_size = input_size,
activation = activation_str,
input_names = input_names,
output_names = output_names,
output_all=output_all,
reverse_input=reverse_input)
|
def function[convert_simple_rnn, parameter[builder, layer, input_names, output_names, keras_layer]]:
    constant[Convert a SimpleRNN layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
]
variable[hidden_size] assign[=] name[keras_layer].output_dim
variable[input_size] assign[=] call[name[keras_layer].input_shape][<ast.UnaryOp object at 0x7da1b21d6dd0>]
variable[output_all] assign[=] name[keras_layer].return_sequences
variable[reverse_input] assign[=] name[keras_layer].go_backwards
if compare[name[keras_layer].consume_less <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b21d7d00>, <ast.Constant object at 0x7da1b21d7b80>]]] begin[:]
<ast.Raise object at 0x7da1b21d5990>
variable[W_h] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b21d6b30>, <ast.Name object at 0x7da1b21d6d40>]]]]
variable[W_x] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b21d6e90>, <ast.Name object at 0x7da1b21d6a70>]]]]
variable[b] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b21d5a80>]]]]
if compare[name[keras_layer].consume_less equal[==] constant[cpu]] begin[:]
variable[W_h] assign[=] call[call[name[keras_layer].get_weights, parameter[]]][constant[1]].T
variable[W_x] assign[=] call[call[name[keras_layer].get_weights, parameter[]]][constant[0]].T
variable[b] assign[=] call[call[name[keras_layer].get_weights, parameter[]]][constant[2]]
variable[activation_str] assign[=] call[name[_get_recurrent_activation_name_from_keras], parameter[name[keras_layer].activation]]
call[name[builder].add_simple_rnn, parameter[]]
|
keyword[def] identifier[convert_simple_rnn] ( identifier[builder] , identifier[layer] , identifier[input_names] , identifier[output_names] , identifier[keras_layer] ):
literal[string]
identifier[hidden_size] = identifier[keras_layer] . identifier[output_dim]
identifier[input_size] = identifier[keras_layer] . identifier[input_shape] [- literal[int] ]
identifier[output_all] = identifier[keras_layer] . identifier[return_sequences]
identifier[reverse_input] = identifier[keras_layer] . identifier[go_backwards]
keyword[if] identifier[keras_layer] . identifier[consume_less] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] % identifier[keras_layer] . identifier[consume_less] )
identifier[W_h] = identifier[np] . identifier[zeros] (( identifier[hidden_size] , identifier[hidden_size] ))
identifier[W_x] = identifier[np] . identifier[zeros] (( identifier[hidden_size] , identifier[input_size] ))
identifier[b] = identifier[np] . identifier[zeros] (( identifier[hidden_size] ,))
keyword[if] identifier[keras_layer] . identifier[consume_less] == literal[string] :
identifier[W_h] = identifier[keras_layer] . identifier[get_weights] ()[ literal[int] ]. identifier[T]
identifier[W_x] = identifier[keras_layer] . identifier[get_weights] ()[ literal[int] ]. identifier[T]
identifier[b] = identifier[keras_layer] . identifier[get_weights] ()[ literal[int] ]
keyword[else] :
identifier[W_h] = identifier[keras_layer] . identifier[get_weights] ()[ literal[int] ]. identifier[T]
identifier[W_x] = identifier[keras_layer] . identifier[get_weights] ()[ literal[int] ]. identifier[T]
identifier[b] = identifier[keras_layer] . identifier[get_weights] ()[ literal[int] ]
identifier[activation_str] = identifier[_get_recurrent_activation_name_from_keras] ( identifier[keras_layer] . identifier[activation] )
identifier[builder] . identifier[add_simple_rnn] (
identifier[name] = identifier[layer] ,
identifier[W_h] = identifier[W_h] , identifier[W_x] = identifier[W_x] , identifier[b] = identifier[b] ,
identifier[hidden_size] = identifier[hidden_size] ,
identifier[input_size] = identifier[input_size] ,
identifier[activation] = identifier[activation_str] ,
identifier[input_names] = identifier[input_names] ,
identifier[output_names] = identifier[output_names] ,
identifier[output_all] = identifier[output_all] ,
identifier[reverse_input] = identifier[reverse_input] )
|
def convert_simple_rnn(builder, layer, input_names, output_names, keras_layer):
"""Convert an SimpleRNN layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
hidden_size = keras_layer.output_dim
input_size = keras_layer.input_shape[-1]
output_all = keras_layer.return_sequences
reverse_input = keras_layer.go_backwards
if keras_layer.consume_less not in ['cpu', 'gpu']:
raise ValueError('Cannot convert Keras layer with consume_less = %s' % keras_layer.consume_less) # depends on [control=['if'], data=[]]
W_h = np.zeros((hidden_size, hidden_size))
W_x = np.zeros((hidden_size, input_size))
b = np.zeros((hidden_size,))
if keras_layer.consume_less == 'cpu':
W_h = keras_layer.get_weights()[1].T
W_x = keras_layer.get_weights()[0].T
b = keras_layer.get_weights()[2] # depends on [control=['if'], data=[]]
else:
W_h = keras_layer.get_weights()[1].T
W_x = keras_layer.get_weights()[0].T
b = keras_layer.get_weights()[2]
    # Set activation type
activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
# Add to the network
builder.add_simple_rnn(name=layer, W_h=W_h, W_x=W_x, b=b, hidden_size=hidden_size, input_size=input_size, activation=activation_str, input_names=input_names, output_names=output_names, output_all=output_all, reverse_input=reverse_input)
|
def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
'''
Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
:param str message:
The ciphertext to be decrypted.
:param _EncryptionData encryption_data:
The metadata associated with this ciphertext.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The decrypted plaintext.
:rtype: str
'''
_validate_not_none('message', message)
content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
# decrypt data
decrypted_data = message
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
# unpad data
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
return decrypted_data
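# A sketch of a key-encryption-key object satisfying the interface the
# docstring describes; the class and its wrapping scheme are illustrative,
# not part of this module:
#
#   class LocalKek:
#       def __init__(self, kid, key):
#           self.kid, self.key = kid, key
#       def get_kid(self):
#           return self.kid
#       def unwrap_key(self, wrapped_key, algorithm):
#           # must invert the wrapping applied when the message was encrypted
#           ...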
|
def function[_decrypt, parameter[message, encryption_data, key_encryption_key, resolver]]:
constant[
Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
:param str message:
The ciphertext to be decrypted.
:param _EncryptionData encryption_data:
The metadata associated with this ciphertext.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The decrypted plaintext.
:rtype: str
]
call[name[_validate_not_none], parameter[constant[message], name[message]]]
variable[content_encryption_key] assign[=] call[name[_validate_and_unwrap_cek], parameter[name[encryption_data], name[key_encryption_key], name[resolver]]]
if <ast.UnaryOp object at 0x7da1b1dda200> begin[:]
<ast.Raise object at 0x7da1b1dd9de0>
variable[cipher] assign[=] call[name[_generate_AES_CBC_cipher], parameter[name[content_encryption_key], name[encryption_data].content_encryption_IV]]
variable[decrypted_data] assign[=] name[message]
variable[decryptor] assign[=] call[name[cipher].decryptor, parameter[]]
variable[decrypted_data] assign[=] binary_operation[call[name[decryptor].update, parameter[name[decrypted_data]]] + call[name[decryptor].finalize, parameter[]]]
variable[unpadder] assign[=] call[call[name[PKCS7], parameter[constant[128]]].unpadder, parameter[]]
variable[decrypted_data] assign[=] binary_operation[call[name[unpadder].update, parameter[name[decrypted_data]]] + call[name[unpadder].finalize, parameter[]]]
return[name[decrypted_data]]
|
keyword[def] identifier[_decrypt] ( identifier[message] , identifier[encryption_data] , identifier[key_encryption_key] = keyword[None] , identifier[resolver] = keyword[None] ):
literal[string]
identifier[_validate_not_none] ( literal[string] , identifier[message] )
identifier[content_encryption_key] = identifier[_validate_and_unwrap_cek] ( identifier[encryption_data] , identifier[key_encryption_key] , identifier[resolver] )
keyword[if] keyword[not] ( identifier[_EncryptionAlgorithm] . identifier[AES_CBC_256] == identifier[encryption_data] . identifier[encryption_agent] . identifier[encryption_algorithm] ):
keyword[raise] identifier[ValueError] ( identifier[_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM] )
identifier[cipher] = identifier[_generate_AES_CBC_cipher] ( identifier[content_encryption_key] , identifier[encryption_data] . identifier[content_encryption_IV] )
identifier[decrypted_data] = identifier[message]
identifier[decryptor] = identifier[cipher] . identifier[decryptor] ()
identifier[decrypted_data] =( identifier[decryptor] . identifier[update] ( identifier[decrypted_data] )+ identifier[decryptor] . identifier[finalize] ())
identifier[unpadder] = identifier[PKCS7] ( literal[int] ). identifier[unpadder] ()
identifier[decrypted_data] =( identifier[unpadder] . identifier[update] ( identifier[decrypted_data] )+ identifier[unpadder] . identifier[finalize] ())
keyword[return] identifier[decrypted_data]
|
def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
"""
Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
:param str message:
The ciphertext to be decrypted.
:param _EncryptionData encryption_data:
The metadata associated with this ciphertext.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The decrypted plaintext.
:rtype: str
"""
_validate_not_none('message', message)
content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
if not _EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm:
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) # depends on [control=['if'], data=[]]
cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
# decrypt data
decrypted_data = message
decryptor = cipher.decryptor()
decrypted_data = decryptor.update(decrypted_data) + decryptor.finalize()
# unpad data
unpadder = PKCS7(128).unpadder()
decrypted_data = unpadder.update(decrypted_data) + unpadder.finalize()
return decrypted_data
|
def grow(self, len):
"""Grow up the content of the input buffer, the old data are
preserved This routine handle the I18N transcoding to
internal UTF-8 This routine is used when operating the
parser in normal (pull) mode TODO: one should be able to
remove one extra copy by copying directly onto in->buffer
or in->raw """
ret = libxml2mod.xmlParserInputBufferGrow(self._o, len)
return ret
|
def function[grow, parameter[self, len]]:
    constant[Grow the content of the input buffer; the old data are
    preserved. This routine handles the I18N transcoding to
    internal UTF-8. This routine is used when operating the
    parser in normal (pull) mode. TODO: one should be able to
    remove one extra copy by copying directly onto in->buffer
    or in->raw ]
variable[ret] assign[=] call[name[libxml2mod].xmlParserInputBufferGrow, parameter[name[self]._o, name[len]]]
return[name[ret]]
|
keyword[def] identifier[grow] ( identifier[self] , identifier[len] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlParserInputBufferGrow] ( identifier[self] . identifier[_o] , identifier[len] )
keyword[return] identifier[ret]
|
def grow(self, len):
"""Grow up the content of the input buffer, the old data are
preserved This routine handle the I18N transcoding to
internal UTF-8 This routine is used when operating the
parser in normal (pull) mode TODO: one should be able to
remove one extra copy by copying directly onto in->buffer
or in->raw """
ret = libxml2mod.xmlParserInputBufferGrow(self._o, len)
return ret
|
def run(self, definition):
"""Processing the pipeline."""
self.logger.info("Running with Python %s", sys.version.replace("\n", ""))
self.logger.info("Running on platform %s", platform.platform())
self.logger.info("Current cpu count is %d", multiprocessing.cpu_count())
self.logger.info("Processing pipeline definition '%s'", definition)
document = self.validate_document(definition)
if self.options.validate_only:
self.logger.info("Stopping after validation as requested!")
return []
self.provide_temporary_scripts_path()
versions = VersionsCheck().process(document)
VersionsReport().process(versions)
collector = Application.create_and_run_collector(document, self.options)
matrix = find_matrix(document)
output = []
if len(matrix) == 0:
model = {} if 'model' not in document else document['model']
pipeline = Pipeline(model=model, options=self.options)
pipeline.hooks = Hooks(document)
result = pipeline.process(document['pipeline'])
else:
result = self.run_matrix(matrix, document)
output = result['output']
self.shutdown(collector, success=result['success'])
return output
|
def function[run, parameter[self, definition]]:
constant[Processing the pipeline.]
call[name[self].logger.info, parameter[constant[Running with Python %s], call[name[sys].version.replace, parameter[constant[
], constant[]]]]]
call[name[self].logger.info, parameter[constant[Running on platform %s], call[name[platform].platform, parameter[]]]]
call[name[self].logger.info, parameter[constant[Current cpu count is %d], call[name[multiprocessing].cpu_count, parameter[]]]]
call[name[self].logger.info, parameter[constant[Processing pipeline definition '%s'], name[definition]]]
variable[document] assign[=] call[name[self].validate_document, parameter[name[definition]]]
if name[self].options.validate_only begin[:]
call[name[self].logger.info, parameter[constant[Stopping after validation as requested!]]]
return[list[[]]]
call[name[self].provide_temporary_scripts_path, parameter[]]
variable[versions] assign[=] call[call[name[VersionsCheck], parameter[]].process, parameter[name[document]]]
call[call[name[VersionsReport], parameter[]].process, parameter[name[versions]]]
variable[collector] assign[=] call[name[Application].create_and_run_collector, parameter[name[document], name[self].options]]
variable[matrix] assign[=] call[name[find_matrix], parameter[name[document]]]
variable[output] assign[=] list[[]]
if compare[call[name[len], parameter[name[matrix]]] equal[==] constant[0]] begin[:]
variable[model] assign[=] <ast.IfExp object at 0x7da207f01630>
variable[pipeline] assign[=] call[name[Pipeline], parameter[]]
name[pipeline].hooks assign[=] call[name[Hooks], parameter[name[document]]]
variable[result] assign[=] call[name[pipeline].process, parameter[call[name[document]][constant[pipeline]]]]
variable[output] assign[=] call[name[result]][constant[output]]
call[name[self].shutdown, parameter[name[collector]]]
return[name[output]]
|
keyword[def] identifier[run] ( identifier[self] , identifier[definition] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[sys] . identifier[version] . identifier[replace] ( literal[string] , literal[string] ))
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[platform] . identifier[platform] ())
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[multiprocessing] . identifier[cpu_count] ())
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[definition] )
identifier[document] = identifier[self] . identifier[validate_document] ( identifier[definition] )
keyword[if] identifier[self] . identifier[options] . identifier[validate_only] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[return] []
identifier[self] . identifier[provide_temporary_scripts_path] ()
identifier[versions] = identifier[VersionsCheck] (). identifier[process] ( identifier[document] )
identifier[VersionsReport] (). identifier[process] ( identifier[versions] )
identifier[collector] = identifier[Application] . identifier[create_and_run_collector] ( identifier[document] , identifier[self] . identifier[options] )
identifier[matrix] = identifier[find_matrix] ( identifier[document] )
identifier[output] =[]
keyword[if] identifier[len] ( identifier[matrix] )== literal[int] :
identifier[model] ={} keyword[if] literal[string] keyword[not] keyword[in] identifier[document] keyword[else] identifier[document] [ literal[string] ]
identifier[pipeline] = identifier[Pipeline] ( identifier[model] = identifier[model] , identifier[options] = identifier[self] . identifier[options] )
identifier[pipeline] . identifier[hooks] = identifier[Hooks] ( identifier[document] )
identifier[result] = identifier[pipeline] . identifier[process] ( identifier[document] [ literal[string] ])
keyword[else] :
identifier[result] = identifier[self] . identifier[run_matrix] ( identifier[matrix] , identifier[document] )
identifier[output] = identifier[result] [ literal[string] ]
identifier[self] . identifier[shutdown] ( identifier[collector] , identifier[success] = identifier[result] [ literal[string] ])
keyword[return] identifier[output]
|
def run(self, definition):
"""Processing the pipeline."""
self.logger.info('Running with Python %s', sys.version.replace('\n', ''))
self.logger.info('Running on platform %s', platform.platform())
self.logger.info('Current cpu count is %d', multiprocessing.cpu_count())
self.logger.info("Processing pipeline definition '%s'", definition)
document = self.validate_document(definition)
if self.options.validate_only:
self.logger.info('Stopping after validation as requested!')
return [] # depends on [control=['if'], data=[]]
self.provide_temporary_scripts_path()
versions = VersionsCheck().process(document)
VersionsReport().process(versions)
collector = Application.create_and_run_collector(document, self.options)
matrix = find_matrix(document)
output = []
if len(matrix) == 0:
model = {} if 'model' not in document else document['model']
pipeline = Pipeline(model=model, options=self.options)
pipeline.hooks = Hooks(document)
result = pipeline.process(document['pipeline']) # depends on [control=['if'], data=[]]
else:
result = self.run_matrix(matrix, document)
output = result['output']
self.shutdown(collector, success=result['success'])
return output
|
def get_current_user(self):
"""
    Override get_current_user for Google AppEngine.
    Checks for an oauth-capable request first; if this fails, fall back to the standard users API.
"""
from google.appengine.api import users
if _IS_DEVELOPMENT_SERVER:
return users.get_current_user()
else:
from google.appengine.api import oauth
try:
user = oauth.get_current_user()
except oauth.OAuthRequestError:
user = users.get_current_user()
return user
|
def function[get_current_user, parameter[self]]:
constant[
    Override get_current_user for Google AppEngine.
    Checks for an oauth-capable request first; if this fails, fall back to the standard users API.
]
from relative_module[google.appengine.api] import module[users]
if name[_IS_DEVELOPMENT_SERVER] begin[:]
return[call[name[users].get_current_user, parameter[]]]
|
keyword[def] identifier[get_current_user] ( identifier[self] ):
literal[string]
keyword[from] identifier[google] . identifier[appengine] . identifier[api] keyword[import] identifier[users]
keyword[if] identifier[_IS_DEVELOPMENT_SERVER] :
keyword[return] identifier[users] . identifier[get_current_user] ()
keyword[else] :
keyword[from] identifier[google] . identifier[appengine] . identifier[api] keyword[import] identifier[oauth]
keyword[try] :
identifier[user] = identifier[oauth] . identifier[get_current_user] ()
keyword[except] identifier[oauth] . identifier[OAuthRequestError] :
identifier[user] = identifier[users] . identifier[get_current_user] ()
keyword[return] identifier[user]
|
def get_current_user(self):
"""
    Override get_current_user for Google AppEngine.
    Checks for an oauth-capable request first; if this fails, fall back to the standard users API.
"""
from google.appengine.api import users
if _IS_DEVELOPMENT_SERVER:
return users.get_current_user() # depends on [control=['if'], data=[]]
else:
from google.appengine.api import oauth
try:
user = oauth.get_current_user() # depends on [control=['try'], data=[]]
except oauth.OAuthRequestError:
user = users.get_current_user() # depends on [control=['except'], data=[]]
return user
|
def __get_mapping(self, structures):
"""
match each pattern to each molecule.
    if all patterns match all molecules,
    return a generator of all possible mappings.
:param structures: disjoint molecules
:return: mapping generator
"""
for c in permutations(structures, len(self.__patterns)):
for m in product(*(x.get_substructure_mapping(y, limit=0) for x, y in zip(self.__patterns, c))):
mapping = {}
for i in m:
mapping.update(i)
if mapping:
yield mapping
|
def function[__get_mapping, parameter[self, structures]]:
constant[
match each pattern to each molecule.
    if all patterns match all molecules,
    return a generator of all possible mappings.
:param structures: disjoint molecules
:return: mapping generator
]
for taget[name[c]] in starred[call[name[permutations], parameter[name[structures], call[name[len], parameter[name[self].__patterns]]]]] begin[:]
for taget[name[m]] in starred[call[name[product], parameter[<ast.Starred object at 0x7da20c795660>]]] begin[:]
variable[mapping] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[m]] begin[:]
call[name[mapping].update, parameter[name[i]]]
if name[mapping] begin[:]
<ast.Yield object at 0x7da20c794af0>
|
keyword[def] identifier[__get_mapping] ( identifier[self] , identifier[structures] ):
literal[string]
keyword[for] identifier[c] keyword[in] identifier[permutations] ( identifier[structures] , identifier[len] ( identifier[self] . identifier[__patterns] )):
keyword[for] identifier[m] keyword[in] identifier[product] (*( identifier[x] . identifier[get_substructure_mapping] ( identifier[y] , identifier[limit] = literal[int] ) keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[self] . identifier[__patterns] , identifier[c] ))):
identifier[mapping] ={}
keyword[for] identifier[i] keyword[in] identifier[m] :
identifier[mapping] . identifier[update] ( identifier[i] )
keyword[if] identifier[mapping] :
keyword[yield] identifier[mapping]
|
def __get_mapping(self, structures):
"""
match each pattern to each molecule.
    if all patterns match all molecules,
    return a generator of all possible mappings.
:param structures: disjoint molecules
:return: mapping generator
"""
for c in permutations(structures, len(self.__patterns)):
for m in product(*(x.get_substructure_mapping(y, limit=0) for (x, y) in zip(self.__patterns, c))):
mapping = {}
for i in m:
mapping.update(i) # depends on [control=['for'], data=['i']]
if mapping:
yield mapping # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']] # depends on [control=['for'], data=['c']]
|
def pause(self, workflow_id, email_id):
"""
Pause an automated email.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
"""
self.workflow_id = workflow_id
self.email_id = email_id
return self._mc_client._post(url=self._build_path(workflow_id, 'emails', email_id, 'actions/pause'))
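# A minimal usage sketch, where `emails` is an instance of this class and
# both ids are hypothetical:
#
#   emails.pause("57afe96172", "a87de9b392")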
|
def function[pause, parameter[self, workflow_id, email_id]]:
constant[
Pause an automated email.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
]
name[self].workflow_id assign[=] name[workflow_id]
name[self].email_id assign[=] name[email_id]
return[call[name[self]._mc_client._post, parameter[]]]
|
keyword[def] identifier[pause] ( identifier[self] , identifier[workflow_id] , identifier[email_id] ):
literal[string]
identifier[self] . identifier[workflow_id] = identifier[workflow_id]
identifier[self] . identifier[email_id] = identifier[email_id]
keyword[return] identifier[self] . identifier[_mc_client] . identifier[_post] ( identifier[url] = identifier[self] . identifier[_build_path] ( identifier[workflow_id] , literal[string] , identifier[email_id] , literal[string] ))
|
def pause(self, workflow_id, email_id):
"""
Pause an automated email.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
"""
self.workflow_id = workflow_id
self.email_id = email_id
return self._mc_client._post(url=self._build_path(workflow_id, 'emails', email_id, 'actions/pause'))
|
def _delete_node_storage(self, node):
"""delete storage
:param node: node in form of list, or BLANK_NODE
"""
if node == BLANK_NODE:
return
# assert isinstance(node, list)
encoded = self._encode_node(node, put_in_db=False)
if len(encoded) < 32:
return
"""
===== FIXME ====
in the current trie implementation two nodes can share identical subtrees
thus we can not safely delete nodes for now
"""
self.deletes.append(encoded)
|
def function[_delete_node_storage, parameter[self, node]]:
constant[delete storage
:param node: node in form of list, or BLANK_NODE
]
if compare[name[node] equal[==] name[BLANK_NODE]] begin[:]
return[None]
variable[encoded] assign[=] call[name[self]._encode_node, parameter[name[node]]]
if compare[call[name[len], parameter[name[encoded]]] less[<] constant[32]] begin[:]
return[None]
constant[
===== FIXME ====
in the current trie implementation two nodes can share identical subtrees
thus we can not safely delete nodes for now
]
call[name[self].deletes.append, parameter[name[encoded]]]
|
keyword[def] identifier[_delete_node_storage] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] identifier[node] == identifier[BLANK_NODE] :
keyword[return]
identifier[encoded] = identifier[self] . identifier[_encode_node] ( identifier[node] , identifier[put_in_db] = keyword[False] )
keyword[if] identifier[len] ( identifier[encoded] )< literal[int] :
keyword[return]
literal[string]
identifier[self] . identifier[deletes] . identifier[append] ( identifier[encoded] )
|
def _delete_node_storage(self, node):
"""delete storage
:param node: node in form of list, or BLANK_NODE
"""
if node == BLANK_NODE:
return # depends on [control=['if'], data=[]]
# assert isinstance(node, list)
encoded = self._encode_node(node, put_in_db=False)
if len(encoded) < 32:
return # depends on [control=['if'], data=[]]
'\n ===== FIXME ====\n in the current trie implementation two nodes can share identical subtrees\n thus we can not safely delete nodes for now\n '
self.deletes.append(encoded)
|
def add_users_to_user_group(self, id, **kwargs): # noqa: E501
"""Add multiple users to a specific user group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_users_to_user_group(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body: List of users that should be added to user group
:return: ResponseContainerUserGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_users_to_user_group_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.add_users_to_user_group_with_http_info(id, **kwargs) # noqa: E501
return data
|
def function[add_users_to_user_group, parameter[self, id]]:
constant[Add multiple users to a specific user group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_users_to_user_group(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body: List of users that should be added to user group
:return: ResponseContainerUserGroup
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].add_users_to_user_group_with_http_info, parameter[name[id]]]]
|
keyword[def] identifier[add_users_to_user_group] ( identifier[self] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[add_users_to_user_group_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[add_users_to_user_group_with_http_info] ( identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def add_users_to_user_group(self, id, **kwargs): # noqa: E501
'Add multiple users to a specific user group # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.add_users_to_user_group(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: (required)\n :param list[str] body: List of users that should be added to user group\n :return: ResponseContainerUserGroup\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_users_to_user_group_with_http_info(id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.add_users_to_user_group_with_http_info(id, **kwargs) # noqa: E501
return data
|
def get_time_rate(self):
'''
getter
Time rate.
'''
if isinstance(self.__time_rate, float) is False:
raise TypeError("The type of __time_rate must be float.")
if self.__time_rate <= 0.0:
raise ValueError("The value of __time_rate must be greater than 0.0")
return self.__time_rate
|
def function[get_time_rate, parameter[self]]:
constant[
getter
Time rate.
]
if compare[call[name[isinstance], parameter[name[self].__time_rate, name[float]]] is constant[False]] begin[:]
<ast.Raise object at 0x7da1b07ada50>
if compare[name[self].__time_rate less_or_equal[<=] constant[0.0]] begin[:]
<ast.Raise object at 0x7da1b07aeb60>
return[name[self].__time_rate]
|
keyword[def] identifier[get_time_rate] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[__time_rate] , identifier[float] ) keyword[is] keyword[False] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[self] . identifier[__time_rate] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[__time_rate]
|
def get_time_rate(self):
"""
getter
Time rate.
"""
if isinstance(self.__time_rate, float) is False:
raise TypeError('The type of __time_rate must be float.') # depends on [control=['if'], data=[]]
if self.__time_rate <= 0.0:
raise ValueError('The value of __time_rate must be greater than 0.0') # depends on [control=['if'], data=[]]
return self.__time_rate
|
def from_response_data(cls, response_data):
"""
Response factory
:param response_data: requests.models.Response
:return: pybomb.clients.Response
"""
response_json = response_data.json()
return cls(
response_data.url,
response_json["number_of_page_results"],
response_json["number_of_total_results"],
response_json["results"],
)
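# A minimal usage sketch; the endpoint URL is hypothetical and `requests`
# is assumed to be installed:
#
#   import requests
#   raw = requests.get("https://example.com/api/games")
#   response = Response.from_response_data(raw)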
|
def function[from_response_data, parameter[cls, response_data]]:
constant[
Response factory
:param response_data: requests.models.Response
:return: pybomb.clients.Response
]
variable[response_json] assign[=] call[name[response_data].json, parameter[]]
return[call[name[cls], parameter[name[response_data].url, call[name[response_json]][constant[number_of_page_results]], call[name[response_json]][constant[number_of_total_results]], call[name[response_json]][constant[results]]]]]
|
keyword[def] identifier[from_response_data] ( identifier[cls] , identifier[response_data] ):
literal[string]
identifier[response_json] = identifier[response_data] . identifier[json] ()
keyword[return] identifier[cls] (
identifier[response_data] . identifier[url] ,
identifier[response_json] [ literal[string] ],
identifier[response_json] [ literal[string] ],
identifier[response_json] [ literal[string] ],
)
|
def from_response_data(cls, response_data):
"""
Response factory
:param response_data: requests.models.Response
:return: pybomb.clients.Response
"""
response_json = response_data.json()
return cls(response_data.url, response_json['number_of_page_results'], response_json['number_of_total_results'], response_json['results'])
|
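A runnable sketch of the factory above, feeding it a stub in place of `requests.models.Response`; the attribute names on the simplified `Response` class are assumptions, not pybomb's actual fields:
class StubHttpResponse:
    """Stands in for requests.models.Response: exposes .url and .json()."""
    def __init__(self, url, payload):
        self.url = url
        self._payload = payload

    def json(self):
        return self._payload

class Response:
    def __init__(self, uri, num_page_results, num_total_results, results):
        self.uri = uri
        self.num_page_results = num_page_results
        self.num_total_results = num_total_results
        self.results = results

    @classmethod
    def from_response_data(cls, response_data):
        response_json = response_data.json()
        return cls(
            response_data.url,
            response_json["number_of_page_results"],
            response_json["number_of_total_results"],
            response_json["results"],
        )

stub = StubHttpResponse("https://api.example.com/games", {
    "number_of_page_results": 1,
    "number_of_total_results": 1,
    "results": [{"name": "Doom"}],
})
resp = Response.from_response_data(stub)
print(resp.uri, resp.num_total_results)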
def close(self):
"""Close this consumer thread and its underlying connection.
"""
try:
if self.consumer:
self.requeue_messages(m for _, m in iter_queue(self.delay_queue))
self.consumer.close()
except ConnectionError:
pass
|
def function[close, parameter[self]]:
constant[Close this consumer thread and its underlying connection.
]
<ast.Try object at 0x7da1b18c8df0>
|
keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[consumer] :
identifier[self] . identifier[requeue_messages] ( identifier[m] keyword[for] identifier[_] , identifier[m] keyword[in] identifier[iter_queue] ( identifier[self] . identifier[delay_queue] ))
identifier[self] . identifier[consumer] . identifier[close] ()
keyword[except] identifier[ConnectionError] :
keyword[pass]
|
def close(self):
"""Close this consumer thread and its underlying connection.
"""
try:
if self.consumer:
self.requeue_messages((m for (_, m) in iter_queue(self.delay_queue)))
self.consumer.close() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ConnectionError:
pass # depends on [control=['except'], data=[]]
|
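`iter_queue` is not shown in the row above. A plausible implementation, assuming it non-blockingly drains `(priority, message)` pairs from a standard queue; this is purely illustrative, not the library's actual helper:
import queue

def iter_queue(q):
    """Yield items from q until it is empty, without blocking."""
    while True:
        try:
            yield q.get_nowait()
        except queue.Empty:
            return

delay_queue = queue.Queue()
delay_queue.put((1, "msg-a"))
delay_queue.put((2, "msg-b"))

# Mirrors the generator expression passed to requeue_messages in close():
messages = [m for _, m in iter_queue(delay_queue)]
print(messages)  # ['msg-a', 'msg-b']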
def summary():
'''
.. versionadded:: 2014.7.0
Show a summary of the last puppet agent run
CLI Example:
.. code-block:: bash
salt '*' puppet.summary
'''
puppet = _Puppet()
try:
with salt.utils.files.fopen(puppet.lastrunfile, 'r') as fp_:
report = salt.utils.yaml.safe_load(fp_)
result = {}
if 'time' in report:
try:
result['last_run'] = datetime.datetime.fromtimestamp(
int(report['time']['last_run'])).isoformat()
except (TypeError, ValueError, KeyError):
result['last_run'] = 'invalid or missing timestamp'
result['time'] = {}
for key in ('total', 'config_retrieval'):
if key in report['time']:
result['time'][key] = report['time'][key]
if 'resources' in report:
result['resources'] = report['resources']
except salt.utils.yaml.YAMLError as exc:
raise CommandExecutionError(
'YAML error parsing puppet run summary: {0}'.format(exc)
)
except IOError as exc:
raise CommandExecutionError(
'Unable to read puppet run summary: {0}'.format(exc)
)
return result
|
def function[summary, parameter[]]:
constant[
.. versionadded:: 2014.7.0
Show a summary of the last puppet agent run
CLI Example:
.. code-block:: bash
salt '*' puppet.summary
]
variable[puppet] assign[=] call[name[_Puppet], parameter[]]
<ast.Try object at 0x7da18fe92620>
return[name[result]]
|
keyword[def] identifier[summary] ():
literal[string]
identifier[puppet] = identifier[_Puppet] ()
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[puppet] . identifier[lastrunfile] , literal[string] ) keyword[as] identifier[fp_] :
identifier[report] = identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_load] ( identifier[fp_] )
identifier[result] ={}
keyword[if] literal[string] keyword[in] identifier[report] :
keyword[try] :
identifier[result] [ literal[string] ]= identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] (
identifier[int] ( identifier[report] [ literal[string] ][ literal[string] ])). identifier[isoformat] ()
keyword[except] ( identifier[TypeError] , identifier[ValueError] , identifier[KeyError] ):
identifier[result] [ literal[string] ]= literal[string]
identifier[result] [ literal[string] ]={}
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] ):
keyword[if] identifier[key] keyword[in] identifier[report] [ literal[string] ]:
identifier[result] [ literal[string] ][ identifier[key] ]= identifier[report] [ literal[string] ][ identifier[key] ]
keyword[if] literal[string] keyword[in] identifier[report] :
identifier[result] [ literal[string] ]= identifier[report] [ literal[string] ]
keyword[except] identifier[salt] . identifier[utils] . identifier[yaml] . identifier[YAMLError] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[exc] )
)
keyword[except] identifier[IOError] keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[exc] )
)
keyword[return] identifier[result]
|
def summary():
"""
.. versionadded:: 2014.7.0
Show a summary of the last puppet agent run
CLI Example:
.. code-block:: bash
salt '*' puppet.summary
"""
puppet = _Puppet()
try:
with salt.utils.files.fopen(puppet.lastrunfile, 'r') as fp_:
report = salt.utils.yaml.safe_load(fp_) # depends on [control=['with'], data=['fp_']]
result = {}
if 'time' in report:
try:
result['last_run'] = datetime.datetime.fromtimestamp(int(report['time']['last_run'])).isoformat() # depends on [control=['try'], data=[]]
except (TypeError, ValueError, KeyError):
result['last_run'] = 'invalid or missing timestamp' # depends on [control=['except'], data=[]]
result['time'] = {}
for key in ('total', 'config_retrieval'):
if key in report['time']:
result['time'][key] = report['time'][key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=['report']]
if 'resources' in report:
result['resources'] = report['resources'] # depends on [control=['if'], data=['report']] # depends on [control=['try'], data=[]]
except salt.utils.yaml.YAMLError as exc:
raise CommandExecutionError('YAML error parsing puppet run summary: {0}'.format(exc)) # depends on [control=['except'], data=['exc']]
except IOError as exc:
raise CommandExecutionError('Unable to read puppet run summary: {0}'.format(exc)) # depends on [control=['except'], data=['exc']]
return result
|
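The extraction logic in `summary` can be exercised without salt or puppet by feeding it an in-memory report dict. A sketch with made-up sample values; the field names follow the function body above:
import datetime

report = {
    "time": {"last_run": 1400000000, "total": 12.3, "config_retrieval": 1.1},
    "resources": {"changed": 2, "failed": 0, "total": 51},
}

result = {}
if "time" in report:
    try:
        result["last_run"] = datetime.datetime.fromtimestamp(
            int(report["time"]["last_run"])).isoformat()
    except (TypeError, ValueError, KeyError):
        result["last_run"] = "invalid or missing timestamp"
    # Keep only the two timing keys the original function copies over.
    result["time"] = {k: report["time"][k]
                      for k in ("total", "config_retrieval") if k in report["time"]}
if "resources" in report:
    result["resources"] = report["resources"]

print(result)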
def inherit_kwargs(inherit_func):
"""
TODO move to util_decor
inherit_func = inspect_pdfs
func = encoder.visualize.im_func
"""
import utool as ut
keys, is_arbitrary = ut.get_kwargs(inherit_func)
if is_arbitrary:
keys += ['**kwargs']
kwargs_append = '\n'.join(keys)
#from six.moves import builtins
#builtins.print(kwargs_block)
def _wrp(func):
if func.__doc__ is None:
func.__doc__ = ''
# TODO append to kwargs block if it exists
kwargs_block = 'Kwargs:\n' + ut.indent(kwargs_append)
func.__doc__ += kwargs_block
return func
return _wrp
|
def function[inherit_kwargs, parameter[inherit_func]]:
constant[
TODO move to util_decor
inherit_func = inspect_pdfs
func = encoder.visualize.im_func
]
import module[utool] as alias[ut]
<ast.Tuple object at 0x7da1b24e9b70> assign[=] call[name[ut].get_kwargs, parameter[name[inherit_func]]]
if name[is_arbitrary] begin[:]
<ast.AugAssign object at 0x7da1b24e8070>
variable[kwargs_append] assign[=] call[constant[
].join, parameter[name[keys]]]
def function[_wrp, parameter[func]]:
if compare[name[func].__doc__ is constant[None]] begin[:]
name[func].__doc__ assign[=] constant[]
variable[kwargs_block] assign[=] binary_operation[constant[Kwargs:
] + call[name[ut].indent, parameter[name[kwargs_append]]]]
<ast.AugAssign object at 0x7da1b24e9870>
return[name[func]]
return[name[_wrp]]
|
keyword[def] identifier[inherit_kwargs] ( identifier[inherit_func] ):
literal[string]
keyword[import] identifier[utool] keyword[as] identifier[ut]
identifier[keys] , identifier[is_arbitrary] = identifier[ut] . identifier[get_kwargs] ( identifier[inherit_func] )
keyword[if] identifier[is_arbitrary] :
identifier[keys] +=[ literal[string] ]
identifier[kwargs_append] = literal[string] . identifier[join] ( identifier[keys] )
keyword[def] identifier[_wrp] ( identifier[func] ):
keyword[if] identifier[func] . identifier[__doc__] keyword[is] keyword[None] :
identifier[func] . identifier[__doc__] = literal[string]
identifier[kwargs_block] = literal[string] + identifier[ut] . identifier[indent] ( identifier[kwargs_append] )
identifier[func] . identifier[__doc__] += identifier[kwargs_block]
keyword[return] identifier[func]
keyword[return] identifier[_wrp]
|
def inherit_kwargs(inherit_func):
"""
TODO move to util_decor
inherit_func = inspect_pdfs
func = encoder.visualize.im_func
"""
import utool as ut
(keys, is_arbitrary) = ut.get_kwargs(inherit_func)
if is_arbitrary:
keys += ['**kwargs'] # depends on [control=['if'], data=[]]
kwargs_append = '\n'.join(keys)
#from six.moves import builtins
#builtins.print(kwargs_block)
def _wrp(func):
if func.__doc__ is None:
func.__doc__ = '' # depends on [control=['if'], data=[]]
# TODO append to kwargs block if it exists
kwargs_block = 'Kwargs:\n' + ut.indent(kwargs_append)
func.__doc__ += kwargs_block
return func
return _wrp
|
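A self-contained sketch of the decorator's effect, replacing utool's `get_kwargs` and `indent` helpers with inspect-based stand-ins so it runs without utool; `_get_kwargs` is an approximation of what `ut.get_kwargs` returns:
import inspect

def _get_kwargs(func):
    """Stand-in for ut.get_kwargs: (default-valued arg names, has **kwargs)."""
    params = inspect.signature(func).parameters.values()
    keys = [p.name for p in params if p.default is not p.empty]
    return keys, any(p.kind is p.VAR_KEYWORD for p in params)

def inherit_kwargs(inherit_func):
    keys, is_arbitrary = _get_kwargs(inherit_func)
    if is_arbitrary:
        keys += ['**kwargs']
    kwargs_append = '\n'.join(keys)

    def _wrp(func):
        if func.__doc__ is None:
            func.__doc__ = ''
        # Append an indented Kwargs: block, like ut.indent would produce.
        func.__doc__ += 'Kwargs:\n    ' + kwargs_append.replace('\n', '\n    ')
        return func
    return _wrp

def source(a, scale=1.0, verbose=False, **kwargs):
    pass

@inherit_kwargs(source)
def target():
    """Does something."""

print(target.__doc__)  # docstring now ends with a Kwargs: block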
def independentlinear60__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['mean_squared_error'])
return KerasWrap(model, 30, flatten_output=True)
|
def function[independentlinear60__ffnn, parameter[]]:
constant[ 4-Layer Neural Network
]
from relative_module[keras.models] import module[Sequential]
from relative_module[keras.layers] import module[Dense]
variable[model] assign[=] call[name[Sequential], parameter[]]
call[name[model].add, parameter[call[name[Dense], parameter[constant[32]]]]]
call[name[model].add, parameter[call[name[Dense], parameter[constant[20]]]]]
call[name[model].add, parameter[call[name[Dense], parameter[constant[20]]]]]
call[name[model].add, parameter[call[name[Dense], parameter[constant[1]]]]]
call[name[model].compile, parameter[]]
return[call[name[KerasWrap], parameter[name[model], constant[30]]]]
|
keyword[def] identifier[independentlinear60__ffnn] ():
literal[string]
keyword[from] identifier[keras] . identifier[models] keyword[import] identifier[Sequential]
keyword[from] identifier[keras] . identifier[layers] keyword[import] identifier[Dense]
identifier[model] = identifier[Sequential] ()
identifier[model] . identifier[add] ( identifier[Dense] ( literal[int] , identifier[activation] = literal[string] , identifier[input_dim] = literal[int] ))
identifier[model] . identifier[add] ( identifier[Dense] ( literal[int] , identifier[activation] = literal[string] ))
identifier[model] . identifier[add] ( identifier[Dense] ( literal[int] , identifier[activation] = literal[string] ))
identifier[model] . identifier[add] ( identifier[Dense] ( literal[int] ))
identifier[model] . identifier[compile] ( identifier[optimizer] = literal[string] ,
identifier[loss] = literal[string] ,
identifier[metrics] =[ literal[string] ])
keyword[return] identifier[KerasWrap] ( identifier[model] , literal[int] , identifier[flatten_output] = keyword[True] )
|
def independentlinear60__ffnn():
""" 4-Layer Neural Network
"""
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error'])
return KerasWrap(model, 30, flatten_output=True)
|
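`KerasWrap` comes from the surrounding benchmark harness and is not shown here, but the compiled model itself can be exercised directly with synthetic data using the standard Keras API; shapes match the `input_dim=60` above:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=60))
model.add(Dense(20, activation='relu'))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error',
              metrics=['mean_squared_error'])

X = np.random.randn(128, 60).astype('float32')
y = np.random.randn(128, 1).astype('float32')
model.fit(X, y, epochs=2, batch_size=32, verbose=0)
print(model.predict(X[:3]).shape)  # (3, 1)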
def clear(mnt):
'''
Pass in the mnt dict returned from nbd_mount to unmount and disconnect
the image from nbd. If all of the partitions are unmounted return an
empty dict, otherwise return a dict containing the still mounted
partitions
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.clear '{"/mnt/foo": "/dev/nbd0p1"}'
'''
ret = {}
nbds = set()
for m_pt, dev in six.iteritems(mnt):
mnt_ret = __salt__['mount.umount'](m_pt)
if mnt_ret is not True:
ret[m_pt] = dev
nbds.add(dev[:dev.rindex('p')])
if ret:
return ret
for nbd in nbds:
__salt__['cmd.run']('qemu-nbd -d {0}'.format(nbd), python_shell=False)
return ret
|
def function[clear, parameter[mnt]]:
constant[
Pass in the mnt dict returned from nbd_mount to unmount and disconnect
the image from nbd. If all of the partitions are unmounted return an
empty dict, otherwise return a dict containing the still mounted
partitions
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.clear '{"/mnt/foo": "/dev/nbd0p1"}'
]
variable[ret] assign[=] dictionary[[], []]
variable[nbds] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1cccb50>, <ast.Name object at 0x7da1b1ccc310>]]] in starred[call[name[six].iteritems, parameter[name[mnt]]]] begin[:]
variable[mnt_ret] assign[=] call[call[name[__salt__]][constant[mount.umount]], parameter[name[m_pt]]]
if compare[name[mnt_ret] is_not constant[True]] begin[:]
call[name[ret]][name[m_pt]] assign[=] name[dev]
call[name[nbds].add, parameter[call[name[dev]][<ast.Slice object at 0x7da1b1cccc40>]]]
if name[ret] begin[:]
return[name[ret]]
for taget[name[nbd]] in starred[name[nbds]] begin[:]
call[call[name[__salt__]][constant[cmd.run]], parameter[call[constant[qemu-nbd -d {0}].format, parameter[name[nbd]]]]]
return[name[ret]]
|
keyword[def] identifier[clear] ( identifier[mnt] ):
literal[string]
identifier[ret] ={}
identifier[nbds] = identifier[set] ()
keyword[for] identifier[m_pt] , identifier[dev] keyword[in] identifier[six] . identifier[iteritems] ( identifier[mnt] ):
identifier[mnt_ret] = identifier[__salt__] [ literal[string] ]( identifier[m_pt] )
keyword[if] identifier[mnt_ret] keyword[is] keyword[not] keyword[True] :
identifier[ret] [ identifier[m_pt] ]= identifier[dev]
identifier[nbds] . identifier[add] ( identifier[dev] [: identifier[dev] . identifier[rindex] ( literal[string] )])
keyword[if] identifier[ret] :
keyword[return] identifier[ret]
keyword[for] identifier[nbd] keyword[in] identifier[nbds] :
identifier[__salt__] [ literal[string] ]( literal[string] . identifier[format] ( identifier[nbd] ), identifier[python_shell] = keyword[False] )
keyword[return] identifier[ret]
|
def clear(mnt):
"""
Pass in the mnt dict returned from nbd_mount to unmount and disconnect
the image from nbd. If all of the partitions are unmounted return an
empty dict, otherwise return a dict containing the still mounted
partitions
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.clear '{"/mnt/foo": "/dev/nbd0p1"}'
"""
ret = {}
nbds = set()
for (m_pt, dev) in six.iteritems(mnt):
mnt_ret = __salt__['mount.umount'](m_pt)
if mnt_ret is not True:
ret[m_pt] = dev # depends on [control=['if'], data=[]]
nbds.add(dev[:dev.rindex('p')]) # depends on [control=['for'], data=[]]
if ret:
return ret # depends on [control=['if'], data=[]]
for nbd in nbds:
__salt__['cmd.run']('qemu-nbd -d {0}'.format(nbd), python_shell=False) # depends on [control=['for'], data=['nbd']]
return ret
|
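The one non-obvious step in `clear` is recovering the parent nbd device from a partition path via `dev[:dev.rindex('p')]`. A tiny standalone check of that slicing:
def parent_nbd(dev):
    # '/dev/nbd0p1' -> '/dev/nbd0': drop everything from the last 'p' onward.
    return dev[:dev.rindex('p')]

assert parent_nbd('/dev/nbd0p1') == '/dev/nbd0'
assert parent_nbd('/dev/nbd10p3') == '/dev/nbd10'
print(parent_nbd('/dev/nbd0p1'))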
async def cas(self, value: Any, **kwargs) -> bool:
"""
Checks and sets the specified value for the locked key. If the value has changed
since the lock was created, it will raise an :class:`aiocache.lock.OptimisticLockError`
exception.
:raises: :class:`aiocache.lock.OptimisticLockError`
"""
success = await self.client.set(self.key, value, _cas_token=self._token, **kwargs)
if not success:
raise OptimisticLockError("Value has changed since the lock started")
return True
|
<ast.AsyncFunctionDef object at 0x7da18c4cc580>
|
keyword[async] keyword[def] identifier[cas] ( identifier[self] , identifier[value] : identifier[Any] ,** identifier[kwargs] )-> identifier[bool] :
literal[string]
identifier[success] = keyword[await] identifier[self] . identifier[client] . identifier[set] ( identifier[self] . identifier[key] , identifier[value] , identifier[_cas_token] = identifier[self] . identifier[_token] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[success] :
keyword[raise] identifier[OptimisticLockError] ( literal[string] )
keyword[return] keyword[True]
|
async def cas(self, value: Any, **kwargs) -> bool:
"""
Checks and sets the specified value for the locked key. If the value has changed
since the lock was created, it will raise an :class:`aiocache.lock.OptimisticLockError`
exception.
:raises: :class:`aiocache.lock.OptimisticLockError`
"""
success = await self.client.set(self.key, value, _cas_token=self._token, **kwargs)
if not success:
raise OptimisticLockError('Value has changed since the lock started') # depends on [control=['if'], data=[]]
return True
|
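A usage sketch for the optimistic-locking pattern above, based on aiocache's documented `OptimisticLock` context manager; treat the exact import paths and the in-memory backend as assumptions against your installed aiocache version:
import asyncio
from aiocache import Cache
from aiocache.lock import OptimisticLock, OptimisticLockError

async def main():
    cache = Cache(Cache.MEMORY)
    await cache.set("key", "initial")
    try:
        async with OptimisticLock(cache, "key") as lock:
            # If another writer changes "key" between entering the block and
            # cas(), cas() raises OptimisticLockError instead of overwriting.
            await lock.cas("updated")
    except OptimisticLockError:
        print("value changed underneath us; retry or give up")
    print(await cache.get("key"))

asyncio.run(main())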
def getLabel(self, inferenceType=None):
"""
Helper method that generates a unique label for a :class:`MetricSpec` /
:class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is
formatted as follows:
::
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
::
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
:returns: (string) label for inference type
"""
result = []
if inferenceType is not None:
result.append(InferenceType.getLabel(inferenceType))
result.append(self.inferenceElement)
result.append(self.metric)
params = self.params
if params is not None:
      sortedParams = params.keys()
      sortedParams.sort()
      for param in sortedParams:
        # Don't include the customFuncSource - it is too long and unwieldy
        if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
          continue
        value = params[param]
        if isinstance(value, str):
          result.extend(["%s='%s'" % (param, value)])
        else:
          result.extend(["%s=%s" % (param, value)])
    if self.field:
      result.append("field=%s" % self.field)
return self._LABEL_SEPARATOR.join(result)
|
def function[getLabel, parameter[self, inferenceType]]:
constant[
Helper method that generates a unique label for a :class:`MetricSpec` /
:class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is
formatted as follows:
::
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
::
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
:returns: (string) label for inference type
]
variable[result] assign[=] list[[]]
if compare[name[inferenceType] is_not constant[None]] begin[:]
call[name[result].append, parameter[call[name[InferenceType].getLabel, parameter[name[inferenceType]]]]]
call[name[result].append, parameter[name[self].inferenceElement]]
call[name[result].append, parameter[name[self].metric]]
variable[params] assign[=] name[self].params
if compare[name[params] is_not constant[None]] begin[:]
variable[sortedParams] assign[=] call[name[params].keys, parameter[]]
call[name[sortedParams].sort, parameter[]]
for taget[name[param]] in starred[name[sortedParams]] begin[:]
if compare[name[param] in tuple[[<ast.Constant object at 0x7da20e9b07c0>, <ast.Constant object at 0x7da20e9b11e0>, <ast.Constant object at 0x7da20e9b27d0>]]] begin[:]
continue
variable[value] assign[=] call[name[params]][name[param]]
if call[name[isinstance], parameter[name[value], name[str]]] begin[:]
call[name[result].extend, parameter[list[[<ast.BinOp object at 0x7da20e9b2cb0>]]]]
if name[self].field begin[:]
call[name[result].append, parameter[binary_operation[constant[field=%s] <ast.Mod object at 0x7da2590d6920> name[self].field]]]
return[call[name[self]._LABEL_SEPARATOR.join, parameter[name[result]]]]
|
keyword[def] identifier[getLabel] ( identifier[self] , identifier[inferenceType] = keyword[None] ):
literal[string]
identifier[result] =[]
keyword[if] identifier[inferenceType] keyword[is] keyword[not] keyword[None] :
identifier[result] . identifier[append] ( identifier[InferenceType] . identifier[getLabel] ( identifier[inferenceType] ))
identifier[result] . identifier[append] ( identifier[self] . identifier[inferenceElement] )
identifier[result] . identifier[append] ( identifier[self] . identifier[metric] )
identifier[params] = identifier[self] . identifier[params]
keyword[if] identifier[params] keyword[is] keyword[not] keyword[None] :
identifier[sortedParams] = identifier[params] . identifier[keys] ()
identifier[sortedParams] . identifier[sort] ()
keyword[for] identifier[param] keyword[in] identifier[sortedParams] :
keyword[if] identifier[param] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[continue]
identifier[value] = identifier[params] [ identifier[param] ]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[result] . identifier[extend] ([ literal[string] %( identifier[param] , identifier[value] )])
keyword[else] :
identifier[result] . identifier[extend] ([ literal[string] %( identifier[param] , identifier[value] )])
keyword[if] identifier[self] . identifier[field] :
identifier[result] . identifier[append] ( literal[string] %( identifier[self] . identifier[field] ))
keyword[return] identifier[self] . identifier[_LABEL_SEPARATOR] . identifier[join] ( identifier[result] )
|
def getLabel(self, inferenceType=None):
"""
Helper method that generates a unique label for a :class:`MetricSpec` /
:class:`~nupic.frameworks.opf.opf_utils.InferenceType` pair. The label is
formatted as follows:
::
<predictionKind>:<metric type>:(paramName=value)*:field=<fieldname>
For example:
::
classification:aae:paramA=10.2:paramB=20:window=100:field=pounds
:returns: (string) label for inference type
"""
result = []
if inferenceType is not None:
result.append(InferenceType.getLabel(inferenceType)) # depends on [control=['if'], data=['inferenceType']]
result.append(self.inferenceElement)
result.append(self.metric)
params = self.params
if params is not None:
sortedParams = params.keys()
sortedParams.sort()
for param in sortedParams:
            # Don't include the customFuncSource - it is too long and unwieldy
if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
continue # depends on [control=['if'], data=[]]
value = params[param]
if isinstance(value, str):
result.extend(["%s='%s'" % (param, value)]) # depends on [control=['if'], data=[]]
else:
result.extend(['%s=%s' % (param, value)]) # depends on [control=['for'], data=['param']] # depends on [control=['if'], data=['params']]
if self.field:
result.append('field=%s' % self.field) # depends on [control=['if'], data=[]]
return self._LABEL_SEPARATOR.join(result)
|
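The label format can be reproduced without nupic. A standalone sketch of the same join logic, assuming the separator is ':' as the docstring examples imply; the function name and parameter spelling here are invented for the example:
_LABEL_SEPARATOR = ':'  # assumption: matches the docstring examples

def build_label(inference_element, metric, params=None, field=None,
                inference_kind=None):
    result = []
    if inference_kind is not None:
        result.append(inference_kind)
    result.extend([inference_element, metric])
    for param in sorted(params or {}):
        # Skip the long source-code params, as getLabel does.
        if param in ('customFuncSource', 'customFuncDef', 'customExpr'):
            continue
        value = params[param]
        fmt = "%s='%s'" if isinstance(value, str) else "%s=%s"
        result.append(fmt % (param, value))
    if field:
        result.append("field=%s" % field)
    return _LABEL_SEPARATOR.join(result)

print(build_label('classification', 'aae',
                  params={'paramA': 10.2, 'paramB': 20, 'window': 100},
                  field='pounds'))
# classification:aae:paramA=10.2:paramB=20:window=100:field=pounds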
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
|
def function[max_pool, parameter[arr, block_size, cval, preserve_dtype]]:
constant[
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
]
return[call[name[pool], parameter[name[arr], name[block_size], name[np].max]]]
|
keyword[def] identifier[max_pool] ( identifier[arr] , identifier[block_size] , identifier[cval] = literal[int] , identifier[preserve_dtype] = keyword[True] ):
literal[string]
keyword[return] identifier[pool] ( identifier[arr] , identifier[block_size] , identifier[np] . identifier[max] , identifier[cval] = identifier[cval] , identifier[preserve_dtype] = identifier[preserve_dtype] )
|
def max_pool(arr, block_size, cval=0, preserve_dtype=True):
"""
Resize an array using max-pooling.
dtype support::
See :func:`imgaug.imgaug.pool`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pool. See :func:`imgaug.pool` for details.
block_size : int or tuple of int or tuple of int
Size of each block of values to pool. See `imgaug.pool` for details.
cval : number, optional
Padding value. See :func:`imgaug.pool` for details.
preserve_dtype : bool, optional
Whether to preserve the input array dtype. See :func:`imgaug.pool` for details.
Returns
-------
arr_reduced : (H',W') ndarray or (H',W',C') ndarray
Array after max-pooling.
"""
return pool(arr, block_size, np.max, cval=cval, preserve_dtype=preserve_dtype)
|
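What `pool(arr, block_size, np.max)` computes can be illustrated with plain numpy for the case where the block size divides the array shape, so no padding happens and `cval` never comes into play; this is a simplified illustration, not imgaug's actual `pool`:
import numpy as np

def max_pool_2d(arr, block_size):
    """Block-wise max over an (H, W) array; block_size must divide H and W."""
    bh, bw = (block_size, block_size) if np.isscalar(block_size) else block_size
    h, w = arr.shape
    # Split into (H/bh, bh, W/bw, bw) blocks, then reduce each block.
    return arr.reshape(h // bh, bh, w // bw, bw).max(axis=(1, 3))

arr = np.arange(16).reshape(4, 4)
print(max_pool_2d(arr, 2))
# [[ 5  7]
#  [13 15]]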