code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def models_for_pages(*args):
"""
Create a select list containing each of the models that subclass the
``Page`` model.
"""
from warnings import warn
warn("template tag models_for_pages is deprectaed, use "
"PageAdmin.get_content_models instead")
from yacms.pages.admin import PageAdmin
return PageAdmin.get_content_models() | def function[models_for_pages, parameter[]]:
constant[
Create a select list containing each of the models that subclass the
``Page`` model.
]
from relative_module[warnings] import module[warn]
call[name[warn], parameter[constant[template tag models_for_pages is deprectaed, use PageAdmin.get_content_models instead]]]
from relative_module[yacms.pages.admin] import module[PageAdmin]
return[call[name[PageAdmin].get_content_models, parameter[]]] | keyword[def] identifier[models_for_pages] (* identifier[args] ):
literal[string]
keyword[from] identifier[warnings] keyword[import] identifier[warn]
identifier[warn] ( literal[string]
literal[string] )
keyword[from] identifier[yacms] . identifier[pages] . identifier[admin] keyword[import] identifier[PageAdmin]
keyword[return] identifier[PageAdmin] . identifier[get_content_models] () | def models_for_pages(*args):
"""
Create a select list containing each of the models that subclass the
``Page`` model.
"""
from warnings import warn
warn('template tag models_for_pages is deprectaed, use PageAdmin.get_content_models instead')
from yacms.pages.admin import PageAdmin
return PageAdmin.get_content_models() |
def snap_tz(dttm, instruction, timezone):
"""This function handles timezone aware datetimes.
Sometimes it is necessary to keep daylight saving time switches in mind.
Args:
instruction (string): a string that encodes 0 to n transformations of a time, i.e. "-1h@h", "@mon+2d+4h", ...
dttm (datetime): a datetime with timezone
timezone: a pytz timezone
Returns:
datetime: The datetime resulting from applying all transformations to the input datetime.
Example:
>>> import pytz
>>> CET = pytz.timezone("Europe/Berlin")
>>> dttm = CET.localize(datetime(2017, 3, 26, 3, 44)
>>> dttm
datetime.datetime(2017, 3, 26, 3, 44, tzinfo=<DstTzInfo 'Europe/Berlin' CEST+2:00:00 DST>)
>>> snap_tz(dttm, "-2h@h", CET)
datetime.datetime(2017, 3, 26, 0, 0, tzinfo=<DstTzInfo 'Europe/Berlin' CET+1:00:00 STD>)
>>> # switch from winter to summer time!
"""
transformations = parse(instruction)
return reduce(lambda dt, transformation: transformation.apply_to_with_tz(dt, timezone), transformations, dttm) | def function[snap_tz, parameter[dttm, instruction, timezone]]:
constant[This function handles timezone aware datetimes.
Sometimes it is necessary to keep daylight saving time switches in mind.
Args:
instruction (string): a string that encodes 0 to n transformations of a time, i.e. "-1h@h", "@mon+2d+4h", ...
dttm (datetime): a datetime with timezone
timezone: a pytz timezone
Returns:
datetime: The datetime resulting from applying all transformations to the input datetime.
Example:
>>> import pytz
>>> CET = pytz.timezone("Europe/Berlin")
>>> dttm = CET.localize(datetime(2017, 3, 26, 3, 44)
>>> dttm
datetime.datetime(2017, 3, 26, 3, 44, tzinfo=<DstTzInfo 'Europe/Berlin' CEST+2:00:00 DST>)
>>> snap_tz(dttm, "-2h@h", CET)
datetime.datetime(2017, 3, 26, 0, 0, tzinfo=<DstTzInfo 'Europe/Berlin' CET+1:00:00 STD>)
>>> # switch from winter to summer time!
]
variable[transformations] assign[=] call[name[parse], parameter[name[instruction]]]
return[call[name[reduce], parameter[<ast.Lambda object at 0x7da1b0217340>, name[transformations], name[dttm]]]] | keyword[def] identifier[snap_tz] ( identifier[dttm] , identifier[instruction] , identifier[timezone] ):
literal[string]
identifier[transformations] = identifier[parse] ( identifier[instruction] )
keyword[return] identifier[reduce] ( keyword[lambda] identifier[dt] , identifier[transformation] : identifier[transformation] . identifier[apply_to_with_tz] ( identifier[dt] , identifier[timezone] ), identifier[transformations] , identifier[dttm] ) | def snap_tz(dttm, instruction, timezone):
"""This function handles timezone aware datetimes.
Sometimes it is necessary to keep daylight saving time switches in mind.
Args:
instruction (string): a string that encodes 0 to n transformations of a time, i.e. "-1h@h", "@mon+2d+4h", ...
dttm (datetime): a datetime with timezone
timezone: a pytz timezone
Returns:
datetime: The datetime resulting from applying all transformations to the input datetime.
Example:
>>> import pytz
>>> CET = pytz.timezone("Europe/Berlin")
>>> dttm = CET.localize(datetime(2017, 3, 26, 3, 44)
>>> dttm
datetime.datetime(2017, 3, 26, 3, 44, tzinfo=<DstTzInfo 'Europe/Berlin' CEST+2:00:00 DST>)
>>> snap_tz(dttm, "-2h@h", CET)
datetime.datetime(2017, 3, 26, 0, 0, tzinfo=<DstTzInfo 'Europe/Berlin' CET+1:00:00 STD>)
>>> # switch from winter to summer time!
"""
transformations = parse(instruction)
return reduce(lambda dt, transformation: transformation.apply_to_with_tz(dt, timezone), transformations, dttm) |
def match_head(subject, pattern):
"""Checks if the head of subject matches the pattern's head."""
if isinstance(pattern, Pattern):
pattern = pattern.expression
pattern_head = get_head(pattern)
if pattern_head is None:
return True
if issubclass(pattern_head, OneIdentityOperation):
return True
subject_head = get_head(subject)
assert subject_head is not None
return issubclass(subject_head, pattern_head) | def function[match_head, parameter[subject, pattern]]:
constant[Checks if the head of subject matches the pattern's head.]
if call[name[isinstance], parameter[name[pattern], name[Pattern]]] begin[:]
variable[pattern] assign[=] name[pattern].expression
variable[pattern_head] assign[=] call[name[get_head], parameter[name[pattern]]]
if compare[name[pattern_head] is constant[None]] begin[:]
return[constant[True]]
if call[name[issubclass], parameter[name[pattern_head], name[OneIdentityOperation]]] begin[:]
return[constant[True]]
variable[subject_head] assign[=] call[name[get_head], parameter[name[subject]]]
assert[compare[name[subject_head] is_not constant[None]]]
return[call[name[issubclass], parameter[name[subject_head], name[pattern_head]]]] | keyword[def] identifier[match_head] ( identifier[subject] , identifier[pattern] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[pattern] , identifier[Pattern] ):
identifier[pattern] = identifier[pattern] . identifier[expression]
identifier[pattern_head] = identifier[get_head] ( identifier[pattern] )
keyword[if] identifier[pattern_head] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[if] identifier[issubclass] ( identifier[pattern_head] , identifier[OneIdentityOperation] ):
keyword[return] keyword[True]
identifier[subject_head] = identifier[get_head] ( identifier[subject] )
keyword[assert] identifier[subject_head] keyword[is] keyword[not] keyword[None]
keyword[return] identifier[issubclass] ( identifier[subject_head] , identifier[pattern_head] ) | def match_head(subject, pattern):
"""Checks if the head of subject matches the pattern's head."""
if isinstance(pattern, Pattern):
pattern = pattern.expression # depends on [control=['if'], data=[]]
pattern_head = get_head(pattern)
if pattern_head is None:
return True # depends on [control=['if'], data=[]]
if issubclass(pattern_head, OneIdentityOperation):
return True # depends on [control=['if'], data=[]]
subject_head = get_head(subject)
assert subject_head is not None
return issubclass(subject_head, pattern_head) |
def _get_version():
"""
Fetches the version number from the package's __init__.py file
"""
with open('Lib/fontParts/__init__.py', 'r', encoding='utf-8') as f:
for line in f:
if line.startswith(u'__version__'):
return ast.parse(line).body[0].value.s
raise RuntimeError("No __version__ string found!") | def function[_get_version, parameter[]]:
constant[
Fetches the version number from the package's __init__.py file
]
with call[name[open], parameter[constant[Lib/fontParts/__init__.py], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if call[name[line].startswith, parameter[constant[__version__]]] begin[:]
return[call[call[name[ast].parse, parameter[name[line]]].body][constant[0]].value.s]
<ast.Raise object at 0x7da18ede42b0> | keyword[def] identifier[_get_version] ():
literal[string]
keyword[with] identifier[open] ( literal[string] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[ast] . identifier[parse] ( identifier[line] ). identifier[body] [ literal[int] ]. identifier[value] . identifier[s]
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def _get_version():
"""
Fetches the version number from the package's __init__.py file
"""
with open('Lib/fontParts/__init__.py', 'r', encoding='utf-8') as f:
for line in f:
if line.startswith(u'__version__'):
return ast.parse(line).body[0].value.s # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
raise RuntimeError('No __version__ string found!') # depends on [control=['with'], data=['f']] |
def rdf_catalog():
'''Root RDF endpoint with content negociation handling'''
format = RDF_EXTENSIONS[negociate_content()]
url = url_for('site.rdf_catalog_format', format=format)
return redirect(url) | def function[rdf_catalog, parameter[]]:
constant[Root RDF endpoint with content negociation handling]
variable[format] assign[=] call[name[RDF_EXTENSIONS]][call[name[negociate_content], parameter[]]]
variable[url] assign[=] call[name[url_for], parameter[constant[site.rdf_catalog_format]]]
return[call[name[redirect], parameter[name[url]]]] | keyword[def] identifier[rdf_catalog] ():
literal[string]
identifier[format] = identifier[RDF_EXTENSIONS] [ identifier[negociate_content] ()]
identifier[url] = identifier[url_for] ( literal[string] , identifier[format] = identifier[format] )
keyword[return] identifier[redirect] ( identifier[url] ) | def rdf_catalog():
"""Root RDF endpoint with content negociation handling"""
format = RDF_EXTENSIONS[negociate_content()]
url = url_for('site.rdf_catalog_format', format=format)
return redirect(url) |
def isRunActive(g):
"""
Polls the data server to see if a run is active
"""
if g.cpars['hcam_server_on']:
url = g.cpars['hipercam_server'] + 'summary'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=True)
if not rs.ok:
raise DriverError('isRunActive error: ' + str(rs.err))
if rs.state == 'idle':
return False
elif rs.state == 'active':
return True
else:
raise DriverError('isRunActive error, state = ' + rs.state)
else:
raise DriverError('isRunActive error: servers are not active') | def function[isRunActive, parameter[g]]:
constant[
Polls the data server to see if a run is active
]
if call[name[g].cpars][constant[hcam_server_on]] begin[:]
variable[url] assign[=] binary_operation[call[name[g].cpars][constant[hipercam_server]] + constant[summary]]
variable[response] assign[=] call[name[urllib].request.urlopen, parameter[name[url]]]
variable[rs] assign[=] call[name[ReadServer], parameter[call[name[response].read, parameter[]]]]
if <ast.UnaryOp object at 0x7da207f02b90> begin[:]
<ast.Raise object at 0x7da18f58e890>
if compare[name[rs].state equal[==] constant[idle]] begin[:]
return[constant[False]] | keyword[def] identifier[isRunActive] ( identifier[g] ):
literal[string]
keyword[if] identifier[g] . identifier[cpars] [ literal[string] ]:
identifier[url] = identifier[g] . identifier[cpars] [ literal[string] ]+ literal[string]
identifier[response] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[url] , identifier[timeout] = literal[int] )
identifier[rs] = identifier[ReadServer] ( identifier[response] . identifier[read] (), identifier[status_msg] = keyword[True] )
keyword[if] keyword[not] identifier[rs] . identifier[ok] :
keyword[raise] identifier[DriverError] ( literal[string] + identifier[str] ( identifier[rs] . identifier[err] ))
keyword[if] identifier[rs] . identifier[state] == literal[string] :
keyword[return] keyword[False]
keyword[elif] identifier[rs] . identifier[state] == literal[string] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[DriverError] ( literal[string] + identifier[rs] . identifier[state] )
keyword[else] :
keyword[raise] identifier[DriverError] ( literal[string] ) | def isRunActive(g):
"""
Polls the data server to see if a run is active
"""
if g.cpars['hcam_server_on']:
url = g.cpars['hipercam_server'] + 'summary'
response = urllib.request.urlopen(url, timeout=2)
rs = ReadServer(response.read(), status_msg=True)
if not rs.ok:
raise DriverError('isRunActive error: ' + str(rs.err)) # depends on [control=['if'], data=[]]
if rs.state == 'idle':
return False # depends on [control=['if'], data=[]]
elif rs.state == 'active':
return True # depends on [control=['if'], data=[]]
else:
raise DriverError('isRunActive error, state = ' + rs.state) # depends on [control=['if'], data=[]]
else:
raise DriverError('isRunActive error: servers are not active') |
def edges(self):
# TODO: check docstring
"""Returns a generator for iterating over edges
Yields
------
type
Generator for iterating over edges.
"""
for i in sorted(self._matrix.keys(), key=lambda x:x.name()):
for j in sorted(self._matrix[i].keys(), key=lambda x:x.name()):
if i != j:
yield (i, j) | def function[edges, parameter[self]]:
constant[Returns a generator for iterating over edges
Yields
------
type
Generator for iterating over edges.
]
for taget[name[i]] in starred[call[name[sorted], parameter[call[name[self]._matrix.keys, parameter[]]]]] begin[:]
for taget[name[j]] in starred[call[name[sorted], parameter[call[call[name[self]._matrix][name[i]].keys, parameter[]]]]] begin[:]
if compare[name[i] not_equal[!=] name[j]] begin[:]
<ast.Yield object at 0x7da1b26ac370> | keyword[def] identifier[edges] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[sorted] ( identifier[self] . identifier[_matrix] . identifier[keys] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[name] ()):
keyword[for] identifier[j] keyword[in] identifier[sorted] ( identifier[self] . identifier[_matrix] [ identifier[i] ]. identifier[keys] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[name] ()):
keyword[if] identifier[i] != identifier[j] :
keyword[yield] ( identifier[i] , identifier[j] ) | def edges(self):
# TODO: check docstring
'Returns a generator for iterating over edges\n \n Yields\n ------\n type\n Generator for iterating over edges.\n \n '
for i in sorted(self._matrix.keys(), key=lambda x: x.name()):
for j in sorted(self._matrix[i].keys(), key=lambda x: x.name()):
if i != j:
yield (i, j) # depends on [control=['if'], data=['i', 'j']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] |
def copy_fields(self, model):
"""
Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object.
"""
fields = {'__module__' : model.__module__}
for field in model._meta.fields:
if not field.name in self._exclude:
field = copy.deepcopy(field)
if isinstance(field, models.AutoField):
#we replace the AutoField of the original model
#with an IntegerField because a model can
#have only one autofield.
field.__class__ = models.IntegerField
if field.primary_key:
field.serialize = True
#OneToOne fields should really be tracked
#as ForeignKey fields
if isinstance(field, models.OneToOneField):
field.__class__ = models.ForeignKey
if field.primary_key or field.unique:
#unique fields of the original model
#can not be guaranteed to be unique
#in the audit log entry but they
#should still be indexed for faster lookups.
field.primary_key = False
field._unique = False
field.db_index = True
if field.remote_field and field.remote_field.related_name:
field.remote_field.related_name = '_auditlog_{}_{}'.format(
model._meta.model_name,
field.remote_field.related_name
)
elif field.remote_field:
try:
if field.remote_field.get_accessor_name():
field.remote_field.related_name = '_auditlog_{}_{}'.format(
model._meta.model_name,
field.remote_field.get_accessor_name()
)
except e:
pass
fields[field.name] = field
return fields | def function[copy_fields, parameter[self, model]]:
constant[
Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object.
]
variable[fields] assign[=] dictionary[[<ast.Constant object at 0x7da1b0dde680>], [<ast.Attribute object at 0x7da1b0ddca90>]]
for taget[name[field]] in starred[name[model]._meta.fields] begin[:]
if <ast.UnaryOp object at 0x7da18dc9b760> begin[:]
variable[field] assign[=] call[name[copy].deepcopy, parameter[name[field]]]
if call[name[isinstance], parameter[name[field], name[models].AutoField]] begin[:]
name[field].__class__ assign[=] name[models].IntegerField
if name[field].primary_key begin[:]
name[field].serialize assign[=] constant[True]
if call[name[isinstance], parameter[name[field], name[models].OneToOneField]] begin[:]
name[field].__class__ assign[=] name[models].ForeignKey
if <ast.BoolOp object at 0x7da1b0b08f10> begin[:]
name[field].primary_key assign[=] constant[False]
name[field]._unique assign[=] constant[False]
name[field].db_index assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b0b0a530> begin[:]
name[field].remote_field.related_name assign[=] call[constant[_auditlog_{}_{}].format, parameter[name[model]._meta.model_name, name[field].remote_field.related_name]]
call[name[fields]][name[field].name] assign[=] name[field]
return[name[fields]] | keyword[def] identifier[copy_fields] ( identifier[self] , identifier[model] ):
literal[string]
identifier[fields] ={ literal[string] : identifier[model] . identifier[__module__] }
keyword[for] identifier[field] keyword[in] identifier[model] . identifier[_meta] . identifier[fields] :
keyword[if] keyword[not] identifier[field] . identifier[name] keyword[in] identifier[self] . identifier[_exclude] :
identifier[field] = identifier[copy] . identifier[deepcopy] ( identifier[field] )
keyword[if] identifier[isinstance] ( identifier[field] , identifier[models] . identifier[AutoField] ):
identifier[field] . identifier[__class__] = identifier[models] . identifier[IntegerField]
keyword[if] identifier[field] . identifier[primary_key] :
identifier[field] . identifier[serialize] = keyword[True]
keyword[if] identifier[isinstance] ( identifier[field] , identifier[models] . identifier[OneToOneField] ):
identifier[field] . identifier[__class__] = identifier[models] . identifier[ForeignKey]
keyword[if] identifier[field] . identifier[primary_key] keyword[or] identifier[field] . identifier[unique] :
identifier[field] . identifier[primary_key] = keyword[False]
identifier[field] . identifier[_unique] = keyword[False]
identifier[field] . identifier[db_index] = keyword[True]
keyword[if] identifier[field] . identifier[remote_field] keyword[and] identifier[field] . identifier[remote_field] . identifier[related_name] :
identifier[field] . identifier[remote_field] . identifier[related_name] = literal[string] . identifier[format] (
identifier[model] . identifier[_meta] . identifier[model_name] ,
identifier[field] . identifier[remote_field] . identifier[related_name]
)
keyword[elif] identifier[field] . identifier[remote_field] :
keyword[try] :
keyword[if] identifier[field] . identifier[remote_field] . identifier[get_accessor_name] ():
identifier[field] . identifier[remote_field] . identifier[related_name] = literal[string] . identifier[format] (
identifier[model] . identifier[_meta] . identifier[model_name] ,
identifier[field] . identifier[remote_field] . identifier[get_accessor_name] ()
)
keyword[except] identifier[e] :
keyword[pass]
identifier[fields] [ identifier[field] . identifier[name] ]= identifier[field]
keyword[return] identifier[fields] | def copy_fields(self, model):
"""
Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object.
"""
fields = {'__module__': model.__module__}
for field in model._meta.fields:
if not field.name in self._exclude:
field = copy.deepcopy(field)
if isinstance(field, models.AutoField):
#we replace the AutoField of the original model
#with an IntegerField because a model can
#have only one autofield.
field.__class__ = models.IntegerField # depends on [control=['if'], data=[]]
if field.primary_key:
field.serialize = True # depends on [control=['if'], data=[]]
#OneToOne fields should really be tracked
#as ForeignKey fields
if isinstance(field, models.OneToOneField):
field.__class__ = models.ForeignKey # depends on [control=['if'], data=[]]
if field.primary_key or field.unique:
#unique fields of the original model
#can not be guaranteed to be unique
#in the audit log entry but they
#should still be indexed for faster lookups.
field.primary_key = False
field._unique = False
field.db_index = True # depends on [control=['if'], data=[]]
if field.remote_field and field.remote_field.related_name:
field.remote_field.related_name = '_auditlog_{}_{}'.format(model._meta.model_name, field.remote_field.related_name) # depends on [control=['if'], data=[]]
elif field.remote_field:
try:
if field.remote_field.get_accessor_name():
field.remote_field.related_name = '_auditlog_{}_{}'.format(model._meta.model_name, field.remote_field.get_accessor_name()) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except e:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
fields[field.name] = field # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
return fields |
def isSprintActive(self, sprintIdx):
"""If the given sprint exists and is active, return active=True.
If the sprint does not exist yet, this call will create it (and return
active=True). If it already exists, but is completing or complete, return
active=False.
If sprintIdx is past the end of the possible sprints, return
active=False, noMoreSprints=True
IMPORTANT: When speculative particles are enabled, this call has some
special processing to handle speculative sprints:
* When creating a new speculative sprint (creating sprint N before
sprint N-1 has completed), it initially only puts in only ONE swarm into
the sprint.
* Every time it is asked if sprint N is active, it also checks to see if
it is time to add another swarm to the sprint, and adds a new swarm if
appropriate before returning active=True
* We decide it is time to add a new swarm to a speculative sprint when ALL
of the currently active swarms in the sprint have all the workers they
need (number of running (not mature) particles is _minParticlesPerSwarm).
This means that we have capacity to run additional particles in a new
swarm.
It is expected that the sprints will be checked IN ORDER from 0 on up. (It
is an error not to) The caller should always try to allocate from the first
active sprint it finds. If it can't, then it can call this again to
find/create the next active sprint.
Parameters:
---------------------------------------------------------------------
retval: (active, noMoreSprints)
active: True if the given sprint is active
noMoreSprints: True if there are no more sprints possible
"""
while True:
numExistingSprints = len(self._state['sprints'])
# If this sprint already exists, see if it is active
if sprintIdx <= numExistingSprints-1:
# With speculation off, it's simple, just return whether or not the
# asked for sprint has active status
if not self._hsObj._speculativeParticles:
active = (self._state['sprints'][sprintIdx]['status'] == 'active')
return (active, False)
# With speculation on, if the sprint is still marked active, we also
# need to see if it's time to add a new swarm to it.
else:
active = (self._state['sprints'][sprintIdx]['status'] == 'active')
if not active:
return (active, False)
# See if all of the existing swarms are at capacity (have all the
# workers they need):
activeSwarmIds = self.getActiveSwarms(sprintIdx)
swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,
matured=False)[0] for swarmId in activeSwarmIds]
notFullSwarms = [len(swarm) for swarm in swarmSizes \
if len(swarm) < self._hsObj._minParticlesPerSwarm]
# If some swarms have room return that the swarm is active.
if len(notFullSwarms) > 0:
return (True, False)
# If the existing swarms are at capacity, we will fall through to the
# logic below which tries to add a new swarm to the sprint.
# Stop creating new sprints?
if self._state['lastGoodSprint'] is not None:
return (False, True)
# if fixedFields is set, we are running a fast swarm and only run sprint0
if self._hsObj._fixedFields is not None:
return (False, True)
# ----------------------------------------------------------------------
# Get the best model (if there is one) from the prior sprint. That gives
# us the base encoder set for the next sprint. For sprint zero make sure
# it does not take the last sprintidx because of wrapping.
if sprintIdx > 0 \
and self._state['sprints'][sprintIdx-1]['status'] == 'completed':
(bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-1)
(particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(
bestModelId)
bestSwarmId = particleState['swarmId']
baseEncoderSets = [bestSwarmId.split('.')]
# If there is no best model yet, then use all encoder sets from the prior
# sprint that were not killed
else:
bestSwarmId = None
particleState = None
# Build up more combinations, using ALL of the sets in the current
# sprint.
baseEncoderSets = []
for swarmId in self.getNonKilledSwarms(sprintIdx-1):
baseEncoderSets.append(swarmId.split('.'))
# ----------------------------------------------------------------------
# Which encoders should we add to the current base set?
encoderAddSet = []
# If we have constraints on how many fields we carry forward into
# subsequent sprints (either nupic.hypersearch.max.field.branching or
# nupic.hypersearch.min.field.contribution was set), then be more
# picky about which fields we add in.
limitFields = False
if self._hsObj._maxBranching > 0 \
or self._hsObj._minFieldContribution >= 0:
if self._hsObj._searchType == HsSearchType.temporal or \
self._hsObj._searchType == HsSearchType.classification:
if sprintIdx >= 1:
limitFields = True
baseSprintIdx = 0
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
if sprintIdx >= 2:
limitFields = True
baseSprintIdx = 1
else:
raise RuntimeError("Unimplemented search type %s" % \
(self._hsObj._searchType))
# Only add top _maxBranching encoders to the swarms?
if limitFields:
# Get field contributions to filter added fields
pctFieldContributions, absFieldContributions = \
self.getFieldContributions()
toRemove = []
self.logger.debug("FieldContributions min: %s" % \
(self._hsObj._minFieldContribution))
for fieldname in pctFieldContributions:
if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
self.logger.debug("FieldContributions removing: %s" % (fieldname))
toRemove.append(self.getEncoderKeyFromName(fieldname))
else:
self.logger.debug("FieldContributions keeping: %s" % (fieldname))
# Grab the top maxBranching base sprint swarms.
swarms = self._state["swarms"]
sprintSwarms = [(swarm, swarms[swarm]["bestErrScore"]) \
for swarm in swarms if swarms[swarm]["sprintIdx"] == baseSprintIdx]
sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
if self._hsObj._maxBranching > 0:
sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]
# Create encoder set to generate further swarms.
for swarm in sprintSwarms:
swarmEncoders = swarm[0].split(".")
for encoder in swarmEncoders:
if not encoder in encoderAddSet:
encoderAddSet.append(encoder)
encoderAddSet = [encoder for encoder in encoderAddSet \
if not str(encoder) in toRemove]
# If no limit on the branching or min contribution, simply use all of the
# encoders.
else:
encoderAddSet = self._hsObj._encoderNames
# -----------------------------------------------------------------------
# Build up the new encoder combinations for the next sprint.
newSwarmIds = set()
# See if the caller wants to try more extensive field combinations with
# 3 fields.
if (self._hsObj._searchType == HsSearchType.temporal \
or self._hsObj._searchType == HsSearchType.legacyTemporal) \
and sprintIdx == 2 \
and (self._hsObj._tryAll3FieldCombinations or \
self._hsObj._tryAll3FieldCombinationsWTimestamps):
if self._hsObj._tryAll3FieldCombinations:
newEncoders = set(self._hsObj._encoderNames)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder)
else:
# Just make sure the timestamp encoders are part of the mix
newEncoders = set(encoderAddSet)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder)
for encoder in self._hsObj._encoderNames:
if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') \
or encoder.endswith('_dayOfWeek'):
newEncoders.add(encoder)
allCombos = list(itertools.combinations(newEncoders, 2))
for combo in allCombos:
newSet = list(combo)
newSet.append(self._hsObj._predictedFieldEncoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If a speculative sprint, only add the first encoder, if not add
# all of them.
if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
break
# Else, we only build up by adding 1 new encoder to the best combination(s)
# we've seen from the prior sprint
else:
for baseEncoderSet in baseEncoderSets:
for encoder in encoderAddSet:
if encoder not in self._state['blackListedEncoders'] \
and encoder not in baseEncoderSet:
newSet = list(baseEncoderSet)
newSet.append(encoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If a speculative sprint, only add the first encoder, if not add
# all of them.
if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
break
# ----------------------------------------------------------------------
# Sort the new swarm Ids
newSwarmIds = sorted(newSwarmIds)
# If no more swarms can be found for this sprint...
if len(newSwarmIds) == 0:
# if sprint is not an empty sprint return that it is active but do not
# add anything to it.
if len(self.getAllSwarms(sprintIdx)) > 0:
return (True, False)
# If this is an empty sprint and we couldn't find any new swarms to
# add (only bad fields are remaining), the search is over
else:
return (False, True)
# Add this sprint and the swarms that are in it to our state
self._dirty = True
# Add in the new sprint if necessary
if len(self._state["sprints"]) == sprintIdx:
self._state['sprints'].append({'status': 'active',
'bestModelId': None,
'bestErrScore': None})
# Add in the new swarm(s) to the sprint
for swarmId in newSwarmIds:
self._state['swarms'][swarmId] = {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': sprintIdx}
# Update the list of active swarms
self._state['activeSwarms'] = self.getActiveSwarms()
# Try to set new state
success = self.writeStateToDB()
# Return result if successful
if success:
return (True, False) | def function[isSprintActive, parameter[self, sprintIdx]]:
constant[If the given sprint exists and is active, return active=True.
If the sprint does not exist yet, this call will create it (and return
active=True). If it already exists, but is completing or complete, return
active=False.
If sprintIdx is past the end of the possible sprints, return
active=False, noMoreSprints=True
IMPORTANT: When speculative particles are enabled, this call has some
special processing to handle speculative sprints:
* When creating a new speculative sprint (creating sprint N before
sprint N-1 has completed), it initially only puts in only ONE swarm into
the sprint.
* Every time it is asked if sprint N is active, it also checks to see if
it is time to add another swarm to the sprint, and adds a new swarm if
appropriate before returning active=True
* We decide it is time to add a new swarm to a speculative sprint when ALL
of the currently active swarms in the sprint have all the workers they
need (number of running (not mature) particles is _minParticlesPerSwarm).
This means that we have capacity to run additional particles in a new
swarm.
It is expected that the sprints will be checked IN ORDER from 0 on up. (It
is an error not to) The caller should always try to allocate from the first
active sprint it finds. If it can't, then it can call this again to
find/create the next active sprint.
Parameters:
---------------------------------------------------------------------
retval: (active, noMoreSprints)
active: True if the given sprint is active
noMoreSprints: True if there are no more sprints possible
]
while constant[True] begin[:]
variable[numExistingSprints] assign[=] call[name[len], parameter[call[name[self]._state][constant[sprints]]]]
if compare[name[sprintIdx] less_or_equal[<=] binary_operation[name[numExistingSprints] - constant[1]]] begin[:]
if <ast.UnaryOp object at 0x7da20c7951b0> begin[:]
variable[active] assign[=] compare[call[call[call[name[self]._state][constant[sprints]]][name[sprintIdx]]][constant[status]] equal[==] constant[active]]
return[tuple[[<ast.Name object at 0x7da20c7969e0>, <ast.Constant object at 0x7da20c797730>]]]
if compare[call[name[self]._state][constant[lastGoodSprint]] is_not constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2047ebd30>, <ast.Constant object at 0x7da2047ea710>]]]
if compare[name[self]._hsObj._fixedFields is_not constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da20c796740>, <ast.Constant object at 0x7da20c7945b0>]]]
if <ast.BoolOp object at 0x7da20c794370> begin[:]
<ast.Tuple object at 0x7da20c795210> assign[=] call[name[self].bestModelInCompletedSprint, parameter[binary_operation[name[sprintIdx] - constant[1]]]]
<ast.Tuple object at 0x7da20c795360> assign[=] call[name[self]._hsObj._resultsDB.getParticleInfo, parameter[name[bestModelId]]]
variable[bestSwarmId] assign[=] call[name[particleState]][constant[swarmId]]
variable[baseEncoderSets] assign[=] list[[<ast.Call object at 0x7da20c795c90>]]
variable[encoderAddSet] assign[=] list[[]]
variable[limitFields] assign[=] constant[False]
if <ast.BoolOp object at 0x7da20c796530> begin[:]
if <ast.BoolOp object at 0x7da20c795480> begin[:]
if compare[name[sprintIdx] greater_or_equal[>=] constant[1]] begin[:]
variable[limitFields] assign[=] constant[True]
variable[baseSprintIdx] assign[=] constant[0]
if name[limitFields] begin[:]
<ast.Tuple object at 0x7da2044c3760> assign[=] call[name[self].getFieldContributions, parameter[]]
variable[toRemove] assign[=] list[[]]
call[name[self].logger.debug, parameter[binary_operation[constant[FieldContributions min: %s] <ast.Mod object at 0x7da2590d6920> name[self]._hsObj._minFieldContribution]]]
for taget[name[fieldname]] in starred[name[pctFieldContributions]] begin[:]
if compare[call[name[pctFieldContributions]][name[fieldname]] less[<] name[self]._hsObj._minFieldContribution] begin[:]
call[name[self].logger.debug, parameter[binary_operation[constant[FieldContributions removing: %s] <ast.Mod object at 0x7da2590d6920> name[fieldname]]]]
call[name[toRemove].append, parameter[call[name[self].getEncoderKeyFromName, parameter[name[fieldname]]]]]
variable[swarms] assign[=] call[name[self]._state][constant[swarms]]
variable[sprintSwarms] assign[=] <ast.ListComp object at 0x7da2044c3220>
variable[sprintSwarms] assign[=] call[name[sorted], parameter[name[sprintSwarms]]]
if compare[name[self]._hsObj._maxBranching greater[>] constant[0]] begin[:]
variable[sprintSwarms] assign[=] call[name[sprintSwarms]][<ast.Slice object at 0x7da2044c0a60>]
for taget[name[swarm]] in starred[name[sprintSwarms]] begin[:]
variable[swarmEncoders] assign[=] call[call[name[swarm]][constant[0]].split, parameter[constant[.]]]
for taget[name[encoder]] in starred[name[swarmEncoders]] begin[:]
if <ast.UnaryOp object at 0x7da2044c2d70> begin[:]
call[name[encoderAddSet].append, parameter[name[encoder]]]
variable[encoderAddSet] assign[=] <ast.ListComp object at 0x7da2044c38e0>
variable[newSwarmIds] assign[=] call[name[set], parameter[]]
if <ast.BoolOp object at 0x7da18dc04dc0> begin[:]
if name[self]._hsObj._tryAll3FieldCombinations begin[:]
variable[newEncoders] assign[=] call[name[set], parameter[name[self]._hsObj._encoderNames]]
if compare[name[self]._hsObj._predictedFieldEncoder in name[newEncoders]] begin[:]
call[name[newEncoders].remove, parameter[name[self]._hsObj._predictedFieldEncoder]]
variable[allCombos] assign[=] call[name[list], parameter[call[name[itertools].combinations, parameter[name[newEncoders], constant[2]]]]]
for taget[name[combo]] in starred[name[allCombos]] begin[:]
variable[newSet] assign[=] call[name[list], parameter[name[combo]]]
call[name[newSet].append, parameter[name[self]._hsObj._predictedFieldEncoder]]
call[name[newSet].sort, parameter[]]
variable[newSwarmId] assign[=] call[constant[.].join, parameter[name[newSet]]]
if compare[name[newSwarmId] <ast.NotIn object at 0x7da2590d7190> call[name[self]._state][constant[swarms]]] begin[:]
call[name[newSwarmIds].add, parameter[name[newSwarmId]]]
if compare[call[name[len], parameter[call[name[self].getActiveSwarms, parameter[binary_operation[name[sprintIdx] - constant[1]]]]]] greater[>] constant[0]] begin[:]
break
variable[newSwarmIds] assign[=] call[name[sorted], parameter[name[newSwarmIds]]]
if compare[call[name[len], parameter[name[newSwarmIds]]] equal[==] constant[0]] begin[:]
if compare[call[name[len], parameter[call[name[self].getAllSwarms, parameter[name[sprintIdx]]]]] greater[>] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da18dc05ab0>, <ast.Constant object at 0x7da18dc045b0>]]]
name[self]._dirty assign[=] constant[True]
if compare[call[name[len], parameter[call[name[self]._state][constant[sprints]]]] equal[==] name[sprintIdx]] begin[:]
call[call[name[self]._state][constant[sprints]].append, parameter[dictionary[[<ast.Constant object at 0x7da18dc07820>, <ast.Constant object at 0x7da18dc06980>, <ast.Constant object at 0x7da18dc041c0>], [<ast.Constant object at 0x7da18dc06440>, <ast.Constant object at 0x7da18dc06140>, <ast.Constant object at 0x7da18dc04370>]]]]
for taget[name[swarmId]] in starred[name[newSwarmIds]] begin[:]
call[call[name[self]._state][constant[swarms]]][name[swarmId]] assign[=] dictionary[[<ast.Constant object at 0x7da18dc05270>, <ast.Constant object at 0x7da18dc07e20>, <ast.Constant object at 0x7da18dc06dd0>, <ast.Constant object at 0x7da18dc07910>], [<ast.Constant object at 0x7da18dc05de0>, <ast.Constant object at 0x7da18dc06e30>, <ast.Constant object at 0x7da18dc06200>, <ast.Name object at 0x7da18dc06b00>]]
call[name[self]._state][constant[activeSwarms]] assign[=] call[name[self].getActiveSwarms, parameter[]]
variable[success] assign[=] call[name[self].writeStateToDB, parameter[]]
if name[success] begin[:]
return[tuple[[<ast.Constant object at 0x7da18dc05ff0>, <ast.Constant object at 0x7da18dc078b0>]]] | keyword[def] identifier[isSprintActive] ( identifier[self] , identifier[sprintIdx] ):
literal[string]
keyword[while] keyword[True] :
identifier[numExistingSprints] = identifier[len] ( identifier[self] . identifier[_state] [ literal[string] ])
keyword[if] identifier[sprintIdx] <= identifier[numExistingSprints] - literal[int] :
keyword[if] keyword[not] identifier[self] . identifier[_hsObj] . identifier[_speculativeParticles] :
identifier[active] =( identifier[self] . identifier[_state] [ literal[string] ][ identifier[sprintIdx] ][ literal[string] ]== literal[string] )
keyword[return] ( identifier[active] , keyword[False] )
keyword[else] :
identifier[active] =( identifier[self] . identifier[_state] [ literal[string] ][ identifier[sprintIdx] ][ literal[string] ]== literal[string] )
keyword[if] keyword[not] identifier[active] :
keyword[return] ( identifier[active] , keyword[False] )
identifier[activeSwarmIds] = identifier[self] . identifier[getActiveSwarms] ( identifier[sprintIdx] )
identifier[swarmSizes] =[ identifier[self] . identifier[_hsObj] . identifier[_resultsDB] . identifier[getParticleInfos] ( identifier[swarmId] ,
identifier[matured] = keyword[False] )[ literal[int] ] keyword[for] identifier[swarmId] keyword[in] identifier[activeSwarmIds] ]
identifier[notFullSwarms] =[ identifier[len] ( identifier[swarm] ) keyword[for] identifier[swarm] keyword[in] identifier[swarmSizes] keyword[if] identifier[len] ( identifier[swarm] )< identifier[self] . identifier[_hsObj] . identifier[_minParticlesPerSwarm] ]
keyword[if] identifier[len] ( identifier[notFullSwarms] )> literal[int] :
keyword[return] ( keyword[True] , keyword[False] )
keyword[if] identifier[self] . identifier[_state] [ literal[string] ] keyword[is] keyword[not] keyword[None] :
keyword[return] ( keyword[False] , keyword[True] )
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_fixedFields] keyword[is] keyword[not] keyword[None] :
keyword[return] ( keyword[False] , keyword[True] )
keyword[if] identifier[sprintIdx] > literal[int] keyword[and] identifier[self] . identifier[_state] [ literal[string] ][ identifier[sprintIdx] - literal[int] ][ literal[string] ]== literal[string] :
( identifier[bestModelId] , identifier[_] )= identifier[self] . identifier[bestModelInCompletedSprint] ( identifier[sprintIdx] - literal[int] )
( identifier[particleState] , identifier[_] , identifier[_] , identifier[_] , identifier[_] )= identifier[self] . identifier[_hsObj] . identifier[_resultsDB] . identifier[getParticleInfo] (
identifier[bestModelId] )
identifier[bestSwarmId] = identifier[particleState] [ literal[string] ]
identifier[baseEncoderSets] =[ identifier[bestSwarmId] . identifier[split] ( literal[string] )]
keyword[else] :
identifier[bestSwarmId] = keyword[None]
identifier[particleState] = keyword[None]
identifier[baseEncoderSets] =[]
keyword[for] identifier[swarmId] keyword[in] identifier[self] . identifier[getNonKilledSwarms] ( identifier[sprintIdx] - literal[int] ):
identifier[baseEncoderSets] . identifier[append] ( identifier[swarmId] . identifier[split] ( literal[string] ))
identifier[encoderAddSet] =[]
identifier[limitFields] = keyword[False]
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_maxBranching] > literal[int] keyword[or] identifier[self] . identifier[_hsObj] . identifier[_minFieldContribution] >= literal[int] :
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_searchType] == identifier[HsSearchType] . identifier[temporal] keyword[or] identifier[self] . identifier[_hsObj] . identifier[_searchType] == identifier[HsSearchType] . identifier[classification] :
keyword[if] identifier[sprintIdx] >= literal[int] :
identifier[limitFields] = keyword[True]
identifier[baseSprintIdx] = literal[int]
keyword[elif] identifier[self] . identifier[_hsObj] . identifier[_searchType] == identifier[HsSearchType] . identifier[legacyTemporal] :
keyword[if] identifier[sprintIdx] >= literal[int] :
identifier[limitFields] = keyword[True]
identifier[baseSprintIdx] = literal[int]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] %( identifier[self] . identifier[_hsObj] . identifier[_searchType] ))
keyword[if] identifier[limitFields] :
identifier[pctFieldContributions] , identifier[absFieldContributions] = identifier[self] . identifier[getFieldContributions] ()
identifier[toRemove] =[]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[self] . identifier[_hsObj] . identifier[_minFieldContribution] ))
keyword[for] identifier[fieldname] keyword[in] identifier[pctFieldContributions] :
keyword[if] identifier[pctFieldContributions] [ identifier[fieldname] ]< identifier[self] . identifier[_hsObj] . identifier[_minFieldContribution] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[fieldname] ))
identifier[toRemove] . identifier[append] ( identifier[self] . identifier[getEncoderKeyFromName] ( identifier[fieldname] ))
keyword[else] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[fieldname] ))
identifier[swarms] = identifier[self] . identifier[_state] [ literal[string] ]
identifier[sprintSwarms] =[( identifier[swarm] , identifier[swarms] [ identifier[swarm] ][ literal[string] ]) keyword[for] identifier[swarm] keyword[in] identifier[swarms] keyword[if] identifier[swarms] [ identifier[swarm] ][ literal[string] ]== identifier[baseSprintIdx] ]
identifier[sprintSwarms] = identifier[sorted] ( identifier[sprintSwarms] , identifier[key] = identifier[itemgetter] ( literal[int] ))
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_maxBranching] > literal[int] :
identifier[sprintSwarms] = identifier[sprintSwarms] [ literal[int] : identifier[self] . identifier[_hsObj] . identifier[_maxBranching] ]
keyword[for] identifier[swarm] keyword[in] identifier[sprintSwarms] :
identifier[swarmEncoders] = identifier[swarm] [ literal[int] ]. identifier[split] ( literal[string] )
keyword[for] identifier[encoder] keyword[in] identifier[swarmEncoders] :
keyword[if] keyword[not] identifier[encoder] keyword[in] identifier[encoderAddSet] :
identifier[encoderAddSet] . identifier[append] ( identifier[encoder] )
identifier[encoderAddSet] =[ identifier[encoder] keyword[for] identifier[encoder] keyword[in] identifier[encoderAddSet] keyword[if] keyword[not] identifier[str] ( identifier[encoder] ) keyword[in] identifier[toRemove] ]
keyword[else] :
identifier[encoderAddSet] = identifier[self] . identifier[_hsObj] . identifier[_encoderNames]
identifier[newSwarmIds] = identifier[set] ()
keyword[if] ( identifier[self] . identifier[_hsObj] . identifier[_searchType] == identifier[HsSearchType] . identifier[temporal] keyword[or] identifier[self] . identifier[_hsObj] . identifier[_searchType] == identifier[HsSearchType] . identifier[legacyTemporal] ) keyword[and] identifier[sprintIdx] == literal[int] keyword[and] ( identifier[self] . identifier[_hsObj] . identifier[_tryAll3FieldCombinations] keyword[or] identifier[self] . identifier[_hsObj] . identifier[_tryAll3FieldCombinationsWTimestamps] ):
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_tryAll3FieldCombinations] :
identifier[newEncoders] = identifier[set] ( identifier[self] . identifier[_hsObj] . identifier[_encoderNames] )
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_predictedFieldEncoder] keyword[in] identifier[newEncoders] :
identifier[newEncoders] . identifier[remove] ( identifier[self] . identifier[_hsObj] . identifier[_predictedFieldEncoder] )
keyword[else] :
identifier[newEncoders] = identifier[set] ( identifier[encoderAddSet] )
keyword[if] identifier[self] . identifier[_hsObj] . identifier[_predictedFieldEncoder] keyword[in] identifier[newEncoders] :
identifier[newEncoders] . identifier[remove] ( identifier[self] . identifier[_hsObj] . identifier[_predictedFieldEncoder] )
keyword[for] identifier[encoder] keyword[in] identifier[self] . identifier[_hsObj] . identifier[_encoderNames] :
keyword[if] identifier[encoder] . identifier[endswith] ( literal[string] ) keyword[or] identifier[encoder] . identifier[endswith] ( literal[string] ) keyword[or] identifier[encoder] . identifier[endswith] ( literal[string] ):
identifier[newEncoders] . identifier[add] ( identifier[encoder] )
identifier[allCombos] = identifier[list] ( identifier[itertools] . identifier[combinations] ( identifier[newEncoders] , literal[int] ))
keyword[for] identifier[combo] keyword[in] identifier[allCombos] :
identifier[newSet] = identifier[list] ( identifier[combo] )
identifier[newSet] . identifier[append] ( identifier[self] . identifier[_hsObj] . identifier[_predictedFieldEncoder] )
identifier[newSet] . identifier[sort] ()
identifier[newSwarmId] = literal[string] . identifier[join] ( identifier[newSet] )
keyword[if] identifier[newSwarmId] keyword[not] keyword[in] identifier[self] . identifier[_state] [ literal[string] ]:
identifier[newSwarmIds] . identifier[add] ( identifier[newSwarmId] )
keyword[if] ( identifier[len] ( identifier[self] . identifier[getActiveSwarms] ( identifier[sprintIdx] - literal[int] ))> literal[int] ):
keyword[break]
keyword[else] :
keyword[for] identifier[baseEncoderSet] keyword[in] identifier[baseEncoderSets] :
keyword[for] identifier[encoder] keyword[in] identifier[encoderAddSet] :
keyword[if] identifier[encoder] keyword[not] keyword[in] identifier[self] . identifier[_state] [ literal[string] ] keyword[and] identifier[encoder] keyword[not] keyword[in] identifier[baseEncoderSet] :
identifier[newSet] = identifier[list] ( identifier[baseEncoderSet] )
identifier[newSet] . identifier[append] ( identifier[encoder] )
identifier[newSet] . identifier[sort] ()
identifier[newSwarmId] = literal[string] . identifier[join] ( identifier[newSet] )
keyword[if] identifier[newSwarmId] keyword[not] keyword[in] identifier[self] . identifier[_state] [ literal[string] ]:
identifier[newSwarmIds] . identifier[add] ( identifier[newSwarmId] )
keyword[if] ( identifier[len] ( identifier[self] . identifier[getActiveSwarms] ( identifier[sprintIdx] - literal[int] ))> literal[int] ):
keyword[break]
identifier[newSwarmIds] = identifier[sorted] ( identifier[newSwarmIds] )
keyword[if] identifier[len] ( identifier[newSwarmIds] )== literal[int] :
keyword[if] identifier[len] ( identifier[self] . identifier[getAllSwarms] ( identifier[sprintIdx] ))> literal[int] :
keyword[return] ( keyword[True] , keyword[False] )
keyword[else] :
keyword[return] ( keyword[False] , keyword[True] )
identifier[self] . identifier[_dirty] = keyword[True]
keyword[if] identifier[len] ( identifier[self] . identifier[_state] [ literal[string] ])== identifier[sprintIdx] :
identifier[self] . identifier[_state] [ literal[string] ]. identifier[append] ({ literal[string] : literal[string] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] })
keyword[for] identifier[swarmId] keyword[in] identifier[newSwarmIds] :
identifier[self] . identifier[_state] [ literal[string] ][ identifier[swarmId] ]={ literal[string] : literal[string] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : identifier[sprintIdx] }
identifier[self] . identifier[_state] [ literal[string] ]= identifier[self] . identifier[getActiveSwarms] ()
identifier[success] = identifier[self] . identifier[writeStateToDB] ()
keyword[if] identifier[success] :
keyword[return] ( keyword[True] , keyword[False] ) | def isSprintActive(self, sprintIdx):
"""If the given sprint exists and is active, return active=True.
If the sprint does not exist yet, this call will create it (and return
active=True). If it already exists, but is completing or complete, return
active=False.
If sprintIdx is past the end of the possible sprints, return
active=False, noMoreSprints=True
IMPORTANT: When speculative particles are enabled, this call has some
special processing to handle speculative sprints:
* When creating a new speculative sprint (creating sprint N before
sprint N-1 has completed), it initially only puts in only ONE swarm into
the sprint.
* Every time it is asked if sprint N is active, it also checks to see if
it is time to add another swarm to the sprint, and adds a new swarm if
appropriate before returning active=True
* We decide it is time to add a new swarm to a speculative sprint when ALL
of the currently active swarms in the sprint have all the workers they
need (number of running (not mature) particles is _minParticlesPerSwarm).
This means that we have capacity to run additional particles in a new
swarm.
It is expected that the sprints will be checked IN ORDER from 0 on up. (It
is an error not to) The caller should always try to allocate from the first
active sprint it finds. If it can't, then it can call this again to
find/create the next active sprint.
Parameters:
---------------------------------------------------------------------
retval: (active, noMoreSprints)
active: True if the given sprint is active
noMoreSprints: True if there are no more sprints possible
"""
while True:
numExistingSprints = len(self._state['sprints'])
# If this sprint already exists, see if it is active
if sprintIdx <= numExistingSprints - 1:
# With speculation off, it's simple, just return whether or not the
# asked for sprint has active status
if not self._hsObj._speculativeParticles:
active = self._state['sprints'][sprintIdx]['status'] == 'active'
return (active, False) # depends on [control=['if'], data=[]]
else:
# With speculation on, if the sprint is still marked active, we also
# need to see if it's time to add a new swarm to it.
active = self._state['sprints'][sprintIdx]['status'] == 'active'
if not active:
return (active, False) # depends on [control=['if'], data=[]]
# See if all of the existing swarms are at capacity (have all the
# workers they need):
activeSwarmIds = self.getActiveSwarms(sprintIdx)
swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId, matured=False)[0] for swarmId in activeSwarmIds]
notFullSwarms = [len(swarm) for swarm in swarmSizes if len(swarm) < self._hsObj._minParticlesPerSwarm]
# If some swarms have room return that the swarm is active.
if len(notFullSwarms) > 0:
return (True, False) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['sprintIdx']]
# If the existing swarms are at capacity, we will fall through to the
# logic below which tries to add a new swarm to the sprint.
# Stop creating new sprints?
if self._state['lastGoodSprint'] is not None:
return (False, True) # depends on [control=['if'], data=[]]
# if fixedFields is set, we are running a fast swarm and only run sprint0
if self._hsObj._fixedFields is not None:
return (False, True) # depends on [control=['if'], data=[]]
# ----------------------------------------------------------------------
# Get the best model (if there is one) from the prior sprint. That gives
# us the base encoder set for the next sprint. For sprint zero make sure
# it does not take the last sprintidx because of wrapping.
if sprintIdx > 0 and self._state['sprints'][sprintIdx - 1]['status'] == 'completed':
(bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx - 1)
(particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(bestModelId)
bestSwarmId = particleState['swarmId']
baseEncoderSets = [bestSwarmId.split('.')] # depends on [control=['if'], data=[]]
else:
# If there is no best model yet, then use all encoder sets from the prior
# sprint that were not killed
bestSwarmId = None
particleState = None
# Build up more combinations, using ALL of the sets in the current
# sprint.
baseEncoderSets = []
for swarmId in self.getNonKilledSwarms(sprintIdx - 1):
baseEncoderSets.append(swarmId.split('.')) # depends on [control=['for'], data=['swarmId']]
# ----------------------------------------------------------------------
# Which encoders should we add to the current base set?
encoderAddSet = []
# If we have constraints on how many fields we carry forward into
# subsequent sprints (either nupic.hypersearch.max.field.branching or
# nupic.hypersearch.min.field.contribution was set), then be more
# picky about which fields we add in.
limitFields = False
if self._hsObj._maxBranching > 0 or self._hsObj._minFieldContribution >= 0:
if self._hsObj._searchType == HsSearchType.temporal or self._hsObj._searchType == HsSearchType.classification:
if sprintIdx >= 1:
limitFields = True
baseSprintIdx = 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
if sprintIdx >= 2:
limitFields = True
baseSprintIdx = 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Unimplemented search type %s' % self._hsObj._searchType) # depends on [control=['if'], data=[]]
# Only add top _maxBranching encoders to the swarms?
if limitFields:
# Get field contributions to filter added fields
(pctFieldContributions, absFieldContributions) = self.getFieldContributions()
toRemove = []
self.logger.debug('FieldContributions min: %s' % self._hsObj._minFieldContribution)
for fieldname in pctFieldContributions:
if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
self.logger.debug('FieldContributions removing: %s' % fieldname)
toRemove.append(self.getEncoderKeyFromName(fieldname)) # depends on [control=['if'], data=[]]
else:
self.logger.debug('FieldContributions keeping: %s' % fieldname) # depends on [control=['for'], data=['fieldname']]
# Grab the top maxBranching base sprint swarms.
swarms = self._state['swarms']
sprintSwarms = [(swarm, swarms[swarm]['bestErrScore']) for swarm in swarms if swarms[swarm]['sprintIdx'] == baseSprintIdx]
sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
if self._hsObj._maxBranching > 0:
sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching] # depends on [control=['if'], data=[]]
# Create encoder set to generate further swarms.
for swarm in sprintSwarms:
swarmEncoders = swarm[0].split('.')
for encoder in swarmEncoders:
if not encoder in encoderAddSet:
encoderAddSet.append(encoder) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['encoder']] # depends on [control=['for'], data=['swarm']]
encoderAddSet = [encoder for encoder in encoderAddSet if not str(encoder) in toRemove] # depends on [control=['if'], data=[]]
else:
# If no limit on the branching or min contribution, simply use all of the
# encoders.
encoderAddSet = self._hsObj._encoderNames
# -----------------------------------------------------------------------
# Build up the new encoder combinations for the next sprint.
newSwarmIds = set()
# See if the caller wants to try more extensive field combinations with
# 3 fields.
if (self._hsObj._searchType == HsSearchType.temporal or self._hsObj._searchType == HsSearchType.legacyTemporal) and sprintIdx == 2 and (self._hsObj._tryAll3FieldCombinations or self._hsObj._tryAll3FieldCombinationsWTimestamps):
if self._hsObj._tryAll3FieldCombinations:
newEncoders = set(self._hsObj._encoderNames)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder) # depends on [control=['if'], data=['newEncoders']] # depends on [control=['if'], data=[]]
else:
# Just make sure the timestamp encoders are part of the mix
newEncoders = set(encoderAddSet)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder) # depends on [control=['if'], data=['newEncoders']]
for encoder in self._hsObj._encoderNames:
if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') or encoder.endswith('_dayOfWeek'):
newEncoders.add(encoder) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['encoder']]
allCombos = list(itertools.combinations(newEncoders, 2))
for combo in allCombos:
newSet = list(combo)
newSet.append(self._hsObj._predictedFieldEncoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If a speculative sprint, only add the first encoder, if not add
# all of them.
if len(self.getActiveSwarms(sprintIdx - 1)) > 0:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['newSwarmId']] # depends on [control=['for'], data=['combo']] # depends on [control=['if'], data=[]]
else:
# Else, we only build up by adding 1 new encoder to the best combination(s)
# we've seen from the prior sprint
for baseEncoderSet in baseEncoderSets:
for encoder in encoderAddSet:
if encoder not in self._state['blackListedEncoders'] and encoder not in baseEncoderSet:
newSet = list(baseEncoderSet)
newSet.append(encoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If a speculative sprint, only add the first encoder, if not add
# all of them.
if len(self.getActiveSwarms(sprintIdx - 1)) > 0:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['newSwarmId']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['encoder']] # depends on [control=['for'], data=['baseEncoderSet']]
# ----------------------------------------------------------------------
# Sort the new swarm Ids
newSwarmIds = sorted(newSwarmIds)
# If no more swarms can be found for this sprint...
if len(newSwarmIds) == 0:
# if sprint is not an empty sprint return that it is active but do not
# add anything to it.
if len(self.getAllSwarms(sprintIdx)) > 0:
return (True, False) # depends on [control=['if'], data=[]]
else:
# If this is an empty sprint and we couldn't find any new swarms to
# add (only bad fields are remaining), the search is over
return (False, True) # depends on [control=['if'], data=[]]
# Add this sprint and the swarms that are in it to our state
self._dirty = True
# Add in the new sprint if necessary
if len(self._state['sprints']) == sprintIdx:
self._state['sprints'].append({'status': 'active', 'bestModelId': None, 'bestErrScore': None}) # depends on [control=['if'], data=[]]
# Add in the new swarm(s) to the sprint
for swarmId in newSwarmIds:
self._state['swarms'][swarmId] = {'status': 'active', 'bestModelId': None, 'bestErrScore': None, 'sprintIdx': sprintIdx} # depends on [control=['for'], data=['swarmId']]
# Update the list of active swarms
self._state['activeSwarms'] = self.getActiveSwarms()
# Try to set new state
success = self.writeStateToDB()
# Return result if successful
if success:
return (True, False) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def _initialize_operations(self):
"""Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation)
"""
if isinstance(self._graph, tf.Graph):
return self._graph.get_operations()
elif isinstance(self._graph, mtf.Graph):
return self._graph.operations
else:
raise TypeError('Graph is not tf.Graph or mtf.Graph: {}'
.format(type(self._graph))) | def function[_initialize_operations, parameter[self]]:
constant[Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation)
]
if call[name[isinstance], parameter[name[self]._graph, name[tf].Graph]] begin[:]
return[call[name[self]._graph.get_operations, parameter[]]] | keyword[def] identifier[_initialize_operations] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_graph] , identifier[tf] . identifier[Graph] ):
keyword[return] identifier[self] . identifier[_graph] . identifier[get_operations] ()
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[_graph] , identifier[mtf] . identifier[Graph] ):
keyword[return] identifier[self] . identifier[_graph] . identifier[operations]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
. identifier[format] ( identifier[type] ( identifier[self] . identifier[_graph] ))) | def _initialize_operations(self):
"""Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation)
"""
if isinstance(self._graph, tf.Graph):
return self._graph.get_operations() # depends on [control=['if'], data=[]]
elif isinstance(self._graph, mtf.Graph):
return self._graph.operations # depends on [control=['if'], data=[]]
else:
raise TypeError('Graph is not tf.Graph or mtf.Graph: {}'.format(type(self._graph))) |
def set_object_position(self, object_name, position=None):
    """ Sets the object position.

    :param object_name: name of the scene object to move
    :param position: [x, y, z] target coordinates; defaults to the origin
    :return: result of the remote API call
    """
    # Avoid a shared mutable default argument: build the default per call.
    if position is None:
        position = [0, 0, 0]
    h = self.get_object_handle(object_name)
    return self.call_remote_api('simxSetObjectPosition',
                                h, -1, position,
                                sending=True)
constant[ Sets the object position. ]
variable[h] assign[=] call[name[self].get_object_handle, parameter[name[object_name]]]
return[call[name[self].call_remote_api, parameter[constant[simxSetObjectPosition], name[h], <ast.UnaryOp object at 0x7da1b138e8c0>, name[position]]]] | keyword[def] identifier[set_object_position] ( identifier[self] , identifier[object_name] , identifier[position] =[ literal[int] , literal[int] , literal[int] ]):
literal[string]
identifier[h] = identifier[self] . identifier[get_object_handle] ( identifier[object_name] )
keyword[return] identifier[self] . identifier[call_remote_api] ( literal[string] ,
identifier[h] ,- literal[int] , identifier[position] ,
identifier[sending] = keyword[True] ) | def set_object_position(self, object_name, position=[0, 0, 0]):
""" Sets the object position. """
h = self.get_object_handle(object_name)
return self.call_remote_api('simxSetObjectPosition', h, -1, position, sending=True) |
def get_dataset_file(self, dataset_id, file_path, version=None):
    """
    Retrieves a dataset file matching a provided file path

    :param dataset_id: The id of the dataset to retrieve file from
    :type dataset_id: int
    :param file_path: The file path within the dataset
    :type file_path: str
    :param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched
    :type version: int
    :return: A dataset file matching the filepath provided
    :rtype: :class:`DatasetFile`
    """
    # Anchor the path so only an exact match is returned.
    exact_path_pattern = "^{}$".format(file_path)
    matches = self.get_dataset_files(dataset_id, exact_path_pattern,
                                     version_number=version)
    return matches[0]
constant[
Retrieves a dataset file matching a provided file path
:param dataset_id: The id of the dataset to retrieve file from
:type dataset_id: int
:param file_path: The file path within the dataset
:type file_path: str
:param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched
:type version: int
:return: A dataset file matching the filepath provided
:rtype: :class:`DatasetFile`
]
return[call[call[name[self].get_dataset_files, parameter[name[dataset_id], call[constant[^{}$].format, parameter[name[file_path]]]]]][constant[0]]] | keyword[def] identifier[get_dataset_file] ( identifier[self] , identifier[dataset_id] , identifier[file_path] , identifier[version] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[get_dataset_files] ( identifier[dataset_id] , literal[string] . identifier[format] ( identifier[file_path] ), identifier[version_number] = identifier[version] )[ literal[int] ] | def get_dataset_file(self, dataset_id, file_path, version=None):
"""
Retrieves a dataset file matching a provided file path
:param dataset_id: The id of the dataset to retrieve file from
:type dataset_id: int
:param file_path: The file path within the dataset
:type file_path: str
:param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched
:type version: int
:return: A dataset file matching the filepath provided
:rtype: :class:`DatasetFile`
"""
return self.get_dataset_files(dataset_id, '^{}$'.format(file_path), version_number=version)[0] |
def _bfs_subgraph(self, start_id, forward=True):
    """
    Private method creates a subgraph in a bfs order.

    The forward parameter selects the traversal direction: outgoing
    neighbours for a forward walk, incoming neighbours for a backward one.
    """
    if forward:
        traverse, neighbours = self.forw_bfs, self.out_nbrs
    else:
        traverse, neighbours = self.back_bfs, self.inc_nbrs
    subgraph = Graph()
    visited = traverse(start_id)
    # First pass: register every reachable node.
    for node_id in visited:
        subgraph.add_node(node_id)
    # Second pass: add the edges between registered nodes.
    for node_id in visited:
        for adjacent in neighbours(node_id):
            subgraph.add_edge(node_id, adjacent)
    return subgraph
constant[
Private method creates a subgraph in a bfs order.
The forward parameter specifies whether it is a forward or backward
traversal.
]
if name[forward] begin[:]
variable[get_bfs] assign[=] name[self].forw_bfs
variable[get_nbrs] assign[=] name[self].out_nbrs
variable[g] assign[=] call[name[Graph], parameter[]]
variable[bfs_list] assign[=] call[name[get_bfs], parameter[name[start_id]]]
for taget[name[node]] in starred[name[bfs_list]] begin[:]
call[name[g].add_node, parameter[name[node]]]
for taget[name[node]] in starred[name[bfs_list]] begin[:]
for taget[name[nbr_id]] in starred[call[name[get_nbrs], parameter[name[node]]]] begin[:]
call[name[g].add_edge, parameter[name[node], name[nbr_id]]]
return[name[g]] | keyword[def] identifier[_bfs_subgraph] ( identifier[self] , identifier[start_id] , identifier[forward] = keyword[True] ):
literal[string]
keyword[if] identifier[forward] :
identifier[get_bfs] = identifier[self] . identifier[forw_bfs]
identifier[get_nbrs] = identifier[self] . identifier[out_nbrs]
keyword[else] :
identifier[get_bfs] = identifier[self] . identifier[back_bfs]
identifier[get_nbrs] = identifier[self] . identifier[inc_nbrs]
identifier[g] = identifier[Graph] ()
identifier[bfs_list] = identifier[get_bfs] ( identifier[start_id] )
keyword[for] identifier[node] keyword[in] identifier[bfs_list] :
identifier[g] . identifier[add_node] ( identifier[node] )
keyword[for] identifier[node] keyword[in] identifier[bfs_list] :
keyword[for] identifier[nbr_id] keyword[in] identifier[get_nbrs] ( identifier[node] ):
identifier[g] . identifier[add_edge] ( identifier[node] , identifier[nbr_id] )
keyword[return] identifier[g] | def _bfs_subgraph(self, start_id, forward=True):
"""
Private method creates a subgraph in a bfs order.
The forward parameter specifies whether it is a forward or backward
traversal.
"""
if forward:
get_bfs = self.forw_bfs
get_nbrs = self.out_nbrs # depends on [control=['if'], data=[]]
else:
get_bfs = self.back_bfs
get_nbrs = self.inc_nbrs
g = Graph()
bfs_list = get_bfs(start_id)
for node in bfs_list:
g.add_node(node) # depends on [control=['for'], data=['node']]
for node in bfs_list:
for nbr_id in get_nbrs(node):
g.add_edge(node, nbr_id) # depends on [control=['for'], data=['nbr_id']] # depends on [control=['for'], data=['node']]
return g |
def count_rows(self, table, cols='*'):
    """Get the number of rows in a particular table."""
    statement = 'SELECT COUNT({0}) FROM {1}'.format(join_cols(cols), wrap(table))
    count = self.fetch(statement)
    # Normalize a missing result to zero.
    if count is None:
        return 0
    return count
constant[Get the number of rows in a particular table.]
variable[query] assign[=] call[constant[SELECT COUNT({0}) FROM {1}].format, parameter[call[name[join_cols], parameter[name[cols]]], call[name[wrap], parameter[name[table]]]]]
variable[result] assign[=] call[name[self].fetch, parameter[name[query]]]
return[<ast.IfExp object at 0x7da1b0b7d420>] | keyword[def] identifier[count_rows] ( identifier[self] , identifier[table] , identifier[cols] = literal[string] ):
literal[string]
identifier[query] = literal[string] . identifier[format] ( identifier[join_cols] ( identifier[cols] ), identifier[wrap] ( identifier[table] ))
identifier[result] = identifier[self] . identifier[fetch] ( identifier[query] )
keyword[return] identifier[result] keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] keyword[else] literal[int] | def count_rows(self, table, cols='*'):
"""Get the number of rows in a particular table."""
query = 'SELECT COUNT({0}) FROM {1}'.format(join_cols(cols), wrap(table))
result = self.fetch(query)
return result if result is not None else 0 |
def find_expected_error(self, delta_params='calc'):
    """
    Returns the error expected after an update if the model were linear.

    Parameters
    ----------
    delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
        The relative change in parameters. If 'calc', uses update
        calculated from the current damping, J, etc; if 'perfect',
        uses the update calculated with zero damping.

    Returns
    -------
    numpy.float64
        The expected error after the update with `delta_params`
    """
    grad = self.calc_grad()
    # Compare via list() so an ndarray argument never hits the ambiguous
    # elementwise array-vs-string equality.
    sentinel = list(delta_params)
    if sentinel == list('calc') or sentinel == list('perfect'):
        if delta_params == 'perfect':
            jtj = self.JTJ
        else:
            jtj = self._calc_damped_jtj(self.JTJ)
        delta_params = self._calc_lm_step(jtj, self.calc_grad())
    # If the model were linear, then the cost would be quadratic,
    # with Hessian 2*`self.JTJ` and gradient `grad`.
    linear_term = np.dot(grad, delta_params)
    quadratic_term = np.dot(np.dot(self.JTJ, delta_params), delta_params)
    return self.error + linear_term + quadratic_term
constant[
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
]
variable[grad] assign[=] call[name[self].calc_grad, parameter[]]
if compare[call[name[list], parameter[name[delta_params]]] in list[[<ast.Call object at 0x7da204567370>, <ast.Call object at 0x7da204565bd0>]]] begin[:]
variable[jtj] assign[=] <ast.IfExp object at 0x7da2045645e0>
variable[delta_params] assign[=] call[name[self]._calc_lm_step, parameter[name[jtj], call[name[self].calc_grad, parameter[]]]]
variable[expected_error] assign[=] binary_operation[binary_operation[name[self].error + call[name[np].dot, parameter[name[grad], name[delta_params]]]] + call[name[np].dot, parameter[call[name[np].dot, parameter[name[self].JTJ, name[delta_params]]], name[delta_params]]]]
return[name[expected_error]] | keyword[def] identifier[find_expected_error] ( identifier[self] , identifier[delta_params] = literal[string] ):
literal[string]
identifier[grad] = identifier[self] . identifier[calc_grad] ()
keyword[if] identifier[list] ( identifier[delta_params] ) keyword[in] [ identifier[list] ( literal[string] ), identifier[list] ( literal[string] )]:
identifier[jtj] =( identifier[self] . identifier[JTJ] keyword[if] identifier[delta_params] == literal[string] keyword[else]
identifier[self] . identifier[_calc_damped_jtj] ( identifier[self] . identifier[JTJ] ))
identifier[delta_params] = identifier[self] . identifier[_calc_lm_step] ( identifier[jtj] , identifier[self] . identifier[calc_grad] ())
identifier[expected_error] =( identifier[self] . identifier[error] + identifier[np] . identifier[dot] ( identifier[grad] , identifier[delta_params] )+
identifier[np] . identifier[dot] ( identifier[np] . identifier[dot] ( identifier[self] . identifier[JTJ] , identifier[delta_params] ), identifier[delta_params] ))
keyword[return] identifier[expected_error] | def find_expected_error(self, delta_params='calc'):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
grad = self.calc_grad()
if list(delta_params) in [list('calc'), list('perfect')]:
jtj = self.JTJ if delta_params == 'perfect' else self._calc_damped_jtj(self.JTJ)
delta_params = self._calc_lm_step(jtj, self.calc_grad()) # depends on [control=['if'], data=[]]
#If the model were linear, then the cost would be quadratic,
#with Hessian 2*`self.JTJ` and gradient `grad`
expected_error = self.error + np.dot(grad, delta_params) + np.dot(np.dot(self.JTJ, delta_params), delta_params)
return expected_error |
def show_cationpi(self):
    """Visualizes cation-pi interactions"""
    bond_group = self.getPseudoBondGroup("Cation-Pi-%i" % self.tid,
                                         associateWith=[self.model])
    bond_group.lineWidth = 3
    bond_group.lineType = self.chimera.Dash
    for cation in self.plcomplex.pication:
        model = self.model
        residue = model.newResidue("pseudoatoms", " ", 1, " ")
        # Pseudoatom marking the positive charge centre.
        charge_atom = model.newAtom("CHARGE", self.chimera.Element("CHARGE"))
        x, y, z = cation.charge_center
        charge_atom.setCoord(self.chimera.Coord(x, y, z))
        residue.addAtom(charge_atom)
        # Pseudoatom marking the aromatic ring centre.
        ring_atom = model.newAtom("CENTROID", self.chimera.Element("CENTROID"))
        x, y, z = cation.ring_center
        ring_atom.setCoord(self.chimera.Coord(x, y, z))
        residue.addAtom(ring_atom)
        # Dashed pseudobond between the two centres represents the contact.
        bond = bond_group.newPseudoBond(ring_atom, charge_atom)
        bond.color = self.colorbyname('orange')
        # Record the interacting atoms on whichever side is the protein.
        if cation.protcharged:
            self.bs_res_ids += cation.charge_atoms
        else:
            self.bs_res_ids += cation.ring_atoms
constant[Visualizes cation-pi interactions]
variable[grp] assign[=] call[name[self].getPseudoBondGroup, parameter[binary_operation[constant[Cation-Pi-%i] <ast.Mod object at 0x7da2590d6920> name[self].tid]]]
name[grp].lineWidth assign[=] constant[3]
name[grp].lineType assign[=] name[self].chimera.Dash
for taget[tuple[[<ast.Name object at 0x7da18ede7280>, <ast.Name object at 0x7da18ede44c0>]]] in starred[call[name[enumerate], parameter[name[self].plcomplex.pication]]] begin[:]
variable[m] assign[=] name[self].model
variable[r] assign[=] call[name[m].newResidue, parameter[constant[pseudoatoms], constant[ ], constant[1], constant[ ]]]
variable[chargecenter] assign[=] call[name[m].newAtom, parameter[constant[CHARGE], call[name[self].chimera.Element, parameter[constant[CHARGE]]]]]
<ast.Tuple object at 0x7da18ede51e0> assign[=] name[cat].charge_center
call[name[chargecenter].setCoord, parameter[call[name[self].chimera.Coord, parameter[name[x], name[y], name[z]]]]]
call[name[r].addAtom, parameter[name[chargecenter]]]
variable[centroid] assign[=] call[name[m].newAtom, parameter[constant[CENTROID], call[name[self].chimera.Element, parameter[constant[CENTROID]]]]]
<ast.Tuple object at 0x7da18ede7b20> assign[=] name[cat].ring_center
call[name[centroid].setCoord, parameter[call[name[self].chimera.Coord, parameter[name[x], name[y], name[z]]]]]
call[name[r].addAtom, parameter[name[centroid]]]
variable[b] assign[=] call[name[grp].newPseudoBond, parameter[name[centroid], name[chargecenter]]]
name[b].color assign[=] call[name[self].colorbyname, parameter[constant[orange]]]
if name[cat].protcharged begin[:]
<ast.AugAssign object at 0x7da18ede79a0> | keyword[def] identifier[show_cationpi] ( identifier[self] ):
literal[string]
identifier[grp] = identifier[self] . identifier[getPseudoBondGroup] ( literal[string] % identifier[self] . identifier[tid] , identifier[associateWith] =[ identifier[self] . identifier[model] ])
identifier[grp] . identifier[lineWidth] = literal[int]
identifier[grp] . identifier[lineType] = identifier[self] . identifier[chimera] . identifier[Dash]
keyword[for] identifier[i] , identifier[cat] keyword[in] identifier[enumerate] ( identifier[self] . identifier[plcomplex] . identifier[pication] ):
identifier[m] = identifier[self] . identifier[model]
identifier[r] = identifier[m] . identifier[newResidue] ( literal[string] , literal[string] , literal[int] , literal[string] )
identifier[chargecenter] = identifier[m] . identifier[newAtom] ( literal[string] , identifier[self] . identifier[chimera] . identifier[Element] ( literal[string] ))
identifier[x] , identifier[y] , identifier[z] = identifier[cat] . identifier[charge_center]
identifier[chargecenter] . identifier[setCoord] ( identifier[self] . identifier[chimera] . identifier[Coord] ( identifier[x] , identifier[y] , identifier[z] ))
identifier[r] . identifier[addAtom] ( identifier[chargecenter] )
identifier[centroid] = identifier[m] . identifier[newAtom] ( literal[string] , identifier[self] . identifier[chimera] . identifier[Element] ( literal[string] ))
identifier[x] , identifier[y] , identifier[z] = identifier[cat] . identifier[ring_center]
identifier[centroid] . identifier[setCoord] ( identifier[self] . identifier[chimera] . identifier[Coord] ( identifier[x] , identifier[y] , identifier[z] ))
identifier[r] . identifier[addAtom] ( identifier[centroid] )
identifier[b] = identifier[grp] . identifier[newPseudoBond] ( identifier[centroid] , identifier[chargecenter] )
identifier[b] . identifier[color] = identifier[self] . identifier[colorbyname] ( literal[string] )
keyword[if] identifier[cat] . identifier[protcharged] :
identifier[self] . identifier[bs_res_ids] += identifier[cat] . identifier[charge_atoms]
keyword[else] :
identifier[self] . identifier[bs_res_ids] += identifier[cat] . identifier[ring_atoms] | def show_cationpi(self):
"""Visualizes cation-pi interactions"""
grp = self.getPseudoBondGroup('Cation-Pi-%i' % self.tid, associateWith=[self.model])
grp.lineWidth = 3
grp.lineType = self.chimera.Dash
for (i, cat) in enumerate(self.plcomplex.pication):
m = self.model
r = m.newResidue('pseudoatoms', ' ', 1, ' ')
chargecenter = m.newAtom('CHARGE', self.chimera.Element('CHARGE'))
(x, y, z) = cat.charge_center
chargecenter.setCoord(self.chimera.Coord(x, y, z))
r.addAtom(chargecenter)
centroid = m.newAtom('CENTROID', self.chimera.Element('CENTROID'))
(x, y, z) = cat.ring_center
centroid.setCoord(self.chimera.Coord(x, y, z))
r.addAtom(centroid)
b = grp.newPseudoBond(centroid, chargecenter)
b.color = self.colorbyname('orange')
if cat.protcharged:
self.bs_res_ids += cat.charge_atoms # depends on [control=['if'], data=[]]
else:
self.bs_res_ids += cat.ring_atoms # depends on [control=['for'], data=[]] |
def parse_document(graph: BELGraph,
                   enumerated_lines: Iterable[Tuple[int, str]],
                   metadata_parser: MetadataParser,
                   ) -> None:
    """Parse the lines in the document section of a BEL script.

    Recoverable version-format problems are recorded as warnings on the
    graph; any other parse failure raises. Afterwards, every required
    metadata key that is still missing is recorded as a warning, and the
    collected metadata is copied onto ``graph.document``.

    :param graph: graph that collects warnings and document metadata
    :param enumerated_lines: (line number, line text) pairs for this section
    :param metadata_parser: parser that accumulates ``document_metadata``
    :raises MalformedMetadataException: for any non-version parse failure
    """
    parse_document_start_time = time.time()
    for line_number, line in enumerated_lines:
        try:
            metadata_parser.parseString(line, line_number=line_number)
        except VersionFormatWarning as exc:
            # Recoverable: record the warning and keep parsing later lines.
            _log_parse_exception(graph, exc)
            graph.add_warning(exc)
        except Exception as e:
            # Fatal: wrap and re-raise, chaining the original cause.
            exc = MalformedMetadataException(line_number, line, 0)
            _log_parse_exception(graph, exc)
            raise exc from e
    for required in REQUIRED_METADATA:
        required_metadatum = metadata_parser.document_metadata.get(required)
        if required_metadatum is not None:
            continue
        # Translate the internal key back to the user-facing document key.
        required_metadatum_key = INVERSE_DOCUMENT_KEYS[required]
        # This has to be insert since it needs to go on the front!
        exc = MissingMetadataException.make(required_metadatum_key)
        graph.warnings.insert(0, (None, exc, {}))
        _log_parse_exception(graph, exc)
    graph.document.update(metadata_parser.document_metadata)
    log.info('Finished parsing document section in %.02f seconds', time.time() - parse_document_start_time)
constant[Parse the lines in the document section of a BEL script.]
variable[parse_document_start_time] assign[=] call[name[time].time, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0e80cd0>, <ast.Name object at 0x7da1b0e80640>]]] in starred[name[enumerated_lines]] begin[:]
<ast.Try object at 0x7da1b0e83c70>
for taget[name[required]] in starred[name[REQUIRED_METADATA]] begin[:]
variable[required_metadatum] assign[=] call[name[metadata_parser].document_metadata.get, parameter[name[required]]]
if compare[name[required_metadatum] is_not constant[None]] begin[:]
continue
variable[required_metadatum_key] assign[=] call[name[INVERSE_DOCUMENT_KEYS]][name[required]]
variable[exc] assign[=] call[name[MissingMetadataException].make, parameter[name[required_metadatum_key]]]
call[name[graph].warnings.insert, parameter[constant[0], tuple[[<ast.Constant object at 0x7da1b0cb3ca0>, <ast.Name object at 0x7da1b0cb25f0>, <ast.Dict object at 0x7da1b0cb1ba0>]]]]
call[name[_log_parse_exception], parameter[name[graph], name[exc]]]
call[name[graph].document.update, parameter[name[metadata_parser].document_metadata]]
call[name[log].info, parameter[constant[Finished parsing document section in %.02f seconds], binary_operation[call[name[time].time, parameter[]] - name[parse_document_start_time]]]] | keyword[def] identifier[parse_document] ( identifier[graph] : identifier[BELGraph] ,
identifier[enumerated_lines] : identifier[Iterable] [ identifier[Tuple] [ identifier[int] , identifier[str] ]],
identifier[metadata_parser] : identifier[MetadataParser] ,
)-> keyword[None] :
literal[string]
identifier[parse_document_start_time] = identifier[time] . identifier[time] ()
keyword[for] identifier[line_number] , identifier[line] keyword[in] identifier[enumerated_lines] :
keyword[try] :
identifier[metadata_parser] . identifier[parseString] ( identifier[line] , identifier[line_number] = identifier[line_number] )
keyword[except] identifier[VersionFormatWarning] keyword[as] identifier[exc] :
identifier[_log_parse_exception] ( identifier[graph] , identifier[exc] )
identifier[graph] . identifier[add_warning] ( identifier[exc] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[exc] = identifier[MalformedMetadataException] ( identifier[line_number] , identifier[line] , literal[int] )
identifier[_log_parse_exception] ( identifier[graph] , identifier[exc] )
keyword[raise] identifier[exc] keyword[from] identifier[e]
keyword[for] identifier[required] keyword[in] identifier[REQUIRED_METADATA] :
identifier[required_metadatum] = identifier[metadata_parser] . identifier[document_metadata] . identifier[get] ( identifier[required] )
keyword[if] identifier[required_metadatum] keyword[is] keyword[not] keyword[None] :
keyword[continue]
identifier[required_metadatum_key] = identifier[INVERSE_DOCUMENT_KEYS] [ identifier[required] ]
identifier[exc] = identifier[MissingMetadataException] . identifier[make] ( identifier[required_metadatum_key] )
identifier[graph] . identifier[warnings] . identifier[insert] ( literal[int] ,( keyword[None] , identifier[exc] ,{}))
identifier[_log_parse_exception] ( identifier[graph] , identifier[exc] )
identifier[graph] . identifier[document] . identifier[update] ( identifier[metadata_parser] . identifier[document_metadata] )
identifier[log] . identifier[info] ( literal[string] , identifier[time] . identifier[time] ()- identifier[parse_document_start_time] ) | def parse_document(graph: BELGraph, enumerated_lines: Iterable[Tuple[int, str]], metadata_parser: MetadataParser) -> None:
"""Parse the lines in the document section of a BEL script."""
parse_document_start_time = time.time()
for (line_number, line) in enumerated_lines:
try:
metadata_parser.parseString(line, line_number=line_number) # depends on [control=['try'], data=[]]
except VersionFormatWarning as exc:
_log_parse_exception(graph, exc)
graph.add_warning(exc) # depends on [control=['except'], data=['exc']]
except Exception as e:
exc = MalformedMetadataException(line_number, line, 0)
_log_parse_exception(graph, exc)
raise exc from e # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]]
for required in REQUIRED_METADATA:
required_metadatum = metadata_parser.document_metadata.get(required)
if required_metadatum is not None:
continue # depends on [control=['if'], data=[]]
required_metadatum_key = INVERSE_DOCUMENT_KEYS[required]
# This has to be insert since it needs to go on the front!
exc = MissingMetadataException.make(required_metadatum_key)
graph.warnings.insert(0, (None, exc, {}))
_log_parse_exception(graph, exc) # depends on [control=['for'], data=['required']]
graph.document.update(metadata_parser.document_metadata)
log.info('Finished parsing document section in %.02f seconds', time.time() - parse_document_start_time) |
def _split_row(self, row, border):
""" split a row of text into list of cells. """
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|') | def function[_split_row, parameter[self, row, border]]:
constant[ split a row of text into list of cells. ]
if name[border] begin[:]
if call[name[row].startswith, parameter[constant[|]]] begin[:]
variable[row] assign[=] call[name[row]][<ast.Slice object at 0x7da18c4cdf00>]
if call[name[row].endswith, parameter[constant[|]]] begin[:]
variable[row] assign[=] call[name[row]][<ast.Slice object at 0x7da20c76d000>]
return[call[name[row].split, parameter[constant[|]]]] | keyword[def] identifier[_split_row] ( identifier[self] , identifier[row] , identifier[border] ):
literal[string]
keyword[if] identifier[border] :
keyword[if] identifier[row] . identifier[startswith] ( literal[string] ):
identifier[row] = identifier[row] [ literal[int] :]
keyword[if] identifier[row] . identifier[endswith] ( literal[string] ):
identifier[row] = identifier[row] [:- literal[int] ]
keyword[return] identifier[row] . identifier[split] ( literal[string] ) | def _split_row(self, row, border):
""" split a row of text into list of cells. """
if border:
if row.startswith('|'):
row = row[1:] # depends on [control=['if'], data=[]]
if row.endswith('|'):
row = row[:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return row.split('|') |
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'azure',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
label = vm_.get('label', vm_['name'])
service_name = vm_.get('service_name', vm_['name'])
service_kwargs = {
'service_name': service_name,
'label': label,
'description': vm_.get('desc', vm_['name']),
}
loc_error = False
if 'location' in vm_:
if 'affinity_group' in vm_:
loc_error = True
else:
service_kwargs['location'] = vm_['location']
elif 'affinity_group' in vm_:
service_kwargs['affinity_group'] = vm_['affinity_group']
else:
loc_error = True
if loc_error:
raise SaltCloudSystemExit(
'Either a location or affinity group must be specified, but not both'
)
ssh_port = config.get_cloud_config_value('port', vm_, __opts__,
default=22, search_global=True)
ssh_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
name='SSH',
protocol='TCP',
port=ssh_port,
local_port=22,
)
network_config = azure.servicemanagement.ConfigurationSet()
network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
network_config.configuration_set_type = 'NetworkConfiguration'
if 'win_username' in vm_:
system_config = azure.servicemanagement.WindowsConfigurationSet(
computer_name=vm_['name'],
admin_username=vm_['win_username'],
admin_password=vm_['win_password'],
)
smb_port = '445'
if 'smb_port' in vm_:
smb_port = vm_['smb_port']
smb_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(
name='SMB',
protocol='TCP',
port=smb_port,
local_port=smb_port,
)
network_config.input_endpoints.input_endpoints.append(smb_endpoint)
# Domain and WinRM configuration not yet supported by Salt Cloud
system_config.domain_join = None
system_config.win_rm = None
else:
system_config = azure.servicemanagement.LinuxConfigurationSet(
host_name=vm_['name'],
user_name=vm_['ssh_username'],
user_password=vm_['ssh_password'],
disable_ssh_password_authentication=False,
)
# TODO: Might need to create a storage account
media_link = vm_['media_link']
# TODO: Probably better to use more than just the name in the media_link
media_link += '/{0}.vhd'.format(vm_['name'])
os_hd = azure.servicemanagement.OSVirtualHardDisk(vm_['image'], media_link)
vm_kwargs = {
'service_name': service_name,
'deployment_name': service_name,
'deployment_slot': vm_['slot'],
'label': label,
'role_name': vm_['name'],
'system_config': system_config,
'os_virtual_hard_disk': os_hd,
'role_size': vm_['size'],
'network_config': network_config,
}
if 'virtual_network_name' in vm_:
vm_kwargs['virtual_network_name'] = vm_['virtual_network_name']
if 'subnet_name' in vm_:
network_config.subnet_names.append(vm_['subnet_name'])
log.debug('vm_kwargs: %s', vm_kwargs)
event_kwargs = {'service_kwargs': service_kwargs.copy(),
'vm_kwargs': vm_kwargs.copy()}
del event_kwargs['vm_kwargs']['system_config']
del event_kwargs['vm_kwargs']['os_virtual_hard_disk']
del event_kwargs['vm_kwargs']['network_config']
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('vm_kwargs: %s', vm_kwargs)
# Azure lets you open winrm on a new VM
# Can open up specific ports in Azure; but not on Windows
try:
conn.create_hosted_service(**service_kwargs)
except AzureConflictHttpError:
log.debug('Cloud service already exists')
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in six.text_type(exc):
log.error(
'Error creating %s on Azure.\n\n'
'The hosted service name is invalid. The name can contain '
'only letters, numbers, and hyphens. The name must start with '
'a letter and must end with a letter or a number.',
vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
else:
log.error(
'Error creating %s on Azure\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
try:
result = conn.create_virtual_machine_deployment(**vm_kwargs)
log.debug('Request ID for machine: %s', result.request_id)
_wait_for_async(conn, result.request_id)
except AzureConflictHttpError:
log.debug('Conflict error. The deployment may already exist, trying add_role')
# Deleting two useless keywords
del vm_kwargs['deployment_slot']
del vm_kwargs['label']
del vm_kwargs['virtual_network_name']
result = conn.add_role(**vm_kwargs)
_wait_for_async(conn, result.request_id)
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in six.text_type(exc):
log.error(
'Error creating %s on Azure.\n\n'
'The VM name is invalid. The name can contain '
'only letters, numbers, and hyphens. The name must start with '
'a letter and must end with a letter or a number.',
vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
else:
log.error(
'Error creating %s on Azure.\n\n'
'The Virtual Machine could not be created. If you '
'are using an already existing Cloud Service, '
'make sure you set up the `port` variable corresponding '
'to the SSH port exists and that the port number is not '
'already in use.\nThe following exception was thrown when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def wait_for_hostname():
    '''
    Wait for the deployment's hostname/URL to become available.

    Polled repeatedly by salt.utils.cloud.wait_for_fun (see caller);
    returns the deployment URL string on success, or False to signal
    "not ready yet, poll again".
    '''
    try:
        # Raises AzureMissingResourceHttpError until the role is visible.
        conn.get_role(service_name, service_name, vm_['name'])
        data = show_instance(vm_['name'], call='action')
        # Only succeed once the deployment exposes a non-empty URL.
        if 'url' in data and data['url'] != six.text_type(''):
            return data['url']
    except AzureMissingResourceHttpError:
        # Role/deployment not created yet; fall through and retry.
        pass
    # Brief pause so the wait_for_fun polling loop does not spin hot.
    time.sleep(1)
    return False
hostname = salt.utils.cloud.wait_for_fun(
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
if not hostname:
log.error('Failed to get a value for the hostname.')
return False
vm_['ssh_host'] = hostname.replace('http://', '').replace('/', '')
vm_['password'] = config.get_cloud_config_value(
'ssh_password', vm_, __opts__
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Attaching volumes
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
args=__utils__['cloud.filter_event']('attaching_volumes', vm_, ['volumes']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
'volumes': volumes,
'service_name': service_name,
'deployment_name': vm_['name'],
'media_link': media_link,
'role_name': vm_['name'],
'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)
},
call='action'
)
ret['Attached Volumes'] = created
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'%s\'', vm_)
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
ret.update(data)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret | def function[create, parameter[vm_]]:
constant[
Create a single VM from a data dict
]
<ast.Try object at 0x7da20c992b00>
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[starting create], call[constant[salt/cloud/{0}/creating].format, parameter[call[name[vm_]][constant[name]]]]]]
call[name[log].info, parameter[constant[Creating Cloud VM %s], call[name[vm_]][constant[name]]]]
variable[conn] assign[=] call[name[get_conn], parameter[]]
variable[label] assign[=] call[name[vm_].get, parameter[constant[label], call[name[vm_]][constant[name]]]]
variable[service_name] assign[=] call[name[vm_].get, parameter[constant[service_name], call[name[vm_]][constant[name]]]]
variable[service_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da20c9902e0>, <ast.Constant object at 0x7da20c993700>, <ast.Constant object at 0x7da20c990460>], [<ast.Name object at 0x7da20c992d10>, <ast.Name object at 0x7da20c992050>, <ast.Call object at 0x7da20c9924d0>]]
variable[loc_error] assign[=] constant[False]
if compare[constant[location] in name[vm_]] begin[:]
if compare[constant[affinity_group] in name[vm_]] begin[:]
variable[loc_error] assign[=] constant[True]
if name[loc_error] begin[:]
<ast.Raise object at 0x7da20c992ec0>
variable[ssh_port] assign[=] call[name[config].get_cloud_config_value, parameter[constant[port], name[vm_], name[__opts__]]]
variable[ssh_endpoint] assign[=] call[name[azure].servicemanagement.ConfigurationSetInputEndpoint, parameter[]]
variable[network_config] assign[=] call[name[azure].servicemanagement.ConfigurationSet, parameter[]]
call[name[network_config].input_endpoints.input_endpoints.append, parameter[name[ssh_endpoint]]]
name[network_config].configuration_set_type assign[=] constant[NetworkConfiguration]
if compare[constant[win_username] in name[vm_]] begin[:]
variable[system_config] assign[=] call[name[azure].servicemanagement.WindowsConfigurationSet, parameter[]]
variable[smb_port] assign[=] constant[445]
if compare[constant[smb_port] in name[vm_]] begin[:]
variable[smb_port] assign[=] call[name[vm_]][constant[smb_port]]
variable[smb_endpoint] assign[=] call[name[azure].servicemanagement.ConfigurationSetInputEndpoint, parameter[]]
call[name[network_config].input_endpoints.input_endpoints.append, parameter[name[smb_endpoint]]]
name[system_config].domain_join assign[=] constant[None]
name[system_config].win_rm assign[=] constant[None]
variable[media_link] assign[=] call[name[vm_]][constant[media_link]]
<ast.AugAssign object at 0x7da20c990880>
variable[os_hd] assign[=] call[name[azure].servicemanagement.OSVirtualHardDisk, parameter[call[name[vm_]][constant[image]], name[media_link]]]
variable[vm_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da20c991030>, <ast.Constant object at 0x7da20c992470>, <ast.Constant object at 0x7da20c991a20>, <ast.Constant object at 0x7da20c990490>, <ast.Constant object at 0x7da20c9914e0>, <ast.Constant object at 0x7da20c990ca0>, <ast.Constant object at 0x7da20c9909d0>, <ast.Constant object at 0x7da20c990520>, <ast.Constant object at 0x7da20c993850>], [<ast.Name object at 0x7da20c991ff0>, <ast.Name object at 0x7da20c992cb0>, <ast.Subscript object at 0x7da20c990fd0>, <ast.Name object at 0x7da20c990be0>, <ast.Subscript object at 0x7da20c993580>, <ast.Name object at 0x7da20c9918d0>, <ast.Name object at 0x7da20c993c70>, <ast.Subscript object at 0x7da18f00ef50>, <ast.Name object at 0x7da18f00cfa0>]]
if compare[constant[virtual_network_name] in name[vm_]] begin[:]
call[name[vm_kwargs]][constant[virtual_network_name]] assign[=] call[name[vm_]][constant[virtual_network_name]]
if compare[constant[subnet_name] in name[vm_]] begin[:]
call[name[network_config].subnet_names.append, parameter[call[name[vm_]][constant[subnet_name]]]]
call[name[log].debug, parameter[constant[vm_kwargs: %s], name[vm_kwargs]]]
variable[event_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18f00ddb0>, <ast.Constant object at 0x7da18f00fe20>], [<ast.Call object at 0x7da18f00dd20>, <ast.Call object at 0x7da18f00e3b0>]]
<ast.Delete object at 0x7da18f00d2d0>
<ast.Delete object at 0x7da18f00fe50>
<ast.Delete object at 0x7da18f00fb80>
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[requesting instance], call[constant[salt/cloud/{0}/requesting].format, parameter[call[name[vm_]][constant[name]]]]]]
call[name[log].debug, parameter[constant[vm_kwargs: %s], name[vm_kwargs]]]
<ast.Try object at 0x7da18f00e1a0>
<ast.Try object at 0x7da18f00e770>
def function[wait_for_hostname, parameter[]]:
constant[
Wait for the IP address to become available
]
<ast.Try object at 0x7da18f00c490>
call[name[time].sleep, parameter[constant[1]]]
return[constant[False]]
variable[hostname] assign[=] call[name[salt].utils.cloud.wait_for_fun, parameter[name[wait_for_hostname]]]
if <ast.UnaryOp object at 0x7da18bc710c0> begin[:]
call[name[log].error, parameter[constant[Failed to get a value for the hostname.]]]
return[constant[False]]
call[name[vm_]][constant[ssh_host]] assign[=] call[call[name[hostname].replace, parameter[constant[http://], constant[]]].replace, parameter[constant[/], constant[]]]
call[name[vm_]][constant[password]] assign[=] call[name[config].get_cloud_config_value, parameter[constant[ssh_password], name[vm_], name[__opts__]]]
variable[ret] assign[=] call[call[name[__utils__]][constant[cloud.bootstrap]], parameter[name[vm_], name[__opts__]]]
variable[volumes] assign[=] call[name[config].get_cloud_config_value, parameter[constant[volumes], name[vm_], name[__opts__]]]
if name[volumes] begin[:]
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[attaching volumes], call[constant[salt/cloud/{0}/attaching_volumes].format, parameter[call[name[vm_]][constant[name]]]]]]
call[name[log].info, parameter[constant[Create and attach volumes to node %s], call[name[vm_]][constant[name]]]]
variable[created] assign[=] call[name[create_attach_volumes], parameter[call[name[vm_]][constant[name]], dictionary[[<ast.Constant object at 0x7da18bc72140>, <ast.Constant object at 0x7da18bc72560>, <ast.Constant object at 0x7da18bc70970>, <ast.Constant object at 0x7da18bc73070>, <ast.Constant object at 0x7da18bc729b0>, <ast.Constant object at 0x7da18bc703a0>], [<ast.Name object at 0x7da18bc72ef0>, <ast.Name object at 0x7da18bc70700>, <ast.Subscript object at 0x7da18bc728f0>, <ast.Name object at 0x7da18bc71030>, <ast.Subscript object at 0x7da18bc71690>, <ast.Call object at 0x7da18bc73ca0>]]]]
call[name[ret]][constant[Attached Volumes]] assign[=] name[created]
variable[data] assign[=] call[name[show_instance], parameter[call[name[vm_]][constant[name]]]]
call[name[log].info, parameter[constant[Created Cloud VM '%s'], name[vm_]]]
call[name[log].debug, parameter[constant['%s' VM creation details:
%s], call[name[vm_]][constant[name]], call[name[pprint].pformat, parameter[name[data]]]]]
call[name[ret].update, parameter[name[data]]]
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[created instance], call[constant[salt/cloud/{0}/created].format, parameter[call[name[vm_]][constant[name]]]]]]
return[name[ret]] | keyword[def] identifier[create] ( identifier[vm_] ):
literal[string]
keyword[try] :
keyword[if] identifier[vm_] [ literal[string] ] keyword[and] identifier[config] . identifier[is_profile_configured] ( identifier[__opts__] ,
identifier[__active_provider_name__] keyword[or] literal[string] ,
identifier[vm_] [ literal[string] ],
identifier[vm_] = identifier[vm_] ) keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ]),
identifier[args] = identifier[__utils__] [ literal[string] ]( literal[string] , identifier[vm_] ,[ literal[string] , literal[string] , literal[string] , literal[string] ]),
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
identifier[log] . identifier[info] ( literal[string] , identifier[vm_] [ literal[string] ])
identifier[conn] = identifier[get_conn] ()
identifier[label] = identifier[vm_] . identifier[get] ( literal[string] , identifier[vm_] [ literal[string] ])
identifier[service_name] = identifier[vm_] . identifier[get] ( literal[string] , identifier[vm_] [ literal[string] ])
identifier[service_kwargs] ={
literal[string] : identifier[service_name] ,
literal[string] : identifier[label] ,
literal[string] : identifier[vm_] . identifier[get] ( literal[string] , identifier[vm_] [ literal[string] ]),
}
identifier[loc_error] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[vm_] :
keyword[if] literal[string] keyword[in] identifier[vm_] :
identifier[loc_error] = keyword[True]
keyword[else] :
identifier[service_kwargs] [ literal[string] ]= identifier[vm_] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[vm_] :
identifier[service_kwargs] [ literal[string] ]= identifier[vm_] [ literal[string] ]
keyword[else] :
identifier[loc_error] = keyword[True]
keyword[if] identifier[loc_error] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
identifier[ssh_port] = identifier[config] . identifier[get_cloud_config_value] ( literal[string] , identifier[vm_] , identifier[__opts__] ,
identifier[default] = literal[int] , identifier[search_global] = keyword[True] )
identifier[ssh_endpoint] = identifier[azure] . identifier[servicemanagement] . identifier[ConfigurationSetInputEndpoint] (
identifier[name] = literal[string] ,
identifier[protocol] = literal[string] ,
identifier[port] = identifier[ssh_port] ,
identifier[local_port] = literal[int] ,
)
identifier[network_config] = identifier[azure] . identifier[servicemanagement] . identifier[ConfigurationSet] ()
identifier[network_config] . identifier[input_endpoints] . identifier[input_endpoints] . identifier[append] ( identifier[ssh_endpoint] )
identifier[network_config] . identifier[configuration_set_type] = literal[string]
keyword[if] literal[string] keyword[in] identifier[vm_] :
identifier[system_config] = identifier[azure] . identifier[servicemanagement] . identifier[WindowsConfigurationSet] (
identifier[computer_name] = identifier[vm_] [ literal[string] ],
identifier[admin_username] = identifier[vm_] [ literal[string] ],
identifier[admin_password] = identifier[vm_] [ literal[string] ],
)
identifier[smb_port] = literal[string]
keyword[if] literal[string] keyword[in] identifier[vm_] :
identifier[smb_port] = identifier[vm_] [ literal[string] ]
identifier[smb_endpoint] = identifier[azure] . identifier[servicemanagement] . identifier[ConfigurationSetInputEndpoint] (
identifier[name] = literal[string] ,
identifier[protocol] = literal[string] ,
identifier[port] = identifier[smb_port] ,
identifier[local_port] = identifier[smb_port] ,
)
identifier[network_config] . identifier[input_endpoints] . identifier[input_endpoints] . identifier[append] ( identifier[smb_endpoint] )
identifier[system_config] . identifier[domain_join] = keyword[None]
identifier[system_config] . identifier[win_rm] = keyword[None]
keyword[else] :
identifier[system_config] = identifier[azure] . identifier[servicemanagement] . identifier[LinuxConfigurationSet] (
identifier[host_name] = identifier[vm_] [ literal[string] ],
identifier[user_name] = identifier[vm_] [ literal[string] ],
identifier[user_password] = identifier[vm_] [ literal[string] ],
identifier[disable_ssh_password_authentication] = keyword[False] ,
)
identifier[media_link] = identifier[vm_] [ literal[string] ]
identifier[media_link] += literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ])
identifier[os_hd] = identifier[azure] . identifier[servicemanagement] . identifier[OSVirtualHardDisk] ( identifier[vm_] [ literal[string] ], identifier[media_link] )
identifier[vm_kwargs] ={
literal[string] : identifier[service_name] ,
literal[string] : identifier[service_name] ,
literal[string] : identifier[vm_] [ literal[string] ],
literal[string] : identifier[label] ,
literal[string] : identifier[vm_] [ literal[string] ],
literal[string] : identifier[system_config] ,
literal[string] : identifier[os_hd] ,
literal[string] : identifier[vm_] [ literal[string] ],
literal[string] : identifier[network_config] ,
}
keyword[if] literal[string] keyword[in] identifier[vm_] :
identifier[vm_kwargs] [ literal[string] ]= identifier[vm_] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[vm_] :
identifier[network_config] . identifier[subnet_names] . identifier[append] ( identifier[vm_] [ literal[string] ])
identifier[log] . identifier[debug] ( literal[string] , identifier[vm_kwargs] )
identifier[event_kwargs] ={ literal[string] : identifier[service_kwargs] . identifier[copy] (),
literal[string] : identifier[vm_kwargs] . identifier[copy] ()}
keyword[del] identifier[event_kwargs] [ literal[string] ][ literal[string] ]
keyword[del] identifier[event_kwargs] [ literal[string] ][ literal[string] ]
keyword[del] identifier[event_kwargs] [ literal[string] ][ literal[string] ]
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ]),
identifier[args] = identifier[__utils__] [ literal[string] ]( literal[string] , identifier[event_kwargs] , identifier[list] ( identifier[event_kwargs] )),
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
identifier[log] . identifier[debug] ( literal[string] , identifier[vm_kwargs] )
keyword[try] :
identifier[conn] . identifier[create_hosted_service] (** identifier[service_kwargs] )
keyword[except] identifier[AzureConflictHttpError] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[error] = literal[string]
keyword[if] identifier[error] keyword[in] identifier[six] . identifier[text_type] ( identifier[exc] ):
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[vm_] [ literal[string] ],
identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG]
)
keyword[else] :
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string] ,
identifier[vm_] [ literal[string] ], identifier[exc] ,
identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG]
)
keyword[return] keyword[False]
keyword[try] :
identifier[result] = identifier[conn] . identifier[create_virtual_machine_deployment] (** identifier[vm_kwargs] )
identifier[log] . identifier[debug] ( literal[string] , identifier[result] . identifier[request_id] )
identifier[_wait_for_async] ( identifier[conn] , identifier[result] . identifier[request_id] )
keyword[except] identifier[AzureConflictHttpError] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[del] identifier[vm_kwargs] [ literal[string] ]
keyword[del] identifier[vm_kwargs] [ literal[string] ]
keyword[del] identifier[vm_kwargs] [ literal[string] ]
identifier[result] = identifier[conn] . identifier[add_role] (** identifier[vm_kwargs] )
identifier[_wait_for_async] ( identifier[conn] , identifier[result] . identifier[request_id] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[error] = literal[string]
keyword[if] identifier[error] keyword[in] identifier[six] . identifier[text_type] ( identifier[exc] ):
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[vm_] [ literal[string] ],
identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG]
)
keyword[else] :
identifier[log] . identifier[error] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[vm_] [ literal[string] ], identifier[exc] ,
identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG]
)
keyword[return] keyword[False]
keyword[def] identifier[wait_for_hostname] ():
literal[string]
keyword[try] :
identifier[conn] . identifier[get_role] ( identifier[service_name] , identifier[service_name] , identifier[vm_] [ literal[string] ])
identifier[data] = identifier[show_instance] ( identifier[vm_] [ literal[string] ], identifier[call] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[data] keyword[and] identifier[data] [ literal[string] ]!= identifier[six] . identifier[text_type] ( literal[string] ):
keyword[return] identifier[data] [ literal[string] ]
keyword[except] identifier[AzureMissingResourceHttpError] :
keyword[pass]
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return] keyword[False]
identifier[hostname] = identifier[salt] . identifier[utils] . identifier[cloud] . identifier[wait_for_fun] (
identifier[wait_for_hostname] ,
identifier[timeout] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[default] = literal[int] * literal[int] ),
)
keyword[if] keyword[not] identifier[hostname] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[vm_] [ literal[string] ]= identifier[hostname] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[vm_] [ literal[string] ]= identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__]
)
identifier[ret] = identifier[__utils__] [ literal[string] ]( identifier[vm_] , identifier[__opts__] )
identifier[volumes] = identifier[config] . identifier[get_cloud_config_value] (
literal[string] , identifier[vm_] , identifier[__opts__] , identifier[search_global] = keyword[True]
)
keyword[if] identifier[volumes] :
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ]),
identifier[args] = identifier[__utils__] [ literal[string] ]( literal[string] , identifier[vm_] ,[ literal[string] ]),
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
identifier[log] . identifier[info] ( literal[string] , identifier[vm_] [ literal[string] ])
identifier[created] = identifier[create_attach_volumes] (
identifier[vm_] [ literal[string] ],
{
literal[string] : identifier[volumes] ,
literal[string] : identifier[service_name] ,
literal[string] : identifier[vm_] [ literal[string] ],
literal[string] : identifier[media_link] ,
literal[string] : identifier[vm_] [ literal[string] ],
literal[string] : identifier[vm_] . identifier[get] ( literal[string] , keyword[False] )
},
identifier[call] = literal[string]
)
identifier[ret] [ literal[string] ]= identifier[created]
identifier[data] = identifier[show_instance] ( identifier[vm_] [ literal[string] ], identifier[call] = literal[string] )
identifier[log] . identifier[info] ( literal[string] , identifier[vm_] )
identifier[log] . identifier[debug] ( literal[string] , identifier[vm_] [ literal[string] ], identifier[pprint] . identifier[pformat] ( identifier[data] ))
identifier[ret] . identifier[update] ( identifier[data] )
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[vm_] [ literal[string] ]),
identifier[args] = identifier[__utils__] [ literal[string] ]( literal[string] , identifier[vm_] ,[ literal[string] , literal[string] , literal[string] , literal[string] ]),
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
keyword[return] identifier[ret] | def create(vm_):
"""
Create a single VM from a data dict
"""
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__, __active_provider_name__ or 'azure', vm_['profile'], vm_=vm_) is False:
return False # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
__utils__['cloud.fire_event']('event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
label = vm_.get('label', vm_['name'])
service_name = vm_.get('service_name', vm_['name'])
service_kwargs = {'service_name': service_name, 'label': label, 'description': vm_.get('desc', vm_['name'])}
loc_error = False
if 'location' in vm_:
if 'affinity_group' in vm_:
loc_error = True # depends on [control=['if'], data=[]]
else:
service_kwargs['location'] = vm_['location'] # depends on [control=['if'], data=['vm_']]
elif 'affinity_group' in vm_:
service_kwargs['affinity_group'] = vm_['affinity_group'] # depends on [control=['if'], data=['vm_']]
else:
loc_error = True
if loc_error:
raise SaltCloudSystemExit('Either a location or affinity group must be specified, but not both') # depends on [control=['if'], data=[]]
ssh_port = config.get_cloud_config_value('port', vm_, __opts__, default=22, search_global=True)
ssh_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(name='SSH', protocol='TCP', port=ssh_port, local_port=22)
network_config = azure.servicemanagement.ConfigurationSet()
network_config.input_endpoints.input_endpoints.append(ssh_endpoint)
network_config.configuration_set_type = 'NetworkConfiguration'
if 'win_username' in vm_:
system_config = azure.servicemanagement.WindowsConfigurationSet(computer_name=vm_['name'], admin_username=vm_['win_username'], admin_password=vm_['win_password'])
smb_port = '445'
if 'smb_port' in vm_:
smb_port = vm_['smb_port'] # depends on [control=['if'], data=['vm_']]
smb_endpoint = azure.servicemanagement.ConfigurationSetInputEndpoint(name='SMB', protocol='TCP', port=smb_port, local_port=smb_port)
network_config.input_endpoints.input_endpoints.append(smb_endpoint)
# Domain and WinRM configuration not yet supported by Salt Cloud
system_config.domain_join = None
system_config.win_rm = None # depends on [control=['if'], data=['vm_']]
else:
system_config = azure.servicemanagement.LinuxConfigurationSet(host_name=vm_['name'], user_name=vm_['ssh_username'], user_password=vm_['ssh_password'], disable_ssh_password_authentication=False)
# TODO: Might need to create a storage account
media_link = vm_['media_link']
# TODO: Probably better to use more than just the name in the media_link
media_link += '/{0}.vhd'.format(vm_['name'])
os_hd = azure.servicemanagement.OSVirtualHardDisk(vm_['image'], media_link)
vm_kwargs = {'service_name': service_name, 'deployment_name': service_name, 'deployment_slot': vm_['slot'], 'label': label, 'role_name': vm_['name'], 'system_config': system_config, 'os_virtual_hard_disk': os_hd, 'role_size': vm_['size'], 'network_config': network_config}
if 'virtual_network_name' in vm_:
vm_kwargs['virtual_network_name'] = vm_['virtual_network_name']
if 'subnet_name' in vm_:
network_config.subnet_names.append(vm_['subnet_name']) # depends on [control=['if'], data=['vm_']] # depends on [control=['if'], data=['vm_']]
log.debug('vm_kwargs: %s', vm_kwargs)
event_kwargs = {'service_kwargs': service_kwargs.copy(), 'vm_kwargs': vm_kwargs.copy()}
del event_kwargs['vm_kwargs']['system_config']
del event_kwargs['vm_kwargs']['os_virtual_hard_disk']
del event_kwargs['vm_kwargs']['network_config']
__utils__['cloud.fire_event']('event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args=__utils__['cloud.filter_event']('requesting', event_kwargs, list(event_kwargs)), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
log.debug('vm_kwargs: %s', vm_kwargs)
# Azure lets you open winrm on a new VM
# Can open up specific ports in Azure; but not on Windows
try:
conn.create_hosted_service(**service_kwargs) # depends on [control=['try'], data=[]]
except AzureConflictHttpError:
log.debug('Cloud service already exists') # depends on [control=['except'], data=[]]
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in six.text_type(exc):
# Show the traceback if the debug logging level is enabled
log.error('Error creating %s on Azure.\n\nThe hosted service name is invalid. The name can contain only letters, numbers, and hyphens. The name must start with a letter and must end with a letter or a number.', vm_['name'], exc_info_on_loglevel=logging.DEBUG) # depends on [control=['if'], data=[]]
else:
# Show the traceback if the debug logging level is enabled
log.error('Error creating %s on Azure\n\nThe following exception was thrown when trying to run the initial deployment: \n%s', vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG)
return False # depends on [control=['except'], data=['exc']]
try:
result = conn.create_virtual_machine_deployment(**vm_kwargs)
log.debug('Request ID for machine: %s', result.request_id)
_wait_for_async(conn, result.request_id) # depends on [control=['try'], data=[]]
except AzureConflictHttpError:
log.debug('Conflict error. The deployment may already exist, trying add_role')
# Deleting two useless keywords
del vm_kwargs['deployment_slot']
del vm_kwargs['label']
del vm_kwargs['virtual_network_name']
result = conn.add_role(**vm_kwargs)
_wait_for_async(conn, result.request_id) # depends on [control=['except'], data=[]]
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in six.text_type(exc):
# Show the traceback if the debug logging level is enabled
log.error('Error creating %s on Azure.\n\nThe VM name is invalid. The name can contain only letters, numbers, and hyphens. The name must start with a letter and must end with a letter or a number.', vm_['name'], exc_info_on_loglevel=logging.DEBUG) # depends on [control=['if'], data=[]]
else:
# Show the traceback if the debug logging level is enabled
log.error('Error creating %s on Azure.\n\nThe Virtual Machine could not be created. If you are using an already existing Cloud Service, make sure you set up the `port` variable corresponding to the SSH port exists and that the port number is not already in use.\nThe following exception was thrown when trying to run the initial deployment: \n%s', vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG)
return False # depends on [control=['except'], data=['exc']]
def wait_for_hostname():
"""
Wait for the IP address to become available
"""
try:
conn.get_role(service_name, service_name, vm_['name'])
data = show_instance(vm_['name'], call='action')
if 'url' in data and data['url'] != six.text_type(''):
return data['url'] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AzureMissingResourceHttpError:
pass # depends on [control=['except'], data=[]]
time.sleep(1)
return False
hostname = salt.utils.cloud.wait_for_fun(wait_for_hostname, timeout=config.get_cloud_config_value('wait_for_fun_timeout', vm_, __opts__, default=15 * 60))
if not hostname:
log.error('Failed to get a value for the hostname.')
return False # depends on [control=['if'], data=[]]
vm_['ssh_host'] = hostname.replace('http://', '').replace('/', '')
vm_['password'] = config.get_cloud_config_value('ssh_password', vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Attaching volumes
volumes = config.get_cloud_config_value('volumes', vm_, __opts__, search_global=True)
if volumes:
__utils__['cloud.fire_event']('event', 'attaching volumes', 'salt/cloud/{0}/attaching_volumes'.format(vm_['name']), args=__utils__['cloud.filter_event']('attaching_volumes', vm_, ['volumes']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(vm_['name'], {'volumes': volumes, 'service_name': service_name, 'deployment_name': vm_['name'], 'media_link': media_link, 'role_name': vm_['name'], 'del_all_vols_on_destroy': vm_.get('set_del_all_vols_on_destroy', False)}, call='action')
ret['Attached Volumes'] = created # depends on [control=['if'], data=[]]
data = show_instance(vm_['name'], call='action')
log.info("Created Cloud VM '%s'", vm_)
log.debug("'%s' VM creation details:\n%s", vm_['name'], pprint.pformat(data))
ret.update(data)
__utils__['cloud.fire_event']('event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
return ret |
def accept(self, timeout=None):
    """
    Return the next channel opened by the client over this transport, in
    server mode. If no channel is opened before the given timeout,
    ``None`` is returned.

    :param int timeout:
        seconds to wait for a channel, or ``None`` to wait forever
    :return: a new `.Channel` opened by the client
    """
    with self.lock:
        # Fast path: a channel is already queued.
        if self.server_accepts:
            return self.server_accepts.pop(0)
        # Otherwise block on the condition variable until notified
        # (a new channel arrived) or the timeout elapses.
        self.server_accept_cv.wait(timeout)
        if self.server_accepts:
            return self.server_accepts.pop(0)
        # Timed out with nothing queued.
        return None
constant[
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout,
``None`` is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
]
call[name[self].lock.acquire, parameter[]]
<ast.Try object at 0x7da1b21239a0>
return[name[chan]] | keyword[def] identifier[accept] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[self] . identifier[lock] . identifier[acquire] ()
keyword[try] :
keyword[if] identifier[len] ( identifier[self] . identifier[server_accepts] )> literal[int] :
identifier[chan] = identifier[self] . identifier[server_accepts] . identifier[pop] ( literal[int] )
keyword[else] :
identifier[self] . identifier[server_accept_cv] . identifier[wait] ( identifier[timeout] )
keyword[if] identifier[len] ( identifier[self] . identifier[server_accepts] )> literal[int] :
identifier[chan] = identifier[self] . identifier[server_accepts] . identifier[pop] ( literal[int] )
keyword[else] :
identifier[chan] = keyword[None]
keyword[finally] :
identifier[self] . identifier[lock] . identifier[release] ()
keyword[return] identifier[chan] | def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout,
``None`` is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
:return: a new `.Channel` opened by the client
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0) # depends on [control=['if'], data=[]]
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0) # depends on [control=['if'], data=[]]
else:
# timeout
chan = None # depends on [control=['try'], data=[]]
finally:
self.lock.release()
return chan |
def get_path_for_url(url, folder=None, filename=None, overwrite=False):
    # type: (str, Optional[str], Optional[str], bool) -> str
    """Build a local file path for downloading the given URL.

    The filename is derived from the URL unless one is supplied, and the file
    is placed in *folder* (a temporary folder when none is given). Unless
    *overwrite* is set, a numeric suffix is inserted before the extension so
    an existing file is never clobbered.

    Args:
        url (str): URL to download
        folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).
        filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).
        overwrite (bool): Whether to overwrite existing file. Defaults to False.

    Returns:
        str: Path of downloaded file
    """
    if not filename:
        filename = basename(urlsplit(url).path)
    stem, extension = splitext(filename)
    if not folder:
        folder = get_temp_dir()
    path = join(folder, stem + extension)
    if overwrite:
        # Best effort removal: a missing file is not an error here.
        try:
            remove(path)
        except OSError:
            pass
        return path
    suffix = 0
    while exists(path):
        # Keep appending an increasing counter until the name is free.
        suffix += 1
        path = join(folder, '%s%d%s' % (stem, suffix, extension))
    return path
constant[Get filename from url and join to provided folder or temporary folder if no folder supplied, ensuring uniqueness
Args:
url (str): URL to download
folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).
filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).
overwrite (bool): Whether to overwrite existing file. Defaults to False.
Returns:
str: Path of downloaded file
]
if <ast.UnaryOp object at 0x7da1b1035900> begin[:]
variable[urlpath] assign[=] call[name[urlsplit], parameter[name[url]]].path
variable[filename] assign[=] call[name[basename], parameter[name[urlpath]]]
<ast.Tuple object at 0x7da1b1034820> assign[=] call[name[splitext], parameter[name[filename]]]
if <ast.UnaryOp object at 0x7da1b1037850> begin[:]
variable[folder] assign[=] call[name[get_temp_dir], parameter[]]
variable[path] assign[=] call[name[join], parameter[name[folder], binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b10344c0>, <ast.Name object at 0x7da1b1036620>]]]]]
if name[overwrite] begin[:]
<ast.Try object at 0x7da1b10b1780>
return[name[path]] | keyword[def] identifier[get_path_for_url] ( identifier[url] , identifier[folder] = keyword[None] , identifier[filename] = keyword[None] , identifier[overwrite] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[filename] :
identifier[urlpath] = identifier[urlsplit] ( identifier[url] ). identifier[path]
identifier[filename] = identifier[basename] ( identifier[urlpath] )
identifier[filename] , identifier[extension] = identifier[splitext] ( identifier[filename] )
keyword[if] keyword[not] identifier[folder] :
identifier[folder] = identifier[get_temp_dir] ()
identifier[path] = identifier[join] ( identifier[folder] , literal[string] %( identifier[filename] , identifier[extension] ))
keyword[if] identifier[overwrite] :
keyword[try] :
identifier[remove] ( identifier[path] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[else] :
identifier[count] = literal[int]
keyword[while] identifier[exists] ( identifier[path] ):
identifier[count] += literal[int]
identifier[path] = identifier[join] ( identifier[folder] , literal[string] %( identifier[filename] , identifier[count] , identifier[extension] ))
keyword[return] identifier[path] | def get_path_for_url(url, folder=None, filename=None, overwrite=False):
# type: (str, Optional[str], Optional[str], bool) -> str
'Get filename from url and join to provided folder or temporary folder if no folder supplied, ensuring uniqueness\n\n Args:\n url (str): URL to download\n folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder).\n filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url).\n overwrite (bool): Whether to overwrite existing file. Defaults to False.\n\n Returns:\n str: Path of downloaded file\n\n '
if not filename:
urlpath = urlsplit(url).path
filename = basename(urlpath) # depends on [control=['if'], data=[]]
(filename, extension) = splitext(filename)
if not folder:
folder = get_temp_dir() # depends on [control=['if'], data=[]]
path = join(folder, '%s%s' % (filename, extension))
if overwrite:
try:
remove(path) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
count = 0
while exists(path):
count += 1
path = join(folder, '%s%d%s' % (filename, count, extension)) # depends on [control=['while'], data=[]]
return path |
def get(self):
    """
    Return the referer aka the WHOIS server of the current domain extension.

    Returns:
        - the WHOIS server (from the IANA database) when one is known and
          WHOIS usage is allowed;
        - :code:`None` when testing a local network, when the extension is
          ignored, when WHOIS usage is disabled (:code:`no_whois`), or when
          the IANA database has no referer recorded for the extension;
        - :code:`False` when the extension is not in the IANA database at
          all (invalid domain).
    """
    if not PyFunceble.CONFIGURATION["local"]:
        # We are not running a test in a local network.

        if self.domain_extension not in self.ignored_extension:
            # The extension of the domain we are testing is not into
            # the list of ignored extensions.

            # We set the referer to None as we do not have any.
            referer = None

            if self.domain_extension in PyFunceble.INTERN["iana_db"]:
                # The domain extension is in the iana database.

                if not PyFunceble.CONFIGURATION["no_whois"]:
                    # We are authorized to use WHOIS for the test result.

                    # We get the referer from the database.
                    referer = PyFunceble.INTERN["iana_db"][self.domain_extension]

                    if not referer:
                        # The referer is not filled.

                        # We log the case of the current extension.
                        Logs().referer_not_found(self.domain_extension)

                        # And we handle and return None status.
                        return None

                    # The referer is into the database.
                    # We return the extracted referer.
                    return referer

                # We are not authorized to use WHOIS for the test result.
                # We return None.
                return None

            # The domain extension is not in the iana database.
            # We return False, it is an invalid domain.
            return False

        # The extension of the domain we are testing is in the list of
        # ignored extensions.
        # We return None, the domain does not have a whois server.
        return None

    # We are running a test in a local network.
    # We return None.
    return None
constant[
Return the referer aka the WHOIS server of the current domain extension.
]
if <ast.UnaryOp object at 0x7da1b0294c10> begin[:]
if compare[name[self].domain_extension <ast.NotIn object at 0x7da2590d7190> name[self].ignored_extension] begin[:]
variable[referer] assign[=] constant[None]
if compare[name[self].domain_extension in call[name[PyFunceble].INTERN][constant[iana_db]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0297700> begin[:]
variable[referer] assign[=] call[call[name[PyFunceble].INTERN][constant[iana_db]]][name[self].domain_extension]
if <ast.UnaryOp object at 0x7da1b02961a0> begin[:]
call[call[name[Logs], parameter[]].referer_not_found, parameter[name[self].domain_extension]]
return[constant[None]]
return[name[referer]]
return[constant[None]]
return[constant[False]]
return[constant[None]]
return[constant[None]] | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[PyFunceble] . identifier[CONFIGURATION] [ literal[string] ]:
keyword[if] identifier[self] . identifier[domain_extension] keyword[not] keyword[in] identifier[self] . identifier[ignored_extension] :
identifier[referer] = keyword[None]
keyword[if] identifier[self] . identifier[domain_extension] keyword[in] identifier[PyFunceble] . identifier[INTERN] [ literal[string] ]:
keyword[if] keyword[not] identifier[PyFunceble] . identifier[CONFIGURATION] [ literal[string] ]:
identifier[referer] = identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ identifier[self] . identifier[domain_extension] ]
keyword[if] keyword[not] identifier[referer] :
identifier[Logs] (). identifier[referer_not_found] ( identifier[self] . identifier[domain_extension] )
keyword[return] keyword[None]
keyword[return] identifier[referer]
keyword[return] keyword[None]
keyword[return] keyword[False]
keyword[return] keyword[None]
keyword[return] keyword[None] | def get(self):
"""
Return the referer aka the WHOIS server of the current domain extension.
"""
if not PyFunceble.CONFIGURATION['local']:
# We are not running a test in a local network.
if self.domain_extension not in self.ignored_extension:
# The extension of the domain we are testing is not into
# the list of ignored extensions.
# We set the referer to None as we do not have any.
referer = None
if self.domain_extension in PyFunceble.INTERN['iana_db']:
# The domain extension is in the iana database.
if not PyFunceble.CONFIGURATION['no_whois']:
# We are authorized to use WHOIS for the test result.
# We get the referer from the database.
referer = PyFunceble.INTERN['iana_db'][self.domain_extension]
if not referer:
# The referer is not filled.
# We log the case of the current extension.
Logs().referer_not_found(self.domain_extension)
# And we handle and return None status.
return None # depends on [control=['if'], data=[]]
# The referer is into the database.
# We return the extracted referer.
return referer # depends on [control=['if'], data=[]]
# We are not authorized to use WHOIS for the test result.
# We return None.
return None # depends on [control=['if'], data=[]]
# The domain extension is not in the iana database.
# We return False, it is an invalid domain.
return False # depends on [control=['if'], data=[]]
# The extension of the domain we are testing is not into
# the list of ignored extensions.
# We return None, the domain does not have a whois server.
return None # depends on [control=['if'], data=[]]
# We are running a test in a local network.
# We return None.
return None |
def verifyToken(self, auth):
    """
    Check that the token backing the given authentication method has not
    expired, renewing it when that is possible.

    Args:
        auth (Auth): authentication type to check

    Raises:
        .SkypeAuthException: if Skype auth is required, and the current
            token has expired and can't be renewed
    """
    now = datetime.now()
    if auth in (self.Auth.SkypeToken, self.Auth.Authorize):
        skypeExpired = "skype" not in self.tokenExpiry or now >= self.tokenExpiry["skype"]
        if skypeExpired:
            # Renewal requires credentials; without a token fetcher we can only fail.
            if not hasattr(self, "getSkypeToken"):
                raise SkypeAuthException("Skype token expired, and no password specified")
            self.getSkypeToken()
    elif auth == self.Auth.RegToken:
        regExpired = "reg" not in self.tokenExpiry or now >= self.tokenExpiry["reg"]
        if regExpired:
            self.getRegToken()
constant[
Ensure the authentication token for the given auth method is still valid.
Args:
auth (Auth): authentication type to check
Raises:
.SkypeAuthException: if Skype auth is required, and the current token has expired and can't be renewed
]
if compare[name[auth] in tuple[[<ast.Attribute object at 0x7da18ede5030>, <ast.Attribute object at 0x7da18ede43d0>]]] begin[:]
if <ast.BoolOp object at 0x7da18fe90580> begin[:]
if <ast.UnaryOp object at 0x7da18fe90820> begin[:]
<ast.Raise object at 0x7da18fe93a30>
call[name[self].getSkypeToken, parameter[]] | keyword[def] identifier[verifyToken] ( identifier[self] , identifier[auth] ):
literal[string]
keyword[if] identifier[auth] keyword[in] ( identifier[self] . identifier[Auth] . identifier[SkypeToken] , identifier[self] . identifier[Auth] . identifier[Authorize] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[tokenExpiry] keyword[or] identifier[datetime] . identifier[now] ()>= identifier[self] . identifier[tokenExpiry] [ literal[string] ]:
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[SkypeAuthException] ( literal[string] )
identifier[self] . identifier[getSkypeToken] ()
keyword[elif] identifier[auth] == identifier[self] . identifier[Auth] . identifier[RegToken] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[tokenExpiry] keyword[or] identifier[datetime] . identifier[now] ()>= identifier[self] . identifier[tokenExpiry] [ literal[string] ]:
identifier[self] . identifier[getRegToken] () | def verifyToken(self, auth):
"""
Ensure the authentication token for the given auth method is still valid.
Args:
auth (Auth): authentication type to check
Raises:
.SkypeAuthException: if Skype auth is required, and the current token has expired and can't be renewed
"""
if auth in (self.Auth.SkypeToken, self.Auth.Authorize):
if 'skype' not in self.tokenExpiry or datetime.now() >= self.tokenExpiry['skype']:
if not hasattr(self, 'getSkypeToken'):
raise SkypeAuthException('Skype token expired, and no password specified') # depends on [control=['if'], data=[]]
self.getSkypeToken() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif auth == self.Auth.RegToken:
if 'reg' not in self.tokenExpiry or datetime.now() >= self.tokenExpiry['reg']:
self.getRegToken() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def from_hising(cls, h, J, offset=None):
    """Build a binary polynomial out of a higher-order Ising problem.

    Args:
        h (dict):
            The linear biases.

        J (dict):
            The higher-order biases.

        offset (optional, default=0.0):
            Constant offset applied to the model.

    Returns:
        :obj:`.BinaryPolynomial`

    Examples:
        >>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)

    """
    # Linear terms become 1-variable monomials; J entries are merged on top
    # so a key present in both takes its bias from J.
    poly = {}
    for v, bias in h.items():
        poly[(v,)] = bias
    poly.update(J)
    if offset is not None:
        # The empty interaction encodes the constant offset.
        poly[frozenset([])] = offset
    return cls(poly, Vartype.SPIN)
constant[Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
]
variable[poly] assign[=] <ast.DictComp object at 0x7da1b07157b0>
call[name[poly].update, parameter[name[J]]]
if compare[name[offset] is_not constant[None]] begin[:]
call[name[poly]][call[name[frozenset], parameter[list[[]]]]] assign[=] name[offset]
return[call[name[cls], parameter[name[poly], name[Vartype].SPIN]]] | keyword[def] identifier[from_hising] ( identifier[cls] , identifier[h] , identifier[J] , identifier[offset] = keyword[None] ):
literal[string]
identifier[poly] ={( identifier[k] ,): identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[h] . identifier[items] ()}
identifier[poly] . identifier[update] ( identifier[J] )
keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] :
identifier[poly] [ identifier[frozenset] ([])]= identifier[offset]
keyword[return] identifier[cls] ( identifier[poly] , identifier[Vartype] . identifier[SPIN] ) | def from_hising(cls, h, J, offset=None):
"""Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
"""
poly = {(k,): v for (k, v) in h.items()}
poly.update(J)
if offset is not None:
poly[frozenset([])] = offset # depends on [control=['if'], data=['offset']]
return cls(poly, Vartype.SPIN) |
def download_dataset(self, dataset_name, local_path, how="stream"):
    """ Fetch the named dataset from the repository into a fresh local folder.

    :param dataset_name: the name the dataset has in the repository
    :param local_path: where you want to save the dataset
    :param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream'
        downloads the dataset sample by sample
    :return: None
    """
    # Refuse to touch a pre-existing destination folder.
    if os.path.isdir(local_path):
        raise ValueError("Path {} already exists!".format(local_path))
    os.makedirs(local_path)
    # The actual files live in a dedicated subfolder of the destination.
    local_path = os.path.join(local_path, FILES_FOLDER)
    os.makedirs(local_path)
    if how == 'zip':
        return self.download_as_zip(dataset_name, local_path)
    if how == 'stream':
        return self.download_as_stream(dataset_name, local_path)
    raise ValueError("how must be {'zip', 'stream'}")
constant[ It downloads from the repository the specified dataset and puts it
in the specified local folder
:param dataset_name: the name the dataset has in the repository
:param local_path: where you want to save the dataset
:param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream'
downloads the dataset sample by sample
:return: None
]
if <ast.UnaryOp object at 0x7da1b1be6920> begin[:]
call[name[os].makedirs, parameter[name[local_path]]]
variable[local_path] assign[=] call[name[os].path.join, parameter[name[local_path], name[FILES_FOLDER]]]
call[name[os].makedirs, parameter[name[local_path]]]
if compare[name[how] equal[==] constant[zip]] begin[:]
return[call[name[self].download_as_zip, parameter[name[dataset_name], name[local_path]]]] | keyword[def] identifier[download_dataset] ( identifier[self] , identifier[dataset_name] , identifier[local_path] , identifier[how] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[local_path] ):
identifier[os] . identifier[makedirs] ( identifier[local_path] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[local_path] ))
identifier[local_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[local_path] , identifier[FILES_FOLDER] )
identifier[os] . identifier[makedirs] ( identifier[local_path] )
keyword[if] identifier[how] == literal[string] :
keyword[return] identifier[self] . identifier[download_as_zip] ( identifier[dataset_name] , identifier[local_path] )
keyword[elif] identifier[how] == literal[string] :
keyword[return] identifier[self] . identifier[download_as_stream] ( identifier[dataset_name] , identifier[local_path] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def download_dataset(self, dataset_name, local_path, how='stream'):
""" It downloads from the repository the specified dataset and puts it
in the specified local folder
:param dataset_name: the name the dataset has in the repository
:param local_path: where you want to save the dataset
:param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream'
downloads the dataset sample by sample
:return: None
"""
if not os.path.isdir(local_path):
os.makedirs(local_path) # depends on [control=['if'], data=[]]
else:
raise ValueError('Path {} already exists!'.format(local_path))
local_path = os.path.join(local_path, FILES_FOLDER)
os.makedirs(local_path)
if how == 'zip':
return self.download_as_zip(dataset_name, local_path) # depends on [control=['if'], data=[]]
elif how == 'stream':
return self.download_as_stream(dataset_name, local_path) # depends on [control=['if'], data=[]]
else:
raise ValueError("how must be {'zip', 'stream'}") |
def table_absent(name, db):
    '''
    Make sure the specified table does not exist

    name
        The name of the table

    db
        The name of the database file
    '''
    # Salt-style state return: name / changes / result / comment.
    changes = {'name': name,
               'changes': {},
               'result': None,
               'comment': ''}
    conn = None
    try:
        conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
        # Parameterized catalog lookup; returns the CREATE TABLE sql for
        # every table whose name matches exactly.
        tables = _query(conn, "SELECT sql FROM sqlite_master " +
                        " WHERE type='table' AND name=?", [name])
        if len(tables) == 1:
            if __opts__['test']:
                # Dry run: report intent without touching the database.
                changes['result'] = True
                changes['comment'] = "'" + name + "' will be dropped"
            else:
                # NOTE(review): the table name is concatenated into the DROP
                # statement; assumed to come from trusted state configuration.
                conn.execute("DROP TABLE " + name)
                conn.commit()
                # Record the old schema so the state change is inspectable.
                changes['changes']['old'] = tables[0][0]
                changes['result'] = True
                changes['comment'] = "'" + name + "' was dropped"
        elif not tables:
            # Nothing to do: the desired state already holds.
            changes['result'] = True
            changes['comment'] = "'" + name + "' is already absent"
        else:
            changes['result'] = False
            changes['comment'] = "Multiple tables with the same name='" + \
                name + "'"
    except Exception as e:
        # Deliberately broad: any failure is surfaced through the state
        # result instead of propagating out of the state function.
        changes['result'] = False
        changes['comment'] = six.text_type(e)
    finally:
        if conn:
            conn.close()
    return changes
constant[
Make sure the specified table does not exist
name
The name of the table
db
The name of the database file
]
variable[changes] assign[=] dictionary[[<ast.Constant object at 0x7da20cabc400>, <ast.Constant object at 0x7da20cabe9b0>, <ast.Constant object at 0x7da20cabebf0>, <ast.Constant object at 0x7da20cabd690>], [<ast.Name object at 0x7da20cabcdf0>, <ast.Dict object at 0x7da20cabe290>, <ast.Constant object at 0x7da20cabd8a0>, <ast.Constant object at 0x7da20cabce80>]]
variable[conn] assign[=] constant[None]
<ast.Try object at 0x7da20cabe590>
return[name[changes]] | keyword[def] identifier[table_absent] ( identifier[name] , identifier[db] ):
literal[string]
identifier[changes] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None] ,
literal[string] : literal[string] }
identifier[conn] = keyword[None]
keyword[try] :
identifier[conn] = identifier[sqlite3] . identifier[connect] ( identifier[db] , identifier[detect_types] = identifier[sqlite3] . identifier[PARSE_DECLTYPES] )
identifier[tables] = identifier[_query] ( identifier[conn] , literal[string] +
literal[string] ,[ identifier[name] ])
keyword[if] identifier[len] ( identifier[tables] )== literal[int] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[else] :
identifier[conn] . identifier[execute] ( literal[string] + identifier[name] )
identifier[conn] . identifier[commit] ()
identifier[changes] [ literal[string] ][ literal[string] ]= identifier[tables] [ literal[int] ][ literal[int] ]
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[elif] keyword[not] identifier[tables] :
identifier[changes] [ literal[string] ]= keyword[True]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[else] :
identifier[changes] [ literal[string] ]= keyword[False]
identifier[changes] [ literal[string] ]= literal[string] + identifier[name] + literal[string]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[changes] [ literal[string] ]= keyword[False]
identifier[changes] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[e] )
keyword[finally] :
keyword[if] identifier[conn] :
identifier[conn] . identifier[close] ()
keyword[return] identifier[changes] | def table_absent(name, db):
"""
Make sure the specified table does not exist
name
The name of the table
db
The name of the database file
"""
changes = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
conn = None
try:
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES)
tables = _query(conn, 'SELECT sql FROM sqlite_master ' + " WHERE type='table' AND name=?", [name])
if len(tables) == 1:
if __opts__['test']:
changes['result'] = True
changes['comment'] = "'" + name + "' will be dropped" # depends on [control=['if'], data=[]]
else:
conn.execute('DROP TABLE ' + name)
conn.commit()
changes['changes']['old'] = tables[0][0]
changes['result'] = True
changes['comment'] = "'" + name + "' was dropped" # depends on [control=['if'], data=[]]
elif not tables:
changes['result'] = True
changes['comment'] = "'" + name + "' is already absent" # depends on [control=['if'], data=[]]
else:
changes['result'] = False
changes['comment'] = "Multiple tables with the same name='" + name + "'" # depends on [control=['try'], data=[]]
except Exception as e:
changes['result'] = False
changes['comment'] = six.text_type(e) # depends on [control=['except'], data=['e']]
finally:
if conn:
conn.close() # depends on [control=['if'], data=[]]
return changes |
def nodebalancer_create(self, region, **kwargs):
    """
    Creates a new NodeBalancer in the given Region.

    :param region: The Region in which to create the NodeBalancer.
    :type region: Region or str

    :returns: The new NodeBalancer
    :rtype: NodeBalancer
    """
    # Accept either a Region object or a plain region id string.
    region_id = region.id if isinstance(region, Base) else region
    params = {"region": region_id}
    # Caller-supplied kwargs take precedence, including "region" itself.
    params.update(kwargs)
    result = self.post('/nodebalancers', data=params)
    if 'id' not in result:
        raise UnexpectedResponseError('Unexpected response when creating Nodebalaner!', json=result)
    return NodeBalancer(self, result['id'], result)
constant[
Creates a new NodeBalancer in the given Region.
:param region: The Region in which to create the NodeBalancer.
:type region: Region or str
:returns: The new NodeBalancer
:rtype: NodeBalancer
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9ac80>], [<ast.IfExp object at 0x7da18dc98e80>]]
call[name[params].update, parameter[name[kwargs]]]
variable[result] assign[=] call[name[self].post, parameter[constant[/nodebalancers]]]
if <ast.UnaryOp object at 0x7da18dc9a380> begin[:]
<ast.Raise object at 0x7da18dc9b280>
variable[n] assign[=] call[name[NodeBalancer], parameter[name[self], call[name[result]][constant[id]], name[result]]]
return[name[n]] | keyword[def] identifier[nodebalancer_create] ( identifier[self] , identifier[region] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[region] . identifier[id] keyword[if] identifier[isinstance] ( identifier[region] , identifier[Base] ) keyword[else] identifier[region] ,
}
identifier[params] . identifier[update] ( identifier[kwargs] )
identifier[result] = identifier[self] . identifier[post] ( literal[string] , identifier[data] = identifier[params] )
keyword[if] keyword[not] literal[string] keyword[in] identifier[result] :
keyword[raise] identifier[UnexpectedResponseError] ( literal[string] , identifier[json] = identifier[result] )
identifier[n] = identifier[NodeBalancer] ( identifier[self] , identifier[result] [ literal[string] ], identifier[result] )
keyword[return] identifier[n] | def nodebalancer_create(self, region, **kwargs):
"""
Creates a new NodeBalancer in the given Region.
:param region: The Region in which to create the NodeBalancer.
:type region: Region or str
:returns: The new NodeBalancer
:rtype: NodeBalancer
"""
params = {'region': region.id if isinstance(region, Base) else region}
params.update(kwargs)
result = self.post('/nodebalancers', data=params)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when creating Nodebalaner!', json=result) # depends on [control=['if'], data=[]]
n = NodeBalancer(self, result['id'], result)
return n |
def remove_module(self, module):
    """
    Detach *module* from this object.

    Ownership of module is returned: the module is dropped from the tracked
    set and marked as no longer owned, so the caller becomes responsible
    for its lifetime.

    Raises:
        RuntimeError: if the underlying LLVM call reports an error
            (presumably signalled by a truthy return value, with the error
            text written into outerr — confirm against the FFI binding).
    """
    with ffi.OutputString() as outerr:
        if ffi.lib.LLVMPY_RemoveModule(self, module, outerr):
            raise RuntimeError(str(outerr))
    # Only update bookkeeping after the native call succeeded.
    self._modules.remove(module)
    module._owned = False
constant[
Ownership of module is returned
]
with call[name[ffi].OutputString, parameter[]] begin[:]
if call[name[ffi].lib.LLVMPY_RemoveModule, parameter[name[self], name[module], name[outerr]]] begin[:]
<ast.Raise object at 0x7da1b19edc60>
call[name[self]._modules.remove, parameter[name[module]]]
name[module]._owned assign[=] constant[False] | keyword[def] identifier[remove_module] ( identifier[self] , identifier[module] ):
literal[string]
keyword[with] identifier[ffi] . identifier[OutputString] () keyword[as] identifier[outerr] :
keyword[if] identifier[ffi] . identifier[lib] . identifier[LLVMPY_RemoveModule] ( identifier[self] , identifier[module] , identifier[outerr] ):
keyword[raise] identifier[RuntimeError] ( identifier[str] ( identifier[outerr] ))
identifier[self] . identifier[_modules] . identifier[remove] ( identifier[module] )
identifier[module] . identifier[_owned] = keyword[False] | def remove_module(self, module):
"""
Ownership of module is returned
"""
with ffi.OutputString() as outerr:
if ffi.lib.LLVMPY_RemoveModule(self, module, outerr):
raise RuntimeError(str(outerr)) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['outerr']]
self._modules.remove(module)
module._owned = False |
def is_dsub_operation(cls, op):
    """Heuristically decide whether a pipelines operation came from dsub.

    An operation carries no definitive marker identifying dsub as its
    submitter, so this relies on labels that dsub submissions have always
    carried: job-id, job-name, and user-id (envs would be an alternative
    signal, but labels keep the heuristic simple).

    Args:
      op: a pipelines operation.

    Returns:
      Boolean, true if the pipeline run was generated by dsub.
    """
    if not cls.is_pipelines_operation(op):
        return False
    required = ('job-id', 'job-name', 'user-id')
    return all(cls.get_operation_label(op, label) for label in required)
constant[Determine if a pipelines operation is a dsub request.
We don't have a rigorous way to identify an operation as being submitted
by dsub. Our best option is to check for certain fields that have always
been part of dsub operations.
- labels: job-id, job-name, and user-id have always existed
- envs: _SCRIPT has always existed.
In order to keep a simple heuristic this test only uses labels.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the pipeline run was generated by dsub.
]
if <ast.UnaryOp object at 0x7da1b013c340> begin[:]
return[constant[False]]
for taget[name[name]] in starred[list[[<ast.Constant object at 0x7da1b013ed70>, <ast.Constant object at 0x7da1b013cf10>, <ast.Constant object at 0x7da1b013e0b0>]]] begin[:]
if <ast.UnaryOp object at 0x7da1b013d360> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_dsub_operation] ( identifier[cls] , identifier[op] ):
literal[string]
keyword[if] keyword[not] identifier[cls] . identifier[is_pipelines_operation] ( identifier[op] ):
keyword[return] keyword[False]
keyword[for] identifier[name] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] keyword[not] identifier[cls] . identifier[get_operation_label] ( identifier[op] , identifier[name] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_dsub_operation(cls, op):
"""Determine if a pipelines operation is a dsub request.
We don't have a rigorous way to identify an operation as being submitted
by dsub. Our best option is to check for certain fields that have always
been part of dsub operations.
- labels: job-id, job-name, and user-id have always existed
- envs: _SCRIPT has always existed.
In order to keep a simple heuristic this test only uses labels.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the pipeline run was generated by dsub.
"""
if not cls.is_pipelines_operation(op):
return False # depends on [control=['if'], data=[]]
for name in ['job-id', 'job-name', 'user-id']:
if not cls.get_operation_label(op, name):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
return True |
def _allocate_address_neutron(self, instance, network_ids):
"""
Allocates a floating/public ip address to the given instance,
using the OpenStack Network ('Neutron') API.
:param instance: instance to assign address to
:param list network_id:
List of IDs (as strings) of networks where to
request allocation the floating IP.
:return: public ip address
"""
self._init_os_api()
with OpenStackCloudProvider.__node_start_lock:
# Note: to return *all* addresses, all parameters to
# `neutron_client.list_floatingips()` should be left out;
# setting them to `None` (e.g., `fixed_ip_address=None`)
# results in an empty list...
free_ips = [
ip for ip in
self.neutron_client.list_floatingips().get('floatingips')
if (ip['floating_network_id'] in network_ids
# keep only unallocated IP addrs
and ip['fixed_ip_address'] is None
and ip['port_id'] is None)
]
if free_ips:
floating_ip = free_ips.pop()
log.debug("Using existing floating IP %r", floating_ip)
else:
# FIXME: OpenStack Network API v2 requires that we specify
# a network ID along with the request for a floating IP.
# However, ElastiCluster configuration allows for multiple
# networks to be connected to a VM, but does not give any
# hint as to which one(s) should be used for such requests.
# So we try them all, ignoring errors until one request
# succeeds and hope that it's OK. One can imagine
# scenarios where this is *not* correct, but: (1) these
# scenarios are unlikely, and (2) the old novaclient code
# above has not even had the concept of multiple networks
# for floating IPs and no-one has complained in 5 years...
for network_id in network_ids:
log.debug(
"Trying to allocate floating IP on network %s ...", network_id)
try:
floating_ip = self.neutron_client.create_floatingip({
'floatingip': {
'floating_network_id':network_id,
}}).get('floatingip')
log.debug(
"Allocated IP address %s on network %s",
floating_ip['floating_ip_address'], network_id)
break # stop at first network where we get a floating IP
except BadNeutronRequest as err:
raise RuntimeError(
"Failed allocating floating IP on network {0}: {1}"
.format(network_id, err))
if floating_ip.get('floating_ip_address', None) is None:
raise RuntimeError(
"Could not allocate floating IP for VM {0}"
.format(instance_id))
# wait until at least one interface is up
interfaces = []
# FIXMEE: no timeout!
while not interfaces:
interfaces = instance.interface_list()
sleep(2) ## FIXME: hard-coded value
# get port ID
for interface in interfaces:
log.debug(
"Instance %s (ID: %s):"
" Checking if floating IP can be attached to interface %r ...",
instance.name, instance.id, interface)
# if interface.net_id not in network_ids:
# log.debug(
# "Instance %s (ID: %s):"
# " Skipping interface %r:"
# " not attached to any of the requested networks.",
# instance.name, instance.id, interface)
# continue
port_id = interface.port_id
if port_id is None:
log.debug(
"Instance %s (ID: %s):"
" Skipping interface %r: no port ID!",
instance.name, instance.id, interface)
continue
log.debug(
"Instance `%s` (ID: %s):"
" will assign floating IP to port ID %s (state: %s),"
" already running IP addresses %r",
instance.name, instance.id,
port_id, interface.port_state,
[item['ip_address'] for item in interface.fixed_ips])
if interface.port_state != 'ACTIVE':
log.warn(
"Instance `%s` (ID: %s):"
" port `%s` is in state %s (epected 'ACTIVE' instead)",
instance.name, instance.id,
port_id, interface.port_state)
break
else:
raise RuntimeError(
"Could not find port on network(s) {0}"
" for instance {1} (ID: {2}) to bind a floating IP to."
.format(network_ids, instance.name, instance.id))
# assign floating IP to port
floating_ip = self.neutron_client.update_floatingip(
floating_ip['id'], {
'floatingip': {
'port_id': port_id,
},
}
).get('floatingip')
ip_address = floating_ip['floating_ip_address']
log.debug("Assigned IP address %s to port %s", ip_address, port_id)
log.info("Waiting 300s until floating IP %s is ACTIVE", ip_address)
for i in range(300):
_floating_ip = self.neutron_client.show_floatingip(floating_ip['id'])
if _floating_ip['floatingip']['status'] != 'DOWN':
break
sleep(1)
# Invalidate cache for this VM, as we just assigned a new IP
if instance.id in self._cached_instances:
del self._cached_instances[instance.id]
return ip_address | def function[_allocate_address_neutron, parameter[self, instance, network_ids]]:
constant[
Allocates a floating/public ip address to the given instance,
using the OpenStack Network ('Neutron') API.
:param instance: instance to assign address to
:param list network_id:
List of IDs (as strings) of networks where to
request allocation the floating IP.
:return: public ip address
]
call[name[self]._init_os_api, parameter[]]
with name[OpenStackCloudProvider].__node_start_lock begin[:]
variable[free_ips] assign[=] <ast.ListComp object at 0x7da1b0613be0>
if name[free_ips] begin[:]
variable[floating_ip] assign[=] call[name[free_ips].pop, parameter[]]
call[name[log].debug, parameter[constant[Using existing floating IP %r], name[floating_ip]]]
if compare[call[name[floating_ip].get, parameter[constant[floating_ip_address], constant[None]]] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b06129b0>
variable[interfaces] assign[=] list[[]]
while <ast.UnaryOp object at 0x7da1b06127a0> begin[:]
variable[interfaces] assign[=] call[name[instance].interface_list, parameter[]]
call[name[sleep], parameter[constant[2]]]
for taget[name[interface]] in starred[name[interfaces]] begin[:]
call[name[log].debug, parameter[constant[Instance %s (ID: %s): Checking if floating IP can be attached to interface %r ...], name[instance].name, name[instance].id, name[interface]]]
variable[port_id] assign[=] name[interface].port_id
if compare[name[port_id] is constant[None]] begin[:]
call[name[log].debug, parameter[constant[Instance %s (ID: %s): Skipping interface %r: no port ID!], name[instance].name, name[instance].id, name[interface]]]
continue
call[name[log].debug, parameter[constant[Instance `%s` (ID: %s): will assign floating IP to port ID %s (state: %s), already running IP addresses %r], name[instance].name, name[instance].id, name[port_id], name[interface].port_state, <ast.ListComp object at 0x7da1b0611d20>]]
if compare[name[interface].port_state not_equal[!=] constant[ACTIVE]] begin[:]
call[name[log].warn, parameter[constant[Instance `%s` (ID: %s): port `%s` is in state %s (epected 'ACTIVE' instead)], name[instance].name, name[instance].id, name[port_id], name[interface].port_state]]
break
variable[floating_ip] assign[=] call[call[name[self].neutron_client.update_floatingip, parameter[call[name[floating_ip]][constant[id]], dictionary[[<ast.Constant object at 0x7da1b0611300>], [<ast.Dict object at 0x7da1b06112d0>]]]].get, parameter[constant[floatingip]]]
variable[ip_address] assign[=] call[name[floating_ip]][constant[floating_ip_address]]
call[name[log].debug, parameter[constant[Assigned IP address %s to port %s], name[ip_address], name[port_id]]]
call[name[log].info, parameter[constant[Waiting 300s until floating IP %s is ACTIVE], name[ip_address]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[300]]]] begin[:]
variable[_floating_ip] assign[=] call[name[self].neutron_client.show_floatingip, parameter[call[name[floating_ip]][constant[id]]]]
if compare[call[call[name[_floating_ip]][constant[floatingip]]][constant[status]] not_equal[!=] constant[DOWN]] begin[:]
break
call[name[sleep], parameter[constant[1]]]
if compare[name[instance].id in name[self]._cached_instances] begin[:]
<ast.Delete object at 0x7da1b06107c0>
return[name[ip_address]] | keyword[def] identifier[_allocate_address_neutron] ( identifier[self] , identifier[instance] , identifier[network_ids] ):
literal[string]
identifier[self] . identifier[_init_os_api] ()
keyword[with] identifier[OpenStackCloudProvider] . identifier[__node_start_lock] :
identifier[free_ips] =[
identifier[ip] keyword[for] identifier[ip] keyword[in]
identifier[self] . identifier[neutron_client] . identifier[list_floatingips] (). identifier[get] ( literal[string] )
keyword[if] ( identifier[ip] [ literal[string] ] keyword[in] identifier[network_ids]
keyword[and] identifier[ip] [ literal[string] ] keyword[is] keyword[None]
keyword[and] identifier[ip] [ literal[string] ] keyword[is] keyword[None] )
]
keyword[if] identifier[free_ips] :
identifier[floating_ip] = identifier[free_ips] . identifier[pop] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[floating_ip] )
keyword[else] :
keyword[for] identifier[network_id] keyword[in] identifier[network_ids] :
identifier[log] . identifier[debug] (
literal[string] , identifier[network_id] )
keyword[try] :
identifier[floating_ip] = identifier[self] . identifier[neutron_client] . identifier[create_floatingip] ({
literal[string] :{
literal[string] : identifier[network_id] ,
}}). identifier[get] ( literal[string] )
identifier[log] . identifier[debug] (
literal[string] ,
identifier[floating_ip] [ literal[string] ], identifier[network_id] )
keyword[break]
keyword[except] identifier[BadNeutronRequest] keyword[as] identifier[err] :
keyword[raise] identifier[RuntimeError] (
literal[string]
. identifier[format] ( identifier[network_id] , identifier[err] ))
keyword[if] identifier[floating_ip] . identifier[get] ( literal[string] , keyword[None] ) keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] (
literal[string]
. identifier[format] ( identifier[instance_id] ))
identifier[interfaces] =[]
keyword[while] keyword[not] identifier[interfaces] :
identifier[interfaces] = identifier[instance] . identifier[interface_list] ()
identifier[sleep] ( literal[int] )
keyword[for] identifier[interface] keyword[in] identifier[interfaces] :
identifier[log] . identifier[debug] (
literal[string]
literal[string] ,
identifier[instance] . identifier[name] , identifier[instance] . identifier[id] , identifier[interface] )
identifier[port_id] = identifier[interface] . identifier[port_id]
keyword[if] identifier[port_id] keyword[is] keyword[None] :
identifier[log] . identifier[debug] (
literal[string]
literal[string] ,
identifier[instance] . identifier[name] , identifier[instance] . identifier[id] , identifier[interface] )
keyword[continue]
identifier[log] . identifier[debug] (
literal[string]
literal[string]
literal[string] ,
identifier[instance] . identifier[name] , identifier[instance] . identifier[id] ,
identifier[port_id] , identifier[interface] . identifier[port_state] ,
[ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[interface] . identifier[fixed_ips] ])
keyword[if] identifier[interface] . identifier[port_state] != literal[string] :
identifier[log] . identifier[warn] (
literal[string]
literal[string] ,
identifier[instance] . identifier[name] , identifier[instance] . identifier[id] ,
identifier[port_id] , identifier[interface] . identifier[port_state] )
keyword[break]
keyword[else] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string]
. identifier[format] ( identifier[network_ids] , identifier[instance] . identifier[name] , identifier[instance] . identifier[id] ))
identifier[floating_ip] = identifier[self] . identifier[neutron_client] . identifier[update_floatingip] (
identifier[floating_ip] [ literal[string] ],{
literal[string] :{
literal[string] : identifier[port_id] ,
},
}
). identifier[get] ( literal[string] )
identifier[ip_address] = identifier[floating_ip] [ literal[string] ]
identifier[log] . identifier[debug] ( literal[string] , identifier[ip_address] , identifier[port_id] )
identifier[log] . identifier[info] ( literal[string] , identifier[ip_address] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[_floating_ip] = identifier[self] . identifier[neutron_client] . identifier[show_floatingip] ( identifier[floating_ip] [ literal[string] ])
keyword[if] identifier[_floating_ip] [ literal[string] ][ literal[string] ]!= literal[string] :
keyword[break]
identifier[sleep] ( literal[int] )
keyword[if] identifier[instance] . identifier[id] keyword[in] identifier[self] . identifier[_cached_instances] :
keyword[del] identifier[self] . identifier[_cached_instances] [ identifier[instance] . identifier[id] ]
keyword[return] identifier[ip_address] | def _allocate_address_neutron(self, instance, network_ids):
"""
Allocates a floating/public ip address to the given instance,
using the OpenStack Network ('Neutron') API.
:param instance: instance to assign address to
:param list network_id:
List of IDs (as strings) of networks where to
request allocation the floating IP.
:return: public ip address
"""
self._init_os_api()
with OpenStackCloudProvider.__node_start_lock:
# Note: to return *all* addresses, all parameters to
# `neutron_client.list_floatingips()` should be left out;
# setting them to `None` (e.g., `fixed_ip_address=None`)
# results in an empty list...
# keep only unallocated IP addrs
free_ips = [ip for ip in self.neutron_client.list_floatingips().get('floatingips') if ip['floating_network_id'] in network_ids and ip['fixed_ip_address'] is None and (ip['port_id'] is None)]
if free_ips:
floating_ip = free_ips.pop()
log.debug('Using existing floating IP %r', floating_ip) # depends on [control=['if'], data=[]]
else:
# FIXME: OpenStack Network API v2 requires that we specify
# a network ID along with the request for a floating IP.
# However, ElastiCluster configuration allows for multiple
# networks to be connected to a VM, but does not give any
# hint as to which one(s) should be used for such requests.
# So we try them all, ignoring errors until one request
# succeeds and hope that it's OK. One can imagine
# scenarios where this is *not* correct, but: (1) these
# scenarios are unlikely, and (2) the old novaclient code
# above has not even had the concept of multiple networks
# for floating IPs and no-one has complained in 5 years...
for network_id in network_ids:
log.debug('Trying to allocate floating IP on network %s ...', network_id)
try:
floating_ip = self.neutron_client.create_floatingip({'floatingip': {'floating_network_id': network_id}}).get('floatingip')
log.debug('Allocated IP address %s on network %s', floating_ip['floating_ip_address'], network_id)
break # stop at first network where we get a floating IP # depends on [control=['try'], data=[]]
except BadNeutronRequest as err:
raise RuntimeError('Failed allocating floating IP on network {0}: {1}'.format(network_id, err)) # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['network_id']]
if floating_ip.get('floating_ip_address', None) is None:
raise RuntimeError('Could not allocate floating IP for VM {0}'.format(instance_id)) # depends on [control=['if'], data=[]]
# wait until at least one interface is up
interfaces = []
# FIXMEE: no timeout!
while not interfaces:
interfaces = instance.interface_list()
sleep(2) ## FIXME: hard-coded value # depends on [control=['while'], data=[]]
# get port ID
for interface in interfaces:
log.debug('Instance %s (ID: %s): Checking if floating IP can be attached to interface %r ...', instance.name, instance.id, interface)
# if interface.net_id not in network_ids:
# log.debug(
# "Instance %s (ID: %s):"
# " Skipping interface %r:"
# " not attached to any of the requested networks.",
# instance.name, instance.id, interface)
# continue
port_id = interface.port_id
if port_id is None:
log.debug('Instance %s (ID: %s): Skipping interface %r: no port ID!', instance.name, instance.id, interface)
continue # depends on [control=['if'], data=[]]
log.debug('Instance `%s` (ID: %s): will assign floating IP to port ID %s (state: %s), already running IP addresses %r', instance.name, instance.id, port_id, interface.port_state, [item['ip_address'] for item in interface.fixed_ips])
if interface.port_state != 'ACTIVE':
log.warn("Instance `%s` (ID: %s): port `%s` is in state %s (epected 'ACTIVE' instead)", instance.name, instance.id, port_id, interface.port_state) # depends on [control=['if'], data=[]]
break # depends on [control=['for'], data=['interface']]
else:
raise RuntimeError('Could not find port on network(s) {0} for instance {1} (ID: {2}) to bind a floating IP to.'.format(network_ids, instance.name, instance.id))
# assign floating IP to port
floating_ip = self.neutron_client.update_floatingip(floating_ip['id'], {'floatingip': {'port_id': port_id}}).get('floatingip')
ip_address = floating_ip['floating_ip_address']
log.debug('Assigned IP address %s to port %s', ip_address, port_id)
log.info('Waiting 300s until floating IP %s is ACTIVE', ip_address)
for i in range(300):
_floating_ip = self.neutron_client.show_floatingip(floating_ip['id'])
if _floating_ip['floatingip']['status'] != 'DOWN':
break # depends on [control=['if'], data=[]]
sleep(1) # depends on [control=['for'], data=[]]
# Invalidate cache for this VM, as we just assigned a new IP
if instance.id in self._cached_instances:
del self._cached_instances[instance.id] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
return ip_address |
    def _build_connections(self, process_list, ignore_dependencies,
                           auto_dependency):
        """Parses the process connections dictionaries into a process list.

        This method is called upon instantiation of the NextflowGenerator
        class. Essentially, it sets the main input/output channel names of
        the processes so that they can be linked correctly. If a connection
        between two consecutive processes is not possible due to a mismatch
        in the input/output types, it exits with an error.

        Parameters
        ----------
        process_list : list
            Connection dictionaries; each entry has an "input" and an
            "output" mapping, both of which contain a "lane" key.
        ignore_dependencies : bool
            When True, skip checking each process' declared dependencies.
        auto_dependency : bool
            When True, a missing dependency is inserted automatically via
            ``_add_dependency``; otherwise it is a fatal error (unless
            ``self.export_parameters`` is set).

        Returns
        -------
        None
            Populates ``self.processes``, ``self.lanes`` and
            ``self._fork_tree`` in place. Exits the interpreter on an
            unknown process name or an unmet dependency.
        """
        logger.debug("=============================")
        logger.debug("Building pipeline connections")
        logger.debug("=============================")
        logger.debug("Processing connections: {}".format(process_list))
        for p, con in enumerate(process_list):
            logger.debug("Processing connection '{}': {}".format(p, con))
            # Get lanes
            in_lane = con["input"]["lane"]
            out_lane = con["output"]["lane"]
            logger.debug("[{}] Input lane: {}".format(p, in_lane))
            logger.debug("[{}] Output lane: {}".format(p, out_lane))
            # Update the total number of lines of the pipeline
            if out_lane > self.lanes:
                self.lanes = out_lane
            # Get process names and directives for the output process
            p_in_name, p_out_name, out_directives = self._get_process_names(
                con, p)
            # Check if process is available or correctly named
            if p_out_name not in self.process_map:
                logger.error(colored_print(
                    "\nThe process '{}' is not available."
                    .format(p_out_name), "red_bold"))
                guess_process(p_out_name, self.process_map)
                sys.exit(1)
            # Instance output process
            out_process = self.process_map[p_out_name](template=p_out_name)
            # Update directives, if provided
            if out_directives:
                out_process.update_attributes(out_directives)
            # Set suffix strings for main input/output channels. Suffixes are
            # based on the lane and the arbitrary and unique process id
            # e.g.: 'process_1_1'
            input_suf = "{}_{}".format(in_lane, p)
            output_suf = "{}_{}".format(out_lane, p)
            logger.debug("[{}] Setting main channels with input suffix '{}'"
                         " and output suffix '{}'".format(
                            p, input_suf, output_suf))
            out_process.set_main_channel_names(input_suf, output_suf, out_lane)
            # Instance input process, if it exists. In case of init, the
            # output process forks from the raw input user data
            if p_in_name != "__init__":
                # Create instance of input process
                in_process = self.process_map[p_in_name](template=p_in_name)
                # Test if two processes can be connected by input/output types
                logger.debug("[{}] Testing connection between input and "
                             "output processes".format(p))
                self._test_connection(in_process, out_process)
                out_process.parent_lane = in_lane
            else:
                # When the input process is __init__, set the parent_lane
                # to None. This will tell the engine that this process
                # will receive the main input from the raw user input.
                out_process.parent_lane = None
            logger.debug("[{}] Parent lane: {}".format(
                p, out_process.parent_lane))
            # If the current connection is a fork, add it to the fork tree
            if in_lane != out_lane:
                logger.debug("[{}] Connection is a fork. Adding lanes to "
                             "fork list".format(p))
                self._fork_tree[in_lane].append(out_lane)
                # Update main output fork of parent process
                try:
                    parent_process = [
                        x for x in self.processes if x.lane == in_lane and
                        x.template == p_in_name
                    ][0]
                    logger.debug(
                        "[{}] Updating main forks of parent fork '{}' with"
                        " '{}'".format(p, parent_process,
                                       out_process.input_channel))
                    parent_process.update_main_forks(out_process.input_channel)
                except IndexError:
                    # No matching parent found in this lane; nothing to fork
                    pass
            else:
                # Same-lane connection: link to the previous process.
                # Get parent process, naive version
                parent_process = self.processes[-1]
                # Check if the last process' lane matches the lane of the
                # current output process. If not, get the last process
                # in the same lane
                if parent_process.lane and parent_process.lane != out_lane:
                    parent_process = [x for x in self.processes[::-1]
                                      if x.lane == out_lane][0]
                if parent_process.output_channel:
                    logger.debug(
                        "[{}] Updating input channel of output process"
                        " with '{}'".format(
                            p, parent_process.output_channel))
                    out_process.input_channel = parent_process.output_channel
            # Check for process dependencies
            if out_process.dependencies and not ignore_dependencies:
                logger.debug("[{}] Dependencies found for process '{}': "
                             "{}".format(p, p_out_name,
                                         out_process.dependencies))
                parent_lanes = self._get_fork_tree(out_lane)
                for dep in out_process.dependencies:
                    if not self._search_tree_backwards(dep, parent_lanes):
                        if auto_dependency:
                            self._add_dependency(
                                out_process, dep, in_lane, out_lane, p)
                        elif not self.export_parameters:
                            logger.error(colored_print(
                                "\nThe following dependency of the process"
                                " '{}' is missing: {}".format(p_out_name, dep),
                                "red_bold"))
                            sys.exit(1)
            self.processes.append(out_process)
        logger.debug("Completed connections: {}".format(self.processes))
        logger.debug("Fork tree: {}".format(self._fork_tree))
constant[Parses the process connections dictionaries into a process list
This method is called upon instantiation of the NextflowGenerator
class. Essentially, it sets the main input/output channel names of the
processes so that they can be linked correctly.
If a connection between two consecutive process is not possible due
to a mismatch in the input/output types, it exits with an error.
Returns
-------
]
call[name[logger].debug, parameter[constant[=============================]]]
call[name[logger].debug, parameter[constant[Building pipeline connections]]]
call[name[logger].debug, parameter[constant[=============================]]]
call[name[logger].debug, parameter[call[constant[Processing connections: {}].format, parameter[name[process_list]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0243880>, <ast.Name object at 0x7da1b0243370>]]] in starred[call[name[enumerate], parameter[name[process_list]]]] begin[:]
call[name[logger].debug, parameter[call[constant[Processing connection '{}': {}].format, parameter[name[p], name[con]]]]]
variable[in_lane] assign[=] call[call[name[con]][constant[input]]][constant[lane]]
variable[out_lane] assign[=] call[call[name[con]][constant[output]]][constant[lane]]
call[name[logger].debug, parameter[call[constant[[{}] Input lane: {}].format, parameter[name[p], name[in_lane]]]]]
call[name[logger].debug, parameter[call[constant[[{}] Output lane: {}].format, parameter[name[p], name[out_lane]]]]]
if compare[name[out_lane] greater[>] name[self].lanes] begin[:]
name[self].lanes assign[=] name[out_lane]
<ast.Tuple object at 0x7da1b02412a0> assign[=] call[name[self]._get_process_names, parameter[name[con], name[p]]]
if compare[name[p_out_name] <ast.NotIn object at 0x7da2590d7190> name[self].process_map] begin[:]
call[name[logger].error, parameter[call[name[colored_print], parameter[call[constant[
The process '{}' is not available.].format, parameter[name[p_out_name]]], constant[red_bold]]]]]
call[name[guess_process], parameter[name[p_out_name], name[self].process_map]]
call[name[sys].exit, parameter[constant[1]]]
variable[out_process] assign[=] call[call[name[self].process_map][name[p_out_name]], parameter[]]
if name[out_directives] begin[:]
call[name[out_process].update_attributes, parameter[name[out_directives]]]
variable[input_suf] assign[=] call[constant[{}_{}].format, parameter[name[in_lane], name[p]]]
variable[output_suf] assign[=] call[constant[{}_{}].format, parameter[name[out_lane], name[p]]]
call[name[logger].debug, parameter[call[constant[[{}] Setting main channels with input suffix '{}' and output suffix '{}'].format, parameter[name[p], name[input_suf], name[output_suf]]]]]
call[name[out_process].set_main_channel_names, parameter[name[input_suf], name[output_suf], name[out_lane]]]
if compare[name[p_in_name] not_equal[!=] constant[__init__]] begin[:]
variable[in_process] assign[=] call[call[name[self].process_map][name[p_in_name]], parameter[]]
call[name[logger].debug, parameter[call[constant[[{}] Testing connection between input and output processes].format, parameter[name[p]]]]]
call[name[self]._test_connection, parameter[name[in_process], name[out_process]]]
name[out_process].parent_lane assign[=] name[in_lane]
call[name[logger].debug, parameter[call[constant[[{}] Parent lane: {}].format, parameter[name[p], name[out_process].parent_lane]]]]
if compare[name[in_lane] not_equal[!=] name[out_lane]] begin[:]
call[name[logger].debug, parameter[call[constant[[{}] Connection is a fork. Adding lanes to fork list].format, parameter[name[p]]]]]
call[call[name[self]._fork_tree][name[in_lane]].append, parameter[name[out_lane]]]
<ast.Try object at 0x7da1b03e0430>
if <ast.BoolOp object at 0x7da1b03e02e0> begin[:]
call[name[logger].debug, parameter[call[constant[[{}] Dependencies found for process '{}': {}].format, parameter[name[p], name[p_out_name], name[out_process].dependencies]]]]
variable[parent_lanes] assign[=] call[name[self]._get_fork_tree, parameter[name[out_lane]]]
for taget[name[dep]] in starred[name[out_process].dependencies] begin[:]
if <ast.UnaryOp object at 0x7da1b02da050> begin[:]
if name[auto_dependency] begin[:]
call[name[self]._add_dependency, parameter[name[out_process], name[dep], name[in_lane], name[out_lane], name[p]]]
call[name[self].processes.append, parameter[name[out_process]]]
call[name[logger].debug, parameter[call[constant[Completed connections: {}].format, parameter[name[self].processes]]]]
call[name[logger].debug, parameter[call[constant[Fork tree: {}].format, parameter[name[self]._fork_tree]]]] | keyword[def] identifier[_build_connections] ( identifier[self] , identifier[process_list] , identifier[ignore_dependencies] ,
identifier[auto_dependency] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[process_list] ))
keyword[for] identifier[p] , identifier[con] keyword[in] identifier[enumerate] ( identifier[process_list] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[p] , identifier[con] ))
identifier[in_lane] = identifier[con] [ literal[string] ][ literal[string] ]
identifier[out_lane] = identifier[con] [ literal[string] ][ literal[string] ]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[p] , identifier[in_lane] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[p] , identifier[out_lane] ))
keyword[if] identifier[out_lane] > identifier[self] . identifier[lanes] :
identifier[self] . identifier[lanes] = identifier[out_lane]
identifier[p_in_name] , identifier[p_out_name] , identifier[out_directives] = identifier[self] . identifier[_get_process_names] (
identifier[con] , identifier[p] )
keyword[if] identifier[p_out_name] keyword[not] keyword[in] identifier[self] . identifier[process_map] :
identifier[logger] . identifier[error] ( identifier[colored_print] (
literal[string]
. identifier[format] ( identifier[p_out_name] ), literal[string] ))
identifier[guess_process] ( identifier[p_out_name] , identifier[self] . identifier[process_map] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[out_process] = identifier[self] . identifier[process_map] [ identifier[p_out_name] ]( identifier[template] = identifier[p_out_name] )
keyword[if] identifier[out_directives] :
identifier[out_process] . identifier[update_attributes] ( identifier[out_directives] )
identifier[input_suf] = literal[string] . identifier[format] ( identifier[in_lane] , identifier[p] )
identifier[output_suf] = literal[string] . identifier[format] ( identifier[out_lane] , identifier[p] )
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] (
identifier[p] , identifier[input_suf] , identifier[output_suf] ))
identifier[out_process] . identifier[set_main_channel_names] ( identifier[input_suf] , identifier[output_suf] , identifier[out_lane] )
keyword[if] identifier[p_in_name] != literal[string] :
identifier[in_process] = identifier[self] . identifier[process_map] [ identifier[p_in_name] ]( identifier[template] = identifier[p_in_name] )
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[p] ))
identifier[self] . identifier[_test_connection] ( identifier[in_process] , identifier[out_process] )
identifier[out_process] . identifier[parent_lane] = identifier[in_lane]
keyword[else] :
identifier[out_process] . identifier[parent_lane] = keyword[None]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[p] , identifier[out_process] . identifier[parent_lane] ))
keyword[if] identifier[in_lane] != identifier[out_lane] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[p] ))
identifier[self] . identifier[_fork_tree] [ identifier[in_lane] ]. identifier[append] ( identifier[out_lane] )
keyword[try] :
identifier[parent_process] =[
identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[processes] keyword[if] identifier[x] . identifier[lane] == identifier[in_lane] keyword[and]
identifier[x] . identifier[template] == identifier[p_in_name]
][ literal[int] ]
identifier[logger] . identifier[debug] (
literal[string]
literal[string] . identifier[format] ( identifier[p] , identifier[parent_process] ,
identifier[out_process] . identifier[input_channel] ))
identifier[parent_process] . identifier[update_main_forks] ( identifier[out_process] . identifier[input_channel] )
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[else] :
identifier[parent_process] = identifier[self] . identifier[processes] [- literal[int] ]
keyword[if] identifier[parent_process] . identifier[lane] keyword[and] identifier[parent_process] . identifier[lane] != identifier[out_lane] :
identifier[parent_process] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[processes] [::- literal[int] ]
keyword[if] identifier[x] . identifier[lane] == identifier[out_lane] ][ literal[int] ]
keyword[if] identifier[parent_process] . identifier[output_channel] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string] . identifier[format] (
identifier[p] , identifier[parent_process] . identifier[output_channel] ))
identifier[out_process] . identifier[input_channel] = identifier[parent_process] . identifier[output_channel]
keyword[if] identifier[out_process] . identifier[dependencies] keyword[and] keyword[not] identifier[ignore_dependencies] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[p] , identifier[p_out_name] ,
identifier[out_process] . identifier[dependencies] ))
identifier[parent_lanes] = identifier[self] . identifier[_get_fork_tree] ( identifier[out_lane] )
keyword[for] identifier[dep] keyword[in] identifier[out_process] . identifier[dependencies] :
keyword[if] keyword[not] identifier[self] . identifier[_search_tree_backwards] ( identifier[dep] , identifier[parent_lanes] ):
keyword[if] identifier[auto_dependency] :
identifier[self] . identifier[_add_dependency] (
identifier[out_process] , identifier[dep] , identifier[in_lane] , identifier[out_lane] , identifier[p] )
keyword[elif] keyword[not] identifier[self] . identifier[export_parameters] :
identifier[logger] . identifier[error] ( identifier[colored_print] (
literal[string]
literal[string] . identifier[format] ( identifier[p_out_name] , identifier[dep] ),
literal[string] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[self] . identifier[processes] . identifier[append] ( identifier[out_process] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[processes] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[_fork_tree] )) | def _build_connections(self, process_list, ignore_dependencies, auto_dependency):
"""Parses the process connections dictionaries into a process list
This method is called upon instantiation of the NextflowGenerator
class. Essentially, it sets the main input/output channel names of the
processes so that they can be linked correctly.
If a connection between two consecutive process is not possible due
to a mismatch in the input/output types, it exits with an error.
Returns
-------
"""
logger.debug('=============================')
logger.debug('Building pipeline connections')
logger.debug('=============================')
logger.debug('Processing connections: {}'.format(process_list))
for (p, con) in enumerate(process_list):
logger.debug("Processing connection '{}': {}".format(p, con))
# Get lanes
in_lane = con['input']['lane']
out_lane = con['output']['lane']
logger.debug('[{}] Input lane: {}'.format(p, in_lane))
logger.debug('[{}] Output lane: {}'.format(p, out_lane))
# Update the total number of lines of the pipeline
if out_lane > self.lanes:
self.lanes = out_lane # depends on [control=['if'], data=['out_lane']]
# Get process names and directives for the output process
(p_in_name, p_out_name, out_directives) = self._get_process_names(con, p)
# Check if process is available or correctly named
if p_out_name not in self.process_map:
logger.error(colored_print("\nThe process '{}' is not available.".format(p_out_name), 'red_bold'))
guess_process(p_out_name, self.process_map)
sys.exit(1) # depends on [control=['if'], data=['p_out_name']]
# Instance output process
out_process = self.process_map[p_out_name](template=p_out_name)
# Update directives, if provided
if out_directives:
out_process.update_attributes(out_directives) # depends on [control=['if'], data=[]]
# Set suffix strings for main input/output channels. Suffixes are
# based on the lane and the arbitrary and unique process id
# e.g.: 'process_1_1'
input_suf = '{}_{}'.format(in_lane, p)
output_suf = '{}_{}'.format(out_lane, p)
logger.debug("[{}] Setting main channels with input suffix '{}' and output suffix '{}'".format(p, input_suf, output_suf))
out_process.set_main_channel_names(input_suf, output_suf, out_lane)
# Instance input process, if it exists. In case of init, the
# output process forks from the raw input user data
if p_in_name != '__init__':
# Create instance of input process
in_process = self.process_map[p_in_name](template=p_in_name)
# Test if two processes can be connected by input/output types
logger.debug('[{}] Testing connection between input and output processes'.format(p))
self._test_connection(in_process, out_process)
out_process.parent_lane = in_lane # depends on [control=['if'], data=['p_in_name']]
else:
# When the input process is __init__, set the parent_lane
# to None. This will tell the engine that this process
# will receive the main input from the raw user input.
out_process.parent_lane = None
logger.debug('[{}] Parent lane: {}'.format(p, out_process.parent_lane))
# If the current connection is a fork, add it to the fork tree
if in_lane != out_lane:
logger.debug('[{}] Connection is a fork. Adding lanes to fork list'.format(p))
self._fork_tree[in_lane].append(out_lane)
# Update main output fork of parent process
try:
parent_process = [x for x in self.processes if x.lane == in_lane and x.template == p_in_name][0]
logger.debug("[{}] Updating main forks of parent fork '{}' with '{}'".format(p, parent_process, out_process.input_channel))
parent_process.update_main_forks(out_process.input_channel) # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['in_lane', 'out_lane']]
else:
# Get parent process, naive version
parent_process = self.processes[-1]
# Check if the last process' lane matches the lane of the
# current output process. If not, get the last process
# in the same lane
if parent_process.lane and parent_process.lane != out_lane:
parent_process = [x for x in self.processes[::-1] if x.lane == out_lane][0] # depends on [control=['if'], data=[]]
if parent_process.output_channel:
logger.debug("[{}] Updating input channel of output process with '{}'".format(p, parent_process.output_channel))
out_process.input_channel = parent_process.output_channel # depends on [control=['if'], data=[]]
# Check for process dependencies
if out_process.dependencies and (not ignore_dependencies):
logger.debug("[{}] Dependencies found for process '{}': {}".format(p, p_out_name, out_process.dependencies))
parent_lanes = self._get_fork_tree(out_lane)
for dep in out_process.dependencies:
if not self._search_tree_backwards(dep, parent_lanes):
if auto_dependency:
self._add_dependency(out_process, dep, in_lane, out_lane, p) # depends on [control=['if'], data=[]]
elif not self.export_parameters:
logger.error(colored_print("\nThe following dependency of the process '{}' is missing: {}".format(p_out_name, dep), 'red_bold'))
sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dep']] # depends on [control=['if'], data=[]]
self.processes.append(out_process) # depends on [control=['for'], data=[]]
logger.debug('Completed connections: {}'.format(self.processes))
logger.debug('Fork tree: {}'.format(self._fork_tree)) |
def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta E$ (kJ/mol)"
else:
ylabel = r"$\Delta E$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig | def function[plot_internal_energy, parameter[self, tmin, tmax, ntemp, ylim]]:
constant[
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
]
variable[temperatures] assign[=] call[name[np].linspace, parameter[name[tmin], name[tmax], name[ntemp]]]
if name[self].structure begin[:]
variable[ylabel] assign[=] constant[$\Delta E$ (kJ/mol)]
variable[fig] assign[=] call[name[self]._plot_thermo, parameter[name[self].dos.internal_energy, name[temperatures]]]
return[name[fig]] | keyword[def] identifier[plot_internal_energy] ( identifier[self] , identifier[tmin] , identifier[tmax] , identifier[ntemp] , identifier[ylim] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[temperatures] = identifier[np] . identifier[linspace] ( identifier[tmin] , identifier[tmax] , identifier[ntemp] )
keyword[if] identifier[self] . identifier[structure] :
identifier[ylabel] = literal[string]
keyword[else] :
identifier[ylabel] = literal[string]
identifier[fig] = identifier[self] . identifier[_plot_thermo] ( identifier[self] . identifier[dos] . identifier[internal_energy] , identifier[temperatures] , identifier[ylabel] = identifier[ylabel] , identifier[ylim] = identifier[ylim] ,
identifier[factor] = literal[int] ,** identifier[kwargs] )
keyword[return] identifier[fig] | def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = '$\\Delta E$ (kJ/mol)' # depends on [control=['if'], data=[]]
else:
ylabel = '$\\Delta E$ (kJ/mol-c)'
fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim, factor=0.001, **kwargs)
return fig |
def cmd_map(self, args):
'''map commands'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if args[0] == "icon":
if len(args) < 3:
print("Usage: map icon <lat> <lon> <icon>")
else:
lat = args[1]
lon = args[2]
flag = 'flag.png'
if len(args) > 3:
flag = args[3] + '.png'
icon = self.mpstate.map.icon(flag)
self.mpstate.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag),self.icon_counter),
(float(lat),float(lon)),
icon, layer=3, rotation=0, follow=False))
self.icon_counter += 1
elif args[0] == "set":
self.map_settings.command(args[1:])
self.mpstate.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness))
elif args[0] == "sethome":
self.cmd_set_home(args)
else:
print("usage: map <icon|set>") | def function[cmd_map, parameter[self, args]]:
constant[map commands]
from relative_module[MAVProxy.modules.mavproxy_map] import module[mp_slipmap]
if compare[call[name[args]][constant[0]] equal[==] constant[icon]] begin[:]
if compare[call[name[len], parameter[name[args]]] less[<] constant[3]] begin[:]
call[name[print], parameter[constant[Usage: map icon <lat> <lon> <icon>]]] | keyword[def] identifier[cmd_map] ( identifier[self] , identifier[args] ):
literal[string]
keyword[from] identifier[MAVProxy] . identifier[modules] . identifier[mavproxy_map] keyword[import] identifier[mp_slipmap]
keyword[if] identifier[args] [ literal[int] ]== literal[string] :
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[print] ( literal[string] )
keyword[else] :
identifier[lat] = identifier[args] [ literal[int] ]
identifier[lon] = identifier[args] [ literal[int] ]
identifier[flag] = literal[string]
keyword[if] identifier[len] ( identifier[args] )> literal[int] :
identifier[flag] = identifier[args] [ literal[int] ]+ literal[string]
identifier[icon] = identifier[self] . identifier[mpstate] . identifier[map] . identifier[icon] ( identifier[flag] )
identifier[self] . identifier[mpstate] . identifier[map] . identifier[add_object] ( identifier[mp_slipmap] . identifier[SlipIcon] ( literal[string] %( identifier[str] ( identifier[flag] ), identifier[self] . identifier[icon_counter] ),
( identifier[float] ( identifier[lat] ), identifier[float] ( identifier[lon] )),
identifier[icon] , identifier[layer] = literal[int] , identifier[rotation] = literal[int] , identifier[follow] = keyword[False] ))
identifier[self] . identifier[icon_counter] += literal[int]
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[map_settings] . identifier[command] ( identifier[args] [ literal[int] :])
identifier[self] . identifier[mpstate] . identifier[map] . identifier[add_object] ( identifier[mp_slipmap] . identifier[SlipBrightness] ( identifier[self] . identifier[map_settings] . identifier[brightness] ))
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[cmd_set_home] ( identifier[args] )
keyword[else] :
identifier[print] ( literal[string] ) | def cmd_map(self, args):
"""map commands"""
from MAVProxy.modules.mavproxy_map import mp_slipmap
if args[0] == 'icon':
if len(args) < 3:
print('Usage: map icon <lat> <lon> <icon>') # depends on [control=['if'], data=[]]
else:
lat = args[1]
lon = args[2]
flag = 'flag.png'
if len(args) > 3:
flag = args[3] + '.png' # depends on [control=['if'], data=[]]
icon = self.mpstate.map.icon(flag)
self.mpstate.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag), self.icon_counter), (float(lat), float(lon)), icon, layer=3, rotation=0, follow=False))
self.icon_counter += 1 # depends on [control=['if'], data=[]]
elif args[0] == 'set':
self.map_settings.command(args[1:])
self.mpstate.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness)) # depends on [control=['if'], data=[]]
elif args[0] == 'sethome':
self.cmd_set_home(args) # depends on [control=['if'], data=[]]
else:
print('usage: map <icon|set>') |
def get_queryset(self):
"""
Returns only objects which are accessible to the current user.
If user is not authenticated all public objects will be returned.
Model must implement AccessLevelManager!
"""
return self.queryset.all().accessible_to(user=self.request.user) | def function[get_queryset, parameter[self]]:
constant[
Returns only objects which are accessible to the current user.
If user is not authenticated all public objects will be returned.
Model must implement AccessLevelManager!
]
return[call[call[name[self].queryset.all, parameter[]].accessible_to, parameter[]]] | keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[queryset] . identifier[all] (). identifier[accessible_to] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] ) | def get_queryset(self):
"""
Returns only objects which are accessible to the current user.
If user is not authenticated all public objects will be returned.
Model must implement AccessLevelManager!
"""
return self.queryset.all().accessible_to(user=self.request.user) |
async def start_transaction(connection_name: Optional[str] = None) -> BaseTransactionWrapper:
"""
Function to manually control your transaction.
Returns transaction object with ``.rollback()`` and ``.commit()`` methods.
All db calls in same coroutine context will run into transaction
before ending transaction with above methods.
:param connection_name: name of connection to run with, optional if you have only
one db connection
"""
connection = _get_connection(connection_name)
transaction = connection._in_transaction()
await transaction.start()
return transaction | <ast.AsyncFunctionDef object at 0x7da1b16dc2e0> | keyword[async] keyword[def] identifier[start_transaction] ( identifier[connection_name] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[BaseTransactionWrapper] :
literal[string]
identifier[connection] = identifier[_get_connection] ( identifier[connection_name] )
identifier[transaction] = identifier[connection] . identifier[_in_transaction] ()
keyword[await] identifier[transaction] . identifier[start] ()
keyword[return] identifier[transaction] | async def start_transaction(connection_name: Optional[str]=None) -> BaseTransactionWrapper:
"""
Function to manually control your transaction.
Returns transaction object with ``.rollback()`` and ``.commit()`` methods.
All db calls in same coroutine context will run into transaction
before ending transaction with above methods.
:param connection_name: name of connection to run with, optional if you have only
one db connection
"""
connection = _get_connection(connection_name)
transaction = connection._in_transaction()
await transaction.start()
return transaction |
def get(self, request, *args, **kwargs):
"""
This method handles GET requests.
If a GET request reaches this point, the wizard assumes that the user
just starts at the first step or wants to restart the process.
The data of the wizard will be resetted before rendering the first step.
"""
self.storage.reset()
# reset the current step to the first step.
self.storage.current_step = self.steps.first
return self.render(self.get_form()) | def function[get, parameter[self, request]]:
constant[
This method handles GET requests.
If a GET request reaches this point, the wizard assumes that the user
just starts at the first step or wants to restart the process.
The data of the wizard will be resetted before rendering the first step.
]
call[name[self].storage.reset, parameter[]]
name[self].storage.current_step assign[=] name[self].steps.first
return[call[name[self].render, parameter[call[name[self].get_form, parameter[]]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[storage] . identifier[reset] ()
identifier[self] . identifier[storage] . identifier[current_step] = identifier[self] . identifier[steps] . identifier[first]
keyword[return] identifier[self] . identifier[render] ( identifier[self] . identifier[get_form] ()) | def get(self, request, *args, **kwargs):
"""
This method handles GET requests.
If a GET request reaches this point, the wizard assumes that the user
just starts at the first step or wants to restart the process.
The data of the wizard will be resetted before rendering the first step.
"""
self.storage.reset()
# reset the current step to the first step.
self.storage.current_step = self.steps.first
return self.render(self.get_form()) |
def server_pxe(host=None,
admin_username=None,
admin_password=None):
'''
Configure server to PXE perform a one off PXE boot
CLI Example:
.. code-block:: bash
salt dell dracr.server_pxe
'''
if __execute_cmd('config -g cfgServerInfo -o cfgServerFirstBootDevice PXE',
host=host, admin_username=admin_username,
admin_password=admin_password):
if __execute_cmd('config -g cfgServerInfo -o cfgServerBootOnce 1',
host=host, admin_username=admin_username,
admin_password=admin_password):
return server_reboot
else:
log.warning('failed to set boot order')
return False
log.warning('failed to configure PXE boot')
return False | def function[server_pxe, parameter[host, admin_username, admin_password]]:
constant[
Configure server to PXE perform a one off PXE boot
CLI Example:
.. code-block:: bash
salt dell dracr.server_pxe
]
if call[name[__execute_cmd], parameter[constant[config -g cfgServerInfo -o cfgServerFirstBootDevice PXE]]] begin[:]
if call[name[__execute_cmd], parameter[constant[config -g cfgServerInfo -o cfgServerBootOnce 1]]] begin[:]
return[name[server_reboot]]
call[name[log].warning, parameter[constant[failed to configure PXE boot]]]
return[constant[False]] | keyword[def] identifier[server_pxe] ( identifier[host] = keyword[None] ,
identifier[admin_username] = keyword[None] ,
identifier[admin_password] = keyword[None] ):
literal[string]
keyword[if] identifier[__execute_cmd] ( literal[string] ,
identifier[host] = identifier[host] , identifier[admin_username] = identifier[admin_username] ,
identifier[admin_password] = identifier[admin_password] ):
keyword[if] identifier[__execute_cmd] ( literal[string] ,
identifier[host] = identifier[host] , identifier[admin_username] = identifier[admin_username] ,
identifier[admin_password] = identifier[admin_password] ):
keyword[return] identifier[server_reboot]
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
identifier[log] . identifier[warning] ( literal[string] )
keyword[return] keyword[False] | def server_pxe(host=None, admin_username=None, admin_password=None):
"""
Configure server to PXE perform a one off PXE boot
CLI Example:
.. code-block:: bash
salt dell dracr.server_pxe
"""
if __execute_cmd('config -g cfgServerInfo -o cfgServerFirstBootDevice PXE', host=host, admin_username=admin_username, admin_password=admin_password):
if __execute_cmd('config -g cfgServerInfo -o cfgServerBootOnce 1', host=host, admin_username=admin_username, admin_password=admin_password):
return server_reboot # depends on [control=['if'], data=[]]
else:
log.warning('failed to set boot order')
return False # depends on [control=['if'], data=[]]
log.warning('failed to configure PXE boot')
return False |
def add_statement(self, statement_obj):
"""
Adds a statement object to the layer
@type statement_obj: L{Cstatement}
@param statement_obj: the statement object
"""
if statement_obj.get_id() in self.idx:
raise ValueError("Statement with id {} already exists!"
.format(statement_obj.get_id()))
self.node.append(statement_obj.get_node())
self.idx[statement_obj.get_id()] = statement_obj | def function[add_statement, parameter[self, statement_obj]]:
constant[
Adds a statement object to the layer
@type statement_obj: L{Cstatement}
@param statement_obj: the statement object
]
if compare[call[name[statement_obj].get_id, parameter[]] in name[self].idx] begin[:]
<ast.Raise object at 0x7da18eb57010>
call[name[self].node.append, parameter[call[name[statement_obj].get_node, parameter[]]]]
call[name[self].idx][call[name[statement_obj].get_id, parameter[]]] assign[=] name[statement_obj] | keyword[def] identifier[add_statement] ( identifier[self] , identifier[statement_obj] ):
literal[string]
keyword[if] identifier[statement_obj] . identifier[get_id] () keyword[in] identifier[self] . identifier[idx] :
keyword[raise] identifier[ValueError] ( literal[string]
. identifier[format] ( identifier[statement_obj] . identifier[get_id] ()))
identifier[self] . identifier[node] . identifier[append] ( identifier[statement_obj] . identifier[get_node] ())
identifier[self] . identifier[idx] [ identifier[statement_obj] . identifier[get_id] ()]= identifier[statement_obj] | def add_statement(self, statement_obj):
"""
Adds a statement object to the layer
@type statement_obj: L{Cstatement}
@param statement_obj: the statement object
"""
if statement_obj.get_id() in self.idx:
raise ValueError('Statement with id {} already exists!'.format(statement_obj.get_id())) # depends on [control=['if'], data=[]]
self.node.append(statement_obj.get_node())
self.idx[statement_obj.get_id()] = statement_obj |
def _execCommand(Argv, collect_missing):
r"""Worker of execCommand.
"""
if not Argv:
raise HandledException('Please specify a command!')
RouteParts = Argv[0].split('/')
Args, KwArgs = getDigestableArgs(Argv[1:])
ResolvedMember = getDescendant(BaseGroup, RouteParts[:])
if isinstance(ResolvedMember, Group):
raise HandledException('Please specify a task.', Member=ResolvedMember)
if not isinstance(ResolvedMember, Task):
raise HandledException('No such task.', Member=BaseGroup)
return ResolvedMember.__collect_n_call__(*Args, **KwArgs) if collect_missing else ResolvedMember(*Args, **KwArgs) | def function[_execCommand, parameter[Argv, collect_missing]]:
constant[Worker of execCommand.
]
if <ast.UnaryOp object at 0x7da18f09d240> begin[:]
<ast.Raise object at 0x7da18f09f430>
variable[RouteParts] assign[=] call[call[name[Argv]][constant[0]].split, parameter[constant[/]]]
<ast.Tuple object at 0x7da18f09ef50> assign[=] call[name[getDigestableArgs], parameter[call[name[Argv]][<ast.Slice object at 0x7da20c6c5f00>]]]
variable[ResolvedMember] assign[=] call[name[getDescendant], parameter[name[BaseGroup], call[name[RouteParts]][<ast.Slice object at 0x7da20c6c4670>]]]
if call[name[isinstance], parameter[name[ResolvedMember], name[Group]]] begin[:]
<ast.Raise object at 0x7da20c6c7a90>
if <ast.UnaryOp object at 0x7da20c6c4280> begin[:]
<ast.Raise object at 0x7da20c6c53c0>
return[<ast.IfExp object at 0x7da20c6c41f0>] | keyword[def] identifier[_execCommand] ( identifier[Argv] , identifier[collect_missing] ):
literal[string]
keyword[if] keyword[not] identifier[Argv] :
keyword[raise] identifier[HandledException] ( literal[string] )
identifier[RouteParts] = identifier[Argv] [ literal[int] ]. identifier[split] ( literal[string] )
identifier[Args] , identifier[KwArgs] = identifier[getDigestableArgs] ( identifier[Argv] [ literal[int] :])
identifier[ResolvedMember] = identifier[getDescendant] ( identifier[BaseGroup] , identifier[RouteParts] [:])
keyword[if] identifier[isinstance] ( identifier[ResolvedMember] , identifier[Group] ):
keyword[raise] identifier[HandledException] ( literal[string] , identifier[Member] = identifier[ResolvedMember] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[ResolvedMember] , identifier[Task] ):
keyword[raise] identifier[HandledException] ( literal[string] , identifier[Member] = identifier[BaseGroup] )
keyword[return] identifier[ResolvedMember] . identifier[__collect_n_call__] (* identifier[Args] ,** identifier[KwArgs] ) keyword[if] identifier[collect_missing] keyword[else] identifier[ResolvedMember] (* identifier[Args] ,** identifier[KwArgs] ) | def _execCommand(Argv, collect_missing):
"""Worker of execCommand.
"""
if not Argv:
raise HandledException('Please specify a command!') # depends on [control=['if'], data=[]]
RouteParts = Argv[0].split('/')
(Args, KwArgs) = getDigestableArgs(Argv[1:])
ResolvedMember = getDescendant(BaseGroup, RouteParts[:])
if isinstance(ResolvedMember, Group):
raise HandledException('Please specify a task.', Member=ResolvedMember) # depends on [control=['if'], data=[]]
if not isinstance(ResolvedMember, Task):
raise HandledException('No such task.', Member=BaseGroup) # depends on [control=['if'], data=[]]
return ResolvedMember.__collect_n_call__(*Args, **KwArgs) if collect_missing else ResolvedMember(*Args, **KwArgs) |
def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg)
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns)
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f)
cfg = _read_yaml('local/merge.yml')
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return data, labels | def function[merge, parameter[data, cfg]]:
constant[ WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
]
if compare[call[name[type], parameter[name[cfg]]] equal[==] name[str]] begin[:]
variable[cfg] assign[=] call[name[_read_yaml], parameter[name[cfg]]]
if compare[name[cfg] equal[==] constant[None]] begin[:]
variable[cfg] assign[=] call[name[_read_yaml], parameter[constant[local/merge.yml]]]
if compare[name[cfg] equal[==] constant[None]] begin[:]
call[name[print], parameter[constant[creating merge.yml config file draft ...]]]
variable[cfg] assign[=] dictionary[[], []]
for taget[name[key]] in starred[name[data]] begin[:]
call[name[cfg]][name[key]] assign[=] call[name[list], parameter[call[name[data]][name[key]].columns]]
with call[name[open], parameter[constant[local/merge.yml], constant[xt]]] begin[:]
call[name[yaml].dump, parameter[name[cfg], name[f]]]
variable[cfg] assign[=] call[name[_read_yaml], parameter[constant[local/merge.yml]]]
variable[labels] assign[=] constant[None]
return[tuple[[<ast.Name object at 0x7da18f720b80>, <ast.Name object at 0x7da18f720880>]]] | keyword[def] identifier[merge] ( identifier[data] , identifier[cfg] = keyword[None] ):
literal[string]
keyword[if] identifier[type] ( identifier[cfg] )== identifier[str] :
identifier[cfg] = identifier[_read_yaml] ( identifier[cfg] )
keyword[if] identifier[cfg] == keyword[None] :
identifier[cfg] = identifier[_read_yaml] ( literal[string] )
keyword[if] identifier[cfg] == keyword[None] :
identifier[print] ( literal[string] )
identifier[cfg] ={}
keyword[for] identifier[key] keyword[in] identifier[data] :
identifier[cfg] [ identifier[key] ]= identifier[list] ( identifier[data] [ identifier[key] ]. identifier[columns] )
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[yaml] . identifier[dump] ( identifier[cfg] , identifier[f] )
identifier[cfg] = identifier[_read_yaml] ( literal[string] )
identifier[labels] = keyword[None]
keyword[return] identifier[data] , identifier[labels] | def merge(data, cfg=None):
""" WORK IN PROGRESS
Concat, merge, join, drop keys in dictionary of pandas.DataFrames
into one pandas.DataFrame (data) and a pandas.Series (labels).
Parameters
----------
data : dict of pandas.DataFrames
Result of icy.read()
cfg : dict or str, optional
Dictionary of actions to perform on data
or str with path to YAML, that will be parsed.
Returns
-------
data : pandas.DataFrame
The aggregated dataset
labels : pandas.Series
The target variable for analysis of the dataset,
can have fewer samples than the aggregated dataset
Notes
-----
"""
# go from a dict of dataframes (data) to one dataframe (data) and one series (labels)
# pd.concat([df1, df2], join, join_axes, ignore_index) and pd.merge(left, right, how, on, suffixes)
# should be easy to iterate from normalized tables to a fully joined set of dataframes
if type(cfg) == str:
cfg = _read_yaml(cfg) # depends on [control=['if'], data=[]]
if cfg == None:
cfg = _read_yaml('local/merge.yml')
if cfg == None:
print('creating merge.yml config file draft ...')
cfg = {}
# find all tables with identical column names
# if no common key-col
# concat along rows, add col (src)
# e.g. chimps
# find all tables with same length
# if no duplicate column names
# concat along columns
# find master table (by length?)
# from smalles to biggest table
# find possible key-cols by uniques == len
# find bigger tables with common column names -> cands
# check for highest overlap-ratio of uniques -> cand (prefer smaller table if equal ratio)
# join table on best cand
# if ratio below treshold put table on unidentified list
for key in data:
cfg[key] = list(data[key].columns) # depends on [control=['for'], data=['key']]
with open('local/merge.yml', 'xt') as f:
yaml.dump(cfg, f) # depends on [control=['with'], data=['f']]
cfg = _read_yaml('local/merge.yml') # depends on [control=['if'], data=['cfg']] # depends on [control=['if'], data=['cfg']]
# if cfg == None:
# if not os.path.exists(default_cfg):
# create default_cfg draft
# else:
# join on default_cfg
# report join_result
# else:
# join on cfg
# report join_result
labels = None
return (data, labels) |
def _extract_and_handle_bgp4_withdraws(self, withdraw_list):
    """Extracts and processes withdrawn routes from a BGP4 UPDATE.

    Assumes MPBGP capability is enabled.

    Parameters:
    - withdraw_list: list of withdrawn NLRIs; assumed to have already
      been checked for all BGP message errors. An empty list is treated
      as an End-of-RIB (EOR) marker for the IPv4 unicast family.

    Extracted withdraws are added to appropriate *Destination* for further
    processing.
    """
    # Plain (non-MP) BGP4 withdraws always carry IPv4 unicast routes.
    msg_rf = RF_IPv4_UC
    w_nlris = withdraw_list
    if not w_nlris:
        # If this is EOR of some kind, handle it
        self._handle_eor(msg_rf)
    for w_nlri in w_nlris:
        # Build a withdraw path object for this NLRI.
        w_path = bgp_utils.create_path(
            self,
            w_nlri,
            is_withdraw=True
        )
        # Run the in-bound filter; `block` tells us whether the path is
        # dropped, `blocked_cause` names the matching filter (for logging).
        block, blocked_cause = self._apply_in_filter(w_path)
        received_route = ReceivedRoute(w_path, self, block)
        nlri_str = w_nlri.formatted_nlri_str
        # Remove the route from adj-RIB-in (if present) and notify
        # listeners of the change.
        if nlri_str in self._adj_rib_in:
            del self._adj_rib_in[nlri_str]
            self._signal_bus.adj_rib_in_changed(self, received_route)
        if not block:
            # Update appropriate table with withdraws.
            tm = self._core_service.table_manager
            tm.learn_path(w_path)
        else:
            LOG.debug('prefix : %s is blocked by in-bound filter: %s',
                      nlri_str, blocked_cause)
constant[Extracts withdraws advertised in the given update message's
*MpUnReachNlri* attribute.
Assumes MPBGP capability is enabled.
Parameters:
- update_msg: (Update) is assumed to be checked for all bgp
message errors.
Extracted withdraws are added to appropriate *Destination* for further
processing.
]
variable[msg_rf] assign[=] name[RF_IPv4_UC]
variable[w_nlris] assign[=] name[withdraw_list]
if <ast.UnaryOp object at 0x7da1b1bad690> begin[:]
call[name[self]._handle_eor, parameter[name[msg_rf]]]
for taget[name[w_nlri]] in starred[name[w_nlris]] begin[:]
variable[w_path] assign[=] call[name[bgp_utils].create_path, parameter[name[self], name[w_nlri]]]
<ast.Tuple object at 0x7da1b1bacf70> assign[=] call[name[self]._apply_in_filter, parameter[name[w_path]]]
variable[received_route] assign[=] call[name[ReceivedRoute], parameter[name[w_path], name[self], name[block]]]
variable[nlri_str] assign[=] name[w_nlri].formatted_nlri_str
if compare[name[nlri_str] in name[self]._adj_rib_in] begin[:]
<ast.Delete object at 0x7da1b1bac880>
call[name[self]._signal_bus.adj_rib_in_changed, parameter[name[self], name[received_route]]]
if <ast.UnaryOp object at 0x7da1b1baf460> begin[:]
variable[tm] assign[=] name[self]._core_service.table_manager
call[name[tm].learn_path, parameter[name[w_path]]] | keyword[def] identifier[_extract_and_handle_bgp4_withdraws] ( identifier[self] , identifier[withdraw_list] ):
literal[string]
identifier[msg_rf] = identifier[RF_IPv4_UC]
identifier[w_nlris] = identifier[withdraw_list]
keyword[if] keyword[not] identifier[w_nlris] :
identifier[self] . identifier[_handle_eor] ( identifier[msg_rf] )
keyword[for] identifier[w_nlri] keyword[in] identifier[w_nlris] :
identifier[w_path] = identifier[bgp_utils] . identifier[create_path] (
identifier[self] ,
identifier[w_nlri] ,
identifier[is_withdraw] = keyword[True]
)
identifier[block] , identifier[blocked_cause] = identifier[self] . identifier[_apply_in_filter] ( identifier[w_path] )
identifier[received_route] = identifier[ReceivedRoute] ( identifier[w_path] , identifier[self] , identifier[block] )
identifier[nlri_str] = identifier[w_nlri] . identifier[formatted_nlri_str]
keyword[if] identifier[nlri_str] keyword[in] identifier[self] . identifier[_adj_rib_in] :
keyword[del] identifier[self] . identifier[_adj_rib_in] [ identifier[nlri_str] ]
identifier[self] . identifier[_signal_bus] . identifier[adj_rib_in_changed] ( identifier[self] , identifier[received_route] )
keyword[if] keyword[not] identifier[block] :
identifier[tm] = identifier[self] . identifier[_core_service] . identifier[table_manager]
identifier[tm] . identifier[learn_path] ( identifier[w_path] )
keyword[else] :
identifier[LOG] . identifier[debug] ( literal[string] ,
identifier[nlri_str] , identifier[blocked_cause] ) | def _extract_and_handle_bgp4_withdraws(self, withdraw_list):
"""Extracts withdraws advertised in the given update message's
*MpUnReachNlri* attribute.
Assumes MPBGP capability is enabled.
Parameters:
- update_msg: (Update) is assumed to be checked for all bgp
message errors.
Extracted withdraws are added to appropriate *Destination* for further
processing.
"""
msg_rf = RF_IPv4_UC
w_nlris = withdraw_list
if not w_nlris:
# If this is EOR of some kind, handle it
self._handle_eor(msg_rf) # depends on [control=['if'], data=[]]
for w_nlri in w_nlris:
w_path = bgp_utils.create_path(self, w_nlri, is_withdraw=True)
(block, blocked_cause) = self._apply_in_filter(w_path)
received_route = ReceivedRoute(w_path, self, block)
nlri_str = w_nlri.formatted_nlri_str
if nlri_str in self._adj_rib_in:
del self._adj_rib_in[nlri_str]
self._signal_bus.adj_rib_in_changed(self, received_route) # depends on [control=['if'], data=['nlri_str']]
if not block:
# Update appropriate table with withdraws.
tm = self._core_service.table_manager
tm.learn_path(w_path) # depends on [control=['if'], data=[]]
else:
LOG.debug('prefix : %s is blocked by in-bound filter: %s', nlri_str, blocked_cause) # depends on [control=['for'], data=['w_nlri']] |
def dt64_to_dt(dt64):
    """
    Safely converts NumPy datetime64 to a naive (UTC) datetime object.

    Parameters
    ----------
    dt64 : numpy.datetime64
        Timestamp to convert; any datetime64 unit is accepted.

    Returns
    -------
    datetime.datetime
        Naive datetime representing the same UTC instant.
    """
    # Seconds since the Unix epoch, independent of the datetime64 unit.
    ts = (dt64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; build an
    # aware UTC datetime and drop the tzinfo to keep the naive return type.
    return dt.datetime.fromtimestamp(ts, dt.timezone.utc).replace(tzinfo=None)
constant[
Safely converts NumPy datetime64 to a datetime object.
]
variable[ts] assign[=] binary_operation[binary_operation[name[dt64] - call[name[np].datetime64, parameter[constant[1970-01-01T00:00:00]]]] / call[name[np].timedelta64, parameter[constant[1], constant[s]]]]
return[call[name[dt].datetime.utcfromtimestamp, parameter[name[ts]]]] | keyword[def] identifier[dt64_to_dt] ( identifier[dt64] ):
literal[string]
identifier[ts] =( identifier[dt64] - identifier[np] . identifier[datetime64] ( literal[string] ))/ identifier[np] . identifier[timedelta64] ( literal[int] , literal[string] )
keyword[return] identifier[dt] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[ts] ) | def dt64_to_dt(dt64):
"""
Safely converts NumPy datetime64 to a datetime object.
"""
ts = (dt64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
return dt.datetime.utcfromtimestamp(ts) |
def apply_T4(word):
    '''An agglutination diphthong that ends in /u, y/ usually contains a
    syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
    [va.ka.ut.taa].'''
    syllables = word.split('.')
    for idx, syll in enumerate(syllables):
        # Only unstressed (odd-indexed) syllables ending in a consonant
        # are candidates; first, third, etc. syllables receive stress (WSP).
        if not (is_consonant(syll[-1]) and idx % 2 != 0):
            continue
        # The syllable must be word-final (-C#) or followed by a
        # consonant-initial syllable (-CCV).
        at_word_end = idx + 1 == len(syllables)
        if not (at_word_end or is_consonant(syllables[idx + 1][0])):
            continue
        match = u_or_y_final_diphthongs(syll)
        if match and not is_long(match.group(1)):
            # Insert the boundary inside the short /u, y/-final diphthong.
            split_at = match.start(1) + 1
            syllables[idx] = syll[:split_at] + '.' + syll[split_at:]
    new_word = '.'.join(syllables)
    rule = ' T4' if word != new_word else ''
    return new_word, rule
constant[An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].]
variable[WORD] assign[=] call[name[word].split, parameter[constant[.]]]
for taget[tuple[[<ast.Name object at 0x7da1b11e0430>, <ast.Name object at 0x7da1b11e3ca0>]]] in starred[call[name[enumerate], parameter[name[WORD]]]] begin[:]
if <ast.BoolOp object at 0x7da1b11e18a0> begin[:]
if <ast.BoolOp object at 0x7da1b11e00a0> begin[:]
variable[vv] assign[=] call[name[u_or_y_final_diphthongs], parameter[name[v]]]
if <ast.BoolOp object at 0x7da1b11e24d0> begin[:]
variable[I] assign[=] binary_operation[call[name[vv].start, parameter[constant[1]]] + constant[1]]
call[name[WORD]][name[i]] assign[=] binary_operation[binary_operation[call[name[v]][<ast.Slice object at 0x7da1b1178880>] + constant[.]] + call[name[v]][<ast.Slice object at 0x7da1b1178040>]]
variable[WORD] assign[=] call[constant[.].join, parameter[name[WORD]]]
variable[RULE] assign[=] <ast.IfExp object at 0x7da1b1178d00>
return[tuple[[<ast.Name object at 0x7da1b1178f70>, <ast.Name object at 0x7da1b11788b0>]]] | keyword[def] identifier[apply_T4] ( identifier[word] ):
literal[string]
identifier[WORD] = identifier[word] . identifier[split] ( literal[string] )
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[WORD] ):
keyword[if] identifier[is_consonant] ( identifier[v] [- literal[int] ]) keyword[and] identifier[i] % literal[int] != literal[int] :
keyword[if] identifier[i] + literal[int] == identifier[len] ( identifier[WORD] ) keyword[or] identifier[is_consonant] ( identifier[WORD] [ identifier[i] + literal[int] ][ literal[int] ]):
identifier[vv] = identifier[u_or_y_final_diphthongs] ( identifier[v] )
keyword[if] identifier[vv] keyword[and] keyword[not] identifier[is_long] ( identifier[vv] . identifier[group] ( literal[int] )):
identifier[I] = identifier[vv] . identifier[start] ( literal[int] )+ literal[int]
identifier[WORD] [ identifier[i] ]= identifier[v] [: identifier[I] ]+ literal[string] + identifier[v] [ identifier[I] :]
identifier[WORD] = literal[string] . identifier[join] ( identifier[WORD] )
identifier[RULE] = literal[string] keyword[if] identifier[word] != identifier[WORD] keyword[else] literal[string]
keyword[return] identifier[WORD] , identifier[RULE] | def apply_T4(word):
"""An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa]."""
WORD = word.split('.')
for (i, v) in enumerate(WORD):
# i % 2 != 0 prevents this rule from applying to first, third, etc.
# syllables, which receive stress (WSP)
if is_consonant(v[-1]) and i % 2 != 0:
if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]):
vv = u_or_y_final_diphthongs(v)
if vv and (not is_long(vv.group(1))):
I = vv.start(1) + 1
WORD[i] = v[:I] + '.' + v[I:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
WORD = '.'.join(WORD)
RULE = ' T4' if word != WORD else ''
return (WORD, RULE) |
def modules(self):
    """Yield pages of modules, sorted by latest release."""
    query = {
        self.PLIMIT: self.max_items,
        self.PSORT_BY: self.VLATEST_RELEASE,
    }
    # Pagination is handled by _fetch; pass its pages straight through.
    yield from self._fetch(self.RMODULES, query)
constant[Fetch modules pages.]
variable[resource] assign[=] name[self].RMODULES
variable[params] assign[=] dictionary[[<ast.Attribute object at 0x7da2054a6620>, <ast.Attribute object at 0x7da2054a5a20>], [<ast.Attribute object at 0x7da2054a6e90>, <ast.Attribute object at 0x7da2054a40d0>]]
for taget[name[page]] in starred[call[name[self]._fetch, parameter[name[resource], name[params]]]] begin[:]
<ast.Yield object at 0x7da18f00e980> | keyword[def] identifier[modules] ( identifier[self] ):
literal[string]
identifier[resource] = identifier[self] . identifier[RMODULES]
identifier[params] ={
identifier[self] . identifier[PLIMIT] : identifier[self] . identifier[max_items] ,
identifier[self] . identifier[PSORT_BY] : identifier[self] . identifier[VLATEST_RELEASE]
}
keyword[for] identifier[page] keyword[in] identifier[self] . identifier[_fetch] ( identifier[resource] , identifier[params] ):
keyword[yield] identifier[page] | def modules(self):
"""Fetch modules pages."""
resource = self.RMODULES
params = {self.PLIMIT: self.max_items, self.PSORT_BY: self.VLATEST_RELEASE}
for page in self._fetch(resource, params):
yield page # depends on [control=['for'], data=['page']] |
def term_matrix(idlist, subject_category, taxon, **kwargs):
    """
    Intersection between annotated objects.

    For every ordered pair of terms in *idlist*, builds a 2x2 contingency
    table over the annotated subjects::

                 P1    not(P1)
        F1        0       5
        not(F1)   6       0

    Parameters
    ----------
    idlist : list
        Ontology class IDs to compare pairwise.
    subject_category : str
        Category of the annotated subjects passed to search_associations.
    taxon : str
        Subject taxon restriction.
    **kwargs
        Forwarded to search_associations().

    Returns
    -------
    list of dict
        One cell per ordered term pair with keys 'c'/'d' (term IDs),
        'nc'/'nd' (per-term subject counts), 'n' (intersection size) and
        'p_l'/'p_g' (one-sided Fisher exact p-values for under- and
        over-representation).
    """
    results = search_associations(objects=idlist,
                                  subject_taxon=taxon,
                                  subject_category=subject_category,
                                  select_fields=[M.SUBJECT, M.OBJECT_CLOSURE],
                                  facet_fields=[],
                                  rows=-1,
                                  include_raw=True,
                                  **kwargs)
    docs = results['raw'].docs

    # Group annotated subjects under each term of interest. A set keeps
    # membership tests O(1) instead of scanning idlist per closure entry.
    id_set = set(idlist)
    subjects_per_term = {}
    all_subjects = set()
    for doc in docs:
        all_subjects.add(doc[M.SUBJECT])
        for closure_term in doc[M.OBJECT_CLOSURE]:
            if closure_term in id_set:
                subjects_per_term.setdefault(closure_term, []).append(
                    doc[M.SUBJECT])
    pop_n = len(all_subjects)

    # Build each term's subject set once; previously sets were rebuilt
    # inside the pairwise loop (O(T^2) constructions). Terms with no
    # annotations get an empty set instead of raising KeyError.
    subject_sets = {t: set(subjects_per_term.get(t, [])) for t in idlist}

    cells = []
    for cx in idlist:
        csubjs = subject_sets[cx]
        for dx in idlist:
            dsubjs = subject_sets[dx]
            a = len(csubjs.intersection(dsubjs))
            b = len(csubjs) - a
            c = len(dsubjs) - a
            d = pop_n - len(dsubjs) - b
            ctable = [[a, b], [c, d]]
            # One-sided Fisher exact tests: depletion ('less') and
            # enrichment ('greater') of the observed intersection.
            _, p_under = sp.stats.fisher_exact(ctable, 'less')
            _, p_over = sp.stats.fisher_exact(ctable, 'greater')
            cells.append({'c': cx, 'd': dx,
                          'nc': len(csubjs),
                          'nd': len(dsubjs),
                          'n': a,
                          'p_l': p_under,
                          'p_g': p_over
                          })
    return cells
constant[
Intersection between annotated objects
P1 not(P1)
F1 0 5
not(F1) 6 0
]
variable[results] assign[=] call[name[search_associations], parameter[]]
variable[docs] assign[=] call[name[results]][constant[raw]].docs
variable[subjects_per_term] assign[=] dictionary[[], []]
variable[smap] assign[=] dictionary[[], []]
for taget[name[d]] in starred[name[docs]] begin[:]
call[name[smap]][call[name[d]][name[M].SUBJECT]] assign[=] constant[1]
for taget[name[c]] in starred[call[name[d]][name[M].OBJECT_CLOSURE]] begin[:]
if compare[name[c] in name[idlist]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[subjects_per_term]] begin[:]
call[name[subjects_per_term]][name[c]] assign[=] list[[]]
call[call[name[subjects_per_term]][name[c]].append, parameter[call[name[d]][name[M].SUBJECT]]]
variable[pop_n] assign[=] call[name[len], parameter[call[name[smap].keys, parameter[]]]]
variable[cells] assign[=] list[[]]
for taget[name[cx]] in starred[name[idlist]] begin[:]
variable[csubjs] assign[=] call[name[set], parameter[call[name[subjects_per_term]][name[cx]]]]
for taget[name[dx]] in starred[name[idlist]] begin[:]
variable[dsubjs] assign[=] call[name[set], parameter[call[name[subjects_per_term]][name[dx]]]]
variable[a] assign[=] call[name[len], parameter[call[name[csubjs].intersection, parameter[name[dsubjs]]]]]
variable[b] assign[=] binary_operation[call[name[len], parameter[name[csubjs]]] - name[a]]
variable[c] assign[=] binary_operation[call[name[len], parameter[name[dsubjs]]] - name[a]]
variable[d] assign[=] binary_operation[binary_operation[name[pop_n] - call[name[len], parameter[name[dsubjs]]]] - name[b]]
variable[ctable] assign[=] list[[<ast.List object at 0x7da1b0746e00>, <ast.List object at 0x7da1b07440d0>]]
<ast.Tuple object at 0x7da1b0744b80> assign[=] call[name[sp].stats.fisher_exact, parameter[name[ctable], constant[less]]]
<ast.Tuple object at 0x7da1b0744850> assign[=] call[name[sp].stats.fisher_exact, parameter[name[ctable], constant[greater]]]
call[name[cells].append, parameter[dictionary[[<ast.Constant object at 0x7da1b07459f0>, <ast.Constant object at 0x7da1b0745b40>, <ast.Constant object at 0x7da1b07469e0>, <ast.Constant object at 0x7da1b0745ae0>, <ast.Constant object at 0x7da1b07440a0>, <ast.Constant object at 0x7da1b0746980>, <ast.Constant object at 0x7da1b0747f70>], [<ast.Name object at 0x7da1b0747280>, <ast.Name object at 0x7da1b0745a20>, <ast.Call object at 0x7da1b0745d80>, <ast.Call object at 0x7da1b0746260>, <ast.Name object at 0x7da1b0746680>, <ast.Name object at 0x7da1b0746350>, <ast.Name object at 0x7da1b0744490>]]]]
return[name[cells]] | keyword[def] identifier[term_matrix] ( identifier[idlist] , identifier[subject_category] , identifier[taxon] ,** identifier[kwargs] ):
literal[string]
identifier[results] = identifier[search_associations] ( identifier[objects] = identifier[idlist] ,
identifier[subject_taxon] = identifier[taxon] ,
identifier[subject_category] = identifier[subject_category] ,
identifier[select_fields] =[ identifier[M] . identifier[SUBJECT] , identifier[M] . identifier[OBJECT_CLOSURE] ],
identifier[facet_fields] =[],
identifier[rows] =- literal[int] ,
identifier[include_raw] = keyword[True] ,
** identifier[kwargs] )
identifier[docs] = identifier[results] [ literal[string] ]. identifier[docs]
identifier[subjects_per_term] ={}
identifier[smap] ={}
keyword[for] identifier[d] keyword[in] identifier[docs] :
identifier[smap] [ identifier[d] [ identifier[M] . identifier[SUBJECT] ]]= literal[int]
keyword[for] identifier[c] keyword[in] identifier[d] [ identifier[M] . identifier[OBJECT_CLOSURE] ]:
keyword[if] identifier[c] keyword[in] identifier[idlist] :
keyword[if] identifier[c] keyword[not] keyword[in] identifier[subjects_per_term] :
identifier[subjects_per_term] [ identifier[c] ]=[]
identifier[subjects_per_term] [ identifier[c] ]. identifier[append] ( identifier[d] [ identifier[M] . identifier[SUBJECT] ])
identifier[pop_n] = identifier[len] ( identifier[smap] . identifier[keys] ())
identifier[cells] =[]
keyword[for] identifier[cx] keyword[in] identifier[idlist] :
identifier[csubjs] = identifier[set] ( identifier[subjects_per_term] [ identifier[cx] ])
keyword[for] identifier[dx] keyword[in] identifier[idlist] :
identifier[dsubjs] = identifier[set] ( identifier[subjects_per_term] [ identifier[dx] ])
identifier[a] = identifier[len] ( identifier[csubjs] . identifier[intersection] ( identifier[dsubjs] ))
identifier[b] = identifier[len] ( identifier[csubjs] )- identifier[a]
identifier[c] = identifier[len] ( identifier[dsubjs] )- identifier[a]
identifier[d] = identifier[pop_n] - identifier[len] ( identifier[dsubjs] )- identifier[b]
identifier[ctable] =[[ identifier[a] , identifier[b] ],[ identifier[c] , identifier[d] ]]
identifier[_] , identifier[p_under] = identifier[sp] . identifier[stats] . identifier[fisher_exact] ( identifier[ctable] , literal[string] )
identifier[_] , identifier[p_over] = identifier[sp] . identifier[stats] . identifier[fisher_exact] ( identifier[ctable] , literal[string] )
identifier[cells] . identifier[append] ({ literal[string] : identifier[cx] , literal[string] : identifier[dx] ,
literal[string] : identifier[len] ( identifier[csubjs] ),
literal[string] : identifier[len] ( identifier[dsubjs] ),
literal[string] : identifier[a] ,
literal[string] : identifier[p_under] ,
literal[string] : identifier[p_over]
})
keyword[return] identifier[cells] | def term_matrix(idlist, subject_category, taxon, **kwargs):
"""
Intersection between annotated objects
P1 not(P1)
F1 0 5
not(F1) 6 0
"""
results = search_associations(objects=idlist, subject_taxon=taxon, subject_category=subject_category, select_fields=[M.SUBJECT, M.OBJECT_CLOSURE], facet_fields=[], rows=-1, include_raw=True, **kwargs)
docs = results['raw'].docs
subjects_per_term = {}
smap = {}
for d in docs:
smap[d[M.SUBJECT]] = 1
for c in d[M.OBJECT_CLOSURE]:
if c in idlist:
if c not in subjects_per_term:
subjects_per_term[c] = [] # depends on [control=['if'], data=['c', 'subjects_per_term']]
subjects_per_term[c].append(d[M.SUBJECT]) # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['c']] # depends on [control=['for'], data=['d']]
pop_n = len(smap.keys())
cells = []
for cx in idlist:
csubjs = set(subjects_per_term[cx])
for dx in idlist:
dsubjs = set(subjects_per_term[dx])
a = len(csubjs.intersection(dsubjs))
b = len(csubjs) - a
c = len(dsubjs) - a
d = pop_n - len(dsubjs) - b
ctable = [[a, b], [c, d]]
(_, p_under) = sp.stats.fisher_exact(ctable, 'less')
(_, p_over) = sp.stats.fisher_exact(ctable, 'greater')
cells.append({'c': cx, 'd': dx, 'nc': len(csubjs), 'nd': len(dsubjs), 'n': a, 'p_l': p_under, 'p_g': p_over}) # depends on [control=['for'], data=['dx']] # depends on [control=['for'], data=['cx']]
return cells |
def save_bd5(
        space, filename,
        group_index=0, object_name="molecule", spatial_unit="meter", time_unit="second",
        trunc=False, with_radius=False):
    """Save a space in the BDML-BD5 format (https://github.com/openssbd/BDML-BD5).

    The file is opened for read/write when it already exists, otherwise a
    new file is created. With ``trunc=True`` a new file is always created.
    A new group named `group_name` is created; if the group already exists,
    an exception is raised.

    Parameters
    ----------
    space : Space, str, pathlib.PurePath, list, tuple or set
        A Space or World to be saved. A str or pathlib.PurePath is treated
        as a path to load a space from. An iterable (list, tuple, set) has
        this function applied to each of its elements.
    filename : str
        A HDF5 filename.
    group_index : int, optional
        Index of the group written (0, 1, ..., n). Defaults to 0.
    object_name : str, optional
        Name of the object (< 128 chars). Defaults to "molecule".
    spatial_unit : str, optional
        Unit of the length scale (< 16 chars). Defaults to "meter".
    time_unit : str, optional
        Unit of the time scale (< 16 chars). Defaults to "second".
    trunc : bool, optional
        Whether to truncate the file on open. Defaults to False.
    with_radius : bool, optional
        Whether to save particle radii. If True, particles are saved as
        'sphere', otherwise as 'point'. Defaults to False.
    """
    if isinstance(space, (list, tuple, set)):
        # Nested iterables are not supported; recurse once per element,
        # bumping the group index and honouring `trunc` only for the first.
        for offset, element in enumerate(space):
            assert not isinstance(element, (list, tuple, set))
            save_bd5(
                element, filename, group_index + offset, object_name,
                spatial_unit, time_unit,
                trunc if offset == 0 else False, with_radius)
    elif isinstance(space, str):
        # A string is a filepath: load the world, then recurse.
        save_bd5(
            load_world(space), filename, group_index, object_name,
            spatial_unit, time_unit, trunc, with_radius)
    elif isinstance(space, pathlib.PurePath):
        # Normalise path objects to strings, then recurse.
        save_bd5(
            str(space), filename, group_index, object_name,
            spatial_unit, time_unit, trunc, with_radius)
    else:
        # space is expected to be either Space or World.
        _save_bd5(
            space.as_base(), filename, group_index, object_name,
            spatial_unit, time_unit, trunc, with_radius)
constant[Save a space in the BDML-BD5 format (https://github.com/openssbd/BDML-BD5).
Open file for read/write, if it already exists, and create a new file, otherwise.
If trunc is True, always create a new file.
A new group named `group_name` is created. If the group already exists, returns
an exception.
Parameters
----------
space : Space, str, pathlib.PurePath, list, tuple or set
A Space or World to be saved. If str or pathlib.PurePath is given, a space is
loaded from the given path. If this is an iterable (list, tuple, set), apply
this function to each element of the given.
filename : str
A HDF5 filename.
group_index : int, optional
An index of the group written (0, 1, ..., n). Defaults to 0.
object_name : str, optional
A name of the object. Its length must be less than 128. Defaults to "molecule".
spatial_unit : str, optional
An unit of the length scale. Its length must be less than 16. Defaults to "meter".
time_unit : str, optional
An unit of the time scale. Its length must be less than 16. Defaults to "second".
trunc : bool, optional
Whether truncate file or not. If True, always overwrite the file when open it.
Defaults to False.
with_radius : bool, optional
Whether save the radius of particles. If True, particles are saved as 'sphere',
otherwise, as 'point'. Defaults to False.
]
if call[name[isinstance], parameter[name[space], tuple[[<ast.Name object at 0x7da1b0ef1870>, <ast.Name object at 0x7da1b0ef1840>, <ast.Name object at 0x7da1b0ef1810>]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0ef1780>, <ast.Name object at 0x7da1b0ef1750>]]] in starred[call[name[enumerate], parameter[name[space]]]] begin[:]
assert[<ast.UnaryOp object at 0x7da1b0ef1660>]
call[name[save_bd5], parameter[name[space_], name[filename], binary_operation[name[group_index] + name[i]], name[object_name], name[spatial_unit], name[time_unit], <ast.IfExp object at 0x7da1b0ef12d0>, name[with_radius]]] | keyword[def] identifier[save_bd5] (
identifier[space] , identifier[filename] ,
identifier[group_index] = literal[int] , identifier[object_name] = literal[string] , identifier[spatial_unit] = literal[string] , identifier[time_unit] = literal[string] ,
identifier[trunc] = keyword[False] , identifier[with_radius] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[space] ,( identifier[list] , identifier[tuple] , identifier[set] )):
keyword[for] identifier[i] , identifier[space_] keyword[in] identifier[enumerate] ( identifier[space] ):
keyword[assert] keyword[not] identifier[isinstance] ( identifier[space_] ,( identifier[list] , identifier[tuple] , identifier[set] ))
identifier[save_bd5] (
identifier[space_] , identifier[filename] , identifier[group_index] + identifier[i] , identifier[object_name] , identifier[spatial_unit] , identifier[time_unit] ,
identifier[trunc] keyword[if] identifier[i] == literal[int] keyword[else] keyword[False] , identifier[with_radius] )
keyword[elif] identifier[isinstance] ( identifier[space] , identifier[str] ):
identifier[save_bd5] ( identifier[load_world] ( identifier[space] ), identifier[filename] , identifier[group_index] , identifier[object_name] , identifier[spatial_unit] , identifier[time_unit] , identifier[trunc] , identifier[with_radius] )
keyword[elif] identifier[isinstance] ( identifier[space] , identifier[pathlib] . identifier[PurePath] ):
identifier[save_bd5] ( identifier[str] ( identifier[space] ), identifier[filename] , identifier[group_index] , identifier[object_name] , identifier[spatial_unit] , identifier[time_unit] , identifier[trunc] , identifier[with_radius] )
keyword[else] :
identifier[_save_bd5] ( identifier[space] . identifier[as_base] (), identifier[filename] , identifier[group_index] , identifier[object_name] , identifier[spatial_unit] , identifier[time_unit] , identifier[trunc] , identifier[with_radius] ) | def save_bd5(space, filename, group_index=0, object_name='molecule', spatial_unit='meter', time_unit='second', trunc=False, with_radius=False):
"""Save a space in the BDML-BD5 format (https://github.com/openssbd/BDML-BD5).
Open file for read/write, if it already exists, and create a new file, otherwise.
If trunc is True, always create a new file.
A new group named `group_name` is created. If the group already exists, returns
an exception.
Parameters
----------
space : Space, str, pathlib.PurePath, list, tuple or set
A Space or World to be saved. If str or pathlib.PurePath is given, a space is
loaded from the given path. If this is an iterable (list, tuple, set), apply
this function to each element of the given.
filename : str
A HDF5 filename.
group_index : int, optional
An index of the group written (0, 1, ..., n). Defaults to 0.
object_name : str, optional
A name of the object. Its length must be less than 128. Defaults to "molecule".
spatial_unit : str, optional
An unit of the length scale. Its length must be less than 16. Defaults to "meter".
time_unit : str, optional
An unit of the time scale. Its length must be less than 16. Defaults to "second".
trunc : bool, optional
Whether truncate file or not. If True, always overwrite the file when open it.
Defaults to False.
with_radius : bool, optional
Whether save the radius of particles. If True, particles are saved as 'sphere',
otherwise, as 'point'. Defaults to False.
"""
if isinstance(space, (list, tuple, set)):
for (i, space_) in enumerate(space):
assert not isinstance(space_, (list, tuple, set))
save_bd5(space_, filename, group_index + i, object_name, spatial_unit, time_unit, trunc if i == 0 else False, with_radius) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(space, str):
save_bd5(load_world(space), filename, group_index, object_name, spatial_unit, time_unit, trunc, with_radius) # depends on [control=['if'], data=[]]
elif isinstance(space, pathlib.PurePath):
save_bd5(str(space), filename, group_index, object_name, spatial_unit, time_unit, trunc, with_radius) # depends on [control=['if'], data=[]]
else:
# space is expected to be either Space or World.
_save_bd5(space.as_base(), filename, group_index, object_name, spatial_unit, time_unit, trunc, with_radius) |
def search_files(source: str, extensions: List[str]) -> List[Path]:
    """Collect files from the source directory tree whose extension is listed.

    Files are gathered recursively from ``source`` and returned in random
    order.

    :raise GuesslangError: when there are not enough files in the directory
    :param source: directory name
    :param extensions: list of file extensions (without leading dot)
    :return: shuffled list of matching file paths
    """
    matches = []
    for candidate in Path(source).glob('**/*'):
        if candidate.is_file() and candidate.suffix.lstrip('.') in extensions:
            matches.append(candidate)

    nb_files = len(matches)
    LOGGER.debug("Total files found: %d", nb_files)

    # A minimum corpus size is required for meaningful training.
    if nb_files < NB_FILES_MIN:
        LOGGER.error("Too few source files")
        raise GuesslangError(
            '{} source files found in {}. {} files minimum is required'.format(
                nb_files, source, NB_FILES_MIN))

    random.shuffle(matches)
    return matches
constant[Retrieve files located the source directory and its subdirectories,
whose extension match one of the listed extensions.
:raise GuesslangError: when there is not enough files in the directory
:param source: directory name
:param extensions: list of file extensions
:return: filenames
]
variable[files] assign[=] <ast.ListComp object at 0x7da20c991b10>
variable[nb_files] assign[=] call[name[len], parameter[name[files]]]
call[name[LOGGER].debug, parameter[constant[Total files found: %d], name[nb_files]]]
if compare[name[nb_files] less[<] name[NB_FILES_MIN]] begin[:]
call[name[LOGGER].error, parameter[constant[Too few source files]]]
<ast.Raise object at 0x7da2041d8970>
call[name[random].shuffle, parameter[name[files]]]
return[name[files]] | keyword[def] identifier[search_files] ( identifier[source] : identifier[str] , identifier[extensions] : identifier[List] [ identifier[str] ])-> identifier[List] [ identifier[Path] ]:
literal[string]
identifier[files] =[
identifier[path] keyword[for] identifier[path] keyword[in] identifier[Path] ( identifier[source] ). identifier[glob] ( literal[string] )
keyword[if] identifier[path] . identifier[is_file] () keyword[and] identifier[path] . identifier[suffix] . identifier[lstrip] ( literal[string] ) keyword[in] identifier[extensions] ]
identifier[nb_files] = identifier[len] ( identifier[files] )
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[nb_files] )
keyword[if] identifier[nb_files] < identifier[NB_FILES_MIN] :
identifier[LOGGER] . identifier[error] ( literal[string] )
keyword[raise] identifier[GuesslangError] (
literal[string] . identifier[format] (
identifier[nb_files] , identifier[source] , identifier[NB_FILES_MIN] ))
identifier[random] . identifier[shuffle] ( identifier[files] )
keyword[return] identifier[files] | def search_files(source: str, extensions: List[str]) -> List[Path]:
"""Retrieve files located the source directory and its subdirectories,
whose extension match one of the listed extensions.
:raise GuesslangError: when there is not enough files in the directory
:param source: directory name
:param extensions: list of file extensions
:return: filenames
"""
files = [path for path in Path(source).glob('**/*') if path.is_file() and path.suffix.lstrip('.') in extensions]
nb_files = len(files)
LOGGER.debug('Total files found: %d', nb_files)
if nb_files < NB_FILES_MIN:
LOGGER.error('Too few source files')
raise GuesslangError('{} source files found in {}. {} files minimum is required'.format(nb_files, source, NB_FILES_MIN)) # depends on [control=['if'], data=['nb_files', 'NB_FILES_MIN']]
random.shuffle(files)
return files |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
  """Extracts events from a Windows Registry key.

  Produces one event for the key itself, carrying every value of the key,
  and one event per zone subkey with its control values rendered as
  human-readable strings where the value name is recognized.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
  """
  values_dict = {}
  if registry_key.number_of_values > 0:
    for registry_value in registry_key.GetValues():
      # An empty value name denotes the key's default value.
      value_name = registry_value.name or '(default)'
      # Render the data according to its Registry type; unsupported types
      # are reduced to just the data type name.
      if registry_value.DataIsString():
        value_string = '[{0:s}] {1:s}'.format(
            registry_value.data_type_string, registry_value.GetDataAsObject())
      elif registry_value.DataIsInteger():
        value_string = '[{0:s}] {1:d}'.format(
            registry_value.data_type_string, registry_value.GetDataAsObject())
      elif registry_value.DataIsMultiString():
        value_string = '[{0:s}] {1:s}'.format(
            registry_value.data_type_string, ''.join(
                registry_value.GetDataAsObject()))
      else:
        value_string = '[{0:s}]'.format(registry_value.data_type_string)
      values_dict[value_name] = value_string
  # Generate at least one event object for the key.
  event_data = windows_events.WindowsRegistryEventData()
  event_data.key_path = registry_key.path
  event_data.offset = registry_key.offset
  event_data.regvalue = values_dict
  event_data.urls = self.URLS
  event = time_events.DateTimeValuesEvent(
      registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
  if registry_key.number_of_subkeys == 0:
    # No zone subkeys: report the anomaly and stop after the key event.
    error_string = 'Key: {0:s} missing subkeys.'.format(registry_key.path)
    parser_mediator.ProduceExtractionWarning(error_string)
    return
  for zone_key in registry_key.GetSubkeys():
    # TODO: these values are stored in the Description value of the
    # zone key. This solution will break on zone values that are larger
    # than 5.
    path = '{0:s}\\{1:s}'.format(
        registry_key.path, self._ZONE_NAMES[zone_key.name])
    values_dict = {}
    # TODO: this plugin currently just dumps the values and does not
    # distinguish between what is a feature control or not.
    for value in zone_key.GetValues():
      # Ignore the default value.
      if not value.name:
        continue
      if value.DataIsString():
        value_string = value.GetDataAsObject()
      elif value.DataIsInteger():
        value_integer = value.GetDataAsObject()
        # Translate well-known integer control values into readable
        # strings; anything unrecognized falls back to the raw number.
        if value.name in self._KNOWN_PERMISSIONS_VALUE_NAMES:
          value_string = self._CONTROL_VALUES_PERMISSIONS.get(
              value_integer, 'UNKNOWN')
        elif value.name == '1A00':
          value_string = self._CONTROL_VALUES_1A00.get(
              value_integer, 'UNKNOWN')
        elif value.name == '1C00':
          value_string = self._CONTROL_VALUES_1C00.get(
              value_integer, 'UNKNOWN')
        elif value.name == '1E05':
          value_string = self._CONTROL_VALUES_SAFETY.get(
              value_integer, 'UNKNOWN')
        else:
          value_string = '{0:d}'.format(value_integer)
      else:
        value_string = '[{0:s}]'.format(value.data_type_string)
      # Feature-control names are 4 characters long; 'Icon' is the one
      # 4-character name that is not a feature control.
      if len(value.name) == 4 and value.name != 'Icon':
        value_description = self._FEATURE_CONTROLS.get(value.name, 'UNKNOWN')
      else:
        value_description = self._FEATURE_CONTROLS.get(value.name, '')
      if value_description:
        feature_control = '[{0:s}] {1:s}'.format(
            value.name, value_description)
      else:
        feature_control = '[{0:s}]'.format(value.name)
      values_dict[feature_control] = value_string
    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = path
    event_data.offset = zone_key.offset
    event_data.regvalue = values_dict
    event_data.urls = self.URLS
    event = time_events.DateTimeValuesEvent(
        zone_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
constant[Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
]
variable[values_dict] assign[=] dictionary[[], []]
if compare[name[registry_key].number_of_values greater[>] constant[0]] begin[:]
for taget[name[registry_value]] in starred[call[name[registry_key].GetValues, parameter[]]] begin[:]
variable[value_name] assign[=] <ast.BoolOp object at 0x7da20e9b3340>
if call[name[registry_value].DataIsString, parameter[]] begin[:]
variable[value_string] assign[=] call[constant[[{0:s}] {1:s}].format, parameter[name[registry_value].data_type_string, call[name[registry_value].GetDataAsObject, parameter[]]]]
call[name[values_dict]][name[value_name]] assign[=] name[value_string]
variable[event_data] assign[=] call[name[windows_events].WindowsRegistryEventData, parameter[]]
name[event_data].key_path assign[=] name[registry_key].path
name[event_data].offset assign[=] name[registry_key].offset
name[event_data].regvalue assign[=] name[values_dict]
name[event_data].urls assign[=] name[self].URLS
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[registry_key].last_written_time, name[definitions].TIME_DESCRIPTION_WRITTEN]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]]
if compare[name[registry_key].number_of_subkeys equal[==] constant[0]] begin[:]
variable[error_string] assign[=] call[constant[Key: {0:s} missing subkeys.].format, parameter[name[registry_key].path]]
call[name[parser_mediator].ProduceExtractionWarning, parameter[name[error_string]]]
return[None]
for taget[name[zone_key]] in starred[call[name[registry_key].GetSubkeys, parameter[]]] begin[:]
variable[path] assign[=] call[constant[{0:s}\{1:s}].format, parameter[name[registry_key].path, call[name[self]._ZONE_NAMES][name[zone_key].name]]]
variable[values_dict] assign[=] dictionary[[], []]
for taget[name[value]] in starred[call[name[zone_key].GetValues, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da2047eb4c0> begin[:]
continue
if call[name[value].DataIsString, parameter[]] begin[:]
variable[value_string] assign[=] call[name[value].GetDataAsObject, parameter[]]
if <ast.BoolOp object at 0x7da18eb55ff0> begin[:]
variable[value_description] assign[=] call[name[self]._FEATURE_CONTROLS.get, parameter[name[value].name, constant[UNKNOWN]]]
if name[value_description] begin[:]
variable[feature_control] assign[=] call[constant[[{0:s}] {1:s}].format, parameter[name[value].name, name[value_description]]]
call[name[values_dict]][name[feature_control]] assign[=] name[value_string]
variable[event_data] assign[=] call[name[windows_events].WindowsRegistryEventData, parameter[]]
name[event_data].key_path assign[=] name[path]
name[event_data].offset assign[=] name[zone_key].offset
name[event_data].regvalue assign[=] name[values_dict]
name[event_data].urls assign[=] name[self].URLS
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[zone_key].last_written_time, name[definitions].TIME_DESCRIPTION_WRITTEN]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]] | keyword[def] identifier[ExtractEvents] ( identifier[self] , identifier[parser_mediator] , identifier[registry_key] ,** identifier[kwargs] ):
literal[string]
identifier[values_dict] ={}
keyword[if] identifier[registry_key] . identifier[number_of_values] > literal[int] :
keyword[for] identifier[registry_value] keyword[in] identifier[registry_key] . identifier[GetValues] ():
identifier[value_name] = identifier[registry_value] . identifier[name] keyword[or] literal[string]
keyword[if] identifier[registry_value] . identifier[DataIsString] ():
identifier[value_string] = literal[string] . identifier[format] (
identifier[registry_value] . identifier[data_type_string] , identifier[registry_value] . identifier[GetDataAsObject] ())
keyword[elif] identifier[registry_value] . identifier[DataIsInteger] ():
identifier[value_string] = literal[string] . identifier[format] (
identifier[registry_value] . identifier[data_type_string] , identifier[registry_value] . identifier[GetDataAsObject] ())
keyword[elif] identifier[registry_value] . identifier[DataIsMultiString] ():
identifier[value_string] = literal[string] . identifier[format] (
identifier[registry_value] . identifier[data_type_string] , literal[string] . identifier[join] (
identifier[registry_value] . identifier[GetDataAsObject] ()))
keyword[else] :
identifier[value_string] = literal[string] . identifier[format] ( identifier[registry_value] . identifier[data_type_string] )
identifier[values_dict] [ identifier[value_name] ]= identifier[value_string]
identifier[event_data] = identifier[windows_events] . identifier[WindowsRegistryEventData] ()
identifier[event_data] . identifier[key_path] = identifier[registry_key] . identifier[path]
identifier[event_data] . identifier[offset] = identifier[registry_key] . identifier[offset]
identifier[event_data] . identifier[regvalue] = identifier[values_dict]
identifier[event_data] . identifier[urls] = identifier[self] . identifier[URLS]
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[registry_key] . identifier[last_written_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_WRITTEN] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] )
keyword[if] identifier[registry_key] . identifier[number_of_subkeys] == literal[int] :
identifier[error_string] = literal[string] . identifier[format] ( identifier[registry_key] . identifier[path] )
identifier[parser_mediator] . identifier[ProduceExtractionWarning] ( identifier[error_string] )
keyword[return]
keyword[for] identifier[zone_key] keyword[in] identifier[registry_key] . identifier[GetSubkeys] ():
identifier[path] = literal[string] . identifier[format] (
identifier[registry_key] . identifier[path] , identifier[self] . identifier[_ZONE_NAMES] [ identifier[zone_key] . identifier[name] ])
identifier[values_dict] ={}
keyword[for] identifier[value] keyword[in] identifier[zone_key] . identifier[GetValues] ():
keyword[if] keyword[not] identifier[value] . identifier[name] :
keyword[continue]
keyword[if] identifier[value] . identifier[DataIsString] ():
identifier[value_string] = identifier[value] . identifier[GetDataAsObject] ()
keyword[elif] identifier[value] . identifier[DataIsInteger] ():
identifier[value_integer] = identifier[value] . identifier[GetDataAsObject] ()
keyword[if] identifier[value] . identifier[name] keyword[in] identifier[self] . identifier[_KNOWN_PERMISSIONS_VALUE_NAMES] :
identifier[value_string] = identifier[self] . identifier[_CONTROL_VALUES_PERMISSIONS] . identifier[get] (
identifier[value_integer] , literal[string] )
keyword[elif] identifier[value] . identifier[name] == literal[string] :
identifier[value_string] = identifier[self] . identifier[_CONTROL_VALUES_1A00] . identifier[get] (
identifier[value_integer] , literal[string] )
keyword[elif] identifier[value] . identifier[name] == literal[string] :
identifier[value_string] = identifier[self] . identifier[_CONTROL_VALUES_1C00] . identifier[get] (
identifier[value_integer] , literal[string] )
keyword[elif] identifier[value] . identifier[name] == literal[string] :
identifier[value_string] = identifier[self] . identifier[_CONTROL_VALUES_SAFETY] . identifier[get] (
identifier[value_integer] , literal[string] )
keyword[else] :
identifier[value_string] = literal[string] . identifier[format] ( identifier[value_integer] )
keyword[else] :
identifier[value_string] = literal[string] . identifier[format] ( identifier[value] . identifier[data_type_string] )
keyword[if] identifier[len] ( identifier[value] . identifier[name] )== literal[int] keyword[and] identifier[value] . identifier[name] != literal[string] :
identifier[value_description] = identifier[self] . identifier[_FEATURE_CONTROLS] . identifier[get] ( identifier[value] . identifier[name] , literal[string] )
keyword[else] :
identifier[value_description] = identifier[self] . identifier[_FEATURE_CONTROLS] . identifier[get] ( identifier[value] . identifier[name] , literal[string] )
keyword[if] identifier[value_description] :
identifier[feature_control] = literal[string] . identifier[format] (
identifier[value] . identifier[name] , identifier[value_description] )
keyword[else] :
identifier[feature_control] = literal[string] . identifier[format] ( identifier[value] . identifier[name] )
identifier[values_dict] [ identifier[feature_control] ]= identifier[value_string]
identifier[event_data] = identifier[windows_events] . identifier[WindowsRegistryEventData] ()
identifier[event_data] . identifier[key_path] = identifier[path]
identifier[event_data] . identifier[offset] = identifier[zone_key] . identifier[offset]
identifier[event_data] . identifier[regvalue] = identifier[values_dict]
identifier[event_data] . identifier[urls] = identifier[self] . identifier[URLS]
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[zone_key] . identifier[last_written_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_WRITTEN] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] ) | def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
values_dict = {}
if registry_key.number_of_values > 0:
for registry_value in registry_key.GetValues():
value_name = registry_value.name or '(default)'
if registry_value.DataIsString():
value_string = '[{0:s}] {1:s}'.format(registry_value.data_type_string, registry_value.GetDataAsObject()) # depends on [control=['if'], data=[]]
elif registry_value.DataIsInteger():
value_string = '[{0:s}] {1:d}'.format(registry_value.data_type_string, registry_value.GetDataAsObject()) # depends on [control=['if'], data=[]]
elif registry_value.DataIsMultiString():
value_string = '[{0:s}] {1:s}'.format(registry_value.data_type_string, ''.join(registry_value.GetDataAsObject())) # depends on [control=['if'], data=[]]
else:
value_string = '[{0:s}]'.format(registry_value.data_type_string)
values_dict[value_name] = value_string # depends on [control=['for'], data=['registry_value']] # depends on [control=['if'], data=[]]
# Generate at least one event object for the key.
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.urls = self.URLS
event = time_events.DateTimeValuesEvent(registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if registry_key.number_of_subkeys == 0:
error_string = 'Key: {0:s} missing subkeys.'.format(registry_key.path)
parser_mediator.ProduceExtractionWarning(error_string)
return # depends on [control=['if'], data=[]]
for zone_key in registry_key.GetSubkeys():
# TODO: these values are stored in the Description value of the
# zone key. This solution will break on zone values that are larger
# than 5.
path = '{0:s}\\{1:s}'.format(registry_key.path, self._ZONE_NAMES[zone_key.name])
values_dict = {}
# TODO: this plugin currently just dumps the values and does not
# distinguish between what is a feature control or not.
for value in zone_key.GetValues():
# Ignore the default value.
if not value.name:
continue # depends on [control=['if'], data=[]]
if value.DataIsString():
value_string = value.GetDataAsObject() # depends on [control=['if'], data=[]]
elif value.DataIsInteger():
value_integer = value.GetDataAsObject()
if value.name in self._KNOWN_PERMISSIONS_VALUE_NAMES:
value_string = self._CONTROL_VALUES_PERMISSIONS.get(value_integer, 'UNKNOWN') # depends on [control=['if'], data=[]]
elif value.name == '1A00':
value_string = self._CONTROL_VALUES_1A00.get(value_integer, 'UNKNOWN') # depends on [control=['if'], data=[]]
elif value.name == '1C00':
value_string = self._CONTROL_VALUES_1C00.get(value_integer, 'UNKNOWN') # depends on [control=['if'], data=[]]
elif value.name == '1E05':
value_string = self._CONTROL_VALUES_SAFETY.get(value_integer, 'UNKNOWN') # depends on [control=['if'], data=[]]
else:
value_string = '{0:d}'.format(value_integer) # depends on [control=['if'], data=[]]
else:
value_string = '[{0:s}]'.format(value.data_type_string)
if len(value.name) == 4 and value.name != 'Icon':
value_description = self._FEATURE_CONTROLS.get(value.name, 'UNKNOWN') # depends on [control=['if'], data=[]]
else:
value_description = self._FEATURE_CONTROLS.get(value.name, '')
if value_description:
feature_control = '[{0:s}] {1:s}'.format(value.name, value_description) # depends on [control=['if'], data=[]]
else:
feature_control = '[{0:s}]'.format(value.name)
values_dict[feature_control] = value_string # depends on [control=['for'], data=['value']]
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = path
event_data.offset = zone_key.offset
event_data.regvalue = values_dict
event_data.urls = self.URLS
event = time_events.DateTimeValuesEvent(zone_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) # depends on [control=['for'], data=['zone_key']] |
def _prepare_request(self, url, method, headers, data):
    """Build an ``httpclient.HTTPRequest`` for the given call.

    Connection settings (timeouts, credentials, TLS material) are taken
    from the instance; only the call-specific pieces are parameters.

    :param str url: request URL.
    :param str method: request method.
    :param dict headers: request headers.
    :param object data: JSON-encodable object.
    :rtype: httpclient.HTTPRequest
    """
    return httpclient.HTTPRequest(
        url=url,
        method=method,
        headers=headers,
        body=data,
        connect_timeout=self._connect_timeout,
        request_timeout=self._request_timeout,
        auth_username=self._username,
        auth_password=self._password,
        client_cert=self._client_cert,
        client_key=self._client_key,
        ca_certs=self._ca_certs,
        validate_cert=self._verify_cert,
    )
constant[Prepare HTTP request.
:param str url: request URL.
:param str method: request method.
:param dict headers: request headers.
:param object data: JSON-encodable object.
:rtype: httpclient.HTTPRequest
]
variable[request] assign[=] call[name[httpclient].HTTPRequest, parameter[]]
return[name[request]] | keyword[def] identifier[_prepare_request] ( identifier[self] , identifier[url] , identifier[method] , identifier[headers] , identifier[data] ):
literal[string]
identifier[request] = identifier[httpclient] . identifier[HTTPRequest] (
identifier[url] = identifier[url] , identifier[method] = identifier[method] , identifier[headers] = identifier[headers] , identifier[body] = identifier[data] ,
identifier[connect_timeout] = identifier[self] . identifier[_connect_timeout] ,
identifier[request_timeout] = identifier[self] . identifier[_request_timeout] ,
identifier[auth_username] = identifier[self] . identifier[_username] , identifier[auth_password] = identifier[self] . identifier[_password] ,
identifier[client_cert] = identifier[self] . identifier[_client_cert] , identifier[client_key] = identifier[self] . identifier[_client_key] ,
identifier[ca_certs] = identifier[self] . identifier[_ca_certs] , identifier[validate_cert] = identifier[self] . identifier[_verify_cert] )
keyword[return] identifier[request] | def _prepare_request(self, url, method, headers, data):
"""Prepare HTTP request.
:param str url: request URL.
:param str method: request method.
:param dict headers: request headers.
:param object data: JSON-encodable object.
:rtype: httpclient.HTTPRequest
"""
request = httpclient.HTTPRequest(url=url, method=method, headers=headers, body=data, connect_timeout=self._connect_timeout, request_timeout=self._request_timeout, auth_username=self._username, auth_password=self._password, client_cert=self._client_cert, client_key=self._client_key, ca_certs=self._ca_certs, validate_cert=self._verify_cert)
return request |
def serialize(self):
    """Serialize items into a simple dict object.

    Used when transferring data to other daemons over the network (http).
    Each item is serialized individually and keyed by its uuid.

    :return: Dictionary containing item's uuid as key and item as value
    :rtype: dict
    """
    return {uuid: item.serialize()
            for uuid, item in list(self.items.items())}
constant[This function serialize items into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply serialize each item of the items object
:return: Dictionary containing item's uuid as key and item as value
:rtype: dict
]
variable[res] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b23451e0>, <ast.Name object at 0x7da1b23470a0>]]] in starred[call[name[list], parameter[call[name[self].items.items, parameter[]]]]] begin[:]
call[name[res]][name[key]] assign[=] call[name[item].serialize, parameter[]]
return[name[res]] | keyword[def] identifier[serialize] ( identifier[self] ):
literal[string]
identifier[res] ={}
keyword[for] identifier[key] , identifier[item] keyword[in] identifier[list] ( identifier[self] . identifier[items] . identifier[items] ()):
identifier[res] [ identifier[key] ]= identifier[item] . identifier[serialize] ()
keyword[return] identifier[res] | def serialize(self):
"""This function serialize items into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here is the generic function that simply serialize each item of the items object
:return: Dictionary containing item's uuid as key and item as value
:rtype: dict
"""
res = {}
for (key, item) in list(self.items.items()):
res[key] = item.serialize() # depends on [control=['for'], data=[]]
return res |
def request_token(self):
    """Obtain an OAuth request token from the server.

    :return: tuple of (authorize_url, request_token, request_secret)
    """
    logging.debug("Getting request token from %s:%d",
                  self.server, self.port)
    token, secret = self._token("/oauth/requestToken")
    authorize_url = "{}/oauth/authorize?oauth_token={}".format(
        self.host, token)
    return authorize_url, token, secret
constant[ Returns url, request_token, request_secret]
call[name[logging].debug, parameter[constant[Getting request token from %s:%d], name[self].server, name[self].port]]
<ast.Tuple object at 0x7da1b25842e0> assign[=] call[name[self]._token, parameter[constant[/oauth/requestToken]]]
return[tuple[[<ast.Call object at 0x7da1b2587d90>, <ast.Name object at 0x7da1b2586470>, <ast.Name object at 0x7da1b2587160>]]] | keyword[def] identifier[request_token] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[server] , identifier[self] . identifier[port] )
identifier[token] , identifier[secret] = identifier[self] . identifier[_token] ( literal[string] )
keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[host] , identifier[token] ), identifier[token] , identifier[secret] | def request_token(self):
""" Returns url, request_token, request_secret"""
logging.debug('Getting request token from %s:%d', self.server, self.port)
(token, secret) = self._token('/oauth/requestToken')
return ('{}/oauth/authorize?oauth_token={}'.format(self.host, token), token, secret) |
def sh_e_out(cls, cmd, **kwargs):
    """Run *cmd* via :meth:`sh_e` and return its captured stdout.

    ``stdout`` defaults to ``subprocess.PIPE`` but can be overridden
    through ``kwargs``.
    """
    merged_kwargs = dict(kwargs)
    merged_kwargs.setdefault('stdout', subprocess.PIPE)
    return cls.sh_e(cmd, **merged_kwargs)[0]
constant[Run the command. and returns the stdout.]
variable[cmd_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b053abc0>], [<ast.Attribute object at 0x7da1b053aef0>]]
call[name[cmd_kwargs].update, parameter[name[kwargs]]]
return[call[call[name[cls].sh_e, parameter[name[cmd]]]][constant[0]]] | keyword[def] identifier[sh_e_out] ( identifier[cls] , identifier[cmd] ,** identifier[kwargs] ):
literal[string]
identifier[cmd_kwargs] ={
literal[string] : identifier[subprocess] . identifier[PIPE] ,
}
identifier[cmd_kwargs] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[cls] . identifier[sh_e] ( identifier[cmd] ,** identifier[cmd_kwargs] )[ literal[int] ] | def sh_e_out(cls, cmd, **kwargs):
"""Run the command. and returns the stdout."""
cmd_kwargs = {'stdout': subprocess.PIPE}
cmd_kwargs.update(kwargs)
return cls.sh_e(cmd, **cmd_kwargs)[0] |
def cached_dataframe(self, csv_path, compute_fn):
    """Return the DataFrame for ``csv_path``, computing it only if needed.

    Lookup order: the in-memory cache, then a previously written
    (non-empty) CSV on disk, and finally ``compute_fn`` — whose result is
    persisted to ``csv_path`` and memoized before being returned.
    """
    if not csv_path.endswith(".csv"):
        raise ValueError("Invalid path '%s', must be a CSV file" % csv_path)

    if csv_path in self._memory_cache:
        return self._memory_cache[csv_path]

    have_csv_on_disk = exists(csv_path) and not self.is_empty(csv_path)
    if have_csv_on_disk:
        frame = self._read_csv(csv_path)
    else:
        frame = compute_fn()
        if not isinstance(frame, pd.DataFrame):
            raise TypeError(
                "Expected compute_fn to return DataFrame, got %s : %s" % (
                    frame, type(frame)))
        self._write_csv(frame, csv_path)

    self._memory_cache[csv_path] = frame
    return frame
constant[
If a CSV path is in the _memory_cache, then return that cached value.
If we've already saved the DataFrame as a CSV then load it.
Otherwise run the provided `compute_fn`, and store its result
in memory and and save it as a CSV.
]
if <ast.UnaryOp object at 0x7da1b0845c00> begin[:]
<ast.Raise object at 0x7da1b0865900>
if compare[name[csv_path] in name[self]._memory_cache] begin[:]
return[call[name[self]._memory_cache][name[csv_path]]]
if <ast.BoolOp object at 0x7da1b08bcd90> begin[:]
variable[df] assign[=] call[name[self]._read_csv, parameter[name[csv_path]]]
call[name[self]._memory_cache][name[csv_path]] assign[=] name[df]
return[name[df]] | keyword[def] identifier[cached_dataframe] ( identifier[self] , identifier[csv_path] , identifier[compute_fn] ):
literal[string]
keyword[if] keyword[not] identifier[csv_path] . identifier[endswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[csv_path] )
keyword[if] identifier[csv_path] keyword[in] identifier[self] . identifier[_memory_cache] :
keyword[return] identifier[self] . identifier[_memory_cache] [ identifier[csv_path] ]
keyword[if] identifier[exists] ( identifier[csv_path] ) keyword[and] keyword[not] identifier[self] . identifier[is_empty] ( identifier[csv_path] ):
identifier[df] = identifier[self] . identifier[_read_csv] ( identifier[csv_path] )
keyword[else] :
identifier[df] = identifier[compute_fn] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[df] , identifier[pd] . identifier[DataFrame] ):
keyword[raise] identifier[TypeError] (
literal[string] %(
identifier[df] , identifier[type] ( identifier[df] )))
identifier[self] . identifier[_write_csv] ( identifier[df] , identifier[csv_path] )
identifier[self] . identifier[_memory_cache] [ identifier[csv_path] ]= identifier[df]
keyword[return] identifier[df] | def cached_dataframe(self, csv_path, compute_fn):
"""
If a CSV path is in the _memory_cache, then return that cached value.
If we've already saved the DataFrame as a CSV then load it.
Otherwise run the provided `compute_fn`, and store its result
in memory and and save it as a CSV.
"""
if not csv_path.endswith('.csv'):
raise ValueError("Invalid path '%s', must be a CSV file" % csv_path) # depends on [control=['if'], data=[]]
if csv_path in self._memory_cache:
return self._memory_cache[csv_path] # depends on [control=['if'], data=['csv_path']]
if exists(csv_path) and (not self.is_empty(csv_path)):
df = self._read_csv(csv_path) # depends on [control=['if'], data=[]]
else:
df = compute_fn()
if not isinstance(df, pd.DataFrame):
raise TypeError('Expected compute_fn to return DataFrame, got %s : %s' % (df, type(df))) # depends on [control=['if'], data=[]]
self._write_csv(df, csv_path)
self._memory_cache[csv_path] = df
return df |
def get_4pt_bezier(steps, points):
    """Yield points along a cubic Bezier curve with 4 control points.

    Args:
        steps: number of samples; the parameter t runs over i/steps for
            i in 0..steps-1, so the end point (t == 1) is not emitted.
        points: sequence of four (x, y) control points.

    Yields:
        (x, y) tuples on the curve.
    """
    for i in range(steps):
        t = i / float(steps)
        u = 1 - t
        # Cubic Bernstein basis weights, computed once per step and
        # shared by both coordinates (previously duplicated for x and y).
        b0 = math.pow(u, 3)
        b1 = 3 * t * math.pow(u, 2)
        b2 = 3 * u * math.pow(t, 2)
        b3 = math.pow(t, 3)
        xloc = (b0 * points[0][0] + b1 * points[1][0] +
                b2 * points[2][0] + b3 * points[3][0])
        yloc = (b0 * points[0][1] + b1 * points[1][1] +
                b2 * points[2][1] + b3 * points[3][1])
        yield (xloc, yloc)
constant[Gets a series of bezier curve points with 1 set of 4
control points.]
for taget[name[i]] in starred[call[name[range], parameter[name[steps]]]] begin[:]
variable[t] assign[=] binary_operation[name[i] / call[name[float], parameter[name[steps]]]]
variable[xloc] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[math].pow, parameter[binary_operation[constant[1] - name[t]], constant[3]]] * call[call[name[points]][constant[0]]][constant[0]]] + binary_operation[binary_operation[binary_operation[constant[3] * name[t]] * call[name[math].pow, parameter[binary_operation[constant[1] - name[t]], constant[2]]]] * call[call[name[points]][constant[1]]][constant[0]]]] + binary_operation[binary_operation[binary_operation[constant[3] * binary_operation[constant[1] - name[t]]] * call[name[math].pow, parameter[name[t], constant[2]]]] * call[call[name[points]][constant[2]]][constant[0]]]] + binary_operation[call[name[math].pow, parameter[name[t], constant[3]]] * call[call[name[points]][constant[3]]][constant[0]]]]
variable[yloc] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[math].pow, parameter[binary_operation[constant[1] - name[t]], constant[3]]] * call[call[name[points]][constant[0]]][constant[1]]] + binary_operation[binary_operation[binary_operation[constant[3] * name[t]] * call[name[math].pow, parameter[binary_operation[constant[1] - name[t]], constant[2]]]] * call[call[name[points]][constant[1]]][constant[1]]]] + binary_operation[binary_operation[binary_operation[constant[3] * binary_operation[constant[1] - name[t]]] * call[name[math].pow, parameter[name[t], constant[2]]]] * call[call[name[points]][constant[2]]][constant[1]]]] + binary_operation[call[name[math].pow, parameter[name[t], constant[3]]] * call[call[name[points]][constant[3]]][constant[1]]]]
<ast.Yield object at 0x7da1b0dc1e10> | keyword[def] identifier[get_4pt_bezier] ( identifier[steps] , identifier[points] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[steps] ):
identifier[t] = identifier[i] / identifier[float] ( identifier[steps] )
identifier[xloc] =( identifier[math] . identifier[pow] ( literal[int] - identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ]+
literal[int] * identifier[t] * identifier[math] . identifier[pow] ( literal[int] - identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ]+
literal[int] *( literal[int] - identifier[t] )* identifier[math] . identifier[pow] ( identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ]+
identifier[math] . identifier[pow] ( identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ])
identifier[yloc] =( identifier[math] . identifier[pow] ( literal[int] - identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ]+
literal[int] * identifier[t] * identifier[math] . identifier[pow] ( literal[int] - identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ]+
literal[int] *( literal[int] - identifier[t] )* identifier[math] . identifier[pow] ( identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ]+
identifier[math] . identifier[pow] ( identifier[t] , literal[int] )* identifier[points] [ literal[int] ][ literal[int] ])
keyword[yield] ( identifier[xloc] , identifier[yloc] ) | def get_4pt_bezier(steps, points):
"""Gets a series of bezier curve points with 1 set of 4
control points."""
for i in range(steps):
t = i / float(steps)
xloc = math.pow(1 - t, 3) * points[0][0] + 3 * t * math.pow(1 - t, 2) * points[1][0] + 3 * (1 - t) * math.pow(t, 2) * points[2][0] + math.pow(t, 3) * points[3][0]
yloc = math.pow(1 - t, 3) * points[0][1] + 3 * t * math.pow(1 - t, 2) * points[1][1] + 3 * (1 - t) * math.pow(t, 2) * points[2][1] + math.pow(t, 3) * points[3][1]
yield (xloc, yloc) # depends on [control=['for'], data=['i']] |
def set(self, val, default=False, imported=False):
"""Set ``val`` as the :attr:`value` for this :class:`Setting`.
If ``default`` is ``True`` set also the :attr:`default` value.
"""
if hasattr(self.validator, '__call__'):
try:
val = self.validator(val)
except Exception as exc:
raise type(exc)(
'Could not validate value for "%s" setting: %s' %
(self.name, exc)
) from None
self.value = val
self.imported = imported
if default:
self.default = val
self.modified = True | def function[set, parameter[self, val, default, imported]]:
constant[Set ``val`` as the :attr:`value` for this :class:`Setting`.
If ``default`` is ``True`` set also the :attr:`default` value.
]
if call[name[hasattr], parameter[name[self].validator, constant[__call__]]] begin[:]
<ast.Try object at 0x7da18bc738b0>
name[self].value assign[=] name[val]
name[self].imported assign[=] name[imported]
if name[default] begin[:]
name[self].default assign[=] name[val]
name[self].modified assign[=] constant[True] | keyword[def] identifier[set] ( identifier[self] , identifier[val] , identifier[default] = keyword[False] , identifier[imported] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[validator] , literal[string] ):
keyword[try] :
identifier[val] = identifier[self] . identifier[validator] ( identifier[val] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[raise] identifier[type] ( identifier[exc] )(
literal[string] %
( identifier[self] . identifier[name] , identifier[exc] )
) keyword[from] keyword[None]
identifier[self] . identifier[value] = identifier[val]
identifier[self] . identifier[imported] = identifier[imported]
keyword[if] identifier[default] :
identifier[self] . identifier[default] = identifier[val]
identifier[self] . identifier[modified] = keyword[True] | def set(self, val, default=False, imported=False):
"""Set ``val`` as the :attr:`value` for this :class:`Setting`.
If ``default`` is ``True`` set also the :attr:`default` value.
"""
if hasattr(self.validator, '__call__'):
try:
val = self.validator(val) # depends on [control=['try'], data=[]]
except Exception as exc:
raise type(exc)('Could not validate value for "%s" setting: %s' % (self.name, exc)) from None # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
self.value = val
self.imported = imported
if default:
self.default = val # depends on [control=['if'], data=[]]
self.modified = True |
def generate_tuple_zip(self, token_list, n=2):
'''
Generate the N-gram.
Args:
token_list: The list of tokens.
n N
Returns:
zip of Tuple(N-gram)
'''
return zip(*[token_list[i:] for i in range(n)]) | def function[generate_tuple_zip, parameter[self, token_list, n]]:
constant[
Generate the N-gram.
Args:
token_list: The list of tokens.
n N
Returns:
zip of Tuple(N-gram)
]
return[call[name[zip], parameter[<ast.Starred object at 0x7da1b0789cf0>]]] | keyword[def] identifier[generate_tuple_zip] ( identifier[self] , identifier[token_list] , identifier[n] = literal[int] ):
literal[string]
keyword[return] identifier[zip] (*[ identifier[token_list] [ identifier[i] :] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] )]) | def generate_tuple_zip(self, token_list, n=2):
"""
Generate the N-gram.
Args:
token_list: The list of tokens.
n N
Returns:
zip of Tuple(N-gram)
"""
return zip(*[token_list[i:] for i in range(n)]) |
def insert_sections_some(ol,*secs,**kwargs):
'''
ol = initRange(0,20,1)
ol
loc = 6
rslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc)
rslt
####
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
loc = kwargs['index']
secs = list(secs)
secs = [concat(*secs)]
locs = [loc]
return(insert_sections_many(ol,secs,locs,mode=mode)) | def function[insert_sections_some, parameter[ol]]:
constant[
ol = initRange(0,20,1)
ol
loc = 6
rslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc)
rslt
####
]
if compare[constant[mode] in name[kwargs]] begin[:]
variable[mode] assign[=] call[name[kwargs]][constant[mode]]
variable[loc] assign[=] call[name[kwargs]][constant[index]]
variable[secs] assign[=] call[name[list], parameter[name[secs]]]
variable[secs] assign[=] list[[<ast.Call object at 0x7da1affee0e0>]]
variable[locs] assign[=] list[[<ast.Name object at 0x7da1affedcc0>]]
return[call[name[insert_sections_many], parameter[name[ol], name[secs], name[locs]]]] | keyword[def] identifier[insert_sections_some] ( identifier[ol] ,* identifier[secs] ,** identifier[kwargs] ):
literal[string]
keyword[if] ( literal[string] keyword[in] identifier[kwargs] ):
identifier[mode] = identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[mode] = literal[string]
identifier[loc] = identifier[kwargs] [ literal[string] ]
identifier[secs] = identifier[list] ( identifier[secs] )
identifier[secs] =[ identifier[concat] (* identifier[secs] )]
identifier[locs] =[ identifier[loc] ]
keyword[return] ( identifier[insert_sections_many] ( identifier[ol] , identifier[secs] , identifier[locs] , identifier[mode] = identifier[mode] )) | def insert_sections_some(ol, *secs, **kwargs):
"""
ol = initRange(0,20,1)
ol
loc = 6
rslt = insert_sections_some(ol,['a','a','a'],['c','c','c','c'],index=loc)
rslt
####
"""
if 'mode' in kwargs:
mode = kwargs['mode'] # depends on [control=['if'], data=['kwargs']]
else:
mode = 'new'
loc = kwargs['index']
secs = list(secs)
secs = [concat(*secs)]
locs = [loc]
return insert_sections_many(ol, secs, locs, mode=mode) |
def findConfigFile(cls, filename):
""" Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate
"""
paths = cls.getConfigPaths()
for p in paths:
testPath = os.path.join(p, filename)
if os.path.isfile(testPath):
return os.path.join(p, filename) | def function[findConfigFile, parameter[cls, filename]]:
constant[ Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate
]
variable[paths] assign[=] call[name[cls].getConfigPaths, parameter[]]
for taget[name[p]] in starred[name[paths]] begin[:]
variable[testPath] assign[=] call[name[os].path.join, parameter[name[p], name[filename]]]
if call[name[os].path.isfile, parameter[name[testPath]]] begin[:]
return[call[name[os].path.join, parameter[name[p], name[filename]]]] | keyword[def] identifier[findConfigFile] ( identifier[cls] , identifier[filename] ):
literal[string]
identifier[paths] = identifier[cls] . identifier[getConfigPaths] ()
keyword[for] identifier[p] keyword[in] identifier[paths] :
identifier[testPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[p] , identifier[filename] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[testPath] ):
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[p] , identifier[filename] ) | def findConfigFile(cls, filename):
""" Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate
"""
paths = cls.getConfigPaths()
for p in paths:
testPath = os.path.join(p, filename)
if os.path.isfile(testPath):
return os.path.join(p, filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] |
def text(self):
"""Decode content as a string.
"""
data = self.content
return data.decode(self.encoding or 'utf-8') if data else '' | def function[text, parameter[self]]:
constant[Decode content as a string.
]
variable[data] assign[=] name[self].content
return[<ast.IfExp object at 0x7da18bc72380>] | keyword[def] identifier[text] ( identifier[self] ):
literal[string]
identifier[data] = identifier[self] . identifier[content]
keyword[return] identifier[data] . identifier[decode] ( identifier[self] . identifier[encoding] keyword[or] literal[string] ) keyword[if] identifier[data] keyword[else] literal[string] | def text(self):
"""Decode content as a string.
"""
data = self.content
return data.decode(self.encoding or 'utf-8') if data else '' |
def clear_all():
"""DANGER!
*This command is a maintenance tool and clears the complete database.*
"""
sure = input("Are you sure to drop the complete database content? (Type "
"in upppercase YES)")
if not (sure == 'YES'):
db_log('Not deleting the database.')
sys.exit(5)
client = pymongo.MongoClient(host=dbhost, port=dbport)
db = client[dbname]
for col in db.collection_names(include_system_collections=False):
db_log("Dropping collection ", col, lvl=warn)
db.drop_collection(col) | def function[clear_all, parameter[]]:
constant[DANGER!
*This command is a maintenance tool and clears the complete database.*
]
variable[sure] assign[=] call[name[input], parameter[constant[Are you sure to drop the complete database content? (Type in upppercase YES)]]]
if <ast.UnaryOp object at 0x7da1b0facb50> begin[:]
call[name[db_log], parameter[constant[Not deleting the database.]]]
call[name[sys].exit, parameter[constant[5]]]
variable[client] assign[=] call[name[pymongo].MongoClient, parameter[]]
variable[db] assign[=] call[name[client]][name[dbname]]
for taget[name[col]] in starred[call[name[db].collection_names, parameter[]]] begin[:]
call[name[db_log], parameter[constant[Dropping collection ], name[col]]]
call[name[db].drop_collection, parameter[name[col]]] | keyword[def] identifier[clear_all] ():
literal[string]
identifier[sure] = identifier[input] ( literal[string]
literal[string] )
keyword[if] keyword[not] ( identifier[sure] == literal[string] ):
identifier[db_log] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[client] = identifier[pymongo] . identifier[MongoClient] ( identifier[host] = identifier[dbhost] , identifier[port] = identifier[dbport] )
identifier[db] = identifier[client] [ identifier[dbname] ]
keyword[for] identifier[col] keyword[in] identifier[db] . identifier[collection_names] ( identifier[include_system_collections] = keyword[False] ):
identifier[db_log] ( literal[string] , identifier[col] , identifier[lvl] = identifier[warn] )
identifier[db] . identifier[drop_collection] ( identifier[col] ) | def clear_all():
"""DANGER!
*This command is a maintenance tool and clears the complete database.*
"""
sure = input('Are you sure to drop the complete database content? (Type in upppercase YES)')
if not sure == 'YES':
db_log('Not deleting the database.')
sys.exit(5) # depends on [control=['if'], data=[]]
client = pymongo.MongoClient(host=dbhost, port=dbport)
db = client[dbname]
for col in db.collection_names(include_system_collections=False):
db_log('Dropping collection ', col, lvl=warn)
db.drop_collection(col) # depends on [control=['for'], data=['col']] |
def send_request(endpoint, **kwargs):
"""Return the response to a query as JSON from the NewsAPI web service.
The basic API is limited to 100 results which is chosen unless explicitly
given as an argument. Beyond that, paging is supported through the "page"
argument, if needed.
Parameters
----------
endpoint : str
Endpoint to query, e.g. "everything" or "top-headlines"
kwargs : dict
A list of keyword arguments passed as parameters with the query.
The basic ones are "q" which is the search query, "from" is a start
date formatted as for instance 2018-06-10 and "to" is an end date
with the same format.
Returns
-------
res_json : dict
The response from the web service as a JSON dict.
"""
if api_key is None:
logger.error('NewsAPI cannot be used without an API key')
return None
url = '%s/%s' % (newsapi_url, endpoint)
if 'apiKey' not in kwargs:
kwargs['apiKey'] = api_key
if 'pageSize' not in kwargs:
kwargs['pageSize'] = 100
res = requests.get(url, params=kwargs)
res.raise_for_status()
res_json = res.json()
return res_json | def function[send_request, parameter[endpoint]]:
constant[Return the response to a query as JSON from the NewsAPI web service.
The basic API is limited to 100 results which is chosen unless explicitly
given as an argument. Beyond that, paging is supported through the "page"
argument, if needed.
Parameters
----------
endpoint : str
Endpoint to query, e.g. "everything" or "top-headlines"
kwargs : dict
A list of keyword arguments passed as parameters with the query.
The basic ones are "q" which is the search query, "from" is a start
date formatted as for instance 2018-06-10 and "to" is an end date
with the same format.
Returns
-------
res_json : dict
The response from the web service as a JSON dict.
]
if compare[name[api_key] is constant[None]] begin[:]
call[name[logger].error, parameter[constant[NewsAPI cannot be used without an API key]]]
return[constant[None]]
variable[url] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18fe91510>, <ast.Name object at 0x7da18fe905b0>]]]
if compare[constant[apiKey] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[apiKey]] assign[=] name[api_key]
if compare[constant[pageSize] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[pageSize]] assign[=] constant[100]
variable[res] assign[=] call[name[requests].get, parameter[name[url]]]
call[name[res].raise_for_status, parameter[]]
variable[res_json] assign[=] call[name[res].json, parameter[]]
return[name[res_json]] | keyword[def] identifier[send_request] ( identifier[endpoint] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[api_key] keyword[is] keyword[None] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[None]
identifier[url] = literal[string] %( identifier[newsapi_url] , identifier[endpoint] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[api_key]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[int]
identifier[res] = identifier[requests] . identifier[get] ( identifier[url] , identifier[params] = identifier[kwargs] )
identifier[res] . identifier[raise_for_status] ()
identifier[res_json] = identifier[res] . identifier[json] ()
keyword[return] identifier[res_json] | def send_request(endpoint, **kwargs):
"""Return the response to a query as JSON from the NewsAPI web service.
The basic API is limited to 100 results which is chosen unless explicitly
given as an argument. Beyond that, paging is supported through the "page"
argument, if needed.
Parameters
----------
endpoint : str
Endpoint to query, e.g. "everything" or "top-headlines"
kwargs : dict
A list of keyword arguments passed as parameters with the query.
The basic ones are "q" which is the search query, "from" is a start
date formatted as for instance 2018-06-10 and "to" is an end date
with the same format.
Returns
-------
res_json : dict
The response from the web service as a JSON dict.
"""
if api_key is None:
logger.error('NewsAPI cannot be used without an API key')
return None # depends on [control=['if'], data=[]]
url = '%s/%s' % (newsapi_url, endpoint)
if 'apiKey' not in kwargs:
kwargs['apiKey'] = api_key # depends on [control=['if'], data=['kwargs']]
if 'pageSize' not in kwargs:
kwargs['pageSize'] = 100 # depends on [control=['if'], data=['kwargs']]
res = requests.get(url, params=kwargs)
res.raise_for_status()
res_json = res.json()
return res_json |
def get_bbox(self, primitive):
"""Get the bounding box for the mesh"""
accessor = primitive.attributes.get('POSITION')
return accessor.min, accessor.max | def function[get_bbox, parameter[self, primitive]]:
constant[Get the bounding box for the mesh]
variable[accessor] assign[=] call[name[primitive].attributes.get, parameter[constant[POSITION]]]
return[tuple[[<ast.Attribute object at 0x7da18ede7070>, <ast.Attribute object at 0x7da18ede7a90>]]] | keyword[def] identifier[get_bbox] ( identifier[self] , identifier[primitive] ):
literal[string]
identifier[accessor] = identifier[primitive] . identifier[attributes] . identifier[get] ( literal[string] )
keyword[return] identifier[accessor] . identifier[min] , identifier[accessor] . identifier[max] | def get_bbox(self, primitive):
"""Get the bounding box for the mesh"""
accessor = primitive.attributes.get('POSITION')
return (accessor.min, accessor.max) |
def contingency_table(dataframe, rownames, colnames, margins=True):
"""Contingency Table (also called Cross Tabulation)
- Table in a matrix format that displays the (multivariate) frequency distribution of the variables
- http://en.wikipedia.org/wiki/Contingency_table
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
"""
# Taking just the rownames + colnames of the dataframe
sub_set = [rownames, colnames]
_sub_df = dataframe[sub_set]
return _sub_df.pivot_table(index=rownames, columns=colnames, margins=margins, aggfunc=len, fill_value=0) | def function[contingency_table, parameter[dataframe, rownames, colnames, margins]]:
constant[Contingency Table (also called Cross Tabulation)
- Table in a matrix format that displays the (multivariate) frequency distribution of the variables
- http://en.wikipedia.org/wiki/Contingency_table
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
]
variable[sub_set] assign[=] list[[<ast.Name object at 0x7da204567730>, <ast.Name object at 0x7da204564070>]]
variable[_sub_df] assign[=] call[name[dataframe]][name[sub_set]]
return[call[name[_sub_df].pivot_table, parameter[]]] | keyword[def] identifier[contingency_table] ( identifier[dataframe] , identifier[rownames] , identifier[colnames] , identifier[margins] = keyword[True] ):
literal[string]
identifier[sub_set] =[ identifier[rownames] , identifier[colnames] ]
identifier[_sub_df] = identifier[dataframe] [ identifier[sub_set] ]
keyword[return] identifier[_sub_df] . identifier[pivot_table] ( identifier[index] = identifier[rownames] , identifier[columns] = identifier[colnames] , identifier[margins] = identifier[margins] , identifier[aggfunc] = identifier[len] , identifier[fill_value] = literal[int] ) | def contingency_table(dataframe, rownames, colnames, margins=True):
"""Contingency Table (also called Cross Tabulation)
- Table in a matrix format that displays the (multivariate) frequency distribution of the variables
- http://en.wikipedia.org/wiki/Contingency_table
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
"""
# Taking just the rownames + colnames of the dataframe
sub_set = [rownames, colnames]
_sub_df = dataframe[sub_set]
return _sub_df.pivot_table(index=rownames, columns=colnames, margins=margins, aggfunc=len, fill_value=0) |
def _from_rest_ignore(model, props):
""" Purge fields that are completely unknown """
fields = model.all_fields
for prop in props.keys():
if prop not in fields:
del props[prop] | def function[_from_rest_ignore, parameter[model, props]]:
constant[ Purge fields that are completely unknown ]
variable[fields] assign[=] name[model].all_fields
for taget[name[prop]] in starred[call[name[props].keys, parameter[]]] begin[:]
if compare[name[prop] <ast.NotIn object at 0x7da2590d7190> name[fields]] begin[:]
<ast.Delete object at 0x7da20e9568c0> | keyword[def] identifier[_from_rest_ignore] ( identifier[model] , identifier[props] ):
literal[string]
identifier[fields] = identifier[model] . identifier[all_fields]
keyword[for] identifier[prop] keyword[in] identifier[props] . identifier[keys] ():
keyword[if] identifier[prop] keyword[not] keyword[in] identifier[fields] :
keyword[del] identifier[props] [ identifier[prop] ] | def _from_rest_ignore(model, props):
""" Purge fields that are completely unknown """
fields = model.all_fields
for prop in props.keys():
if prop not in fields:
del props[prop] # depends on [control=['if'], data=['prop']] # depends on [control=['for'], data=['prop']] |
def create(shape, chunks=True, dtype=None, compressor='default',
fill_value=0, order='C', store=None, synchronizer=None,
overwrite=False, path=None, chunk_store=None, filters=None,
cache_metadata=True, cache_attrs=True, read_only=False,
object_codec=None, **kwargs):
"""Create an array.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
synchronizer : object, optional
Array synchronizer.
overwrite : bool, optional
If True, delete all pre-existing data in `store` at `path` before
creating the array.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
filters : sequence of Codecs, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
read_only : bool, optional
True if array should be protected against modification.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
Returns
-------
z : zarr.core.Array
Examples
--------
Create an array with default settings::
>>> import zarr
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000))
>>> z
<zarr.core.Array (10000, 10000) float64>
Create an array with different some different configuration options::
>>> from numcodecs import Blosc
>>> compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE)
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='i1', order='F',
... compressor=compressor)
>>> z
<zarr.core.Array (10000, 10000) int8>
To create an array with object dtype requires a filter that can handle Python object
encoding, e.g., `MsgPack` or `Pickle` from `numcodecs`::
>>> from numcodecs import MsgPack
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype=object,
... object_codec=MsgPack())
>>> z
<zarr.core.Array (10000, 10000) object>
Example with some filters, and also storing chunks separately from metadata::
>>> from numcodecs import Quantize, Adler32
>>> store, chunk_store = dict(), dict()
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='f8',
... filters=[Quantize(digits=2, dtype='f8'), Adler32()],
... store=store, chunk_store=chunk_store)
>>> z
<zarr.core.Array (10000, 10000) float64>
"""
# handle polymorphic store arg
store = normalize_store_arg(store)
# API compatibility with h5py
compressor, fill_value = _kwargs_compat(compressor, fill_value, kwargs)
# initialize array metadata
init_array(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor,
fill_value=fill_value, order=order, overwrite=overwrite, path=path,
chunk_store=chunk_store, filters=filters, object_codec=object_codec)
# instantiate array
z = Array(store, path=path, chunk_store=chunk_store, synchronizer=synchronizer,
cache_metadata=cache_metadata, cache_attrs=cache_attrs, read_only=read_only)
return z | def function[create, parameter[shape, chunks, dtype, compressor, fill_value, order, store, synchronizer, overwrite, path, chunk_store, filters, cache_metadata, cache_attrs, read_only, object_codec]]:
constant[Create an array.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
synchronizer : object, optional
Array synchronizer.
overwrite : bool, optional
If True, delete all pre-existing data in `store` at `path` before
creating the array.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
filters : sequence of Codecs, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
read_only : bool, optional
True if array should be protected against modification.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
Returns
-------
z : zarr.core.Array
Examples
--------
Create an array with default settings::
>>> import zarr
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000))
>>> z
<zarr.core.Array (10000, 10000) float64>
Create an array with different some different configuration options::
>>> from numcodecs import Blosc
>>> compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE)
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='i1', order='F',
... compressor=compressor)
>>> z
<zarr.core.Array (10000, 10000) int8>
To create an array with object dtype requires a filter that can handle Python object
encoding, e.g., `MsgPack` or `Pickle` from `numcodecs`::
>>> from numcodecs import MsgPack
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype=object,
... object_codec=MsgPack())
>>> z
<zarr.core.Array (10000, 10000) object>
Example with some filters, and also storing chunks separately from metadata::
>>> from numcodecs import Quantize, Adler32
>>> store, chunk_store = dict(), dict()
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='f8',
... filters=[Quantize(digits=2, dtype='f8'), Adler32()],
... store=store, chunk_store=chunk_store)
>>> z
<zarr.core.Array (10000, 10000) float64>
]
variable[store] assign[=] call[name[normalize_store_arg], parameter[name[store]]]
<ast.Tuple object at 0x7da1b19db820> assign[=] call[name[_kwargs_compat], parameter[name[compressor], name[fill_value], name[kwargs]]]
call[name[init_array], parameter[name[store]]]
variable[z] assign[=] call[name[Array], parameter[name[store]]]
return[name[z]] | keyword[def] identifier[create] ( identifier[shape] , identifier[chunks] = keyword[True] , identifier[dtype] = keyword[None] , identifier[compressor] = literal[string] ,
identifier[fill_value] = literal[int] , identifier[order] = literal[string] , identifier[store] = keyword[None] , identifier[synchronizer] = keyword[None] ,
identifier[overwrite] = keyword[False] , identifier[path] = keyword[None] , identifier[chunk_store] = keyword[None] , identifier[filters] = keyword[None] ,
identifier[cache_metadata] = keyword[True] , identifier[cache_attrs] = keyword[True] , identifier[read_only] = keyword[False] ,
identifier[object_codec] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[store] = identifier[normalize_store_arg] ( identifier[store] )
identifier[compressor] , identifier[fill_value] = identifier[_kwargs_compat] ( identifier[compressor] , identifier[fill_value] , identifier[kwargs] )
identifier[init_array] ( identifier[store] , identifier[shape] = identifier[shape] , identifier[chunks] = identifier[chunks] , identifier[dtype] = identifier[dtype] , identifier[compressor] = identifier[compressor] ,
identifier[fill_value] = identifier[fill_value] , identifier[order] = identifier[order] , identifier[overwrite] = identifier[overwrite] , identifier[path] = identifier[path] ,
identifier[chunk_store] = identifier[chunk_store] , identifier[filters] = identifier[filters] , identifier[object_codec] = identifier[object_codec] )
identifier[z] = identifier[Array] ( identifier[store] , identifier[path] = identifier[path] , identifier[chunk_store] = identifier[chunk_store] , identifier[synchronizer] = identifier[synchronizer] ,
identifier[cache_metadata] = identifier[cache_metadata] , identifier[cache_attrs] = identifier[cache_attrs] , identifier[read_only] = identifier[read_only] )
keyword[return] identifier[z] | def create(shape, chunks=True, dtype=None, compressor='default', fill_value=0, order='C', store=None, synchronizer=None, overwrite=False, path=None, chunk_store=None, filters=None, cache_metadata=True, cache_attrs=True, read_only=False, object_codec=None, **kwargs):
"""Create an array.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape. If True, will be guessed from `shape` and `dtype`. If
False, will be set to `shape`, i.e., single chunk for the whole array.
dtype : string or dtype, optional
NumPy dtype.
compressor : Codec, optional
Primary compressor.
fill_value : object
Default value to use for uninitialized portions of the array.
order : {'C', 'F'}, optional
Memory layout to be used within each chunk.
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
synchronizer : object, optional
Array synchronizer.
overwrite : bool, optional
If True, delete all pre-existing data in `store` at `path` before
creating the array.
path : string, optional
Path under which array is stored.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
filters : sequence of Codecs, optional
Sequence of filters to use to encode chunk data prior to compression.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
cache_attrs : bool, optional
If True (default), user attributes will be cached for attribute read
operations. If False, user attributes are reloaded from the store prior
to all attribute read operations.
read_only : bool, optional
True if array should be protected against modification.
object_codec : Codec, optional
A codec to encode object arrays, only needed if dtype=object.
Returns
-------
z : zarr.core.Array
Examples
--------
Create an array with default settings::
>>> import zarr
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000))
>>> z
<zarr.core.Array (10000, 10000) float64>
Create an array with different some different configuration options::
>>> from numcodecs import Blosc
>>> compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.BITSHUFFLE)
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='i1', order='F',
... compressor=compressor)
>>> z
<zarr.core.Array (10000, 10000) int8>
To create an array with object dtype requires a filter that can handle Python object
encoding, e.g., `MsgPack` or `Pickle` from `numcodecs`::
>>> from numcodecs import MsgPack
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype=object,
... object_codec=MsgPack())
>>> z
<zarr.core.Array (10000, 10000) object>
Example with some filters, and also storing chunks separately from metadata::
>>> from numcodecs import Quantize, Adler32
>>> store, chunk_store = dict(), dict()
>>> z = zarr.create((10000, 10000), chunks=(1000, 1000), dtype='f8',
... filters=[Quantize(digits=2, dtype='f8'), Adler32()],
... store=store, chunk_store=chunk_store)
>>> z
<zarr.core.Array (10000, 10000) float64>
"""
# handle polymorphic store arg
store = normalize_store_arg(store)
# API compatibility with h5py
(compressor, fill_value) = _kwargs_compat(compressor, fill_value, kwargs)
# initialize array metadata
init_array(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, fill_value=fill_value, order=order, overwrite=overwrite, path=path, chunk_store=chunk_store, filters=filters, object_codec=object_codec)
# instantiate array
z = Array(store, path=path, chunk_store=chunk_store, synchronizer=synchronizer, cache_metadata=cache_metadata, cache_attrs=cache_attrs, read_only=read_only)
return z |
def list_group_maintainers(self, name):
    """Return the maintainer names of a group.

    Args:
        name (string): Name of group to query.

    Returns:
        (list[string]): List of maintainer names.

    Raises:
        requests.HTTPError on failure.
    """
    service = self.project_service
    service.set_auth(self._token_project)
    return service.list_group_maintainers(name)
constant[
Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names.
Raises:
requests.HTTPError on failure.
]
call[name[self].project_service.set_auth, parameter[name[self]._token_project]]
return[call[name[self].project_service.list_group_maintainers, parameter[name[name]]]] | keyword[def] identifier[list_group_maintainers] ( identifier[self] , identifier[name] ):
literal[string]
identifier[self] . identifier[project_service] . identifier[set_auth] ( identifier[self] . identifier[_token_project] )
keyword[return] identifier[self] . identifier[project_service] . identifier[list_group_maintainers] ( identifier[name] ) | def list_group_maintainers(self, name):
"""
Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names.
Raises:
requests.HTTPError on failure.
"""
self.project_service.set_auth(self._token_project)
return self.project_service.list_group_maintainers(name) |
def inferThalamus(t, l6Input, ffInput):
    """Run one feed-forward inference step through the thalamus model.

    Resets the thalamus, applies the L6 input to de-inactivate cells,
    and then computes the feed-forward activity for the given input.

    :param t: instance of Thalamus
    :param l6Input:
    :param ffInput: a numpy array of 0's and 1's
    :return: the feed-forward output produced by the thalamus
    """
    print("\n-----------")
    t.reset()
    t.deInactivateCells(l6Input)
    return t.computeFeedForwardActivity(ffInput)
constant[
Compute the effect of this feed forward input given the specific L6 input.
:param t: instance of Thalamus
:param l6Input:
:param ffInput: a numpy array of 0's and 1's
:return:
]
call[name[print], parameter[constant[
-----------]]]
call[name[t].reset, parameter[]]
call[name[t].deInactivateCells, parameter[name[l6Input]]]
variable[ffOutput] assign[=] call[name[t].computeFeedForwardActivity, parameter[name[ffInput]]]
return[name[ffOutput]] | keyword[def] identifier[inferThalamus] ( identifier[t] , identifier[l6Input] , identifier[ffInput] ):
literal[string]
identifier[print] ( literal[string] )
identifier[t] . identifier[reset] ()
identifier[t] . identifier[deInactivateCells] ( identifier[l6Input] )
identifier[ffOutput] = identifier[t] . identifier[computeFeedForwardActivity] ( identifier[ffInput] )
keyword[return] identifier[ffOutput] | def inferThalamus(t, l6Input, ffInput):
"""
Compute the effect of this feed forward input given the specific L6 input.
:param t: instance of Thalamus
:param l6Input:
:param ffInput: a numpy array of 0's and 1's
:return:
"""
print('\n-----------')
t.reset()
t.deInactivateCells(l6Input)
ffOutput = t.computeFeedForwardActivity(ffInput)
# print("L6 input:", l6Input)
# print("Active TRN cells: ", t.activeTRNCellIndices)
# print("Burst ready relay cells: ", t.burstReadyCellIndices)
return ffOutput |
def which(executable):
    """Locate *executable* in a fixed set of system bin directories.

    Returns the first matching absolute path, or None when the
    executable is not present in any of the searched directories.
    """
    search_dirs = (
        '/usr/local/bin',
        '/bin',
        '/usr/bin',
        '/usr/local/sbin',
        '/usr/sbin',
        '/sbin',
    )
    for directory in search_dirs:
        candidate = os.path.join(directory, executable)
        if os.path.exists(candidate) and os.path.isfile(candidate):
            return candidate
constant[find the location of an executable]
variable[locations] assign[=] tuple[[<ast.Constant object at 0x7da1b17248b0>, <ast.Constant object at 0x7da1b1724190>, <ast.Constant object at 0x7da1b1725000>, <ast.Constant object at 0x7da1b1725030>, <ast.Constant object at 0x7da1b1727070>, <ast.Constant object at 0x7da1b1726620>]]
for taget[name[location]] in starred[name[locations]] begin[:]
variable[executable_path] assign[=] call[name[os].path.join, parameter[name[location], name[executable]]]
if <ast.BoolOp object at 0x7da1b1724cd0> begin[:]
return[name[executable_path]] | keyword[def] identifier[which] ( identifier[executable] ):
literal[string]
identifier[locations] =(
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)
keyword[for] identifier[location] keyword[in] identifier[locations] :
identifier[executable_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[location] , identifier[executable] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[executable_path] ) keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[executable_path] ):
keyword[return] identifier[executable_path] | def which(executable):
"""find the location of an executable"""
locations = ('/usr/local/bin', '/bin', '/usr/bin', '/usr/local/sbin', '/usr/sbin', '/sbin')
for location in locations:
executable_path = os.path.join(location, executable)
if os.path.exists(executable_path) and os.path.isfile(executable_path):
return executable_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['location']] |
def select(versions, optionsRequired=False):
    """
    Search for a wxPython installation that matches version.  If one
    is found then sys.path is modified so that version will be
    imported with a 'import wx', otherwise a VersionError exception is
    raised.  This function should only be called once at the beginning
    of the application before wxPython is imported.

    :param versions: Specifies the version to look for, it can
        either be a string or a list of strings.  Each string is
        compared to the installed wxPythons and the best match is
        inserted into the sys.path, allowing an 'import wx' to
        find that version.

        The version string is composed of the dotted version
        number (at least 2 of the 4 components) optionally
        followed by hyphen ('-') separated options (wx port,
        unicode/ansi, flavour, etc.)  A match is determined by how
        much of the installed version matches what is given in the
        version parameter.  If the version number components don't
        match then the score is zero, otherwise the score is
        increased for every specified optional component that is
        specified and that matches.

        Please note, however, that it is possible for a match to
        be selected that doesn't exactly match the versions
        requested.  The only component that is required to be
        matched is the version number.  If you need to require a
        match on the other components as well, then please use the
        optional ``optionsRequired`` parameter described next.

    :param optionsRequired: Allows you to specify that the other
        components of the version string (such as the port name
        or character type) are also required to be present for an
        installed version to be considered a match.  Using this
        parameter allows you to change the selection from a soft,
        as close as possible match to a hard, exact match.

    :raises VersionError: if no installed wxPython matches, or a
        previously selected version conflicts with this request.
    :raises AlreadyImportedError: if wxPython was already imported.
    """
    # Accept a single version string as shorthand for a one-element list.
    # isinstance (rather than a type() comparison) also accepts str subclasses.
    if isinstance(versions, str):
        versions = [versions]

    global _selected
    if _selected is not None:
        # A version was previously selected, ensure that it matches
        # this new request
        for ver in versions:
            if _selected.Score(_wxPackageInfo(ver), optionsRequired) > 0:
                return
        # otherwise, raise an exception
        raise VersionError("A previously selected wx version does not match the new request.")

    # If we get here then this is the first time wxversion is used,
    # ensure that wxPython hasn't been imported yet.
    # NOTE: membership test replaces dict.has_key(), which was removed in
    # Python 3; 'in' works identically on Python 2 as well.
    if 'wx' in sys.modules or 'wxPython' in sys.modules:
        raise AlreadyImportedError("wxversion.select() must be called before wxPython is imported")

    # Look for a matching version and manipulate the sys.path as
    # needed to allow it to be imported.
    installed = _find_installed(True)
    bestMatch = _get_best_match(installed, versions, optionsRequired)
    if bestMatch is None:
        raise VersionError("Requested version of wxPython not found")

    sys.path.insert(0, bestMatch.pathname)
    # q.v. Bug #1409256: on 64-bit systems also expose the lib64 sibling
    # directory when it exists.
    path64 = re.sub('/lib/', '/lib64/', bestMatch.pathname)
    if os.path.isdir(path64):
        sys.path.insert(0, path64)
    _selected = bestMatch
constant[
Search for a wxPython installation that matches version. If one
is found then sys.path is modified so that version will be
imported with a 'import wx', otherwise a VersionError exception is
raised. This function should only be called once at the beginning
of the application before wxPython is imported.
:param versions: Specifies the version to look for, it can
either be a string or a list of strings. Each string is
compared to the installed wxPythons and the best match is
inserted into the sys.path, allowing an 'import wx' to
find that version.
The version string is composed of the dotted version
number (at least 2 of the 4 components) optionally
followed by hyphen ('-') separated options (wx port,
unicode/ansi, flavour, etc.) A match is determined by how
much of the installed version matches what is given in the
version parameter. If the version number components don't
match then the score is zero, otherwise the score is
increased for every specified optional component that is
specified and that matches.
Please note, however, that it is possible for a match to
be selected that doesn't exactly match the versions
requested. The only component that is required to be
matched is the version number. If you need to require a
match on the other components as well, then please use the
optional ``optionsRequired`` parameter described next.
:param optionsRequired: Allows you to specify that the other
components of the version string (such as the port name
or character type) are also required to be present for an
installed version to be considered a match. Using this
parameter allows you to change the selection from a soft,
as close as possible match to a hard, exact match.
]
if compare[call[name[type], parameter[name[versions]]] equal[==] name[str]] begin[:]
variable[versions] assign[=] list[[<ast.Name object at 0x7da204622620>]]
<ast.Global object at 0x7da204620d90>
if compare[name[_selected] is_not constant[None]] begin[:]
for taget[name[ver]] in starred[name[versions]] begin[:]
if compare[call[name[_selected].Score, parameter[call[name[_wxPackageInfo], parameter[name[ver]]], name[optionsRequired]]] greater[>] constant[0]] begin[:]
return[None]
<ast.Raise object at 0x7da2046235e0>
if <ast.BoolOp object at 0x7da18f58dcf0> begin[:]
<ast.Raise object at 0x7da1b17b97e0>
variable[installed] assign[=] call[name[_find_installed], parameter[constant[True]]]
variable[bestMatch] assign[=] call[name[_get_best_match], parameter[name[installed], name[versions], name[optionsRequired]]]
if compare[name[bestMatch] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b17b9780>
call[name[sys].path.insert, parameter[constant[0], name[bestMatch].pathname]]
variable[path64] assign[=] call[name[re].sub, parameter[constant[/lib/], constant[/lib64/], name[bestMatch].pathname]]
if call[name[os].path.isdir, parameter[name[path64]]] begin[:]
call[name[sys].path.insert, parameter[constant[0], name[path64]]]
variable[_selected] assign[=] name[bestMatch] | keyword[def] identifier[select] ( identifier[versions] , identifier[optionsRequired] = keyword[False] ):
literal[string]
keyword[if] identifier[type] ( identifier[versions] )== identifier[str] :
identifier[versions] =[ identifier[versions] ]
keyword[global] identifier[_selected]
keyword[if] identifier[_selected] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[ver] keyword[in] identifier[versions] :
keyword[if] identifier[_selected] . identifier[Score] ( identifier[_wxPackageInfo] ( identifier[ver] ), identifier[optionsRequired] )> literal[int] :
keyword[return]
keyword[raise] identifier[VersionError] ( literal[string] )
keyword[if] identifier[sys] . identifier[modules] . identifier[has_key] ( literal[string] ) keyword[or] identifier[sys] . identifier[modules] . identifier[has_key] ( literal[string] ):
keyword[raise] identifier[AlreadyImportedError] ( literal[string] )
identifier[installed] = identifier[_find_installed] ( keyword[True] )
identifier[bestMatch] = identifier[_get_best_match] ( identifier[installed] , identifier[versions] , identifier[optionsRequired] )
keyword[if] identifier[bestMatch] keyword[is] keyword[None] :
keyword[raise] identifier[VersionError] ( literal[string] )
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[bestMatch] . identifier[pathname] )
identifier[path64] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[bestMatch] . identifier[pathname] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path64] ):
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[path64] )
identifier[_selected] = identifier[bestMatch] | def select(versions, optionsRequired=False):
"""
Search for a wxPython installation that matches version. If one
is found then sys.path is modified so that version will be
imported with a 'import wx', otherwise a VersionError exception is
raised. This function should only be called once at the beginning
of the application before wxPython is imported.
:param versions: Specifies the version to look for, it can
either be a string or a list of strings. Each string is
compared to the installed wxPythons and the best match is
inserted into the sys.path, allowing an 'import wx' to
find that version.
The version string is composed of the dotted version
number (at least 2 of the 4 components) optionally
followed by hyphen ('-') separated options (wx port,
unicode/ansi, flavour, etc.) A match is determined by how
much of the installed version matches what is given in the
version parameter. If the version number components don't
match then the score is zero, otherwise the score is
increased for every specified optional component that is
specified and that matches.
Please note, however, that it is possible for a match to
be selected that doesn't exactly match the versions
requested. The only component that is required to be
matched is the version number. If you need to require a
match on the other components as well, then please use the
optional ``optionsRequired`` parameter described next.
:param optionsRequired: Allows you to specify that the other
components of the version string (such as the port name
or character type) are also required to be present for an
installed version to be considered a match. Using this
parameter allows you to change the selection from a soft,
as close as possible match to a hard, exact match.
"""
if type(versions) == str:
versions = [versions] # depends on [control=['if'], data=[]]
global _selected
if _selected is not None:
# A version was previously selected, ensure that it matches
# this new request
for ver in versions:
if _selected.Score(_wxPackageInfo(ver), optionsRequired) > 0:
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ver']]
# otherwise, raise an exception
raise VersionError('A previously selected wx version does not match the new request.') # depends on [control=['if'], data=['_selected']] # If we get here then this is the first time wxversion is used,
# ensure that wxPython hasn't been imported yet.
if sys.modules.has_key('wx') or sys.modules.has_key('wxPython'):
raise AlreadyImportedError('wxversion.select() must be called before wxPython is imported') # depends on [control=['if'], data=[]]
# Look for a matching version and manipulate the sys.path as
# needed to allow it to be imported.
installed = _find_installed(True)
bestMatch = _get_best_match(installed, versions, optionsRequired)
if bestMatch is None:
raise VersionError('Requested version of wxPython not found') # depends on [control=['if'], data=[]]
sys.path.insert(0, bestMatch.pathname)
# q.v. Bug #1409256
path64 = re.sub('/lib/', '/lib64/', bestMatch.pathname)
if os.path.isdir(path64):
sys.path.insert(0, path64) # depends on [control=['if'], data=[]]
_selected = bestMatch |
def as_crispy_form(form, template_pack=TEMPLATE_PACK, label_class="", field_class=""):
    """
    The original and still very useful way to generate a div elegant form/formset::

        {% load crispy_forms_tags %}
        <form class="uniForm" method="post">
            {% csrf_token %}
            {{ myform|crispy }}
        </form>

    or, if you want to explicitly set the template pack::

        {{ myform|crispy:"bootstrap" }}

    In ``bootstrap3`` or ``bootstrap4`` for horizontal forms you can do::

        {{ myform|label_class:"col-lg-2",field_class:"col-lg-8" }}
    """
    # Shared context values; the form/formset key and the template are
    # chosen below depending on what was passed in.
    context_data = {
        'form_show_errors': True,
        'form_show_labels': True,
        'label_class': label_class,
        'field_class': field_class,
    }
    if isinstance(form, BaseFormSet):
        template = uni_formset_template(template_pack)
        context_data['formset'] = form
    else:
        template = uni_form_template(template_pack)
        context_data['form'] = form
    return template.render(Context(context_data).flatten())
constant[
The original and still very useful way to generate a div elegant form/formset::
{% load crispy_forms_tags %}
<form class="uniForm" method="post">
{% csrf_token %}
{{ myform|crispy }}
</form>
or, if you want to explicitly set the template pack::
{{ myform|crispy:"bootstrap" }}
In ``bootstrap3`` or ``bootstrap4`` for horizontal forms you can do::
{{ myform|label_class:"col-lg-2",field_class:"col-lg-8" }}
]
if call[name[isinstance], parameter[name[form], name[BaseFormSet]]] begin[:]
variable[template] assign[=] call[name[uni_formset_template], parameter[name[template_pack]]]
variable[c] assign[=] call[call[name[Context], parameter[dictionary[[<ast.Constant object at 0x7da20e9564d0>, <ast.Constant object at 0x7da20e9579a0>, <ast.Constant object at 0x7da20e9573a0>, <ast.Constant object at 0x7da20e9577f0>, <ast.Constant object at 0x7da20e957100>], [<ast.Name object at 0x7da20e956fe0>, <ast.Constant object at 0x7da20e955630>, <ast.Constant object at 0x7da20e956ec0>, <ast.Name object at 0x7da20e957a30>, <ast.Name object at 0x7da20e955f90>]]]].flatten, parameter[]]
return[call[name[template].render, parameter[name[c]]]] | keyword[def] identifier[as_crispy_form] ( identifier[form] , identifier[template_pack] = identifier[TEMPLATE_PACK] , identifier[label_class] = literal[string] , identifier[field_class] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[form] , identifier[BaseFormSet] ):
identifier[template] = identifier[uni_formset_template] ( identifier[template_pack] )
identifier[c] = identifier[Context] ({
literal[string] : identifier[form] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True] ,
literal[string] : identifier[label_class] ,
literal[string] : identifier[field_class] ,
}). identifier[flatten] ()
keyword[else] :
identifier[template] = identifier[uni_form_template] ( identifier[template_pack] )
identifier[c] = identifier[Context] ({
literal[string] : identifier[form] ,
literal[string] : keyword[True] ,
literal[string] : keyword[True] ,
literal[string] : identifier[label_class] ,
literal[string] : identifier[field_class] ,
}). identifier[flatten] ()
keyword[return] identifier[template] . identifier[render] ( identifier[c] ) | def as_crispy_form(form, template_pack=TEMPLATE_PACK, label_class='', field_class=''):
"""
The original and still very useful way to generate a div elegant form/formset::
{% load crispy_forms_tags %}
<form class="uniForm" method="post">
{% csrf_token %}
{{ myform|crispy }}
</form>
or, if you want to explicitly set the template pack::
{{ myform|crispy:"bootstrap" }}
In ``bootstrap3`` or ``bootstrap4`` for horizontal forms you can do::
{{ myform|label_class:"col-lg-2",field_class:"col-lg-8" }}
"""
if isinstance(form, BaseFormSet):
template = uni_formset_template(template_pack)
c = Context({'formset': form, 'form_show_errors': True, 'form_show_labels': True, 'label_class': label_class, 'field_class': field_class}).flatten() # depends on [control=['if'], data=[]]
else:
template = uni_form_template(template_pack)
c = Context({'form': form, 'form_show_errors': True, 'form_show_labels': True, 'label_class': label_class, 'field_class': field_class}).flatten()
return template.render(c) |
def Run(self, args):
    """Search the file for the pattern.

    This implements the grep algorithm used to scan files. It reads
    the data in chunks of BUFF_SIZE (10 MB currently) and can use
    different functions to search for matching patterns. In every
    step, a buffer that is a bit bigger than the block size is used in
    order to return all the requested results. Specifically, a
    preamble is used in order to not miss any patterns that start in
    one block of data and end in the next and also a postscript buffer
    is kept such that the algorithm can return bytes trailing the
    pattern even if the pattern is at the end of one block.

    One block:

        -----------------------------
        | Pre | Data          | Post |
        -----------------------------

    Searching the pattern is done here:

              <------------------->

    The following block is constructed like this:

        -----------------------------
        | Pre | Data          | Post |
        -----------------------------
                                |
               -----------------------------
               | Pre | Data          | Post |
               -----------------------------

    The preamble is filled from Data so every hit that happens to fall
    entirely into the preamble has to be discarded since it has
    already been discovered in the step before.

    Grepping for memory:

    If this action is used to grep the memory of a client machine
    using one of the GRR memory acquisition drivers, we have to be
    very careful not to have any hits in the GRR process memory space
    itself. Therefore, if the input is a literal, it is XOR encoded
    and only visible in memory when the pattern is matched. This is
    done using bytearrays which guarantees in place updates and no
    leaking patterns. Also the returned data is encoded using a
    different XOR 'key'.

    This should guarantee that there are no hits when the pattern is
    not present in memory. However, since the data will be copied to
    the preamble and the postscript, a single pattern might in some
    cases produce multiple hits.

    Args:
      args: A protobuf describing the grep request.

    Raises:
      RuntimeError: No search pattern has been given in the request.
    """
    # Open the target through the VFS layer; the progress callback keeps the
    # client responsive during long scans.
    fd = vfs.VFSOpen(args.target, progress_callback=self.Progress)
    fd.Seek(args.start_offset)
    base_offset = args.start_offset
    # XOR keys used so the search pattern never appears verbatim in this
    # process's own memory (see docstring, "Grepping for memory").
    self.xor_in_key = args.xor_in_key
    self.xor_out_key = args.xor_out_key
    # Pick the matcher: regex takes precedence over a literal pattern.
    if args.regex:
        find_func = functools.partial(self.FindRegex, args.regex.AsBytes())
    elif args.literal:
        find_func = functools.partial(self.FindLiteral, args.literal.AsBytes())
    else:
        raise RuntimeError("Grep needs a regex or a literal.")

    preamble_size = 0
    postscript_size = 0
    hits = 0
    data = b""
    while fd.Tell() < args.start_offset + args.length:
        # Base size to read is at most the buffer size.
        to_read = min(args.length, self.BUFF_SIZE,
                      args.start_offset + args.length - fd.Tell())

        # Read some more data for the snippet.
        to_read += self.ENVELOPE_SIZE - postscript_size
        read_data = fd.Read(to_read)
        # Carry over the tail of the previous window (postscript plus one
        # envelope) so matches spanning block boundaries are not missed.
        data = data[-postscript_size - self.ENVELOPE_SIZE:] + read_data
        # Shrink the postscript near EOF, when the read returned fewer bytes
        # than requested.
        postscript_size = max(0, self.ENVELOPE_SIZE - (to_read - len(read_data)))
        data_size = len(data) - preamble_size - postscript_size

        if data_size == 0 and postscript_size == 0:
            break

        for (start, end) in find_func(data):
            # Ignore hits in the preamble.
            if end <= preamble_size:
                continue

            # Ignore hits in the postscript.
            if end > preamble_size + data_size:
                continue

            # Offset of file in the end after length.
            if end + base_offset - preamble_size > args.start_offset + args.length:
                break

            # Include the requested amount of surrounding context, clamped to
            # the current window.
            data_start = max(0, start - args.bytes_before)
            data_end = min(len(data), end + args.bytes_after)
            # XOR-encode the returned bytes so the pattern stays hidden in
            # this process's memory.
            out_data = utils.Xor(data[data_start:data_end], self.xor_out_key)
            hits += 1
            self.SendReply(
                rdf_client.BufferReference(
                    offset=base_offset + start - preamble_size,
                    data=out_data,
                    length=len(out_data),
                    pathspec=fd.pathspec))

            if args.mode == rdf_client_fs.GrepSpec.Mode.FIRST_HIT:
                return

            if hits >= self.HIT_LIMIT:
                # Stop after HIT_LIMIT matches and tell the caller why.
                msg = utils.Xor(
                    b"This Grep has reached the maximum number of hits"
                    b" (%d)." % self.HIT_LIMIT, self.xor_out_key)
                self.SendReply(
                    rdf_client.BufferReference(offset=0, data=msg, length=len(msg)))
                return

        self.Progress()

        base_offset += data_size
        # Allow for overlap with previous matches.
        preamble_size = min(len(data), self.ENVELOPE_SIZE)
constant[Search the file for the pattern.
This implements the grep algorithm used to scan files. It reads
the data in chunks of BUFF_SIZE (10 MB currently) and can use
different functions to search for matching patterns. In every
step, a buffer that is a bit bigger than the block size is used in
order to return all the requested results. Specifically, a
preamble is used in order to not miss any patterns that start in
one block of data and end in the next and also a postscript buffer
is kept such that the algorithm can return bytes trailing the
pattern even if the pattern is at the end of one block.
One block:
-----------------------------
| Pre | Data | Post |
-----------------------------
Searching the pattern is done here:
<------------------->
The following block is constructed like this:
-----------------------------
| Pre | Data | Post |
-----------------------------
|
-----------------------------
| Pre | Data | Post |
-----------------------------
The preamble is filled from Data so every hit that happens to fall
entirely into the preamble has to be discarded since it has
already been discovered in the step before.
Grepping for memory
If this action is used to grep the memory of a client machine
using one of the GRR memory acquisition drivers, we have to be
very careful not to have any hits in the GRR process memory space
itself. Therefore, if the input is a literal, it is XOR encoded
and only visible in memory when the pattern is matched. This is
done using bytearrays which guarantees in place updates and no
leaking patterns. Also the returned data is encoded using a
different XOR 'key'.
This should guarantee that there are no hits when the pattern is
not present in memory. However, since the data will be copied to
the preamble and the postscript, a single pattern might in some
cases produce multiple hits.
Args:
args: A protobuf describing the grep request.
Raises:
RuntimeError: No search pattern has been given in the request.
]
variable[fd] assign[=] call[name[vfs].VFSOpen, parameter[name[args].target]]
call[name[fd].Seek, parameter[name[args].start_offset]]
variable[base_offset] assign[=] name[args].start_offset
name[self].xor_in_key assign[=] name[args].xor_in_key
name[self].xor_out_key assign[=] name[args].xor_out_key
if name[args].regex begin[:]
variable[find_func] assign[=] call[name[functools].partial, parameter[name[self].FindRegex, call[name[args].regex.AsBytes, parameter[]]]]
variable[preamble_size] assign[=] constant[0]
variable[postscript_size] assign[=] constant[0]
variable[hits] assign[=] constant[0]
variable[data] assign[=] constant[b'']
while compare[call[name[fd].Tell, parameter[]] less[<] binary_operation[name[args].start_offset + name[args].length]] begin[:]
variable[to_read] assign[=] call[name[min], parameter[name[args].length, name[self].BUFF_SIZE, binary_operation[binary_operation[name[args].start_offset + name[args].length] - call[name[fd].Tell, parameter[]]]]]
<ast.AugAssign object at 0x7da1b1b2a470>
variable[read_data] assign[=] call[name[fd].Read, parameter[name[to_read]]]
variable[data] assign[=] binary_operation[call[name[data]][<ast.Slice object at 0x7da1b1b2aa10>] + name[read_data]]
variable[postscript_size] assign[=] call[name[max], parameter[constant[0], binary_operation[name[self].ENVELOPE_SIZE - binary_operation[name[to_read] - call[name[len], parameter[name[read_data]]]]]]]
variable[data_size] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[data]]] - name[preamble_size]] - name[postscript_size]]
if <ast.BoolOp object at 0x7da1b1b2abf0> begin[:]
break
for taget[tuple[[<ast.Name object at 0x7da1b1b2a560>, <ast.Name object at 0x7da1b1b297b0>]]] in starred[call[name[find_func], parameter[name[data]]]] begin[:]
if compare[name[end] less_or_equal[<=] name[preamble_size]] begin[:]
continue
if compare[name[end] greater[>] binary_operation[name[preamble_size] + name[data_size]]] begin[:]
continue
if compare[binary_operation[binary_operation[name[end] + name[base_offset]] - name[preamble_size]] greater[>] binary_operation[name[args].start_offset + name[args].length]] begin[:]
break
variable[data_start] assign[=] call[name[max], parameter[constant[0], binary_operation[name[start] - name[args].bytes_before]]]
variable[data_end] assign[=] call[name[min], parameter[call[name[len], parameter[name[data]]], binary_operation[name[end] + name[args].bytes_after]]]
variable[out_data] assign[=] call[name[utils].Xor, parameter[call[name[data]][<ast.Slice object at 0x7da1b1b89d80>], name[self].xor_out_key]]
<ast.AugAssign object at 0x7da1b1b89ab0>
call[name[self].SendReply, parameter[call[name[rdf_client].BufferReference, parameter[]]]]
if compare[name[args].mode equal[==] name[rdf_client_fs].GrepSpec.Mode.FIRST_HIT] begin[:]
return[None]
if compare[name[hits] greater_or_equal[>=] name[self].HIT_LIMIT] begin[:]
variable[msg] assign[=] call[name[utils].Xor, parameter[binary_operation[constant[b'This Grep has reached the maximum number of hits (%d).'] <ast.Mod object at 0x7da2590d6920> name[self].HIT_LIMIT], name[self].xor_out_key]]
call[name[self].SendReply, parameter[call[name[rdf_client].BufferReference, parameter[]]]]
return[None]
call[name[self].Progress, parameter[]]
<ast.AugAssign object at 0x7da1b1c3dc00>
variable[preamble_size] assign[=] call[name[min], parameter[call[name[len], parameter[name[data]]], name[self].ENVELOPE_SIZE]] | keyword[def] identifier[Run] ( identifier[self] , identifier[args] ):
literal[string]
identifier[fd] = identifier[vfs] . identifier[VFSOpen] ( identifier[args] . identifier[target] , identifier[progress_callback] = identifier[self] . identifier[Progress] )
identifier[fd] . identifier[Seek] ( identifier[args] . identifier[start_offset] )
identifier[base_offset] = identifier[args] . identifier[start_offset]
identifier[self] . identifier[xor_in_key] = identifier[args] . identifier[xor_in_key]
identifier[self] . identifier[xor_out_key] = identifier[args] . identifier[xor_out_key]
keyword[if] identifier[args] . identifier[regex] :
identifier[find_func] = identifier[functools] . identifier[partial] ( identifier[self] . identifier[FindRegex] , identifier[args] . identifier[regex] . identifier[AsBytes] ())
keyword[elif] identifier[args] . identifier[literal] :
identifier[find_func] = identifier[functools] . identifier[partial] ( identifier[self] . identifier[FindLiteral] , identifier[args] . identifier[literal] . identifier[AsBytes] ())
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[preamble_size] = literal[int]
identifier[postscript_size] = literal[int]
identifier[hits] = literal[int]
identifier[data] = literal[string]
keyword[while] identifier[fd] . identifier[Tell] ()< identifier[args] . identifier[start_offset] + identifier[args] . identifier[length] :
identifier[to_read] = identifier[min] ( identifier[args] . identifier[length] , identifier[self] . identifier[BUFF_SIZE] ,
identifier[args] . identifier[start_offset] + identifier[args] . identifier[length] - identifier[fd] . identifier[Tell] ())
identifier[to_read] += identifier[self] . identifier[ENVELOPE_SIZE] - identifier[postscript_size]
identifier[read_data] = identifier[fd] . identifier[Read] ( identifier[to_read] )
identifier[data] = identifier[data] [- identifier[postscript_size] - identifier[self] . identifier[ENVELOPE_SIZE] :]+ identifier[read_data]
identifier[postscript_size] = identifier[max] ( literal[int] , identifier[self] . identifier[ENVELOPE_SIZE] -( identifier[to_read] - identifier[len] ( identifier[read_data] )))
identifier[data_size] = identifier[len] ( identifier[data] )- identifier[preamble_size] - identifier[postscript_size]
keyword[if] identifier[data_size] == literal[int] keyword[and] identifier[postscript_size] == literal[int] :
keyword[break]
keyword[for] ( identifier[start] , identifier[end] ) keyword[in] identifier[find_func] ( identifier[data] ):
keyword[if] identifier[end] <= identifier[preamble_size] :
keyword[continue]
keyword[if] identifier[end] > identifier[preamble_size] + identifier[data_size] :
keyword[continue]
keyword[if] identifier[end] + identifier[base_offset] - identifier[preamble_size] > identifier[args] . identifier[start_offset] + identifier[args] . identifier[length] :
keyword[break]
identifier[data_start] = identifier[max] ( literal[int] , identifier[start] - identifier[args] . identifier[bytes_before] )
identifier[data_end] = identifier[min] ( identifier[len] ( identifier[data] ), identifier[end] + identifier[args] . identifier[bytes_after] )
identifier[out_data] = identifier[utils] . identifier[Xor] ( identifier[data] [ identifier[data_start] : identifier[data_end] ], identifier[self] . identifier[xor_out_key] )
identifier[hits] += literal[int]
identifier[self] . identifier[SendReply] (
identifier[rdf_client] . identifier[BufferReference] (
identifier[offset] = identifier[base_offset] + identifier[start] - identifier[preamble_size] ,
identifier[data] = identifier[out_data] ,
identifier[length] = identifier[len] ( identifier[out_data] ),
identifier[pathspec] = identifier[fd] . identifier[pathspec] ))
keyword[if] identifier[args] . identifier[mode] == identifier[rdf_client_fs] . identifier[GrepSpec] . identifier[Mode] . identifier[FIRST_HIT] :
keyword[return]
keyword[if] identifier[hits] >= identifier[self] . identifier[HIT_LIMIT] :
identifier[msg] = identifier[utils] . identifier[Xor] (
literal[string]
literal[string] % identifier[self] . identifier[HIT_LIMIT] , identifier[self] . identifier[xor_out_key] )
identifier[self] . identifier[SendReply] (
identifier[rdf_client] . identifier[BufferReference] ( identifier[offset] = literal[int] , identifier[data] = identifier[msg] , identifier[length] = identifier[len] ( identifier[msg] )))
keyword[return]
identifier[self] . identifier[Progress] ()
identifier[base_offset] += identifier[data_size]
identifier[preamble_size] = identifier[min] ( identifier[len] ( identifier[data] ), identifier[self] . identifier[ENVELOPE_SIZE] ) | def Run(self, args):
"""Search the file for the pattern.
This implements the grep algorithm used to scan files. It reads
the data in chunks of BUFF_SIZE (10 MB currently) and can use
different functions to search for matching patterns. In every
step, a buffer that is a bit bigger than the block size is used in
order to return all the requested results. Specifically, a
preamble is used in order to not miss any patterns that start in
one block of data and end in the next and also a postscript buffer
is kept such that the algorithm can return bytes trailing the
pattern even if the pattern is at the end of one block.
One block:
-----------------------------
| Pre | Data | Post |
-----------------------------
Searching the pattern is done here:
<------------------->
The following block is constructed like this:
-----------------------------
| Pre | Data | Post |
-----------------------------
|
-----------------------------
| Pre | Data | Post |
-----------------------------
The preamble is filled from Data so every hit that happens to fall
entirely into the preamble has to be discarded since it has
already been discovered in the step before.
Grepping for memory
If this action is used to grep the memory of a client machine
using one of the GRR memory acquisition drivers, we have to be
very careful not to have any hits in the GRR process memory space
itself. Therefore, if the input is a literal, it is XOR encoded
and only visible in memory when the pattern is matched. This is
done using bytearrays which guarantees in place updates and no
leaking patterns. Also the returned data is encoded using a
different XOR 'key'.
This should guarantee that there are no hits when the pattern is
not present in memory. However, since the data will be copied to
the preamble and the postscript, a single pattern might in some
cases produce multiple hits.
Args:
args: A protobuf describing the grep request.
Raises:
RuntimeError: No search pattern has been given in the request.
"""
fd = vfs.VFSOpen(args.target, progress_callback=self.Progress)
fd.Seek(args.start_offset)
base_offset = args.start_offset
self.xor_in_key = args.xor_in_key
self.xor_out_key = args.xor_out_key
if args.regex:
find_func = functools.partial(self.FindRegex, args.regex.AsBytes()) # depends on [control=['if'], data=[]]
elif args.literal:
find_func = functools.partial(self.FindLiteral, args.literal.AsBytes()) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Grep needs a regex or a literal.')
preamble_size = 0
postscript_size = 0
hits = 0
data = b''
while fd.Tell() < args.start_offset + args.length:
# Base size to read is at most the buffer size.
to_read = min(args.length, self.BUFF_SIZE, args.start_offset + args.length - fd.Tell())
# Read some more data for the snippet.
to_read += self.ENVELOPE_SIZE - postscript_size
read_data = fd.Read(to_read)
data = data[-postscript_size - self.ENVELOPE_SIZE:] + read_data
postscript_size = max(0, self.ENVELOPE_SIZE - (to_read - len(read_data)))
data_size = len(data) - preamble_size - postscript_size
if data_size == 0 and postscript_size == 0:
break # depends on [control=['if'], data=[]]
for (start, end) in find_func(data):
# Ignore hits in the preamble.
if end <= preamble_size:
continue # depends on [control=['if'], data=[]]
# Ignore hits in the postscript.
if end > preamble_size + data_size:
continue # depends on [control=['if'], data=[]]
# Offset of file in the end after length.
if end + base_offset - preamble_size > args.start_offset + args.length:
break # depends on [control=['if'], data=[]]
data_start = max(0, start - args.bytes_before)
data_end = min(len(data), end + args.bytes_after)
out_data = utils.Xor(data[data_start:data_end], self.xor_out_key)
hits += 1
self.SendReply(rdf_client.BufferReference(offset=base_offset + start - preamble_size, data=out_data, length=len(out_data), pathspec=fd.pathspec))
if args.mode == rdf_client_fs.GrepSpec.Mode.FIRST_HIT:
return # depends on [control=['if'], data=[]]
if hits >= self.HIT_LIMIT:
msg = utils.Xor(b'This Grep has reached the maximum number of hits (%d).' % self.HIT_LIMIT, self.xor_out_key)
self.SendReply(rdf_client.BufferReference(offset=0, data=msg, length=len(msg)))
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self.Progress()
base_offset += data_size
# Allow for overlap with previous matches.
preamble_size = min(len(data), self.ENVELOPE_SIZE) # depends on [control=['while'], data=[]] |
def _validate_select_where(self):
    """ Checks that a filterset will not create invalid select statement """
    # check that there's either a =, a IN or a CONTAINS (collection)
    # relationship with a primary key or indexed field. We also allow
    # custom indexes to be queried with any operator (a difference
    # between a secondary index)
    # NOTE: despite the name, equal_ops collects the column objects for
    # every non-Token clause that either uses the equals operator OR
    # targets a custom-indexed column (any operator allowed there).
    # Token-valued comparisons are deliberately excluded and tracked
    # separately below.
    equal_ops = [self.model._get_column_by_db_name(w.field) \
                 for w in self._where if not isinstance(w.value, Token)
                 and (isinstance(w.operator, EqualsOperator)
                      or self.model._get_column_by_db_name(w.field).custom_index)]
    # True when any clause compares against a token() value; a
    # token-range scan is valid without a key/index restriction.
    token_comparison = any([w for w in self._where if isinstance(w.value, Token)])
    if not any(w.primary_key or w.has_index for w in equal_ops) and not token_comparison and not self._allow_filtering:
        raise QueryException(
            ('Where clauses require either =, a IN or a CONTAINS '
             '(collection) comparison with either a primary key or '
             'indexed field. You might want to consider setting '
             'custom_index on fields that you manage index outside '
             'cqlengine.'))
    if not self._allow_filtering:
        # if the query is not on an indexed field
        if not any(w.has_index for w in equal_ops):
            # Restricting only clustering columns requires either a
            # partition-key restriction or a token-range comparison;
            # otherwise Cassandra would reject the SELECT.
            if not any([w.partition_key for w in equal_ops]) and not token_comparison:
                raise QueryException(
                    ('Filtering on a clustering key without a partition '
                     'key is not allowed unless allow_filtering() is '
                     'called on the queryset. You might want to consider '
                     'setting custom_index on fields that you manage '
'index outside cqlengine.')) | def function[_validate_select_where, parameter[self]]:
constant[ Checks that a filterset will not create invalid select statement ]
variable[equal_ops] assign[=] <ast.ListComp object at 0x7da18bc71900>
variable[token_comparison] assign[=] call[name[any], parameter[<ast.ListComp object at 0x7da18bc70730>]]
if <ast.BoolOp object at 0x7da18bc70d00> begin[:]
<ast.Raise object at 0x7da20e9b18d0>
if <ast.UnaryOp object at 0x7da20e9b01c0> begin[:]
if <ast.UnaryOp object at 0x7da20e9b2b90> begin[:]
if <ast.BoolOp object at 0x7da20e9b3f10> begin[:]
<ast.Raise object at 0x7da20e9b13c0> | keyword[def] identifier[_validate_select_where] ( identifier[self] ):
literal[string]
identifier[equal_ops] =[ identifier[self] . identifier[model] . identifier[_get_column_by_db_name] ( identifier[w] . identifier[field] ) keyword[for] identifier[w] keyword[in] identifier[self] . identifier[_where] keyword[if] keyword[not] identifier[isinstance] ( identifier[w] . identifier[value] , identifier[Token] )
keyword[and] ( identifier[isinstance] ( identifier[w] . identifier[operator] , identifier[EqualsOperator] )
keyword[or] identifier[self] . identifier[model] . identifier[_get_column_by_db_name] ( identifier[w] . identifier[field] ). identifier[custom_index] )]
identifier[token_comparison] = identifier[any] ([ identifier[w] keyword[for] identifier[w] keyword[in] identifier[self] . identifier[_where] keyword[if] identifier[isinstance] ( identifier[w] . identifier[value] , identifier[Token] )])
keyword[if] keyword[not] identifier[any] ( identifier[w] . identifier[primary_key] keyword[or] identifier[w] . identifier[has_index] keyword[for] identifier[w] keyword[in] identifier[equal_ops] ) keyword[and] keyword[not] identifier[token_comparison] keyword[and] keyword[not] identifier[self] . identifier[_allow_filtering] :
keyword[raise] identifier[QueryException] (
( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ))
keyword[if] keyword[not] identifier[self] . identifier[_allow_filtering] :
keyword[if] keyword[not] identifier[any] ( identifier[w] . identifier[has_index] keyword[for] identifier[w] keyword[in] identifier[equal_ops] ):
keyword[if] keyword[not] identifier[any] ([ identifier[w] . identifier[partition_key] keyword[for] identifier[w] keyword[in] identifier[equal_ops] ]) keyword[and] keyword[not] identifier[token_comparison] :
keyword[raise] identifier[QueryException] (
( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )) | def _validate_select_where(self):
""" Checks that a filterset will not create invalid select statement """
# check that there's either a =, a IN or a CONTAINS (collection)
# relationship with a primary key or indexed field. We also allow
# custom indexes to be queried with any operator (a difference
# between a secondary index)
equal_ops = [self.model._get_column_by_db_name(w.field) for w in self._where if not isinstance(w.value, Token) and (isinstance(w.operator, EqualsOperator) or self.model._get_column_by_db_name(w.field).custom_index)]
token_comparison = any([w for w in self._where if isinstance(w.value, Token)])
if not any((w.primary_key or w.has_index for w in equal_ops)) and (not token_comparison) and (not self._allow_filtering):
raise QueryException('Where clauses require either =, a IN or a CONTAINS (collection) comparison with either a primary key or indexed field. You might want to consider setting custom_index on fields that you manage index outside cqlengine.') # depends on [control=['if'], data=[]]
if not self._allow_filtering:
# if the query is not on an indexed field
if not any((w.has_index for w in equal_ops)):
if not any([w.partition_key for w in equal_ops]) and (not token_comparison):
raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the queryset. You might want to consider setting custom_index on fields that you manage index outside cqlengine.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def chmod(scope, filename, mode):
    """
    Changes the permissions of the given file (or list of files)
    to the given mode. You probably want to use an octal representation
    for the integer, e.g. "chmod(myfile, 0644)".
    :type filename: string
    :param filename: A filename.
    :type mode: int
    :param mode: The access permissions.
    """
    # Template-function arguments arrive as lists; the mode to apply is
    # the first element and is used for every listed file.
    permissions = mode[0]
    for path in filename:
        os.chmod(path, permissions)
    return True
constant[
Changes the permissions of the given file (or list of files)
to the given mode. You probably want to use an octal representation
for the integer, e.g. "chmod(myfile, 0644)".
:type filename: string
:param filename: A filename.
:type mode: int
:param mode: The access permissions.
]
for taget[name[file]] in starred[name[filename]] begin[:]
call[name[os].chmod, parameter[name[file], call[name[mode]][constant[0]]]]
return[constant[True]] | keyword[def] identifier[chmod] ( identifier[scope] , identifier[filename] , identifier[mode] ):
literal[string]
keyword[for] identifier[file] keyword[in] identifier[filename] :
identifier[os] . identifier[chmod] ( identifier[file] , identifier[mode] [ literal[int] ])
keyword[return] keyword[True] | def chmod(scope, filename, mode):
"""
Changes the permissions of the given file (or list of files)
to the given mode. You probably want to use an octal representation
for the integer, e.g. "chmod(myfile, 0644)".
:type filename: string
:param filename: A filename.
:type mode: int
:param mode: The access permissions.
"""
for file in filename:
os.chmod(file, mode[0]) # depends on [control=['for'], data=['file']]
return True |
def get_backup_end_segment_and_time(self, db_conn, backup_mode):
    """Grab a timestamp and WAL segment name after the end of the backup: this is a point in time to which
    we must be able to recover to, and the last WAL segment that is required for the backup to be
    consistent.
    Note that pg_switch_xlog()/pg_switch_wal() is a superuser-only function, but since pg_start_backup() and
    pg_stop_backup() cause an WAL switch we'll call them instead. The downside is an unnecessary
    checkpoint.

    Returns a (wal_segment_name, end_time) tuple; the segment name is
    None when the server is in recovery (standby), because the
    current-WAL functions below cannot be called there.
    """
    cursor = db_conn.cursor()
    # Get backup end time and end segment and forcibly register a transaction in the current segment
    # Note that we can't call pg_walfile_name() or pg_current_wal_lsn() in recovery
    cursor.execute("SELECT now(), pg_is_in_recovery()")
    backup_end_time, in_recovery = cursor.fetchone()
    if in_recovery:
        # Standby server: we can record the end time but not the segment.
        db_conn.commit()
        return None, backup_end_time
    # 100000 is server_version_num for PostgreSQL 10, which renamed the
    # xlog functions to their wal equivalents.
    if self.pg_version_server >= 100000:
        cursor.execute("SELECT pg_walfile_name(pg_current_wal_lsn()), txid_current()")
    else:
        cursor.execute("SELECT pg_xlogfile_name(pg_current_xlog_location()), txid_current()")
    backup_end_wal_segment, _ = cursor.fetchone()
    db_conn.commit()
    # Now force switch of the WAL segment to make sure we have archived a segment with a known
    # timestamp after pg_stop_backup() was called.  The start/stop pair is
    # used only for its side effect of switching the WAL segment (see the
    # docstring: pg_switch_wal() itself would need superuser rights).
    backup_end_name = "pghoard_end_of_backup"
    if backup_mode == "non-exclusive":
        cursor.execute("SELECT pg_start_backup(%s, true, false)", [backup_end_name])
        cursor.execute("SELECT pg_stop_backup(false)")
    elif backup_mode == "pgespresso":
        # pgespresso extension: stop_backup needs the label returned by
        # start_backup.
        cursor.execute("SELECT pgespresso_start_backup(%s, false)", [backup_end_name])
        backup_label = cursor.fetchone()[0]
        cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
    else:
        # Exclusive (legacy) backup mode.
        cursor.execute("SELECT pg_start_backup(%s)", [backup_end_name])
        cursor.execute("SELECT pg_stop_backup()")
    db_conn.commit()
return backup_end_wal_segment, backup_end_time | def function[get_backup_end_segment_and_time, parameter[self, db_conn, backup_mode]]:
constant[Grab a timestamp and WAL segment name after the end of the backup: this is a point in time to which
we must be able to recover to, and the last WAL segment that is required for the backup to be
consistent.
Note that pg_switch_xlog()/pg_switch_wal() is a superuser-only function, but since pg_start_backup() and
pg_stop_backup() cause an WAL switch we'll call them instead. The downside is an unnecessary
checkpoint.
]
variable[cursor] assign[=] call[name[db_conn].cursor, parameter[]]
call[name[cursor].execute, parameter[constant[SELECT now(), pg_is_in_recovery()]]]
<ast.Tuple object at 0x7da1b18a38b0> assign[=] call[name[cursor].fetchone, parameter[]]
if name[in_recovery] begin[:]
call[name[db_conn].commit, parameter[]]
return[tuple[[<ast.Constant object at 0x7da1b18a0e50>, <ast.Name object at 0x7da1b18a03d0>]]]
if compare[name[self].pg_version_server greater_or_equal[>=] constant[100000]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT pg_walfile_name(pg_current_wal_lsn()), txid_current()]]]
<ast.Tuple object at 0x7da1b18a0580> assign[=] call[name[cursor].fetchone, parameter[]]
call[name[db_conn].commit, parameter[]]
variable[backup_end_name] assign[=] constant[pghoard_end_of_backup]
if compare[name[backup_mode] equal[==] constant[non-exclusive]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT pg_start_backup(%s, true, false)], list[[<ast.Name object at 0x7da1b18a2080>]]]]
call[name[cursor].execute, parameter[constant[SELECT pg_stop_backup(false)]]]
call[name[db_conn].commit, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b18a32e0>, <ast.Name object at 0x7da1b18a1c30>]]] | keyword[def] identifier[get_backup_end_segment_and_time] ( identifier[self] , identifier[db_conn] , identifier[backup_mode] ):
literal[string]
identifier[cursor] = identifier[db_conn] . identifier[cursor] ()
identifier[cursor] . identifier[execute] ( literal[string] )
identifier[backup_end_time] , identifier[in_recovery] = identifier[cursor] . identifier[fetchone] ()
keyword[if] identifier[in_recovery] :
identifier[db_conn] . identifier[commit] ()
keyword[return] keyword[None] , identifier[backup_end_time]
keyword[if] identifier[self] . identifier[pg_version_server] >= literal[int] :
identifier[cursor] . identifier[execute] ( literal[string] )
keyword[else] :
identifier[cursor] . identifier[execute] ( literal[string] )
identifier[backup_end_wal_segment] , identifier[_] = identifier[cursor] . identifier[fetchone] ()
identifier[db_conn] . identifier[commit] ()
identifier[backup_end_name] = literal[string]
keyword[if] identifier[backup_mode] == literal[string] :
identifier[cursor] . identifier[execute] ( literal[string] ,[ identifier[backup_end_name] ])
identifier[cursor] . identifier[execute] ( literal[string] )
keyword[elif] identifier[backup_mode] == literal[string] :
identifier[cursor] . identifier[execute] ( literal[string] ,[ identifier[backup_end_name] ])
identifier[backup_label] = identifier[cursor] . identifier[fetchone] ()[ literal[int] ]
identifier[cursor] . identifier[execute] ( literal[string] ,[ identifier[backup_label] ])
keyword[else] :
identifier[cursor] . identifier[execute] ( literal[string] ,[ identifier[backup_end_name] ])
identifier[cursor] . identifier[execute] ( literal[string] )
identifier[db_conn] . identifier[commit] ()
keyword[return] identifier[backup_end_wal_segment] , identifier[backup_end_time] | def get_backup_end_segment_and_time(self, db_conn, backup_mode):
"""Grab a timestamp and WAL segment name after the end of the backup: this is a point in time to which
we must be able to recover to, and the last WAL segment that is required for the backup to be
consistent.
Note that pg_switch_xlog()/pg_switch_wal() is a superuser-only function, but since pg_start_backup() and
pg_stop_backup() cause an WAL switch we'll call them instead. The downside is an unnecessary
checkpoint.
"""
cursor = db_conn.cursor()
# Get backup end time and end segment and forcibly register a transaction in the current segment
# Note that we can't call pg_walfile_name() or pg_current_wal_lsn() in recovery
cursor.execute('SELECT now(), pg_is_in_recovery()')
(backup_end_time, in_recovery) = cursor.fetchone()
if in_recovery:
db_conn.commit()
return (None, backup_end_time) # depends on [control=['if'], data=[]]
if self.pg_version_server >= 100000:
cursor.execute('SELECT pg_walfile_name(pg_current_wal_lsn()), txid_current()') # depends on [control=['if'], data=[]]
else:
cursor.execute('SELECT pg_xlogfile_name(pg_current_xlog_location()), txid_current()')
(backup_end_wal_segment, _) = cursor.fetchone()
db_conn.commit()
# Now force switch of the WAL segment to make sure we have archived a segment with a known
# timestamp after pg_stop_backup() was called.
backup_end_name = 'pghoard_end_of_backup'
if backup_mode == 'non-exclusive':
cursor.execute('SELECT pg_start_backup(%s, true, false)', [backup_end_name])
cursor.execute('SELECT pg_stop_backup(false)') # depends on [control=['if'], data=[]]
elif backup_mode == 'pgespresso':
cursor.execute('SELECT pgespresso_start_backup(%s, false)', [backup_end_name])
backup_label = cursor.fetchone()[0]
cursor.execute('SELECT pgespresso_stop_backup(%s)', [backup_label]) # depends on [control=['if'], data=[]]
else:
cursor.execute('SELECT pg_start_backup(%s)', [backup_end_name])
cursor.execute('SELECT pg_stop_backup()')
db_conn.commit()
return (backup_end_wal_segment, backup_end_time) |
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
    """
    convert blocks to banded matrix representation required for scipy.
    we are using the "lower form."
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)

    ab = np.zeros((2 * D, T * D))

    # Diagonal blocks: the (-k)-th diagonal of every DxD block, padded
    # with k trailing zeros so each block contributes exactly D entries.
    for k in range(D):
        rows = np.arange(k, D)
        cols = np.arange(D - k)
        padded = np.column_stack((H_diag[:, rows, cols], np.zeros((T, k))))
        ab[k] = padded.ravel()

    # Sub-diagonal blocks, lower-left part: (-k)-th diagonals.
    for k in range(D):
        rows = np.arange(k, D)
        cols = np.arange(D - k)
        padded = np.column_stack((H_lower_diag[:, rows, cols], np.zeros((T - 1, k))))
        ab[D + k, :D * (T - 1)] = padded.ravel()

    # Sub-diagonal blocks, upper-right part: (+k)-th diagonals.  These
    # land on the same ab rows already filled from the diagonal blocks,
    # hence the in-place addition.
    for k in range(1, D):
        rows = np.arange(D - k)
        cols = np.arange(k, D)
        padded = np.column_stack((np.zeros((T - 1, k)), H_lower_diag[:, rows, cols]))
        ab[D - k, :D * (T - 1)] += padded.ravel()

    if lower:
        return ab
    return transpose_lower_banded_matrix(ab)
constant[
convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
]
<ast.Tuple object at 0x7da18bcc8940> assign[=] name[H_diag].shape
assert[<ast.BoolOp object at 0x7da18bccb280>]
assert[compare[name[H_upper_diag].shape equal[==] tuple[[<ast.BinOp object at 0x7da18bccb580>, <ast.Name object at 0x7da18bcc99f0>, <ast.Name object at 0x7da18bccbb20>]]]]
variable[H_lower_diag] assign[=] call[name[np].swapaxes, parameter[name[H_upper_diag], <ast.UnaryOp object at 0x7da18bccbbe0>, <ast.UnaryOp object at 0x7da18bccb3d0>]]
variable[ab] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da18bcca1a0>, <ast.BinOp object at 0x7da18bccbb50>]]]]
for taget[name[d]] in starred[call[name[range], parameter[name[D]]]] begin[:]
variable[i] assign[=] call[name[np].arange, parameter[name[d], name[D]]]
variable[j] assign[=] call[name[np].arange, parameter[constant[0], binary_operation[name[D] - name[d]]]]
variable[h] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Subscript object at 0x7da18bcc8d60>, <ast.Call object at 0x7da18bccb370>]]]]
call[name[ab]][name[d]] assign[=] call[name[h].ravel, parameter[]]
for taget[name[d]] in starred[call[name[range], parameter[constant[0], name[D]]]] begin[:]
variable[i] assign[=] call[name[np].arange, parameter[name[d], name[D]]]
variable[j] assign[=] call[name[np].arange, parameter[constant[0], binary_operation[name[D] - name[d]]]]
variable[h] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Subscript object at 0x7da18bcca1d0>, <ast.Call object at 0x7da18bccaf80>]]]]
call[name[ab]][tuple[[<ast.BinOp object at 0x7da18bcca590>, <ast.Slice object at 0x7da18bcc8d00>]]] assign[=] call[name[h].ravel, parameter[]]
for taget[name[d]] in starred[call[name[range], parameter[constant[1], name[D]]]] begin[:]
variable[i] assign[=] call[name[np].arange, parameter[constant[0], binary_operation[name[D] - name[d]]]]
variable[j] assign[=] call[name[np].arange, parameter[name[d], name[D]]]
variable[h] assign[=] call[name[np].column_stack, parameter[tuple[[<ast.Call object at 0x7da2054a57e0>, <ast.Subscript object at 0x7da2054a5f30>]]]]
<ast.AugAssign object at 0x7da2054a6d10>
return[<ast.IfExp object at 0x7da2054a70a0>] | keyword[def] identifier[convert_block_tridiag_to_banded] ( identifier[H_diag] , identifier[H_upper_diag] , identifier[lower] = keyword[True] ):
literal[string]
identifier[T] , identifier[D] , identifier[_] = identifier[H_diag] . identifier[shape]
keyword[assert] identifier[H_diag] . identifier[ndim] == literal[int] keyword[and] identifier[H_diag] . identifier[shape] [ literal[int] ]== identifier[D]
keyword[assert] identifier[H_upper_diag] . identifier[shape] ==( identifier[T] - literal[int] , identifier[D] , identifier[D] )
identifier[H_lower_diag] = identifier[np] . identifier[swapaxes] ( identifier[H_upper_diag] ,- literal[int] ,- literal[int] )
identifier[ab] = identifier[np] . identifier[zeros] (( literal[int] * identifier[D] , identifier[T] * identifier[D] ))
keyword[for] identifier[d] keyword[in] identifier[range] ( identifier[D] ):
identifier[i] = identifier[np] . identifier[arange] ( identifier[d] , identifier[D] )
identifier[j] = identifier[np] . identifier[arange] ( literal[int] , identifier[D] - identifier[d] )
identifier[h] = identifier[np] . identifier[column_stack] (( identifier[H_diag] [:, identifier[i] , identifier[j] ], identifier[np] . identifier[zeros] (( identifier[T] , identifier[d] ))))
identifier[ab] [ identifier[d] ]= identifier[h] . identifier[ravel] ()
keyword[for] identifier[d] keyword[in] identifier[range] ( literal[int] , identifier[D] ):
identifier[i] = identifier[np] . identifier[arange] ( identifier[d] , identifier[D] )
identifier[j] = identifier[np] . identifier[arange] ( literal[int] , identifier[D] - identifier[d] )
identifier[h] = identifier[np] . identifier[column_stack] (( identifier[H_lower_diag] [:, identifier[i] , identifier[j] ], identifier[np] . identifier[zeros] (( identifier[T] - literal[int] , identifier[d] ))))
identifier[ab] [ identifier[D] + identifier[d] ,: identifier[D] *( identifier[T] - literal[int] )]= identifier[h] . identifier[ravel] ()
keyword[for] identifier[d] keyword[in] identifier[range] ( literal[int] , identifier[D] ):
identifier[i] = identifier[np] . identifier[arange] ( literal[int] , identifier[D] - identifier[d] )
identifier[j] = identifier[np] . identifier[arange] ( identifier[d] , identifier[D] )
identifier[h] = identifier[np] . identifier[column_stack] (( identifier[np] . identifier[zeros] (( identifier[T] - literal[int] , identifier[d] )), identifier[H_lower_diag] [:, identifier[i] , identifier[j] ]))
identifier[ab] [ identifier[D] - identifier[d] ,: identifier[D] *( identifier[T] - literal[int] )]+= identifier[h] . identifier[ravel] ()
keyword[return] identifier[ab] keyword[if] identifier[lower] keyword[else] identifier[transpose_lower_banded_matrix] ( identifier[ab] ) | def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
"""
convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
(T, D, _) = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
ab = np.zeros((2 * D, T * D))
# Fill in blocks along the diagonal
for d in range(D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
ab[d] = h.ravel() # depends on [control=['for'], data=['d']]
# Fill in lower left corner of blocks below the diagonal
for d in range(0, D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
ab[D + d, :D * (T - 1)] = h.ravel() # depends on [control=['for'], data=['d']]
# Fill in upper corner of blocks below the diagonal
for d in range(1, D):
# Get indices of (+d)-th diagonal of H_lower_diag
i = np.arange(0, D - d)
j = np.arange(d, D)
h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
ab[D - d, :D * (T - 1)] += h.ravel() # depends on [control=['for'], data=['d']]
return ab if lower else transpose_lower_banded_matrix(ab) |
def del_bg(self, key):
    """Remove the background image data

    Parameters
    ----------
    key: str
        One of :const:`VALID_BG_KEYS`

    Raises
    ------
    ValueError
        If `key` is not a valid background key; a warning (not an
        error) is emitted when there is no background data to remove.
    """
    if key not in VALID_BG_KEYS:
        raise ValueError("Invalid bg key: {}".format(key))
    bg_group = self.h5["bg_data"]
    if key in bg_group:
        del bg_group[key]
    else:
        warnings.warn("No bg data to clear for '{}' in {}.".format(key, self))
constant[Remove the background image data
Parameters
----------
key: str
One of :const:`VALID_BG_KEYS`
]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[VALID_BG_KEYS]] begin[:]
<ast.Raise object at 0x7da1b1142290>
if compare[name[key] in call[name[self].h5][constant[bg_data]]] begin[:]
<ast.Delete object at 0x7da1b1046470> | keyword[def] identifier[del_bg] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[VALID_BG_KEYS] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[key] ))
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[h5] [ literal[string] ]:
keyword[del] identifier[self] . identifier[h5] [ literal[string] ][ identifier[key] ]
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[key] , identifier[self] )
identifier[warnings] . identifier[warn] ( identifier[msg] ) | def del_bg(self, key):
"""Remove the background image data
Parameters
----------
key: str
One of :const:`VALID_BG_KEYS`
"""
if key not in VALID_BG_KEYS:
raise ValueError('Invalid bg key: {}'.format(key)) # depends on [control=['if'], data=['key']]
if key in self.h5['bg_data']:
del self.h5['bg_data'][key] # depends on [control=['if'], data=['key']]
else:
msg = "No bg data to clear for '{}' in {}.".format(key, self)
warnings.warn(msg) |
def set_reference(self, reference):
    """Set reference Dataset.

    Parameters
    ----------
    reference : Dataset
        Reference that is used as a template to construct the current Dataset.

    Returns
    -------
    self : Dataset
        Dataset with set reference.
    """
    # Copy over the reference's categorical features, feature names and
    # predictor (same calls, same order as the original fluent chain).
    configured = self.set_categorical_feature(reference.categorical_feature)
    configured = configured.set_feature_name(reference.feature_name)
    configured._set_predictor(reference._predictor)
    # we're done if self and reference share a common upstrem reference
    if self.get_ref_chain().intersection(reference.get_ref_chain()):
        return self
    if self.data is None:
        raise LightGBMError("Cannot set reference after freed raw data, "
                            "set free_raw_data=False when construct Dataset to avoid this.")
    self.reference = reference
    return self._free_handle()
constant[Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
]
call[call[call[name[self].set_categorical_feature, parameter[name[reference].categorical_feature]].set_feature_name, parameter[name[reference].feature_name]]._set_predictor, parameter[name[reference]._predictor]]
if call[call[name[self].get_ref_chain, parameter[]].intersection, parameter[call[name[reference].get_ref_chain, parameter[]]]] begin[:]
return[name[self]]
if compare[name[self].data is_not constant[None]] begin[:]
name[self].reference assign[=] name[reference]
return[call[name[self]._free_handle, parameter[]]] | keyword[def] identifier[set_reference] ( identifier[self] , identifier[reference] ):
literal[string]
identifier[self] . identifier[set_categorical_feature] ( identifier[reference] . identifier[categorical_feature] ). identifier[set_feature_name] ( identifier[reference] . identifier[feature_name] ). identifier[_set_predictor] ( identifier[reference] . identifier[_predictor] )
keyword[if] identifier[self] . identifier[get_ref_chain] (). identifier[intersection] ( identifier[reference] . identifier[get_ref_chain] ()):
keyword[return] identifier[self]
keyword[if] identifier[self] . identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[reference] = identifier[reference]
keyword[return] identifier[self] . identifier[_free_handle] ()
keyword[else] :
keyword[raise] identifier[LightGBMError] ( literal[string]
literal[string] ) | def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature).set_feature_name(reference.feature_name)._set_predictor(reference._predictor)
# we're done if self and reference share a common upstrem reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self # depends on [control=['if'], data=[]]
if self.data is not None:
self.reference = reference
return self._free_handle() # depends on [control=['if'], data=[]]
else:
raise LightGBMError('Cannot set reference after freed raw data, set free_raw_data=False when construct Dataset to avoid this.') |
def calculate_slice_output_shapes(operator):
    '''
    Allowed input/output patterns are
    1. [N, C, H, W] ---> [N, C', H, W]
    2. [N, C, H, W] ---> [N, C, H', W]
    3. [N, C, H, W] ---> [N, C, H, W']
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])

    output_shape = copy.deepcopy(operator.inputs[0].type.shape)

    params = operator.raw_operator.slice
    from coremltools.proto.NeuralNetwork_pb2 import SliceLayerParams as Params
    # Map the CoreML slice-axis enum onto the [N, C, H, W] shape index.
    axis_map = {Params.CHANNEL_AXIS: 1, Params.HEIGHT_AXIS: 2, Params.WIDTH_AXIS: 3}
    # BUGFIX: index by the axis the layer actually slices (params.axis).
    # The previous code hard-coded Params.CHANNEL_AXIS, so patterns 2 and
    # 3 in the docstring (H and W slicing) were never computed correctly.
    sliced_axis = axis_map[params.axis]
    if params.startIndex >= 0:
        output_shape[sliced_axis] = params.endIndex - params.startIndex
    else:
        # Negative start index: length formula unchanged from the
        # original implementation (presumably counts from the axis end —
        # TODO confirm against SliceLayerParams semantics).
        output_shape[sliced_axis] += 1 + params.endIndex - params.startIndex
    operator.outputs[0].type = FloatTensorType(output_shape, doc_string=operator.outputs[0].type.doc_string)
constant[
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C', H, W]
2. [N, C, H, W] ---> [N, C, H', W]
3. [N, C, H, W] ---> [N, C, H, W']
]
call[name[check_input_and_output_numbers], parameter[name[operator]]]
call[name[check_input_and_output_types], parameter[name[operator]]]
variable[output_shape] assign[=] call[name[copy].deepcopy, parameter[call[name[operator].inputs][constant[0]].type.shape]]
variable[params] assign[=] name[operator].raw_operator.slice
from relative_module[coremltools.proto.NeuralNetwork_pb2] import module[SliceLayerParams]
variable[axis_map] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1d61c60>, <ast.Attribute object at 0x7da1b1d61720>, <ast.Attribute object at 0x7da1b1d61a80>], [<ast.Constant object at 0x7da1b1d60760>, <ast.Constant object at 0x7da1b1d629b0>, <ast.Constant object at 0x7da1b1d61870>]]
if compare[name[params].startIndex greater_or_equal[>=] constant[0]] begin[:]
call[name[output_shape]][call[name[axis_map]][name[Params].CHANNEL_AXIS]] assign[=] binary_operation[name[params].endIndex - name[params].startIndex]
call[name[operator].outputs][constant[0]].type assign[=] call[name[FloatTensorType], parameter[name[output_shape]]] | keyword[def] identifier[calculate_slice_output_shapes] ( identifier[operator] ):
literal[string]
identifier[check_input_and_output_numbers] ( identifier[operator] , identifier[input_count_range] = literal[int] , identifier[output_count_range] = literal[int] )
identifier[check_input_and_output_types] ( identifier[operator] , identifier[good_input_types] =[ identifier[FloatTensorType] ])
identifier[output_shape] = identifier[copy] . identifier[deepcopy] ( identifier[operator] . identifier[inputs] [ literal[int] ]. identifier[type] . identifier[shape] )
identifier[params] = identifier[operator] . identifier[raw_operator] . identifier[slice]
keyword[from] identifier[coremltools] . identifier[proto] . identifier[NeuralNetwork_pb2] keyword[import] identifier[SliceLayerParams] keyword[as] identifier[Params]
identifier[axis_map] ={ identifier[Params] . identifier[CHANNEL_AXIS] : literal[int] , identifier[Params] . identifier[HEIGHT_AXIS] : literal[int] , identifier[Params] . identifier[WIDTH_AXIS] : literal[int] }
keyword[if] identifier[params] . identifier[startIndex] >= literal[int] :
identifier[output_shape] [ identifier[axis_map] [ identifier[Params] . identifier[CHANNEL_AXIS] ]]= identifier[params] . identifier[endIndex] - identifier[params] . identifier[startIndex]
keyword[else] :
identifier[output_shape] [ identifier[axis_map] [ identifier[Params] . identifier[CHANNEL_AXIS] ]]+= literal[int] + identifier[params] . identifier[endIndex] - identifier[params] . identifier[startIndex]
identifier[operator] . identifier[outputs] [ literal[int] ]. identifier[type] = identifier[FloatTensorType] ( identifier[output_shape] , identifier[doc_string] = identifier[operator] . identifier[outputs] [ literal[int] ]. identifier[type] . identifier[doc_string] ) | def calculate_slice_output_shapes(operator):
"""
Allowed input/output patterns are
1. [N, C, H, W] ---> [N, C', H, W]
2. [N, C, H, W] ---> [N, C, H', W]
3. [N, C, H, W] ---> [N, C, H, W']
"""
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
output_shape = copy.deepcopy(operator.inputs[0].type.shape)
params = operator.raw_operator.slice
from coremltools.proto.NeuralNetwork_pb2 import SliceLayerParams as Params
axis_map = {Params.CHANNEL_AXIS: 1, Params.HEIGHT_AXIS: 2, Params.WIDTH_AXIS: 3}
if params.startIndex >= 0:
output_shape[axis_map[Params.CHANNEL_AXIS]] = params.endIndex - params.startIndex # depends on [control=['if'], data=[]]
else:
output_shape[axis_map[Params.CHANNEL_AXIS]] += 1 + params.endIndex - params.startIndex
operator.outputs[0].type = FloatTensorType(output_shape, doc_string=operator.outputs[0].type.doc_string) |
def p_not_expression(tok):
    # NOTE: the docstring below is the PLY/yacc grammar production for this
    # rule; yacc parses it at parser-build time, so it must not be reworded.
    """not_expression : OP_NOT ex_expression
                      | ex_expression"""
    if len(tok) == 3:
        # OP_NOT branch: tok[1] is the NOT operator, tok[2] the operand rule.
        tok[0] = UnaryOperationRule(tok[1], tok[2])
    else:
        # Bare ex_expression: pass the sub-rule through unchanged.
        tok[0] = tok[1]
constant[not_expression : OP_NOT ex_expression
| ex_expression]
if compare[call[name[len], parameter[name[tok]]] equal[==] constant[3]] begin[:]
call[name[tok]][constant[0]] assign[=] call[name[UnaryOperationRule], parameter[call[name[tok]][constant[1]], call[name[tok]][constant[2]]]] | keyword[def] identifier[p_not_expression] ( identifier[tok] ):
literal[string]
keyword[if] identifier[len] ( identifier[tok] )== literal[int] :
identifier[tok] [ literal[int] ]= identifier[UnaryOperationRule] ( identifier[tok] [ literal[int] ], identifier[tok] [ literal[int] ])
keyword[else] :
identifier[tok] [ literal[int] ]= identifier[tok] [ literal[int] ] | def p_not_expression(tok):
"""not_expression : OP_NOT ex_expression
| ex_expression"""
if len(tok) == 3:
tok[0] = UnaryOperationRule(tok[1], tok[2]) # depends on [control=['if'], data=[]]
else:
tok[0] = tok[1] |
def svg(self, value):
    """
    Set the SVG field value.

    Small payloads (< 500 chars) are stored inline. Larger payloads are
    parsed: a single embedded base64 ``<image>`` element is extracted to a
    content-addressed file in the project's pictures directory (so identical
    images are stored only once), and any remaining large SVG body is also
    dumped to disk so the project file stays small.
    """
    # Short SVG snippets are kept inline; no extraction needed.
    if len(value) < 500:
        self._svg = value
        return

    try:
        root = ET.fromstring(value)
    except ET.ParseError as e:
        # Unparseable SVG: log and keep the previous value untouched.
        log.error("Can't parse SVG: {}".format(e))
        return

    # SVG is the default namespace no need to prefix it
    # NOTE(review): ET.register_namespace() normally takes a prefix such as
    # "" or "xlink", not an attribute name like "xmlns" — confirm intent.
    ET.register_namespace('xmlns', "http://www.w3.org/2000/svg")
    ET.register_namespace('xmlns:xlink', "http://www.w3.org/1999/xlink")

    # Only handle the single-embedded-image case.
    if len(root.findall("{http://www.w3.org/2000/svg}image")) == 1:
        href = "{http://www.w3.org/1999/xlink}href"
        elem = root.find("{http://www.w3.org/2000/svg}image")
        if elem.get(href, "").startswith("data:image/"):
            changed = True  # NOTE(review): never read afterwards; dead local
            data = elem.get(href, "")
            # The extension comes from the data-URI mime type,
            # e.g. "data:image/png;base64,..." -> "png".
            extension = re.sub(r"[^a-z0-9]", "", data.split(";")[0].split("/")[1].lower())
            data = base64.decodebytes(data.split(",", 1)[1].encode())
            # We compute an hash of the image file to avoid duplication
            filename = hashlib.md5(data).hexdigest() + "." + extension
            elem.set(href, filename)
            file_path = os.path.join(self._project.pictures_directory, filename)
            if not os.path.exists(file_path):
                with open(file_path, "wb") as f:
                    f.write(data)
            # NOTE(review): the modified XML tree is discarded here; the stored
            # value becomes just the extracted file name.
            value = filename

    # We dump also large svg on disk to keep .gns3 small
    if len(value) > 1000:
        # Content-addressed file name so identical SVGs are stored once.
        filename = hashlib.md5(value.encode()).hexdigest() + ".svg"
        file_path = os.path.join(self._project.pictures_directory, filename)
        if not os.path.exists(file_path):
            with open(file_path, "w+", encoding="utf-8") as f:
                f.write(value)
        self._svg = filename
    else:
        self._svg = value
constant[
Set SVG field value.
If the svg has embed base64 element we will extract them
to disk in order to avoid duplication of content
]
if compare[call[name[len], parameter[name[value]]] less[<] constant[500]] begin[:]
name[self]._svg assign[=] name[value]
return[None]
<ast.Try object at 0x7da18fe937f0>
call[name[ET].register_namespace, parameter[constant[xmlns], constant[http://www.w3.org/2000/svg]]]
call[name[ET].register_namespace, parameter[constant[xmlns:xlink], constant[http://www.w3.org/1999/xlink]]]
if compare[call[name[len], parameter[call[name[root].findall, parameter[constant[{http://www.w3.org/2000/svg}image]]]]] equal[==] constant[1]] begin[:]
variable[href] assign[=] constant[{http://www.w3.org/1999/xlink}href]
variable[elem] assign[=] call[name[root].find, parameter[constant[{http://www.w3.org/2000/svg}image]]]
if call[call[name[elem].get, parameter[name[href], constant[]]].startswith, parameter[constant[data:image/]]] begin[:]
variable[changed] assign[=] constant[True]
variable[data] assign[=] call[name[elem].get, parameter[name[href], constant[]]]
variable[extension] assign[=] call[name[re].sub, parameter[constant[[^a-z0-9]], constant[], call[call[call[call[call[name[data].split, parameter[constant[;]]]][constant[0]].split, parameter[constant[/]]]][constant[1]].lower, parameter[]]]]
variable[data] assign[=] call[name[base64].decodebytes, parameter[call[call[call[name[data].split, parameter[constant[,], constant[1]]]][constant[1]].encode, parameter[]]]]
variable[filename] assign[=] binary_operation[binary_operation[call[call[name[hashlib].md5, parameter[name[data]]].hexdigest, parameter[]] + constant[.]] + name[extension]]
call[name[elem].set, parameter[name[href], name[filename]]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[self]._project.pictures_directory, name[filename]]]
if <ast.UnaryOp object at 0x7da18fe90100> begin[:]
with call[name[open], parameter[name[file_path], constant[wb]]] begin[:]
call[name[f].write, parameter[name[data]]]
variable[value] assign[=] name[filename]
if compare[call[name[len], parameter[name[value]]] greater[>] constant[1000]] begin[:]
variable[filename] assign[=] binary_operation[call[call[name[hashlib].md5, parameter[call[name[value].encode, parameter[]]]].hexdigest, parameter[]] + constant[.svg]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[self]._project.pictures_directory, name[filename]]]
if <ast.UnaryOp object at 0x7da20e954ac0> begin[:]
with call[name[open], parameter[name[file_path], constant[w+]]] begin[:]
call[name[f].write, parameter[name[value]]]
name[self]._svg assign[=] name[filename] | keyword[def] identifier[svg] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[len] ( identifier[value] )< literal[int] :
identifier[self] . identifier[_svg] = identifier[value]
keyword[return]
keyword[try] :
identifier[root] = identifier[ET] . identifier[fromstring] ( identifier[value] )
keyword[except] identifier[ET] . identifier[ParseError] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] ))
keyword[return]
identifier[ET] . identifier[register_namespace] ( literal[string] , literal[string] )
identifier[ET] . identifier[register_namespace] ( literal[string] , literal[string] )
keyword[if] identifier[len] ( identifier[root] . identifier[findall] ( literal[string] ))== literal[int] :
identifier[href] = literal[string]
identifier[elem] = identifier[root] . identifier[find] ( literal[string] )
keyword[if] identifier[elem] . identifier[get] ( identifier[href] , literal[string] ). identifier[startswith] ( literal[string] ):
identifier[changed] = keyword[True]
identifier[data] = identifier[elem] . identifier[get] ( identifier[href] , literal[string] )
identifier[extension] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[data] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[lower] ())
identifier[data] = identifier[base64] . identifier[decodebytes] ( identifier[data] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[encode] ())
identifier[filename] = identifier[hashlib] . identifier[md5] ( identifier[data] ). identifier[hexdigest] ()+ literal[string] + identifier[extension]
identifier[elem] . identifier[set] ( identifier[href] , identifier[filename] )
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_project] . identifier[pictures_directory] , identifier[filename] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_path] ):
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[data] )
identifier[value] = identifier[filename]
keyword[if] identifier[len] ( identifier[value] )> literal[int] :
identifier[filename] = identifier[hashlib] . identifier[md5] ( identifier[value] . identifier[encode] ()). identifier[hexdigest] ()+ literal[string]
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_project] . identifier[pictures_directory] , identifier[filename] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_path] ):
keyword[with] identifier[open] ( identifier[file_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[value] )
identifier[self] . identifier[_svg] = identifier[filename]
keyword[else] :
identifier[self] . identifier[_svg] = identifier[value] | def svg(self, value):
"""
Set SVG field value.
If the svg has embed base64 element we will extract them
to disk in order to avoid duplication of content
"""
if len(value) < 500:
self._svg = value
return # depends on [control=['if'], data=[]]
try:
root = ET.fromstring(value) # depends on [control=['try'], data=[]]
except ET.ParseError as e:
log.error("Can't parse SVG: {}".format(e))
return # depends on [control=['except'], data=['e']]
# SVG is the default namespace no need to prefix it
ET.register_namespace('xmlns', 'http://www.w3.org/2000/svg')
ET.register_namespace('xmlns:xlink', 'http://www.w3.org/1999/xlink')
if len(root.findall('{http://www.w3.org/2000/svg}image')) == 1:
href = '{http://www.w3.org/1999/xlink}href'
elem = root.find('{http://www.w3.org/2000/svg}image')
if elem.get(href, '').startswith('data:image/'):
changed = True
data = elem.get(href, '')
extension = re.sub('[^a-z0-9]', '', data.split(';')[0].split('/')[1].lower())
data = base64.decodebytes(data.split(',', 1)[1].encode())
# We compute an hash of the image file to avoid duplication
filename = hashlib.md5(data).hexdigest() + '.' + extension
elem.set(href, filename)
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
with open(file_path, 'wb') as f:
f.write(data) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
value = filename # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# We dump also large svg on disk to keep .gns3 small
if len(value) > 1000:
filename = hashlib.md5(value.encode()).hexdigest() + '.svg'
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
with open(file_path, 'w+', encoding='utf-8') as f:
f.write(value) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
self._svg = filename # depends on [control=['if'], data=[]]
else:
self._svg = value |
def set(self, **kwargs):
    """
    Change one or more settings.

    Equivalent to calling self() or assigning individual parameters
    with self['parameter'] = value.
    """
    # Nothing to do when no settings were supplied.
    if not kwargs:
        return self

    # Apply every supplied setting through item assignment.
    for key in list(kwargs):
        self[key] = kwargs[key]

    # Refresh the plot unless we are still initializing.
    if self['autoplot'] and not self._initializing:
        self.plot()
    return self
return self | def function[set, parameter[self]]:
constant[
Changes a setting or multiple settings. Can also call self() or
change individual parameters with self['parameter'] = value
]
if compare[call[name[len], parameter[name[kwargs]]] equal[==] constant[0]] begin[:]
return[name[self]]
for taget[name[k]] in starred[call[name[list], parameter[call[name[kwargs].keys, parameter[]]]]] begin[:]
call[name[self]][name[k]] assign[=] call[name[kwargs]][name[k]]
if <ast.BoolOp object at 0x7da18eb55d20> begin[:]
call[name[self].plot, parameter[]]
return[name[self]] | keyword[def] identifier[set] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[kwargs] )== literal[int] : keyword[return] identifier[self]
keyword[for] identifier[k] keyword[in] identifier[list] ( identifier[kwargs] . identifier[keys] ()): identifier[self] [ identifier[k] ]= identifier[kwargs] [ identifier[k] ]
keyword[if] identifier[self] [ literal[string] ] keyword[and] keyword[not] identifier[self] . identifier[_initializing] : identifier[self] . identifier[plot] ()
keyword[return] identifier[self] | def set(self, **kwargs):
"""
Changes a setting or multiple settings. Can also call self() or
change individual parameters with self['parameter'] = value
"""
if len(kwargs) == 0:
return self # depends on [control=['if'], data=[]]
# Set settings
for k in list(kwargs.keys()):
self[k] = kwargs[k] # depends on [control=['for'], data=['k']]
# Plot if we're supposed to.
if self['autoplot'] and (not self._initializing):
self.plot() # depends on [control=['if'], data=[]]
return self |
def create_from_pybankid_exception(cls, exception):
    """Class method for initiating from a `PyBankID` exception.

    :param bankid.exceptions.BankIDError exception:
    :return: The wrapped exception.
    :rtype: :py:class:`~FlaskPyBankIDError`
    """
    # Build the human-readable message and map the exception class to its
    # HTTP status code before constructing the wrapper.
    message = "{0}: {1}".format(exception.__class__.__name__, str(exception))
    status_code = _exception_class_to_status_code.get(exception.__class__)
    return cls(message, status_code)
constant[Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
]
return[call[name[cls], parameter[call[constant[{0}: {1}].format, parameter[name[exception].__class__.__name__, call[name[str], parameter[name[exception]]]]], call[name[_exception_class_to_status_code].get, parameter[name[exception].__class__]]]]] | keyword[def] identifier[create_from_pybankid_exception] ( identifier[cls] , identifier[exception] ):
literal[string]
keyword[return] identifier[cls] (
literal[string] . identifier[format] ( identifier[exception] . identifier[__class__] . identifier[__name__] , identifier[str] ( identifier[exception] )),
identifier[_exception_class_to_status_code] . identifier[get] ( identifier[exception] . identifier[__class__] ),
) | def create_from_pybankid_exception(cls, exception):
"""Class method for initiating from a `PyBankID` exception.
:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
"""
return cls('{0}: {1}'.format(exception.__class__.__name__, str(exception)), _exception_class_to_status_code.get(exception.__class__)) |
def build_header(self, title):
    """Prepend the Markdown front-matter header to the output lines."""
    front_matter = [
        '---',
        'title: ' + title,
        'author(s): ' + self.user,
        'tags: ',
        'created_at: ' + str(self.date_created),
        'updated_at: ' + str(self.date_updated),
        'tldr: ',
        'thumbnail: ',
        '---',
    ]
    self.out = front_matter + self.out
constant[Generate the header for the Markdown file.]
variable[header] assign[=] list[[<ast.Constant object at 0x7da20c7950f0>, <ast.BinOp object at 0x7da20c796320>, <ast.BinOp object at 0x7da20c794f10>, <ast.Constant object at 0x7da20c795360>, <ast.BinOp object at 0x7da20c795210>, <ast.BinOp object at 0x7da20c795d80>, <ast.Constant object at 0x7da20c794310>, <ast.Constant object at 0x7da20c795a50>, <ast.Constant object at 0x7da20c794eb0>]]
name[self].out assign[=] binary_operation[name[header] + name[self].out] | keyword[def] identifier[build_header] ( identifier[self] , identifier[title] ):
literal[string]
identifier[header] =[ literal[string] ,
literal[string] + identifier[title] ,
literal[string] + identifier[self] . identifier[user] ,
literal[string] ,
literal[string] + identifier[str] ( identifier[self] . identifier[date_created] ),
literal[string] + identifier[str] ( identifier[self] . identifier[date_updated] ),
literal[string] ,
literal[string] ,
literal[string] ]
identifier[self] . identifier[out] = identifier[header] + identifier[self] . identifier[out] | def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---', 'title: ' + title, 'author(s): ' + self.user, 'tags: ', 'created_at: ' + str(self.date_created), 'updated_at: ' + str(self.date_updated), 'tldr: ', 'thumbnail: ', '---']
self.out = header + self.out |
def _do_history(self, cmd, args):
    """\
    Display history.
    history Display history.
    history clear Clear history.
    history clearall Clear history for all shells.
    """
    subcommand = args[0] if args else None
    if subcommand == 'clear':
        # Wipe this shell's in-memory history and persist the empty file.
        readline.clear_history()
        readline.write_history_file(self.history_fname)
    elif subcommand == 'clearall':
        # Drop every shell's history by recreating the shared directory.
        readline.clear_history()
        shutil.rmtree(self._temp_dir, ignore_errors=True)
        os.makedirs(os.path.join(self._temp_dir, 'history'))
    else:
        # Persist the current history, then echo the file back to the user.
        readline.write_history_file(self.history_fname)
        with open(self.history_fname, 'r', encoding='utf8') as f:
            self.stdout.write(f.read())
self.stdout.write(f.read()) | def function[_do_history, parameter[self, cmd, args]]:
constant[ Display history.
history Display history.
history clear Clear history.
history clearall Clear history for all shells.
]
if <ast.BoolOp object at 0x7da18eb543a0> begin[:]
call[name[readline].clear_history, parameter[]]
call[name[readline].write_history_file, parameter[name[self].history_fname]] | keyword[def] identifier[_do_history] ( identifier[self] , identifier[cmd] , identifier[args] ):
literal[string]
keyword[if] identifier[args] keyword[and] identifier[args] [ literal[int] ]== literal[string] :
identifier[readline] . identifier[clear_history] ()
identifier[readline] . identifier[write_history_file] ( identifier[self] . identifier[history_fname] )
keyword[elif] identifier[args] keyword[and] identifier[args] [ literal[int] ]== literal[string] :
identifier[readline] . identifier[clear_history] ()
identifier[shutil] . identifier[rmtree] ( identifier[self] . identifier[_temp_dir] , identifier[ignore_errors] = keyword[True] )
identifier[os] . identifier[makedirs] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_temp_dir] , literal[string] ))
keyword[else] :
identifier[readline] . identifier[write_history_file] ( identifier[self] . identifier[history_fname] )
keyword[with] identifier[open] ( identifier[self] . identifier[history_fname] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[self] . identifier[stdout] . identifier[write] ( identifier[f] . identifier[read] ()) | def _do_history(self, cmd, args):
""" Display history.
history Display history.
history clear Clear history.
history clearall Clear history for all shells.
"""
if args and args[0] == 'clear':
readline.clear_history()
readline.write_history_file(self.history_fname) # depends on [control=['if'], data=[]]
elif args and args[0] == 'clearall':
readline.clear_history()
shutil.rmtree(self._temp_dir, ignore_errors=True)
os.makedirs(os.path.join(self._temp_dir, 'history')) # depends on [control=['if'], data=[]]
else:
readline.write_history_file(self.history_fname)
with open(self.history_fname, 'r', encoding='utf8') as f:
self.stdout.write(f.read()) # depends on [control=['with'], data=['f']] |
def modify_distribution_list(self, dl_description, attrs):
    """
    :param dl_description : a DistributionList specifying either :
        - id: the dl_list_id
        - dl_description: the name of the list
    :param attrs : a dictionary of attributes to set ({key:value,...})
    """
    # Zimbra expects attributes as a list of {'n': name, '_content': value}.
    attr_list = []
    for attr_name, attr_value in attrs.items():
        attr_list.append({'n': attr_name, '_content': attr_value})

    payload = {
        'id': self._get_or_fetch_id(dl_description, self.get_distribution_list),
        'a': attr_list,
    }
    self.request('ModifyDistributionList', payload)
constant[
:param dl_description : a DistributionList specifying either :
- id: the dl_list_id
- dl_description: the name of the list
:param attrs : a dictionary of attributes to set ({key:value,...})
]
variable[attrs] assign[=] <ast.ListComp object at 0x7da18ede74f0>
call[name[self].request, parameter[constant[ModifyDistributionList], dictionary[[<ast.Constant object at 0x7da18ede53c0>, <ast.Constant object at 0x7da18ede5150>], [<ast.Call object at 0x7da18ede43a0>, <ast.Name object at 0x7da18ede6e90>]]]] | keyword[def] identifier[modify_distribution_list] ( identifier[self] , identifier[dl_description] , identifier[attrs] ):
literal[string]
identifier[attrs] =[{ literal[string] : identifier[k] , literal[string] : identifier[v] } keyword[for] identifier[k] , identifier[v] keyword[in] identifier[attrs] . identifier[items] ()]
identifier[self] . identifier[request] ( literal[string] ,{
literal[string] : identifier[self] . identifier[_get_or_fetch_id] ( identifier[dl_description] ,
identifier[self] . identifier[get_distribution_list] ),
literal[string] : identifier[attrs]
}) | def modify_distribution_list(self, dl_description, attrs):
"""
:param dl_description : a DistributionList specifying either :
- id: the dl_list_id
- dl_description: the name of the list
:param attrs : a dictionary of attributes to set ({key:value,...})
"""
attrs = [{'n': k, '_content': v} for (k, v) in attrs.items()]
self.request('ModifyDistributionList', {'id': self._get_or_fetch_id(dl_description, self.get_distribution_list), 'a': attrs}) |
def prepare(self, data_batch, sparse_row_id_fn=None):
    """Prepares two modules for processing a data batch.

    Usually involves switching bucket and reshaping.
    For modules that contain `row_sparse` parameters in KVStore,
    it prepares the `row_sparse` parameters based on the sparse_row_id_fn.

    When KVStore is used to update parameters for multi-device or multi-machine training,
    a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters,
    the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
    the updated parameters to all devices / machines. The `prepare` function is used to
    broadcast `row_sparse` parameters with the next batch of data.

    Parameters
    ----------
    data_batch : DataBatch
        The current batch of data for forward computation.
    sparse_row_id_fn : A callback function
        The function takes `data_batch` as an input and returns a dict of
        str -> NDArray. The resulting dict is used for pulling row_sparse
        parameters from the kvstore, where the str key is the name of the param,
        and the value is the row id of the param to pull.
    """
    # Prepare the main module first, then mirror the same preparation on the
    # auxiliary module so both stay in sync for the SVRG update.
    super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
    self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
constant[Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
]
call[call[name[super], parameter[name[SVRGModule], name[self]]].prepare, parameter[name[data_batch]]]
call[name[self]._mod_aux.prepare, parameter[name[data_batch]]] | keyword[def] identifier[prepare] ( identifier[self] , identifier[data_batch] , identifier[sparse_row_id_fn] = keyword[None] ):
literal[string]
identifier[super] ( identifier[SVRGModule] , identifier[self] ). identifier[prepare] ( identifier[data_batch] , identifier[sparse_row_id_fn] = identifier[sparse_row_id_fn] )
identifier[self] . identifier[_mod_aux] . identifier[prepare] ( identifier[data_batch] , identifier[sparse_row_id_fn] = identifier[sparse_row_id_fn] ) | def prepare(self, data_batch, sparse_row_id_fn=None):
"""Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) |
def _divf(ins):
    """Emit code dividing two float values; the quotient is pushed onto the stack."""
    # Unpack the two operands of the quad (raises if the arity is wrong,
    # matching the original tuple-unpack behavior).
    dividend, divisor = ins.quad[2:]

    # Dividing by the constant 1 is a no-op: just load and push the dividend.
    if is_float(divisor) and float(divisor) == 1:
        output = _float_oper(dividend)
        output.extend(_fpush())
        return output

    output = _float_oper(dividend, divisor)
    output.append('call __DIVF')
    output.extend(_fpush())
    REQUIRES.add('divf.asm')
    return output
constant[ Divides 2 float values. The result is pushed onto the stack.
]
<ast.Tuple object at 0x7da18f09c460> assign[=] call[name[tuple], parameter[call[name[ins].quad][<ast.Slice object at 0x7da18f09fb50>]]]
if <ast.BoolOp object at 0x7da18f09ee90> begin[:]
variable[output] assign[=] call[name[_float_oper], parameter[name[op1]]]
call[name[output].extend, parameter[call[name[_fpush], parameter[]]]]
return[name[output]]
variable[output] assign[=] call[name[_float_oper], parameter[name[op1], name[op2]]]
call[name[output].append, parameter[constant[call __DIVF]]]
call[name[output].extend, parameter[call[name[_fpush], parameter[]]]]
call[name[REQUIRES].add, parameter[constant[divf.asm]]]
return[name[output]] | keyword[def] identifier[_divf] ( identifier[ins] ):
literal[string]
identifier[op1] , identifier[op2] = identifier[tuple] ( identifier[ins] . identifier[quad] [ literal[int] :])
keyword[if] identifier[is_float] ( identifier[op2] ) keyword[and] identifier[float] ( identifier[op2] )== literal[int] :
identifier[output] = identifier[_float_oper] ( identifier[op1] )
identifier[output] . identifier[extend] ( identifier[_fpush] ())
keyword[return] identifier[output]
identifier[output] = identifier[_float_oper] ( identifier[op1] , identifier[op2] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[extend] ( identifier[_fpush] ())
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output] | def _divf(ins):
""" Divides 2 float values. The result is pushed onto the stack.
"""
(op1, op2) = tuple(ins.quad[2:])
if is_float(op2) and float(op2) == 1: # Nothing to do. A / 1 = A
output = _float_oper(op1)
output.extend(_fpush())
return output # depends on [control=['if'], data=[]]
output = _float_oper(op1, op2)
output.append('call __DIVF')
output.extend(_fpush())
REQUIRES.add('divf.asm')
return output |
def save(self, **kwargs):
    """Override save method to catch handled errors and repackage them as 400 errors."""
    try:
        return super().save(**kwargs)
    except SlugError as error:
        # Re-raise slug validation problems as ParseError so the client
        # receives a 400 response instead of an unhandled server error.
        raise ParseError(error)
constant[Override save method to catch handled errors and repackage them as 400 errors.]
<ast.Try object at 0x7da18bccb880> | keyword[def] identifier[save] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[super] (). identifier[save] (** identifier[kwargs] )
keyword[except] identifier[SlugError] keyword[as] identifier[error] :
keyword[raise] identifier[ParseError] ( identifier[error] ) | def save(self, **kwargs):
"""Override save method to catch handled errors and repackage them as 400 errors."""
try:
return super().save(**kwargs) # depends on [control=['try'], data=[]]
except SlugError as error:
raise ParseError(error) # depends on [control=['except'], data=['error']] |
def present(name, auth=None, **kwargs):
    '''
    Ensure a subnet exists and is up-to-date
    name
        Name of the subnet
    network_name_or_id
        The unique name or ID of the attached network.
        If a non-unique name is supplied, an exception is raised.
    allocation_pools
        A list of dictionaries of the start and end addresses
        for the allocation pools
    gateway_ip
        The gateway IP address.
    dns_nameservers
        A list of DNS name servers for the subnet.
    host_routes
        A list of host route dictionaries for the subnet.
    ipv6_ra_mode
        IPv6 Router Advertisement mode.
        Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
    ipv6_address_mode
        IPv6 address mode.
        Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
    '''
    # Standard Salt state return structure.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    __salt__['neutronng.setup_clouds'](auth)
    kwargs['subnet_name'] = name
    subnet = __salt__['neutronng.subnet_get'](name=name)
    if subnet is None:
        # Subnet does not exist yet: create it (or just report in test mode).
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['comment'] = 'Subnet will be created.'
            return ret
        new_subnet = __salt__['neutronng.subnet_create'](**kwargs)
        ret['changes'] = new_subnet
        ret['comment'] = 'Created subnet'
        return ret
    changes = __salt__['neutronng.compare_changes'](subnet, **kwargs)
    if changes:
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            # Fixed: this message previously said 'Project will be updated.'
            # (copy-paste from the project state); this state manages subnets.
            ret['comment'] = 'Subnet will be updated.'
            return ret
        # update_subnet does not support changing cidr,
        # so we have to delete and recreate the subnet in this case.
        if 'cidr' in changes or 'tenant_id' in changes:
            __salt__['neutronng.subnet_delete'](name=name)
            new_subnet = __salt__['neutronng.subnet_create'](**kwargs)
            ret['changes'] = new_subnet
            ret['comment'] = 'Deleted and recreated subnet'
            return ret
        __salt__['neutronng.subnet_update'](**kwargs)
        ret['changes'].update(changes)
        ret['comment'] = 'Updated subnet'
    return ret
constant[
Ensure a subnet exists and is up-to-date
name
Name of the subnet
network_name_or_id
The unique name or ID of the attached network.
If a non-unique name is supplied, an exception is raised.
allocation_pools
A list of dictionaries of the start and end addresses
for the allocation pools
gateway_ip
The gateway IP address.
dns_nameservers
A list of DNS name servers for the subnet.
host_routes
A list of host route dictionaries for the subnet.
ipv6_ra_mode
IPv6 Router Advertisement mode.
Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
ipv6_address_mode
IPv6 address mode.
Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21351b0>, <ast.Constant object at 0x7da1b21367a0>, <ast.Constant object at 0x7da1b21351e0>, <ast.Constant object at 0x7da1b2135210>], [<ast.Name object at 0x7da1b21374c0>, <ast.Dict object at 0x7da1b21377c0>, <ast.Constant object at 0x7da1b2137460>, <ast.Constant object at 0x7da1b2137490>]]
variable[kwargs] assign[=] call[call[name[__utils__]][constant[args.clean_kwargs]], parameter[]]
call[call[name[__salt__]][constant[neutronng.setup_clouds]], parameter[name[auth]]]
call[name[kwargs]][constant[subnet_name]] assign[=] name[name]
variable[subnet] assign[=] call[call[name[__salt__]][constant[neutronng.subnet_get]], parameter[]]
if compare[name[subnet] is constant[None]] begin[:]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[changes]] assign[=] name[kwargs]
call[name[ret]][constant[comment]] assign[=] constant[Subnet will be created.]
return[name[ret]]
variable[new_subnet] assign[=] call[call[name[__salt__]][constant[neutronng.subnet_create]], parameter[]]
call[name[ret]][constant[changes]] assign[=] name[new_subnet]
call[name[ret]][constant[comment]] assign[=] constant[Created subnet]
return[name[ret]]
variable[changes] assign[=] call[call[name[__salt__]][constant[neutronng.compare_changes]], parameter[name[subnet]]]
if name[changes] begin[:]
if compare[call[name[__opts__]][constant[test]] is constant[True]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[changes]] assign[=] name[changes]
call[name[ret]][constant[comment]] assign[=] constant[Project will be updated.]
return[name[ret]]
if <ast.BoolOp object at 0x7da1b1f34970> begin[:]
call[call[name[__salt__]][constant[neutronng.subnet_delete]], parameter[]]
variable[new_subnet] assign[=] call[call[name[__salt__]][constant[neutronng.subnet_create]], parameter[]]
call[name[ret]][constant[changes]] assign[=] name[new_subnet]
call[name[ret]][constant[comment]] assign[=] constant[Deleted and recreated subnet]
return[name[ret]]
call[call[name[__salt__]][constant[neutronng.subnet_update]], parameter[]]
call[call[name[ret]][constant[changes]].update, parameter[name[changes]]]
call[name[ret]][constant[comment]] assign[=] constant[Updated subnet]
return[name[ret]] | keyword[def] identifier[present] ( identifier[name] , identifier[auth] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] }
identifier[kwargs] = identifier[__utils__] [ literal[string] ](** identifier[kwargs] )
identifier[__salt__] [ literal[string] ]( identifier[auth] )
identifier[kwargs] [ literal[string] ]= identifier[name]
identifier[subnet] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] )
keyword[if] identifier[subnet] keyword[is] keyword[None] :
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= identifier[kwargs]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[new_subnet] = identifier[__salt__] [ literal[string] ](** identifier[kwargs] )
identifier[ret] [ literal[string] ]= identifier[new_subnet]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[changes] = identifier[__salt__] [ literal[string] ]( identifier[subnet] ,** identifier[kwargs] )
keyword[if] identifier[changes] :
keyword[if] identifier[__opts__] [ literal[string] ] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= identifier[changes]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] literal[string] keyword[in] identifier[changes] keyword[or] literal[string] keyword[in] identifier[changes] :
identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] )
identifier[new_subnet] = identifier[__salt__] [ literal[string] ](** identifier[kwargs] )
identifier[ret] [ literal[string] ]= identifier[new_subnet]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[__salt__] [ literal[string] ](** identifier[kwargs] )
identifier[ret] [ literal[string] ]. identifier[update] ( identifier[changes] )
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def present(name, auth=None, **kwargs):
"""
Ensure a subnet exists and is up-to-date
name
Name of the subnet
network_name_or_id
The unique name or ID of the attached network.
If a non-unique name is supplied, an exception is raised.
allocation_pools
A list of dictionaries of the start and end addresses
for the allocation pools
gateway_ip
The gateway IP address.
dns_nameservers
A list of DNS name servers for the subnet.
host_routes
A list of host route dictionaries for the subnet.
ipv6_ra_mode
IPv6 Router Advertisement mode.
Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
ipv6_address_mode
IPv6 address mode.
Valid values are: ‘dhcpv6-stateful’, ‘dhcpv6-stateless’, or ‘slaac’.
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['neutronng.setup_clouds'](auth)
kwargs['subnet_name'] = name
subnet = __salt__['neutronng.subnet_get'](name=name)
if subnet is None:
if __opts__['test']:
ret['result'] = None
ret['changes'] = kwargs
ret['comment'] = 'Subnet will be created.'
return ret # depends on [control=['if'], data=[]]
new_subnet = __salt__['neutronng.subnet_create'](**kwargs)
ret['changes'] = new_subnet
ret['comment'] = 'Created subnet'
return ret # depends on [control=['if'], data=[]]
changes = __salt__['neutronng.compare_changes'](subnet, **kwargs)
if changes:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = changes
ret['comment'] = 'Project will be updated.'
return ret # depends on [control=['if'], data=[]]
# update_subnet does not support changing cidr,
# so we have to delete and recreate the subnet in this case.
if 'cidr' in changes or 'tenant_id' in changes:
__salt__['neutronng.subnet_delete'](name=name)
new_subnet = __salt__['neutronng.subnet_create'](**kwargs)
ret['changes'] = new_subnet
ret['comment'] = 'Deleted and recreated subnet'
return ret # depends on [control=['if'], data=[]]
__salt__['neutronng.subnet_update'](**kwargs)
ret['changes'].update(changes)
ret['comment'] = 'Updated subnet' # depends on [control=['if'], data=[]]
return ret |
def ParseShadowEntry(self, line):
    """Extract the user accounts in /etc/shadow.

    Identifies the users in /etc/shadow and several attributes of their
    account, including how their password is crypted and password aging
    characteristics.

    Args:
      line: An entry of the shadow file.
    """
    if not line:
        return
    field_names = ("login", "passwd", "last_change", "min_age", "max_age",
                   "warn_time", "inactivity", "expire", "reserved")
    entry = dict(zip(field_names, line.split(":")))
    pw_entry = self.shadow.setdefault(entry["login"], rdf_client.PwEntry())
    pw_entry.store = self.shadow_store
    pw_entry.hash_type = self.GetHashType(entry["passwd"])
    # The aging fields may be empty or missing, so guard each conversion.
    last_change = entry.get("last_change")
    if last_change:
        pw_entry.age = int(last_change)
    max_age = entry.get("max_age")
    if max_age:
        pw_entry.max_age = int(max_age)
constant[Extract the user accounts in /etc/shadow.
Identifies the users in /etc/shadow and several attributes of their account,
including how their password is crypted and password aging characteristics.
Args:
line: An entry of the shadow file.
]
variable[fields] assign[=] tuple[[<ast.Constant object at 0x7da18fe90a00>, <ast.Constant object at 0x7da18fe93c40>, <ast.Constant object at 0x7da18fe92620>, <ast.Constant object at 0x7da18fe93160>, <ast.Constant object at 0x7da18fe90460>, <ast.Constant object at 0x7da18fe93730>, <ast.Constant object at 0x7da18fe93190>, <ast.Constant object at 0x7da18fe93940>, <ast.Constant object at 0x7da18fe90820>]]
if name[line] begin[:]
variable[rslt] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[fields], call[name[line].split, parameter[constant[:]]]]]]]
variable[pw_entry] assign[=] call[name[self].shadow.setdefault, parameter[call[name[rslt]][constant[login]], call[name[rdf_client].PwEntry, parameter[]]]]
name[pw_entry].store assign[=] name[self].shadow_store
name[pw_entry].hash_type assign[=] call[name[self].GetHashType, parameter[call[name[rslt]][constant[passwd]]]]
variable[last_change] assign[=] call[name[rslt].get, parameter[constant[last_change]]]
if name[last_change] begin[:]
name[pw_entry].age assign[=] call[name[int], parameter[name[last_change]]]
variable[max_age] assign[=] call[name[rslt].get, parameter[constant[max_age]]]
if name[max_age] begin[:]
name[pw_entry].max_age assign[=] call[name[int], parameter[name[max_age]]] | keyword[def] identifier[ParseShadowEntry] ( identifier[self] , identifier[line] ):
literal[string]
identifier[fields] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] )
keyword[if] identifier[line] :
identifier[rslt] = identifier[dict] ( identifier[zip] ( identifier[fields] , identifier[line] . identifier[split] ( literal[string] )))
identifier[pw_entry] = identifier[self] . identifier[shadow] . identifier[setdefault] ( identifier[rslt] [ literal[string] ], identifier[rdf_client] . identifier[PwEntry] ())
identifier[pw_entry] . identifier[store] = identifier[self] . identifier[shadow_store]
identifier[pw_entry] . identifier[hash_type] = identifier[self] . identifier[GetHashType] ( identifier[rslt] [ literal[string] ])
identifier[last_change] = identifier[rslt] . identifier[get] ( literal[string] )
keyword[if] identifier[last_change] :
identifier[pw_entry] . identifier[age] = identifier[int] ( identifier[last_change] )
identifier[max_age] = identifier[rslt] . identifier[get] ( literal[string] )
keyword[if] identifier[max_age] :
identifier[pw_entry] . identifier[max_age] = identifier[int] ( identifier[max_age] ) | def ParseShadowEntry(self, line):
"""Extract the user accounts in /etc/shadow.
Identifies the users in /etc/shadow and several attributes of their account,
including how their password is crypted and password aging characteristics.
Args:
line: An entry of the shadow file.
"""
fields = ('login', 'passwd', 'last_change', 'min_age', 'max_age', 'warn_time', 'inactivity', 'expire', 'reserved')
if line:
rslt = dict(zip(fields, line.split(':')))
pw_entry = self.shadow.setdefault(rslt['login'], rdf_client.PwEntry())
pw_entry.store = self.shadow_store
pw_entry.hash_type = self.GetHashType(rslt['passwd'])
# Tread carefully here in case these values aren't set.
last_change = rslt.get('last_change')
if last_change:
pw_entry.age = int(last_change) # depends on [control=['if'], data=[]]
max_age = rslt.get('max_age')
if max_age:
pw_entry.max_age = int(max_age) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def mkrows(l, pad, width, height):
    '''
    Compute the optimal number of rows based on our lists' largest element and
    our terminal size in columns and rows.

    Args:
        l: the list of elements to lay out.
        pad: the width (in columns) reserved for each element.
        width: the terminal width in columns.
        height: the minimum number of rows to use.

    Returns:
        The smallest row count >= height that fits every element of ``l``.
    '''
    # Maximum number of columns that fit on one terminal line.  Clamp to at
    # least 1: when pad exceeds width the old while-loop computed
    # ``height * 0`` forever and never terminated for a non-empty list.
    maxcols = max(1, int(width / pad))
    # Rows needed for len(l) items at maxcols per row (ceiling division),
    # but never fewer than the requested minimum height.
    needed = -(-len(l) // maxcols)
    return max(height, needed)
constant[
Compute the optimal number of rows based on our lists' largest element and
our terminal size in columns and rows.
Work out our maximum column number by dividing the width of the terminal by
our largest element.
While the length of our list is greater than the total number of elements we
can fit on the screen increment the height by one.
]
variable[maxcols] assign[=] call[name[int], parameter[binary_operation[name[width] / name[pad]]]]
while compare[call[name[len], parameter[name[l]]] greater[>] binary_operation[name[height] * name[maxcols]]] begin[:]
<ast.AugAssign object at 0x7da1b10eeb90>
return[name[height]] | keyword[def] identifier[mkrows] ( identifier[l] , identifier[pad] , identifier[width] , identifier[height] ):
literal[string]
identifier[maxcols] = identifier[int] ( identifier[width] / identifier[pad] )
keyword[while] identifier[len] ( identifier[l] )> identifier[height] * identifier[maxcols] :
identifier[height] += literal[int]
keyword[return] identifier[height] | def mkrows(l, pad, width, height):
"""
Compute the optimal number of rows based on our lists' largest element and
our terminal size in columns and rows.
Work out our maximum column number by dividing the width of the terminal by
our largest element.
While the length of our list is greater than the total number of elements we
can fit on the screen increment the height by one.
"""
maxcols = int(width / pad)
while len(l) > height * maxcols:
height += 1 # depends on [control=['while'], data=[]]
return height |
def delete(self):
    """
    Destructor: free the native lingeling solver (together with its proof
    file handle) and close the proof file, if either is still live.
    """
    solver = self.lingeling
    if solver:
        pysolvers.lingeling_del(solver, self.prfile)
        self.lingeling = None
    proof_file = self.prfile
    if proof_file:
        proof_file.close()
constant[
Destructor.
]
if name[self].lingeling begin[:]
call[name[pysolvers].lingeling_del, parameter[name[self].lingeling, name[self].prfile]]
name[self].lingeling assign[=] constant[None]
if name[self].prfile begin[:]
call[name[self].prfile.close, parameter[]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[lingeling] :
identifier[pysolvers] . identifier[lingeling_del] ( identifier[self] . identifier[lingeling] , identifier[self] . identifier[prfile] )
identifier[self] . identifier[lingeling] = keyword[None]
keyword[if] identifier[self] . identifier[prfile] :
identifier[self] . identifier[prfile] . identifier[close] () | def delete(self):
"""
Destructor.
"""
if self.lingeling:
pysolvers.lingeling_del(self.lingeling, self.prfile)
self.lingeling = None
if self.prfile:
self.prfile.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def confirmation(self, pdu):
    """Decode upstream PDUs and pass them up to the service access point.

    :param pdu: the raw PDU received from the downstream stack; its
        contents are decoded into a fresh NPDU below.
    """
    if _debug: NetworkAdapter._debug("confirmation %r (net=%r)", pdu, self.adapterNet)
    # Re-wrap the payload as an NPDU, preserving any per-PDU user data.
    npdu = NPDU(user_data=pdu.pduUserData)
    npdu.decode(pdu)
    # Hand the decoded NPDU to the network service access point, passing
    # this adapter so the SAP knows which network it arrived on.
    self.adapterSAP.process_npdu(self, npdu)
constant[Decode upstream PDUs and pass them up to the service access point.]
if name[_debug] begin[:]
call[name[NetworkAdapter]._debug, parameter[constant[confirmation %r (net=%r)], name[pdu], name[self].adapterNet]]
variable[npdu] assign[=] call[name[NPDU], parameter[]]
call[name[npdu].decode, parameter[name[pdu]]]
call[name[self].adapterSAP.process_npdu, parameter[name[self], name[npdu]]] | keyword[def] identifier[confirmation] ( identifier[self] , identifier[pdu] ):
literal[string]
keyword[if] identifier[_debug] : identifier[NetworkAdapter] . identifier[_debug] ( literal[string] , identifier[pdu] , identifier[self] . identifier[adapterNet] )
identifier[npdu] = identifier[NPDU] ( identifier[user_data] = identifier[pdu] . identifier[pduUserData] )
identifier[npdu] . identifier[decode] ( identifier[pdu] )
identifier[self] . identifier[adapterSAP] . identifier[process_npdu] ( identifier[self] , identifier[npdu] ) | def confirmation(self, pdu):
"""Decode upstream PDUs and pass them up to the service access point."""
if _debug:
NetworkAdapter._debug('confirmation %r (net=%r)', pdu, self.adapterNet) # depends on [control=['if'], data=[]]
npdu = NPDU(user_data=pdu.pduUserData)
npdu.decode(pdu)
self.adapterSAP.process_npdu(self, npdu) |
def add_curves_from_lasio(self, l, remap=None, funcs=None):
    """
    Given a parsed LAS object, add curves from it to the current well
    instance.

    Args:
        l (lasio object): The parsed LAS data to read curves from.
        remap (dict): Optional. A dict of 'old': 'new' LAS field names.
        funcs (dict): Optional. A dict of 'las field': function() for
            implementing a transform before loading. Can be a lambda.

    Returns:
        None. Works in place.
    """
    # Gather the per-curve parameters declared in the LAS_FIELDS mapping.
    params = {
        field: utils.lasio_get(l, sect, code, remap=remap, funcs=funcs)
        for field, (sect, code) in LAS_FIELDS['data'].items()
    }
    curves = {}
    for curve in l.curves:
        curves[curve.mnemonic] = Curve.from_lasio_curve(curve, **params)
    # This will clobber anything with the same key!
    self.data.update(curves)
    return None
constant[
Given a LAS file, add curves from it to the current well instance.
Essentially just wraps ``add_curves_from_lasio()``.
Args:
fname (str): The path of the LAS file to read curves from.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
None. Works in place.
]
variable[params] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b23ee620>, <ast.Tuple object at 0x7da1b23ef970>]]] in starred[call[call[name[LAS_FIELDS]][constant[data]].items, parameter[]]] begin[:]
call[name[params]][name[field]] assign[=] call[name[utils].lasio_get, parameter[name[l], name[sect], name[code]]]
variable[curves] assign[=] <ast.DictComp object at 0x7da1b23ef2e0>
call[name[self].data.update, parameter[name[curves]]]
return[constant[None]] | keyword[def] identifier[add_curves_from_lasio] ( identifier[self] , identifier[l] , identifier[remap] = keyword[None] , identifier[funcs] = keyword[None] ):
literal[string]
identifier[params] ={}
keyword[for] identifier[field] ,( identifier[sect] , identifier[code] ) keyword[in] identifier[LAS_FIELDS] [ literal[string] ]. identifier[items] ():
identifier[params] [ identifier[field] ]= identifier[utils] . identifier[lasio_get] ( identifier[l] ,
identifier[sect] ,
identifier[code] ,
identifier[remap] = identifier[remap] ,
identifier[funcs] = identifier[funcs] )
identifier[curves] ={ identifier[c] . identifier[mnemonic] : identifier[Curve] . identifier[from_lasio_curve] ( identifier[c] ,** identifier[params] )
keyword[for] identifier[c] keyword[in] identifier[l] . identifier[curves] }
identifier[self] . identifier[data] . identifier[update] ( identifier[curves] )
keyword[return] keyword[None] | def add_curves_from_lasio(self, l, remap=None, funcs=None):
"""
Given a LAS file, add curves from it to the current well instance.
Essentially just wraps ``add_curves_from_lasio()``.
Args:
fname (str): The path of the LAS file to read curves from.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
None. Works in place.
"""
params = {}
for (field, (sect, code)) in LAS_FIELDS['data'].items():
params[field] = utils.lasio_get(l, sect, code, remap=remap, funcs=funcs) # depends on [control=['for'], data=[]]
curves = {c.mnemonic: Curve.from_lasio_curve(c, **params) for c in l.curves}
# This will clobber anything with the same key!
self.data.update(curves)
return None |
def In(self, *values):
    """Sets the type of the WHERE clause as "in".

    Args:
        *values: The values to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateMultipleValuesCondition(values, 'IN')
    self._awql = condition
    return self._query_builder
constant[Sets the type of the WHERE clause as "in".
Args:
*values: The values to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
]
name[self]._awql assign[=] call[name[self]._CreateMultipleValuesCondition, parameter[name[values], constant[IN]]]
return[name[self]._query_builder] | keyword[def] identifier[In] ( identifier[self] ,* identifier[values] ):
literal[string]
identifier[self] . identifier[_awql] = identifier[self] . identifier[_CreateMultipleValuesCondition] ( identifier[values] , literal[string] )
keyword[return] identifier[self] . identifier[_query_builder] | def In(self, *values):
"""Sets the type of the WHERE clause as "in".
Args:
*values: The values to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
"""
self._awql = self._CreateMultipleValuesCondition(values, 'IN')
return self._query_builder |
def _head(self, client_kwargs):
    """
    Returns object HTTP header.

    Args:
        client_kwargs (dict): Client arguments.

    Returns:
        dict: HTTP header.
    """
    # Issue the HEAD request, map HTTP error statuses to exceptions, and
    # expose only the response headers to the caller.
    response = self.client.request(
        'HEAD', timeout=self._TIMEOUT, **client_kwargs)
    return _handle_http_errors(response).headers
constant[
Returns object HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header.
]
return[call[name[_handle_http_errors], parameter[call[name[self].client.request, parameter[constant[HEAD]]]]].headers] | keyword[def] identifier[_head] ( identifier[self] , identifier[client_kwargs] ):
literal[string]
keyword[return] identifier[_handle_http_errors] (
identifier[self] . identifier[client] . identifier[request] (
literal[string] , identifier[timeout] = identifier[self] . identifier[_TIMEOUT] ,** identifier[client_kwargs] )). identifier[headers] | def _head(self, client_kwargs):
"""
Returns object HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header.
"""
return _handle_http_errors(self.client.request('HEAD', timeout=self._TIMEOUT, **client_kwargs)).headers |
def stamp_title(kb_app: kb,
                sphinx_app: Sphinx,
                doctree: doctree):
    """ Walk the tree and copy the extracted RST title onto resource.title """
    # Identify which resource this doctree belongs to by turning its source
    # path into a docname (path relative to the docs dir, minus ``.rst``).
    resources = sphinx_app.env.resources
    source_path = PurePath(doctree.attributes['source'])
    relative = str(source_path.relative_to(sphinx_app.confdir))
    docname = relative.split('.rst')[0]
    resource = resources.get(docname)
    if not resource:
        return
    # Stamp the title on the resource
    resource.title = get_rst_title(doctree)
constant[ Walk the tree and extra RST title into resource.title ]
variable[resources] assign[=] name[sphinx_app].env.resources
variable[confdir] assign[=] name[sphinx_app].confdir
variable[source] assign[=] call[name[PurePath], parameter[call[name[doctree].attributes][constant[source]]]]
variable[docname] assign[=] call[call[call[name[str], parameter[call[name[source].relative_to, parameter[name[confdir]]]]].split, parameter[constant[.rst]]]][constant[0]]
variable[resource] assign[=] call[name[resources].get, parameter[name[docname]]]
if name[resource] begin[:]
variable[title] assign[=] call[name[get_rst_title], parameter[name[doctree]]]
name[resource].title assign[=] name[title] | keyword[def] identifier[stamp_title] ( identifier[kb_app] : identifier[kb] ,
identifier[sphinx_app] : identifier[Sphinx] ,
identifier[doctree] : identifier[doctree] ):
literal[string]
identifier[resources] = identifier[sphinx_app] . identifier[env] . identifier[resources]
identifier[confdir] = identifier[sphinx_app] . identifier[confdir]
identifier[source] = identifier[PurePath] ( identifier[doctree] . identifier[attributes] [ literal[string] ])
identifier[docname] = identifier[str] ( identifier[source] . identifier[relative_to] ( identifier[confdir] )). identifier[split] ( literal[string] )[ literal[int] ]
identifier[resource] = identifier[resources] . identifier[get] ( identifier[docname] )
keyword[if] identifier[resource] :
identifier[title] = identifier[get_rst_title] ( identifier[doctree] )
identifier[resource] . identifier[title] = identifier[title] | def stamp_title(kb_app: kb, sphinx_app: Sphinx, doctree: doctree):
""" Walk the tree and extra RST title into resource.title """
# First, find out which resource this is. Won't be easy.
resources = sphinx_app.env.resources
confdir = sphinx_app.confdir
source = PurePath(doctree.attributes['source'])
# Get the relative path inside the docs dir, without .rst, then
# get the resource
docname = str(source.relative_to(confdir)).split('.rst')[0]
resource = resources.get(docname)
if resource:
# Stamp the title on the resource
title = get_rst_title(doctree)
resource.title = title # depends on [control=['if'], data=[]] |
def dot(self, rhs):
    """
    Return the dot product of this vector and *rhs*.
    """
    component_pairs = ((self.x, rhs.x), (self.y, rhs.y), (self.z, rhs.z))
    return sum(a * b for a, b in component_pairs)
return[binary_operation[binary_operation[binary_operation[name[self].x * name[rhs].x] + binary_operation[name[self].y * name[rhs].y]] + binary_operation[name[self].z * name[rhs].z]]] | keyword[def] identifier[dot] ( identifier[self] , identifier[rhs] ):
literal[string]
keyword[return] identifier[self] . identifier[x] * identifier[rhs] . identifier[x] + identifier[self] . identifier[y] * identifier[rhs] . identifier[y] + identifier[self] . identifier[z] * identifier[rhs] . identifier[z] | def dot(self, rhs):
"""
Return the dot product of this vector and *rhs*.
"""
return self.x * rhs.x + self.y * rhs.y + self.z * rhs.z |
def make_accept_response(self):
    """Create "accept" response for the "subscribe" / "subscribed" /
    "unsubscribe" / "unsubscribed" presence stanza.

    :return: new stanza.
    :returntype: `Presence`
    """
    allowed_types = ("subscribe", "subscribed", "unsubscribe", "unsubscribed")
    if self.stanza_type not in allowed_types:
        raise ValueError("Results may only be generated for 'subscribe',"
            "'subscribed','unsubscribe' or 'unsubscribed' presence")
    # Swap the direction (from/to) and map the type to its accept response.
    return Presence(stanza_type = ACCEPT_RESPONSES[self.stanza_type],
                    from_jid = self.to_jid, to_jid = self.from_jid,
                    stanza_id = self.stanza_id)
constant[Create "accept" response for the "subscribe" / "subscribed" /
"unsubscribe" / "unsubscribed" presence stanza.
:return: new stanza.
:returntype: `Presence`
]
if compare[name[self].stanza_type <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b00e4730>, <ast.Constant object at 0x7da1b00e7310>, <ast.Constant object at 0x7da1b00e6e00>, <ast.Constant object at 0x7da1b00e5450>]]] begin[:]
<ast.Raise object at 0x7da1b00e6f50>
variable[stanza] assign[=] call[name[Presence], parameter[]]
return[name[stanza]] | keyword[def] identifier[make_accept_response] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[stanza_type] keyword[not] keyword[in] ( literal[string] , literal[string] ,
literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[stanza] = identifier[Presence] ( identifier[stanza_type] = identifier[ACCEPT_RESPONSES] [ identifier[self] . identifier[stanza_type] ],
identifier[from_jid] = identifier[self] . identifier[to_jid] , identifier[to_jid] = identifier[self] . identifier[from_jid] ,
identifier[stanza_id] = identifier[self] . identifier[stanza_id] )
keyword[return] identifier[stanza] | def make_accept_response(self):
"""Create "accept" response for the "subscribe" / "subscribed" /
"unsubscribe" / "unsubscribed" presence stanza.
:return: new stanza.
:returntype: `Presence`
"""
if self.stanza_type not in ('subscribe', 'subscribed', 'unsubscribe', 'unsubscribed'):
raise ValueError("Results may only be generated for 'subscribe','subscribed','unsubscribe' or 'unsubscribed' presence") # depends on [control=['if'], data=[]]
stanza = Presence(stanza_type=ACCEPT_RESPONSES[self.stanza_type], from_jid=self.to_jid, to_jid=self.from_jid, stanza_id=self.stanza_id)
return stanza |
def get_in_ip_addr(cls, tenant_id):
    """Retrieves the 'in' service subnet attributes. """
    try:
        tenant_obj = cls.serv_obj_dict[tenant_id]
    except KeyError:
        # Nothing registered for this tenant: log and return None.
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    return tenant_obj.get_in_ip_addr()
constant[Retrieves the 'in' service subnet attributes. ]
if compare[name[tenant_id] <ast.NotIn object at 0x7da2590d7190> name[cls].serv_obj_dict] begin[:]
call[name[LOG].error, parameter[constant[Fabric not prepared for tenant %s], name[tenant_id]]]
return[None]
variable[tenant_obj] assign[=] call[name[cls].serv_obj_dict.get, parameter[name[tenant_id]]]
return[call[name[tenant_obj].get_in_ip_addr, parameter[]]] | keyword[def] identifier[get_in_ip_addr] ( identifier[cls] , identifier[tenant_id] ):
literal[string]
keyword[if] identifier[tenant_id] keyword[not] keyword[in] identifier[cls] . identifier[serv_obj_dict] :
identifier[LOG] . identifier[error] ( literal[string] , identifier[tenant_id] )
keyword[return]
identifier[tenant_obj] = identifier[cls] . identifier[serv_obj_dict] . identifier[get] ( identifier[tenant_id] )
keyword[return] identifier[tenant_obj] . identifier[get_in_ip_addr] () | def get_in_ip_addr(cls, tenant_id):
"""Retrieves the 'in' service subnet attributes. """
if tenant_id not in cls.serv_obj_dict:
LOG.error('Fabric not prepared for tenant %s', tenant_id)
return # depends on [control=['if'], data=['tenant_id']]
tenant_obj = cls.serv_obj_dict.get(tenant_id)
return tenant_obj.get_in_ip_addr() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.