code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def read_data(self, device_property):
    """Read a value from the connected VISA instrument (MDO3014).

    Args:
        device_property: Either the literal string 'present_value'
            (mapped to the standard SCPI ':READ?' query) or a raw
            query string sent to the instrument verbatim.

    Returns:
        The reading parsed via ``self.parse_value``, or None when no
        instrument session is open.
    """
    if self.inst is None:
        # No open VISA session; nothing to query.
        logger.error("Visa-MDO3014-read data-Self.inst : None")
        return None
    if device_property == 'present_value':
        # 'present_value' is an alias for the standard SCPI read query.
        return self.parse_value(self.inst.query(':READ?'))
    else:
        # Any other property is treated as a raw query string.
        value = self.inst.query(device_property)
        logger.info("Visa-MDO3014-read data-property : %s - value : %s" %(device_property, value))
        return self.parse_value(value)
return None | def function[read_data, parameter[self, device_property]]:
constant[
read values from the device
]
if compare[name[self].inst is constant[None]] begin[:]
call[name[logger].error, parameter[constant[Visa-MDO3014-read data-Self.inst : None]]]
return[constant[None]]
if compare[name[device_property] equal[==] constant[present_value]] begin[:]
return[call[name[self].parse_value, parameter[call[name[self].inst.query, parameter[constant[:READ?]]]]]]
return[constant[None]] | keyword[def] identifier[read_data] ( identifier[self] , identifier[device_property] ):
literal[string]
keyword[if] identifier[self] . identifier[inst] keyword[is] keyword[None] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[None]
keyword[if] identifier[device_property] == literal[string] :
keyword[return] identifier[self] . identifier[parse_value] ( identifier[self] . identifier[inst] . identifier[query] ( literal[string] ))
keyword[else] :
identifier[value] = identifier[self] . identifier[inst] . identifier[query] ( identifier[device_property] )
identifier[logger] . identifier[info] ( literal[string] %( identifier[device_property] , identifier[value] ))
keyword[return] identifier[self] . identifier[parse_value] ( identifier[value] )
keyword[return] keyword[None] | def read_data(self, device_property):
"""
read values from the device
"""
if self.inst is None:
logger.error('Visa-MDO3014-read data-Self.inst : None')
return None # depends on [control=['if'], data=[]]
if device_property == 'present_value':
return self.parse_value(self.inst.query(':READ?')) # depends on [control=['if'], data=[]]
else:
value = self.inst.query(device_property)
logger.info('Visa-MDO3014-read data-property : %s - value : %s' % (device_property, value))
return self.parse_value(value)
return None |
def getDescendantsUIDs(self, all_descendants=False):
    """Return the UIDs of the descendant Analysis Requests.

    This method is used as metadata.

    :param all_descendants: passed through to ``self.getDescendants``;
        presumably True means recurse into descendants of descendants —
        TODO confirm against getDescendants
    :return: list of UIDs, one per descendant
    """
    # Resolve descendant objects first; their UIDs are extracted below.
    descendants = self.getDescendants(all_descendants=all_descendants)
return map(api.get_uid, descendants) | def function[getDescendantsUIDs, parameter[self, all_descendants]]:
constant[Returns the UIDs of the descendant Analysis Requests
This method is used as metadata
]
variable[descendants] assign[=] call[name[self].getDescendants, parameter[]]
return[call[name[map], parameter[name[api].get_uid, name[descendants]]]] | keyword[def] identifier[getDescendantsUIDs] ( identifier[self] , identifier[all_descendants] = keyword[False] ):
literal[string]
identifier[descendants] = identifier[self] . identifier[getDescendants] ( identifier[all_descendants] = identifier[all_descendants] )
keyword[return] identifier[map] ( identifier[api] . identifier[get_uid] , identifier[descendants] ) | def getDescendantsUIDs(self, all_descendants=False):
"""Returns the UIDs of the descendant Analysis Requests
This method is used as metadata
"""
descendants = self.getDescendants(all_descendants=all_descendants)
return map(api.get_uid, descendants) |
def get(self, slide_id, default=None):
    """
    Return the slide identified by integer *slide_id* in this
    presentation, or *default* if not found.
    """
    # Delegate the lookup to the presentation part; it yields None
    # when no slide carries *slide_id*.
    slide = self.part.get_slide(slide_id)
    if slide is None:
        # Mirror dict.get() semantics: a missing id yields *default*.
        return default
return slide | def function[get, parameter[self, slide_id, default]]:
constant[
Return the slide identified by integer *slide_id* in this
presentation, or *default* if not found.
]
variable[slide] assign[=] call[name[self].part.get_slide, parameter[name[slide_id]]]
if compare[name[slide] is constant[None]] begin[:]
return[name[default]]
return[name[slide]] | keyword[def] identifier[get] ( identifier[self] , identifier[slide_id] , identifier[default] = keyword[None] ):
literal[string]
identifier[slide] = identifier[self] . identifier[part] . identifier[get_slide] ( identifier[slide_id] )
keyword[if] identifier[slide] keyword[is] keyword[None] :
keyword[return] identifier[default]
keyword[return] identifier[slide] | def get(self, slide_id, default=None):
"""
Return the slide identified by integer *slide_id* in this
presentation, or *default* if not found.
"""
slide = self.part.get_slide(slide_id)
if slide is None:
return default # depends on [control=['if'], data=[]]
return slide |
def load_freesurfer_geometry(filename, to='mesh', warn=False):
    '''
    load_freesurfer_geometry(filename) yields the data stored at the freesurfer geometry file given
    by filename. The optional argument 'to' may be used to change the kind of data that is
    returned.
    The following are valid settings for the 'to' keyword argument:
      * 'mesh' (the default; also 'auto'/'automatic') yields a mesh object
      * 'tess' (or 'tesselation') yields a tess object (discarding coordinates)
      * 'coords' (or 'coordinates') yields just the coordinate array
      * 'triangles' (or 'faces') yields just the face/triangle array
      * 'meta' (or 'meta_data') yields the metadata dict alone
      * 'raw' yields a tuple (coords, faces) of numpy arrays
    The optional argument warn (default False) controls whether nibabel
    UserWarnings raised while reading the file are shown.
    '''
    if not warn:
        # Suppress nibabel's UserWarnings (e.g., about file metadata)
        # unless the caller explicitly asked to see them.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    category=UserWarning,
                                    module='nibabel')
            (xs, fs, info) = fsio.read_geometry(filename, read_metadata=True)
    else:
        (xs, fs, info) = fsio.read_geometry(filename, read_metadata=True)
    # see if there's chirality data here...
    # FreeSurfer surface files conventionally start with 'lh'/'rh' for the
    # left/right hemisphere; record that in the metadata as 'chirality'.
    filename = os.path.split(filename)[1]
    filename = filename.lower()
    if filename.startswith('lh'): info['chirality'] = 'lh.'
    elif filename.startswith('rh'): info['chirality'] = 'rh.'
    # parse it into something
    to = to.lower()
    if to in ['mesh', 'auto', 'automatic']:
        return geo.Mesh(fs, xs, meta_data=info)
    elif to in ['tess', 'tesselation']:
        return geo.Tesselation(fs, meta_data=info)
    elif to in ['coords', 'coordinates']:
        return xs
    elif to in ['triangles', 'faces']:
        return fs
    elif to in ['meta', 'meta_data']:
        return info
    elif to =='raw':
        # NOTE(review): 'raw' returns only (coords, faces); the metadata
        # dict read above is not included.
        return (xs, fs)
    else:
        raise ValueError('Could not understand \'to\' argument: %s' % to)
raise ValueError('Could not understand \'to\' argument: %s' % to) | def function[load_freesurfer_geometry, parameter[filename, to, warn]]:
constant[
load_freesurfer_geometry(filename) yields the data stored at the freesurfer geometry file given
by filename. The optional argument 'to' may be used to change the kind of data that is
returned.
The following are valid settings for the 'to' keyword argument:
* 'mesh' (the default) yields a mesh object
* 'tess' yields a tess object (discarding coordinates)
* 'raw' yields a tuple of numpy arrays, identical to the read_geometry return value.
]
if <ast.UnaryOp object at 0x7da20e9b3fd0> begin[:]
with call[name[warnings].catch_warnings, parameter[]] begin[:]
call[name[warnings].filterwarnings, parameter[constant[ignore]]]
<ast.Tuple object at 0x7da20e9b3910> assign[=] call[name[fsio].read_geometry, parameter[name[filename]]]
variable[filename] assign[=] call[call[name[os].path.split, parameter[name[filename]]]][constant[1]]
variable[filename] assign[=] call[name[filename].lower, parameter[]]
if call[name[filename].startswith, parameter[constant[lh]]] begin[:]
call[name[info]][constant[chirality]] assign[=] constant[lh.]
variable[to] assign[=] call[name[to].lower, parameter[]]
if compare[name[to] in list[[<ast.Constant object at 0x7da20e9b2230>, <ast.Constant object at 0x7da20e9b24d0>, <ast.Constant object at 0x7da20e9b16c0>]]] begin[:]
return[call[name[geo].Mesh, parameter[name[fs], name[xs]]]] | keyword[def] identifier[load_freesurfer_geometry] ( identifier[filename] , identifier[to] = literal[string] , identifier[warn] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[warn] :
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[filterwarnings] ( literal[string] ,
identifier[category] = identifier[UserWarning] ,
identifier[module] = literal[string] )
( identifier[xs] , identifier[fs] , identifier[info] )= identifier[fsio] . identifier[read_geometry] ( identifier[filename] , identifier[read_metadata] = keyword[True] )
keyword[else] :
( identifier[xs] , identifier[fs] , identifier[info] )= identifier[fsio] . identifier[read_geometry] ( identifier[filename] , identifier[read_metadata] = keyword[True] )
identifier[filename] = identifier[os] . identifier[path] . identifier[split] ( identifier[filename] )[ literal[int] ]
identifier[filename] = identifier[filename] . identifier[lower] ()
keyword[if] identifier[filename] . identifier[startswith] ( literal[string] ): identifier[info] [ literal[string] ]= literal[string]
keyword[elif] identifier[filename] . identifier[startswith] ( literal[string] ): identifier[info] [ literal[string] ]= literal[string]
identifier[to] = identifier[to] . identifier[lower] ()
keyword[if] identifier[to] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[return] identifier[geo] . identifier[Mesh] ( identifier[fs] , identifier[xs] , identifier[meta_data] = identifier[info] )
keyword[elif] identifier[to] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[geo] . identifier[Tesselation] ( identifier[fs] , identifier[meta_data] = identifier[info] )
keyword[elif] identifier[to] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[xs]
keyword[elif] identifier[to] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[fs]
keyword[elif] identifier[to] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[info]
keyword[elif] identifier[to] == literal[string] :
keyword[return] ( identifier[xs] , identifier[fs] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[to] ) | def load_freesurfer_geometry(filename, to='mesh', warn=False):
"""
load_freesurfer_geometry(filename) yields the data stored at the freesurfer geometry file given
by filename. The optional argument 'to' may be used to change the kind of data that is
returned.
The following are valid settings for the 'to' keyword argument:
* 'mesh' (the default) yields a mesh object
* 'tess' yields a tess object (discarding coordinates)
* 'raw' yields a tuple of numpy arrays, identical to the read_geometry return value.
"""
if not warn:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning, module='nibabel')
(xs, fs, info) = fsio.read_geometry(filename, read_metadata=True) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
else:
(xs, fs, info) = fsio.read_geometry(filename, read_metadata=True)
# see if there's chirality data here...
filename = os.path.split(filename)[1]
filename = filename.lower()
if filename.startswith('lh'):
info['chirality'] = 'lh.' # depends on [control=['if'], data=[]]
elif filename.startswith('rh'):
info['chirality'] = 'rh.' # depends on [control=['if'], data=[]]
# parse it into something
to = to.lower()
if to in ['mesh', 'auto', 'automatic']:
return geo.Mesh(fs, xs, meta_data=info) # depends on [control=['if'], data=[]]
elif to in ['tess', 'tesselation']:
return geo.Tesselation(fs, meta_data=info) # depends on [control=['if'], data=[]]
elif to in ['coords', 'coordinates']:
return xs # depends on [control=['if'], data=[]]
elif to in ['triangles', 'faces']:
return fs # depends on [control=['if'], data=[]]
elif to in ['meta', 'meta_data']:
return info # depends on [control=['if'], data=[]]
elif to == 'raw':
return (xs, fs) # depends on [control=['if'], data=[]]
else:
raise ValueError("Could not understand 'to' argument: %s" % to) |
def _authenticate(self):
    """Authenticate the user against the API and store the session token.

    POSTs the stored credentials to LOGIN_ENDPOINT and, on success,
    caches account details on the instance and injects the returned
    token into the request headers used by subsequent calls.
    """
    # Drop any stale auth headers before requesting a fresh token.
    self.cleanup_headers()
    url = LOGIN_ENDPOINT
    data = self.query(
        url,
        method='POST',
        extra_params={
            'email': self.__username,
            'password': self.__password
        })
    if isinstance(data, dict) and data.get('success'):
        # The useful payload is nested under the 'data' key.
        data = data.get('data')
        self.authenticated = data.get('authenticated')
        self.country_code = data.get('countryCode')
        self.date_created = data.get('dateCreated')
        self.__token = data.get('token')
        self.userid = data.get('userId')
        # update header with the generated token
self.__headers['Authorization'] = self.__token | def function[_authenticate, parameter[self]]:
constant[Authenticate user and generate token.]
call[name[self].cleanup_headers, parameter[]]
variable[url] assign[=] name[LOGIN_ENDPOINT]
variable[data] assign[=] call[name[self].query, parameter[name[url]]]
if <ast.BoolOp object at 0x7da18bc72830> begin[:]
variable[data] assign[=] call[name[data].get, parameter[constant[data]]]
name[self].authenticated assign[=] call[name[data].get, parameter[constant[authenticated]]]
name[self].country_code assign[=] call[name[data].get, parameter[constant[countryCode]]]
name[self].date_created assign[=] call[name[data].get, parameter[constant[dateCreated]]]
name[self].__token assign[=] call[name[data].get, parameter[constant[token]]]
name[self].userid assign[=] call[name[data].get, parameter[constant[userId]]]
call[name[self].__headers][constant[Authorization]] assign[=] name[self].__token | keyword[def] identifier[_authenticate] ( identifier[self] ):
literal[string]
identifier[self] . identifier[cleanup_headers] ()
identifier[url] = identifier[LOGIN_ENDPOINT]
identifier[data] = identifier[self] . identifier[query] (
identifier[url] ,
identifier[method] = literal[string] ,
identifier[extra_params] ={
literal[string] : identifier[self] . identifier[__username] ,
literal[string] : identifier[self] . identifier[__password]
})
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ) keyword[and] identifier[data] . identifier[get] ( literal[string] ):
identifier[data] = identifier[data] . identifier[get] ( literal[string] )
identifier[self] . identifier[authenticated] = identifier[data] . identifier[get] ( literal[string] )
identifier[self] . identifier[country_code] = identifier[data] . identifier[get] ( literal[string] )
identifier[self] . identifier[date_created] = identifier[data] . identifier[get] ( literal[string] )
identifier[self] . identifier[__token] = identifier[data] . identifier[get] ( literal[string] )
identifier[self] . identifier[userid] = identifier[data] . identifier[get] ( literal[string] )
identifier[self] . identifier[__headers] [ literal[string] ]= identifier[self] . identifier[__token] | def _authenticate(self):
"""Authenticate user and generate token."""
self.cleanup_headers()
url = LOGIN_ENDPOINT
data = self.query(url, method='POST', extra_params={'email': self.__username, 'password': self.__password})
if isinstance(data, dict) and data.get('success'):
data = data.get('data')
self.authenticated = data.get('authenticated')
self.country_code = data.get('countryCode')
self.date_created = data.get('dateCreated')
self.__token = data.get('token')
self.userid = data.get('userId')
# update header with the generated token
self.__headers['Authorization'] = self.__token # depends on [control=['if'], data=[]] |
def get_log_path(config_file_name):
    '''Generate the stdout and stderr log file paths for an experiment.

    Both paths live under NNICTL_HOME_DIR/<config_file_name>/; the
    function returns a (stdout_path, stderr_path) tuple without
    creating any files or directories.
    '''
    stdout_full_path = os.path.join(NNICTL_HOME_DIR, config_file_name, 'stdout')
    stderr_full_path = os.path.join(NNICTL_HOME_DIR, config_file_name, 'stderr')
return stdout_full_path, stderr_full_path | def function[get_log_path, parameter[config_file_name]]:
constant[generate stdout and stderr log path]
variable[stdout_full_path] assign[=] call[name[os].path.join, parameter[name[NNICTL_HOME_DIR], name[config_file_name], constant[stdout]]]
variable[stderr_full_path] assign[=] call[name[os].path.join, parameter[name[NNICTL_HOME_DIR], name[config_file_name], constant[stderr]]]
return[tuple[[<ast.Name object at 0x7da1b26af280>, <ast.Name object at 0x7da1b26ae290>]]] | keyword[def] identifier[get_log_path] ( identifier[config_file_name] ):
literal[string]
identifier[stdout_full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[NNICTL_HOME_DIR] , identifier[config_file_name] , literal[string] )
identifier[stderr_full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[NNICTL_HOME_DIR] , identifier[config_file_name] , literal[string] )
keyword[return] identifier[stdout_full_path] , identifier[stderr_full_path] | def get_log_path(config_file_name):
"""generate stdout and stderr log path"""
stdout_full_path = os.path.join(NNICTL_HOME_DIR, config_file_name, 'stdout')
stderr_full_path = os.path.join(NNICTL_HOME_DIR, config_file_name, 'stderr')
return (stdout_full_path, stderr_full_path) |
def is_public(self):
    """Return True iff this function should be considered public.

    If the module declares __all__ (self.dunder_all is not None),
    membership there is authoritative; otherwise fall back to the
    leading-underscore naming convention.
    """
    if self.dunder_all is not None:
        # __all__ exists: public means explicitly exported.
        return self.name in self.dunder_all
    else:
return not self.name.startswith('_') | def function[is_public, parameter[self]]:
constant[Return True iff this function should be considered public.]
if compare[name[self].dunder_all is_not constant[None]] begin[:]
return[compare[name[self].name in name[self].dunder_all]] | keyword[def] identifier[is_public] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[dunder_all] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[name] keyword[in] identifier[self] . identifier[dunder_all]
keyword[else] :
keyword[return] keyword[not] identifier[self] . identifier[name] . identifier[startswith] ( literal[string] ) | def is_public(self):
"""Return True iff this function should be considered public."""
if self.dunder_all is not None:
return self.name in self.dunder_all # depends on [control=['if'], data=[]]
else:
return not self.name.startswith('_') |
async def get_target(config, url):
    """ Given a URL, get the webmention endpoint Target for it.

    Uses config.cache (when present) both to seed conditional request
    headers and to store the refreshed Target. Falls back to the
    previously cached Target when the fetch fails or the response is
    served from cache (not modified).
    """
    previous = config.cache.get(
        'target', url, schema_version=SCHEMA_VERSION) if config.cache else None
    # Reuse the cached entry's caching headers (presumably validators
    # such as ETag/Last-Modified -- confirm against Target.caching).
    headers = previous.caching if previous else None
    request = await utils.retry_get(config, url, headers=headers)
    if not request or not request.success:
        # Fetch failed entirely; keep whatever we had cached.
        return previous
    if request.cached:
        # Server reported the resource unchanged; cached copy stands.
        return previous
    current = Target(request)
    if config.cache:
        config.cache.set('target', url, current)
    return current
return current | <ast.AsyncFunctionDef object at 0x7da18f00eec0> | keyword[async] keyword[def] identifier[get_target] ( identifier[config] , identifier[url] ):
literal[string]
identifier[previous] = identifier[config] . identifier[cache] . identifier[get] (
literal[string] , identifier[url] , identifier[schema_version] = identifier[SCHEMA_VERSION] ) keyword[if] identifier[config] . identifier[cache] keyword[else] keyword[None]
identifier[headers] = identifier[previous] . identifier[caching] keyword[if] identifier[previous] keyword[else] keyword[None]
identifier[request] = keyword[await] identifier[utils] . identifier[retry_get] ( identifier[config] , identifier[url] , identifier[headers] = identifier[headers] )
keyword[if] keyword[not] identifier[request] keyword[or] keyword[not] identifier[request] . identifier[success] :
keyword[return] identifier[previous]
keyword[if] identifier[request] . identifier[cached] :
keyword[return] identifier[previous]
identifier[current] = identifier[Target] ( identifier[request] )
keyword[if] identifier[config] . identifier[cache] :
identifier[config] . identifier[cache] . identifier[set] ( literal[string] , identifier[url] , identifier[current] )
keyword[return] identifier[current] | async def get_target(config, url):
""" Given a URL, get the webmention endpoint """
previous = config.cache.get('target', url, schema_version=SCHEMA_VERSION) if config.cache else None
headers = previous.caching if previous else None
request = await utils.retry_get(config, url, headers=headers)
if not request or not request.success:
return previous # depends on [control=['if'], data=[]]
if request.cached:
return previous # depends on [control=['if'], data=[]]
current = Target(request)
if config.cache:
config.cache.set('target', url, current) # depends on [control=['if'], data=[]]
return current |
def _se_all(self):
    """Standard errors (SE) for all parameters, including the intercept.

    Computes sqrt(diag((X'X)^-1) * MS_err) per rolling window, where X
    is the windowed design matrix (self.xwins, presumably shaped
    (windows, obs, params) -- TODO confirm) and MS_err is the mean
    squared error per window (self._ms_err).
    """
    # Shape MS_err to broadcast against the per-parameter diagonal
    # terms computed below.
    err = np.expand_dims(self._ms_err, axis=1)
    # diag((X'X)^-1) for every window: matmul/inv operate batched over
    # the leading axis; the diagonal is taken from the trailing two.
    t1 = np.diagonal(
        np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)),
        axis1=1,
        axis2=2,
    )
return np.squeeze(np.sqrt(t1 * err)) | def function[_se_all, parameter[self]]:
constant[Standard errors (SE) for all parameters, including the intercept.]
variable[err] assign[=] call[name[np].expand_dims, parameter[name[self]._ms_err]]
variable[t1] assign[=] call[name[np].diagonal, parameter[call[name[np].linalg.inv, parameter[call[name[np].matmul, parameter[call[name[self].xwins.swapaxes, parameter[constant[1], constant[2]]], name[self].xwins]]]]]]
return[call[name[np].squeeze, parameter[call[name[np].sqrt, parameter[binary_operation[name[t1] * name[err]]]]]]] | keyword[def] identifier[_se_all] ( identifier[self] ):
literal[string]
identifier[err] = identifier[np] . identifier[expand_dims] ( identifier[self] . identifier[_ms_err] , identifier[axis] = literal[int] )
identifier[t1] = identifier[np] . identifier[diagonal] (
identifier[np] . identifier[linalg] . identifier[inv] ( identifier[np] . identifier[matmul] ( identifier[self] . identifier[xwins] . identifier[swapaxes] ( literal[int] , literal[int] ), identifier[self] . identifier[xwins] )),
identifier[axis1] = literal[int] ,
identifier[axis2] = literal[int] ,
)
keyword[return] identifier[np] . identifier[squeeze] ( identifier[np] . identifier[sqrt] ( identifier[t1] * identifier[err] )) | def _se_all(self):
"""Standard errors (SE) for all parameters, including the intercept."""
err = np.expand_dims(self._ms_err, axis=1)
t1 = np.diagonal(np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)), axis1=1, axis2=2)
return np.squeeze(np.sqrt(t1 * err)) |
def get_earliest_start_date_from_program(program):
    """
    Get the earliest date that one of the courses in the program was available.
    For the sake of emails to new learners, we treat this as the program start date.
    Arguments:
        program (dict): Program data from Course Catalog API
    Returns:
        datetime.datetime: The date and time at which the first course started,
            or None when no course run carries a start date.
    """
    # Collect the parsed start date of every course run in the program;
    # runs without a 'start' value are skipped.
    start_dates = []
    for course in program.get('courses', []):
        for run in course.get('course_runs', []):
            if run.get('start'):
                start_dates.append(parse_lms_api_datetime(run['start']))
    if not start_dates:
        # No run advertised a start date at all.
        return None
return min(start_dates) | def function[get_earliest_start_date_from_program, parameter[program]]:
constant[
Get the earliest date that one of the courses in the program was available.
For the sake of emails to new learners, we treat this as the program start date.
Arguemnts:
program (dict): Program data from Course Catalog API
returns:
datetime.datetime: The date and time at which the first course started
]
variable[start_dates] assign[=] list[[]]
for taget[name[course]] in starred[call[name[program].get, parameter[constant[courses], list[[]]]]] begin[:]
for taget[name[run]] in starred[call[name[course].get, parameter[constant[course_runs], list[[]]]]] begin[:]
if call[name[run].get, parameter[constant[start]]] begin[:]
call[name[start_dates].append, parameter[call[name[parse_lms_api_datetime], parameter[call[name[run]][constant[start]]]]]]
if <ast.UnaryOp object at 0x7da1b01262c0> begin[:]
return[constant[None]]
return[call[name[min], parameter[name[start_dates]]]] | keyword[def] identifier[get_earliest_start_date_from_program] ( identifier[program] ):
literal[string]
identifier[start_dates] =[]
keyword[for] identifier[course] keyword[in] identifier[program] . identifier[get] ( literal[string] ,[]):
keyword[for] identifier[run] keyword[in] identifier[course] . identifier[get] ( literal[string] ,[]):
keyword[if] identifier[run] . identifier[get] ( literal[string] ):
identifier[start_dates] . identifier[append] ( identifier[parse_lms_api_datetime] ( identifier[run] [ literal[string] ]))
keyword[if] keyword[not] identifier[start_dates] :
keyword[return] keyword[None]
keyword[return] identifier[min] ( identifier[start_dates] ) | def get_earliest_start_date_from_program(program):
"""
Get the earliest date that one of the courses in the program was available.
For the sake of emails to new learners, we treat this as the program start date.
Arguemnts:
program (dict): Program data from Course Catalog API
returns:
datetime.datetime: The date and time at which the first course started
"""
start_dates = []
for course in program.get('courses', []):
for run in course.get('course_runs', []):
if run.get('start'):
start_dates.append(parse_lms_api_datetime(run['start'])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['run']] # depends on [control=['for'], data=['course']]
if not start_dates:
return None # depends on [control=['if'], data=[]]
return min(start_dates) |
def _notify_report(self, device_uuid, event_name, report):
    """Notify that a report has been received from a device.

    This routine is called synchronously in the event loop by the
    DeviceManager. It serializes the report and publishes it as a
    single-fragment notification on the device's streaming topic;
    reports for devices without an active connection are dropped.
    """
    if device_uuid not in self._connections:
        # Nobody is connected to this device; skip publishing.
        self._logger.debug("Dropping report for device without an active connection, uuid=0x%X", device_uuid)
        return
    slug = self._build_device_slug(device_uuid)
    streaming_topic = self.topics.prefix + 'devices/{}/data/streaming'.format(slug)
    data = {'type': 'notification', 'operation': 'report'}
    ser = report.serialize()
    # The timestamp is formatted with a literal trailing 'Z' without a
    # timezone conversion -- assumes received_time is UTC; TODO confirm.
    data['received_time'] = ser['received_time'].strftime("%Y%m%dT%H:%M:%S.%fZ").encode()
    data['report_origin'] = ser['origin']
    data['report_format'] = ser['report_format']
    # Hex-encode the binary report payload for transport.
    data['report'] = binascii.hexlify(ser['encoded_report'])
    # Always sent as a single fragment (count=1, index=0).
    data['fragment_count'] = 1
    data['fragment_index'] = 0
    self._logger.debug("Publishing report: (topic=%s)", streaming_topic)
    self.client.publish(streaming_topic, data)
self.client.publish(streaming_topic, data) | def function[_notify_report, parameter[self, device_uuid, event_name, report]]:
constant[Notify that a report has been received from a device.
This routine is called synchronously in the event loop by the DeviceManager
]
if compare[name[device_uuid] <ast.NotIn object at 0x7da2590d7190> name[self]._connections] begin[:]
call[name[self]._logger.debug, parameter[constant[Dropping report for device without an active connection, uuid=0x%X], name[device_uuid]]]
return[None]
variable[slug] assign[=] call[name[self]._build_device_slug, parameter[name[device_uuid]]]
variable[streaming_topic] assign[=] binary_operation[name[self].topics.prefix + call[constant[devices/{}/data/streaming].format, parameter[name[slug]]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c00310>, <ast.Constant object at 0x7da1b0c01540>], [<ast.Constant object at 0x7da1b0c00070>, <ast.Constant object at 0x7da1b0c014b0>]]
variable[ser] assign[=] call[name[report].serialize, parameter[]]
call[name[data]][constant[received_time]] assign[=] call[call[call[name[ser]][constant[received_time]].strftime, parameter[constant[%Y%m%dT%H:%M:%S.%fZ]]].encode, parameter[]]
call[name[data]][constant[report_origin]] assign[=] call[name[ser]][constant[origin]]
call[name[data]][constant[report_format]] assign[=] call[name[ser]][constant[report_format]]
call[name[data]][constant[report]] assign[=] call[name[binascii].hexlify, parameter[call[name[ser]][constant[encoded_report]]]]
call[name[data]][constant[fragment_count]] assign[=] constant[1]
call[name[data]][constant[fragment_index]] assign[=] constant[0]
call[name[self]._logger.debug, parameter[constant[Publishing report: (topic=%s)], name[streaming_topic]]]
call[name[self].client.publish, parameter[name[streaming_topic], name[data]]] | keyword[def] identifier[_notify_report] ( identifier[self] , identifier[device_uuid] , identifier[event_name] , identifier[report] ):
literal[string]
keyword[if] identifier[device_uuid] keyword[not] keyword[in] identifier[self] . identifier[_connections] :
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[device_uuid] )
keyword[return]
identifier[slug] = identifier[self] . identifier[_build_device_slug] ( identifier[device_uuid] )
identifier[streaming_topic] = identifier[self] . identifier[topics] . identifier[prefix] + literal[string] . identifier[format] ( identifier[slug] )
identifier[data] ={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[ser] = identifier[report] . identifier[serialize] ()
identifier[data] [ literal[string] ]= identifier[ser] [ literal[string] ]. identifier[strftime] ( literal[string] ). identifier[encode] ()
identifier[data] [ literal[string] ]= identifier[ser] [ literal[string] ]
identifier[data] [ literal[string] ]= identifier[ser] [ literal[string] ]
identifier[data] [ literal[string] ]= identifier[binascii] . identifier[hexlify] ( identifier[ser] [ literal[string] ])
identifier[data] [ literal[string] ]= literal[int]
identifier[data] [ literal[string] ]= literal[int]
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[streaming_topic] )
identifier[self] . identifier[client] . identifier[publish] ( identifier[streaming_topic] , identifier[data] ) | def _notify_report(self, device_uuid, event_name, report):
"""Notify that a report has been received from a device.
This routine is called synchronously in the event loop by the DeviceManager
"""
if device_uuid not in self._connections:
self._logger.debug('Dropping report for device without an active connection, uuid=0x%X', device_uuid)
return # depends on [control=['if'], data=['device_uuid']]
slug = self._build_device_slug(device_uuid)
streaming_topic = self.topics.prefix + 'devices/{}/data/streaming'.format(slug)
data = {'type': 'notification', 'operation': 'report'}
ser = report.serialize()
data['received_time'] = ser['received_time'].strftime('%Y%m%dT%H:%M:%S.%fZ').encode()
data['report_origin'] = ser['origin']
data['report_format'] = ser['report_format']
data['report'] = binascii.hexlify(ser['encoded_report'])
data['fragment_count'] = 1
data['fragment_index'] = 0
self._logger.debug('Publishing report: (topic=%s)', streaming_topic)
self.client.publish(streaming_topic, data) |
def register(self, request_class: Request, handler_factory: Callable[[], Handler]) -> None:
    """
    Register the handler for the command or event.
    Commands allow exactly one registered handler; events may
    accumulate any number of subscribers.
    :param request_class: The command or event to dispatch. It must implement getKey()
    :param handler_factory: A factory method to create the handler to dispatch to
    :return:
    """
    key = request_class.__name__
    is_command = request_class.is_command()
    is_event = request_class.is_event()
    is_present = key in self._registry
    if is_command and is_present:
        # Commands are point-to-point: registering a second handler is
        # a configuration error.
        raise ConfigurationException("A handler for this request has already been registered")
    elif is_event and is_present:
        # Events are publish/subscribe: append another subscriber.
        self._registry[key].append(handler_factory)
    elif is_command or is_event:
        # First registration for this request type.
        self._registry[key] = [handler_factory]
self._registry[key] = [handler_factory] | def function[register, parameter[self, request_class, handler_factory]]:
constant[
Register the handler for the command
:param request_class: The command or event to dispatch. It must implement getKey()
:param handler_factory: A factory method to create the handler to dispatch to
:return:
]
variable[key] assign[=] name[request_class].__name__
variable[is_command] assign[=] call[name[request_class].is_command, parameter[]]
variable[is_event] assign[=] call[name[request_class].is_event, parameter[]]
variable[is_present] assign[=] compare[name[key] in name[self]._registry]
if <ast.BoolOp object at 0x7da1b0ff0100> begin[:]
<ast.Raise object at 0x7da1b0ff2560> | keyword[def] identifier[register] ( identifier[self] , identifier[request_class] : identifier[Request] , identifier[handler_factory] : identifier[Callable] [[], identifier[Handler] ])-> keyword[None] :
literal[string]
identifier[key] = identifier[request_class] . identifier[__name__]
identifier[is_command] = identifier[request_class] . identifier[is_command] ()
identifier[is_event] = identifier[request_class] . identifier[is_event] ()
identifier[is_present] = identifier[key] keyword[in] identifier[self] . identifier[_registry]
keyword[if] identifier[is_command] keyword[and] identifier[is_present] :
keyword[raise] identifier[ConfigurationException] ( literal[string] )
keyword[elif] identifier[is_event] keyword[and] identifier[is_present] :
identifier[self] . identifier[_registry] [ identifier[key] ]. identifier[append] ( identifier[handler_factory] )
keyword[elif] identifier[is_command] keyword[or] identifier[is_event] :
identifier[self] . identifier[_registry] [ identifier[key] ]=[ identifier[handler_factory] ] | def register(self, request_class: Request, handler_factory: Callable[[], Handler]) -> None:
"""
Register the handler for the command
:param request_class: The command or event to dispatch. It must implement getKey()
:param handler_factory: A factory method to create the handler to dispatch to
:return:
"""
key = request_class.__name__
is_command = request_class.is_command()
is_event = request_class.is_event()
is_present = key in self._registry
if is_command and is_present:
raise ConfigurationException('A handler for this request has already been registered') # depends on [control=['if'], data=[]]
elif is_event and is_present:
self._registry[key].append(handler_factory) # depends on [control=['if'], data=[]]
elif is_command or is_event:
self._registry[key] = [handler_factory] # depends on [control=['if'], data=[]] |
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True | def function[pid_exists, parameter[pid]]:
constant[Check whether pid exists in the current process table.]
if compare[name[pid] less[<] constant[0]] begin[:]
return[constant[False]]
<ast.Try object at 0x7da20c6ab760> | keyword[def] identifier[pid_exists] ( identifier[pid] ):
literal[string]
keyword[if] identifier[pid] < literal[int] :
keyword[return] keyword[False]
keyword[try] :
identifier[os] . identifier[kill] ( identifier[pid] , literal[int] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[return] identifier[e] . identifier[errno] == identifier[errno] . identifier[EPERM]
keyword[else] :
keyword[return] keyword[True] | def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if pid < 0:
return False # depends on [control=['if'], data=[]]
try:
os.kill(pid, 0) # depends on [control=['try'], data=[]]
except OSError as e:
return e.errno == errno.EPERM # depends on [control=['except'], data=['e']]
else:
return True |
def Upload(self, fd, sign_fn=None):
"""Uploads data from a given stream and signs them with a given key."""
if not sign_fn:
raise ValueError("sign_fn can't be empty. "
"See DefaultUploadSigner as a possible option.")
args = binary_management_pb2.ApiUploadGrrBinaryArgs(
type=self.binary_type, path=self.path)
while True:
data = fd.read(self.__class__.CHUNK_SIZE)
if not data:
break
blob = args.blobs.add()
blob.signature = sign_fn(data)
blob.signature_type = blob.RSA_PKCS1v15
blob.digest = hashlib.sha256(data).digest()
blob.digest_type = blob.SHA256
blob.data = data
self._context.SendRequest("UploadGrrBinary", args) | def function[Upload, parameter[self, fd, sign_fn]]:
constant[Uploads data from a given stream and signs them with a given key.]
if <ast.UnaryOp object at 0x7da1b1b98970> begin[:]
<ast.Raise object at 0x7da1b1b98b50>
variable[args] assign[=] call[name[binary_management_pb2].ApiUploadGrrBinaryArgs, parameter[]]
while constant[True] begin[:]
variable[data] assign[=] call[name[fd].read, parameter[name[self].__class__.CHUNK_SIZE]]
if <ast.UnaryOp object at 0x7da1b1b98670> begin[:]
break
variable[blob] assign[=] call[name[args].blobs.add, parameter[]]
name[blob].signature assign[=] call[name[sign_fn], parameter[name[data]]]
name[blob].signature_type assign[=] name[blob].RSA_PKCS1v15
name[blob].digest assign[=] call[call[name[hashlib].sha256, parameter[name[data]]].digest, parameter[]]
name[blob].digest_type assign[=] name[blob].SHA256
name[blob].data assign[=] name[data]
call[name[self]._context.SendRequest, parameter[constant[UploadGrrBinary], name[args]]] | keyword[def] identifier[Upload] ( identifier[self] , identifier[fd] , identifier[sign_fn] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[sign_fn] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[args] = identifier[binary_management_pb2] . identifier[ApiUploadGrrBinaryArgs] (
identifier[type] = identifier[self] . identifier[binary_type] , identifier[path] = identifier[self] . identifier[path] )
keyword[while] keyword[True] :
identifier[data] = identifier[fd] . identifier[read] ( identifier[self] . identifier[__class__] . identifier[CHUNK_SIZE] )
keyword[if] keyword[not] identifier[data] :
keyword[break]
identifier[blob] = identifier[args] . identifier[blobs] . identifier[add] ()
identifier[blob] . identifier[signature] = identifier[sign_fn] ( identifier[data] )
identifier[blob] . identifier[signature_type] = identifier[blob] . identifier[RSA_PKCS1v15]
identifier[blob] . identifier[digest] = identifier[hashlib] . identifier[sha256] ( identifier[data] ). identifier[digest] ()
identifier[blob] . identifier[digest_type] = identifier[blob] . identifier[SHA256]
identifier[blob] . identifier[data] = identifier[data]
identifier[self] . identifier[_context] . identifier[SendRequest] ( literal[string] , identifier[args] ) | def Upload(self, fd, sign_fn=None):
"""Uploads data from a given stream and signs them with a given key."""
if not sign_fn:
raise ValueError("sign_fn can't be empty. See DefaultUploadSigner as a possible option.") # depends on [control=['if'], data=[]]
args = binary_management_pb2.ApiUploadGrrBinaryArgs(type=self.binary_type, path=self.path)
while True:
data = fd.read(self.__class__.CHUNK_SIZE)
if not data:
break # depends on [control=['if'], data=[]]
blob = args.blobs.add()
blob.signature = sign_fn(data)
blob.signature_type = blob.RSA_PKCS1v15
blob.digest = hashlib.sha256(data).digest()
blob.digest_type = blob.SHA256
blob.data = data # depends on [control=['while'], data=[]]
self._context.SendRequest('UploadGrrBinary', args) |
def save_statement(self, statement):
"""
Save xAPI statement.
Arguments:
statement (EnterpriseStatement): xAPI Statement to send to the LRS.
Raises:
ClientError: If xAPI statement fails to save.
"""
response = self.lrs.save_statement(statement)
if not response:
raise ClientError('EnterpriseXAPIClient request failed.') | def function[save_statement, parameter[self, statement]]:
constant[
Save xAPI statement.
Arguments:
statement (EnterpriseStatement): xAPI Statement to send to the LRS.
Raises:
ClientError: If xAPI statement fails to save.
]
variable[response] assign[=] call[name[self].lrs.save_statement, parameter[name[statement]]]
if <ast.UnaryOp object at 0x7da1b0057b20> begin[:]
<ast.Raise object at 0x7da1b0057d60> | keyword[def] identifier[save_statement] ( identifier[self] , identifier[statement] ):
literal[string]
identifier[response] = identifier[self] . identifier[lrs] . identifier[save_statement] ( identifier[statement] )
keyword[if] keyword[not] identifier[response] :
keyword[raise] identifier[ClientError] ( literal[string] ) | def save_statement(self, statement):
"""
Save xAPI statement.
Arguments:
statement (EnterpriseStatement): xAPI Statement to send to the LRS.
Raises:
ClientError: If xAPI statement fails to save.
"""
response = self.lrs.save_statement(statement)
if not response:
raise ClientError('EnterpriseXAPIClient request failed.') # depends on [control=['if'], data=[]] |
def top_stories(self, limit=5, first=None, last=None, json=False):
"""
Get the top story objects list
params :
limit = (default | 5) number of story objects needed
json = (default | False)
The method uses asynchronous grequest form gevent
"""
story_ids = requests.get(TOP_STORIES_URL).json()
story_urls = []
for story_id in story_ids:
url = API_BASE + "item/" + str(story_id) + '.json'
story_urls.append(url)
if first and last:
story_urls = story_urls[first:last]
if limit != 5:
story_urls[:limit] # default not given
else:
story_urls = story_urls[:limit]
# try:
# response_queue = fetch_parallel(story_urls)
# if json:
# while not response_queue.empty():
# yield response_queue.get()
# else:
# while not response_queue.empty():
# yield story_parser(response_queue.get())
# except AttributeError:
# Exception("Too many requests worker!!")
# using gevent
response_list = fetch_event(story_urls)
if json:
yield response_list
else:
for response in response_list:
yield story_parser(response) | def function[top_stories, parameter[self, limit, first, last, json]]:
constant[
Get the top story objects list
params :
limit = (default | 5) number of story objects needed
json = (default | False)
The method uses asynchronous grequest form gevent
]
variable[story_ids] assign[=] call[call[name[requests].get, parameter[name[TOP_STORIES_URL]]].json, parameter[]]
variable[story_urls] assign[=] list[[]]
for taget[name[story_id]] in starred[name[story_ids]] begin[:]
variable[url] assign[=] binary_operation[binary_operation[binary_operation[name[API_BASE] + constant[item/]] + call[name[str], parameter[name[story_id]]]] + constant[.json]]
call[name[story_urls].append, parameter[name[url]]]
if <ast.BoolOp object at 0x7da1b2650a30> begin[:]
variable[story_urls] assign[=] call[name[story_urls]][<ast.Slice object at 0x7da1b2651180>]
if compare[name[limit] not_equal[!=] constant[5]] begin[:]
call[name[story_urls]][<ast.Slice object at 0x7da20e957370>]
variable[response_list] assign[=] call[name[fetch_event], parameter[name[story_urls]]]
if name[json] begin[:]
<ast.Yield object at 0x7da20e954490> | keyword[def] identifier[top_stories] ( identifier[self] , identifier[limit] = literal[int] , identifier[first] = keyword[None] , identifier[last] = keyword[None] , identifier[json] = keyword[False] ):
literal[string]
identifier[story_ids] = identifier[requests] . identifier[get] ( identifier[TOP_STORIES_URL] ). identifier[json] ()
identifier[story_urls] =[]
keyword[for] identifier[story_id] keyword[in] identifier[story_ids] :
identifier[url] = identifier[API_BASE] + literal[string] + identifier[str] ( identifier[story_id] )+ literal[string]
identifier[story_urls] . identifier[append] ( identifier[url] )
keyword[if] identifier[first] keyword[and] identifier[last] :
identifier[story_urls] = identifier[story_urls] [ identifier[first] : identifier[last] ]
keyword[if] identifier[limit] != literal[int] :
identifier[story_urls] [: identifier[limit] ]
keyword[else] :
identifier[story_urls] = identifier[story_urls] [: identifier[limit] ]
identifier[response_list] = identifier[fetch_event] ( identifier[story_urls] )
keyword[if] identifier[json] :
keyword[yield] identifier[response_list]
keyword[else] :
keyword[for] identifier[response] keyword[in] identifier[response_list] :
keyword[yield] identifier[story_parser] ( identifier[response] ) | def top_stories(self, limit=5, first=None, last=None, json=False):
"""
Get the top story objects list
params :
limit = (default | 5) number of story objects needed
json = (default | False)
The method uses asynchronous grequest form gevent
"""
story_ids = requests.get(TOP_STORIES_URL).json()
story_urls = []
for story_id in story_ids:
url = API_BASE + 'item/' + str(story_id) + '.json'
story_urls.append(url) # depends on [control=['for'], data=['story_id']]
if first and last:
story_urls = story_urls[first:last]
if limit != 5:
story_urls[:limit] # default not given # depends on [control=['if'], data=['limit']] # depends on [control=['if'], data=[]]
else:
story_urls = story_urls[:limit]
# try:
# response_queue = fetch_parallel(story_urls)
# if json:
# while not response_queue.empty():
# yield response_queue.get()
# else:
# while not response_queue.empty():
# yield story_parser(response_queue.get())
# except AttributeError:
# Exception("Too many requests worker!!")
# using gevent
response_list = fetch_event(story_urls)
if json:
yield response_list # depends on [control=['if'], data=[]]
else:
for response in response_list:
yield story_parser(response) # depends on [control=['for'], data=['response']] |
def _compute_term2(self, C, mag, r):
"""
This computes the term f2 equation 8 Drouet & Cotton (2015)
"""
return (C['c4'] + C['c5'] * mag) * \
np.log(np.sqrt(r**2 + C['c6']**2)) + C['c7'] * r | def function[_compute_term2, parameter[self, C, mag, r]]:
constant[
This computes the term f2 equation 8 Drouet & Cotton (2015)
]
return[binary_operation[binary_operation[binary_operation[call[name[C]][constant[c4]] + binary_operation[call[name[C]][constant[c5]] * name[mag]]] * call[name[np].log, parameter[call[name[np].sqrt, parameter[binary_operation[binary_operation[name[r] ** constant[2]] + binary_operation[call[name[C]][constant[c6]] ** constant[2]]]]]]]] + binary_operation[call[name[C]][constant[c7]] * name[r]]]] | keyword[def] identifier[_compute_term2] ( identifier[self] , identifier[C] , identifier[mag] , identifier[r] ):
literal[string]
keyword[return] ( identifier[C] [ literal[string] ]+ identifier[C] [ literal[string] ]* identifier[mag] )* identifier[np] . identifier[log] ( identifier[np] . identifier[sqrt] ( identifier[r] ** literal[int] + identifier[C] [ literal[string] ]** literal[int] ))+ identifier[C] [ literal[string] ]* identifier[r] | def _compute_term2(self, C, mag, r):
"""
This computes the term f2 equation 8 Drouet & Cotton (2015)
"""
return (C['c4'] + C['c5'] * mag) * np.log(np.sqrt(r ** 2 + C['c6'] ** 2)) + C['c7'] * r |
def _simplify(self):
"""
RegionSimplifier performs the following simplifications:
- Remove redundant Gotos
- Remove redundant If/If-else statements
"""
r = self.region
r = self._simplify_gotos(r)
r = self._simplify_ifs(r)
self.result = r | def function[_simplify, parameter[self]]:
constant[
RegionSimplifier performs the following simplifications:
- Remove redundant Gotos
- Remove redundant If/If-else statements
]
variable[r] assign[=] name[self].region
variable[r] assign[=] call[name[self]._simplify_gotos, parameter[name[r]]]
variable[r] assign[=] call[name[self]._simplify_ifs, parameter[name[r]]]
name[self].result assign[=] name[r] | keyword[def] identifier[_simplify] ( identifier[self] ):
literal[string]
identifier[r] = identifier[self] . identifier[region]
identifier[r] = identifier[self] . identifier[_simplify_gotos] ( identifier[r] )
identifier[r] = identifier[self] . identifier[_simplify_ifs] ( identifier[r] )
identifier[self] . identifier[result] = identifier[r] | def _simplify(self):
"""
RegionSimplifier performs the following simplifications:
- Remove redundant Gotos
- Remove redundant If/If-else statements
"""
r = self.region
r = self._simplify_gotos(r)
r = self._simplify_ifs(r)
self.result = r |
def merge_data(path_data, request_data):
"""
Merge data from the URI path and the request.
Path data wins.
"""
merged = request_data.copy() if request_data else {}
merged.update(path_data or {})
return merged | def function[merge_data, parameter[path_data, request_data]]:
constant[
Merge data from the URI path and the request.
Path data wins.
]
variable[merged] assign[=] <ast.IfExp object at 0x7da1b0c40820>
call[name[merged].update, parameter[<ast.BoolOp object at 0x7da1b0c3f5b0>]]
return[name[merged]] | keyword[def] identifier[merge_data] ( identifier[path_data] , identifier[request_data] ):
literal[string]
identifier[merged] = identifier[request_data] . identifier[copy] () keyword[if] identifier[request_data] keyword[else] {}
identifier[merged] . identifier[update] ( identifier[path_data] keyword[or] {})
keyword[return] identifier[merged] | def merge_data(path_data, request_data):
"""
Merge data from the URI path and the request.
Path data wins.
"""
merged = request_data.copy() if request_data else {}
merged.update(path_data or {})
return merged |
def dumps(obj, **kwargs) -> str:
"""
Serialize a BioC ``obj`` to a JSON formatted ``str``.
"""
return json.dumps(obj, cls=BioCJSONEncoder, **kwargs) | def function[dumps, parameter[obj]]:
constant[
Serialize a BioC ``obj`` to a JSON formatted ``str``.
]
return[call[name[json].dumps, parameter[name[obj]]]] | keyword[def] identifier[dumps] ( identifier[obj] ,** identifier[kwargs] )-> identifier[str] :
literal[string]
keyword[return] identifier[json] . identifier[dumps] ( identifier[obj] , identifier[cls] = identifier[BioCJSONEncoder] ,** identifier[kwargs] ) | def dumps(obj, **kwargs) -> str:
"""
Serialize a BioC ``obj`` to a JSON formatted ``str``.
"""
return json.dumps(obj, cls=BioCJSONEncoder, **kwargs) |
def pasa(args):
"""
%prog ${pasadb}.assemblies.fasta ${pasadb}.pasa_assemblies.gff3
Wraps `pasa_asmbls_to_training_set.dbi`.
"""
from jcvi.formats.base import SetFile
from jcvi.formats.gff import Gff
p = OptionParser(pasa.__doc__)
p.set_home("pasa")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, gffile = args
transcodergff = fastafile + ".transdecoder.gff3"
transcodergenomegff = fastafile + ".transdecoder.genome.gff3"
if need_update((fastafile, gffile), (transcodergff, transcodergenomegff)):
cmd = "{0}/scripts/pasa_asmbls_to_training_set.dbi".format(opts.pasa_home)
cmd += " --pasa_transcripts_fasta {0} --pasa_transcripts_gff3 {1}".\
format(fastafile, gffile)
sh(cmd)
completeids = fastafile.rsplit(".", 1)[0] + ".complete.ids"
if need_update(transcodergff, completeids):
cmd = "grep complete {0} | cut -f1 | sort -u".format(transcodergff)
sh(cmd, outfile=completeids)
complete = SetFile(completeids)
seen = set()
completegff = transcodergenomegff.rsplit(".", 1)[0] + ".complete.gff3"
fw = open(completegff, "w")
gff = Gff(transcodergenomegff)
for g in gff:
a = g.attributes
if "Parent" in a:
id = a["Parent"][0]
else:
id = a["ID"][0]
asmbl_id = id.split("|")[0]
if asmbl_id not in complete:
continue
print(g, file=fw)
if g.type == "gene":
seen.add(id)
fw.close()
logging.debug("A total of {0} complete models extracted to `{1}`.".\
format(len(seen), completegff)) | def function[pasa, parameter[args]]:
constant[
%prog ${pasadb}.assemblies.fasta ${pasadb}.pasa_assemblies.gff3
Wraps `pasa_asmbls_to_training_set.dbi`.
]
from relative_module[jcvi.formats.base] import module[SetFile]
from relative_module[jcvi.formats.gff] import module[Gff]
variable[p] assign[=] call[name[OptionParser], parameter[name[pasa].__doc__]]
call[name[p].set_home, parameter[constant[pasa]]]
<ast.Tuple object at 0x7da2046219c0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da204623460>]]
<ast.Tuple object at 0x7da204621ff0> assign[=] name[args]
variable[transcodergff] assign[=] binary_operation[name[fastafile] + constant[.transdecoder.gff3]]
variable[transcodergenomegff] assign[=] binary_operation[name[fastafile] + constant[.transdecoder.genome.gff3]]
if call[name[need_update], parameter[tuple[[<ast.Name object at 0x7da2046217e0>, <ast.Name object at 0x7da204623e80>]], tuple[[<ast.Name object at 0x7da204621f30>, <ast.Name object at 0x7da2046210c0>]]]] begin[:]
variable[cmd] assign[=] call[constant[{0}/scripts/pasa_asmbls_to_training_set.dbi].format, parameter[name[opts].pasa_home]]
<ast.AugAssign object at 0x7da2046238e0>
call[name[sh], parameter[name[cmd]]]
variable[completeids] assign[=] binary_operation[call[call[name[fastafile].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.complete.ids]]
if call[name[need_update], parameter[name[transcodergff], name[completeids]]] begin[:]
variable[cmd] assign[=] call[constant[grep complete {0} | cut -f1 | sort -u].format, parameter[name[transcodergff]]]
call[name[sh], parameter[name[cmd]]]
variable[complete] assign[=] call[name[SetFile], parameter[name[completeids]]]
variable[seen] assign[=] call[name[set], parameter[]]
variable[completegff] assign[=] binary_operation[call[call[name[transcodergenomegff].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.complete.gff3]]
variable[fw] assign[=] call[name[open], parameter[name[completegff], constant[w]]]
variable[gff] assign[=] call[name[Gff], parameter[name[transcodergenomegff]]]
for taget[name[g]] in starred[name[gff]] begin[:]
variable[a] assign[=] name[g].attributes
if compare[constant[Parent] in name[a]] begin[:]
variable[id] assign[=] call[call[name[a]][constant[Parent]]][constant[0]]
variable[asmbl_id] assign[=] call[call[name[id].split, parameter[constant[|]]]][constant[0]]
if compare[name[asmbl_id] <ast.NotIn object at 0x7da2590d7190> name[complete]] begin[:]
continue
call[name[print], parameter[name[g]]]
if compare[name[g].type equal[==] constant[gene]] begin[:]
call[name[seen].add, parameter[name[id]]]
call[name[fw].close, parameter[]]
call[name[logging].debug, parameter[call[constant[A total of {0} complete models extracted to `{1}`.].format, parameter[call[name[len], parameter[name[seen]]], name[completegff]]]]] | keyword[def] identifier[pasa] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[base] keyword[import] identifier[SetFile]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[gff] keyword[import] identifier[Gff]
identifier[p] = identifier[OptionParser] ( identifier[pasa] . identifier[__doc__] )
identifier[p] . identifier[set_home] ( literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastafile] , identifier[gffile] = identifier[args]
identifier[transcodergff] = identifier[fastafile] + literal[string]
identifier[transcodergenomegff] = identifier[fastafile] + literal[string]
keyword[if] identifier[need_update] (( identifier[fastafile] , identifier[gffile] ),( identifier[transcodergff] , identifier[transcodergenomegff] )):
identifier[cmd] = literal[string] . identifier[format] ( identifier[opts] . identifier[pasa_home] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[fastafile] , identifier[gffile] )
identifier[sh] ( identifier[cmd] )
identifier[completeids] = identifier[fastafile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
keyword[if] identifier[need_update] ( identifier[transcodergff] , identifier[completeids] ):
identifier[cmd] = literal[string] . identifier[format] ( identifier[transcodergff] )
identifier[sh] ( identifier[cmd] , identifier[outfile] = identifier[completeids] )
identifier[complete] = identifier[SetFile] ( identifier[completeids] )
identifier[seen] = identifier[set] ()
identifier[completegff] = identifier[transcodergenomegff] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
identifier[fw] = identifier[open] ( identifier[completegff] , literal[string] )
identifier[gff] = identifier[Gff] ( identifier[transcodergenomegff] )
keyword[for] identifier[g] keyword[in] identifier[gff] :
identifier[a] = identifier[g] . identifier[attributes]
keyword[if] literal[string] keyword[in] identifier[a] :
identifier[id] = identifier[a] [ literal[string] ][ literal[int] ]
keyword[else] :
identifier[id] = identifier[a] [ literal[string] ][ literal[int] ]
identifier[asmbl_id] = identifier[id] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[asmbl_id] keyword[not] keyword[in] identifier[complete] :
keyword[continue]
identifier[print] ( identifier[g] , identifier[file] = identifier[fw] )
keyword[if] identifier[g] . identifier[type] == literal[string] :
identifier[seen] . identifier[add] ( identifier[id] )
identifier[fw] . identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[seen] ), identifier[completegff] )) | def pasa(args):
"""
%prog ${pasadb}.assemblies.fasta ${pasadb}.pasa_assemblies.gff3
Wraps `pasa_asmbls_to_training_set.dbi`.
"""
from jcvi.formats.base import SetFile
from jcvi.formats.gff import Gff
p = OptionParser(pasa.__doc__)
p.set_home('pasa')
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(fastafile, gffile) = args
transcodergff = fastafile + '.transdecoder.gff3'
transcodergenomegff = fastafile + '.transdecoder.genome.gff3'
if need_update((fastafile, gffile), (transcodergff, transcodergenomegff)):
cmd = '{0}/scripts/pasa_asmbls_to_training_set.dbi'.format(opts.pasa_home)
cmd += ' --pasa_transcripts_fasta {0} --pasa_transcripts_gff3 {1}'.format(fastafile, gffile)
sh(cmd) # depends on [control=['if'], data=[]]
completeids = fastafile.rsplit('.', 1)[0] + '.complete.ids'
if need_update(transcodergff, completeids):
cmd = 'grep complete {0} | cut -f1 | sort -u'.format(transcodergff)
sh(cmd, outfile=completeids) # depends on [control=['if'], data=[]]
complete = SetFile(completeids)
seen = set()
completegff = transcodergenomegff.rsplit('.', 1)[0] + '.complete.gff3'
fw = open(completegff, 'w')
gff = Gff(transcodergenomegff)
for g in gff:
a = g.attributes
if 'Parent' in a:
id = a['Parent'][0] # depends on [control=['if'], data=['a']]
else:
id = a['ID'][0]
asmbl_id = id.split('|')[0]
if asmbl_id not in complete:
continue # depends on [control=['if'], data=[]]
print(g, file=fw)
if g.type == 'gene':
seen.add(id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['g']]
fw.close()
logging.debug('A total of {0} complete models extracted to `{1}`.'.format(len(seen), completegff)) |
def _translate_sext(self, oprnd1, oprnd2, oprnd3):
"""Return a formula representation of a SEXT instruction.
"""
assert oprnd1.size and oprnd3.size
op1_var = self._translate_src_oprnd(oprnd1)
op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3)
if oprnd3.size > oprnd1.size:
result = smtfunction.sign_extend(op1_var, op3_var.size)
elif oprnd3.size < oprnd1.size:
raise Exception("Operands size mismatch.")
else:
result = op1_var
return [op3_var == result] + op3_var_constrs | def function[_translate_sext, parameter[self, oprnd1, oprnd2, oprnd3]]:
constant[Return a formula representation of a SEXT instruction.
]
assert[<ast.BoolOp object at 0x7da1b0980220>]
variable[op1_var] assign[=] call[name[self]._translate_src_oprnd, parameter[name[oprnd1]]]
<ast.Tuple object at 0x7da1b0983250> assign[=] call[name[self]._translate_dst_oprnd, parameter[name[oprnd3]]]
if compare[name[oprnd3].size greater[>] name[oprnd1].size] begin[:]
variable[result] assign[=] call[name[smtfunction].sign_extend, parameter[name[op1_var], name[op3_var].size]]
return[binary_operation[list[[<ast.Compare object at 0x7da1b0981090>]] + name[op3_var_constrs]]] | keyword[def] identifier[_translate_sext] ( identifier[self] , identifier[oprnd1] , identifier[oprnd2] , identifier[oprnd3] ):
literal[string]
keyword[assert] identifier[oprnd1] . identifier[size] keyword[and] identifier[oprnd3] . identifier[size]
identifier[op1_var] = identifier[self] . identifier[_translate_src_oprnd] ( identifier[oprnd1] )
identifier[op3_var] , identifier[op3_var_constrs] = identifier[self] . identifier[_translate_dst_oprnd] ( identifier[oprnd3] )
keyword[if] identifier[oprnd3] . identifier[size] > identifier[oprnd1] . identifier[size] :
identifier[result] = identifier[smtfunction] . identifier[sign_extend] ( identifier[op1_var] , identifier[op3_var] . identifier[size] )
keyword[elif] identifier[oprnd3] . identifier[size] < identifier[oprnd1] . identifier[size] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[else] :
identifier[result] = identifier[op1_var]
keyword[return] [ identifier[op3_var] == identifier[result] ]+ identifier[op3_var_constrs] | def _translate_sext(self, oprnd1, oprnd2, oprnd3):
"""Return a formula representation of a SEXT instruction.
"""
assert oprnd1.size and oprnd3.size
op1_var = self._translate_src_oprnd(oprnd1)
(op3_var, op3_var_constrs) = self._translate_dst_oprnd(oprnd3)
if oprnd3.size > oprnd1.size:
result = smtfunction.sign_extend(op1_var, op3_var.size) # depends on [control=['if'], data=[]]
elif oprnd3.size < oprnd1.size:
raise Exception('Operands size mismatch.') # depends on [control=['if'], data=[]]
else:
result = op1_var
return [op3_var == result] + op3_var_constrs |
def run_step(context):
"""Print debug info to console.
context is a dictionary or dictionary-like.
If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the
debug input context), it will just dump the entire context to stdout.
Configure the debug step with the following optional context item:
debug:
keys: str (for single key) or list (of str keys). Only dump the
specified keys.
format: bool. Defaults False. Applies formatting expressions on
dump.
"""
logger.debug("started")
debug = context.get('debug', None)
if debug:
keys = debug.get('keys', None)
format = debug.get('format', False)
if keys:
logger.debug(f"Writing to output: {keys}")
if isinstance(keys, str):
payload = {keys: context[keys]}
else:
payload = {k: context[k] for k in keys}
else:
logger.debug(
"No keys specified. Writing entire context to output.")
payload = context
if format:
payload = context.get_formatted_iterable(payload)
else:
payload = context
logger.info(f'\n{json.dumps(payload, indent=2, ensure_ascii=False)}')
logger.debug("done") | def function[run_step, parameter[context]]:
constant[Print debug info to console.
context is a dictionary or dictionary-like.
If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the
debug input context), it will just dump the entire context to stdout.
Configure the debug step with the following optional context item:
debug:
keys: str (for single key) or list (of str keys). Only dump the
specified keys.
format: bool. Defaults False. Applies formatting expressions on
dump.
]
call[name[logger].debug, parameter[constant[started]]]
variable[debug] assign[=] call[name[context].get, parameter[constant[debug], constant[None]]]
if name[debug] begin[:]
variable[keys] assign[=] call[name[debug].get, parameter[constant[keys], constant[None]]]
variable[format] assign[=] call[name[debug].get, parameter[constant[format], constant[False]]]
if name[keys] begin[:]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da20c6a81f0>]]
if call[name[isinstance], parameter[name[keys], name[str]]] begin[:]
variable[payload] assign[=] dictionary[[<ast.Name object at 0x7da20c6a8b50>], [<ast.Subscript object at 0x7da20c6a9030>]]
if name[format] begin[:]
variable[payload] assign[=] call[name[context].get_formatted_iterable, parameter[name[payload]]]
call[name[logger].info, parameter[<ast.JoinedStr object at 0x7da18eb57e20>]]
call[name[logger].debug, parameter[constant[done]]] | keyword[def] identifier[run_step] ( identifier[context] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[debug] = identifier[context] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[debug] :
identifier[keys] = identifier[debug] . identifier[get] ( literal[string] , keyword[None] )
identifier[format] = identifier[debug] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[keys] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[keys] , identifier[str] ):
identifier[payload] ={ identifier[keys] : identifier[context] [ identifier[keys] ]}
keyword[else] :
identifier[payload] ={ identifier[k] : identifier[context] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[keys] }
keyword[else] :
identifier[logger] . identifier[debug] (
literal[string] )
identifier[payload] = identifier[context]
keyword[if] identifier[format] :
identifier[payload] = identifier[context] . identifier[get_formatted_iterable] ( identifier[payload] )
keyword[else] :
identifier[payload] = identifier[context]
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] ) | def run_step(context):
"""Print debug info to console.
context is a dictionary or dictionary-like.
If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the
debug input context), it will just dump the entire context to stdout.
Configure the debug step with the following optional context item:
debug:
keys: str (for single key) or list (of str keys). Only dump the
specified keys.
format: bool. Defaults False. Applies formatting expressions on
dump.
"""
logger.debug('started')
debug = context.get('debug', None)
if debug:
keys = debug.get('keys', None)
format = debug.get('format', False)
if keys:
logger.debug(f'Writing to output: {keys}')
if isinstance(keys, str):
payload = {keys: context[keys]} # depends on [control=['if'], data=[]]
else:
payload = {k: context[k] for k in keys} # depends on [control=['if'], data=[]]
else:
logger.debug('No keys specified. Writing entire context to output.')
payload = context
if format:
payload = context.get_formatted_iterable(payload) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
payload = context
logger.info(f'\n{json.dumps(payload, indent=2, ensure_ascii=False)}')
logger.debug('done') |
def _predict_forest(model, X, joint_contribution=False):
"""
For a given RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, or ExtraTreesClassifier returns a triple of
[prediction, bias and feature_contributions], such that prediction ≈ bias +
feature_contributions.
"""
biases = []
contributions = []
predictions = []
if joint_contribution:
for tree in model.estimators_:
pred, bias, contribution = _predict_tree(tree, X, joint_contribution=joint_contribution)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
total_contributions = []
for i in range(len(X)):
contr = {}
for j, dct in enumerate(contributions):
for k in set(dct[i]).union(set(contr.keys())):
contr[k] = (contr.get(k, 0)*j + dct[i].get(k,0) ) / (j+1)
total_contributions.append(contr)
for i, item in enumerate(contribution):
total_contributions[i]
sm = sum([v for v in contribution[i].values()])
return (np.mean(predictions, axis=0), np.mean(biases, axis=0),
total_contributions)
else:
for tree in model.estimators_:
pred, bias, contribution = _predict_tree(tree, X)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
return (np.mean(predictions, axis=0), np.mean(biases, axis=0),
np.mean(contributions, axis=0)) | def function[_predict_forest, parameter[model, X, joint_contribution]]:
constant[
For a given RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, or ExtraTreesClassifier returns a triple of
[prediction, bias and feature_contributions], such that prediction ≈ bias +
feature_contributions.
]
variable[biases] assign[=] list[[]]
variable[contributions] assign[=] list[[]]
variable[predictions] assign[=] list[[]]
if name[joint_contribution] begin[:]
for taget[name[tree]] in starred[name[model].estimators_] begin[:]
<ast.Tuple object at 0x7da1b13abf10> assign[=] call[name[_predict_tree], parameter[name[tree], name[X]]]
call[name[biases].append, parameter[name[bias]]]
call[name[contributions].append, parameter[name[contribution]]]
call[name[predictions].append, parameter[name[pred]]]
variable[total_contributions] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[X]]]]]] begin[:]
variable[contr] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b13ab9a0>, <ast.Name object at 0x7da1b13aa4d0>]]] in starred[call[name[enumerate], parameter[name[contributions]]]] begin[:]
for taget[name[k]] in starred[call[call[name[set], parameter[call[name[dct]][name[i]]]].union, parameter[call[name[set], parameter[call[name[contr].keys, parameter[]]]]]]] begin[:]
call[name[contr]][name[k]] assign[=] binary_operation[binary_operation[binary_operation[call[name[contr].get, parameter[name[k], constant[0]]] * name[j]] + call[call[name[dct]][name[i]].get, parameter[name[k], constant[0]]]] / binary_operation[name[j] + constant[1]]]
call[name[total_contributions].append, parameter[name[contr]]]
for taget[tuple[[<ast.Name object at 0x7da1b13aaa10>, <ast.Name object at 0x7da1b13a9e70>]]] in starred[call[name[enumerate], parameter[name[contribution]]]] begin[:]
call[name[total_contributions]][name[i]]
variable[sm] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b13a9630>]]
return[tuple[[<ast.Call object at 0x7da1b1345600>, <ast.Call object at 0x7da1b1344640>, <ast.Name object at 0x7da1b1345e40>]]] | keyword[def] identifier[_predict_forest] ( identifier[model] , identifier[X] , identifier[joint_contribution] = keyword[False] ):
literal[string]
identifier[biases] =[]
identifier[contributions] =[]
identifier[predictions] =[]
keyword[if] identifier[joint_contribution] :
keyword[for] identifier[tree] keyword[in] identifier[model] . identifier[estimators_] :
identifier[pred] , identifier[bias] , identifier[contribution] = identifier[_predict_tree] ( identifier[tree] , identifier[X] , identifier[joint_contribution] = identifier[joint_contribution] )
identifier[biases] . identifier[append] ( identifier[bias] )
identifier[contributions] . identifier[append] ( identifier[contribution] )
identifier[predictions] . identifier[append] ( identifier[pred] )
identifier[total_contributions] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[X] )):
identifier[contr] ={}
keyword[for] identifier[j] , identifier[dct] keyword[in] identifier[enumerate] ( identifier[contributions] ):
keyword[for] identifier[k] keyword[in] identifier[set] ( identifier[dct] [ identifier[i] ]). identifier[union] ( identifier[set] ( identifier[contr] . identifier[keys] ())):
identifier[contr] [ identifier[k] ]=( identifier[contr] . identifier[get] ( identifier[k] , literal[int] )* identifier[j] + identifier[dct] [ identifier[i] ]. identifier[get] ( identifier[k] , literal[int] ))/( identifier[j] + literal[int] )
identifier[total_contributions] . identifier[append] ( identifier[contr] )
keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[contribution] ):
identifier[total_contributions] [ identifier[i] ]
identifier[sm] = identifier[sum] ([ identifier[v] keyword[for] identifier[v] keyword[in] identifier[contribution] [ identifier[i] ]. identifier[values] ()])
keyword[return] ( identifier[np] . identifier[mean] ( identifier[predictions] , identifier[axis] = literal[int] ), identifier[np] . identifier[mean] ( identifier[biases] , identifier[axis] = literal[int] ),
identifier[total_contributions] )
keyword[else] :
keyword[for] identifier[tree] keyword[in] identifier[model] . identifier[estimators_] :
identifier[pred] , identifier[bias] , identifier[contribution] = identifier[_predict_tree] ( identifier[tree] , identifier[X] )
identifier[biases] . identifier[append] ( identifier[bias] )
identifier[contributions] . identifier[append] ( identifier[contribution] )
identifier[predictions] . identifier[append] ( identifier[pred] )
keyword[return] ( identifier[np] . identifier[mean] ( identifier[predictions] , identifier[axis] = literal[int] ), identifier[np] . identifier[mean] ( identifier[biases] , identifier[axis] = literal[int] ),
identifier[np] . identifier[mean] ( identifier[contributions] , identifier[axis] = literal[int] )) | def _predict_forest(model, X, joint_contribution=False):
"""
For a given RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, or ExtraTreesClassifier returns a triple of
[prediction, bias and feature_contributions], such that prediction ≈ bias +
feature_contributions.
"""
biases = []
contributions = []
predictions = []
if joint_contribution:
for tree in model.estimators_:
(pred, bias, contribution) = _predict_tree(tree, X, joint_contribution=joint_contribution)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred) # depends on [control=['for'], data=['tree']]
total_contributions = []
for i in range(len(X)):
contr = {}
for (j, dct) in enumerate(contributions):
for k in set(dct[i]).union(set(contr.keys())):
contr[k] = (contr.get(k, 0) * j + dct[i].get(k, 0)) / (j + 1) # depends on [control=['for'], data=['k']] # depends on [control=['for'], data=[]]
total_contributions.append(contr) # depends on [control=['for'], data=['i']]
for (i, item) in enumerate(contribution):
total_contributions[i]
sm = sum([v for v in contribution[i].values()]) # depends on [control=['for'], data=[]]
return (np.mean(predictions, axis=0), np.mean(biases, axis=0), total_contributions) # depends on [control=['if'], data=[]]
else:
for tree in model.estimators_:
(pred, bias, contribution) = _predict_tree(tree, X)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred) # depends on [control=['for'], data=['tree']]
return (np.mean(predictions, axis=0), np.mean(biases, axis=0), np.mean(contributions, axis=0)) |
def dist(self, src, tar):
"""Return the normalized indel distance between two strings.
This is equivalent to normalized Levenshtein distance, when only
inserts and deletes are possible.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Normalized indel distance
Examples
--------
>>> cmp = Indel()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.333333333333
>>> round(cmp.dist('Colin', 'Cuilen'), 12)
0.454545454545
>>> cmp.dist('ATCG', 'TAGC')
0.5
"""
if src == tar:
return 0.0
return self.dist_abs(src, tar) / (len(src) + len(tar)) | def function[dist, parameter[self, src, tar]]:
constant[Return the normalized indel distance between two strings.
This is equivalent to normalized Levenshtein distance, when only
inserts and deletes are possible.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Normalized indel distance
Examples
--------
>>> cmp = Indel()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.333333333333
>>> round(cmp.dist('Colin', 'Cuilen'), 12)
0.454545454545
>>> cmp.dist('ATCG', 'TAGC')
0.5
]
if compare[name[src] equal[==] name[tar]] begin[:]
return[constant[0.0]]
return[binary_operation[call[name[self].dist_abs, parameter[name[src], name[tar]]] / binary_operation[call[name[len], parameter[name[src]]] + call[name[len], parameter[name[tar]]]]]] | keyword[def] identifier[dist] ( identifier[self] , identifier[src] , identifier[tar] ):
literal[string]
keyword[if] identifier[src] == identifier[tar] :
keyword[return] literal[int]
keyword[return] identifier[self] . identifier[dist_abs] ( identifier[src] , identifier[tar] )/( identifier[len] ( identifier[src] )+ identifier[len] ( identifier[tar] )) | def dist(self, src, tar):
"""Return the normalized indel distance between two strings.
This is equivalent to normalized Levenshtein distance, when only
inserts and deletes are possible.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Normalized indel distance
Examples
--------
>>> cmp = Indel()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.333333333333
>>> round(cmp.dist('Colin', 'Cuilen'), 12)
0.454545454545
>>> cmp.dist('ATCG', 'TAGC')
0.5
"""
if src == tar:
return 0.0 # depends on [control=['if'], data=[]]
return self.dist_abs(src, tar) / (len(src) + len(tar)) |
def mementoweb_api_tags(url):
"""
Parse list of :class:`TimeResource` objects based on the mementoweb.org.
Args:
url (str): Any url.
Returns:
list: :class:`TimeResource` objects.
"""
memento_url = "http://labs.mementoweb.org/timemap/json/"
r = requests.get(memento_url + url)
if r.status_code != 200:
return []
data = r.json().get("mementos", {}).get("list", [])
if not data:
return []
resources = (
TimeResource(
url=item.get("uri", ""),
date=item.get("datetime", ""),
val=item.get("datetime", "").split("-")[0],
source="MementoWeb.org",
)
for item in data
)
# deduplicate the resources
resource_dict = {
res.val: res
for res in resources
}
return sorted(resource_dict.values(), key=lambda x: x.val) | def function[mementoweb_api_tags, parameter[url]]:
constant[
Parse list of :class:`TimeResource` objects based on the mementoweb.org.
Args:
url (str): Any url.
Returns:
list: :class:`TimeResource` objects.
]
variable[memento_url] assign[=] constant[http://labs.mementoweb.org/timemap/json/]
variable[r] assign[=] call[name[requests].get, parameter[binary_operation[name[memento_url] + name[url]]]]
if compare[name[r].status_code not_equal[!=] constant[200]] begin[:]
return[list[[]]]
variable[data] assign[=] call[call[call[name[r].json, parameter[]].get, parameter[constant[mementos], dictionary[[], []]]].get, parameter[constant[list], list[[]]]]
if <ast.UnaryOp object at 0x7da1b0ae3ee0> begin[:]
return[list[[]]]
variable[resources] assign[=] <ast.GeneratorExp object at 0x7da1b0ae3ca0>
variable[resource_dict] assign[=] <ast.DictComp object at 0x7da18dc9b070>
return[call[name[sorted], parameter[call[name[resource_dict].values, parameter[]]]]] | keyword[def] identifier[mementoweb_api_tags] ( identifier[url] ):
literal[string]
identifier[memento_url] = literal[string]
identifier[r] = identifier[requests] . identifier[get] ( identifier[memento_url] + identifier[url] )
keyword[if] identifier[r] . identifier[status_code] != literal[int] :
keyword[return] []
identifier[data] = identifier[r] . identifier[json] (). identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,[])
keyword[if] keyword[not] identifier[data] :
keyword[return] []
identifier[resources] =(
identifier[TimeResource] (
identifier[url] = identifier[item] . identifier[get] ( literal[string] , literal[string] ),
identifier[date] = identifier[item] . identifier[get] ( literal[string] , literal[string] ),
identifier[val] = identifier[item] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )[ literal[int] ],
identifier[source] = literal[string] ,
)
keyword[for] identifier[item] keyword[in] identifier[data]
)
identifier[resource_dict] ={
identifier[res] . identifier[val] : identifier[res]
keyword[for] identifier[res] keyword[in] identifier[resources]
}
keyword[return] identifier[sorted] ( identifier[resource_dict] . identifier[values] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[val] ) | def mementoweb_api_tags(url):
"""
Parse list of :class:`TimeResource` objects based on the mementoweb.org.
Args:
url (str): Any url.
Returns:
list: :class:`TimeResource` objects.
"""
memento_url = 'http://labs.mementoweb.org/timemap/json/'
r = requests.get(memento_url + url)
if r.status_code != 200:
return [] # depends on [control=['if'], data=[]]
data = r.json().get('mementos', {}).get('list', [])
if not data:
return [] # depends on [control=['if'], data=[]]
resources = (TimeResource(url=item.get('uri', ''), date=item.get('datetime', ''), val=item.get('datetime', '').split('-')[0], source='MementoWeb.org') for item in data)
# deduplicate the resources
resource_dict = {res.val: res for res in resources}
return sorted(resource_dict.values(), key=lambda x: x.val) |
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplesat_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close() | def function[delete, parameter[self]]:
constant[
Destructor.
]
if name[self].maplesat begin[:]
call[name[pysolvers].maplesat_del, parameter[name[self].maplesat]]
name[self].maplesat assign[=] constant[None]
if name[self].prfile begin[:]
call[name[self].prfile.close, parameter[]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[maplesat] :
identifier[pysolvers] . identifier[maplesat_del] ( identifier[self] . identifier[maplesat] )
identifier[self] . identifier[maplesat] = keyword[None]
keyword[if] identifier[self] . identifier[prfile] :
identifier[self] . identifier[prfile] . identifier[close] () | def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplesat_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def from_pairs(cls, doc, pairs):
"""
Construct an enumeration from an iterable of pairs.
:param doc: See `Enum.__init__`.
:type pairs: ``Iterable[Tuple[unicode, unicode]]``
:param pairs: Iterable to construct the enumeration from.
:rtype: Enum
"""
values = (EnumItem(value, desc) for value, desc in pairs)
return cls(doc=doc, values=values) | def function[from_pairs, parameter[cls, doc, pairs]]:
constant[
Construct an enumeration from an iterable of pairs.
:param doc: See `Enum.__init__`.
:type pairs: ``Iterable[Tuple[unicode, unicode]]``
:param pairs: Iterable to construct the enumeration from.
:rtype: Enum
]
variable[values] assign[=] <ast.GeneratorExp object at 0x7da20e9576a0>
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_pairs] ( identifier[cls] , identifier[doc] , identifier[pairs] ):
literal[string]
identifier[values] =( identifier[EnumItem] ( identifier[value] , identifier[desc] ) keyword[for] identifier[value] , identifier[desc] keyword[in] identifier[pairs] )
keyword[return] identifier[cls] ( identifier[doc] = identifier[doc] , identifier[values] = identifier[values] ) | def from_pairs(cls, doc, pairs):
"""
Construct an enumeration from an iterable of pairs.
:param doc: See `Enum.__init__`.
:type pairs: ``Iterable[Tuple[unicode, unicode]]``
:param pairs: Iterable to construct the enumeration from.
:rtype: Enum
"""
values = (EnumItem(value, desc) for (value, desc) in pairs)
return cls(doc=doc, values=values) |
def database_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /database-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs) | def function[database_describe, parameter[object_id, input_params, always_retry]]:
constant[
Invokes the /database-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2Fdescribe
]
return[call[name[DXHTTPRequest], parameter[binary_operation[constant[/%s/describe] <ast.Mod object at 0x7da2590d6920> name[object_id]], name[input_params]]]] | keyword[def] identifier[database_describe] ( identifier[object_id] , identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[DXHTTPRequest] ( literal[string] % identifier[object_id] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] ) | def database_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /database-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs) |
def _adjust_scrollbars(self):
""" Expands the vertical scrollbar beyond the range set by Qt.
"""
# This code is adapted from _q_adjustScrollbars in qplaintextedit.cpp
# and qtextedit.cpp.
document = self._control.document()
scrollbar = self._control.verticalScrollBar()
viewport_height = self._control.viewport().height()
if isinstance(self._control, QtGui.QPlainTextEdit):
maximum = max(0, document.lineCount() - 1)
step = viewport_height / self._control.fontMetrics().lineSpacing()
else:
# QTextEdit does not do line-based layout and blocks will not in
# general have the same height. Therefore it does not make sense to
# attempt to scroll in line height increments.
maximum = document.size().height()
step = viewport_height
diff = maximum - scrollbar.maximum()
scrollbar.setRange(0, maximum)
scrollbar.setPageStep(step)
# Compensate for undesirable scrolling that occurs automatically due to
# maximumBlockCount() text truncation.
if diff < 0 and document.blockCount() == document.maximumBlockCount():
scrollbar.setValue(scrollbar.value() + diff) | def function[_adjust_scrollbars, parameter[self]]:
constant[ Expands the vertical scrollbar beyond the range set by Qt.
]
variable[document] assign[=] call[name[self]._control.document, parameter[]]
variable[scrollbar] assign[=] call[name[self]._control.verticalScrollBar, parameter[]]
variable[viewport_height] assign[=] call[call[name[self]._control.viewport, parameter[]].height, parameter[]]
if call[name[isinstance], parameter[name[self]._control, name[QtGui].QPlainTextEdit]] begin[:]
variable[maximum] assign[=] call[name[max], parameter[constant[0], binary_operation[call[name[document].lineCount, parameter[]] - constant[1]]]]
variable[step] assign[=] binary_operation[name[viewport_height] / call[call[name[self]._control.fontMetrics, parameter[]].lineSpacing, parameter[]]]
variable[diff] assign[=] binary_operation[name[maximum] - call[name[scrollbar].maximum, parameter[]]]
call[name[scrollbar].setRange, parameter[constant[0], name[maximum]]]
call[name[scrollbar].setPageStep, parameter[name[step]]]
if <ast.BoolOp object at 0x7da18f723e20> begin[:]
call[name[scrollbar].setValue, parameter[binary_operation[call[name[scrollbar].value, parameter[]] + name[diff]]]] | keyword[def] identifier[_adjust_scrollbars] ( identifier[self] ):
literal[string]
identifier[document] = identifier[self] . identifier[_control] . identifier[document] ()
identifier[scrollbar] = identifier[self] . identifier[_control] . identifier[verticalScrollBar] ()
identifier[viewport_height] = identifier[self] . identifier[_control] . identifier[viewport] (). identifier[height] ()
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_control] , identifier[QtGui] . identifier[QPlainTextEdit] ):
identifier[maximum] = identifier[max] ( literal[int] , identifier[document] . identifier[lineCount] ()- literal[int] )
identifier[step] = identifier[viewport_height] / identifier[self] . identifier[_control] . identifier[fontMetrics] (). identifier[lineSpacing] ()
keyword[else] :
identifier[maximum] = identifier[document] . identifier[size] (). identifier[height] ()
identifier[step] = identifier[viewport_height]
identifier[diff] = identifier[maximum] - identifier[scrollbar] . identifier[maximum] ()
identifier[scrollbar] . identifier[setRange] ( literal[int] , identifier[maximum] )
identifier[scrollbar] . identifier[setPageStep] ( identifier[step] )
keyword[if] identifier[diff] < literal[int] keyword[and] identifier[document] . identifier[blockCount] ()== identifier[document] . identifier[maximumBlockCount] ():
identifier[scrollbar] . identifier[setValue] ( identifier[scrollbar] . identifier[value] ()+ identifier[diff] ) | def _adjust_scrollbars(self):
""" Expands the vertical scrollbar beyond the range set by Qt.
"""
# This code is adapted from _q_adjustScrollbars in qplaintextedit.cpp
# and qtextedit.cpp.
document = self._control.document()
scrollbar = self._control.verticalScrollBar()
viewport_height = self._control.viewport().height()
if isinstance(self._control, QtGui.QPlainTextEdit):
maximum = max(0, document.lineCount() - 1)
step = viewport_height / self._control.fontMetrics().lineSpacing() # depends on [control=['if'], data=[]]
else:
# QTextEdit does not do line-based layout and blocks will not in
# general have the same height. Therefore it does not make sense to
# attempt to scroll in line height increments.
maximum = document.size().height()
step = viewport_height
diff = maximum - scrollbar.maximum()
scrollbar.setRange(0, maximum)
scrollbar.setPageStep(step)
# Compensate for undesirable scrolling that occurs automatically due to
# maximumBlockCount() text truncation.
if diff < 0 and document.blockCount() == document.maximumBlockCount():
scrollbar.setValue(scrollbar.value() + diff) # depends on [control=['if'], data=[]] |
def feed(self, data):
"""
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
:param data: the data to add, as a ``str`` or ``bytes``
"""
self._lock.acquire()
try:
if self._event is not None:
self._event.set()
self._buffer_frombytes(b(data))
self._cv.notifyAll()
finally:
self._lock.release() | def function[feed, parameter[self, data]]:
constant[
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
:param data: the data to add, as a ``str`` or ``bytes``
]
call[name[self]._lock.acquire, parameter[]]
<ast.Try object at 0x7da1b2146320> | keyword[def] identifier[feed] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[_lock] . identifier[acquire] ()
keyword[try] :
keyword[if] identifier[self] . identifier[_event] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_event] . identifier[set] ()
identifier[self] . identifier[_buffer_frombytes] ( identifier[b] ( identifier[data] ))
identifier[self] . identifier[_cv] . identifier[notifyAll] ()
keyword[finally] :
identifier[self] . identifier[_lock] . identifier[release] () | def feed(self, data):
"""
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
:param data: the data to add, as a ``str`` or ``bytes``
"""
self._lock.acquire()
try:
if self._event is not None:
self._event.set() # depends on [control=['if'], data=[]]
self._buffer_frombytes(b(data))
self._cv.notifyAll() # depends on [control=['try'], data=[]]
finally:
self._lock.release() |
def fermi_fourier_trans_inverse_conjugate_4(qubits):
"""We will need to map the momentum states in the reversed order for
spin-down states to the position picture. This transformation can be
simply implemented the complex conjugate of the former one. We only
need to change the S gate to S* = S ** 3.
Args:
qubits: list of four qubits
"""
yield fswap(qubits[1], qubits[2]),
yield fermi_fourier_trans_2(qubits[0], qubits[1])
yield fermi_fourier_trans_2(qubits[2], qubits[3])
yield fswap(qubits[1], qubits[2])
yield fermi_fourier_trans_2(qubits[0], qubits[1])
yield cirq.S(qubits[2]) ** 3
yield fermi_fourier_trans_2(qubits[2], qubits[3])
yield fswap(qubits[1], qubits[2]) | def function[fermi_fourier_trans_inverse_conjugate_4, parameter[qubits]]:
constant[We will need to map the momentum states in the reversed order for
spin-down states to the position picture. This transformation can be
simply implemented the complex conjugate of the former one. We only
need to change the S gate to S* = S ** 3.
Args:
qubits: list of four qubits
]
<ast.Yield object at 0x7da1b217d0f0>
<ast.Yield object at 0x7da1b217e680>
<ast.Yield object at 0x7da1b1c3fc40>
<ast.Yield object at 0x7da1b1c3d870>
<ast.Yield object at 0x7da1b1f49300>
<ast.Yield object at 0x7da1b1f491b0>
<ast.Yield object at 0x7da1b1f4bcd0>
<ast.Yield object at 0x7da1b1f48940> | keyword[def] identifier[fermi_fourier_trans_inverse_conjugate_4] ( identifier[qubits] ):
literal[string]
keyword[yield] identifier[fswap] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ]),
keyword[yield] identifier[fermi_fourier_trans_2] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ])
keyword[yield] identifier[fermi_fourier_trans_2] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ])
keyword[yield] identifier[fswap] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ])
keyword[yield] identifier[fermi_fourier_trans_2] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ])
keyword[yield] identifier[cirq] . identifier[S] ( identifier[qubits] [ literal[int] ])** literal[int]
keyword[yield] identifier[fermi_fourier_trans_2] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ])
keyword[yield] identifier[fswap] ( identifier[qubits] [ literal[int] ], identifier[qubits] [ literal[int] ]) | def fermi_fourier_trans_inverse_conjugate_4(qubits):
"""We will need to map the momentum states in the reversed order for
spin-down states to the position picture. This transformation can be
simply implemented the complex conjugate of the former one. We only
need to change the S gate to S* = S ** 3.
Args:
qubits: list of four qubits
"""
yield (fswap(qubits[1], qubits[2]),)
yield fermi_fourier_trans_2(qubits[0], qubits[1])
yield fermi_fourier_trans_2(qubits[2], qubits[3])
yield fswap(qubits[1], qubits[2])
yield fermi_fourier_trans_2(qubits[0], qubits[1])
yield (cirq.S(qubits[2]) ** 3)
yield fermi_fourier_trans_2(qubits[2], qubits[3])
yield fswap(qubits[1], qubits[2]) |
def get_file(self, filename, dir='static'):
"""
get_file will search from apps directory
"""
if os.path.exists(filename):
return filename
dirs = self.apps
if dir:
fname = os.path.join(dir, filename)
else:
fname = filename
for d in reversed(dirs):
path = pkg.resource_filename(d, fname)
if os.path.exists(path):
return path
return None | def function[get_file, parameter[self, filename, dir]]:
constant[
get_file will search from apps directory
]
if call[name[os].path.exists, parameter[name[filename]]] begin[:]
return[name[filename]]
variable[dirs] assign[=] name[self].apps
if name[dir] begin[:]
variable[fname] assign[=] call[name[os].path.join, parameter[name[dir], name[filename]]]
for taget[name[d]] in starred[call[name[reversed], parameter[name[dirs]]]] begin[:]
variable[path] assign[=] call[name[pkg].resource_filename, parameter[name[d], name[fname]]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
return[name[path]]
return[constant[None]] | keyword[def] identifier[get_file] ( identifier[self] , identifier[filename] , identifier[dir] = literal[string] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
keyword[return] identifier[filename]
identifier[dirs] = identifier[self] . identifier[apps]
keyword[if] identifier[dir] :
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , identifier[filename] )
keyword[else] :
identifier[fname] = identifier[filename]
keyword[for] identifier[d] keyword[in] identifier[reversed] ( identifier[dirs] ):
identifier[path] = identifier[pkg] . identifier[resource_filename] ( identifier[d] , identifier[fname] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[return] identifier[path]
keyword[return] keyword[None] | def get_file(self, filename, dir='static'):
"""
get_file will search from apps directory
"""
if os.path.exists(filename):
return filename # depends on [control=['if'], data=[]]
dirs = self.apps
if dir:
fname = os.path.join(dir, filename) # depends on [control=['if'], data=[]]
else:
fname = filename
for d in reversed(dirs):
path = pkg.resource_filename(d, fname)
if os.path.exists(path):
return path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
return None |
def patched_function(self, *args, **kwargs):
"""
Step 3. Wrapped function calling.
"""
result = self.function(*args, **kwargs)
self.validate(result)
return result | def function[patched_function, parameter[self]]:
constant[
Step 3. Wrapped function calling.
]
variable[result] assign[=] call[name[self].function, parameter[<ast.Starred object at 0x7da1b0578bb0>]]
call[name[self].validate, parameter[name[result]]]
return[name[result]] | keyword[def] identifier[patched_function] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[result] = identifier[self] . identifier[function] (* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[validate] ( identifier[result] )
keyword[return] identifier[result] | def patched_function(self, *args, **kwargs):
"""
Step 3. Wrapped function calling.
"""
result = self.function(*args, **kwargs)
self.validate(result)
return result |
def _add_resourcegroupitem(group_item, scenario_id):
"""
Add a single resource group item (no DB flush, as it's an internal function)
"""
if group_item.id and group_item.id > 0:
try:
group_item_i = db.DBSession.query(ResourceGroupItem).filter(ResourceGroupItem.id == group_item.id).one()
except NoResultFound:
raise ResourceNotFoundError("ResourceGroupItem %s not found" % (group_item.id))
else:
group_item_i = ResourceGroupItem()
group_item_i.group_id = group_item.group_id
if scenario_id is not None:
group_item_i.scenario_id = scenario_id
db.DBSession.add(group_item_i)
ref_key = group_item.ref_key
group_item_i.ref_key = ref_key
if ref_key == 'NODE':
group_item_i.node_id =group_item.ref_id if group_item.ref_id else group_item.node_id
elif ref_key == 'LINK':
group_item_i.link_id =group_item.ref_id if group_item.ref_id else group_item.link_id
elif ref_key == 'GROUP':
group_item_i.subgroup_id = group_item.ref_id if group_item.ref_id else group_item.subgroup_id
return group_item_i | def function[_add_resourcegroupitem, parameter[group_item, scenario_id]]:
constant[
Add a single resource group item (no DB flush, as it's an internal function)
]
if <ast.BoolOp object at 0x7da20cabdf00> begin[:]
<ast.Try object at 0x7da20cabdd80>
variable[ref_key] assign[=] name[group_item].ref_key
name[group_item_i].ref_key assign[=] name[ref_key]
if compare[name[ref_key] equal[==] constant[NODE]] begin[:]
name[group_item_i].node_id assign[=] <ast.IfExp object at 0x7da20cabec50>
return[name[group_item_i]] | keyword[def] identifier[_add_resourcegroupitem] ( identifier[group_item] , identifier[scenario_id] ):
literal[string]
keyword[if] identifier[group_item] . identifier[id] keyword[and] identifier[group_item] . identifier[id] > literal[int] :
keyword[try] :
identifier[group_item_i] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[ResourceGroupItem] ). identifier[filter] ( identifier[ResourceGroupItem] . identifier[id] == identifier[group_item] . identifier[id] ). identifier[one] ()
keyword[except] identifier[NoResultFound] :
keyword[raise] identifier[ResourceNotFoundError] ( literal[string] %( identifier[group_item] . identifier[id] ))
keyword[else] :
identifier[group_item_i] = identifier[ResourceGroupItem] ()
identifier[group_item_i] . identifier[group_id] = identifier[group_item] . identifier[group_id]
keyword[if] identifier[scenario_id] keyword[is] keyword[not] keyword[None] :
identifier[group_item_i] . identifier[scenario_id] = identifier[scenario_id]
identifier[db] . identifier[DBSession] . identifier[add] ( identifier[group_item_i] )
identifier[ref_key] = identifier[group_item] . identifier[ref_key]
identifier[group_item_i] . identifier[ref_key] = identifier[ref_key]
keyword[if] identifier[ref_key] == literal[string] :
identifier[group_item_i] . identifier[node_id] = identifier[group_item] . identifier[ref_id] keyword[if] identifier[group_item] . identifier[ref_id] keyword[else] identifier[group_item] . identifier[node_id]
keyword[elif] identifier[ref_key] == literal[string] :
identifier[group_item_i] . identifier[link_id] = identifier[group_item] . identifier[ref_id] keyword[if] identifier[group_item] . identifier[ref_id] keyword[else] identifier[group_item] . identifier[link_id]
keyword[elif] identifier[ref_key] == literal[string] :
identifier[group_item_i] . identifier[subgroup_id] = identifier[group_item] . identifier[ref_id] keyword[if] identifier[group_item] . identifier[ref_id] keyword[else] identifier[group_item] . identifier[subgroup_id]
keyword[return] identifier[group_item_i] | def _add_resourcegroupitem(group_item, scenario_id):
"""
Add a single resource group item (no DB flush, as it's an internal function)
"""
if group_item.id and group_item.id > 0:
try:
group_item_i = db.DBSession.query(ResourceGroupItem).filter(ResourceGroupItem.id == group_item.id).one() # depends on [control=['try'], data=[]]
except NoResultFound:
raise ResourceNotFoundError('ResourceGroupItem %s not found' % group_item.id) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
group_item_i = ResourceGroupItem()
group_item_i.group_id = group_item.group_id
if scenario_id is not None:
group_item_i.scenario_id = scenario_id # depends on [control=['if'], data=['scenario_id']]
db.DBSession.add(group_item_i)
ref_key = group_item.ref_key
group_item_i.ref_key = ref_key
if ref_key == 'NODE':
group_item_i.node_id = group_item.ref_id if group_item.ref_id else group_item.node_id # depends on [control=['if'], data=[]]
elif ref_key == 'LINK':
group_item_i.link_id = group_item.ref_id if group_item.ref_id else group_item.link_id # depends on [control=['if'], data=[]]
elif ref_key == 'GROUP':
group_item_i.subgroup_id = group_item.ref_id if group_item.ref_id else group_item.subgroup_id # depends on [control=['if'], data=[]]
return group_item_i |
def fidx(right, left, left_fk=None):
"""
Re-indexes a series or data frame (right) to align with
another (left) series or data frame via foreign key relationship.
The index of the right must be unique.
This is similar to misc.reindex, but allows for data frame
re-indexes and supports re-indexing data frames or
series with a multi-index.
Parameters:
-----------
right: pandas.DataFrame or pandas.Series
Series or data frame to re-index from.
left: pandas.Series or pandas.DataFrame
Series or data frame to re-index to.
If a series is provided, its values serve as the foreign keys.
If a data frame is provided, one or more columns may be used
as foreign keys, must specify the ``left_fk`` argument to
specify which column(s) will serve as keys.
left_fk: optional, str or list of str
Used when the left is a data frame, specifies the column(s) in
the left to serve as foreign keys. The specified columns' ordering
must match the order of the multi-index in the right.
Returns:
--------
pandas.Series or pandas.DataFrame with column(s) from
right aligned with the left.
"""
# ensure that we can align correctly
if not right.index.is_unique:
raise ValueError("The right's index must be unique!")
# simpler case:
# if the left (target) is a single series then just re-index to it
if isinstance(left_fk, str):
left = left[left_fk]
if isinstance(left, pd.Series):
a = right.reindex(left)
a.index = left.index
return a
# when reindexing using multiple columns (composite foreign key)
# i.e. the right has a multindex
# if a series for the right provided, convert to a data frame
if isinstance(right, pd.Series):
right = right.to_frame('right')
right_cols = 'right'
else:
right_cols = right.columns
# do the merge
return pd.merge(
left=left,
right=right,
left_on=left_fk,
right_index=True,
how='left'
)[right_cols] | def function[fidx, parameter[right, left, left_fk]]:
constant[
Re-indexes a series or data frame (right) to align with
another (left) series or data frame via foreign key relationship.
The index of the right must be unique.
This is similar to misc.reindex, but allows for data frame
re-indexes and supports re-indexing data frames or
series with a multi-index.
Parameters:
-----------
right: pandas.DataFrame or pandas.Series
Series or data frame to re-index from.
left: pandas.Series or pandas.DataFrame
Series or data frame to re-index to.
If a series is provided, its values serve as the foreign keys.
If a data frame is provided, one or more columns may be used
as foreign keys, must specify the ``left_fk`` argument to
specify which column(s) will serve as keys.
left_fk: optional, str or list of str
Used when the left is a data frame, specifies the column(s) in
the left to serve as foreign keys. The specified columns' ordering
must match the order of the multi-index in the right.
Returns:
--------
pandas.Series or pandas.DataFrame with column(s) from
right aligned with the left.
]
if <ast.UnaryOp object at 0x7da18f09c460> begin[:]
<ast.Raise object at 0x7da18f09cb50>
if call[name[isinstance], parameter[name[left_fk], name[str]]] begin[:]
variable[left] assign[=] call[name[left]][name[left_fk]]
if call[name[isinstance], parameter[name[left], name[pd].Series]] begin[:]
variable[a] assign[=] call[name[right].reindex, parameter[name[left]]]
name[a].index assign[=] name[left].index
return[name[a]]
if call[name[isinstance], parameter[name[right], name[pd].Series]] begin[:]
variable[right] assign[=] call[name[right].to_frame, parameter[constant[right]]]
variable[right_cols] assign[=] constant[right]
return[call[call[name[pd].merge, parameter[]]][name[right_cols]]] | keyword[def] identifier[fidx] ( identifier[right] , identifier[left] , identifier[left_fk] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[right] . identifier[index] . identifier[is_unique] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[left_fk] , identifier[str] ):
identifier[left] = identifier[left] [ identifier[left_fk] ]
keyword[if] identifier[isinstance] ( identifier[left] , identifier[pd] . identifier[Series] ):
identifier[a] = identifier[right] . identifier[reindex] ( identifier[left] )
identifier[a] . identifier[index] = identifier[left] . identifier[index]
keyword[return] identifier[a]
keyword[if] identifier[isinstance] ( identifier[right] , identifier[pd] . identifier[Series] ):
identifier[right] = identifier[right] . identifier[to_frame] ( literal[string] )
identifier[right_cols] = literal[string]
keyword[else] :
identifier[right_cols] = identifier[right] . identifier[columns]
keyword[return] identifier[pd] . identifier[merge] (
identifier[left] = identifier[left] ,
identifier[right] = identifier[right] ,
identifier[left_on] = identifier[left_fk] ,
identifier[right_index] = keyword[True] ,
identifier[how] = literal[string]
)[ identifier[right_cols] ] | def fidx(right, left, left_fk=None):
"""
Re-indexes a series or data frame (right) to align with
another (left) series or data frame via foreign key relationship.
The index of the right must be unique.
This is similar to misc.reindex, but allows for data frame
re-indexes and supports re-indexing data frames or
series with a multi-index.
Parameters:
-----------
right: pandas.DataFrame or pandas.Series
Series or data frame to re-index from.
left: pandas.Series or pandas.DataFrame
Series or data frame to re-index to.
If a series is provided, its values serve as the foreign keys.
If a data frame is provided, one or more columns may be used
as foreign keys, must specify the ``left_fk`` argument to
specify which column(s) will serve as keys.
left_fk: optional, str or list of str
Used when the left is a data frame, specifies the column(s) in
the left to serve as foreign keys. The specified columns' ordering
must match the order of the multi-index in the right.
Returns:
--------
pandas.Series or pandas.DataFrame with column(s) from
right aligned with the left.
"""
# ensure that we can align correctly
if not right.index.is_unique:
raise ValueError("The right's index must be unique!") # depends on [control=['if'], data=[]]
# simpler case:
# if the left (target) is a single series then just re-index to it
if isinstance(left_fk, str):
left = left[left_fk] # depends on [control=['if'], data=[]]
if isinstance(left, pd.Series):
a = right.reindex(left)
a.index = left.index
return a # depends on [control=['if'], data=[]]
# when reindexing using multiple columns (composite foreign key)
# i.e. the right has a multindex
# if a series for the right provided, convert to a data frame
if isinstance(right, pd.Series):
right = right.to_frame('right')
right_cols = 'right' # depends on [control=['if'], data=[]]
else:
right_cols = right.columns
# do the merge
return pd.merge(left=left, right=right, left_on=left_fk, right_index=True, how='left')[right_cols] |
def delete(args):
"""
cdstarcat delete OID
Delete an object specified by OID from CDSTAR.
"""
with _catalog(args) as cat:
n = len(cat)
cat.delete(args.args[0])
args.log.info('{0} objects deleted'.format(n - len(cat)))
return n - len(cat) | def function[delete, parameter[args]]:
constant[
cdstarcat delete OID
Delete an object specified by OID from CDSTAR.
]
with call[name[_catalog], parameter[name[args]]] begin[:]
variable[n] assign[=] call[name[len], parameter[name[cat]]]
call[name[cat].delete, parameter[call[name[args].args][constant[0]]]]
call[name[args].log.info, parameter[call[constant[{0} objects deleted].format, parameter[binary_operation[name[n] - call[name[len], parameter[name[cat]]]]]]]]
return[binary_operation[name[n] - call[name[len], parameter[name[cat]]]]] | keyword[def] identifier[delete] ( identifier[args] ):
literal[string]
keyword[with] identifier[_catalog] ( identifier[args] ) keyword[as] identifier[cat] :
identifier[n] = identifier[len] ( identifier[cat] )
identifier[cat] . identifier[delete] ( identifier[args] . identifier[args] [ literal[int] ])
identifier[args] . identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[n] - identifier[len] ( identifier[cat] )))
keyword[return] identifier[n] - identifier[len] ( identifier[cat] ) | def delete(args):
"""
cdstarcat delete OID
Delete an object specified by OID from CDSTAR.
"""
with _catalog(args) as cat:
n = len(cat)
cat.delete(args.args[0])
args.log.info('{0} objects deleted'.format(n - len(cat)))
return n - len(cat) # depends on [control=['with'], data=['cat']] |
def describe(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
'''
res = __salt__['boto_rds.exists'](name, tags, region, key, keyid,
profile)
if not res.get('exists'):
return {'exists': bool(res), 'message':
'RDS instance {0} does not exist.'.format(name)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
rds = [
i for i in rds.get('DBInstances', [])
if i.get('DBInstanceIdentifier') == name
].pop(0)
if rds:
keys = ('DBInstanceIdentifier', 'DBInstanceClass', 'Engine',
'DBInstanceStatus', 'DBName', 'AllocatedStorage',
'PreferredBackupWindow', 'BackupRetentionPeriod',
'AvailabilityZone', 'PreferredMaintenanceWindow',
'LatestRestorableTime', 'EngineVersion',
'AutoMinorVersionUpgrade', 'LicenseModel',
'Iops', 'CharacterSetName', 'PubliclyAccessible',
'StorageType', 'TdeCredentialArn', 'DBInstancePort',
'DBClusterIdentifier', 'StorageEncrypted', 'KmsKeyId',
'DbiResourceId', 'CACertificateIdentifier',
'CopyTagsToSnapshot', 'MonitoringInterval',
'MonitoringRoleArn', 'PromotionTier',
'DomainMemberships')
return {'rds': dict([(k, rds.get(k)) for k in keys])}
else:
return {'rds': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
except IndexError:
return {'rds': None} | def function[describe, parameter[name, tags, region, key, keyid, profile]]:
constant[
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
]
variable[res] assign[=] call[call[name[__salt__]][constant[boto_rds.exists]], parameter[name[name], name[tags], name[region], name[key], name[keyid], name[profile]]]
if <ast.UnaryOp object at 0x7da1b2184c10> begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b2187b20>, <ast.Constant object at 0x7da1b2186290>], [<ast.Call object at 0x7da1b2186740>, <ast.Call object at 0x7da1b2184af0>]]]
<ast.Try object at 0x7da1b2185000> | keyword[def] identifier[describe] ( identifier[name] , identifier[tags] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
identifier[res] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[tags] , identifier[region] , identifier[key] , identifier[keyid] ,
identifier[profile] )
keyword[if] keyword[not] identifier[res] . identifier[get] ( literal[string] ):
keyword[return] { literal[string] : identifier[bool] ( identifier[res] ), literal[string] :
literal[string] . identifier[format] ( identifier[name] )}
keyword[try] :
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[conn] :
keyword[return] { literal[string] : identifier[bool] ( identifier[conn] )}
identifier[rds] = identifier[conn] . identifier[describe_db_instances] ( identifier[DBInstanceIdentifier] = identifier[name] )
identifier[rds] =[
identifier[i] keyword[for] identifier[i] keyword[in] identifier[rds] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[i] . identifier[get] ( literal[string] )== identifier[name]
]. identifier[pop] ( literal[int] )
keyword[if] identifier[rds] :
identifier[keys] =( literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] )
keyword[return] { literal[string] : identifier[dict] ([( identifier[k] , identifier[rds] . identifier[get] ( identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[keys] ])}
keyword[else] :
keyword[return] { literal[string] : keyword[None] }
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
keyword[return] { literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )}
keyword[except] identifier[IndexError] :
keyword[return] { literal[string] : keyword[None] } | def describe(name, tags=None, region=None, key=None, keyid=None, profile=None):
"""
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
"""
res = __salt__['boto_rds.exists'](name, tags, region, key, keyid, profile)
if not res.get('exists'):
return {'exists': bool(res), 'message': 'RDS instance {0} does not exist.'.format(name)} # depends on [control=['if'], data=[]]
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)} # depends on [control=['if'], data=[]]
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
rds = [i for i in rds.get('DBInstances', []) if i.get('DBInstanceIdentifier') == name].pop(0)
if rds:
keys = ('DBInstanceIdentifier', 'DBInstanceClass', 'Engine', 'DBInstanceStatus', 'DBName', 'AllocatedStorage', 'PreferredBackupWindow', 'BackupRetentionPeriod', 'AvailabilityZone', 'PreferredMaintenanceWindow', 'LatestRestorableTime', 'EngineVersion', 'AutoMinorVersionUpgrade', 'LicenseModel', 'Iops', 'CharacterSetName', 'PubliclyAccessible', 'StorageType', 'TdeCredentialArn', 'DBInstancePort', 'DBClusterIdentifier', 'StorageEncrypted', 'KmsKeyId', 'DbiResourceId', 'CACertificateIdentifier', 'CopyTagsToSnapshot', 'MonitoringInterval', 'MonitoringRoleArn', 'PromotionTier', 'DomainMemberships')
return {'rds': dict([(k, rds.get(k)) for k in keys])} # depends on [control=['if'], data=[]]
else:
return {'rds': None} # depends on [control=['try'], data=[]]
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']]
except IndexError:
return {'rds': None} # depends on [control=['except'], data=[]] |
def calibrate_white(self):
"""
The RGB raw values are on a scale of 0-1020 but you never see a value
anywhere close to 1020. This function is designed to be called when
the sensor is placed over a white object in order to figure out what
are the maximum RGB values the robot can expect to see. We will use
these maximum values to scale future raw values to a 0-255 range in
rgb().
If you never call this function red_max, green_max, and blue_max will
use a default value of 300. This default was selected by measuring
the RGB values of a white sheet of paper in a well lit room.
Note that there are several variables that influence the maximum RGB
values detected by the color sensor
- the distance of the color sensor to the white object
- the amount of light in the room
- shadows that the robot casts on the sensor
"""
(self.red_max, self.green_max, self.blue_max) = self.raw | def function[calibrate_white, parameter[self]]:
constant[
The RGB raw values are on a scale of 0-1020 but you never see a value
anywhere close to 1020. This function is designed to be called when
the sensor is placed over a white object in order to figure out what
are the maximum RGB values the robot can expect to see. We will use
these maximum values to scale future raw values to a 0-255 range in
rgb().
If you never call this function red_max, green_max, and blue_max will
use a default value of 300. This default was selected by measuring
the RGB values of a white sheet of paper in a well lit room.
Note that there are several variables that influence the maximum RGB
values detected by the color sensor
- the distance of the color sensor to the white object
- the amount of light in the room
- shadows that the robot casts on the sensor
]
<ast.Tuple object at 0x7da1b16b0550> assign[=] name[self].raw | keyword[def] identifier[calibrate_white] ( identifier[self] ):
literal[string]
( identifier[self] . identifier[red_max] , identifier[self] . identifier[green_max] , identifier[self] . identifier[blue_max] )= identifier[self] . identifier[raw] | def calibrate_white(self):
"""
The RGB raw values are on a scale of 0-1020 but you never see a value
anywhere close to 1020. This function is designed to be called when
the sensor is placed over a white object in order to figure out what
are the maximum RGB values the robot can expect to see. We will use
these maximum values to scale future raw values to a 0-255 range in
rgb().
If you never call this function red_max, green_max, and blue_max will
use a default value of 300. This default was selected by measuring
the RGB values of a white sheet of paper in a well lit room.
Note that there are several variables that influence the maximum RGB
values detected by the color sensor
- the distance of the color sensor to the white object
- the amount of light in the room
- shadows that the robot casts on the sensor
"""
(self.red_max, self.green_max, self.blue_max) = self.raw |
def V_super_stock(self, V_stock, C_super_stock):
"""Return the volume of super (more concentrated) stock that must be
diluted for the desired stock volume and stock concentration.
:param V_stock: Volume of the stock of material
:type V_stock: float
:param C_super_stock: Concentration of the super stock
:type C_super_stock: float
:return: Volume of super stock to dilute
:rtype: float
"""
return Stock.V_super_stock(self, V_stock, self._C_stock, C_super_stock) | def function[V_super_stock, parameter[self, V_stock, C_super_stock]]:
constant[Return the volume of super (more concentrated) stock that must be
diluted for the desired stock volume and stock concentration.
:param V_stock: Volume of the stock of material
:type V_stock: float
:param C_super_stock: Concentration of the super stock
:type C_super_stock: float
:return: Volume of super stock to dilute
:rtype: float
]
return[call[name[Stock].V_super_stock, parameter[name[self], name[V_stock], name[self]._C_stock, name[C_super_stock]]]] | keyword[def] identifier[V_super_stock] ( identifier[self] , identifier[V_stock] , identifier[C_super_stock] ):
literal[string]
keyword[return] identifier[Stock] . identifier[V_super_stock] ( identifier[self] , identifier[V_stock] , identifier[self] . identifier[_C_stock] , identifier[C_super_stock] ) | def V_super_stock(self, V_stock, C_super_stock):
"""Return the volume of super (more concentrated) stock that must be
diluted for the desired stock volume and stock concentration.
:param V_stock: Volume of the stock of material
:type V_stock: float
:param C_super_stock: Concentration of the super stock
:type C_super_stock: float
:return: Volume of super stock to dilute
:rtype: float
"""
return Stock.V_super_stock(self, V_stock, self._C_stock, C_super_stock) |
def get_max_tail_check(y_Arai, y_tail, t_Arai, tail_temps, n_tail):
"""
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs
"""
if not n_tail:
return float('nan'), []
tail_compare = []
y_Arai_compare = []
for temp in tail_temps[:n_tail]:
tail_index = list(tail_temps).index(temp)
tail_check = y_tail[tail_index]
tail_compare.append(tail_check)
arai_index = list(t_Arai).index(temp)
nrm_orig = y_Arai[arai_index]
y_Arai_compare.append(nrm_orig)
diffs = numpy.array(y_Arai_compare) - numpy.array(tail_compare)
abs_diffs = abs(diffs)
max_check = max(abs_diffs)
return max_check, diffs | def function[get_max_tail_check, parameter[y_Arai, y_tail, t_Arai, tail_temps, n_tail]]:
constant[
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs
]
if <ast.UnaryOp object at 0x7da20e955180> begin[:]
return[tuple[[<ast.Call object at 0x7da20e957520>, <ast.List object at 0x7da20e957700>]]]
variable[tail_compare] assign[=] list[[]]
variable[y_Arai_compare] assign[=] list[[]]
for taget[name[temp]] in starred[call[name[tail_temps]][<ast.Slice object at 0x7da20e9566b0>]] begin[:]
variable[tail_index] assign[=] call[call[name[list], parameter[name[tail_temps]]].index, parameter[name[temp]]]
variable[tail_check] assign[=] call[name[y_tail]][name[tail_index]]
call[name[tail_compare].append, parameter[name[tail_check]]]
variable[arai_index] assign[=] call[call[name[list], parameter[name[t_Arai]]].index, parameter[name[temp]]]
variable[nrm_orig] assign[=] call[name[y_Arai]][name[arai_index]]
call[name[y_Arai_compare].append, parameter[name[nrm_orig]]]
variable[diffs] assign[=] binary_operation[call[name[numpy].array, parameter[name[y_Arai_compare]]] - call[name[numpy].array, parameter[name[tail_compare]]]]
variable[abs_diffs] assign[=] call[name[abs], parameter[name[diffs]]]
variable[max_check] assign[=] call[name[max], parameter[name[abs_diffs]]]
return[tuple[[<ast.Name object at 0x7da20e9568c0>, <ast.Name object at 0x7da20e955b10>]]] | keyword[def] identifier[get_max_tail_check] ( identifier[y_Arai] , identifier[y_tail] , identifier[t_Arai] , identifier[tail_temps] , identifier[n_tail] ):
literal[string]
keyword[if] keyword[not] identifier[n_tail] :
keyword[return] identifier[float] ( literal[string] ),[]
identifier[tail_compare] =[]
identifier[y_Arai_compare] =[]
keyword[for] identifier[temp] keyword[in] identifier[tail_temps] [: identifier[n_tail] ]:
identifier[tail_index] = identifier[list] ( identifier[tail_temps] ). identifier[index] ( identifier[temp] )
identifier[tail_check] = identifier[y_tail] [ identifier[tail_index] ]
identifier[tail_compare] . identifier[append] ( identifier[tail_check] )
identifier[arai_index] = identifier[list] ( identifier[t_Arai] ). identifier[index] ( identifier[temp] )
identifier[nrm_orig] = identifier[y_Arai] [ identifier[arai_index] ]
identifier[y_Arai_compare] . identifier[append] ( identifier[nrm_orig] )
identifier[diffs] = identifier[numpy] . identifier[array] ( identifier[y_Arai_compare] )- identifier[numpy] . identifier[array] ( identifier[tail_compare] )
identifier[abs_diffs] = identifier[abs] ( identifier[diffs] )
identifier[max_check] = identifier[max] ( identifier[abs_diffs] )
keyword[return] identifier[max_check] , identifier[diffs] | def get_max_tail_check(y_Arai, y_tail, t_Arai, tail_temps, n_tail):
"""
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs
"""
if not n_tail:
return (float('nan'), []) # depends on [control=['if'], data=[]]
tail_compare = []
y_Arai_compare = []
for temp in tail_temps[:n_tail]:
tail_index = list(tail_temps).index(temp)
tail_check = y_tail[tail_index]
tail_compare.append(tail_check)
arai_index = list(t_Arai).index(temp)
nrm_orig = y_Arai[arai_index]
y_Arai_compare.append(nrm_orig) # depends on [control=['for'], data=['temp']]
diffs = numpy.array(y_Arai_compare) - numpy.array(tail_compare)
abs_diffs = abs(diffs)
max_check = max(abs_diffs)
return (max_check, diffs) |
def clean_features(df):
"""Fixes up columns of the passed DataFrame, such as casting T/F columns to
boolean and filling in NaNs for team and opp.
:param df: DataFrame of play-by-play data.
:returns: Dataframe with cleaned columns.
"""
df = pd.DataFrame(df)
bool_vals = set([True, False, None, np.nan])
sparse_cols = sparse_lineup_cols(df)
for col in df:
# make indicator columns boolean type (and fill in NaNs)
if set(df[col].unique()[:5]) <= bool_vals:
df[col] = (df[col] == True)
# fill NaN's in sparse lineup columns to 0
elif col in sparse_cols:
df[col] = df[col].fillna(0)
# fix free throw columns on technicals
df.loc[df.is_tech_fta, ['fta_num', 'tot_fta']] = 1
# fill in NaN's/fix off_team and def_team columns
df.off_team.fillna(method='bfill', inplace=True)
df.def_team.fillna(method='bfill', inplace=True)
df.off_team.fillna(method='ffill', inplace=True)
df.def_team.fillna(method='ffill', inplace=True)
return df | def function[clean_features, parameter[df]]:
constant[Fixes up columns of the passed DataFrame, such as casting T/F columns to
boolean and filling in NaNs for team and opp.
:param df: DataFrame of play-by-play data.
:returns: Dataframe with cleaned columns.
]
variable[df] assign[=] call[name[pd].DataFrame, parameter[name[df]]]
variable[bool_vals] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b26af5b0>, <ast.Constant object at 0x7da1b26afbb0>, <ast.Constant object at 0x7da1b26ad270>, <ast.Attribute object at 0x7da1b26acbb0>]]]]
variable[sparse_cols] assign[=] call[name[sparse_lineup_cols], parameter[name[df]]]
for taget[name[col]] in starred[name[df]] begin[:]
if compare[call[name[set], parameter[call[call[call[name[df]][name[col]].unique, parameter[]]][<ast.Slice object at 0x7da1b26ae770>]]] less_or_equal[<=] name[bool_vals]] begin[:]
call[name[df]][name[col]] assign[=] compare[call[name[df]][name[col]] equal[==] constant[True]]
call[name[df].loc][tuple[[<ast.Attribute object at 0x7da1b02dbfd0>, <ast.List object at 0x7da1b02dbf70>]]] assign[=] constant[1]
call[name[df].off_team.fillna, parameter[]]
call[name[df].def_team.fillna, parameter[]]
call[name[df].off_team.fillna, parameter[]]
call[name[df].def_team.fillna, parameter[]]
return[name[df]] | keyword[def] identifier[clean_features] ( identifier[df] ):
literal[string]
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[df] )
identifier[bool_vals] = identifier[set] ([ keyword[True] , keyword[False] , keyword[None] , identifier[np] . identifier[nan] ])
identifier[sparse_cols] = identifier[sparse_lineup_cols] ( identifier[df] )
keyword[for] identifier[col] keyword[in] identifier[df] :
keyword[if] identifier[set] ( identifier[df] [ identifier[col] ]. identifier[unique] ()[: literal[int] ])<= identifier[bool_vals] :
identifier[df] [ identifier[col] ]=( identifier[df] [ identifier[col] ]== keyword[True] )
keyword[elif] identifier[col] keyword[in] identifier[sparse_cols] :
identifier[df] [ identifier[col] ]= identifier[df] [ identifier[col] ]. identifier[fillna] ( literal[int] )
identifier[df] . identifier[loc] [ identifier[df] . identifier[is_tech_fta] ,[ literal[string] , literal[string] ]]= literal[int]
identifier[df] . identifier[off_team] . identifier[fillna] ( identifier[method] = literal[string] , identifier[inplace] = keyword[True] )
identifier[df] . identifier[def_team] . identifier[fillna] ( identifier[method] = literal[string] , identifier[inplace] = keyword[True] )
identifier[df] . identifier[off_team] . identifier[fillna] ( identifier[method] = literal[string] , identifier[inplace] = keyword[True] )
identifier[df] . identifier[def_team] . identifier[fillna] ( identifier[method] = literal[string] , identifier[inplace] = keyword[True] )
keyword[return] identifier[df] | def clean_features(df):
"""Fixes up columns of the passed DataFrame, such as casting T/F columns to
boolean and filling in NaNs for team and opp.
:param df: DataFrame of play-by-play data.
:returns: Dataframe with cleaned columns.
"""
df = pd.DataFrame(df)
bool_vals = set([True, False, None, np.nan])
sparse_cols = sparse_lineup_cols(df)
for col in df:
# make indicator columns boolean type (and fill in NaNs)
if set(df[col].unique()[:5]) <= bool_vals:
df[col] = df[col] == True # depends on [control=['if'], data=[]]
# fill NaN's in sparse lineup columns to 0
elif col in sparse_cols:
df[col] = df[col].fillna(0) # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']]
# fix free throw columns on technicals
df.loc[df.is_tech_fta, ['fta_num', 'tot_fta']] = 1
# fill in NaN's/fix off_team and def_team columns
df.off_team.fillna(method='bfill', inplace=True)
df.def_team.fillna(method='bfill', inplace=True)
df.off_team.fillna(method='ffill', inplace=True)
df.def_team.fillna(method='ffill', inplace=True)
return df |
def url(self):
"""
Constructs and returns the View URL.
:returns: View URL
"""
if self._partition_key:
base_url = self.design_doc.document_partition_url(
self._partition_key)
else:
base_url = self.design_doc.document_url
return '/'.join((
base_url,
'_view',
self.view_name
)) | def function[url, parameter[self]]:
constant[
Constructs and returns the View URL.
:returns: View URL
]
if name[self]._partition_key begin[:]
variable[base_url] assign[=] call[name[self].design_doc.document_partition_url, parameter[name[self]._partition_key]]
return[call[constant[/].join, parameter[tuple[[<ast.Name object at 0x7da20c76d390>, <ast.Constant object at 0x7da20c76e740>, <ast.Attribute object at 0x7da20c76c7c0>]]]]] | keyword[def] identifier[url] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_partition_key] :
identifier[base_url] = identifier[self] . identifier[design_doc] . identifier[document_partition_url] (
identifier[self] . identifier[_partition_key] )
keyword[else] :
identifier[base_url] = identifier[self] . identifier[design_doc] . identifier[document_url]
keyword[return] literal[string] . identifier[join] ((
identifier[base_url] ,
literal[string] ,
identifier[self] . identifier[view_name]
)) | def url(self):
"""
Constructs and returns the View URL.
:returns: View URL
"""
if self._partition_key:
base_url = self.design_doc.document_partition_url(self._partition_key) # depends on [control=['if'], data=[]]
else:
base_url = self.design_doc.document_url
return '/'.join((base_url, '_view', self.view_name)) |
def _get_environment_updates(self, display_all_distributions=False):
"""
Check all pacakges installed in the environment to see if there are
any updates availalble.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date. Defaults to ``False``.
Returns:
list: A list of Update objects ordered based on ``instance.name``.
"""
updates = []
for distribution in self.pip.get_installed_distributions():
versions = self.get_available_versions(distribution.project_name)
max_version = max(versions.keys()) if versions else UNKNOW_NUM
update = None
distribution_version = self._parse_version(distribution.version)
if versions and max_version > distribution_version:
update = Update(
distribution.project_name,
distribution.version,
versions[max_version],
prelease=max_version[-1]
)
elif (
display_all_distributions and
max_version == distribution_version
):
update = Update(
distribution.project_name,
distribution.version,
versions[max_version],
)
elif display_all_distributions:
update = Update(
distribution.project_name,
distribution.version,
UNKNOWN
)
if update:
updates.append(update)
return sorted(updates, key=lambda x: x.name) | def function[_get_environment_updates, parameter[self, display_all_distributions]]:
constant[
Check all pacakges installed in the environment to see if there are
any updates availalble.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date. Defaults to ``False``.
Returns:
list: A list of Update objects ordered based on ``instance.name``.
]
variable[updates] assign[=] list[[]]
for taget[name[distribution]] in starred[call[name[self].pip.get_installed_distributions, parameter[]]] begin[:]
variable[versions] assign[=] call[name[self].get_available_versions, parameter[name[distribution].project_name]]
variable[max_version] assign[=] <ast.IfExp object at 0x7da20e956b00>
variable[update] assign[=] constant[None]
variable[distribution_version] assign[=] call[name[self]._parse_version, parameter[name[distribution].version]]
if <ast.BoolOp object at 0x7da20e955c60> begin[:]
variable[update] assign[=] call[name[Update], parameter[name[distribution].project_name, name[distribution].version, call[name[versions]][name[max_version]]]]
if name[update] begin[:]
call[name[updates].append, parameter[name[update]]]
return[call[name[sorted], parameter[name[updates]]]] | keyword[def] identifier[_get_environment_updates] ( identifier[self] , identifier[display_all_distributions] = keyword[False] ):
literal[string]
identifier[updates] =[]
keyword[for] identifier[distribution] keyword[in] identifier[self] . identifier[pip] . identifier[get_installed_distributions] ():
identifier[versions] = identifier[self] . identifier[get_available_versions] ( identifier[distribution] . identifier[project_name] )
identifier[max_version] = identifier[max] ( identifier[versions] . identifier[keys] ()) keyword[if] identifier[versions] keyword[else] identifier[UNKNOW_NUM]
identifier[update] = keyword[None]
identifier[distribution_version] = identifier[self] . identifier[_parse_version] ( identifier[distribution] . identifier[version] )
keyword[if] identifier[versions] keyword[and] identifier[max_version] > identifier[distribution_version] :
identifier[update] = identifier[Update] (
identifier[distribution] . identifier[project_name] ,
identifier[distribution] . identifier[version] ,
identifier[versions] [ identifier[max_version] ],
identifier[prelease] = identifier[max_version] [- literal[int] ]
)
keyword[elif] (
identifier[display_all_distributions] keyword[and]
identifier[max_version] == identifier[distribution_version]
):
identifier[update] = identifier[Update] (
identifier[distribution] . identifier[project_name] ,
identifier[distribution] . identifier[version] ,
identifier[versions] [ identifier[max_version] ],
)
keyword[elif] identifier[display_all_distributions] :
identifier[update] = identifier[Update] (
identifier[distribution] . identifier[project_name] ,
identifier[distribution] . identifier[version] ,
identifier[UNKNOWN]
)
keyword[if] identifier[update] :
identifier[updates] . identifier[append] ( identifier[update] )
keyword[return] identifier[sorted] ( identifier[updates] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[name] ) | def _get_environment_updates(self, display_all_distributions=False):
"""
Check all pacakges installed in the environment to see if there are
any updates availalble.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date. Defaults to ``False``.
Returns:
list: A list of Update objects ordered based on ``instance.name``.
"""
updates = []
for distribution in self.pip.get_installed_distributions():
versions = self.get_available_versions(distribution.project_name)
max_version = max(versions.keys()) if versions else UNKNOW_NUM
update = None
distribution_version = self._parse_version(distribution.version)
if versions and max_version > distribution_version:
update = Update(distribution.project_name, distribution.version, versions[max_version], prelease=max_version[-1]) # depends on [control=['if'], data=[]]
elif display_all_distributions and max_version == distribution_version:
update = Update(distribution.project_name, distribution.version, versions[max_version]) # depends on [control=['if'], data=[]]
elif display_all_distributions:
update = Update(distribution.project_name, distribution.version, UNKNOWN) # depends on [control=['if'], data=[]]
if update:
updates.append(update) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['distribution']]
return sorted(updates, key=lambda x: x.name) |
def spectrum(clm, normalization='4pi', degrees=None, lmax=None,
convention='power', unit='per_l', base=10.):
"""
Return the spectrum of the spherical harmonic coefficients as a function
of spherical harmonic degree.
Usage
-----
array = spectrum(clm, [normalization, degrees, lmax, convention,
unit, base])
Returns
-------
array : ndarray, shape (len(degrees))
1-D ndarray of the spectrum.
Parameters
----------
clm : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized coefficients,
respectively.
lmax : int, optional, default = len(clm[0,:,0]) - 1.
Maximum spherical harmonic degree to output.
degrees : ndarray, optional, default = numpy.arange(lmax+1)
Array containing the spherical harmonic degrees where the spectrum
is computed.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum, 'energy'
for energy spectrum, and 'l2norm' for the l2-norm spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This function returns either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. The
l2-norm is the sum of the magnitude of the coefficients squared.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, and is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to
the total spectrum from all angular orders over an infinitessimal
logarithmic degree band. The contrubution in the band dlog_a(l) is
spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where
spectrum(l, 'per_dlogl) is equal to spectrum(l, 'per_l')*l*log(a).
"""
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError("The normalization must be '4pi', 'ortho', " +
"'schmidt', or 'unnorm'. Input value was {:s}."
.format(repr(normalization)))
if convention.lower() not in ('power', 'energy', 'l2norm'):
raise ValueError("convention must be 'power', 'energy', or " +
"'l2norm'. Input value was {:s}"
.format(repr(convention)))
if unit.lower() not in ('per_l', 'per_lm', 'per_dlogl'):
raise ValueError("unit must be 'per_l', 'per_lm', or 'per_dlogl'." +
"Input value was {:s}".format(repr(unit)))
if lmax is None:
lmax = len(clm[0, :, 0]) - 1
if degrees is None:
degrees = _np.arange(lmax+1)
array = _np.empty(len(degrees))
if normalization.lower() == 'unnorm':
if convention.lower() == 'l2norm':
raise ValueError("convention can not be set to 'l2norm' when " +
"using unnormalized harmonics.")
for i, l in enumerate(degrees):
ms = _np.arange(l+1)
conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
if _np.iscomplexobj(clm):
array[i] = (conv[0:l + 1] * clm[0, l, 0:l + 1] *
clm[0, l, 0:l + 1].conjugate()).real.sum() + \
(conv[1:l + 1] * clm[1, l, 1:l + 1] *
clm[1, l, 1:l + 1].conjugate()).real.sum()
else:
conv[1:l + 1] = conv[1:l + 1] / 2.
array[i] = (conv[0:l + 1] * clm[0, l, 0:l+1]**2).sum() + \
(conv[1:l + 1] * clm[1, l, 1:l+1]**2).sum()
else:
for i, l in enumerate(degrees):
if _np.iscomplexobj(clm):
array[i] = (clm[0, l, 0:l + 1] *
clm[0, l, 0:l + 1].conjugate()).real.sum() + \
(clm[1, l, 1:l + 1] *
clm[1, l, 1:l + 1].conjugate()).real.sum()
else:
array[i] = (clm[0, l, 0:l+1]**2).sum() + \
(clm[1, l, 1:l+1]**2).sum()
if convention.lower() == 'l2norm':
return array
else:
if normalization.lower() == '4pi':
pass
elif normalization.lower() == 'schmidt':
array /= (2. * degrees + 1.)
elif normalization.lower() == 'ortho':
array /= (4. * _np.pi)
if convention.lower() == 'energy':
array *= 4. * _np.pi
if unit.lower() == 'per_l':
pass
elif unit.lower() == 'per_lm':
array /= (2. * degrees + 1.)
elif unit.lower() == 'per_dlogl':
array *= degrees * _np.log(base)
return array | def function[spectrum, parameter[clm, normalization, degrees, lmax, convention, unit, base]]:
constant[
Return the spectrum of the spherical harmonic coefficients as a function
of spherical harmonic degree.
Usage
-----
array = spectrum(clm, [normalization, degrees, lmax, convention,
unit, base])
Returns
-------
array : ndarray, shape (len(degrees))
1-D ndarray of the spectrum.
Parameters
----------
clm : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized coefficients,
respectively.
lmax : int, optional, default = len(clm[0,:,0]) - 1.
Maximum spherical harmonic degree to output.
degrees : ndarray, optional, default = numpy.arange(lmax+1)
Array containing the spherical harmonic degrees where the spectrum
is computed.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum, 'energy'
for energy spectrum, and 'l2norm' for the l2-norm spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This function returns either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. The
l2-norm is the sum of the magnitude of the coefficients squared.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, and is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to
the total spectrum from all angular orders over an infinitessimal
logarithmic degree band. The contrubution in the band dlog_a(l) is
spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where
spectrum(l, 'per_dlogl) is equal to spectrum(l, 'per_l')*l*log(a).
]
if compare[call[name[normalization].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b078ab60>, <ast.Constant object at 0x7da1b078ae30>, <ast.Constant object at 0x7da1b0789420>, <ast.Constant object at 0x7da1b078a890>]]] begin[:]
<ast.Raise object at 0x7da1b0789ba0>
if compare[call[name[convention].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0789bd0>, <ast.Constant object at 0x7da1b078add0>, <ast.Constant object at 0x7da1b0789360>]]] begin[:]
<ast.Raise object at 0x7da1b07893c0>
if compare[call[name[unit].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b0788370>, <ast.Constant object at 0x7da1b0789f00>, <ast.Constant object at 0x7da1b078b1c0>]]] begin[:]
<ast.Raise object at 0x7da1b078be50>
if compare[name[lmax] is constant[None]] begin[:]
variable[lmax] assign[=] binary_operation[call[name[len], parameter[call[name[clm]][tuple[[<ast.Constant object at 0x7da1b078b790>, <ast.Slice object at 0x7da18ede4310>, <ast.Constant object at 0x7da18ede56f0>]]]]] - constant[1]]
if compare[name[degrees] is constant[None]] begin[:]
variable[degrees] assign[=] call[name[_np].arange, parameter[binary_operation[name[lmax] + constant[1]]]]
variable[array] assign[=] call[name[_np].empty, parameter[call[name[len], parameter[name[degrees]]]]]
if compare[call[name[normalization].lower, parameter[]] equal[==] constant[unnorm]] begin[:]
if compare[call[name[convention].lower, parameter[]] equal[==] constant[l2norm]] begin[:]
<ast.Raise object at 0x7da18ede6620>
for taget[tuple[[<ast.Name object at 0x7da1b078ba00>, <ast.Name object at 0x7da1b0789d50>]]] in starred[call[name[enumerate], parameter[name[degrees]]]] begin[:]
variable[ms] assign[=] call[name[_np].arange, parameter[binary_operation[name[l] + constant[1]]]]
variable[conv] assign[=] binary_operation[binary_operation[call[name[_factorial], parameter[binary_operation[name[l] + name[ms]]]] / binary_operation[binary_operation[constant[2.0] * name[l]] + constant[1.0]]] / call[name[_factorial], parameter[binary_operation[name[l] - name[ms]]]]]
if call[name[_np].iscomplexobj, parameter[name[clm]]] begin[:]
call[name[array]][name[i]] assign[=] binary_operation[call[binary_operation[binary_operation[call[name[conv]][<ast.Slice object at 0x7da2047ebe50>] * call[name[clm]][tuple[[<ast.Constant object at 0x7da2047eaad0>, <ast.Name object at 0x7da2047e9840>, <ast.Slice object at 0x7da2047ea4d0>]]]] * call[call[name[clm]][tuple[[<ast.Constant object at 0x7da2047e92d0>, <ast.Name object at 0x7da2047e8280>, <ast.Slice object at 0x7da2047eae60>]]].conjugate, parameter[]]].real.sum, parameter[]] + call[binary_operation[binary_operation[call[name[conv]][<ast.Slice object at 0x7da2047ebcd0>] * call[name[clm]][tuple[[<ast.Constant object at 0x7da2047eb820>, <ast.Name object at 0x7da2047e9540>, <ast.Slice object at 0x7da2047e96c0>]]]] * call[call[name[clm]][tuple[[<ast.Constant object at 0x7da2047eace0>, <ast.Name object at 0x7da2047e8880>, <ast.Slice object at 0x7da2047e88b0>]]].conjugate, parameter[]]].real.sum, parameter[]]]
if compare[call[name[convention].lower, parameter[]] equal[==] constant[energy]] begin[:]
<ast.AugAssign object at 0x7da204565000>
if compare[call[name[unit].lower, parameter[]] equal[==] constant[per_l]] begin[:]
pass
return[name[array]] | keyword[def] identifier[spectrum] ( identifier[clm] , identifier[normalization] = literal[string] , identifier[degrees] = keyword[None] , identifier[lmax] = keyword[None] ,
identifier[convention] = literal[string] , identifier[unit] = literal[string] , identifier[base] = literal[int] ):
literal[string]
keyword[if] identifier[normalization] . identifier[lower] () keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[normalization] )))
keyword[if] identifier[convention] . identifier[lower] () keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[convention] )))
keyword[if] identifier[unit] . identifier[lower] () keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] . identifier[format] ( identifier[repr] ( identifier[unit] )))
keyword[if] identifier[lmax] keyword[is] keyword[None] :
identifier[lmax] = identifier[len] ( identifier[clm] [ literal[int] ,:, literal[int] ])- literal[int]
keyword[if] identifier[degrees] keyword[is] keyword[None] :
identifier[degrees] = identifier[_np] . identifier[arange] ( identifier[lmax] + literal[int] )
identifier[array] = identifier[_np] . identifier[empty] ( identifier[len] ( identifier[degrees] ))
keyword[if] identifier[normalization] . identifier[lower] ()== literal[string] :
keyword[if] identifier[convention] . identifier[lower] ()== literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] )
keyword[for] identifier[i] , identifier[l] keyword[in] identifier[enumerate] ( identifier[degrees] ):
identifier[ms] = identifier[_np] . identifier[arange] ( identifier[l] + literal[int] )
identifier[conv] = identifier[_factorial] ( identifier[l] + identifier[ms] )/( literal[int] * identifier[l] + literal[int] )/ identifier[_factorial] ( identifier[l] - identifier[ms] )
keyword[if] identifier[_np] . identifier[iscomplexobj] ( identifier[clm] ):
identifier[array] [ identifier[i] ]=( identifier[conv] [ literal[int] : identifier[l] + literal[int] ]* identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]*
identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]. identifier[conjugate] ()). identifier[real] . identifier[sum] ()+( identifier[conv] [ literal[int] : identifier[l] + literal[int] ]* identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]*
identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]. identifier[conjugate] ()). identifier[real] . identifier[sum] ()
keyword[else] :
identifier[conv] [ literal[int] : identifier[l] + literal[int] ]= identifier[conv] [ literal[int] : identifier[l] + literal[int] ]/ literal[int]
identifier[array] [ identifier[i] ]=( identifier[conv] [ literal[int] : identifier[l] + literal[int] ]* identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]** literal[int] ). identifier[sum] ()+( identifier[conv] [ literal[int] : identifier[l] + literal[int] ]* identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]** literal[int] ). identifier[sum] ()
keyword[else] :
keyword[for] identifier[i] , identifier[l] keyword[in] identifier[enumerate] ( identifier[degrees] ):
keyword[if] identifier[_np] . identifier[iscomplexobj] ( identifier[clm] ):
identifier[array] [ identifier[i] ]=( identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]*
identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]. identifier[conjugate] ()). identifier[real] . identifier[sum] ()+( identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]*
identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]. identifier[conjugate] ()). identifier[real] . identifier[sum] ()
keyword[else] :
identifier[array] [ identifier[i] ]=( identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]** literal[int] ). identifier[sum] ()+( identifier[clm] [ literal[int] , identifier[l] , literal[int] : identifier[l] + literal[int] ]** literal[int] ). identifier[sum] ()
keyword[if] identifier[convention] . identifier[lower] ()== literal[string] :
keyword[return] identifier[array]
keyword[else] :
keyword[if] identifier[normalization] . identifier[lower] ()== literal[string] :
keyword[pass]
keyword[elif] identifier[normalization] . identifier[lower] ()== literal[string] :
identifier[array] /=( literal[int] * identifier[degrees] + literal[int] )
keyword[elif] identifier[normalization] . identifier[lower] ()== literal[string] :
identifier[array] /=( literal[int] * identifier[_np] . identifier[pi] )
keyword[if] identifier[convention] . identifier[lower] ()== literal[string] :
identifier[array] *= literal[int] * identifier[_np] . identifier[pi]
keyword[if] identifier[unit] . identifier[lower] ()== literal[string] :
keyword[pass]
keyword[elif] identifier[unit] . identifier[lower] ()== literal[string] :
identifier[array] /=( literal[int] * identifier[degrees] + literal[int] )
keyword[elif] identifier[unit] . identifier[lower] ()== literal[string] :
identifier[array] *= identifier[degrees] * identifier[_np] . identifier[log] ( identifier[base] )
keyword[return] identifier[array] | def spectrum(clm, normalization='4pi', degrees=None, lmax=None, convention='power', unit='per_l', base=10.0):
"""
Return the spectrum of the spherical harmonic coefficients as a function
of spherical harmonic degree.
Usage
-----
array = spectrum(clm, [normalization, degrees, lmax, convention,
unit, base])
Returns
-------
array : ndarray, shape (len(degrees))
1-D ndarray of the spectrum.
Parameters
----------
clm : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized coefficients,
respectively.
lmax : int, optional, default = len(clm[0,:,0]) - 1.
Maximum spherical harmonic degree to output.
degrees : ndarray, optional, default = numpy.arange(lmax+1)
Array containing the spherical harmonic degrees where the spectrum
is computed.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum, 'energy'
for energy spectrum, and 'l2norm' for the l2-norm spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This function returns either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. The
l2-norm is the sum of the magnitude of the coefficients squared.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, and is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the contribution to
the total spectrum from all angular orders over an infinitessimal
logarithmic degree band. The contrubution in the band dlog_a(l) is
spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base, and where
spectrum(l, 'per_dlogl) is equal to spectrum(l, 'per_l')*l*log(a).
"""
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError("The normalization must be '4pi', 'ortho', " + "'schmidt', or 'unnorm'. Input value was {:s}.".format(repr(normalization))) # depends on [control=['if'], data=[]]
if convention.lower() not in ('power', 'energy', 'l2norm'):
raise ValueError("convention must be 'power', 'energy', or " + "'l2norm'. Input value was {:s}".format(repr(convention))) # depends on [control=['if'], data=[]]
if unit.lower() not in ('per_l', 'per_lm', 'per_dlogl'):
raise ValueError("unit must be 'per_l', 'per_lm', or 'per_dlogl'." + 'Input value was {:s}'.format(repr(unit))) # depends on [control=['if'], data=[]]
if lmax is None:
lmax = len(clm[0, :, 0]) - 1 # depends on [control=['if'], data=['lmax']]
if degrees is None:
degrees = _np.arange(lmax + 1) # depends on [control=['if'], data=['degrees']]
array = _np.empty(len(degrees))
if normalization.lower() == 'unnorm':
if convention.lower() == 'l2norm':
raise ValueError("convention can not be set to 'l2norm' when " + 'using unnormalized harmonics.') # depends on [control=['if'], data=[]]
for (i, l) in enumerate(degrees):
ms = _np.arange(l + 1)
conv = _factorial(l + ms) / (2.0 * l + 1.0) / _factorial(l - ms)
if _np.iscomplexobj(clm):
array[i] = (conv[0:l + 1] * clm[0, l, 0:l + 1] * clm[0, l, 0:l + 1].conjugate()).real.sum() + (conv[1:l + 1] * clm[1, l, 1:l + 1] * clm[1, l, 1:l + 1].conjugate()).real.sum() # depends on [control=['if'], data=[]]
else:
conv[1:l + 1] = conv[1:l + 1] / 2.0
array[i] = (conv[0:l + 1] * clm[0, l, 0:l + 1] ** 2).sum() + (conv[1:l + 1] * clm[1, l, 1:l + 1] ** 2).sum() # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for (i, l) in enumerate(degrees):
if _np.iscomplexobj(clm):
array[i] = (clm[0, l, 0:l + 1] * clm[0, l, 0:l + 1].conjugate()).real.sum() + (clm[1, l, 1:l + 1] * clm[1, l, 1:l + 1].conjugate()).real.sum() # depends on [control=['if'], data=[]]
else:
array[i] = (clm[0, l, 0:l + 1] ** 2).sum() + (clm[1, l, 1:l + 1] ** 2).sum() # depends on [control=['for'], data=[]]
if convention.lower() == 'l2norm':
return array # depends on [control=['if'], data=[]]
elif normalization.lower() == '4pi':
pass # depends on [control=['if'], data=[]]
elif normalization.lower() == 'schmidt':
array /= 2.0 * degrees + 1.0 # depends on [control=['if'], data=[]]
elif normalization.lower() == 'ortho':
array /= 4.0 * _np.pi # depends on [control=['if'], data=[]]
if convention.lower() == 'energy':
array *= 4.0 * _np.pi # depends on [control=['if'], data=[]]
if unit.lower() == 'per_l':
pass # depends on [control=['if'], data=[]]
elif unit.lower() == 'per_lm':
array /= 2.0 * degrees + 1.0 # depends on [control=['if'], data=[]]
elif unit.lower() == 'per_dlogl':
array *= degrees * _np.log(base) # depends on [control=['if'], data=[]]
return array |
def _parse_abstract(self):
"""Parse the abstract from the TeX source.
Sets the ``_abstract`` attribute.
"""
command = LatexCommand(
'setDocAbstract',
{'name': 'abstract', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return
try:
content = parsed['abstract']
except KeyError:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return
content = content.strip()
self._abstract = content | def function[_parse_abstract, parameter[self]]:
constant[Parse the abstract from the TeX source.
Sets the ``_abstract`` attribute.
]
variable[command] assign[=] call[name[LatexCommand], parameter[constant[setDocAbstract], dictionary[[<ast.Constant object at 0x7da1b00371c0>, <ast.Constant object at 0x7da1b0037520>, <ast.Constant object at 0x7da1b0036ad0>], [<ast.Constant object at 0x7da1b0034820>, <ast.Constant object at 0x7da1b0036860>, <ast.Constant object at 0x7da1b0037b80>]]]]
<ast.Try object at 0x7da1b0034f70>
<ast.Try object at 0x7da1b0037f10>
variable[content] assign[=] call[name[content].strip, parameter[]]
name[self]._abstract assign[=] name[content] | keyword[def] identifier[_parse_abstract] ( identifier[self] ):
literal[string]
identifier[command] = identifier[LatexCommand] (
literal[string] ,
{ literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : literal[string] })
keyword[try] :
identifier[parsed] = identifier[next] ( identifier[command] . identifier[parse] ( identifier[self] . identifier[_tex] ))
keyword[except] identifier[StopIteration] :
identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[_abstract] = keyword[None]
keyword[return]
keyword[try] :
identifier[content] = identifier[parsed] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[_abstract] = keyword[None]
keyword[return]
identifier[content] = identifier[content] . identifier[strip] ()
identifier[self] . identifier[_abstract] = identifier[content] | def _parse_abstract(self):
"""Parse the abstract from the TeX source.
Sets the ``_abstract`` attribute.
"""
command = LatexCommand('setDocAbstract', {'name': 'abstract', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex)) # depends on [control=['try'], data=[]]
except StopIteration:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return # depends on [control=['except'], data=[]]
try:
content = parsed['abstract'] # depends on [control=['try'], data=[]]
except KeyError:
self._logger.warning('lsstdoc has no abstract')
self._abstract = None
return # depends on [control=['except'], data=[]]
content = content.strip()
self._abstract = content |
def displayable_path(path, separator=u'; '):
    """Decode a bytestring path into text suitable for showing to a user.

    Lists and tuples are rendered element-by-element (recursively) and
    joined with ``separator``. Text is returned unchanged; arbitrary
    non-string objects fall back to their unicode representation.
    """
    if isinstance(path, (list, tuple)):
        return separator.join(displayable_path(item) for item in path)
    if isinstance(path, six.text_type):
        return path
    if not isinstance(path, bytes):
        # Not a string at all: just render the object's unicode form.
        return six.text_type(path)

    try:
        return path.decode(_fsencoding(), 'ignore')
    except (UnicodeError, LookupError):
        # Unknown or unusable filesystem encoding: fall back to UTF-8.
        return path.decode('utf8', 'ignore')
constant[Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
]
if call[name[isinstance], parameter[name[path], tuple[[<ast.Name object at 0x7da1b10af580>, <ast.Name object at 0x7da1b10af6a0>]]]] begin[:]
return[call[name[separator].join, parameter[<ast.GeneratorExp object at 0x7da1b10af700>]]]
<ast.Try object at 0x7da1b10ac5b0> | keyword[def] identifier[displayable_path] ( identifier[path] , identifier[separator] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[path] ,( identifier[list] , identifier[tuple] )):
keyword[return] identifier[separator] . identifier[join] ( identifier[displayable_path] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[path] )
keyword[elif] identifier[isinstance] ( identifier[path] , identifier[six] . identifier[text_type] ):
keyword[return] identifier[path]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[path] , identifier[bytes] ):
keyword[return] identifier[six] . identifier[text_type] ( identifier[path] )
keyword[try] :
keyword[return] identifier[path] . identifier[decode] ( identifier[_fsencoding] (), literal[string] )
keyword[except] ( identifier[UnicodeError] , identifier[LookupError] ):
keyword[return] identifier[path] . identifier[decode] ( literal[string] , literal[string] ) | def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join((displayable_path(p) for p in path)) # depends on [control=['if'], data=[]]
elif isinstance(path, six.text_type):
return path # depends on [control=['if'], data=[]]
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return six.text_type(path) # depends on [control=['if'], data=[]]
try:
return path.decode(_fsencoding(), 'ignore') # depends on [control=['try'], data=[]]
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore') # depends on [control=['except'], data=[]] |
def overlay(self, block_start_string=missing, block_end_string=missing,
            variable_start_string=missing, variable_end_string=missing,
            comment_start_string=missing, comment_end_string=missing,
            line_statement_prefix=missing, line_comment_prefix=missing,
            trim_blocks=missing, lstrip_blocks=missing,
            extensions=missing, optimized=missing,
            undefined=missing, finalize=missing, autoescape=missing,
            loader=missing, cache_size=missing, auto_reload=missing,
            bytecode_cache=missing):
    """Create a new overlay environment that shares all the data with the
    current environment except for cache and the overridden attributes.
    Extensions cannot be removed for an overlayed environment. An overlayed
    environment automatically gets all the extensions of the environment it
    is linked to plus optional extra extensions.
    Creating overlays should happen after the initial environment was set
    up completely. Not all attributes are truly linked, some are just
    copied over so modifications on the original environment may not shine
    through.
    """
    # Snapshot every call argument by parameter name. The ``missing``
    # sentinel marks parameters the caller did not override.
    args = dict(locals())
    # ``self`` is not a setting, and ``cache_size``/``extensions`` get
    # dedicated handling below instead of a plain setattr.
    del args['self'], args['cache_size'], args['extensions']

    # Clone without running __init__ so the overlay starts out sharing the
    # parent environment's attribute values.
    rv = object.__new__(self.__class__)
    rv.__dict__.update(self.__dict__)
    rv.overlayed = True
    rv.linked_to = self

    # Apply only the settings that were explicitly overridden.
    for key, value in iteritems(args):
        if value is not missing:
            setattr(rv, key, value)

    # A new cache size gets a fresh cache; otherwise the parent's cache is
    # copied so the two environments do not share cache entries.
    if cache_size is not missing:
        rv.cache = create_cache(cache_size)
    else:
        rv.cache = copy_cache(self.cache)

    # Re-bind every inherited extension to the overlay, then load any
    # extra extensions requested for this overlay only.
    rv.extensions = {}
    for key, value in iteritems(self.extensions):
        rv.extensions[key] = value.bind(rv)
    if extensions is not missing:
        rv.extensions.update(load_extensions(rv, extensions))

    return _environment_sanity_check(rv)
constant[Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
]
variable[args] assign[=] call[name[dict], parameter[call[name[locals], parameter[]]]]
<ast.Delete object at 0x7da18ede6380>
variable[rv] assign[=] call[name[object].__new__, parameter[name[self].__class__]]
call[name[rv].__dict__.update, parameter[name[self].__dict__]]
name[rv].overlayed assign[=] constant[True]
name[rv].linked_to assign[=] name[self]
for taget[tuple[[<ast.Name object at 0x7da18ede4b50>, <ast.Name object at 0x7da18ede7dc0>]]] in starred[call[name[iteritems], parameter[name[args]]]] begin[:]
if compare[name[value] is_not name[missing]] begin[:]
call[name[setattr], parameter[name[rv], name[key], name[value]]]
if compare[name[cache_size] is_not name[missing]] begin[:]
name[rv].cache assign[=] call[name[create_cache], parameter[name[cache_size]]]
name[rv].extensions assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1de3550>, <ast.Name object at 0x7da1b1de3400>]]] in starred[call[name[iteritems], parameter[name[self].extensions]]] begin[:]
call[name[rv].extensions][name[key]] assign[=] call[name[value].bind, parameter[name[rv]]]
if compare[name[extensions] is_not name[missing]] begin[:]
call[name[rv].extensions.update, parameter[call[name[load_extensions], parameter[name[rv], name[extensions]]]]]
return[call[name[_environment_sanity_check], parameter[name[rv]]]] | keyword[def] identifier[overlay] ( identifier[self] , identifier[block_start_string] = identifier[missing] , identifier[block_end_string] = identifier[missing] ,
identifier[variable_start_string] = identifier[missing] , identifier[variable_end_string] = identifier[missing] ,
identifier[comment_start_string] = identifier[missing] , identifier[comment_end_string] = identifier[missing] ,
identifier[line_statement_prefix] = identifier[missing] , identifier[line_comment_prefix] = identifier[missing] ,
identifier[trim_blocks] = identifier[missing] , identifier[lstrip_blocks] = identifier[missing] ,
identifier[extensions] = identifier[missing] , identifier[optimized] = identifier[missing] ,
identifier[undefined] = identifier[missing] , identifier[finalize] = identifier[missing] , identifier[autoescape] = identifier[missing] ,
identifier[loader] = identifier[missing] , identifier[cache_size] = identifier[missing] , identifier[auto_reload] = identifier[missing] ,
identifier[bytecode_cache] = identifier[missing] ):
literal[string]
identifier[args] = identifier[dict] ( identifier[locals] ())
keyword[del] identifier[args] [ literal[string] ], identifier[args] [ literal[string] ], identifier[args] [ literal[string] ]
identifier[rv] = identifier[object] . identifier[__new__] ( identifier[self] . identifier[__class__] )
identifier[rv] . identifier[__dict__] . identifier[update] ( identifier[self] . identifier[__dict__] )
identifier[rv] . identifier[overlayed] = keyword[True]
identifier[rv] . identifier[linked_to] = identifier[self]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[args] ):
keyword[if] identifier[value] keyword[is] keyword[not] identifier[missing] :
identifier[setattr] ( identifier[rv] , identifier[key] , identifier[value] )
keyword[if] identifier[cache_size] keyword[is] keyword[not] identifier[missing] :
identifier[rv] . identifier[cache] = identifier[create_cache] ( identifier[cache_size] )
keyword[else] :
identifier[rv] . identifier[cache] = identifier[copy_cache] ( identifier[self] . identifier[cache] )
identifier[rv] . identifier[extensions] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[self] . identifier[extensions] ):
identifier[rv] . identifier[extensions] [ identifier[key] ]= identifier[value] . identifier[bind] ( identifier[rv] )
keyword[if] identifier[extensions] keyword[is] keyword[not] identifier[missing] :
identifier[rv] . identifier[extensions] . identifier[update] ( identifier[load_extensions] ( identifier[rv] , identifier[extensions] ))
keyword[return] identifier[_environment_sanity_check] ( identifier[rv] ) | def overlay(self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, lstrip_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for (key, value) in iteritems(args):
if value is not missing:
setattr(rv, key, value) # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]]
if cache_size is not missing:
rv.cache = create_cache(cache_size) # depends on [control=['if'], data=['cache_size']]
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for (key, value) in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv) # depends on [control=['for'], data=[]]
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions)) # depends on [control=['if'], data=['extensions']]
return _environment_sanity_check(rv) |
def load_basic_system_bindings():
    """
    Basic system bindings (For both Emacs and Vi mode.)
    """
    bindings = Registry()

    # Only offer Ctrl-Z suspension on platforms that support sending the
    # process to the background.
    can_suspend = Condition(lambda cli: suspend_to_background_supported())

    @bindings.add_binding(Keys.ControlZ, filter=can_suspend)
    def _(event):
        """
        Suspend process to background.
        """
        event.cli.suspend_to_background()

    return bindings
constant[
Basic system bindings (For both Emacs and Vi mode.)
]
variable[registry] assign[=] call[name[Registry], parameter[]]
variable[suspend_supported] assign[=] call[name[Condition], parameter[<ast.Lambda object at 0x7da1b07f4700>]]
def function[_, parameter[event]]:
constant[
Suspend process to background.
]
call[name[event].cli.suspend_to_background, parameter[]]
return[name[registry]] | keyword[def] identifier[load_basic_system_bindings] ():
literal[string]
identifier[registry] = identifier[Registry] ()
identifier[suspend_supported] = identifier[Condition] (
keyword[lambda] identifier[cli] : identifier[suspend_to_background_supported] ())
@ identifier[registry] . identifier[add_binding] ( identifier[Keys] . identifier[ControlZ] , identifier[filter] = identifier[suspend_supported] )
keyword[def] identifier[_] ( identifier[event] ):
literal[string]
identifier[event] . identifier[cli] . identifier[suspend_to_background] ()
keyword[return] identifier[registry] | def load_basic_system_bindings():
"""
Basic system bindings (For both Emacs and Vi mode.)
"""
registry = Registry()
suspend_supported = Condition(lambda cli: suspend_to_background_supported())
@registry.add_binding(Keys.ControlZ, filter=suspend_supported)
def _(event):
"""
Suspend process to background.
"""
event.cli.suspend_to_background()
return registry |
def default(self, obj):
    '''Overrides the default serializer for `JSONEncoder`.

    This can serialize the following objects in addition to what
    `JSONEncoder` can already do.

    - `np.ndarray` (converted to a nested list)
    - `bytes` (decoded to text)
    - `complex` (converted to a ``(real, imag)`` pair)
    - NumPy scalar types (any `np.floating` or `np.integer` width)

    Non-finite floats (``nan``, ``+/-inf``) serialize to ``None`` since
    JSON has no representation for them.

    Parameters
    ----------
    obj : object
        A Python object to serialize to JSON.

    Returns
    -------
    object
        A JSON-serializable representation of the input object.
    '''
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, bytes):
        return obj.decode()
    elif isinstance(obj, complex):
        return (obj.real, obj.imag)
    elif isinstance(obj, (float, np.floating)):
        # ``np.floating`` covers every NumPy float width and, unlike the
        # ``np.float_`` alias used previously, still exists on NumPy >= 2.0
        # (where ``np.float_`` was removed). Finite values are returned as
        # plain floats; non-finite values have no JSON equivalent.
        return None if not np.isfinite(obj) else float(obj)
    elif isinstance(obj, np.integer):
        # Covers all signed and unsigned NumPy integer widths, not just
        # the four sizes the previous tuple enumerated.
        return int(obj)
    else:
        # Fall back to the stock encoder, which raises TypeError for
        # unsupported types.
        return json.JSONEncoder.default(self, obj)
constant[Overrides the default serializer for `JSONEncoder`.
This can serialize the following objects in addition to what
`JSONEncoder` can already do.
- `np.array`
- `bytes`
- `complex`
- `np.float64` and other `np.dtype` objects
Parameters
----------
obj : object
A Python object to serialize to JSON.
Returns
-------
str
A JSON encoded representation of the input object.
]
if call[name[isinstance], parameter[name[obj], name[np].ndarray]] begin[:]
return[call[name[obj].tolist, parameter[]]] | keyword[def] identifier[default] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[np] . identifier[ndarray] ):
keyword[return] identifier[obj] . identifier[tolist] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[bytes] ):
keyword[return] identifier[obj] . identifier[decode] ()
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[complex] ):
keyword[return] ( identifier[obj] . identifier[real] , identifier[obj] . identifier[imag] )
keyword[elif] ( identifier[isinstance] ( identifier[obj] ,( identifier[float] , identifier[np] . identifier[float64] , identifier[np] . identifier[float_] )) keyword[and]
keyword[not] identifier[np] . identifier[isfinite] ( identifier[obj] )):
keyword[return] keyword[None]
keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[np] . identifier[int8] , identifier[np] . identifier[int16] , identifier[np] . identifier[int32] , identifier[np] . identifier[int64] )):
keyword[return] identifier[int] ( identifier[obj] )
keyword[else] :
keyword[return] identifier[json] . identifier[JSONEncoder] . identifier[default] ( identifier[self] , identifier[obj] ) | def default(self, obj):
"""Overrides the default serializer for `JSONEncoder`.
This can serialize the following objects in addition to what
`JSONEncoder` can already do.
- `np.array`
- `bytes`
- `complex`
- `np.float64` and other `np.dtype` objects
Parameters
----------
obj : object
A Python object to serialize to JSON.
Returns
-------
str
A JSON encoded representation of the input object.
"""
if isinstance(obj, np.ndarray):
return obj.tolist() # depends on [control=['if'], data=[]]
elif isinstance(obj, bytes):
return obj.decode() # depends on [control=['if'], data=[]]
elif isinstance(obj, complex):
return (obj.real, obj.imag) # depends on [control=['if'], data=[]]
elif isinstance(obj, (float, np.float64, np.float_)) and (not np.isfinite(obj)):
return None # depends on [control=['if'], data=[]]
elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):
return int(obj) # depends on [control=['if'], data=[]]
else:
return json.JSONEncoder.default(self, obj) |
def fit(self, y):
    """Estimate survival distribution from training data.

    Parameters
    ----------
    y : structured array, shape = (n_samples,)
        A structured array containing the binary event indicator
        as first field, and time of event or time of censoring as
        second field.

    Returns
    -------
    self
    """
    event, time = check_y_survival(y, allow_all_censored=True)
    unique_time, prob = kaplan_meier_estimator(event, time)

    # Prepend a -inf time point with survival probability 1 so lookups for
    # times before the first observed time are well defined.
    # NOTE: ``numpy.infty`` was removed in NumPy 2.0; ``numpy.inf`` is the
    # supported spelling on all NumPy versions.
    self.unique_time_ = numpy.concatenate(([-numpy.inf], unique_time))
    self.prob_ = numpy.concatenate(([1.], prob))
    return self
constant[Estimate survival distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
]
<ast.Tuple object at 0x7da1b18818d0> assign[=] call[name[check_y_survival], parameter[name[y]]]
<ast.Tuple object at 0x7da1b17cd1e0> assign[=] call[name[kaplan_meier_estimator], parameter[name[event], name[time]]]
name[self].unique_time_ assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.List object at 0x7da1b17cfd60>, <ast.Name object at 0x7da1b17cdc30>]]]]
name[self].prob_ assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.List object at 0x7da1b17cd750>, <ast.Name object at 0x7da1b17cc3a0>]]]]
return[name[self]] | keyword[def] identifier[fit] ( identifier[self] , identifier[y] ):
literal[string]
identifier[event] , identifier[time] = identifier[check_y_survival] ( identifier[y] , identifier[allow_all_censored] = keyword[True] )
identifier[unique_time] , identifier[prob] = identifier[kaplan_meier_estimator] ( identifier[event] , identifier[time] )
identifier[self] . identifier[unique_time_] = identifier[numpy] . identifier[concatenate] (([- identifier[numpy] . identifier[infty] ], identifier[unique_time] ))
identifier[self] . identifier[prob_] = identifier[numpy] . identifier[concatenate] (([ literal[int] ], identifier[prob] ))
keyword[return] identifier[self] | def fit(self, y):
"""Estimate survival distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
(event, time) = check_y_survival(y, allow_all_censored=True)
(unique_time, prob) = kaplan_meier_estimator(event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.0], prob))
return self |
def add_section(self, section_name):
    """Add an empty section.

    Parameters
    ----------
    section_name : str
        Name of the new section. ``"DEFAULT"`` is reserved and the name
        must not collide with an existing section.

    Raises
    ------
    ValueError
        If the name is reserved or already in use. ``ValueError`` is a
        subclass of ``Exception``, so existing ``except Exception``
        handlers continue to work.
    """
    if section_name == "DEFAULT":
        # Raise a specific exception type instead of bare Exception so
        # callers can handle validation errors precisely.
        raise ValueError("'DEFAULT' is reserved section name.")
    if section_name in self._sections:
        raise ValueError(
            "Error! %s is already one of the sections" % section_name)
    self._sections[section_name] = Section(section_name)
constant[Add an empty section.
]
if compare[name[section_name] equal[==] constant[DEFAULT]] begin[:]
<ast.Raise object at 0x7da204566260>
if compare[name[section_name] in name[self]._sections] begin[:]
<ast.Raise object at 0x7da204564cd0> | keyword[def] identifier[add_section] ( identifier[self] , identifier[section_name] ):
literal[string]
keyword[if] identifier[section_name] == literal[string] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[section_name] keyword[in] identifier[self] . identifier[_sections] :
keyword[raise] identifier[Exception] (
literal[string] % identifier[section_name] )
keyword[else] :
identifier[self] . identifier[_sections] [ identifier[section_name] ]= identifier[Section] ( identifier[section_name] ) | def add_section(self, section_name):
"""Add an empty section.
"""
if section_name == 'DEFAULT':
raise Exception("'DEFAULT' is reserved section name.") # depends on [control=['if'], data=[]]
if section_name in self._sections:
raise Exception('Error! %s is already one of the sections' % section_name) # depends on [control=['if'], data=['section_name']]
else:
self._sections[section_name] = Section(section_name) |
def filter_google_songs(songs, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
    """Match Google Music song dicts against a set of metadata filters.

    Parameters:
        songs (list): Google Music song dicts to filter.

        include_filters (list): ``(field, pattern)`` tuples where fields are
            valid Google Music metadata fields and patterns are Python
            regexes. Songs whose field values match none of the patterns
            are filtered out.

        exclude_filters (list): ``(field, pattern)`` tuples; songs whose
            field values match any pattern are filtered out.

        all_includes (bool): If ``True``, every include filter must match
            to include a song.

        all_excludes (bool): If ``True``, every exclude filter must match
            to exclude a song.

    Returns:
        A list of song dicts matching the criteria and a list of song
        dicts that were filtered out::

            (matched, filtered)
    """
    # Without any filters, everything matches; return a shallow copy so
    # the caller's list is never aliased.
    if not include_filters and not exclude_filters:
        return list(songs), []

    matched = []
    filtered = []
    for song in songs:
        passes = _check_filters(
            song, include_filters=include_filters, exclude_filters=exclude_filters,
            all_includes=all_includes, all_excludes=all_excludes)
        (matched if passes else filtered).append(song)

    return matched, filtered
constant[Match a Google Music song dict against a set of metadata filters.
Parameters:
songs (list): Google Music song dicts to filter.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
::
(matched, filtered)
]
variable[matched_songs] assign[=] list[[]]
variable[filtered_songs] assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c7cb4f0> begin[:]
for taget[name[song]] in starred[name[songs]] begin[:]
if call[name[_check_filters], parameter[name[song]]] begin[:]
call[name[matched_songs].append, parameter[name[song]]]
return[tuple[[<ast.Name object at 0x7da20c7cb640>, <ast.Name object at 0x7da20c7ca710>]]] | keyword[def] identifier[filter_google_songs] ( identifier[songs] , identifier[include_filters] = keyword[None] , identifier[exclude_filters] = keyword[None] , identifier[all_includes] = keyword[False] , identifier[all_excludes] = keyword[False] ):
literal[string]
identifier[matched_songs] =[]
identifier[filtered_songs] =[]
keyword[if] identifier[include_filters] keyword[or] identifier[exclude_filters] :
keyword[for] identifier[song] keyword[in] identifier[songs] :
keyword[if] identifier[_check_filters] (
identifier[song] , identifier[include_filters] = identifier[include_filters] , identifier[exclude_filters] = identifier[exclude_filters] ,
identifier[all_includes] = identifier[all_includes] , identifier[all_excludes] = identifier[all_excludes] ):
identifier[matched_songs] . identifier[append] ( identifier[song] )
keyword[else] :
identifier[filtered_songs] . identifier[append] ( identifier[song] )
keyword[else] :
identifier[matched_songs] += identifier[songs]
keyword[return] identifier[matched_songs] , identifier[filtered_songs] | def filter_google_songs(songs, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Match a Google Music song dict against a set of metadata filters.
Parameters:
songs (list): Google Music song dicts to filter.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
::
(matched, filtered)
"""
matched_songs = []
filtered_songs = []
if include_filters or exclude_filters:
for song in songs:
if _check_filters(song, include_filters=include_filters, exclude_filters=exclude_filters, all_includes=all_includes, all_excludes=all_excludes):
matched_songs.append(song) # depends on [control=['if'], data=[]]
else:
filtered_songs.append(song) # depends on [control=['for'], data=['song']] # depends on [control=['if'], data=[]]
else:
matched_songs += songs
return (matched_songs, filtered_songs) |
def check_unused_args(self, used_args, args, kwargs):
    """Implement the check_unused_args in superclass.

    Sorts each keyword argument into ``self._used_kwargs`` or
    ``self._unused_kwargs`` depending on whether its name appears in
    ``used_args``.
    """
    for key, val in kwargs.items():
        bucket = self._used_kwargs if key in used_args else self._unused_kwargs
        bucket.update({key: val})
constant[Implement the check_unused_args in superclass.]
for taget[tuple[[<ast.Name object at 0x7da1afe189d0>, <ast.Name object at 0x7da1afe1a260>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[name[k] in name[used_args]] begin[:]
call[name[self]._used_kwargs.update, parameter[dictionary[[<ast.Name object at 0x7da1afe1ac20>], [<ast.Name object at 0x7da1afe18c10>]]]] | keyword[def] identifier[check_unused_args] ( identifier[self] , identifier[used_args] , identifier[args] , identifier[kwargs] ):
literal[string]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[used_args] :
identifier[self] . identifier[_used_kwargs] . identifier[update] ({ identifier[k] : identifier[v] })
keyword[else] :
identifier[self] . identifier[_unused_kwargs] . identifier[update] ({ identifier[k] : identifier[v] }) | def check_unused_args(self, used_args, args, kwargs):
"""Implement the check_unused_args in superclass."""
for (k, v) in kwargs.items():
if k in used_args:
self._used_kwargs.update({k: v}) # depends on [control=['if'], data=['k']]
else:
self._unused_kwargs.update({k: v}) # depends on [control=['for'], data=[]] |
def get_template_by_name(name, **kwargs):
    """
    Get a specific resource template, by name.
    """
    query = db.DBSession.query(Template).filter(Template.name == name)
    # Eagerly load the full type/attr/default-dataset tree in one query.
    query = query.options(
        joinedload_all('templatetypes.typeattrs.default_dataset.metadata'))
    try:
        return query.one()
    except NoResultFound:
        log.info("%s is not a valid identifier for a template", name)
        raise HydraError('Template "%s" not found' % name)
constant[
Get a specific resource template, by name.
]
<ast.Try object at 0x7da18bcc8670> | keyword[def] identifier[get_template_by_name] ( identifier[name] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[tmpl_i] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Template] ). identifier[filter] ( identifier[Template] . identifier[name] == identifier[name] ). identifier[options] ( identifier[joinedload_all] ( literal[string] )). identifier[one] ()
keyword[return] identifier[tmpl_i]
keyword[except] identifier[NoResultFound] :
identifier[log] . identifier[info] ( literal[string] , identifier[name] )
keyword[raise] identifier[HydraError] ( literal[string] % identifier[name] ) | def get_template_by_name(name, **kwargs):
"""
Get a specific resource template, by name.
"""
try:
tmpl_i = db.DBSession.query(Template).filter(Template.name == name).options(joinedload_all('templatetypes.typeattrs.default_dataset.metadata')).one()
return tmpl_i # depends on [control=['try'], data=[]]
except NoResultFound:
log.info('%s is not a valid identifier for a template', name)
raise HydraError('Template "%s" not found' % name) # depends on [control=['except'], data=[]] |
def set(conf):
    """Applies a configuration to the global config object"""
    for key, val in conf.items():
        # ``None`` means "leave the existing setting alone".
        if val is None:
            continue
        setattr(Conf, key.upper(), val)
constant[Applies a configuration to the global config object]
for taget[tuple[[<ast.Name object at 0x7da1b1b1a470>, <ast.Name object at 0x7da1b1b1ba30>]]] in starred[call[name[conf].items, parameter[]]] begin[:]
if compare[name[value] is_not constant[None]] begin[:]
call[name[setattr], parameter[name[Conf], call[name[name].upper, parameter[]], name[value]]] | keyword[def] identifier[set] ( identifier[conf] ):
literal[string]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[conf] . identifier[items] ():
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[setattr] ( identifier[Conf] , identifier[name] . identifier[upper] (), identifier[value] ) | def set(conf):
"""Applies a configuration to the global config object"""
for (name, value) in conf.items():
if value is not None:
setattr(Conf, name.upper(), value) # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]] |
def make_motif34lib():
    '''
    This function generates the motif34lib.mat library required for all
    other motif computations. Not to be called externally.
    '''
    from scipy import io
    import os

    def motif3generate():
        # Enumerate all directed 3-node subgraphs; only the 54 weakly
        # connected ones are kept as motif isomorphs.
        n = 0
        M = np.zeros((54, 6), dtype=bool)  # isomorphs
        # canonical labels (predecssors of IDs)
        CL = np.zeros((54, 6), dtype=np.uint8)
        cl = np.zeros((6,), dtype=np.uint8)
        for i in range(2**6):  # loop through all subgraphs
            # 6-bit string: one bit per off-diagonal entry of the 3x3
            # adjacency matrix, zero-padded on the left.
            m = '{0:b}'.format(i)
            m = str().zfill(6 - len(m)) + m
            G = np.array(((0, m[2], m[4]), (m[0], 0, m[5]),
                          (m[1], m[3], 0)), dtype=int)
            ko = np.sum(G, axis=1)  # out-degree of each node
            ki = np.sum(G, axis=0)  # in-degree of each node
            if np.all(ko + ki):  # if subgraph weakly connected
                # Canonical label: (out-degree, in-degree) pairs sorted by
                # lexsort on (ko, ki) so isomorphic graphs share a label.
                u = np.array((ko, ki)).T
                cl.flat = u[np.lexsort((ki, ko))]
                CL[n, :] = cl  # assign motif label to isomorph
                # Flat slices 1:4 and 5:8 of G.T pick the six off-diagonal
                # entries (skipping the diagonal).
                M[n, :] = np.array((G.T.flat[1:4], G.T.flat[5:8])).flat
                n += 1
        # convert CLs into motif IDs
        _, ID = np.unique(
            CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
        ID += 1
        # convert IDs into sporns & kotter classification
        id_mika = (1, 3, 4, 6, 7, 8, 11)
        id_olaf = (-3, -6, -1, -11, -4, -7, -8)
        for mika, olaf in zip(id_mika, id_olaf):
            ID[ID == mika] = olaf
        ID = np.abs(ID)

        ix = np.argsort(ID)
        ID = ID[ix]  # sort IDs
        M = M[ix, :]  # sort isomorphs
        N = np.squeeze(np.sum(M, axis=1))  # number of edges
        # Encode each 6-bit edge pattern as a decimal integer for fast
        # lookup by the motif routines.
        Mn = np.array(np.sum(np.tile(np.power(10, np.arange(5, -1, -1)),
                                     (M.shape[0], 1)) * M, axis=1), dtype=np.uint32)
        return M, Mn, ID, N

    def motif4generate():
        # Same procedure for 4-node subgraphs: 12 off-diagonal entries,
        # 3834 weakly connected isomorphs.
        n = 0
        M = np.zeros((3834, 12), dtype=bool)  # isomorphs
        CL = np.zeros((3834, 16), dtype=np.uint8)  # canonical labels
        cl = np.zeros((16,), dtype=np.uint8)
        for i in range(2**12):  # loop through all subgraphs
            # 12-bit string: one bit per off-diagonal entry of the 4x4
            # adjacency matrix, zero-padded on the left.
            m = '{0:b}'.format(i)
            m = str().zfill(12 - len(m)) + m
            G = np.array(((0, m[3], m[6], m[9]), (m[0], 0, m[7], m[10]),
                          (m[1], m[4], 0, m[11]), (m[2], m[5], m[8], 0)), dtype=int)
            Gs = G + G.T
            # Breadth-first style reachability on the symmetrized graph:
            # two expansion steps from node 0 reach all nodes iff the
            # subgraph is weakly connected.
            v = Gs[0, :]
            for j in range(2):
                v = np.any(Gs[v != 0, :], axis=0) + v
            if np.all(v):  # if subgraph weakly connected
                # Canonical label uses degrees of both G and G^2 to
                # distinguish non-isomorphic graphs with equal degrees.
                G2 = np.dot(G, G) != 0
                ko = np.sum(G, axis=1)
                ki = np.sum(G, axis=0)
                ko2 = np.sum(G2, axis=1)
                ki2 = np.sum(G2, axis=0)

                u = np.array((ki, ko, ki2, ko2)).T
                cl.flat = u[np.lexsort((ko2, ki2, ko, ki))]
                CL[n, :] = cl  # assign motif label to isomorph
                # Flat slices of G.T pick the twelve off-diagonal entries
                # (skipping the diagonal).
                M[n, :] = np.array((G.T.flat[1:5], G.T.flat[6:10],
                                    G.T.flat[11:15])).flat
                n += 1
        # convert CLs into motif IDs
        _, ID = np.unique(
            CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
        ID += 1

        ix = np.argsort(ID)
        ID = ID[ix]  # sort IDs
        M = M[ix, :]  # sort isomorphs
        N = np.sum(M, axis=1)  # number of edges
        # Encode each 12-bit edge pattern as a decimal integer for fast
        # lookup by the motif routines.
        Mn = np.array(np.sum(np.tile(np.power(10, np.arange(11, -1, -1)),
                                     (M.shape[0], 1)) * M, axis=1), dtype=np.uint64)
        return M, Mn, ID, N

    # Write the library next to this module; skip regeneration if present.
    dir = os.path.dirname(__file__)
    fname = os.path.join(dir, motiflib)
    if os.path.exists(fname):
        print("motif34lib already exists")
        return

    m3, m3n, id3, n3 = motif3generate()
    m4, m4n, id4, n4 = motif4generate()

    io.savemat(fname, mdict={'m3': m3, 'm3n': m3n, 'id3': id3, 'n3': n3,
                             'm4': m4, 'm4n': m4n, 'id4': id4, 'n4': n4})
constant[
This function generates the motif34lib.mat library required for all
other motif computations. Not to be called externally.
]
from relative_module[scipy] import module[io]
import module[os]
def function[motif3generate, parameter[]]:
variable[n] assign[=] constant[0]
variable[M] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b07971c0>, <ast.Constant object at 0x7da1b0796ad0>]]]]
variable[CL] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b0794340>, <ast.Constant object at 0x7da1b0794400>]]]]
variable[cl] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b07961d0>]]]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[constant[2] ** constant[6]]]]] begin[:]
variable[m] assign[=] call[constant[{0:b}].format, parameter[name[i]]]
variable[m] assign[=] binary_operation[call[call[name[str], parameter[]].zfill, parameter[binary_operation[constant[6] - call[name[len], parameter[name[m]]]]]] + name[m]]
variable[G] assign[=] call[name[np].array, parameter[tuple[[<ast.Tuple object at 0x7da1b0796e60>, <ast.Tuple object at 0x7da1b0796bf0>, <ast.Tuple object at 0x7da1b07967d0>]]]]
variable[ko] assign[=] call[name[np].sum, parameter[name[G]]]
variable[ki] assign[=] call[name[np].sum, parameter[name[G]]]
if call[name[np].all, parameter[binary_operation[name[ko] + name[ki]]]] begin[:]
variable[u] assign[=] call[name[np].array, parameter[tuple[[<ast.Name object at 0x7da1b0797af0>, <ast.Name object at 0x7da1b0797e20>]]]].T
name[cl].flat assign[=] call[name[u]][call[name[np].lexsort, parameter[tuple[[<ast.Name object at 0x7da1b0797850>, <ast.Name object at 0x7da1b0796500>]]]]]
call[name[CL]][tuple[[<ast.Name object at 0x7da1b07963b0>, <ast.Slice object at 0x7da1b0797460>]]] assign[=] name[cl]
call[name[M]][tuple[[<ast.Name object at 0x7da1b0797c10>, <ast.Slice object at 0x7da1b0797d00>]]] assign[=] call[name[np].array, parameter[tuple[[<ast.Subscript object at 0x7da1b0795e70>, <ast.Subscript object at 0x7da1b0795e10>]]]].flat
<ast.AugAssign object at 0x7da1b0795c60>
<ast.Tuple object at 0x7da1b0795930> assign[=] call[name[np].unique, parameter[call[name[CL].view, parameter[binary_operation[name[CL].dtype.descr * call[name[CL].shape][constant[1]]]]]]]
<ast.AugAssign object at 0x7da1b0795870>
variable[id_mika] assign[=] tuple[[<ast.Constant object at 0x7da1b08d9150>, <ast.Constant object at 0x7da1b08daa40>, <ast.Constant object at 0x7da1b08d9a80>, <ast.Constant object at 0x7da1b08db400>, <ast.Constant object at 0x7da1b08d97b0>, <ast.Constant object at 0x7da1b08dae00>, <ast.Constant object at 0x7da1b085fa60>]]
variable[id_olaf] assign[=] tuple[[<ast.UnaryOp object at 0x7da1b085fa00>, <ast.UnaryOp object at 0x7da1b085f9a0>, <ast.UnaryOp object at 0x7da1b085fc70>, <ast.UnaryOp object at 0x7da1b085fc10>, <ast.UnaryOp object at 0x7da1b085fca0>, <ast.UnaryOp object at 0x7da1b085fb20>, <ast.UnaryOp object at 0x7da1b085fbb0>]]
for taget[tuple[[<ast.Name object at 0x7da1b085fdf0>, <ast.Name object at 0x7da1b085fe20>]]] in starred[call[name[zip], parameter[name[id_mika], name[id_olaf]]]] begin[:]
call[name[ID]][compare[name[ID] equal[==] name[mika]]] assign[=] name[olaf]
variable[ID] assign[=] call[name[np].abs, parameter[name[ID]]]
variable[ix] assign[=] call[name[np].argsort, parameter[name[ID]]]
variable[ID] assign[=] call[name[ID]][name[ix]]
variable[M] assign[=] call[name[M]][tuple[[<ast.Name object at 0x7da1b085ed10>, <ast.Slice object at 0x7da1b085f8b0>]]]
variable[N] assign[=] call[name[np].squeeze, parameter[call[name[np].sum, parameter[name[M]]]]]
variable[Mn] assign[=] call[name[np].array, parameter[call[name[np].sum, parameter[binary_operation[call[name[np].tile, parameter[call[name[np].power, parameter[constant[10], call[name[np].arange, parameter[constant[5], <ast.UnaryOp object at 0x7da1b085f370>, <ast.UnaryOp object at 0x7da1b085e350>]]]], tuple[[<ast.Subscript object at 0x7da1b085e3e0>, <ast.Constant object at 0x7da1b085e110>]]]] * name[M]]]]]]
return[tuple[[<ast.Name object at 0x7da1b085e230>, <ast.Name object at 0x7da1b085e2f0>, <ast.Name object at 0x7da1b085e410>, <ast.Name object at 0x7da1b085f490>]]]
def function[motif4generate, parameter[]]:
variable[n] assign[=] constant[0]
variable[M] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b085ef20>, <ast.Constant object at 0x7da1b085eef0>]]]]
variable[CL] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b085eec0>, <ast.Constant object at 0x7da1b085ee60>]]]]
variable[cl] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b085e050>]]]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[constant[2] ** constant[12]]]]] begin[:]
variable[m] assign[=] call[constant[{0:b}].format, parameter[name[i]]]
variable[m] assign[=] binary_operation[call[call[name[str], parameter[]].zfill, parameter[binary_operation[constant[12] - call[name[len], parameter[name[m]]]]]] + name[m]]
variable[G] assign[=] call[name[np].array, parameter[tuple[[<ast.Tuple object at 0x7da1b08d5a20>, <ast.Tuple object at 0x7da1b08d5570>, <ast.Tuple object at 0x7da1b08d7910>, <ast.Tuple object at 0x7da1b08d6950>]]]]
variable[Gs] assign[=] binary_operation[name[G] + name[G].T]
variable[v] assign[=] call[name[Gs]][tuple[[<ast.Constant object at 0x7da1b08d7df0>, <ast.Slice object at 0x7da1b08d6c80>]]]
for taget[name[j]] in starred[call[name[range], parameter[constant[2]]]] begin[:]
variable[v] assign[=] binary_operation[call[name[np].any, parameter[call[name[Gs]][tuple[[<ast.Compare object at 0x7da1b08d5a80>, <ast.Slice object at 0x7da1b08d7c40>]]]]] + name[v]]
if call[name[np].all, parameter[name[v]]] begin[:]
variable[G2] assign[=] compare[call[name[np].dot, parameter[name[G], name[G]]] not_equal[!=] constant[0]]
variable[ko] assign[=] call[name[np].sum, parameter[name[G]]]
variable[ki] assign[=] call[name[np].sum, parameter[name[G]]]
variable[ko2] assign[=] call[name[np].sum, parameter[name[G2]]]
variable[ki2] assign[=] call[name[np].sum, parameter[name[G2]]]
variable[u] assign[=] call[name[np].array, parameter[tuple[[<ast.Name object at 0x7da1b08d7e80>, <ast.Name object at 0x7da1b08d6ce0>, <ast.Name object at 0x7da1b08d7520>, <ast.Name object at 0x7da1b08d4af0>]]]].T
name[cl].flat assign[=] call[name[u]][call[name[np].lexsort, parameter[tuple[[<ast.Name object at 0x7da1b08d72b0>, <ast.Name object at 0x7da1b08d7580>, <ast.Name object at 0x7da1b08d61d0>, <ast.Name object at 0x7da1b08d60e0>]]]]]
call[name[CL]][tuple[[<ast.Name object at 0x7da1b08d6b90>, <ast.Slice object at 0x7da1b08d7f40>]]] assign[=] name[cl]
call[name[M]][tuple[[<ast.Name object at 0x7da1b08d6110>, <ast.Slice object at 0x7da1b08d6290>]]] assign[=] call[name[np].array, parameter[tuple[[<ast.Subscript object at 0x7da1b08d7a30>, <ast.Subscript object at 0x7da1b08d6680>, <ast.Subscript object at 0x7da1b08d6020>]]]].flat
<ast.AugAssign object at 0x7da1b08d40d0>
<ast.Tuple object at 0x7da1b08d5bd0> assign[=] call[name[np].unique, parameter[call[name[CL].view, parameter[binary_operation[name[CL].dtype.descr * call[name[CL].shape][constant[1]]]]]]]
<ast.AugAssign object at 0x7da1b08d5090>
variable[ix] assign[=] call[name[np].argsort, parameter[name[ID]]]
variable[ID] assign[=] call[name[ID]][name[ix]]
variable[M] assign[=] call[name[M]][tuple[[<ast.Name object at 0x7da1b08d7070>, <ast.Slice object at 0x7da1b08d74f0>]]]
variable[N] assign[=] call[name[np].sum, parameter[name[M]]]
variable[Mn] assign[=] call[name[np].array, parameter[call[name[np].sum, parameter[binary_operation[call[name[np].tile, parameter[call[name[np].power, parameter[constant[10], call[name[np].arange, parameter[constant[11], <ast.UnaryOp object at 0x7da1b08d5ab0>, <ast.UnaryOp object at 0x7da1b08d6b00>]]]], tuple[[<ast.Subscript object at 0x7da1b083ffa0>, <ast.Constant object at 0x7da1b084ef20>]]]] * name[M]]]]]]
return[tuple[[<ast.Name object at 0x7da1b084d780>, <ast.Name object at 0x7da1b084f6d0>, <ast.Name object at 0x7da1b084d0c0>, <ast.Name object at 0x7da1b084f2b0>]]]
variable[dir] assign[=] call[name[os].path.dirname, parameter[name[__file__]]]
variable[fname] assign[=] call[name[os].path.join, parameter[name[dir], name[motiflib]]]
if call[name[os].path.exists, parameter[name[fname]]] begin[:]
call[name[print], parameter[constant[motif34lib already exists]]]
return[None]
<ast.Tuple object at 0x7da1b084f220> assign[=] call[name[motif3generate], parameter[]]
<ast.Tuple object at 0x7da1b084f9d0> assign[=] call[name[motif4generate], parameter[]]
call[name[io].savemat, parameter[name[fname]]] | keyword[def] identifier[make_motif34lib] ():
literal[string]
keyword[from] identifier[scipy] keyword[import] identifier[io]
keyword[import] identifier[os]
keyword[def] identifier[motif3generate] ():
identifier[n] = literal[int]
identifier[M] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[dtype] = identifier[bool] )
identifier[CL] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[dtype] = identifier[np] . identifier[uint8] )
identifier[cl] = identifier[np] . identifier[zeros] (( literal[int] ,), identifier[dtype] = identifier[np] . identifier[uint8] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ** literal[int] ):
identifier[m] = literal[string] . identifier[format] ( identifier[i] )
identifier[m] = identifier[str] (). identifier[zfill] ( literal[int] - identifier[len] ( identifier[m] ))+ identifier[m]
identifier[G] = identifier[np] . identifier[array] ((( literal[int] , identifier[m] [ literal[int] ], identifier[m] [ literal[int] ]),( identifier[m] [ literal[int] ], literal[int] , identifier[m] [ literal[int] ]),
( identifier[m] [ literal[int] ], identifier[m] [ literal[int] ], literal[int] )), identifier[dtype] = identifier[int] )
identifier[ko] = identifier[np] . identifier[sum] ( identifier[G] , identifier[axis] = literal[int] )
identifier[ki] = identifier[np] . identifier[sum] ( identifier[G] , identifier[axis] = literal[int] )
keyword[if] identifier[np] . identifier[all] ( identifier[ko] + identifier[ki] ):
identifier[u] = identifier[np] . identifier[array] (( identifier[ko] , identifier[ki] )). identifier[T]
identifier[cl] . identifier[flat] = identifier[u] [ identifier[np] . identifier[lexsort] (( identifier[ki] , identifier[ko] ))]
identifier[CL] [ identifier[n] ,:]= identifier[cl]
identifier[M] [ identifier[n] ,:]= identifier[np] . identifier[array] (( identifier[G] . identifier[T] . identifier[flat] [ literal[int] : literal[int] ], identifier[G] . identifier[T] . identifier[flat] [ literal[int] : literal[int] ])). identifier[flat]
identifier[n] += literal[int]
identifier[_] , identifier[ID] = identifier[np] . identifier[unique] (
identifier[CL] . identifier[view] ( identifier[CL] . identifier[dtype] . identifier[descr] * identifier[CL] . identifier[shape] [ literal[int] ]), identifier[return_inverse] = keyword[True] )
identifier[ID] += literal[int]
identifier[id_mika] =( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] )
identifier[id_olaf] =(- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] )
keyword[for] identifier[mika] , identifier[olaf] keyword[in] identifier[zip] ( identifier[id_mika] , identifier[id_olaf] ):
identifier[ID] [ identifier[ID] == identifier[mika] ]= identifier[olaf]
identifier[ID] = identifier[np] . identifier[abs] ( identifier[ID] )
identifier[ix] = identifier[np] . identifier[argsort] ( identifier[ID] )
identifier[ID] = identifier[ID] [ identifier[ix] ]
identifier[M] = identifier[M] [ identifier[ix] ,:]
identifier[N] = identifier[np] . identifier[squeeze] ( identifier[np] . identifier[sum] ( identifier[M] , identifier[axis] = literal[int] ))
identifier[Mn] = identifier[np] . identifier[array] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[tile] ( identifier[np] . identifier[power] ( literal[int] , identifier[np] . identifier[arange] ( literal[int] ,- literal[int] ,- literal[int] )),
( identifier[M] . identifier[shape] [ literal[int] ], literal[int] ))* identifier[M] , identifier[axis] = literal[int] ), identifier[dtype] = identifier[np] . identifier[uint32] )
keyword[return] identifier[M] , identifier[Mn] , identifier[ID] , identifier[N]
keyword[def] identifier[motif4generate] ():
identifier[n] = literal[int]
identifier[M] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[dtype] = identifier[bool] )
identifier[CL] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[dtype] = identifier[np] . identifier[uint8] )
identifier[cl] = identifier[np] . identifier[zeros] (( literal[int] ,), identifier[dtype] = identifier[np] . identifier[uint8] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ** literal[int] ):
identifier[m] = literal[string] . identifier[format] ( identifier[i] )
identifier[m] = identifier[str] (). identifier[zfill] ( literal[int] - identifier[len] ( identifier[m] ))+ identifier[m]
identifier[G] = identifier[np] . identifier[array] ((( literal[int] , identifier[m] [ literal[int] ], identifier[m] [ literal[int] ], identifier[m] [ literal[int] ]),( identifier[m] [ literal[int] ], literal[int] , identifier[m] [ literal[int] ], identifier[m] [ literal[int] ]),
( identifier[m] [ literal[int] ], identifier[m] [ literal[int] ], literal[int] , identifier[m] [ literal[int] ]),( identifier[m] [ literal[int] ], identifier[m] [ literal[int] ], identifier[m] [ literal[int] ], literal[int] )), identifier[dtype] = identifier[int] )
identifier[Gs] = identifier[G] + identifier[G] . identifier[T]
identifier[v] = identifier[Gs] [ literal[int] ,:]
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] ):
identifier[v] = identifier[np] . identifier[any] ( identifier[Gs] [ identifier[v] != literal[int] ,:], identifier[axis] = literal[int] )+ identifier[v]
keyword[if] identifier[np] . identifier[all] ( identifier[v] ):
identifier[G2] = identifier[np] . identifier[dot] ( identifier[G] , identifier[G] )!= literal[int]
identifier[ko] = identifier[np] . identifier[sum] ( identifier[G] , identifier[axis] = literal[int] )
identifier[ki] = identifier[np] . identifier[sum] ( identifier[G] , identifier[axis] = literal[int] )
identifier[ko2] = identifier[np] . identifier[sum] ( identifier[G2] , identifier[axis] = literal[int] )
identifier[ki2] = identifier[np] . identifier[sum] ( identifier[G2] , identifier[axis] = literal[int] )
identifier[u] = identifier[np] . identifier[array] (( identifier[ki] , identifier[ko] , identifier[ki2] , identifier[ko2] )). identifier[T]
identifier[cl] . identifier[flat] = identifier[u] [ identifier[np] . identifier[lexsort] (( identifier[ko2] , identifier[ki2] , identifier[ko] , identifier[ki] ))]
identifier[CL] [ identifier[n] ,:]= identifier[cl]
identifier[M] [ identifier[n] ,:]= identifier[np] . identifier[array] (( identifier[G] . identifier[T] . identifier[flat] [ literal[int] : literal[int] ], identifier[G] . identifier[T] . identifier[flat] [ literal[int] : literal[int] ],
identifier[G] . identifier[T] . identifier[flat] [ literal[int] : literal[int] ])). identifier[flat]
identifier[n] += literal[int]
identifier[_] , identifier[ID] = identifier[np] . identifier[unique] (
identifier[CL] . identifier[view] ( identifier[CL] . identifier[dtype] . identifier[descr] * identifier[CL] . identifier[shape] [ literal[int] ]), identifier[return_inverse] = keyword[True] )
identifier[ID] += literal[int]
identifier[ix] = identifier[np] . identifier[argsort] ( identifier[ID] )
identifier[ID] = identifier[ID] [ identifier[ix] ]
identifier[M] = identifier[M] [ identifier[ix] ,:]
identifier[N] = identifier[np] . identifier[sum] ( identifier[M] , identifier[axis] = literal[int] )
identifier[Mn] = identifier[np] . identifier[array] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[tile] ( identifier[np] . identifier[power] ( literal[int] , identifier[np] . identifier[arange] ( literal[int] ,- literal[int] ,- literal[int] )),
( identifier[M] . identifier[shape] [ literal[int] ], literal[int] ))* identifier[M] , identifier[axis] = literal[int] ), identifier[dtype] = identifier[np] . identifier[uint64] )
keyword[return] identifier[M] , identifier[Mn] , identifier[ID] , identifier[N]
identifier[dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , identifier[motiflib] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[fname] ):
identifier[print] ( literal[string] )
keyword[return]
identifier[m3] , identifier[m3n] , identifier[id3] , identifier[n3] = identifier[motif3generate] ()
identifier[m4] , identifier[m4n] , identifier[id4] , identifier[n4] = identifier[motif4generate] ()
identifier[io] . identifier[savemat] ( identifier[fname] , identifier[mdict] ={ literal[string] : identifier[m3] , literal[string] : identifier[m3n] , literal[string] : identifier[id3] , literal[string] : identifier[n3] ,
literal[string] : identifier[m4] , literal[string] : identifier[m4n] , literal[string] : identifier[id4] , literal[string] : identifier[n4] }) | def make_motif34lib():
"""
This function generates the motif34lib.mat library required for all
other motif computations. Not to be called externally.
"""
from scipy import io
import os
def motif3generate():
n = 0
M = np.zeros((54, 6), dtype=bool) # isomorphs
# canonical labels (predecssors of IDs)
CL = np.zeros((54, 6), dtype=np.uint8)
cl = np.zeros((6,), dtype=np.uint8)
for i in range(2 ** 6): # loop through all subgraphs
m = '{0:b}'.format(i)
m = str().zfill(6 - len(m)) + m
G = np.array(((0, m[2], m[4]), (m[0], 0, m[5]), (m[1], m[3], 0)), dtype=int)
ko = np.sum(G, axis=1)
ki = np.sum(G, axis=0)
if np.all(ko + ki): # if subgraph weakly connected
u = np.array((ko, ki)).T
cl.flat = u[np.lexsort((ki, ko))]
CL[n, :] = cl # assign motif label to isomorph
M[n, :] = np.array((G.T.flat[1:4], G.T.flat[5:8])).flat
n += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# convert CLs into motif IDs
(_, ID) = np.unique(CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
ID += 1
# convert IDs into sporns & kotter classification
id_mika = (1, 3, 4, 6, 7, 8, 11)
id_olaf = (-3, -6, -1, -11, -4, -7, -8)
for (mika, olaf) in zip(id_mika, id_olaf):
ID[ID == mika] = olaf # depends on [control=['for'], data=[]]
ID = np.abs(ID)
ix = np.argsort(ID)
ID = ID[ix] # sort IDs
M = M[ix, :] # sort isomorphs
N = np.squeeze(np.sum(M, axis=1)) # number of edges
Mn = np.array(np.sum(np.tile(np.power(10, np.arange(5, -1, -1)), (M.shape[0], 1)) * M, axis=1), dtype=np.uint32)
return (M, Mn, ID, N)
def motif4generate():
n = 0
M = np.zeros((3834, 12), dtype=bool) # isomorphs
CL = np.zeros((3834, 16), dtype=np.uint8) # canonical labels
cl = np.zeros((16,), dtype=np.uint8)
for i in range(2 ** 12): # loop through all subgraphs
m = '{0:b}'.format(i)
m = str().zfill(12 - len(m)) + m
G = np.array(((0, m[3], m[6], m[9]), (m[0], 0, m[7], m[10]), (m[1], m[4], 0, m[11]), (m[2], m[5], m[8], 0)), dtype=int)
Gs = G + G.T
v = Gs[0, :]
for j in range(2):
v = np.any(Gs[v != 0, :], axis=0) + v # depends on [control=['for'], data=[]]
if np.all(v): # if subgraph weakly connected
G2 = np.dot(G, G) != 0
ko = np.sum(G, axis=1)
ki = np.sum(G, axis=0)
ko2 = np.sum(G2, axis=1)
ki2 = np.sum(G2, axis=0)
u = np.array((ki, ko, ki2, ko2)).T
cl.flat = u[np.lexsort((ko2, ki2, ko, ki))]
CL[n, :] = cl # assign motif label to isomorph
M[n, :] = np.array((G.T.flat[1:5], G.T.flat[6:10], G.T.flat[11:15])).flat
n += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# convert CLs into motif IDs
(_, ID) = np.unique(CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
ID += 1
ix = np.argsort(ID)
ID = ID[ix] # sort IDs
M = M[ix, :] # sort isomorphs
N = np.sum(M, axis=1) # number of edges
Mn = np.array(np.sum(np.tile(np.power(10, np.arange(11, -1, -1)), (M.shape[0], 1)) * M, axis=1), dtype=np.uint64)
return (M, Mn, ID, N)
dir = os.path.dirname(__file__)
fname = os.path.join(dir, motiflib)
if os.path.exists(fname):
print('motif34lib already exists')
return # depends on [control=['if'], data=[]]
(m3, m3n, id3, n3) = motif3generate()
(m4, m4n, id4, n4) = motif4generate()
io.savemat(fname, mdict={'m3': m3, 'm3n': m3n, 'id3': id3, 'n3': n3, 'm4': m4, 'm4n': m4n, 'id4': id4, 'n4': n4}) |
def login(self):
    """Logs into MAL and sets cookies appropriately.

    POSTs the stored credentials to MAL's login form; any cookies the
    server sets are retained on ``self.session`` for later requests.
    NOTE(review): the response status is never inspected, so a rejected
    login fails silently -- callers must verify authentication separately.

    :rtype: :class:`.Session`
    :return: The current session.
    """
    # POSTS a login to mal.
    mal_headers = {
        'Host': 'myanimelist.net',
    }
    mal_payload = {
        'username': self.username,
        'password': self.password,
        'cookie': 1,  # asks MAL to persist the session cookie
        'sublogin': 'Login'
    }
    self.session.headers.update(mal_headers)
    # Response object intentionally discarded; only the session cookies
    # the POST leaves behind matter here.
    self.session.post(u'http://myanimelist.net/login.php', data=mal_payload)
    return self
constant[Logs into MAL and sets cookies appropriately.
:rtype: :class:`.Session`
:return: The current session.
]
variable[mal_headers] assign[=] dictionary[[<ast.Constant object at 0x7da20e955b70>], [<ast.Constant object at 0x7da20e954340>]]
variable[mal_payload] assign[=] dictionary[[<ast.Constant object at 0x7da20e956350>, <ast.Constant object at 0x7da20e955000>, <ast.Constant object at 0x7da20e957100>, <ast.Constant object at 0x7da20e954d00>], [<ast.Attribute object at 0x7da20e954070>, <ast.Attribute object at 0x7da20e9558a0>, <ast.Constant object at 0x7da20e954730>, <ast.Constant object at 0x7da20e956500>]]
call[name[self].session.headers.update, parameter[name[mal_headers]]]
variable[r] assign[=] call[name[self].session.post, parameter[constant[http://myanimelist.net/login.php]]]
return[name[self]] | keyword[def] identifier[login] ( identifier[self] ):
literal[string]
identifier[mal_headers] ={
literal[string] : literal[string] ,
}
identifier[mal_payload] ={
literal[string] : identifier[self] . identifier[username] ,
literal[string] : identifier[self] . identifier[password] ,
literal[string] : literal[int] ,
literal[string] : literal[string]
}
identifier[self] . identifier[session] . identifier[headers] . identifier[update] ( identifier[mal_headers] )
identifier[r] = identifier[self] . identifier[session] . identifier[post] ( literal[string] , identifier[data] = identifier[mal_payload] )
keyword[return] identifier[self] | def login(self):
"""Logs into MAL and sets cookies appropriately.
:rtype: :class:`.Session`
:return: The current session.
"""
# POSTS a login to mal.
mal_headers = {'Host': 'myanimelist.net'}
mal_payload = {'username': self.username, 'password': self.password, 'cookie': 1, 'sublogin': 'Login'}
self.session.headers.update(mal_headers)
r = self.session.post(u'http://myanimelist.net/login.php', data=mal_payload)
return self |
def ip_to_geojson(ipaddress, name="Point"):
    """Build a GeoJSON FeatureCollection holding one point feature.

    Resolves *ipaddress* to coordinates via ``ip_to_geo`` and wraps the
    resulting longitude/latitude pair in a single named Point feature.
    """
    geo = ip_to_geo(ipaddress)
    feature = {
        "type": "Feature",
        "properties": {"name": name},
        "geometry": {
            "type": "Point",
            "coordinates": [geo["longitude"], geo["latitude"]],
        },
    }
    return {"type": "FeatureCollection", "features": [feature]}
constant[Generate GeoJSON for given IP address]
variable[geo] assign[=] call[name[ip_to_geo], parameter[name[ipaddress]]]
variable[point] assign[=] dictionary[[<ast.Constant object at 0x7da1b28ad900>, <ast.Constant object at 0x7da1b28ae230>], [<ast.Constant object at 0x7da1b28ac6a0>, <ast.List object at 0x7da1b28ad060>]]
return[name[point]] | keyword[def] identifier[ip_to_geojson] ( identifier[ipaddress] , identifier[name] = literal[string] ):
literal[string]
identifier[geo] = identifier[ip_to_geo] ( identifier[ipaddress] )
identifier[point] ={
literal[string] : literal[string] ,
literal[string] :[
{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[name]
},
literal[string] :{
literal[string] : literal[string] ,
literal[string] :[
identifier[geo] [ literal[string] ],
identifier[geo] [ literal[string] ]
]
}
}
]
}
keyword[return] identifier[point] | def ip_to_geojson(ipaddress, name='Point'):
"""Generate GeoJSON for given IP address"""
geo = ip_to_geo(ipaddress)
point = {'type': 'FeatureCollection', 'features': [{'type': 'Feature', 'properties': {'name': name}, 'geometry': {'type': 'Point', 'coordinates': [geo['longitude'], geo['latitude']]}}]}
return point |
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        # Skip the processor entirely when body processing is disabled.
        skip = cherrypy.request.process_request_body is False
        if not skip:
            fn(*args, **kwargs)
    return wrapped
constant[
A decorator to skip a processor function if process_request_body is False
]
def function[wrapped, parameter[]]:
if compare[name[cherrypy].request.process_request_body is_not constant[False]] begin[:]
call[name[fn], parameter[<ast.Starred object at 0x7da1b21ed330>]]
return[name[wrapped]] | keyword[def] identifier[process_request_body] ( identifier[fn] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapped] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[cherrypy] . identifier[request] . identifier[process_request_body] keyword[is] keyword[not] keyword[False] :
identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapped] | def process_request_body(fn):
"""
A decorator to skip a processor function if process_request_body is False
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs) # depends on [control=['if'], data=[]]
return wrapped |
async def info(self, ctx, *, member: discord.Member = None):
    """Shows info about a member.

    This cannot be used in private messages. If you don't specify
    a member then the info returned will be yours.
    """
    if member is None:
        member = ctx.message.author
    e = discord.Embed()
    # Insert a zero-width space after '@' so role names cannot ping.
    roles = [role.name.replace('@', '@\u200b') for role in member.roles]
    # Count how many servers visible to the bot this member appears in.
    shared = sum(1 for m in self.bot.get_all_members() if m.id == member.id)
    voice = member.voice_channel
    if voice is not None:
        other_people = len(voice.voice_members) - 1
        voice_fmt = '{} with {} others' if other_people else '{} by themselves'
        voice = voice_fmt.format(voice.name, other_people)
    else:
        voice = 'Not connected.'
    e.set_author(name=str(member), icon_url=member.avatar_url or member.default_avatar_url)
    # set_footer returns the embed, so this sets the embed's timestamp.
    e.set_footer(text='Member since').timestamp = member.joined_at
    e.add_field(name='ID', value=member.id)
    e.add_field(name='Servers', value='%s shared' % shared)
    e.add_field(name='Voice', value=voice)
    e.add_field(name='Created', value=member.created_at)
    e.add_field(name='Roles', value=', '.join(roles))
    e.colour = member.colour
    if member.avatar:
        e.set_image(url=member.avatar_url)
    await self.bot.say(embed=e)
literal[string]
identifier[channel] = identifier[ctx] . identifier[message] . identifier[channel]
keyword[if] identifier[member] keyword[is] keyword[None] :
identifier[member] = identifier[ctx] . identifier[message] . identifier[author]
identifier[e] = identifier[discord] . identifier[Embed] ()
identifier[roles] =[ identifier[role] . identifier[name] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[role] keyword[in] identifier[member] . identifier[roles] ]
identifier[shared] = identifier[sum] ( literal[int] keyword[for] identifier[m] keyword[in] identifier[self] . identifier[bot] . identifier[get_all_members] () keyword[if] identifier[m] . identifier[id] == identifier[member] . identifier[id] )
identifier[voice] = identifier[member] . identifier[voice_channel]
keyword[if] identifier[voice] keyword[is] keyword[not] keyword[None] :
identifier[other_people] = identifier[len] ( identifier[voice] . identifier[voice_members] )- literal[int]
identifier[voice_fmt] = literal[string] keyword[if] identifier[other_people] keyword[else] literal[string]
identifier[voice] = identifier[voice_fmt] . identifier[format] ( identifier[voice] . identifier[name] , identifier[other_people] )
keyword[else] :
identifier[voice] = literal[string]
identifier[e] . identifier[set_author] ( identifier[name] = identifier[str] ( identifier[member] ), identifier[icon_url] = identifier[member] . identifier[avatar_url] keyword[or] identifier[member] . identifier[default_avatar_url] )
identifier[e] . identifier[set_footer] ( identifier[text] = literal[string] ). identifier[timestamp] = identifier[member] . identifier[joined_at]
identifier[e] . identifier[add_field] ( identifier[name] = literal[string] , identifier[value] = identifier[member] . identifier[id] )
identifier[e] . identifier[add_field] ( identifier[name] = literal[string] , identifier[value] = literal[string] % identifier[shared] )
identifier[e] . identifier[add_field] ( identifier[name] = literal[string] , identifier[value] = identifier[voice] )
identifier[e] . identifier[add_field] ( identifier[name] = literal[string] , identifier[value] = identifier[member] . identifier[created_at] )
identifier[e] . identifier[add_field] ( identifier[name] = literal[string] , identifier[value] = literal[string] . identifier[join] ( identifier[roles] ))
identifier[e] . identifier[colour] = identifier[member] . identifier[colour]
keyword[if] identifier[member] . identifier[avatar] :
identifier[e] . identifier[set_image] ( identifier[url] = identifier[member] . identifier[avatar_url] )
keyword[await] identifier[self] . identifier[bot] . identifier[say] ( identifier[embed] = identifier[e] ) | async def info(self, ctx, *, member: discord.Member=None):
"""Shows info about a member.
This cannot be used in private messages. If you don't specify
a member then the info returned will be yours.
"""
channel = ctx.message.channel
if member is None:
member = ctx.message.author # depends on [control=['if'], data=['member']]
e = discord.Embed()
roles = [role.name.replace('@', '@\u200b') for role in member.roles]
shared = sum((1 for m in self.bot.get_all_members() if m.id == member.id))
voice = member.voice_channel
if voice is not None:
other_people = len(voice.voice_members) - 1
voice_fmt = '{} with {} others' if other_people else '{} by themselves'
voice = voice_fmt.format(voice.name, other_people) # depends on [control=['if'], data=['voice']]
else:
voice = 'Not connected.'
e.set_author(name=str(member), icon_url=member.avatar_url or member.default_avatar_url)
e.set_footer(text='Member since').timestamp = member.joined_at
e.add_field(name='ID', value=member.id)
e.add_field(name='Servers', value='%s shared' % shared)
e.add_field(name='Voice', value=voice)
e.add_field(name='Created', value=member.created_at)
e.add_field(name='Roles', value=', '.join(roles))
e.colour = member.colour
if member.avatar:
e.set_image(url=member.avatar_url) # depends on [control=['if'], data=[]]
await self.bot.say(embed=e) |
def bootstrap_styled(cls=None, add_meta=True, form_group=True,
input_class='form-control'):
"""
Wrap a widget to conform with Bootstrap's html control design.
Args:
input_class: Class to give to the rendered <input> control.
add_meta: bool:
"""
def real_decorator(cls):
class NewClass(cls): pass
NewClass.__name__ = cls.__name__
NewClass = custom_widget_wrapper(NewClass)
_call = NewClass.__call__
def call(*args, **kwargs):
if input_class:
kwargs.setdefault('class', input_class)
return _call(*args, **kwargs)
if add_meta: call = meta_wrapped(call)
if form_group: call = form_group_wrapped(call)
NewClass.__call__ = call
return NewClass
if cls:
# Allow calling decorator(cls) instead of decorator()(cls)
rv = real_decorator(cls)
return rv
return real_decorator | def function[bootstrap_styled, parameter[cls, add_meta, form_group, input_class]]:
constant[
Wrap a widget to conform with Bootstrap's html control design.
Args:
input_class: Class to give to the rendered <input> control.
add_meta: bool:
]
def function[real_decorator, parameter[cls]]:
class class[NewClass, parameter[]] begin[:]
pass
name[NewClass].__name__ assign[=] name[cls].__name__
variable[NewClass] assign[=] call[name[custom_widget_wrapper], parameter[name[NewClass]]]
variable[_call] assign[=] name[NewClass].__call__
def function[call, parameter[]]:
if name[input_class] begin[:]
call[name[kwargs].setdefault, parameter[constant[class], name[input_class]]]
return[call[name[_call], parameter[<ast.Starred object at 0x7da20e955cf0>]]]
if name[add_meta] begin[:]
variable[call] assign[=] call[name[meta_wrapped], parameter[name[call]]]
if name[form_group] begin[:]
variable[call] assign[=] call[name[form_group_wrapped], parameter[name[call]]]
name[NewClass].__call__ assign[=] name[call]
return[name[NewClass]]
if name[cls] begin[:]
variable[rv] assign[=] call[name[real_decorator], parameter[name[cls]]]
return[name[rv]]
return[name[real_decorator]] | keyword[def] identifier[bootstrap_styled] ( identifier[cls] = keyword[None] , identifier[add_meta] = keyword[True] , identifier[form_group] = keyword[True] ,
identifier[input_class] = literal[string] ):
literal[string]
keyword[def] identifier[real_decorator] ( identifier[cls] ):
keyword[class] identifier[NewClass] ( identifier[cls] ): keyword[pass]
identifier[NewClass] . identifier[__name__] = identifier[cls] . identifier[__name__]
identifier[NewClass] = identifier[custom_widget_wrapper] ( identifier[NewClass] )
identifier[_call] = identifier[NewClass] . identifier[__call__]
keyword[def] identifier[call] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[input_class] :
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[input_class] )
keyword[return] identifier[_call] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[add_meta] : identifier[call] = identifier[meta_wrapped] ( identifier[call] )
keyword[if] identifier[form_group] : identifier[call] = identifier[form_group_wrapped] ( identifier[call] )
identifier[NewClass] . identifier[__call__] = identifier[call]
keyword[return] identifier[NewClass]
keyword[if] identifier[cls] :
identifier[rv] = identifier[real_decorator] ( identifier[cls] )
keyword[return] identifier[rv]
keyword[return] identifier[real_decorator] | def bootstrap_styled(cls=None, add_meta=True, form_group=True, input_class='form-control'):
"""
Wrap a widget to conform with Bootstrap's html control design.
Args:
input_class: Class to give to the rendered <input> control.
add_meta: bool:
"""
def real_decorator(cls):
class NewClass(cls):
pass
NewClass.__name__ = cls.__name__
NewClass = custom_widget_wrapper(NewClass)
_call = NewClass.__call__
def call(*args, **kwargs):
if input_class:
kwargs.setdefault('class', input_class) # depends on [control=['if'], data=[]]
return _call(*args, **kwargs)
if add_meta:
call = meta_wrapped(call) # depends on [control=['if'], data=[]]
if form_group:
call = form_group_wrapped(call) # depends on [control=['if'], data=[]]
NewClass.__call__ = call
return NewClass
if cls:
# Allow calling decorator(cls) instead of decorator()(cls)
rv = real_decorator(cls)
return rv # depends on [control=['if'], data=[]]
return real_decorator |
def delete_user_jobs(session, job_ids):
"""
Remove a list of jobs from the currently authenticated user
"""
jobs_data = {
'jobs[]': job_ids
}
response = make_delete_request(session, 'self/jobs', json_data=jobs_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise UserJobsNotDeletedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) | def function[delete_user_jobs, parameter[session, job_ids]]:
constant[
Remove a list of jobs from the currently authenticated user
]
variable[jobs_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b26acac0>], [<ast.Name object at 0x7da1b26ac4f0>]]
variable[response] assign[=] call[name[make_delete_request], parameter[name[session], constant[self/jobs]]]
variable[json_data] assign[=] call[name[response].json, parameter[]]
if compare[name[response].status_code equal[==] constant[200]] begin[:]
return[call[name[json_data]][constant[status]]] | keyword[def] identifier[delete_user_jobs] ( identifier[session] , identifier[job_ids] ):
literal[string]
identifier[jobs_data] ={
literal[string] : identifier[job_ids]
}
identifier[response] = identifier[make_delete_request] ( identifier[session] , literal[string] , identifier[json_data] = identifier[jobs_data] )
identifier[json_data] = identifier[response] . identifier[json] ()
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] identifier[json_data] [ literal[string] ]
keyword[else] :
keyword[raise] identifier[UserJobsNotDeletedException] (
identifier[message] = identifier[json_data] [ literal[string] ],
identifier[error_code] = identifier[json_data] [ literal[string] ],
identifier[request_id] = identifier[json_data] [ literal[string] ]) | def delete_user_jobs(session, job_ids):
"""
Remove a list of jobs from the currently authenticated user
"""
jobs_data = {'jobs[]': job_ids}
response = make_delete_request(session, 'self/jobs', json_data=jobs_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status'] # depends on [control=['if'], data=[]]
else:
raise UserJobsNotDeletedException(message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id']) |
def get_instrument_title(self):
"""Return the current instrument title
"""
instrument = self.context.getInstrument()
if not instrument:
return ""
return api.get_title(instrument) | def function[get_instrument_title, parameter[self]]:
constant[Return the current instrument title
]
variable[instrument] assign[=] call[name[self].context.getInstrument, parameter[]]
if <ast.UnaryOp object at 0x7da1b2344e50> begin[:]
return[constant[]]
return[call[name[api].get_title, parameter[name[instrument]]]] | keyword[def] identifier[get_instrument_title] ( identifier[self] ):
literal[string]
identifier[instrument] = identifier[self] . identifier[context] . identifier[getInstrument] ()
keyword[if] keyword[not] identifier[instrument] :
keyword[return] literal[string]
keyword[return] identifier[api] . identifier[get_title] ( identifier[instrument] ) | def get_instrument_title(self):
"""Return the current instrument title
"""
instrument = self.context.getInstrument()
if not instrument:
return '' # depends on [control=['if'], data=[]]
return api.get_title(instrument) |
def download_user_playlists_by_search(self, user_name):
"""Download user's playlists by his/her name.
:params user_name: user name.
"""
try:
user = self.crawler.search_user(user_name, self.quiet)
except RequestException as exception:
click.echo(exception)
else:
self.download_user_playlists_by_id(user.user_id) | def function[download_user_playlists_by_search, parameter[self, user_name]]:
constant[Download user's playlists by his/her name.
:params user_name: user name.
]
<ast.Try object at 0x7da1b085a620> | keyword[def] identifier[download_user_playlists_by_search] ( identifier[self] , identifier[user_name] ):
literal[string]
keyword[try] :
identifier[user] = identifier[self] . identifier[crawler] . identifier[search_user] ( identifier[user_name] , identifier[self] . identifier[quiet] )
keyword[except] identifier[RequestException] keyword[as] identifier[exception] :
identifier[click] . identifier[echo] ( identifier[exception] )
keyword[else] :
identifier[self] . identifier[download_user_playlists_by_id] ( identifier[user] . identifier[user_id] ) | def download_user_playlists_by_search(self, user_name):
"""Download user's playlists by his/her name.
:params user_name: user name.
"""
try:
user = self.crawler.search_user(user_name, self.quiet) # depends on [control=['try'], data=[]]
except RequestException as exception:
click.echo(exception) # depends on [control=['except'], data=['exception']]
else:
self.download_user_playlists_by_id(user.user_id) |
def to_items(self, func=str):
"""
Contruct a list of dictionary items.
The items are normalized using:
- A sort function by key (for consistent results)
- A transformation function for values
The transformation function will default to `str`, which is a good choice when encoding values
as part of a response; this requires that complex types (UUID, Enum, etc.) have a valid string
encoding.
The transformation function should be set to `identity` in cases where raw values are desired;
this is normally necessary when passing page data to controller functions as kwargs.
"""
return [
(key, func(self.kwargs[key]))
for key in sorted(self.kwargs.keys())
] | def function[to_items, parameter[self, func]]:
constant[
Contruct a list of dictionary items.
The items are normalized using:
- A sort function by key (for consistent results)
- A transformation function for values
The transformation function will default to `str`, which is a good choice when encoding values
as part of a response; this requires that complex types (UUID, Enum, etc.) have a valid string
encoding.
The transformation function should be set to `identity` in cases where raw values are desired;
this is normally necessary when passing page data to controller functions as kwargs.
]
return[<ast.ListComp object at 0x7da1b0efbd90>] | keyword[def] identifier[to_items] ( identifier[self] , identifier[func] = identifier[str] ):
literal[string]
keyword[return] [
( identifier[key] , identifier[func] ( identifier[self] . identifier[kwargs] [ identifier[key] ]))
keyword[for] identifier[key] keyword[in] identifier[sorted] ( identifier[self] . identifier[kwargs] . identifier[keys] ())
] | def to_items(self, func=str):
"""
Contruct a list of dictionary items.
The items are normalized using:
- A sort function by key (for consistent results)
- A transformation function for values
The transformation function will default to `str`, which is a good choice when encoding values
as part of a response; this requires that complex types (UUID, Enum, etc.) have a valid string
encoding.
The transformation function should be set to `identity` in cases where raw values are desired;
this is normally necessary when passing page data to controller functions as kwargs.
"""
return [(key, func(self.kwargs[key])) for key in sorted(self.kwargs.keys())] |
def addfield(table, field, value=None, index=None, missing=None):
"""
Add a field with a fixed or calculated value. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['M', 12],
... ['F', 34],
... ['-', 56]]
>>> # using a fixed value
... table2 = etl.addfield(table1, 'baz', 42)
>>> table2
+-----+-----+-----+
| foo | bar | baz |
+=====+=====+=====+
| 'M' | 12 | 42 |
+-----+-----+-----+
| 'F' | 34 | 42 |
+-----+-----+-----+
| '-' | 56 | 42 |
+-----+-----+-----+
>>> # calculating the value
... table2 = etl.addfield(table1, 'baz', lambda rec: rec['bar'] * 2)
>>> table2
+-----+-----+-----+
| foo | bar | baz |
+=====+=====+=====+
| 'M' | 12 | 24 |
+-----+-----+-----+
| 'F' | 34 | 68 |
+-----+-----+-----+
| '-' | 56 | 112 |
+-----+-----+-----+
Use the `index` parameter to control the position of the inserted field.
"""
return AddFieldView(table, field, value=value, index=index,
missing=missing) | def function[addfield, parameter[table, field, value, index, missing]]:
constant[
Add a field with a fixed or calculated value. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['M', 12],
... ['F', 34],
... ['-', 56]]
>>> # using a fixed value
... table2 = etl.addfield(table1, 'baz', 42)
>>> table2
+-----+-----+-----+
| foo | bar | baz |
+=====+=====+=====+
| 'M' | 12 | 42 |
+-----+-----+-----+
| 'F' | 34 | 42 |
+-----+-----+-----+
| '-' | 56 | 42 |
+-----+-----+-----+
>>> # calculating the value
... table2 = etl.addfield(table1, 'baz', lambda rec: rec['bar'] * 2)
>>> table2
+-----+-----+-----+
| foo | bar | baz |
+=====+=====+=====+
| 'M' | 12 | 24 |
+-----+-----+-----+
| 'F' | 34 | 68 |
+-----+-----+-----+
| '-' | 56 | 112 |
+-----+-----+-----+
Use the `index` parameter to control the position of the inserted field.
]
return[call[name[AddFieldView], parameter[name[table], name[field]]]] | keyword[def] identifier[addfield] ( identifier[table] , identifier[field] , identifier[value] = keyword[None] , identifier[index] = keyword[None] , identifier[missing] = keyword[None] ):
literal[string]
keyword[return] identifier[AddFieldView] ( identifier[table] , identifier[field] , identifier[value] = identifier[value] , identifier[index] = identifier[index] ,
identifier[missing] = identifier[missing] ) | def addfield(table, field, value=None, index=None, missing=None):
"""
Add a field with a fixed or calculated value. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['M', 12],
... ['F', 34],
... ['-', 56]]
>>> # using a fixed value
... table2 = etl.addfield(table1, 'baz', 42)
>>> table2
+-----+-----+-----+
| foo | bar | baz |
+=====+=====+=====+
| 'M' | 12 | 42 |
+-----+-----+-----+
| 'F' | 34 | 42 |
+-----+-----+-----+
| '-' | 56 | 42 |
+-----+-----+-----+
>>> # calculating the value
... table2 = etl.addfield(table1, 'baz', lambda rec: rec['bar'] * 2)
>>> table2
+-----+-----+-----+
| foo | bar | baz |
+=====+=====+=====+
| 'M' | 12 | 24 |
+-----+-----+-----+
| 'F' | 34 | 68 |
+-----+-----+-----+
| '-' | 56 | 112 |
+-----+-----+-----+
Use the `index` parameter to control the position of the inserted field.
"""
return AddFieldView(table, field, value=value, index=index, missing=missing) |
def setup(self, config_file):
""" Setup settings from a JSON config file """
def get_config_and_warn(key, default, abort=False):
value = config.get(key, None)
if not value:
print(f"Cannot find {key} in settings, using default value: {default}")
value = default
if abort:
sys.exit(-1)
return value
if not self.DATA_DIR_PATH:
# Setup default data dir
self.set_data_dir(None)
with open(config_file) as data_file:
data = json.load(data_file)
config = data['ProtocolConfiguration']
self.MAGIC = config['Magic']
self.ADDRESS_VERSION = config['AddressVersion']
self.STANDBY_VALIDATORS = config['StandbyValidators']
self.SEED_LIST = config['SeedList']
self.RPC_LIST = config['RPCList']
fees = config['SystemFee']
self.ALL_FEES = fees
self.ENROLLMENT_TX_FEE = fees['EnrollmentTransaction']
self.ISSUE_TX_FEE = fees['IssueTransaction']
self.PUBLISH_TX_FEE = fees['PublishTransaction']
self.REGISTER_TX_FEE = fees['RegisterTransaction']
config = data['ApplicationConfiguration']
self.LEVELDB_PATH = config['DataDirectoryPath']
self.RPC_PORT = int(config['RPCPort'])
self.NODE_PORT = int(config['NodePort'])
self.WS_PORT = config['WsPort']
self.URI_PREFIX = config['UriPrefix']
self.ACCEPT_INCOMING_PEERS = config.get('AcceptIncomingPeers', False)
self.BOOTSTRAP_NAME = get_config_and_warn('BootstrapName', "mainnet")
self.BOOTSTRAP_LOCATIONS = get_config_and_warn('BootstrapFiles', "abort", abort=True)
Helper.ADDRESS_VERSION = self.ADDRESS_VERSION
self.USE_DEBUG_STORAGE = config.get('DebugStorage', False)
self.DEBUG_STORAGE_PATH = config.get('DebugStoragePath', 'Chains/debugstorage')
self.NOTIFICATION_DB_PATH = config.get('NotificationDataPath', 'Chains/notification_data')
self.SERVICE_ENABLED = config.get('ServiceEnabled', self.ACCEPT_INCOMING_PEERS)
self.COMPILER_NEP_8 = config.get('CompilerNep8', False)
self.REST_SERVER = config.get('RestServer', self.DEFAULT_REST_SERVER)
self.RPC_SERVER = config.get('RPCServer', self.DEFAULT_RPC_SERVER) | def function[setup, parameter[self, config_file]]:
constant[ Setup settings from a JSON config file ]
def function[get_config_and_warn, parameter[key, default, abort]]:
variable[value] assign[=] call[name[config].get, parameter[name[key], constant[None]]]
if <ast.UnaryOp object at 0x7da18dc9b550> begin[:]
call[name[print], parameter[<ast.JoinedStr object at 0x7da18dc9b160>]]
variable[value] assign[=] name[default]
if name[abort] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18dc998d0>]]
return[name[value]]
if <ast.UnaryOp object at 0x7da18dc98610> begin[:]
call[name[self].set_data_dir, parameter[constant[None]]]
with call[name[open], parameter[name[config_file]]] begin[:]
variable[data] assign[=] call[name[json].load, parameter[name[data_file]]]
variable[config] assign[=] call[name[data]][constant[ProtocolConfiguration]]
name[self].MAGIC assign[=] call[name[config]][constant[Magic]]
name[self].ADDRESS_VERSION assign[=] call[name[config]][constant[AddressVersion]]
name[self].STANDBY_VALIDATORS assign[=] call[name[config]][constant[StandbyValidators]]
name[self].SEED_LIST assign[=] call[name[config]][constant[SeedList]]
name[self].RPC_LIST assign[=] call[name[config]][constant[RPCList]]
variable[fees] assign[=] call[name[config]][constant[SystemFee]]
name[self].ALL_FEES assign[=] name[fees]
name[self].ENROLLMENT_TX_FEE assign[=] call[name[fees]][constant[EnrollmentTransaction]]
name[self].ISSUE_TX_FEE assign[=] call[name[fees]][constant[IssueTransaction]]
name[self].PUBLISH_TX_FEE assign[=] call[name[fees]][constant[PublishTransaction]]
name[self].REGISTER_TX_FEE assign[=] call[name[fees]][constant[RegisterTransaction]]
variable[config] assign[=] call[name[data]][constant[ApplicationConfiguration]]
name[self].LEVELDB_PATH assign[=] call[name[config]][constant[DataDirectoryPath]]
name[self].RPC_PORT assign[=] call[name[int], parameter[call[name[config]][constant[RPCPort]]]]
name[self].NODE_PORT assign[=] call[name[int], parameter[call[name[config]][constant[NodePort]]]]
name[self].WS_PORT assign[=] call[name[config]][constant[WsPort]]
name[self].URI_PREFIX assign[=] call[name[config]][constant[UriPrefix]]
name[self].ACCEPT_INCOMING_PEERS assign[=] call[name[config].get, parameter[constant[AcceptIncomingPeers], constant[False]]]
name[self].BOOTSTRAP_NAME assign[=] call[name[get_config_and_warn], parameter[constant[BootstrapName], constant[mainnet]]]
name[self].BOOTSTRAP_LOCATIONS assign[=] call[name[get_config_and_warn], parameter[constant[BootstrapFiles], constant[abort]]]
name[Helper].ADDRESS_VERSION assign[=] name[self].ADDRESS_VERSION
name[self].USE_DEBUG_STORAGE assign[=] call[name[config].get, parameter[constant[DebugStorage], constant[False]]]
name[self].DEBUG_STORAGE_PATH assign[=] call[name[config].get, parameter[constant[DebugStoragePath], constant[Chains/debugstorage]]]
name[self].NOTIFICATION_DB_PATH assign[=] call[name[config].get, parameter[constant[NotificationDataPath], constant[Chains/notification_data]]]
name[self].SERVICE_ENABLED assign[=] call[name[config].get, parameter[constant[ServiceEnabled], name[self].ACCEPT_INCOMING_PEERS]]
name[self].COMPILER_NEP_8 assign[=] call[name[config].get, parameter[constant[CompilerNep8], constant[False]]]
name[self].REST_SERVER assign[=] call[name[config].get, parameter[constant[RestServer], name[self].DEFAULT_REST_SERVER]]
name[self].RPC_SERVER assign[=] call[name[config].get, parameter[constant[RPCServer], name[self].DEFAULT_RPC_SERVER]] | keyword[def] identifier[setup] ( identifier[self] , identifier[config_file] ):
literal[string]
keyword[def] identifier[get_config_and_warn] ( identifier[key] , identifier[default] , identifier[abort] = keyword[False] ):
identifier[value] = identifier[config] . identifier[get] ( identifier[key] , keyword[None] )
keyword[if] keyword[not] identifier[value] :
identifier[print] ( literal[string] )
identifier[value] = identifier[default]
keyword[if] identifier[abort] :
identifier[sys] . identifier[exit] (- literal[int] )
keyword[return] identifier[value]
keyword[if] keyword[not] identifier[self] . identifier[DATA_DIR_PATH] :
identifier[self] . identifier[set_data_dir] ( keyword[None] )
keyword[with] identifier[open] ( identifier[config_file] ) keyword[as] identifier[data_file] :
identifier[data] = identifier[json] . identifier[load] ( identifier[data_file] )
identifier[config] = identifier[data] [ literal[string] ]
identifier[self] . identifier[MAGIC] = identifier[config] [ literal[string] ]
identifier[self] . identifier[ADDRESS_VERSION] = identifier[config] [ literal[string] ]
identifier[self] . identifier[STANDBY_VALIDATORS] = identifier[config] [ literal[string] ]
identifier[self] . identifier[SEED_LIST] = identifier[config] [ literal[string] ]
identifier[self] . identifier[RPC_LIST] = identifier[config] [ literal[string] ]
identifier[fees] = identifier[config] [ literal[string] ]
identifier[self] . identifier[ALL_FEES] = identifier[fees]
identifier[self] . identifier[ENROLLMENT_TX_FEE] = identifier[fees] [ literal[string] ]
identifier[self] . identifier[ISSUE_TX_FEE] = identifier[fees] [ literal[string] ]
identifier[self] . identifier[PUBLISH_TX_FEE] = identifier[fees] [ literal[string] ]
identifier[self] . identifier[REGISTER_TX_FEE] = identifier[fees] [ literal[string] ]
identifier[config] = identifier[data] [ literal[string] ]
identifier[self] . identifier[LEVELDB_PATH] = identifier[config] [ literal[string] ]
identifier[self] . identifier[RPC_PORT] = identifier[int] ( identifier[config] [ literal[string] ])
identifier[self] . identifier[NODE_PORT] = identifier[int] ( identifier[config] [ literal[string] ])
identifier[self] . identifier[WS_PORT] = identifier[config] [ literal[string] ]
identifier[self] . identifier[URI_PREFIX] = identifier[config] [ literal[string] ]
identifier[self] . identifier[ACCEPT_INCOMING_PEERS] = identifier[config] . identifier[get] ( literal[string] , keyword[False] )
identifier[self] . identifier[BOOTSTRAP_NAME] = identifier[get_config_and_warn] ( literal[string] , literal[string] )
identifier[self] . identifier[BOOTSTRAP_LOCATIONS] = identifier[get_config_and_warn] ( literal[string] , literal[string] , identifier[abort] = keyword[True] )
identifier[Helper] . identifier[ADDRESS_VERSION] = identifier[self] . identifier[ADDRESS_VERSION]
identifier[self] . identifier[USE_DEBUG_STORAGE] = identifier[config] . identifier[get] ( literal[string] , keyword[False] )
identifier[self] . identifier[DEBUG_STORAGE_PATH] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[NOTIFICATION_DB_PATH] = identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[self] . identifier[SERVICE_ENABLED] = identifier[config] . identifier[get] ( literal[string] , identifier[self] . identifier[ACCEPT_INCOMING_PEERS] )
identifier[self] . identifier[COMPILER_NEP_8] = identifier[config] . identifier[get] ( literal[string] , keyword[False] )
identifier[self] . identifier[REST_SERVER] = identifier[config] . identifier[get] ( literal[string] , identifier[self] . identifier[DEFAULT_REST_SERVER] )
identifier[self] . identifier[RPC_SERVER] = identifier[config] . identifier[get] ( literal[string] , identifier[self] . identifier[DEFAULT_RPC_SERVER] ) | def setup(self, config_file):
""" Setup settings from a JSON config file """
def get_config_and_warn(key, default, abort=False):
value = config.get(key, None)
if not value:
print(f'Cannot find {key} in settings, using default value: {default}')
value = default
if abort:
sys.exit(-1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return value
if not self.DATA_DIR_PATH:
# Setup default data dir
self.set_data_dir(None) # depends on [control=['if'], data=[]]
with open(config_file) as data_file:
data = json.load(data_file) # depends on [control=['with'], data=['data_file']]
config = data['ProtocolConfiguration']
self.MAGIC = config['Magic']
self.ADDRESS_VERSION = config['AddressVersion']
self.STANDBY_VALIDATORS = config['StandbyValidators']
self.SEED_LIST = config['SeedList']
self.RPC_LIST = config['RPCList']
fees = config['SystemFee']
self.ALL_FEES = fees
self.ENROLLMENT_TX_FEE = fees['EnrollmentTransaction']
self.ISSUE_TX_FEE = fees['IssueTransaction']
self.PUBLISH_TX_FEE = fees['PublishTransaction']
self.REGISTER_TX_FEE = fees['RegisterTransaction']
config = data['ApplicationConfiguration']
self.LEVELDB_PATH = config['DataDirectoryPath']
self.RPC_PORT = int(config['RPCPort'])
self.NODE_PORT = int(config['NodePort'])
self.WS_PORT = config['WsPort']
self.URI_PREFIX = config['UriPrefix']
self.ACCEPT_INCOMING_PEERS = config.get('AcceptIncomingPeers', False)
self.BOOTSTRAP_NAME = get_config_and_warn('BootstrapName', 'mainnet')
self.BOOTSTRAP_LOCATIONS = get_config_and_warn('BootstrapFiles', 'abort', abort=True)
Helper.ADDRESS_VERSION = self.ADDRESS_VERSION
self.USE_DEBUG_STORAGE = config.get('DebugStorage', False)
self.DEBUG_STORAGE_PATH = config.get('DebugStoragePath', 'Chains/debugstorage')
self.NOTIFICATION_DB_PATH = config.get('NotificationDataPath', 'Chains/notification_data')
self.SERVICE_ENABLED = config.get('ServiceEnabled', self.ACCEPT_INCOMING_PEERS)
self.COMPILER_NEP_8 = config.get('CompilerNep8', False)
self.REST_SERVER = config.get('RestServer', self.DEFAULT_REST_SERVER)
self.RPC_SERVER = config.get('RPCServer', self.DEFAULT_RPC_SERVER) |
def eliot_friendly_generator_function(original):
"""
Decorate a generator function so that the Eliot action context is
preserved across ``yield`` expressions.
"""
@wraps(original)
def wrapper(*a, **kw):
# Keep track of whether the next value to deliver to the generator is
# a non-exception or an exception.
ok = True
# Keep track of the next value to deliver to the generator.
value_in = None
# Create the generator with a call to the generator function. This
# happens with whatever Eliot action context happens to be active,
# which is fine and correct and also irrelevant because no code in the
# generator function can run until we call send or throw on it.
gen = original(*a, **kw)
# Initialize the per-generator context to a copy of the current context.
context = copy_context()
while True:
try:
# Whichever way we invoke the generator, we will do it
# with the Eliot action context stack we've saved for it.
# Then the context manager will re-save it and restore the
# "outside" stack for us.
#
# Regarding the support of Twisted's inlineCallbacks-like
# functionality (see eliot.twisted.inline_callbacks):
#
# The invocation may raise the inlineCallbacks internal
# control flow exception _DefGen_Return. It is not wrong to
# just let that propagate upwards here but inlineCallbacks
# does think it is wrong. The behavior triggers a
# DeprecationWarning to try to get us to fix our code. We
# could explicitly handle and re-raise the _DefGen_Return but
# only at the expense of depending on a private Twisted API.
# For now, I'm opting to try to encourage Twisted to fix the
# situation (or at least not worsen it):
# https://twistedmatrix.com/trac/ticket/9590
#
# Alternatively, _DefGen_Return is only required on Python 2.
# When Python 2 support is dropped, this concern can be
# eliminated by always using `return value` instead of
# `returnValue(value)` (and adding the necessary logic to the
# StopIteration handler below).
def go():
if ok:
value_out = gen.send(value_in)
else:
value_out = gen.throw(*value_in)
# We have obtained a value from the generator. In
# giving it to us, it has given up control. Note this
# fact here. Importantly, this is within the
# generator's action context so that we get a good
# indication of where the yield occurred.
#
# This is noisy, enable only for debugging:
if wrapper.debug:
Message.log(message_type=u"yielded")
return value_out
value_out = context.run(go)
except StopIteration:
# When the generator raises this, it is signaling
# completion. Leave the loop.
break
else:
try:
# Pass the generator's result along to whoever is
# driving. Capture the result as the next value to
# send inward.
value_in = yield value_out
except:
# Or capture the exception if that's the flavor of the
# next value. This could possibly include GeneratorExit
# which turns out to be just fine because throwing it into
# the inner generator effectively propagates the close
# (and with the right context!) just as you would want.
# True, the GeneratorExit does get re-throwing out of the
# gen.throw call and hits _the_generator_context's
# contextmanager. But @contextmanager extremely
# conveniently eats it for us! Thanks, @contextmanager!
ok = False
value_in = exc_info()
else:
ok = True
wrapper.debug = False
return wrapper | def function[eliot_friendly_generator_function, parameter[original]]:
constant[
Decorate a generator function so that the Eliot action context is
preserved across ``yield`` expressions.
]
def function[wrapper, parameter[]]:
variable[ok] assign[=] constant[True]
variable[value_in] assign[=] constant[None]
variable[gen] assign[=] call[name[original], parameter[<ast.Starred object at 0x7da1b2346bf0>]]
variable[context] assign[=] call[name[copy_context], parameter[]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b23456c0>
name[wrapper].debug assign[=] constant[False]
return[name[wrapper]] | keyword[def] identifier[eliot_friendly_generator_function] ( identifier[original] ):
literal[string]
@ identifier[wraps] ( identifier[original] )
keyword[def] identifier[wrapper] (* identifier[a] ,** identifier[kw] ):
identifier[ok] = keyword[True]
identifier[value_in] = keyword[None]
identifier[gen] = identifier[original] (* identifier[a] ,** identifier[kw] )
identifier[context] = identifier[copy_context] ()
keyword[while] keyword[True] :
keyword[try] :
keyword[def] identifier[go] ():
keyword[if] identifier[ok] :
identifier[value_out] = identifier[gen] . identifier[send] ( identifier[value_in] )
keyword[else] :
identifier[value_out] = identifier[gen] . identifier[throw] (* identifier[value_in] )
keyword[if] identifier[wrapper] . identifier[debug] :
identifier[Message] . identifier[log] ( identifier[message_type] = literal[string] )
keyword[return] identifier[value_out]
identifier[value_out] = identifier[context] . identifier[run] ( identifier[go] )
keyword[except] identifier[StopIteration] :
keyword[break]
keyword[else] :
keyword[try] :
identifier[value_in] = keyword[yield] identifier[value_out]
keyword[except] :
identifier[ok] = keyword[False]
identifier[value_in] = identifier[exc_info] ()
keyword[else] :
identifier[ok] = keyword[True]
identifier[wrapper] . identifier[debug] = keyword[False]
keyword[return] identifier[wrapper] | def eliot_friendly_generator_function(original):
"""
Decorate a generator function so that the Eliot action context is
preserved across ``yield`` expressions.
"""
@wraps(original)
def wrapper(*a, **kw):
# Keep track of whether the next value to deliver to the generator is
# a non-exception or an exception.
ok = True
# Keep track of the next value to deliver to the generator.
value_in = None
# Create the generator with a call to the generator function. This
# happens with whatever Eliot action context happens to be active,
# which is fine and correct and also irrelevant because no code in the
# generator function can run until we call send or throw on it.
gen = original(*a, **kw)
# Initialize the per-generator context to a copy of the current context.
context = copy_context()
while True:
try:
# Whichever way we invoke the generator, we will do it
# with the Eliot action context stack we've saved for it.
# Then the context manager will re-save it and restore the
# "outside" stack for us.
#
# Regarding the support of Twisted's inlineCallbacks-like
# functionality (see eliot.twisted.inline_callbacks):
#
# The invocation may raise the inlineCallbacks internal
# control flow exception _DefGen_Return. It is not wrong to
# just let that propagate upwards here but inlineCallbacks
# does think it is wrong. The behavior triggers a
# DeprecationWarning to try to get us to fix our code. We
# could explicitly handle and re-raise the _DefGen_Return but
# only at the expense of depending on a private Twisted API.
# For now, I'm opting to try to encourage Twisted to fix the
# situation (or at least not worsen it):
# https://twistedmatrix.com/trac/ticket/9590
#
# Alternatively, _DefGen_Return is only required on Python 2.
# When Python 2 support is dropped, this concern can be
# eliminated by always using `return value` instead of
# `returnValue(value)` (and adding the necessary logic to the
# StopIteration handler below).
def go():
if ok:
value_out = gen.send(value_in) # depends on [control=['if'], data=[]]
else:
value_out = gen.throw(*value_in)
# We have obtained a value from the generator. In
# giving it to us, it has given up control. Note this
# fact here. Importantly, this is within the
# generator's action context so that we get a good
# indication of where the yield occurred.
#
# This is noisy, enable only for debugging:
if wrapper.debug:
Message.log(message_type=u'yielded') # depends on [control=['if'], data=[]]
return value_out
value_out = context.run(go) # depends on [control=['try'], data=[]]
except StopIteration:
# When the generator raises this, it is signaling
# completion. Leave the loop.
break # depends on [control=['except'], data=[]]
else:
try:
# Pass the generator's result along to whoever is
# driving. Capture the result as the next value to
# send inward.
value_in = (yield value_out) # depends on [control=['try'], data=[]]
except:
# Or capture the exception if that's the flavor of the
# next value. This could possibly include GeneratorExit
# which turns out to be just fine because throwing it into
# the inner generator effectively propagates the close
# (and with the right context!) just as you would want.
# True, the GeneratorExit does get re-throwing out of the
# gen.throw call and hits _the_generator_context's
# contextmanager. But @contextmanager extremely
# conveniently eats it for us! Thanks, @contextmanager!
ok = False
value_in = exc_info() # depends on [control=['except'], data=[]]
else:
ok = True # depends on [control=['while'], data=[]]
wrapper.debug = False
return wrapper |
def _read_callback(self, data=None):
    """Callback called when some data are read on the socket.

    The buffer is fed to the hiredis parser, then every reply that is now
    complete is drained from the parser. Each complete reply is either
    delivered to the next queued callback (normal client: 1 reply = 1
    callback) or, when no callback is pending, appended to the reply list
    and condition waiters are notified (pubsub client mode).

    Args:
        data (str): string (buffer) read on the socket, or None to only
            drain replies already buffered in the parser.
    """
    try:
        if data is not None:
            self.__reader.feed(data)
        # Drain all replies that are complete after this feed; gets()
        # returns False while the next reply is still incomplete.
        while True:
            reply = self.__reader.gets()
            if reply is not False:
                try:
                    callback = self.__callback_queue.popleft()
                    # normal client (1 reply = 1 callback)
                    callback(reply)
                except IndexError:
                    # pubsub clients: no pending callback for this reply,
                    # hand it to whoever waits on the condition instead.
                    self._reply_list.append(reply)
                    self._condition.notify_all()
            else:
                break
    except hiredis.ProtocolError:
        # something nasty occurred (corrupt stream => no way to recover)
        LOG.warning("corrupted stream => disconnect")
        self.disconnect()
constant[Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
]
<ast.Try object at 0x7da18dc07100> | keyword[def] identifier[_read_callback] ( identifier[self] , identifier[data] = keyword[None] ):
literal[string]
keyword[try] :
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[__reader] . identifier[feed] ( identifier[data] )
keyword[while] keyword[True] :
identifier[reply] = identifier[self] . identifier[__reader] . identifier[gets] ()
keyword[if] identifier[reply] keyword[is] keyword[not] keyword[False] :
keyword[try] :
identifier[callback] = identifier[self] . identifier[__callback_queue] . identifier[popleft] ()
identifier[callback] ( identifier[reply] )
keyword[except] identifier[IndexError] :
identifier[self] . identifier[_reply_list] . identifier[append] ( identifier[reply] )
identifier[self] . identifier[_condition] . identifier[notify_all] ()
keyword[else] :
keyword[break]
keyword[except] identifier[hiredis] . identifier[ProtocolError] :
identifier[LOG] . identifier[warning] ( literal[string] )
identifier[self] . identifier[disconnect] () | def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply) # depends on [control=['try'], data=[]]
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['reply']]
else:
break # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['data']] # depends on [control=['try'], data=[]]
except hiredis.ProtocolError:
# something nasty occured (corrupt stream => no way to recover)
LOG.warning('corrupted stream => disconnect')
self.disconnect() # depends on [control=['except'], data=[]] |
def register(linter):
    """Required method to auto register this checker."""
    checker = NewDbFieldWithDefaultChecker(linter)
    linter.register_checker(checker)
    if compat.LOAD_CONFIGURATION_SUPPORTED:
        # Recent pylint loads plugin configuration itself.
        return
    load_configuration(linter)
constant[Required method to auto register this checker.]
call[name[linter].register_checker, parameter[call[name[NewDbFieldWithDefaultChecker], parameter[name[linter]]]]]
if <ast.UnaryOp object at 0x7da20c6a84c0> begin[:]
call[name[load_configuration], parameter[name[linter]]] | keyword[def] identifier[register] ( identifier[linter] ):
literal[string]
identifier[linter] . identifier[register_checker] ( identifier[NewDbFieldWithDefaultChecker] ( identifier[linter] ))
keyword[if] keyword[not] identifier[compat] . identifier[LOAD_CONFIGURATION_SUPPORTED] :
identifier[load_configuration] ( identifier[linter] ) | def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(NewDbFieldWithDefaultChecker(linter))
if not compat.LOAD_CONFIGURATION_SUPPORTED:
load_configuration(linter) # depends on [control=['if'], data=[]] |
def tee(data, n=2):
    """
    Tee or "T" copy to help working with Stream instances as well as with
    numbers.

    Parameters
    ----------
    data :
        Input to be copied. Can be anything.
    n :
        Size of returned tuple. Defaults to 2.

    Returns
    -------
    Tuple of n independent Stream instances, if the input is a Stream or an
    iterator, otherwise a tuple with n times the same object.

    See Also
    --------
    thub :
        use Stream instances *almost* like constants in your equations.
    """
    if not isinstance(data, (Stream, Iterator)):
        # Plain values (e.g. numbers) need no copying: repeat the object.
        return (data,) * n
    # Wrap each independent itertools.tee branch back into a Stream.
    return tuple(map(Stream, it.tee(data, n)))
constant[
Tee or "T" copy to help working with Stream instances as well as with
numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Size of returned tuple. Defaults to 2.
Returns
-------
Tuple of n independent Stream instances, if the input is a Stream or an
iterator, otherwise a tuple with n times the same object.
See Also
--------
thub :
use Stream instances *almost* like constants in your equations.
]
if call[name[isinstance], parameter[name[data], tuple[[<ast.Name object at 0x7da1b06eb1c0>, <ast.Name object at 0x7da1b06eb340>]]]] begin[:]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b06e8af0>]]] | keyword[def] identifier[tee] ( identifier[data] , identifier[n] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[Stream] , identifier[Iterator] )):
keyword[return] identifier[tuple] ( identifier[Stream] ( identifier[cp] ) keyword[for] identifier[cp] keyword[in] identifier[it] . identifier[tee] ( identifier[data] , identifier[n] ))
keyword[else] :
keyword[return] identifier[tuple] ( identifier[data] keyword[for] identifier[unused] keyword[in] identifier[xrange] ( identifier[n] )) | def tee(data, n=2):
"""
Tee or "T" copy to help working with Stream instances as well as with
numbers.
Parameters
----------
data :
Input to be copied. Can be anything.
n :
Size of returned tuple. Defaults to 2.
Returns
-------
Tuple of n independent Stream instances, if the input is a Stream or an
iterator, otherwise a tuple with n times the same object.
See Also
--------
thub :
use Stream instances *almost* like constants in your equations.
"""
if isinstance(data, (Stream, Iterator)):
return tuple((Stream(cp) for cp in it.tee(data, n))) # depends on [control=['if'], data=[]]
else:
return tuple((data for unused in xrange(n))) |
def get_resource_attribute(resource_attr_id, **kwargs):
    """
    Get a specific resource attribute, by ID.

    Args:
        resource_attr_id: database ID of the ResourceAttr to fetch.
        **kwargs: ignored; kept for call-signature compatibility with the
            other service functions.

    Returns:
        The matching ResourceAttr row.

    Raises:
        ResourceNotFoundError: if no resource attribute with that ID exists.
    """
    resource_attr_qry = db.DBSession.query(ResourceAttr).filter(
        ResourceAttr.id == resource_attr_id,
    )

    resource_attr = resource_attr_qry.first()

    if resource_attr is None:
        # Interpolate the ID into the message: previously it was passed as
        # a second positional argument, leaving the '%s' placeholder
        # unformatted in the raised exception.
        raise ResourceNotFoundError(
            "Resource attribute %s does not exist" % resource_attr_id)

    return resource_attr
constant[
Get a specific resource attribte, by ID
If type_id is Gspecified, only
return the resource attributes within the type.
]
variable[resource_attr_qry] assign[=] call[call[name[db].DBSession.query, parameter[name[ResourceAttr]]].filter, parameter[compare[name[ResourceAttr].id equal[==] name[resource_attr_id]]]]
variable[resource_attr] assign[=] call[name[resource_attr_qry].first, parameter[]]
if compare[name[resource_attr] is constant[None]] begin[:]
<ast.Raise object at 0x7da20e9b0ee0>
return[name[resource_attr]] | keyword[def] identifier[get_resource_attribute] ( identifier[resource_attr_id] ,** identifier[kwargs] ):
literal[string]
identifier[resource_attr_qry] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[ResourceAttr] ). identifier[filter] (
identifier[ResourceAttr] . identifier[id] == identifier[resource_attr_id] ,
)
identifier[resource_attr] = identifier[resource_attr_qry] . identifier[first] ()
keyword[if] identifier[resource_attr] keyword[is] keyword[None] :
keyword[raise] identifier[ResourceNotFoundError] ( literal[string] , identifier[resource_attr_id] )
keyword[return] identifier[resource_attr] | def get_resource_attribute(resource_attr_id, **kwargs):
"""
Get a specific resource attribte, by ID
If type_id is Gspecified, only
return the resource attributes within the type.
"""
resource_attr_qry = db.DBSession.query(ResourceAttr).filter(ResourceAttr.id == resource_attr_id)
resource_attr = resource_attr_qry.first()
if resource_attr is None:
raise ResourceNotFoundError('Resource attribute %s does not exist', resource_attr_id) # depends on [control=['if'], data=[]]
return resource_attr |
def get_time_remaining_estimate(self):
    """
    Looks through all power sources and returns total time remaining estimate
    or TIME_REMAINING_UNLIMITED if ac power supply is online.

    Returns:
        Estimated minutes remaining across all discharging batteries,
        TIME_REMAINING_UNLIMITED when an AC supply is online, or
        TIME_REMAINING_UNKNOWN when nothing is discharging or the estimate
        cannot be computed.
    """
    all_energy_now = []
    all_energy_not_discharging = []
    all_power_now = []
    for supply in os.listdir(POWER_SUPPLY_PATH):
        supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
        try:
            # Renamed from 'type': avoid shadowing the builtin.
            supply_type = self.power_source_type(supply_path)
            if supply_type == common.POWER_TYPE_AC:
                if self.is_ac_online(supply_path):
                    return common.TIME_REMAINING_UNLIMITED
            elif supply_type == common.POWER_TYPE_BATTERY:
                if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                    # energy_full is not needed for this estimate.
                    _, energy_now, power_now = self.get_battery_state(supply_path)
                    all_energy_now.append(energy_now)
                    all_power_now.append(power_now)
                elif self.is_battery_present(supply_path) and not self.is_battery_discharging(supply_path):
                    energy_now = self.get_battery_state(supply_path)[1]
                    all_energy_not_discharging.append(energy_now)
            else:
                warnings.warn("UPS is not supported.")
        except (RuntimeError, IOError) as e:
            warnings.warn("Unable to read properties of {0}: {1}".format(supply_path, e), category=RuntimeWarning)

    if not all_energy_now:
        # No discharging battery was found.
        return common.TIME_REMAINING_UNKNOWN
    try:
        # Minutes for each discharging battery, plus the minutes the idle
        # batteries would add at the average current discharge rate.
        return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])\
            + sum(all_energy_not_discharging) / (sum(all_power_now) / len(all_power_now)) * 60.0
    except ZeroDivisionError as e:
        warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
        return common.TIME_REMAINING_UNKNOWN
constant[
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
]
variable[all_energy_now] assign[=] list[[]]
variable[all_energy_not_discharging] assign[=] list[[]]
variable[all_power_now] assign[=] list[[]]
for taget[name[supply]] in starred[call[name[os].listdir, parameter[name[POWER_SUPPLY_PATH]]]] begin[:]
variable[supply_path] assign[=] call[name[os].path.join, parameter[name[POWER_SUPPLY_PATH], name[supply]]]
<ast.Try object at 0x7da20c7cad40>
if compare[call[name[len], parameter[name[all_energy_now]]] greater[>] constant[0]] begin[:]
<ast.Try object at 0x7da1b10c7460> | keyword[def] identifier[get_time_remaining_estimate] ( identifier[self] ):
literal[string]
identifier[all_energy_now] =[]
identifier[all_energy_not_discharging] =[]
identifier[all_power_now] =[]
keyword[for] identifier[supply] keyword[in] identifier[os] . identifier[listdir] ( identifier[POWER_SUPPLY_PATH] ):
identifier[supply_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[POWER_SUPPLY_PATH] , identifier[supply] )
keyword[try] :
identifier[type] = identifier[self] . identifier[power_source_type] ( identifier[supply_path] )
keyword[if] identifier[type] == identifier[common] . identifier[POWER_TYPE_AC] :
keyword[if] identifier[self] . identifier[is_ac_online] ( identifier[supply_path] ):
keyword[return] identifier[common] . identifier[TIME_REMAINING_UNLIMITED]
keyword[elif] identifier[type] == identifier[common] . identifier[POWER_TYPE_BATTERY] :
keyword[if] identifier[self] . identifier[is_battery_present] ( identifier[supply_path] ) keyword[and] identifier[self] . identifier[is_battery_discharging] ( identifier[supply_path] ):
identifier[energy_full] , identifier[energy_now] , identifier[power_now] = identifier[self] . identifier[get_battery_state] ( identifier[supply_path] )
identifier[all_energy_now] . identifier[append] ( identifier[energy_now] )
identifier[all_power_now] . identifier[append] ( identifier[power_now] )
keyword[elif] identifier[self] . identifier[is_battery_present] ( identifier[supply_path] ) keyword[and] keyword[not] identifier[self] . identifier[is_battery_discharging] ( identifier[supply_path] ):
identifier[energy_now] = identifier[self] . identifier[get_battery_state] ( identifier[supply_path] )[ literal[int] ]
identifier[all_energy_not_discharging] . identifier[append] ( identifier[energy_now] )
keyword[else] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[except] ( identifier[RuntimeError] , identifier[IOError] ) keyword[as] identifier[e] :
identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( identifier[supply_path] , identifier[e] ), identifier[category] = identifier[RuntimeWarning] )
keyword[if] identifier[len] ( identifier[all_energy_now] )> literal[int] :
keyword[try] :
keyword[return] identifier[sum] ([ identifier[energy_now] / identifier[power_now] * literal[int] keyword[for] identifier[energy_now] , identifier[power_now] keyword[in] identifier[zip] ( identifier[all_energy_now] , identifier[all_power_now] )])+ identifier[sum] ( identifier[all_energy_not_discharging] )/( identifier[sum] ( identifier[all_power_now] )/ identifier[len] ( identifier[all_power_now] ))* literal[int]
keyword[except] identifier[ZeroDivisionError] keyword[as] identifier[e] :
identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( identifier[e] ), identifier[category] = identifier[RuntimeWarning] )
keyword[return] identifier[common] . identifier[TIME_REMAINING_UNKNOWN]
keyword[else] :
keyword[return] identifier[common] . identifier[TIME_REMAINING_UNKNOWN] | def get_time_remaining_estimate(self):
"""
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
"""
all_energy_now = []
all_energy_not_discharging = []
all_power_now = []
for supply in os.listdir(POWER_SUPPLY_PATH):
supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
try:
type = self.power_source_type(supply_path)
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
(energy_full, energy_now, power_now) = self.get_battery_state(supply_path)
all_energy_now.append(energy_now)
all_power_now.append(power_now) # depends on [control=['if'], data=[]]
elif self.is_battery_present(supply_path) and (not self.is_battery_discharging(supply_path)):
energy_now = self.get_battery_state(supply_path)[1]
all_energy_not_discharging.append(energy_now) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
warnings.warn('UPS is not supported.') # depends on [control=['try'], data=[]]
except (RuntimeError, IOError) as e:
warnings.warn('Unable to read properties of {0}: {1}'.format(supply_path, e), category=RuntimeWarning) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['supply']]
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for (energy_now, power_now) in zip(all_energy_now, all_power_now)]) + sum(all_energy_not_discharging) / (sum(all_power_now) / len(all_power_now)) * 60.0 # depends on [control=['try'], data=[]]
except ZeroDivisionError as e:
warnings.warn('Unable to calculate time remaining estimate: {0}'.format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
return common.TIME_REMAINING_UNKNOWN |
def reload_config(self, dockercfg_path=None):
    """
    Force a reload of the auth configuration

    Args:
        dockercfg_path (str): Use a custom path for the Docker config file
            (default ``$HOME/.docker/config.json`` if present,
            otherwise``$HOME/.dockercfg``)

    Returns:
        None
    """
    # Re-read the credentials from disk and replace the cached copy.
    reloaded = auth.load_config(
        dockercfg_path,
        credstore_env=self.credstore_env,
    )
    self._auth_configs = reloaded
constant[
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise``$HOME/.dockercfg``)
Returns:
None
]
name[self]._auth_configs assign[=] call[name[auth].load_config, parameter[name[dockercfg_path]]] | keyword[def] identifier[reload_config] ( identifier[self] , identifier[dockercfg_path] = keyword[None] ):
literal[string]
identifier[self] . identifier[_auth_configs] = identifier[auth] . identifier[load_config] (
identifier[dockercfg_path] , identifier[credstore_env] = identifier[self] . identifier[credstore_env]
) | def reload_config(self, dockercfg_path=None):
"""
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise``$HOME/.dockercfg``)
Returns:
None
"""
self._auth_configs = auth.load_config(dockercfg_path, credstore_env=self.credstore_env) |
def _ScanEncryptedVolume(self, scan_context, scan_node):
    """Scans an encrypted volume scan node for volume and file systems.

    Args:
      scan_context (SourceScannerContext): source scanner context.
      scan_node (SourceScanNode): volume scan node.

    Raises:
      ScannerError: if the format of or within the source is not supported,
          the scan node is invalid, there are no credentials defined for
          the format or no mediator is provided and a locked scan node was
          found, e.g. an encrypted volume,
    """
    # Guard clauses: validate the node, its credentials and the mediator
    # before attempting to unlock anything.
    if not (scan_node and scan_node.path_spec):
        raise errors.ScannerError('Invalid or missing scan node.')

    credentials = credentials_manager.CredentialsManager.GetCredentials(
        scan_node.path_spec)
    if not credentials:
        raise errors.ScannerError('Missing credentials for scan node.')

    if not self._mediator:
        raise errors.ScannerError(
            'Unable to proceed. Encrypted volume found but no mediator to '
            'determine how it should be unlocked.')

    unlocked = self._mediator.UnlockEncryptedVolume(
        self._source_scanner, scan_context, scan_node, credentials)
    if unlocked:
        # Only rescan the node once the volume has been unlocked.
        self._source_scanner.Scan(
            scan_context, scan_path_spec=scan_node.path_spec)
constant[Scans an encrypted volume scan node for volume and file systems.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): volume scan node.
Raises:
ScannerError: if the format of or within the source is not supported,
the scan node is invalid, there are no credentials defined for
the format or no mediator is provided and a locked scan node was
found, e.g. an encrypted volume,
]
if <ast.BoolOp object at 0x7da1b07a2b00> begin[:]
<ast.Raise object at 0x7da1b07a2b90>
variable[credentials] assign[=] call[name[credentials_manager].CredentialsManager.GetCredentials, parameter[name[scan_node].path_spec]]
if <ast.UnaryOp object at 0x7da1b07a3250> begin[:]
<ast.Raise object at 0x7da1b064e4d0>
if <ast.UnaryOp object at 0x7da1b064dae0> begin[:]
<ast.Raise object at 0x7da1b064e860>
if call[name[self]._mediator.UnlockEncryptedVolume, parameter[name[self]._source_scanner, name[scan_context], name[scan_node], name[credentials]]] begin[:]
call[name[self]._source_scanner.Scan, parameter[name[scan_context]]] | keyword[def] identifier[_ScanEncryptedVolume] ( identifier[self] , identifier[scan_context] , identifier[scan_node] ):
literal[string]
keyword[if] keyword[not] identifier[scan_node] keyword[or] keyword[not] identifier[scan_node] . identifier[path_spec] :
keyword[raise] identifier[errors] . identifier[ScannerError] ( literal[string] )
identifier[credentials] = identifier[credentials_manager] . identifier[CredentialsManager] . identifier[GetCredentials] (
identifier[scan_node] . identifier[path_spec] )
keyword[if] keyword[not] identifier[credentials] :
keyword[raise] identifier[errors] . identifier[ScannerError] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[_mediator] :
keyword[raise] identifier[errors] . identifier[ScannerError] (
literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[_mediator] . identifier[UnlockEncryptedVolume] (
identifier[self] . identifier[_source_scanner] , identifier[scan_context] , identifier[scan_node] , identifier[credentials] ):
identifier[self] . identifier[_source_scanner] . identifier[Scan] (
identifier[scan_context] , identifier[scan_path_spec] = identifier[scan_node] . identifier[path_spec] ) | def _ScanEncryptedVolume(self, scan_context, scan_node):
"""Scans an encrypted volume scan node for volume and file systems.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): volume scan node.
Raises:
ScannerError: if the format of or within the source is not supported,
the scan node is invalid, there are no credentials defined for
the format or no mediator is provided and a locked scan node was
found, e.g. an encrypted volume,
"""
if not scan_node or not scan_node.path_spec:
raise errors.ScannerError('Invalid or missing scan node.') # depends on [control=['if'], data=[]]
credentials = credentials_manager.CredentialsManager.GetCredentials(scan_node.path_spec)
if not credentials:
raise errors.ScannerError('Missing credentials for scan node.') # depends on [control=['if'], data=[]]
if not self._mediator:
raise errors.ScannerError('Unable to proceed. Encrypted volume found but no mediator to determine how it should be unlocked.') # depends on [control=['if'], data=[]]
if self._mediator.UnlockEncryptedVolume(self._source_scanner, scan_context, scan_node, credentials):
self._source_scanner.Scan(scan_context, scan_path_spec=scan_node.path_spec) # depends on [control=['if'], data=[]] |
def getMetadata(self, objectId=None, **filters):
    """ Request metadata about a text or a collection

    :param objectId: Object Identifier to filter on
    :type objectId: str
    :param filters: Kwargs parameters.
    :type filters: dict
    :return: Collection
    """
    if objectId is None:
        return self.inventory
    elif objectId in self.inventory.children.keys():
        return self.inventory[objectId]

    texts, _, _ = self.__getTextMetadata__(urn=objectId)

    # We store inventory names and if there is only one we recreate the inventory
    inv_names = [text.parent.parent.parent.id for text in texts]
    if len(set(inv_names)) == 1:
        inventory = self.classes["inventory"](name=inv_names[0])
    else:
        inventory = self.classes["inventory"]()

    # For each text we found using the filter
    for text in texts:
        tg_urn = str(text.parent.parent.urn)
        wk_urn = str(text.parent.urn)
        txt_urn = str(text.urn)
        # If we need to generate a textgroup object
        if tg_urn not in inventory.textgroups:
            self.classes["textgroup"](urn=tg_urn, parent=inventory)
        # If we need to generate a work object
        if wk_urn not in inventory.textgroups[tg_urn].works:
            self.classes["work"](urn=wk_urn, parent=inventory.textgroups[tg_urn])

        work = inventory.textgroups[tg_urn].works[wk_urn]
        # Recreate the text node under its work. Translations and
        # commentaries carry a language, editions do not; texts of any
        # other type are skipped (same as before the deduplication).
        if isinstance(text, CtsEditionMetadata):
            node = self.classes["edition"](urn=txt_urn, parent=work)
        elif isinstance(text, CtsTranslationMetadata):
            node = self.classes["translation"](urn=txt_urn, parent=work, lang=text.lang)
        elif isinstance(text, CtsCommentaryMetadata):
            node = self.classes["commentary"](urn=txt_urn, parent=work, lang=text.lang)
        else:
            continue
        node.citation = text.citation

    return inventory[objectId]
constant[ Request metadata about a text or a collection
:param objectId: Object Identifier to filter on
:type objectId: str
:param filters: Kwargs parameters.
:type filters: dict
:return: Collection
]
if compare[name[objectId] is constant[None]] begin[:]
return[name[self].inventory]
<ast.Tuple object at 0x7da18f09c9a0> assign[=] call[name[self].__getTextMetadata__, parameter[]]
variable[inv_names] assign[=] <ast.ListComp object at 0x7da18f09d570>
if compare[call[name[len], parameter[call[name[set], parameter[name[inv_names]]]]] equal[==] constant[1]] begin[:]
variable[inventory] assign[=] call[call[name[self].classes][constant[inventory]], parameter[]]
for taget[name[text]] in starred[name[texts]] begin[:]
variable[tg_urn] assign[=] call[name[str], parameter[name[text].parent.parent.urn]]
variable[wk_urn] assign[=] call[name[str], parameter[name[text].parent.urn]]
variable[txt_urn] assign[=] call[name[str], parameter[name[text].urn]]
if compare[name[tg_urn] <ast.NotIn object at 0x7da2590d7190> name[inventory].textgroups] begin[:]
call[call[name[self].classes][constant[textgroup]], parameter[]]
if compare[name[wk_urn] <ast.NotIn object at 0x7da2590d7190> call[name[inventory].textgroups][name[tg_urn]].works] begin[:]
call[call[name[self].classes][constant[work]], parameter[]]
if call[name[isinstance], parameter[name[text], name[CtsEditionMetadata]]] begin[:]
variable[x] assign[=] call[call[name[self].classes][constant[edition]], parameter[]]
name[x].citation assign[=] name[text].citation
return[call[name[inventory]][name[objectId]]] | keyword[def] identifier[getMetadata] ( identifier[self] , identifier[objectId] = keyword[None] ,** identifier[filters] ):
literal[string]
keyword[if] identifier[objectId] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[inventory]
keyword[elif] identifier[objectId] keyword[in] identifier[self] . identifier[inventory] . identifier[children] . identifier[keys] ():
keyword[return] identifier[self] . identifier[inventory] [ identifier[objectId] ]
identifier[texts] , identifier[_] , identifier[_] = identifier[self] . identifier[__getTextMetadata__] ( identifier[urn] = identifier[objectId] )
identifier[inv_names] =[ identifier[text] . identifier[parent] . identifier[parent] . identifier[parent] . identifier[id] keyword[for] identifier[text] keyword[in] identifier[texts] ]
keyword[if] identifier[len] ( identifier[set] ( identifier[inv_names] ))== literal[int] :
identifier[inventory] = identifier[self] . identifier[classes] [ literal[string] ]( identifier[name] = identifier[inv_names] [ literal[int] ])
keyword[else] :
identifier[inventory] = identifier[self] . identifier[classes] [ literal[string] ]()
keyword[for] identifier[text] keyword[in] identifier[texts] :
identifier[tg_urn] = identifier[str] ( identifier[text] . identifier[parent] . identifier[parent] . identifier[urn] )
identifier[wk_urn] = identifier[str] ( identifier[text] . identifier[parent] . identifier[urn] )
identifier[txt_urn] = identifier[str] ( identifier[text] . identifier[urn] )
keyword[if] identifier[tg_urn] keyword[not] keyword[in] identifier[inventory] . identifier[textgroups] :
identifier[self] . identifier[classes] [ literal[string] ]( identifier[urn] = identifier[tg_urn] , identifier[parent] = identifier[inventory] )
keyword[if] identifier[wk_urn] keyword[not] keyword[in] identifier[inventory] . identifier[textgroups] [ identifier[tg_urn] ]. identifier[works] :
identifier[self] . identifier[classes] [ literal[string] ]( identifier[urn] = identifier[wk_urn] , identifier[parent] = identifier[inventory] . identifier[textgroups] [ identifier[tg_urn] ])
keyword[if] identifier[isinstance] ( identifier[text] , identifier[CtsEditionMetadata] ):
identifier[x] = identifier[self] . identifier[classes] [ literal[string] ]( identifier[urn] = identifier[txt_urn] , identifier[parent] = identifier[inventory] . identifier[textgroups] [ identifier[tg_urn] ]. identifier[works] [ identifier[wk_urn] ])
identifier[x] . identifier[citation] = identifier[text] . identifier[citation]
keyword[elif] identifier[isinstance] ( identifier[text] , identifier[CtsTranslationMetadata] ):
identifier[x] = identifier[self] . identifier[classes] [ literal[string] ]( identifier[urn] = identifier[txt_urn] , identifier[parent] = identifier[inventory] . identifier[textgroups] [ identifier[tg_urn] ]. identifier[works] [ identifier[wk_urn] ], identifier[lang] = identifier[text] . identifier[lang] )
identifier[x] . identifier[citation] = identifier[text] . identifier[citation]
keyword[elif] identifier[isinstance] ( identifier[text] , identifier[CtsCommentaryMetadata] ):
identifier[x] = identifier[self] . identifier[classes] [ literal[string] ]( identifier[urn] = identifier[txt_urn] , identifier[parent] = identifier[inventory] . identifier[textgroups] [ identifier[tg_urn] ]. identifier[works] [ identifier[wk_urn] ], identifier[lang] = identifier[text] . identifier[lang] )
identifier[x] . identifier[citation] = identifier[text] . identifier[citation]
keyword[return] identifier[inventory] [ identifier[objectId] ] | def getMetadata(self, objectId=None, **filters):
""" Request metadata about a text or a collection
:param objectId: Object Identifier to filter on
:type objectId: str
:param filters: Kwargs parameters.
:type filters: dict
:return: Collection
"""
if objectId is None:
return self.inventory # depends on [control=['if'], data=[]]
elif objectId in self.inventory.children.keys():
return self.inventory[objectId] # depends on [control=['if'], data=['objectId']]
(texts, _, _) = self.__getTextMetadata__(urn=objectId)
# We store inventory names and if there is only one we recreate the inventory
inv_names = [text.parent.parent.parent.id for text in texts]
if len(set(inv_names)) == 1:
inventory = self.classes['inventory'](name=inv_names[0]) # depends on [control=['if'], data=[]]
else:
inventory = self.classes['inventory']()
# For each text we found using the filter
for text in texts:
tg_urn = str(text.parent.parent.urn)
wk_urn = str(text.parent.urn)
txt_urn = str(text.urn)
# If we need to generate a textgroup object
if tg_urn not in inventory.textgroups:
self.classes['textgroup'](urn=tg_urn, parent=inventory) # depends on [control=['if'], data=['tg_urn']]
# If we need to generate a work object
if wk_urn not in inventory.textgroups[tg_urn].works:
self.classes['work'](urn=wk_urn, parent=inventory.textgroups[tg_urn]) # depends on [control=['if'], data=['wk_urn']]
if isinstance(text, CtsEditionMetadata):
x = self.classes['edition'](urn=txt_urn, parent=inventory.textgroups[tg_urn].works[wk_urn])
x.citation = text.citation # depends on [control=['if'], data=[]]
elif isinstance(text, CtsTranslationMetadata):
x = self.classes['translation'](urn=txt_urn, parent=inventory.textgroups[tg_urn].works[wk_urn], lang=text.lang)
x.citation = text.citation # depends on [control=['if'], data=[]]
elif isinstance(text, CtsCommentaryMetadata):
x = self.classes['commentary'](urn=txt_urn, parent=inventory.textgroups[tg_urn].works[wk_urn], lang=text.lang)
x.citation = text.citation # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['text']]
return inventory[objectId] |
def finalize_media(app):
    """Point media files at our media server.
    Rewrites the embed JavaScript URL so built HTML pulls assets from the
    configured static host rather than the local build.  Builders that do
    not produce HTML (or have no ``script_files`` list), and the
    local-media single-page builder, are left untouched.
    """
    builder = app.builder
    # The local-media builder must keep downloadable files local.
    if builder.name == 'readthedocssinglehtmllocalmedia':
        return
    # Only HTML builders that expose a script_files list can be patched.
    if builder.format != 'html' or not hasattr(builder, 'script_files'):
        return
    # Pull project data from conf.py if it exists
    context = builder.config.html_context
    static_url = context.get('STATIC_URL', DEFAULT_STATIC_URL)
    js_file = '{}javascript/readthedocs-doc-embed.js'.format(static_url)
    if sphinx.version_info < (1, 8):
        builder.script_files.append(js_file)
    else:
        app.add_js_file(js_file)
constant[Point media files at our media server.]
if <ast.BoolOp object at 0x7da18c4cd510> begin[:]
return[None]
variable[context] assign[=] name[app].builder.config.html_context
variable[STATIC_URL] assign[=] call[name[context].get, parameter[constant[STATIC_URL], name[DEFAULT_STATIC_URL]]]
variable[js_file] assign[=] call[constant[{}javascript/readthedocs-doc-embed.js].format, parameter[name[STATIC_URL]]]
if compare[name[sphinx].version_info less[<] tuple[[<ast.Constant object at 0x7da1b10c0cd0>, <ast.Constant object at 0x7da1b10c0ca0>]]] begin[:]
call[name[app].builder.script_files.append, parameter[name[js_file]]] | keyword[def] identifier[finalize_media] ( identifier[app] ):
literal[string]
keyword[if] ( identifier[app] . identifier[builder] . identifier[name] == literal[string] keyword[or]
identifier[app] . identifier[builder] . identifier[format] != literal[string] keyword[or]
keyword[not] identifier[hasattr] ( identifier[app] . identifier[builder] , literal[string] )):
keyword[return]
identifier[context] = identifier[app] . identifier[builder] . identifier[config] . identifier[html_context]
identifier[STATIC_URL] = identifier[context] . identifier[get] ( literal[string] , identifier[DEFAULT_STATIC_URL] )
identifier[js_file] = literal[string] . identifier[format] ( identifier[STATIC_URL] )
keyword[if] identifier[sphinx] . identifier[version_info] <( literal[int] , literal[int] ):
identifier[app] . identifier[builder] . identifier[script_files] . identifier[append] ( identifier[js_file] )
keyword[else] :
identifier[app] . identifier[add_js_file] ( identifier[js_file] ) | def finalize_media(app):
"""Point media files at our media server."""
if app.builder.name == 'readthedocssinglehtmllocalmedia' or app.builder.format != 'html' or (not hasattr(app.builder, 'script_files')):
return # Use local media for downloadable files # depends on [control=['if'], data=[]]
# Pull project data from conf.py if it exists
context = app.builder.config.html_context
STATIC_URL = context.get('STATIC_URL', DEFAULT_STATIC_URL)
js_file = '{}javascript/readthedocs-doc-embed.js'.format(STATIC_URL)
if sphinx.version_info < (1, 8):
app.builder.script_files.append(js_file) # depends on [control=['if'], data=[]]
else:
app.add_js_file(js_file) |
def insert_completions(self, e):    # (M-*)
    u"""Insert all completions of the text before point that would have
    been generated by possible-completions."""
    completions = self._get_completions()
    start = self.begidx
    end = self.endidx
    for candidate in completions:
        # Each completion is spliced in followed by a separating space;
        # subsequent completions are inserted after the previous one.
        replacement = list(candidate) + [' ']
        self.l_buffer[start:end] = replacement
        start += len(replacement)
        end = start
    # Leave the cursor just past the last inserted completion.
    self.line_cursor = start
    self.finalize()
constant[Insert all completions of the text before point that would have
been generated by possible-completions.]
variable[completions] assign[=] call[name[self]._get_completions, parameter[]]
variable[b] assign[=] name[self].begidx
variable[e] assign[=] name[self].endidx
for taget[name[comp]] in starred[name[completions]] begin[:]
variable[rep] assign[=] <ast.ListComp object at 0x7da18f00f640>
call[name[rep].append, parameter[constant[ ]]]
call[name[self].l_buffer][<ast.Slice object at 0x7da18f00eb90>] assign[=] name[rep]
<ast.AugAssign object at 0x7da18f00ef50>
variable[e] assign[=] name[b]
name[self].line_cursor assign[=] name[b]
call[name[self].finalize, parameter[]] | keyword[def] identifier[insert_completions] ( identifier[self] , identifier[e] ):
literal[string]
identifier[completions] = identifier[self] . identifier[_get_completions] ()
identifier[b] = identifier[self] . identifier[begidx]
identifier[e] = identifier[self] . identifier[endidx]
keyword[for] identifier[comp] keyword[in] identifier[completions] :
identifier[rep] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[comp] ]
identifier[rep] . identifier[append] ( literal[string] )
identifier[self] . identifier[l_buffer] [ identifier[b] : identifier[e] ]= identifier[rep]
identifier[b] += identifier[len] ( identifier[rep] )
identifier[e] = identifier[b]
identifier[self] . identifier[line_cursor] = identifier[b]
identifier[self] . identifier[finalize] () | def insert_completions(self, e): # (M-*)
u'Insert all completions of the text before point that would have\n been generated by possible-completions.'
completions = self._get_completions()
b = self.begidx
e = self.endidx
for comp in completions:
rep = [c for c in comp]
rep.append(' ')
self.l_buffer[b:e] = rep
b += len(rep)
e = b # depends on [control=['for'], data=['comp']]
self.line_cursor = b
self.finalize() |
def enterstring(self, window_name, object_name='', data=''):
    """
    Type string sequence.
    @param window_name: Window name to focus on, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to focus on, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @param data: data to type.
    @type data: string
    @return: 1 on success.
    @rtype: integer
    """
    # Without a target object or data, fall back to sending the window
    # a raw key-event sequence.
    if not (object_name or data):
        return self.generatekeyevent(window_name)
    handle = self._get_object_handle(window_name, object_name)
    if not handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    self._grabfocus(handle)
    handle.sendKeys(data)
    return 1
constant[
Type string sequence.
@param window_name: Window name to focus on, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to focus on, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
]
if <ast.BoolOp object at 0x7da20c796350> begin[:]
return[call[name[self].generatekeyevent, parameter[name[window_name]]]] | keyword[def] identifier[enterstring] ( identifier[self] , identifier[window_name] , identifier[object_name] = literal[string] , identifier[data] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[object_name] keyword[and] keyword[not] identifier[data] :
keyword[return] identifier[self] . identifier[generatekeyevent] ( identifier[window_name] )
keyword[else] :
identifier[object_handle] = identifier[self] . identifier[_get_object_handle] ( identifier[window_name] , identifier[object_name] )
keyword[if] keyword[not] identifier[object_handle] . identifier[AXEnabled] :
keyword[raise] identifier[LdtpServerException] ( literal[string] % identifier[object_name] )
identifier[self] . identifier[_grabfocus] ( identifier[object_handle] )
identifier[object_handle] . identifier[sendKeys] ( identifier[data] )
keyword[return] literal[int] | def enterstring(self, window_name, object_name='', data=''):
"""
Type string sequence.
@param window_name: Window name to focus on, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to focus on, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
if not object_name and (not data):
return self.generatekeyevent(window_name) # depends on [control=['if'], data=[]]
else:
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u'Object %s state disabled' % object_name) # depends on [control=['if'], data=[]]
self._grabfocus(object_handle)
object_handle.sendKeys(data)
return 1 |
def write_tables(target, tables, append=False, overwrite=False, **kwargs):
    """Write an LIGO_LW table to file
    Parameters
    ----------
    target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
        the file or document to write into
    tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
        the tables to write
    append : `bool`, optional, default: `False`
        if `True`, append to an existing file/table, otherwise `overwrite`
    overwrite : `bool`, optional, default: `False`
        if `True`, delete an existing instance of the table type, otherwise
        append new rows
    **kwargs
        other keyword arguments to pass to
        :func:`~ligo.lw.utils.load_filename`, or
        :func:`~ligo.lw.utils.load_fileobj` as appropriate
    Raises
    ------
    IOError
        if ``target`` names an existing file and neither ``append`` nor
        ``overwrite`` is given
    """
    # imported here (not module level) so ligo.lw is only required on use
    from ligo.lw.ligolw import (Document, LIGO_LW, LIGOLWContentHandler)
    from ligo.lw import utils as ligolw_utils
    # allow writing directly to XML
    if isinstance(target, (Document, LIGO_LW)):
        xmldoc = target
    # open existing document, if possible
    elif append:
        # caller may supply a custom contenthandler; pop it so it is not
        # forwarded to the write call below
        xmldoc = open_xmldoc(
            target, contenthandler=kwargs.pop('contenthandler',
                                              LIGOLWContentHandler))
    # fail on existing document and not overwriting
    elif (not overwrite and isinstance(target, string_types) and
          os.path.isfile(target)):
        raise IOError("File exists: {}".format(target))
    else:  # or create a new document
        xmldoc = Document()
    # add the tables to the document; `overwrite` controls whether existing
    # tables of the same type are replaced
    write_tables_to_document(xmldoc, tables, overwrite=overwrite)
    # write file, guessing gzip-compression from the file extension unless
    # the caller passed gz= explicitly
    if isinstance(target, string_types):
        kwargs.setdefault('gz', target.endswith('.gz'))
        ligolw_utils.write_filename(xmldoc, target, **kwargs)
    elif isinstance(target, FILE_LIKE):
        # NOTE(review): assumes the file-like object has a .name attribute
        # -- confirm for in-memory streams
        kwargs.setdefault('gz', target.name.endswith('.gz'))
        ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
constant[Write an LIGO_LW table to file
Parameters
----------
target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
the file or document to write into
tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
the tables to write
append : `bool`, optional, default: `False`
if `True`, append to an existing file/table, otherwise `overwrite`
overwrite : `bool`, optional, default: `False`
if `True`, delete an existing instance of the table type, otherwise
append new rows
**kwargs
other keyword arguments to pass to
:func:`~ligo.lw.utils.load_filename`, or
:func:`~ligo.lw.utils.load_fileobj` as appropriate
]
from relative_module[ligo.lw.ligolw] import module[Document], module[LIGO_LW], module[LIGOLWContentHandler]
from relative_module[ligo.lw] import module[utils]
if call[name[isinstance], parameter[name[target], tuple[[<ast.Name object at 0x7da20e9b12a0>, <ast.Name object at 0x7da20e9b0490>]]]] begin[:]
variable[xmldoc] assign[=] name[target]
call[name[write_tables_to_document], parameter[name[xmldoc], name[tables]]]
if call[name[isinstance], parameter[name[target], name[string_types]]] begin[:]
call[name[kwargs].setdefault, parameter[constant[gz], call[name[target].endswith, parameter[constant[.gz]]]]]
call[name[ligolw_utils].write_filename, parameter[name[xmldoc], name[target]]] | keyword[def] identifier[write_tables] ( identifier[target] , identifier[tables] , identifier[append] = keyword[False] , identifier[overwrite] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[ligo] . identifier[lw] . identifier[ligolw] keyword[import] ( identifier[Document] , identifier[LIGO_LW] , identifier[LIGOLWContentHandler] )
keyword[from] identifier[ligo] . identifier[lw] keyword[import] identifier[utils] keyword[as] identifier[ligolw_utils]
keyword[if] identifier[isinstance] ( identifier[target] ,( identifier[Document] , identifier[LIGO_LW] )):
identifier[xmldoc] = identifier[target]
keyword[elif] identifier[append] :
identifier[xmldoc] = identifier[open_xmldoc] (
identifier[target] , identifier[contenthandler] = identifier[kwargs] . identifier[pop] ( literal[string] ,
identifier[LIGOLWContentHandler] ))
keyword[elif] ( keyword[not] identifier[overwrite] keyword[and] identifier[isinstance] ( identifier[target] , identifier[string_types] ) keyword[and]
identifier[os] . identifier[path] . identifier[isfile] ( identifier[target] )):
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[target] ))
keyword[else] :
identifier[xmldoc] = identifier[Document] ()
identifier[write_tables_to_document] ( identifier[xmldoc] , identifier[tables] , identifier[overwrite] = identifier[overwrite] )
keyword[if] identifier[isinstance] ( identifier[target] , identifier[string_types] ):
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[target] . identifier[endswith] ( literal[string] ))
identifier[ligolw_utils] . identifier[write_filename] ( identifier[xmldoc] , identifier[target] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[target] , identifier[FILE_LIKE] ):
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[target] . identifier[name] . identifier[endswith] ( literal[string] ))
identifier[ligolw_utils] . identifier[write_fileobj] ( identifier[xmldoc] , identifier[target] ,** identifier[kwargs] ) | def write_tables(target, tables, append=False, overwrite=False, **kwargs):
"""Write an LIGO_LW table to file
Parameters
----------
target : `str`, `file`, :class:`~ligo.lw.ligolw.Document`
the file or document to write into
tables : `list`, `tuple` of :class:`~ligo.lw.table.Table`
the tables to write
append : `bool`, optional, default: `False`
if `True`, append to an existing file/table, otherwise `overwrite`
overwrite : `bool`, optional, default: `False`
if `True`, delete an existing instance of the table type, otherwise
append new rows
**kwargs
other keyword arguments to pass to
:func:`~ligo.lw.utils.load_filename`, or
:func:`~ligo.lw.utils.load_fileobj` as appropriate
"""
from ligo.lw.ligolw import Document, LIGO_LW, LIGOLWContentHandler
from ligo.lw import utils as ligolw_utils
# allow writing directly to XML
if isinstance(target, (Document, LIGO_LW)):
xmldoc = target # depends on [control=['if'], data=[]]
# open existing document, if possible
elif append:
xmldoc = open_xmldoc(target, contenthandler=kwargs.pop('contenthandler', LIGOLWContentHandler)) # depends on [control=['if'], data=[]]
# fail on existing document and not overwriting
elif not overwrite and isinstance(target, string_types) and os.path.isfile(target):
raise IOError('File exists: {}'.format(target)) # depends on [control=['if'], data=[]]
else: # or create a new document
xmldoc = Document()
# convert table to format
write_tables_to_document(xmldoc, tables, overwrite=overwrite)
# write file
if isinstance(target, string_types):
kwargs.setdefault('gz', target.endswith('.gz'))
ligolw_utils.write_filename(xmldoc, target, **kwargs) # depends on [control=['if'], data=[]]
elif isinstance(target, FILE_LIKE):
kwargs.setdefault('gz', target.name.endswith('.gz'))
ligolw_utils.write_fileobj(xmldoc, target, **kwargs) # depends on [control=['if'], data=[]] |
def event_from_item(self, sequenced_item):
    """
    Reconstructs domain event from stored event topic and
    event attrs. Used in the event store when getting domain events.
    """
    expected_class = self.sequenced_item_class
    assert isinstance(sequenced_item, expected_class), (
        expected_class, type(sequenced_item)
    )
    # Pull the topic and serialized state out of the named item fields.
    names = self.field_names
    topic = getattr(sequenced_item, names.topic)
    state = getattr(sequenced_item, names.state)
    return self.event_from_topic_and_state(topic, state)
constant[
Reconstructs domain event from stored event topic and
event attrs. Used in the event store when getting domain events.
]
assert[call[name[isinstance], parameter[name[sequenced_item], name[self].sequenced_item_class]]]
variable[topic] assign[=] call[name[getattr], parameter[name[sequenced_item], name[self].field_names.topic]]
variable[state] assign[=] call[name[getattr], parameter[name[sequenced_item], name[self].field_names.state]]
return[call[name[self].event_from_topic_and_state, parameter[name[topic], name[state]]]] | keyword[def] identifier[event_from_item] ( identifier[self] , identifier[sequenced_item] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[sequenced_item] , identifier[self] . identifier[sequenced_item_class] ),(
identifier[self] . identifier[sequenced_item_class] , identifier[type] ( identifier[sequenced_item] )
)
identifier[topic] = identifier[getattr] ( identifier[sequenced_item] , identifier[self] . identifier[field_names] . identifier[topic] )
identifier[state] = identifier[getattr] ( identifier[sequenced_item] , identifier[self] . identifier[field_names] . identifier[state] )
keyword[return] identifier[self] . identifier[event_from_topic_and_state] ( identifier[topic] , identifier[state] ) | def event_from_item(self, sequenced_item):
"""
Reconstructs domain event from stored event topic and
event attrs. Used in the event store when getting domain events.
"""
assert isinstance(sequenced_item, self.sequenced_item_class), (self.sequenced_item_class, type(sequenced_item))
# Get the topic and state.
topic = getattr(sequenced_item, self.field_names.topic)
state = getattr(sequenced_item, self.field_names.state)
return self.event_from_topic_and_state(topic, state) |
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID
    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.mturk.connection.MTurkConnection`
    :return: A connection to MTurk
    """
    # Imported lazily so boto.mturk is only required when MTurk is used.
    from boto.mturk.connection import MTurkConnection
    connection = MTurkConnection(aws_access_key_id, aws_secret_access_key,
                                 **kwargs)
    return connection
constant[
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
]
from relative_module[boto.mturk.connection] import module[MTurkConnection]
return[call[name[MTurkConnection], parameter[name[aws_access_key_id], name[aws_secret_access_key]]]] | keyword[def] identifier[connect_mturk] ( identifier[aws_access_key_id] = keyword[None] , identifier[aws_secret_access_key] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[boto] . identifier[mturk] . identifier[connection] keyword[import] identifier[MTurkConnection]
keyword[return] identifier[MTurkConnection] ( identifier[aws_access_key_id] , identifier[aws_secret_access_key] ,** identifier[kwargs] ) | def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
"""
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs) |
def measureSize(self, diff, chunkSize):
    """ Spend some time to get an accurate size. """
    (toUUID, fromUUID) = self.toArg.diff(diff)
    # Interactivity is forwarded so the server can show progress on a tty.
    interactive = sys.stderr.isatty()
    measured = self._client.measureSize(
        toUUID,
        fromUUID,
        diff.size,
        chunkSize,
        interactive,
    )
    return self.toObj.diff(measured)
constant[ Spend some time to get an accurate size. ]
<ast.Tuple object at 0x7da1b2725270> assign[=] call[name[self].toArg.diff, parameter[name[diff]]]
variable[isInteractive] assign[=] call[name[sys].stderr.isatty, parameter[]]
return[call[name[self].toObj.diff, parameter[call[name[self]._client.measureSize, parameter[name[toUUID], name[fromUUID], name[diff].size, name[chunkSize], name[isInteractive]]]]]] | keyword[def] identifier[measureSize] ( identifier[self] , identifier[diff] , identifier[chunkSize] ):
literal[string]
( identifier[toUUID] , identifier[fromUUID] )= identifier[self] . identifier[toArg] . identifier[diff] ( identifier[diff] )
identifier[isInteractive] = identifier[sys] . identifier[stderr] . identifier[isatty] ()
keyword[return] identifier[self] . identifier[toObj] . identifier[diff] ( identifier[self] . identifier[_client] . identifier[measureSize] (
identifier[toUUID] ,
identifier[fromUUID] ,
identifier[diff] . identifier[size] ,
identifier[chunkSize] ,
identifier[isInteractive] ,
)) | def measureSize(self, diff, chunkSize):
""" Spend some time to get an accurate size. """
(toUUID, fromUUID) = self.toArg.diff(diff)
isInteractive = sys.stderr.isatty()
return self.toObj.diff(self._client.measureSize(toUUID, fromUUID, diff.size, chunkSize, isInteractive)) |
def create(self, path, data, ephemeral=False, sequence=False):
    """
    Creates a ZooKeeper node
    :param path: Z-Path
    :param data: Node Content
    :param ephemeral: Ephemeral flag
    :param sequence: Sequential flag
    """
    # Resolve the chroot-relative path first, then delegate to kazoo.
    full_path = self.__path(path)
    return self._zk.create(full_path, data,
                           ephemeral=ephemeral, sequence=sequence)
constant[
Creates a ZooKeeper node
:param path: Z-Path
:param data: Node Content
:param ephemeral: Ephemeral flag
:param sequence: Sequential flag
]
return[call[name[self]._zk.create, parameter[call[name[self].__path, parameter[name[path]]], name[data]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[path] , identifier[data] , identifier[ephemeral] = keyword[False] , identifier[sequence] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_zk] . identifier[create] (
identifier[self] . identifier[__path] ( identifier[path] ), identifier[data] , identifier[ephemeral] = identifier[ephemeral] , identifier[sequence] = identifier[sequence]
) | def create(self, path, data, ephemeral=False, sequence=False):
"""
Creates a ZooKeeper node
:param path: Z-Path
:param data: Node Content
:param ephemeral: Ephemeral flag
:param sequence: Sequential flag
"""
return self._zk.create(self.__path(path), data, ephemeral=ephemeral, sequence=sequence) |
def tilt(self, R, z, nsigma=None, mc=False, nmc=10000,
         gl=True, ngl=_DEFAULTNGL, **kwargs):
    """
    NAME:
       tilt
    PURPOSE:
       calculate the tilt of the velocity ellipsoid by marginalizing over velocity
    INPUT:
       R - radius at which to calculate this (can be Quantity)
       z - height at which to calculate this (can be Quantity)
    OPTIONAL INPUT:
       nsigma - number of sigma to integrate the velocities over
       scipy.integrate.tplquad kwargs epsabs and epsrel
       mc= if True, calculate using Monte Carlo integration
       nmc= if mc, use nmc samples
       gl= if True, calculate using Gauss-Legendre integration
       ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
    OUTPUT:
       tilt in rad
    HISTORY:
       2012-12-23 - Written - Bovy (IAS)
       2017-10-28 - Changed return unit to rad - Bovy (UofT)
    """
    warnings.warn("In versions >1.3, the output unit of quasiisothermaldf.tilt has been changed to radian (from degree before)",galpyWarning)
    # Evaluate the zeroth moment (density) once and keep the expensive
    # setup -- MC velocity samples or the Gauss-Legendre grid -- so that
    # the three second moments below re-use it.
    if mc:
        dens, vrs, vts, vzs = self._vmomentdensity(
            R, z, 0., 0., 0.,
            nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=True, **kwargs)
        shared = dict(nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=False,
                      _vrs=vrs, _vts=vts, _vzs=vzs)
    elif gl:
        dens, glqeval = self._vmomentdensity(
            R, z, 0., 0., 0.,
            gl=gl, ngl=ngl, _returngl=True, **kwargs)
        shared = dict(ngl=ngl, gl=gl, _glqeval=glqeval)
    else:
        raise NotImplementedError("Use either mc=True or gl=True")
    def moment(nr, nt, nz):
        # Density-normalized velocity moment <vR^nr vT^nt vz^nz>.
        return self._vmomentdensity(R, z, nr, nt, nz,
                                    **shared, **kwargs) / dens
    sigR2 = moment(2., 0., 0.)
    sigz2 = moment(0., 0., 2.)
    sigRz = moment(1., 0., 1.)
    # Tilt angle of the velocity ellipsoid in the (R, z) plane, in radians.
    return 0.5 * numpy.arctan(2. * sigRz / (sigR2 - sigz2))
constant[
NAME:
tilt
PURPOSE:
calculate the tilt of the velocity ellipsoid by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
tilt in rad
HISTORY:
2012-12-23 - Written - Bovy (IAS)
2017-10-28 - Changed return unit to rad - Bovy (UofT)
]
call[name[warnings].warn, parameter[constant[In versions >1.3, the output unit of quasiisothermaldf.tilt has been changed to radian (from degree before)], name[galpyWarning]]]
if name[mc] begin[:]
<ast.Tuple object at 0x7da1b0ddca30> assign[=] call[name[self]._vmomentdensity, parameter[name[R], name[z], constant[0.0], constant[0.0], constant[0.0]]]
variable[tsigmar2] assign[=] binary_operation[call[name[self]._vmomentdensity, parameter[name[R], name[z], constant[2.0], constant[0.0], constant[0.0]]] / name[surfmass]]
variable[tsigmaz2] assign[=] binary_operation[call[name[self]._vmomentdensity, parameter[name[R], name[z], constant[0.0], constant[0.0], constant[2.0]]] / name[surfmass]]
variable[tsigmarz] assign[=] binary_operation[call[name[self]._vmomentdensity, parameter[name[R], name[z], constant[1.0], constant[0.0], constant[1.0]]] / name[surfmass]]
return[binary_operation[constant[0.5] * call[name[numpy].arctan, parameter[binary_operation[binary_operation[constant[2.0] * name[tsigmarz]] / binary_operation[name[tsigmar2] - name[tsigmaz2]]]]]]] | keyword[def] identifier[tilt] ( identifier[self] , identifier[R] , identifier[z] , identifier[nsigma] = keyword[None] , identifier[mc] = keyword[False] , identifier[nmc] = literal[int] ,
identifier[gl] = keyword[True] , identifier[ngl] = identifier[_DEFAULTNGL] ,** identifier[kwargs] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] , identifier[galpyWarning] )
keyword[if] identifier[mc] :
identifier[surfmass] , identifier[vrs] , identifier[vts] , identifier[vzs] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[nsigma] = identifier[nsigma] , identifier[mc] = identifier[mc] , identifier[nmc] = identifier[nmc] , identifier[_returnmc] = keyword[True] ,
** identifier[kwargs] )
identifier[tsigmar2] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[nsigma] = identifier[nsigma] , identifier[mc] = identifier[mc] , identifier[nmc] = identifier[nmc] , identifier[_returnmc] = keyword[False] ,
identifier[_vrs] = identifier[vrs] , identifier[_vts] = identifier[vts] , identifier[_vzs] = identifier[vzs] ,
** identifier[kwargs] )/ identifier[surfmass]
identifier[tsigmaz2] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[nsigma] = identifier[nsigma] , identifier[mc] = identifier[mc] , identifier[nmc] = identifier[nmc] , identifier[_returnmc] = keyword[False] ,
identifier[_vrs] = identifier[vrs] , identifier[_vts] = identifier[vts] , identifier[_vzs] = identifier[vzs] ,
** identifier[kwargs] )/ identifier[surfmass]
identifier[tsigmarz] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[nsigma] = identifier[nsigma] , identifier[mc] = identifier[mc] , identifier[nmc] = identifier[nmc] , identifier[_returnmc] = keyword[False] ,
identifier[_vrs] = identifier[vrs] , identifier[_vts] = identifier[vts] , identifier[_vzs] = identifier[vzs] ,
** identifier[kwargs] )/ identifier[surfmass]
keyword[return] literal[int] * identifier[numpy] . identifier[arctan] ( literal[int] * identifier[tsigmarz] /( identifier[tsigmar2] - identifier[tsigmaz2] ))
keyword[elif] identifier[gl] :
identifier[surfmass] , identifier[glqeval] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[gl] = identifier[gl] , identifier[ngl] = identifier[ngl] ,
identifier[_returngl] = keyword[True] ,
** identifier[kwargs] )
identifier[tsigmar2] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[ngl] = identifier[ngl] , identifier[gl] = identifier[gl] ,
identifier[_glqeval] = identifier[glqeval] ,
** identifier[kwargs] )/ identifier[surfmass]
identifier[tsigmaz2] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[ngl] = identifier[ngl] , identifier[gl] = identifier[gl] ,
identifier[_glqeval] = identifier[glqeval] ,
** identifier[kwargs] )/ identifier[surfmass]
identifier[tsigmarz] = identifier[self] . identifier[_vmomentdensity] ( identifier[R] , identifier[z] , literal[int] , literal[int] , literal[int] ,
identifier[ngl] = identifier[ngl] , identifier[gl] = identifier[gl] ,
identifier[_glqeval] = identifier[glqeval] ,
** identifier[kwargs] )/ identifier[surfmass]
keyword[return] literal[int] * identifier[numpy] . identifier[arctan] ( literal[int] * identifier[tsigmarz] /( identifier[tsigmar2] - identifier[tsigmaz2] ))
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] ) | def tilt(self, R, z, nsigma=None, mc=False, nmc=10000, gl=True, ngl=_DEFAULTNGL, **kwargs):
"""
NAME:
tilt
PURPOSE:
calculate the tilt of the velocity ellipsoid by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
tilt in rad
HISTORY:
2012-12-23 - Written - Bovy (IAS)
2017-10-28 - Changed return unit to rad - Bovy (UofT)
"""
warnings.warn('In versions >1.3, the output unit of quasiisothermaldf.tilt has been changed to radian (from degree before)', galpyWarning)
if mc:
(surfmass, vrs, vts, vzs) = self._vmomentdensity(R, z, 0.0, 0.0, 0.0, nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=True, **kwargs)
tsigmar2 = self._vmomentdensity(R, z, 2.0, 0.0, 0.0, nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=False, _vrs=vrs, _vts=vts, _vzs=vzs, **kwargs) / surfmass
tsigmaz2 = self._vmomentdensity(R, z, 0.0, 0.0, 2.0, nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=False, _vrs=vrs, _vts=vts, _vzs=vzs, **kwargs) / surfmass
tsigmarz = self._vmomentdensity(R, z, 1.0, 0.0, 1.0, nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=False, _vrs=vrs, _vts=vts, _vzs=vzs, **kwargs) / surfmass
return 0.5 * numpy.arctan(2.0 * tsigmarz / (tsigmar2 - tsigmaz2)) # depends on [control=['if'], data=[]]
elif gl:
(surfmass, glqeval) = self._vmomentdensity(R, z, 0.0, 0.0, 0.0, gl=gl, ngl=ngl, _returngl=True, **kwargs)
tsigmar2 = self._vmomentdensity(R, z, 2.0, 0.0, 0.0, ngl=ngl, gl=gl, _glqeval=glqeval, **kwargs) / surfmass
tsigmaz2 = self._vmomentdensity(R, z, 0.0, 0.0, 2.0, ngl=ngl, gl=gl, _glqeval=glqeval, **kwargs) / surfmass
tsigmarz = self._vmomentdensity(R, z, 1.0, 0.0, 1.0, ngl=ngl, gl=gl, _glqeval=glqeval, **kwargs) / surfmass
return 0.5 * numpy.arctan(2.0 * tsigmarz / (tsigmar2 - tsigmaz2)) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Use either mc=True or gl=True') |
def _image_xobjects(container):
"""Search for all XObject-based images in the container
Usually the container is a page, but it could also be a Form XObject
that contains images. Filter out the Form XObjects which are dealt with
elsewhere.
Generate a sequence of tuples (image, xobj container), where container,
where xobj is the name of the object and image is the object itself,
since the object does not know its own name.
"""
if '/Resources' not in container:
return
resources = container['/Resources']
if '/XObject' not in resources:
return
xobjs = resources['/XObject'].as_dict()
for xobj in xobjs:
candidate = xobjs[xobj]
if not '/Subtype' in candidate:
continue
if candidate['/Subtype'] == '/Image':
pdfimage = candidate
yield (pdfimage, xobj) | def function[_image_xobjects, parameter[container]]:
constant[Search for all XObject-based images in the container
Usually the container is a page, but it could also be a Form XObject
that contains images. Filter out the Form XObjects which are dealt with
elsewhere.
Generate a sequence of tuples (image, xobj container), where container,
where xobj is the name of the object and image is the object itself,
since the object does not know its own name.
]
if compare[constant[/Resources] <ast.NotIn object at 0x7da2590d7190> name[container]] begin[:]
return[None]
variable[resources] assign[=] call[name[container]][constant[/Resources]]
if compare[constant[/XObject] <ast.NotIn object at 0x7da2590d7190> name[resources]] begin[:]
return[None]
variable[xobjs] assign[=] call[call[name[resources]][constant[/XObject]].as_dict, parameter[]]
for taget[name[xobj]] in starred[name[xobjs]] begin[:]
variable[candidate] assign[=] call[name[xobjs]][name[xobj]]
if <ast.UnaryOp object at 0x7da1b1bc8310> begin[:]
continue
if compare[call[name[candidate]][constant[/Subtype]] equal[==] constant[/Image]] begin[:]
variable[pdfimage] assign[=] name[candidate]
<ast.Yield object at 0x7da1b1bc85b0> | keyword[def] identifier[_image_xobjects] ( identifier[container] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[container] :
keyword[return]
identifier[resources] = identifier[container] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[resources] :
keyword[return]
identifier[xobjs] = identifier[resources] [ literal[string] ]. identifier[as_dict] ()
keyword[for] identifier[xobj] keyword[in] identifier[xobjs] :
identifier[candidate] = identifier[xobjs] [ identifier[xobj] ]
keyword[if] keyword[not] literal[string] keyword[in] identifier[candidate] :
keyword[continue]
keyword[if] identifier[candidate] [ literal[string] ]== literal[string] :
identifier[pdfimage] = identifier[candidate]
keyword[yield] ( identifier[pdfimage] , identifier[xobj] ) | def _image_xobjects(container):
"""Search for all XObject-based images in the container
Usually the container is a page, but it could also be a Form XObject
that contains images. Filter out the Form XObjects which are dealt with
elsewhere.
Generate a sequence of tuples (image, xobj container), where container,
where xobj is the name of the object and image is the object itself,
since the object does not know its own name.
"""
if '/Resources' not in container:
return # depends on [control=['if'], data=[]]
resources = container['/Resources']
if '/XObject' not in resources:
return # depends on [control=['if'], data=[]]
xobjs = resources['/XObject'].as_dict()
for xobj in xobjs:
candidate = xobjs[xobj]
if not '/Subtype' in candidate:
continue # depends on [control=['if'], data=[]]
if candidate['/Subtype'] == '/Image':
pdfimage = candidate
yield (pdfimage, xobj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['xobj']] |
def power_tick(val, pos, times_sign=r'\times'):
"""Custom power ticker function. """
if val == 0:
return r'$\mathregular{0}$'
elif val < 0:
exponent = int(np.log10(-val))
else:
exponent = int(np.log10(val))
coeff = val / 10**exponent
return r'$\mathregular{{{:.1f} {} 10^{:2d}}}$'.format(coeff,
times_sign,
exponent) | def function[power_tick, parameter[val, pos, times_sign]]:
constant[Custom power ticker function. ]
if compare[name[val] equal[==] constant[0]] begin[:]
return[constant[$\mathregular{0}$]]
variable[coeff] assign[=] binary_operation[name[val] / binary_operation[constant[10] ** name[exponent]]]
return[call[constant[$\mathregular{{{:.1f} {} 10^{:2d}}}$].format, parameter[name[coeff], name[times_sign], name[exponent]]]] | keyword[def] identifier[power_tick] ( identifier[val] , identifier[pos] , identifier[times_sign] = literal[string] ):
literal[string]
keyword[if] identifier[val] == literal[int] :
keyword[return] literal[string]
keyword[elif] identifier[val] < literal[int] :
identifier[exponent] = identifier[int] ( identifier[np] . identifier[log10] (- identifier[val] ))
keyword[else] :
identifier[exponent] = identifier[int] ( identifier[np] . identifier[log10] ( identifier[val] ))
identifier[coeff] = identifier[val] / literal[int] ** identifier[exponent]
keyword[return] literal[string] . identifier[format] ( identifier[coeff] ,
identifier[times_sign] ,
identifier[exponent] ) | def power_tick(val, pos, times_sign='\\times'):
"""Custom power ticker function. """
if val == 0:
return '$\\mathregular{0}$' # depends on [control=['if'], data=[]]
elif val < 0:
exponent = int(np.log10(-val)) # depends on [control=['if'], data=['val']]
else:
exponent = int(np.log10(val))
coeff = val / 10 ** exponent
return '$\\mathregular{{{:.1f} {} 10^{:2d}}}$'.format(coeff, times_sign, exponent) |
def from_localhost(self) -> bool:
"""True if :attr:`.peername` is a connection from a ``localhost``
address.
"""
sock_family = self.socket.family
if sock_family == _socket.AF_UNIX:
return True
elif sock_family not in (_socket.AF_INET, _socket.AF_INET6):
return False
sock_address, *_ = self.peername
ip = ipaddress.ip_address(sock_address)
if ip.version == 6 and ip.ipv4_mapped is not None:
ip = ipaddress.ip_address(ip.ipv4_mapped)
return ip.is_loopback | def function[from_localhost, parameter[self]]:
constant[True if :attr:`.peername` is a connection from a ``localhost``
address.
]
variable[sock_family] assign[=] name[self].socket.family
if compare[name[sock_family] equal[==] name[_socket].AF_UNIX] begin[:]
return[constant[True]]
<ast.Tuple object at 0x7da1b23451e0> assign[=] name[self].peername
variable[ip] assign[=] call[name[ipaddress].ip_address, parameter[name[sock_address]]]
if <ast.BoolOp object at 0x7da1b2345e10> begin[:]
variable[ip] assign[=] call[name[ipaddress].ip_address, parameter[name[ip].ipv4_mapped]]
return[name[ip].is_loopback] | keyword[def] identifier[from_localhost] ( identifier[self] )-> identifier[bool] :
literal[string]
identifier[sock_family] = identifier[self] . identifier[socket] . identifier[family]
keyword[if] identifier[sock_family] == identifier[_socket] . identifier[AF_UNIX] :
keyword[return] keyword[True]
keyword[elif] identifier[sock_family] keyword[not] keyword[in] ( identifier[_socket] . identifier[AF_INET] , identifier[_socket] . identifier[AF_INET6] ):
keyword[return] keyword[False]
identifier[sock_address] ,* identifier[_] = identifier[self] . identifier[peername]
identifier[ip] = identifier[ipaddress] . identifier[ip_address] ( identifier[sock_address] )
keyword[if] identifier[ip] . identifier[version] == literal[int] keyword[and] identifier[ip] . identifier[ipv4_mapped] keyword[is] keyword[not] keyword[None] :
identifier[ip] = identifier[ipaddress] . identifier[ip_address] ( identifier[ip] . identifier[ipv4_mapped] )
keyword[return] identifier[ip] . identifier[is_loopback] | def from_localhost(self) -> bool:
"""True if :attr:`.peername` is a connection from a ``localhost``
address.
"""
sock_family = self.socket.family
if sock_family == _socket.AF_UNIX:
return True # depends on [control=['if'], data=[]]
elif sock_family not in (_socket.AF_INET, _socket.AF_INET6):
return False # depends on [control=['if'], data=[]]
(sock_address, *_) = self.peername
ip = ipaddress.ip_address(sock_address)
if ip.version == 6 and ip.ipv4_mapped is not None:
ip = ipaddress.ip_address(ip.ipv4_mapped) # depends on [control=['if'], data=[]]
return ip.is_loopback |
def _find_descendents(self, url):
"""Return properties document for url and all children."""
# Ad-hoc query for URL starting with a prefix
map_fun = """function(doc) {
var url = doc.url + "/";
if(doc.type === 'properties' && url.indexOf('%s') === 0) {
emit(doc.url, { 'id': doc._id, 'url': doc.url });
}
}""" % (
url + "/"
)
vr = self.db.query(map_fun, include_docs=True)
for row in vr:
yield row.doc
return | def function[_find_descendents, parameter[self, url]]:
constant[Return properties document for url and all children.]
variable[map_fun] assign[=] binary_operation[constant[function(doc) {
var url = doc.url + "/";
if(doc.type === 'properties' && url.indexOf('%s') === 0) {
emit(doc.url, { 'id': doc._id, 'url': doc.url });
}
}] <ast.Mod object at 0x7da2590d6920> binary_operation[name[url] + constant[/]]]
variable[vr] assign[=] call[name[self].db.query, parameter[name[map_fun]]]
for taget[name[row]] in starred[name[vr]] begin[:]
<ast.Yield object at 0x7da1b00013f0>
return[None] | keyword[def] identifier[_find_descendents] ( identifier[self] , identifier[url] ):
literal[string]
identifier[map_fun] = literal[string] %(
identifier[url] + literal[string]
)
identifier[vr] = identifier[self] . identifier[db] . identifier[query] ( identifier[map_fun] , identifier[include_docs] = keyword[True] )
keyword[for] identifier[row] keyword[in] identifier[vr] :
keyword[yield] identifier[row] . identifier[doc]
keyword[return] | def _find_descendents(self, url):
"""Return properties document for url and all children."""
# Ad-hoc query for URL starting with a prefix
map_fun = 'function(doc) {\n var url = doc.url + "/";\n if(doc.type === \'properties\' && url.indexOf(\'%s\') === 0) {\n emit(doc.url, { \'id\': doc._id, \'url\': doc.url });\n }\n }' % (url + '/')
vr = self.db.query(map_fun, include_docs=True)
for row in vr:
yield row.doc # depends on [control=['for'], data=['row']]
return |
def random_color(_min=MIN_COLOR, _max=MAX_COLOR):
"""Returns a random color between min and max."""
return color(random.randint(_min, _max)) | def function[random_color, parameter[_min, _max]]:
constant[Returns a random color between min and max.]
return[call[name[color], parameter[call[name[random].randint, parameter[name[_min], name[_max]]]]]] | keyword[def] identifier[random_color] ( identifier[_min] = identifier[MIN_COLOR] , identifier[_max] = identifier[MAX_COLOR] ):
literal[string]
keyword[return] identifier[color] ( identifier[random] . identifier[randint] ( identifier[_min] , identifier[_max] )) | def random_color(_min=MIN_COLOR, _max=MAX_COLOR):
"""Returns a random color between min and max."""
return color(random.randint(_min, _max)) |
def _parse_proto(prototxt_fname):
"""Parse Caffe prototxt into symbol string
"""
proto = caffe_parser.read_prototxt(prototxt_fname)
# process data layer
input_name, input_dim, layers = _get_input(proto)
# only support single input, so always use `data` as the input data
mapping = {input_name: 'data'}
need_flatten = {input_name: False}
symbol_string = "import mxnet as mx\ndata = mx.symbol.Variable(name='data')\n"
flatten_count = 0
output_name = ""
prev_name = None
_output_name = {}
# convert reset layers one by one
for i, layer in enumerate(layers):
type_string = ''
param_string = ''
skip_layer = False
name = re.sub('[-/]', '_', layer.name)
for k in range(len(layer.bottom)):
if layer.bottom[k] in _output_name:
_output_name[layer.bottom[k]]['count'] = _output_name[layer.bottom[k]]['count']+1
else:
_output_name[layer.bottom[k]] = {'count':0}
for k in range(len(layer.top)):
if layer.top[k] in _output_name:
_output_name[layer.top[k]]['count'] = _output_name[layer.top[k]]['count']+1
else:
_output_name[layer.top[k]] = {'count':0, 'name':name}
if layer.type == 'Convolution' or layer.type == 4:
type_string = 'mx.symbol.Convolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Deconvolution' or layer.type == 39:
type_string = 'mx.symbol.Deconvolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Pooling' or layer.type == 17:
type_string = 'mx.symbol.Pooling'
param_string = _convert_pooling_param(layer.pooling_param)
need_flatten[name] = True
if layer.type == 'ReLU' or layer.type == 18:
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
param = layer.relu_param
if hasattr(param, 'negative_slope'):
if param.negative_slope > 0:
type_string = 'mx.symbol.LeakyReLU'
param_string = "act_type='leaky', slope=%f" % param.negative_slope
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'TanH' or layer.type == 23:
type_string = 'mx.symbol.Activation'
param_string = "act_type='tanh'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Sigmoid' or layer.type == 19:
type_string = 'mx.symbol.Activation'
param_string = "act_type='sigmoid'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'LRN' or layer.type == 15:
type_string = 'mx.symbol.LRN'
param = layer.lrn_param
param_string = "alpha=%f, beta=%f, knorm=%f, nsize=%d" % (
param.alpha, param.beta, param.k, param.local_size)
need_flatten[name] = True
if layer.type == 'InnerProduct' or layer.type == 14:
type_string = 'mx.symbol.FullyConnected'
param = layer.inner_product_param
param_string = "num_hidden=%d, no_bias=%s" % (
param.num_output, not param.bias_term)
need_flatten[name] = False
if layer.type == 'Dropout' or layer.type == 6:
type_string = 'mx.symbol.Dropout'
param = layer.dropout_param
param_string = "p=%f" % param.dropout_ratio
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Softmax' or layer.type == 20:
type_string = 'mx.symbol.SoftmaxOutput'
if layer.type == 'Flatten' or layer.type == 8:
type_string = 'mx.symbol.Flatten'
need_flatten[name] = False
if layer.type == 'Split' or layer.type == 22:
type_string = 'split' # will process later
if layer.type == 'Concat' or layer.type == 3:
type_string = 'mx.symbol.Concat'
need_flatten[name] = True
if layer.type == 'Crop':
type_string = 'mx.symbol.Crop'
need_flatten[name] = True
param_string = 'center_crop=True'
if layer.type == 'BatchNorm':
type_string = 'mx.symbol.BatchNorm'
param = layer.batch_norm_param
# CuDNN requires eps to be greater than 1e-05
# We compensate for this change in convert_model
epsilon = param.eps
if (epsilon <= 1e-05):
epsilon = 1e-04
# if next layer is scale, don't fix gamma
fix_gamma = layers[i+1].type != 'Scale'
param_string = 'use_global_stats=%s, fix_gamma=%s, eps=%f' % (
param.use_global_stats, fix_gamma, epsilon)
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Scale':
assert layers[i-1].type == 'BatchNorm'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
skip_layer = True
prev_name = re.sub('[-/]', '_', layers[i-1].name)
if layer.type == 'PReLU':
type_string = 'mx.symbol.LeakyReLU'
param = layer.prelu_param
param_string = "act_type='prelu', slope=%f" % param.filler.value
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Eltwise':
type_string = 'mx.symbol.broadcast_add'
param = layer.eltwise_param
param_string = ""
need_flatten[name] = False
if layer.type == 'Reshape':
type_string = 'mx.symbol.Reshape'
need_flatten[name] = False
param = layer.reshape_param
param_string = "shape=(%s)" % (','.join(param.shape.dim),)
if layer.type == 'AbsVal':
type_string = 'mx.symbol.abs'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if skip_layer:
assert len(layer.bottom) == 1
symbol_string += "%s = %s\n" % (name, prev_name)
elif type_string == '':
raise ValueError('Unknown layer %s!' % layer.type)
elif type_string != 'split':
bottom = layer.bottom
if param_string != "":
param_string = ", " + param_string
if len(bottom) == 1:
if need_flatten[mapping[bottom[0]]] and type_string == 'mx.symbol.FullyConnected':
flatten_name = "flatten_%d" % flatten_count
symbol_string += "%s=mx.symbol.Flatten(name='%s', data=%s)\n" % (
flatten_name, flatten_name, mapping[bottom[0]])
flatten_count += 1
need_flatten[flatten_name] = False
bottom[0] = flatten_name
mapping[bottom[0]] = bottom[0]
symbol_string += "%s = %s(name='%s', data=%s %s)\n" % (
name, type_string, name, mapping[bottom[0]], param_string)
else:
if layer.type == 'Eltwise' and param.operation == 1 and len(param.coeff) > 0:
symbol_string += "%s = " % name
symbol_string += " + ".join(["%s * %s" % (
mapping[bottom[i]], param.coeff[i]) for i in range(len(param.coeff))])
symbol_string += "\n"
else:
symbol_string += "%s = %s(name='%s', *[%s] %s)\n" % (
name, type_string, name, ','.join(
[mapping[x] for x in bottom]), param_string)
for j in range(len(layer.top)):
mapping[layer.top[j]] = name
output_name = name
output_name = []
for i in _output_name:
if 'name' in _output_name[i] and _output_name[i]['count'] == 0:
output_name.append(_output_name[i]['name'])
return symbol_string, output_name, input_dim | def function[_parse_proto, parameter[prototxt_fname]]:
constant[Parse Caffe prototxt into symbol string
]
variable[proto] assign[=] call[name[caffe_parser].read_prototxt, parameter[name[prototxt_fname]]]
<ast.Tuple object at 0x7da1b1fb41c0> assign[=] call[name[_get_input], parameter[name[proto]]]
variable[mapping] assign[=] dictionary[[<ast.Name object at 0x7da1b1fb73d0>], [<ast.Constant object at 0x7da1b1fb7400>]]
variable[need_flatten] assign[=] dictionary[[<ast.Name object at 0x7da1b1fb74c0>], [<ast.Constant object at 0x7da1b1fb74f0>]]
variable[symbol_string] assign[=] constant[import mxnet as mx
data = mx.symbol.Variable(name='data')
]
variable[flatten_count] assign[=] constant[0]
variable[output_name] assign[=] constant[]
variable[prev_name] assign[=] constant[None]
variable[_output_name] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1fb7850>, <ast.Name object at 0x7da1b1fb7880>]]] in starred[call[name[enumerate], parameter[name[layers]]]] begin[:]
variable[type_string] assign[=] constant[]
variable[param_string] assign[=] constant[]
variable[skip_layer] assign[=] constant[False]
variable[name] assign[=] call[name[re].sub, parameter[constant[[-/]], constant[_], name[layer].name]]
for taget[name[k]] in starred[call[name[range], parameter[call[name[len], parameter[name[layer].bottom]]]]] begin[:]
if compare[call[name[layer].bottom][name[k]] in name[_output_name]] begin[:]
call[call[name[_output_name]][call[name[layer].bottom][name[k]]]][constant[count]] assign[=] binary_operation[call[call[name[_output_name]][call[name[layer].bottom][name[k]]]][constant[count]] + constant[1]]
for taget[name[k]] in starred[call[name[range], parameter[call[name[len], parameter[name[layer].top]]]]] begin[:]
if compare[call[name[layer].top][name[k]] in name[_output_name]] begin[:]
call[call[name[_output_name]][call[name[layer].top][name[k]]]][constant[count]] assign[=] binary_operation[call[call[name[_output_name]][call[name[layer].top][name[k]]]][constant[count]] + constant[1]]
if <ast.BoolOp object at 0x7da1b1fb7220> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Convolution]
variable[param_string] assign[=] call[name[_convert_conv_param], parameter[name[layer].convolution_param]]
call[name[need_flatten]][name[name]] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1f743a0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Deconvolution]
variable[param_string] assign[=] call[name[_convert_conv_param], parameter[name[layer].convolution_param]]
call[name[need_flatten]][name[name]] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1f74dc0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Pooling]
variable[param_string] assign[=] call[name[_convert_pooling_param], parameter[name[layer].pooling_param]]
call[name[need_flatten]][name[name]] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1f75000> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Activation]
variable[param_string] assign[=] constant[act_type='relu']
variable[param] assign[=] name[layer].relu_param
if call[name[hasattr], parameter[name[param], constant[negative_slope]]] begin[:]
if compare[name[param].negative_slope greater[>] constant[0]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.LeakyReLU]
variable[param_string] assign[=] binary_operation[constant[act_type='leaky', slope=%f] <ast.Mod object at 0x7da2590d6920> name[param].negative_slope]
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if <ast.BoolOp object at 0x7da1b1f770d0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Activation]
variable[param_string] assign[=] constant[act_type='tanh']
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if <ast.BoolOp object at 0x7da1b1f747f0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Activation]
variable[param_string] assign[=] constant[act_type='sigmoid']
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if <ast.BoolOp object at 0x7da1b1f76140> begin[:]
variable[type_string] assign[=] constant[mx.symbol.LRN]
variable[param] assign[=] name[layer].lrn_param
variable[param_string] assign[=] binary_operation[constant[alpha=%f, beta=%f, knorm=%f, nsize=%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1f77af0>, <ast.Attribute object at 0x7da1b1f74250>, <ast.Attribute object at 0x7da1b1f74040>, <ast.Attribute object at 0x7da1b1f74070>]]]
call[name[need_flatten]][name[name]] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1f741c0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.FullyConnected]
variable[param] assign[=] name[layer].inner_product_param
variable[param_string] assign[=] binary_operation[constant[num_hidden=%d, no_bias=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1f77a90>, <ast.UnaryOp object at 0x7da1b1f77a60>]]]
call[name[need_flatten]][name[name]] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1f742e0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Dropout]
variable[param] assign[=] name[layer].dropout_param
variable[param_string] assign[=] binary_operation[constant[p=%f] <ast.Mod object at 0x7da2590d6920> name[param].dropout_ratio]
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if <ast.BoolOp object at 0x7da1b1f75240> begin[:]
variable[type_string] assign[=] constant[mx.symbol.SoftmaxOutput]
if <ast.BoolOp object at 0x7da1b1f75ba0> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Flatten]
call[name[need_flatten]][name[name]] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1f75ab0> begin[:]
variable[type_string] assign[=] constant[split]
if <ast.BoolOp object at 0x7da1b1f76950> begin[:]
variable[type_string] assign[=] constant[mx.symbol.Concat]
call[name[need_flatten]][name[name]] assign[=] constant[True]
if compare[name[layer].type equal[==] constant[Crop]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.Crop]
call[name[need_flatten]][name[name]] assign[=] constant[True]
variable[param_string] assign[=] constant[center_crop=True]
if compare[name[layer].type equal[==] constant[BatchNorm]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.BatchNorm]
variable[param] assign[=] name[layer].batch_norm_param
variable[epsilon] assign[=] name[param].eps
if compare[name[epsilon] less_or_equal[<=] constant[1e-05]] begin[:]
variable[epsilon] assign[=] constant[0.0001]
variable[fix_gamma] assign[=] compare[call[name[layers]][binary_operation[name[i] + constant[1]]].type not_equal[!=] constant[Scale]]
variable[param_string] assign[=] binary_operation[constant[use_global_stats=%s, fix_gamma=%s, eps=%f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b200ab00>, <ast.Name object at 0x7da1b200a260>, <ast.Name object at 0x7da1b2009f60>]]]
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if compare[name[layer].type equal[==] constant[Scale]] begin[:]
assert[compare[call[name[layers]][binary_operation[name[i] - constant[1]]].type equal[==] constant[BatchNorm]]]
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
variable[skip_layer] assign[=] constant[True]
variable[prev_name] assign[=] call[name[re].sub, parameter[constant[[-/]], constant[_], call[name[layers]][binary_operation[name[i] - constant[1]]].name]]
if compare[name[layer].type equal[==] constant[PReLU]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.LeakyReLU]
variable[param] assign[=] name[layer].prelu_param
variable[param_string] assign[=] binary_operation[constant[act_type='prelu', slope=%f] <ast.Mod object at 0x7da2590d6920> name[param].filler.value]
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if compare[name[layer].type equal[==] constant[Eltwise]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.broadcast_add]
variable[param] assign[=] name[layer].eltwise_param
variable[param_string] assign[=] constant[]
call[name[need_flatten]][name[name]] assign[=] constant[False]
if compare[name[layer].type equal[==] constant[Reshape]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.Reshape]
call[name[need_flatten]][name[name]] assign[=] constant[False]
variable[param] assign[=] name[layer].reshape_param
variable[param_string] assign[=] binary_operation[constant[shape=(%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b200a7a0>]]]
if compare[name[layer].type equal[==] constant[AbsVal]] begin[:]
variable[type_string] assign[=] constant[mx.symbol.abs]
call[name[need_flatten]][name[name]] assign[=] call[name[need_flatten]][call[name[mapping]][call[name[layer].bottom][constant[0]]]]
if name[skip_layer] begin[:]
assert[compare[call[name[len], parameter[name[layer].bottom]] equal[==] constant[1]]]
<ast.AugAssign object at 0x7da1b2008ca0>
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[layer].top]]]]] begin[:]
call[name[mapping]][call[name[layer].top][name[j]]] assign[=] name[name]
variable[output_name] assign[=] name[name]
variable[output_name] assign[=] list[[]]
for taget[name[i]] in starred[name[_output_name]] begin[:]
if <ast.BoolOp object at 0x7da1b1ff6f20> begin[:]
call[name[output_name].append, parameter[call[call[name[_output_name]][name[i]]][constant[name]]]]
return[tuple[[<ast.Name object at 0x7da1b1ff68c0>, <ast.Name object at 0x7da1b1ff6830>, <ast.Name object at 0x7da1b1ff6920>]]] | keyword[def] identifier[_parse_proto] ( identifier[prototxt_fname] ):
literal[string]
identifier[proto] = identifier[caffe_parser] . identifier[read_prototxt] ( identifier[prototxt_fname] )
identifier[input_name] , identifier[input_dim] , identifier[layers] = identifier[_get_input] ( identifier[proto] )
identifier[mapping] ={ identifier[input_name] : literal[string] }
identifier[need_flatten] ={ identifier[input_name] : keyword[False] }
identifier[symbol_string] = literal[string]
identifier[flatten_count] = literal[int]
identifier[output_name] = literal[string]
identifier[prev_name] = keyword[None]
identifier[_output_name] ={}
keyword[for] identifier[i] , identifier[layer] keyword[in] identifier[enumerate] ( identifier[layers] ):
identifier[type_string] = literal[string]
identifier[param_string] = literal[string]
identifier[skip_layer] = keyword[False]
identifier[name] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[layer] . identifier[name] )
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[len] ( identifier[layer] . identifier[bottom] )):
keyword[if] identifier[layer] . identifier[bottom] [ identifier[k] ] keyword[in] identifier[_output_name] :
identifier[_output_name] [ identifier[layer] . identifier[bottom] [ identifier[k] ]][ literal[string] ]= identifier[_output_name] [ identifier[layer] . identifier[bottom] [ identifier[k] ]][ literal[string] ]+ literal[int]
keyword[else] :
identifier[_output_name] [ identifier[layer] . identifier[bottom] [ identifier[k] ]]={ literal[string] : literal[int] }
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[len] ( identifier[layer] . identifier[top] )):
keyword[if] identifier[layer] . identifier[top] [ identifier[k] ] keyword[in] identifier[_output_name] :
identifier[_output_name] [ identifier[layer] . identifier[top] [ identifier[k] ]][ literal[string] ]= identifier[_output_name] [ identifier[layer] . identifier[top] [ identifier[k] ]][ literal[string] ]+ literal[int]
keyword[else] :
identifier[_output_name] [ identifier[layer] . identifier[top] [ identifier[k] ]]={ literal[string] : literal[int] , literal[string] : identifier[name] }
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = identifier[_convert_conv_param] ( identifier[layer] . identifier[convolution_param] )
identifier[need_flatten] [ identifier[name] ]= keyword[True]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = identifier[_convert_conv_param] ( identifier[layer] . identifier[convolution_param] )
identifier[need_flatten] [ identifier[name] ]= keyword[True]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = identifier[_convert_pooling_param] ( identifier[layer] . identifier[pooling_param] )
identifier[need_flatten] [ identifier[name] ]= keyword[True]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = literal[string]
identifier[param] = identifier[layer] . identifier[relu_param]
keyword[if] identifier[hasattr] ( identifier[param] , literal[string] ):
keyword[if] identifier[param] . identifier[negative_slope] > literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = literal[string] % identifier[param] . identifier[negative_slope]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param] = identifier[layer] . identifier[lrn_param]
identifier[param_string] = literal[string] %(
identifier[param] . identifier[alpha] , identifier[param] . identifier[beta] , identifier[param] . identifier[k] , identifier[param] . identifier[local_size] )
identifier[need_flatten] [ identifier[name] ]= keyword[True]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param] = identifier[layer] . identifier[inner_product_param]
identifier[param_string] = literal[string] %(
identifier[param] . identifier[num_output] , keyword[not] identifier[param] . identifier[bias_term] )
identifier[need_flatten] [ identifier[name] ]= keyword[False]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[param] = identifier[layer] . identifier[dropout_param]
identifier[param_string] = literal[string] % identifier[param] . identifier[dropout_ratio]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= keyword[False]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[or] identifier[layer] . identifier[type] == literal[int] :
identifier[type_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= keyword[True]
keyword[if] identifier[layer] . identifier[type] == literal[string] :
identifier[type_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= keyword[True]
identifier[param_string] = literal[string]
keyword[if] identifier[layer] . identifier[type] == literal[string] :
identifier[type_string] = literal[string]
identifier[param] = identifier[layer] . identifier[batch_norm_param]
identifier[epsilon] = identifier[param] . identifier[eps]
keyword[if] ( identifier[epsilon] <= literal[int] ):
identifier[epsilon] = literal[int]
identifier[fix_gamma] = identifier[layers] [ identifier[i] + literal[int] ]. identifier[type] != literal[string]
identifier[param_string] = literal[string] %(
identifier[param] . identifier[use_global_stats] , identifier[fix_gamma] , identifier[epsilon] )
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[layer] . identifier[type] == literal[string] :
keyword[assert] identifier[layers] [ identifier[i] - literal[int] ]. identifier[type] == literal[string]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
identifier[skip_layer] = keyword[True]
identifier[prev_name] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[layers] [ identifier[i] - literal[int] ]. identifier[name] )
keyword[if] identifier[layer] . identifier[type] == literal[string] :
identifier[type_string] = literal[string]
identifier[param] = identifier[layer] . identifier[prelu_param]
identifier[param_string] = literal[string] % identifier[param] . identifier[filler] . identifier[value]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[layer] . identifier[type] == literal[string] :
identifier[type_string] = literal[string]
identifier[param] = identifier[layer] . identifier[eltwise_param]
identifier[param_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= keyword[False]
keyword[if] identifier[layer] . identifier[type] == literal[string] :
identifier[type_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= keyword[False]
identifier[param] = identifier[layer] . identifier[reshape_param]
identifier[param_string] = literal[string] %( literal[string] . identifier[join] ( identifier[param] . identifier[shape] . identifier[dim] ),)
keyword[if] identifier[layer] . identifier[type] == literal[string] :
identifier[type_string] = literal[string]
identifier[need_flatten] [ identifier[name] ]= identifier[need_flatten] [ identifier[mapping] [ identifier[layer] . identifier[bottom] [ literal[int] ]]]
keyword[if] identifier[skip_layer] :
keyword[assert] identifier[len] ( identifier[layer] . identifier[bottom] )== literal[int]
identifier[symbol_string] += literal[string] %( identifier[name] , identifier[prev_name] )
keyword[elif] identifier[type_string] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[layer] . identifier[type] )
keyword[elif] identifier[type_string] != literal[string] :
identifier[bottom] = identifier[layer] . identifier[bottom]
keyword[if] identifier[param_string] != literal[string] :
identifier[param_string] = literal[string] + identifier[param_string]
keyword[if] identifier[len] ( identifier[bottom] )== literal[int] :
keyword[if] identifier[need_flatten] [ identifier[mapping] [ identifier[bottom] [ literal[int] ]]] keyword[and] identifier[type_string] == literal[string] :
identifier[flatten_name] = literal[string] % identifier[flatten_count]
identifier[symbol_string] += literal[string] %(
identifier[flatten_name] , identifier[flatten_name] , identifier[mapping] [ identifier[bottom] [ literal[int] ]])
identifier[flatten_count] += literal[int]
identifier[need_flatten] [ identifier[flatten_name] ]= keyword[False]
identifier[bottom] [ literal[int] ]= identifier[flatten_name]
identifier[mapping] [ identifier[bottom] [ literal[int] ]]= identifier[bottom] [ literal[int] ]
identifier[symbol_string] += literal[string] %(
identifier[name] , identifier[type_string] , identifier[name] , identifier[mapping] [ identifier[bottom] [ literal[int] ]], identifier[param_string] )
keyword[else] :
keyword[if] identifier[layer] . identifier[type] == literal[string] keyword[and] identifier[param] . identifier[operation] == literal[int] keyword[and] identifier[len] ( identifier[param] . identifier[coeff] )> literal[int] :
identifier[symbol_string] += literal[string] % identifier[name]
identifier[symbol_string] += literal[string] . identifier[join] ([ literal[string] %(
identifier[mapping] [ identifier[bottom] [ identifier[i] ]], identifier[param] . identifier[coeff] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[param] . identifier[coeff] ))])
identifier[symbol_string] += literal[string]
keyword[else] :
identifier[symbol_string] += literal[string] %(
identifier[name] , identifier[type_string] , identifier[name] , literal[string] . identifier[join] (
[ identifier[mapping] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[bottom] ]), identifier[param_string] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[layer] . identifier[top] )):
identifier[mapping] [ identifier[layer] . identifier[top] [ identifier[j] ]]= identifier[name]
identifier[output_name] = identifier[name]
identifier[output_name] =[]
keyword[for] identifier[i] keyword[in] identifier[_output_name] :
keyword[if] literal[string] keyword[in] identifier[_output_name] [ identifier[i] ] keyword[and] identifier[_output_name] [ identifier[i] ][ literal[string] ]== literal[int] :
identifier[output_name] . identifier[append] ( identifier[_output_name] [ identifier[i] ][ literal[string] ])
keyword[return] identifier[symbol_string] , identifier[output_name] , identifier[input_dim] | def _parse_proto(prototxt_fname):
"""Parse Caffe prototxt into symbol string
"""
proto = caffe_parser.read_prototxt(prototxt_fname)
# process data layer
(input_name, input_dim, layers) = _get_input(proto)
# only support single input, so always use `data` as the input data
mapping = {input_name: 'data'}
need_flatten = {input_name: False}
symbol_string = "import mxnet as mx\ndata = mx.symbol.Variable(name='data')\n"
flatten_count = 0
output_name = ''
prev_name = None
_output_name = {}
# convert reset layers one by one
for (i, layer) in enumerate(layers):
type_string = ''
param_string = ''
skip_layer = False
name = re.sub('[-/]', '_', layer.name)
for k in range(len(layer.bottom)):
if layer.bottom[k] in _output_name:
_output_name[layer.bottom[k]]['count'] = _output_name[layer.bottom[k]]['count'] + 1 # depends on [control=['if'], data=['_output_name']]
else:
_output_name[layer.bottom[k]] = {'count': 0} # depends on [control=['for'], data=['k']]
for k in range(len(layer.top)):
if layer.top[k] in _output_name:
_output_name[layer.top[k]]['count'] = _output_name[layer.top[k]]['count'] + 1 # depends on [control=['if'], data=['_output_name']]
else:
_output_name[layer.top[k]] = {'count': 0, 'name': name} # depends on [control=['for'], data=['k']]
if layer.type == 'Convolution' or layer.type == 4:
type_string = 'mx.symbol.Convolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True # depends on [control=['if'], data=[]]
if layer.type == 'Deconvolution' or layer.type == 39:
type_string = 'mx.symbol.Deconvolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True # depends on [control=['if'], data=[]]
if layer.type == 'Pooling' or layer.type == 17:
type_string = 'mx.symbol.Pooling'
param_string = _convert_pooling_param(layer.pooling_param)
need_flatten[name] = True # depends on [control=['if'], data=[]]
if layer.type == 'ReLU' or layer.type == 18:
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
param = layer.relu_param
if hasattr(param, 'negative_slope'):
if param.negative_slope > 0:
type_string = 'mx.symbol.LeakyReLU'
param_string = "act_type='leaky', slope=%f" % param.negative_slope # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if layer.type == 'TanH' or layer.type == 23:
type_string = 'mx.symbol.Activation'
param_string = "act_type='tanh'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if layer.type == 'Sigmoid' or layer.type == 19:
type_string = 'mx.symbol.Activation'
param_string = "act_type='sigmoid'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if layer.type == 'LRN' or layer.type == 15:
type_string = 'mx.symbol.LRN'
param = layer.lrn_param
param_string = 'alpha=%f, beta=%f, knorm=%f, nsize=%d' % (param.alpha, param.beta, param.k, param.local_size)
need_flatten[name] = True # depends on [control=['if'], data=[]]
if layer.type == 'InnerProduct' or layer.type == 14:
type_string = 'mx.symbol.FullyConnected'
param = layer.inner_product_param
param_string = 'num_hidden=%d, no_bias=%s' % (param.num_output, not param.bias_term)
need_flatten[name] = False # depends on [control=['if'], data=[]]
if layer.type == 'Dropout' or layer.type == 6:
type_string = 'mx.symbol.Dropout'
param = layer.dropout_param
param_string = 'p=%f' % param.dropout_ratio
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if layer.type == 'Softmax' or layer.type == 20:
type_string = 'mx.symbol.SoftmaxOutput' # depends on [control=['if'], data=[]]
if layer.type == 'Flatten' or layer.type == 8:
type_string = 'mx.symbol.Flatten'
need_flatten[name] = False # depends on [control=['if'], data=[]]
if layer.type == 'Split' or layer.type == 22:
type_string = 'split' # will process later # depends on [control=['if'], data=[]]
if layer.type == 'Concat' or layer.type == 3:
type_string = 'mx.symbol.Concat'
need_flatten[name] = True # depends on [control=['if'], data=[]]
if layer.type == 'Crop':
type_string = 'mx.symbol.Crop'
need_flatten[name] = True
param_string = 'center_crop=True' # depends on [control=['if'], data=[]]
if layer.type == 'BatchNorm':
type_string = 'mx.symbol.BatchNorm'
param = layer.batch_norm_param
# CuDNN requires eps to be greater than 1e-05
# We compensate for this change in convert_model
epsilon = param.eps
if epsilon <= 1e-05:
epsilon = 0.0001 # depends on [control=['if'], data=['epsilon']]
# if next layer is scale, don't fix gamma
fix_gamma = layers[i + 1].type != 'Scale'
param_string = 'use_global_stats=%s, fix_gamma=%s, eps=%f' % (param.use_global_stats, fix_gamma, epsilon)
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if layer.type == 'Scale':
assert layers[i - 1].type == 'BatchNorm'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
skip_layer = True
prev_name = re.sub('[-/]', '_', layers[i - 1].name) # depends on [control=['if'], data=[]]
if layer.type == 'PReLU':
type_string = 'mx.symbol.LeakyReLU'
param = layer.prelu_param
param_string = "act_type='prelu', slope=%f" % param.filler.value
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if layer.type == 'Eltwise':
type_string = 'mx.symbol.broadcast_add'
param = layer.eltwise_param
param_string = ''
need_flatten[name] = False # depends on [control=['if'], data=[]]
if layer.type == 'Reshape':
type_string = 'mx.symbol.Reshape'
need_flatten[name] = False
param = layer.reshape_param
param_string = 'shape=(%s)' % (','.join(param.shape.dim),) # depends on [control=['if'], data=[]]
if layer.type == 'AbsVal':
type_string = 'mx.symbol.abs'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]] # depends on [control=['if'], data=[]]
if skip_layer:
assert len(layer.bottom) == 1
symbol_string += '%s = %s\n' % (name, prev_name) # depends on [control=['if'], data=[]]
elif type_string == '':
raise ValueError('Unknown layer %s!' % layer.type) # depends on [control=['if'], data=[]]
elif type_string != 'split':
bottom = layer.bottom
if param_string != '':
param_string = ', ' + param_string # depends on [control=['if'], data=['param_string']]
if len(bottom) == 1:
if need_flatten[mapping[bottom[0]]] and type_string == 'mx.symbol.FullyConnected':
flatten_name = 'flatten_%d' % flatten_count
symbol_string += "%s=mx.symbol.Flatten(name='%s', data=%s)\n" % (flatten_name, flatten_name, mapping[bottom[0]])
flatten_count += 1
need_flatten[flatten_name] = False
bottom[0] = flatten_name
mapping[bottom[0]] = bottom[0] # depends on [control=['if'], data=[]]
symbol_string += "%s = %s(name='%s', data=%s %s)\n" % (name, type_string, name, mapping[bottom[0]], param_string) # depends on [control=['if'], data=[]]
elif layer.type == 'Eltwise' and param.operation == 1 and (len(param.coeff) > 0):
symbol_string += '%s = ' % name
symbol_string += ' + '.join(['%s * %s' % (mapping[bottom[i]], param.coeff[i]) for i in range(len(param.coeff))])
symbol_string += '\n' # depends on [control=['if'], data=[]]
else:
symbol_string += "%s = %s(name='%s', *[%s] %s)\n" % (name, type_string, name, ','.join([mapping[x] for x in bottom]), param_string) # depends on [control=['if'], data=['type_string']]
for j in range(len(layer.top)):
mapping[layer.top[j]] = name # depends on [control=['for'], data=['j']]
output_name = name # depends on [control=['for'], data=[]]
output_name = []
for i in _output_name:
if 'name' in _output_name[i] and _output_name[i]['count'] == 0:
output_name.append(_output_name[i]['name']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return (symbol_string, output_name, input_dim) |
def start_all_linking(self, link_type, group_id):
"""Begin all linking"""
self.logger.info("start_all_linking for type %s group %s",
link_type, group_id)
self.direct_command_hub('0264' + link_type + group_id) | def function[start_all_linking, parameter[self, link_type, group_id]]:
constant[Begin all linking]
call[name[self].logger.info, parameter[constant[start_all_linking for type %s group %s], name[link_type], name[group_id]]]
call[name[self].direct_command_hub, parameter[binary_operation[binary_operation[constant[0264] + name[link_type]] + name[group_id]]]] | keyword[def] identifier[start_all_linking] ( identifier[self] , identifier[link_type] , identifier[group_id] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] ,
identifier[link_type] , identifier[group_id] )
identifier[self] . identifier[direct_command_hub] ( literal[string] + identifier[link_type] + identifier[group_id] ) | def start_all_linking(self, link_type, group_id):
"""Begin all linking"""
self.logger.info('start_all_linking for type %s group %s', link_type, group_id)
self.direct_command_hub('0264' + link_type + group_id) |
def set_all_ylims(self, ylim, dy, yscale, fontsize=None):
"""Set limits and ticks for y axis for whole figure.
This will set y axis limits and tick marks for the entire figure.
It can be overridden in the SinglePlot class.
Args:
ylim (len-2 list of floats): The limits for the axis.
dy (float): Amount to increment by between the limits.
yscale (str): Scale of the axis. Either `log` or `lin`.
fontsize (int, optional): Set fontsize for y axis tick marks.
Default is None.
"""
self._set_all_lims('y', ylim, dy, yscale, fontsize)
return | def function[set_all_ylims, parameter[self, ylim, dy, yscale, fontsize]]:
constant[Set limits and ticks for y axis for whole figure.
This will set y axis limits and tick marks for the entire figure.
It can be overridden in the SinglePlot class.
Args:
ylim (len-2 list of floats): The limits for the axis.
dy (float): Amount to increment by between the limits.
yscale (str): Scale of the axis. Either `log` or `lin`.
fontsize (int, optional): Set fontsize for y axis tick marks.
Default is None.
]
call[name[self]._set_all_lims, parameter[constant[y], name[ylim], name[dy], name[yscale], name[fontsize]]]
return[None] | keyword[def] identifier[set_all_ylims] ( identifier[self] , identifier[ylim] , identifier[dy] , identifier[yscale] , identifier[fontsize] = keyword[None] ):
literal[string]
identifier[self] . identifier[_set_all_lims] ( literal[string] , identifier[ylim] , identifier[dy] , identifier[yscale] , identifier[fontsize] )
keyword[return] | def set_all_ylims(self, ylim, dy, yscale, fontsize=None):
"""Set limits and ticks for y axis for whole figure.
This will set y axis limits and tick marks for the entire figure.
It can be overridden in the SinglePlot class.
Args:
ylim (len-2 list of floats): The limits for the axis.
dy (float): Amount to increment by between the limits.
yscale (str): Scale of the axis. Either `log` or `lin`.
fontsize (int, optional): Set fontsize for y axis tick marks.
Default is None.
"""
self._set_all_lims('y', ylim, dy, yscale, fontsize)
return |
def _thresholds_init(self):
"""
Initiate and check any thresholds set
"""
thresholds = getattr(self._py3status_module, "thresholds", [])
self._thresholds = {}
if isinstance(thresholds, list):
try:
thresholds.sort()
except TypeError:
pass
self._thresholds[None] = [(x[0], self._get_color(x[1])) for x in thresholds]
return
elif isinstance(thresholds, dict):
for key, value in thresholds.items():
if isinstance(value, list):
try:
value.sort()
except TypeError:
pass
self._thresholds[key] = [
(x[0], self._get_color(x[1])) for x in value
] | def function[_thresholds_init, parameter[self]]:
constant[
Initiate and check any thresholds set
]
variable[thresholds] assign[=] call[name[getattr], parameter[name[self]._py3status_module, constant[thresholds], list[[]]]]
name[self]._thresholds assign[=] dictionary[[], []]
if call[name[isinstance], parameter[name[thresholds], name[list]]] begin[:]
<ast.Try object at 0x7da20c9924a0>
call[name[self]._thresholds][constant[None]] assign[=] <ast.ListComp object at 0x7da20c991f30>
return[None] | keyword[def] identifier[_thresholds_init] ( identifier[self] ):
literal[string]
identifier[thresholds] = identifier[getattr] ( identifier[self] . identifier[_py3status_module] , literal[string] ,[])
identifier[self] . identifier[_thresholds] ={}
keyword[if] identifier[isinstance] ( identifier[thresholds] , identifier[list] ):
keyword[try] :
identifier[thresholds] . identifier[sort] ()
keyword[except] identifier[TypeError] :
keyword[pass]
identifier[self] . identifier[_thresholds] [ keyword[None] ]=[( identifier[x] [ literal[int] ], identifier[self] . identifier[_get_color] ( identifier[x] [ literal[int] ])) keyword[for] identifier[x] keyword[in] identifier[thresholds] ]
keyword[return]
keyword[elif] identifier[isinstance] ( identifier[thresholds] , identifier[dict] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[thresholds] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[try] :
identifier[value] . identifier[sort] ()
keyword[except] identifier[TypeError] :
keyword[pass]
identifier[self] . identifier[_thresholds] [ identifier[key] ]=[
( identifier[x] [ literal[int] ], identifier[self] . identifier[_get_color] ( identifier[x] [ literal[int] ])) keyword[for] identifier[x] keyword[in] identifier[value]
] | def _thresholds_init(self):
"""
Initiate and check any thresholds set
"""
thresholds = getattr(self._py3status_module, 'thresholds', [])
self._thresholds = {}
if isinstance(thresholds, list):
try:
thresholds.sort() # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
self._thresholds[None] = [(x[0], self._get_color(x[1])) for x in thresholds]
return # depends on [control=['if'], data=[]]
elif isinstance(thresholds, dict):
for (key, value) in thresholds.items():
if isinstance(value, list):
try:
value.sort() # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
self._thresholds[key] = [(x[0], self._get_color(x[1])) for x in value] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
def get_directory_relative_to_git_root(directory: str):
"""
Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root
"""
return os.path.relpath(os.path.realpath(directory), get_git_root_directory(directory)) | def function[get_directory_relative_to_git_root, parameter[directory]]:
constant[
Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root
]
return[call[name[os].path.relpath, parameter[call[name[os].path.realpath, parameter[name[directory]]], call[name[get_git_root_directory], parameter[name[directory]]]]]] | keyword[def] identifier[get_directory_relative_to_git_root] ( identifier[directory] : identifier[str] ):
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[realpath] ( identifier[directory] ), identifier[get_git_root_directory] ( identifier[directory] )) | def get_directory_relative_to_git_root(directory: str):
"""
Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root
"""
return os.path.relpath(os.path.realpath(directory), get_git_root_directory(directory)) |
def get_open_state_machine_of_file_system_path(self, file_system_path):
"""Return a reference to the state machine with respective path if open
"""
for sm in self.state_machines.values():
if sm.file_system_path == file_system_path:
return sm | def function[get_open_state_machine_of_file_system_path, parameter[self, file_system_path]]:
constant[Return a reference to the state machine with respective path if open
]
for taget[name[sm]] in starred[call[name[self].state_machines.values, parameter[]]] begin[:]
if compare[name[sm].file_system_path equal[==] name[file_system_path]] begin[:]
return[name[sm]] | keyword[def] identifier[get_open_state_machine_of_file_system_path] ( identifier[self] , identifier[file_system_path] ):
literal[string]
keyword[for] identifier[sm] keyword[in] identifier[self] . identifier[state_machines] . identifier[values] ():
keyword[if] identifier[sm] . identifier[file_system_path] == identifier[file_system_path] :
keyword[return] identifier[sm] | def get_open_state_machine_of_file_system_path(self, file_system_path):
"""Return a reference to the state machine with respective path if open
"""
for sm in self.state_machines.values():
if sm.file_system_path == file_system_path:
return sm # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sm']] |
def _get_report_string(time='hour', threshold='significant', online=False):
"""
Like :func:`get_report` except returns the raw data instead.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: str
"""
key = _get_report_request(time, threshold)
result = _get(key) if _CONNECTED else _lookup(key)
if (_CONNECTED or online) and _EDITABLE:
_add_to_cache(key, result)
return result | def function[_get_report_string, parameter[time, threshold, online]]:
constant[
Like :func:`get_report` except returns the raw data instead.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: str
]
variable[key] assign[=] call[name[_get_report_request], parameter[name[time], name[threshold]]]
variable[result] assign[=] <ast.IfExp object at 0x7da18ede4d60>
if <ast.BoolOp object at 0x7da18ede60e0> begin[:]
call[name[_add_to_cache], parameter[name[key], name[result]]]
return[name[result]] | keyword[def] identifier[_get_report_string] ( identifier[time] = literal[string] , identifier[threshold] = literal[string] , identifier[online] = keyword[False] ):
literal[string]
identifier[key] = identifier[_get_report_request] ( identifier[time] , identifier[threshold] )
identifier[result] = identifier[_get] ( identifier[key] ) keyword[if] identifier[_CONNECTED] keyword[else] identifier[_lookup] ( identifier[key] )
keyword[if] ( identifier[_CONNECTED] keyword[or] identifier[online] ) keyword[and] identifier[_EDITABLE] :
identifier[_add_to_cache] ( identifier[key] , identifier[result] )
keyword[return] identifier[result] | def _get_report_string(time='hour', threshold='significant', online=False):
"""
Like :func:`get_report` except returns the raw data instead.
:param str time: A string indicating the time range of earthquakes to report. Must be either "hour" (only earthquakes in the past hour), "day" (only earthquakes that happened today), "week" (only earthquakes that happened in the past 7 days), or "month" (only earthquakes that happened in the past 30 days).
:param str threshold: A string indicating what kind of earthquakes to report. Must be either "significant" (only significant earthquakes), "all" (all earthquakes, regardless of significance), "4.5", "2.5", or "1.0". Note that for the last three, all earthquakes at and above that level will be reported.
:returns: str
"""
key = _get_report_request(time, threshold)
result = _get(key) if _CONNECTED else _lookup(key)
if (_CONNECTED or online) and _EDITABLE:
_add_to_cache(key, result) # depends on [control=['if'], data=[]]
return result |
def teardown(self):
'''
cleanup the monitor data and
'''
self.monitor_thread.stop()
self.monitor_thread = None
super(BaseMonitor, self).teardown() | def function[teardown, parameter[self]]:
constant[
cleanup the monitor data and
]
call[name[self].monitor_thread.stop, parameter[]]
name[self].monitor_thread assign[=] constant[None]
call[call[name[super], parameter[name[BaseMonitor], name[self]]].teardown, parameter[]] | keyword[def] identifier[teardown] ( identifier[self] ):
literal[string]
identifier[self] . identifier[monitor_thread] . identifier[stop] ()
identifier[self] . identifier[monitor_thread] = keyword[None]
identifier[super] ( identifier[BaseMonitor] , identifier[self] ). identifier[teardown] () | def teardown(self):
"""
cleanup the monitor data and
"""
self.monitor_thread.stop()
self.monitor_thread = None
super(BaseMonitor, self).teardown() |
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist | def function[get_distributions, parameter[self]]:
constant[
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
]
if <ast.UnaryOp object at 0x7da1b1f81d20> begin[:]
for taget[name[dist]] in starred[call[name[self]._yield_distributions, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b1f837f0> | keyword[def] identifier[get_distributions] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_cache_enabled] :
keyword[for] identifier[dist] keyword[in] identifier[self] . identifier[_yield_distributions] ():
keyword[yield] identifier[dist]
keyword[else] :
identifier[self] . identifier[_generate_cache] ()
keyword[for] identifier[dist] keyword[in] identifier[self] . identifier[_cache] . identifier[path] . identifier[values] ():
keyword[yield] identifier[dist]
keyword[if] identifier[self] . identifier[_include_egg] :
keyword[for] identifier[dist] keyword[in] identifier[self] . identifier[_cache_egg] . identifier[path] . identifier[values] ():
keyword[yield] identifier[dist] | def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist # depends on [control=['for'], data=['dist']] # depends on [control=['if'], data=[]]
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist # depends on [control=['for'], data=['dist']]
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist # depends on [control=['for'], data=['dist']] # depends on [control=['if'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.