code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def enable_branching_model(self, project, repository):
    """
    Enable branching model by setting it with default configuration

    :param project:
    :param repository:
    :return:
    """
    # The four standard branch types, each enabled and mapped to its
    # conventional prefix (id is the upper-cased label, prefix the
    # lower-cased label plus a trailing slash).
    branch_types = [
        {'displayName': label,
         'enabled': True,
         'id': label.upper(),
         'prefix': label.lower() + '/'}
        for label in ('Bugfix', 'Feature', 'Hotfix', 'Release')
    ]
    model = {
        'development': {'refId': None, 'useDefault': True},
        'types': branch_types,
    }
    return self.set_branching_model(project, repository, model)
def function[enable_branching_model, parameter[self, project, repository]]: constant[ Enable branching model by setting it with default configuration :param project: :param repository: :return: ] variable[default_model_data] assign[=] dictionary[[<ast.Constant object at 0x7da18c4ceef0>, <ast.Constant object at 0x7da18c4cf4c0>], [<ast.Dict object at 0x7da18c4cdcc0>, <ast.List object at 0x7da18c4cfa30>]] return[call[name[self].set_branching_model, parameter[name[project], name[repository], name[default_model_data]]]]
keyword[def] identifier[enable_branching_model] ( identifier[self] , identifier[project] , identifier[repository] ): literal[string] identifier[default_model_data] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : keyword[True] }, literal[string] :[{ literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] : literal[string] }, { literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] : literal[string] }, { literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] : literal[string] }, { literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : literal[string] , literal[string] : literal[string] }]} keyword[return] identifier[self] . identifier[set_branching_model] ( identifier[project] , identifier[repository] , identifier[default_model_data] )
def enable_branching_model(self, project, repository): """ Enable branching model by setting it with default configuration :param project: :param repository: :return: """ default_model_data = {'development': {'refId': None, 'useDefault': True}, 'types': [{'displayName': 'Bugfix', 'enabled': True, 'id': 'BUGFIX', 'prefix': 'bugfix/'}, {'displayName': 'Feature', 'enabled': True, 'id': 'FEATURE', 'prefix': 'feature/'}, {'displayName': 'Hotfix', 'enabled': True, 'id': 'HOTFIX', 'prefix': 'hotfix/'}, {'displayName': 'Release', 'enabled': True, 'id': 'RELEASE', 'prefix': 'release/'}]} return self.set_branching_model(project, repository, default_model_data)
def open_session(self, protocol_factory, peer_jid, *,
                 stanza_type=ibb_xso.IBBStanzaType.IQ,
                 block_size=4096,
                 sid=None):
    """
    Establish an in-band bytestream session with `peer_jid` and return the
    transport and protocol.

    :param protocol_factory: the protocol factory
    :type protocol_factory: a nullary callable returning an
        :class:`asyncio.Protocol` instance
    :param peer_jid: the JID with which to establish the byte-stream.
    :type peer_jid: :class:`aioxmpp.JID`
    :param stanza_type: the stanza type to use
    :type stanza_type: class:`~aioxmpp.ibb.IBBStanzaType`
    :param block_size: the maximal size of blocks to transfer
    :type block_size: :class:`int`
    :param sid: the session id to use
    :type sid: :class:`str` (must be a valid NMTOKEN)
    :returns: the transport and protocol
    :rtype: a tuple of :class:`aioxmpp.ibb.service.IBBTransport` and
        :class:`asyncio.Protocol`
    """
    if block_size > MAX_BLOCK_SIZE:
        raise ValueError("block_size too large")

    if sid is None:
        # No session id supplied: derive one from 64 random bits,
        # rendered as a valid NMTOKEN.
        sid = utils.to_nmtoken(random.getrandbits(8*8))

    request = ibb_xso.Open()
    request.stanza = stanza_type
    request.sid = sid
    request.block_size = block_size

    # XXX: retry on XMPPModifyError with RESOURCE_CONSTRAINT
    yield from self.client.send(
        aioxmpp.IQ(
            aioxmpp.IQType.SET,
            to=peer_jid,
            payload=request,
        )
    )

    # Register the transport under (sid, peer) only after the peer
    # accepted the <open/> request.
    transport = self._sessions[sid, peer_jid] = IBBTransport(
        self,
        peer_jid,
        sid,
        stanza_type,
        block_size,
    )
    protocol = protocol_factory()
    transport.set_protocol(protocol)
    return transport, protocol
def function[open_session, parameter[self, protocol_factory, peer_jid]]: constant[ Establish an in-band bytestream session with `peer_jid` and return the transport and protocol. :param protocol_factory: the protocol factory :type protocol_factory: a nullary callable returning an :class:`asyncio.Protocol` instance :param peer_jid: the JID with which to establish the byte-stream. :type peer_jid: :class:`aioxmpp.JID` :param stanza_type: the stanza type to use :type stanza_type: class:`~aioxmpp.ibb.IBBStanzaType` :param block_size: the maximal size of blocks to transfer :type block_size: :class:`int` :param sid: the session id to use :type sid: :class:`str` (must be a valid NMTOKEN) :returns: the transport and protocol :rtype: a tuple of :class:`aioxmpp.ibb.service.IBBTransport` and :class:`asyncio.Protocol` ] if compare[name[block_size] greater[>] name[MAX_BLOCK_SIZE]] begin[:] <ast.Raise object at 0x7da18ede65f0> if compare[name[sid] is constant[None]] begin[:] variable[sid] assign[=] call[name[utils].to_nmtoken, parameter[call[name[random].getrandbits, parameter[binary_operation[constant[8] * constant[8]]]]]] variable[open_] assign[=] call[name[ibb_xso].Open, parameter[]] name[open_].stanza assign[=] name[stanza_type] name[open_].sid assign[=] name[sid] name[open_].block_size assign[=] name[block_size] <ast.YieldFrom object at 0x7da18ede45e0> variable[handle] assign[=] call[name[IBBTransport], parameter[name[self], name[peer_jid], name[sid], name[stanza_type], name[block_size]]] variable[protocol] assign[=] call[name[protocol_factory], parameter[]] call[name[handle].set_protocol, parameter[name[protocol]]] return[tuple[[<ast.Name object at 0x7da18ede5810>, <ast.Name object at 0x7da18ede43a0>]]]
keyword[def] identifier[open_session] ( identifier[self] , identifier[protocol_factory] , identifier[peer_jid] ,*, identifier[stanza_type] = identifier[ibb_xso] . identifier[IBBStanzaType] . identifier[IQ] , identifier[block_size] = literal[int] , identifier[sid] = keyword[None] ): literal[string] keyword[if] identifier[block_size] > identifier[MAX_BLOCK_SIZE] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[sid] keyword[is] keyword[None] : identifier[sid] = identifier[utils] . identifier[to_nmtoken] ( identifier[random] . identifier[getrandbits] ( literal[int] * literal[int] )) identifier[open_] = identifier[ibb_xso] . identifier[Open] () identifier[open_] . identifier[stanza] = identifier[stanza_type] identifier[open_] . identifier[sid] = identifier[sid] identifier[open_] . identifier[block_size] = identifier[block_size] keyword[yield] keyword[from] identifier[self] . identifier[client] . identifier[send] ( identifier[aioxmpp] . identifier[IQ] ( identifier[aioxmpp] . identifier[IQType] . identifier[SET] , identifier[to] = identifier[peer_jid] , identifier[payload] = identifier[open_] , ) ) identifier[handle] = identifier[self] . identifier[_sessions] [ identifier[sid] , identifier[peer_jid] ]= identifier[IBBTransport] ( identifier[self] , identifier[peer_jid] , identifier[sid] , identifier[stanza_type] , identifier[block_size] , ) identifier[protocol] = identifier[protocol_factory] () identifier[handle] . identifier[set_protocol] ( identifier[protocol] ) keyword[return] identifier[handle] , identifier[protocol]
def open_session(self, protocol_factory, peer_jid, *, stanza_type=ibb_xso.IBBStanzaType.IQ, block_size=4096, sid=None): """ Establish an in-band bytestream session with `peer_jid` and return the transport and protocol. :param protocol_factory: the protocol factory :type protocol_factory: a nullary callable returning an :class:`asyncio.Protocol` instance :param peer_jid: the JID with which to establish the byte-stream. :type peer_jid: :class:`aioxmpp.JID` :param stanza_type: the stanza type to use :type stanza_type: class:`~aioxmpp.ibb.IBBStanzaType` :param block_size: the maximal size of blocks to transfer :type block_size: :class:`int` :param sid: the session id to use :type sid: :class:`str` (must be a valid NMTOKEN) :returns: the transport and protocol :rtype: a tuple of :class:`aioxmpp.ibb.service.IBBTransport` and :class:`asyncio.Protocol` """ if block_size > MAX_BLOCK_SIZE: raise ValueError('block_size too large') # depends on [control=['if'], data=[]] if sid is None: sid = utils.to_nmtoken(random.getrandbits(8 * 8)) # depends on [control=['if'], data=['sid']] open_ = ibb_xso.Open() open_.stanza = stanza_type open_.sid = sid open_.block_size = block_size # XXX: retry on XMPPModifyError with RESOURCE_CONSTRAINT yield from self.client.send(aioxmpp.IQ(aioxmpp.IQType.SET, to=peer_jid, payload=open_)) handle = self._sessions[sid, peer_jid] = IBBTransport(self, peer_jid, sid, stanza_type, block_size) protocol = protocol_factory() handle.set_protocol(protocol) return (handle, protocol)
def is_root(self, el):
    """
    Return whether element is a root element.

    We check that the element is the root of the tree (which we have
    already pre-calculated), and we check if it is the root element
    under an `iframe`.
    """
    # Fast path: the pre-calculated tree root.
    if self.root and self.root is el:
        return True
    # Otherwise the element only counts as a root when it is an HTML
    # document whose parent is an `iframe`.
    parent = self.get_parent(el)
    return parent is not None and self.is_html and self.is_iframe(parent)
def function[is_root, parameter[self, el]]: constant[ Return whether element is a root element. We check that the element is the root of the tree (which we have already pre-calculated), and we check if it is the root element under an `iframe`. ] variable[root] assign[=] <ast.BoolOp object at 0x7da204963700> if <ast.UnaryOp object at 0x7da204960640> begin[:] variable[parent] assign[=] call[name[self].get_parent, parameter[name[el]]] variable[root] assign[=] <ast.BoolOp object at 0x7da204962380> return[name[root]]
keyword[def] identifier[is_root] ( identifier[self] , identifier[el] ): literal[string] identifier[root] = identifier[self] . identifier[root] keyword[and] identifier[self] . identifier[root] keyword[is] identifier[el] keyword[if] keyword[not] identifier[root] : identifier[parent] = identifier[self] . identifier[get_parent] ( identifier[el] ) identifier[root] = identifier[parent] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[is_html] keyword[and] identifier[self] . identifier[is_iframe] ( identifier[parent] ) keyword[return] identifier[root]
def is_root(self, el): """ Return whether element is a root element. We check that the element is the root of the tree (which we have already pre-calculated), and we check if it is the root element under an `iframe`. """ root = self.root and self.root is el if not root: parent = self.get_parent(el) root = parent is not None and self.is_html and self.is_iframe(parent) # depends on [control=['if'], data=[]] return root
def substitute(prev, *args, **kw):
    """alias of string.Template.substitute

    Builds a :class:`string.Template` once from ``*args``/``**kw`` and
    lazily yields the substitution result for each mapping from ``prev``.
    """
    tpl = string.Template(*args, **kw)
    yield from map(tpl.substitute, prev)
def function[substitute, parameter[prev]]: constant[alias of string.Template.substitute] variable[template_obj] assign[=] call[name[string].Template, parameter[<ast.Starred object at 0x7da18f58d840>]] for taget[name[data]] in starred[name[prev]] begin[:] <ast.Yield object at 0x7da18f58cbe0>
keyword[def] identifier[substitute] ( identifier[prev] ,* identifier[args] ,** identifier[kw] ): literal[string] identifier[template_obj] = identifier[string] . identifier[Template] (* identifier[args] ,** identifier[kw] ) keyword[for] identifier[data] keyword[in] identifier[prev] : keyword[yield] identifier[template_obj] . identifier[substitute] ( identifier[data] )
def substitute(prev, *args, **kw): """alias of string.Template.substitute""" template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.substitute(data) # depends on [control=['for'], data=['data']]
def perform_authorization(self):
    """
    Check if the request should be permitted.

    Raises an appropriate exception if the request is not permitted:
    ``PermissionDenied`` for an authenticated caller, ``NotAuthenticated``
    for an anonymous one.
    """
    for perm in self.permissions:
        if perm.has_permission():
            continue
        # Distinguish "logged in but not allowed" from "not logged in".
        if request.user:
            raise errors.PermissionDenied()
        raise errors.NotAuthenticated()
def function[perform_authorization, parameter[self]]: constant[ Check if the request should be permitted. Raises an appropriate exception if the request is not permitted. ] for taget[name[permission]] in starred[name[self].permissions] begin[:] if <ast.UnaryOp object at 0x7da20c9915d0> begin[:] if name[request].user begin[:] <ast.Raise object at 0x7da20c991a50>
keyword[def] identifier[perform_authorization] ( identifier[self] ): literal[string] keyword[for] identifier[permission] keyword[in] identifier[self] . identifier[permissions] : keyword[if] keyword[not] identifier[permission] . identifier[has_permission] (): keyword[if] identifier[request] . identifier[user] : keyword[raise] identifier[errors] . identifier[PermissionDenied] () keyword[else] : keyword[raise] identifier[errors] . identifier[NotAuthenticated] ()
def perform_authorization(self): """ Check if the request should be permitted. Raises an appropriate exception if the request is not permitted. """ for permission in self.permissions: if not permission.has_permission(): if request.user: raise errors.PermissionDenied() # depends on [control=['if'], data=[]] else: raise errors.NotAuthenticated() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['permission']]
def setup_logdir(self):
    # todo: locking on logdir creation
    """Create logdir for task/job/run. No-op if the task is not chief (0'th task of 0'th job of run)
    """
    run_name = ncluster_globals.get_run_for_task(self)
    self.log(f"Creating logdir for run {run_name}")
    logdir_root = ncluster_globals.LOGDIR_ROOT
    assert logdir_root

    self.run(f'mkdir -p {logdir_root}')
    # List the directories that already exist directly under the root so
    # we can de-duplicate against them.
    stdout, _stderr = self.run_with_output(
        f'find {logdir_root} -maxdepth 1 -type d')

    logdir = f"{logdir_root}/{run_name}"
    counter = 0
    while logdir in stdout:
        # Name is taken: append a zero-padded numeric suffix and retry.
        counter += 1
        new_logdir = f'{logdir_root}/{run_name}.{counter:02d}'
        self.log(f'Warning, logdir {logdir} exists, deduping to {new_logdir}')
        logdir = new_logdir

    self.run(f'mkdir -p {logdir}')
    ncluster_globals.set_logdir(run_name, logdir)
    return logdir
def function[setup_logdir, parameter[self]]: constant[Create logdir for task/job/run. No-op if the task is not chief (0'th task of 0'th job of run) ] variable[run_name] assign[=] call[name[ncluster_globals].get_run_for_task, parameter[name[self]]] call[name[self].log, parameter[binary_operation[constant[Creating logdir for run ] + name[run_name]]]] variable[logdir_root] assign[=] name[ncluster_globals].LOGDIR_ROOT assert[name[logdir_root]] call[name[self].run, parameter[<ast.JoinedStr object at 0x7da18bccbf10>]] variable[find_command] assign[=] <ast.JoinedStr object at 0x7da18bcca920> <ast.Tuple object at 0x7da18bcca830> assign[=] call[name[self].run_with_output, parameter[name[find_command]]] variable[logdir] assign[=] <ast.JoinedStr object at 0x7da18bcc8370> variable[counter] assign[=] constant[0] while compare[name[logdir] in name[stdout]] begin[:] <ast.AugAssign object at 0x7da18ede47f0> variable[new_logdir] assign[=] <ast.JoinedStr object at 0x7da18ede6dd0> call[name[self].log, parameter[<ast.JoinedStr object at 0x7da18dc98b80>]] variable[logdir] assign[=] name[new_logdir] call[name[self].run, parameter[<ast.JoinedStr object at 0x7da18dc981f0>]] call[name[ncluster_globals].set_logdir, parameter[name[run_name], name[logdir]]] return[name[logdir]]
keyword[def] identifier[setup_logdir] ( identifier[self] ): literal[string] identifier[run_name] = identifier[ncluster_globals] . identifier[get_run_for_task] ( identifier[self] ) identifier[self] . identifier[log] ( literal[string] + identifier[run_name] ) identifier[logdir_root] = identifier[ncluster_globals] . identifier[LOGDIR_ROOT] keyword[assert] identifier[logdir_root] identifier[self] . identifier[run] ( literal[string] ) identifier[find_command] = literal[string] identifier[stdout] , identifier[stderr] = identifier[self] . identifier[run_with_output] ( identifier[find_command] ) identifier[logdir] = literal[string] identifier[counter] = literal[int] keyword[while] identifier[logdir] keyword[in] identifier[stdout] : identifier[counter] += literal[int] identifier[new_logdir] = literal[string] identifier[self] . identifier[log] ( literal[string] ) identifier[logdir] = identifier[new_logdir] identifier[self] . identifier[run] ( literal[string] ) identifier[ncluster_globals] . identifier[set_logdir] ( identifier[run_name] , identifier[logdir] ) keyword[return] identifier[logdir]
def setup_logdir(self): # todo: locking on logdir creation "Create logdir for task/job/run. No-op if the task is not chief (0'th task of 0'th job of run)\n " run_name = ncluster_globals.get_run_for_task(self) self.log('Creating logdir for run ' + run_name) logdir_root = ncluster_globals.LOGDIR_ROOT assert logdir_root self.run(f'mkdir -p {logdir_root}') find_command = f'find {logdir_root} -maxdepth 1 -type d' (stdout, stderr) = self.run_with_output(find_command) logdir = f'{logdir_root}/{run_name}' counter = 0 while logdir in stdout: counter += 1 new_logdir = f'{logdir_root}/{run_name}.{counter:02d}' self.log(f'Warning, logdir {logdir} exists, deduping to {new_logdir}') logdir = new_logdir # depends on [control=['while'], data=['logdir']] self.run(f'mkdir -p {logdir}') ncluster_globals.set_logdir(run_name, logdir) return logdir
def _update_pvalcorr(ntmt, corrected_pvals): """Add data members to store multiple test corrections.""" if corrected_pvals is None: return for rec, val in zip(ntmt.results, corrected_pvals): rec.set_corrected_pval(ntmt.nt_method, val)
def function[_update_pvalcorr, parameter[ntmt, corrected_pvals]]: constant[Add data members to store multiple test corrections.] if compare[name[corrected_pvals] is constant[None]] begin[:] return[None] for taget[tuple[[<ast.Name object at 0x7da20c76fe20>, <ast.Name object at 0x7da20c76c730>]]] in starred[call[name[zip], parameter[name[ntmt].results, name[corrected_pvals]]]] begin[:] call[name[rec].set_corrected_pval, parameter[name[ntmt].nt_method, name[val]]]
keyword[def] identifier[_update_pvalcorr] ( identifier[ntmt] , identifier[corrected_pvals] ): literal[string] keyword[if] identifier[corrected_pvals] keyword[is] keyword[None] : keyword[return] keyword[for] identifier[rec] , identifier[val] keyword[in] identifier[zip] ( identifier[ntmt] . identifier[results] , identifier[corrected_pvals] ): identifier[rec] . identifier[set_corrected_pval] ( identifier[ntmt] . identifier[nt_method] , identifier[val] )
def _update_pvalcorr(ntmt, corrected_pvals): """Add data members to store multiple test corrections.""" if corrected_pvals is None: return # depends on [control=['if'], data=[]] for (rec, val) in zip(ntmt.results, corrected_pvals): rec.set_corrected_pval(ntmt.nt_method, val) # depends on [control=['for'], data=[]]
def visualize():
    """Trigger a visualization via the REST API

    Takes a single image and generates the visualization data,
    returning the output exactly as given by the target visualization.
    """
    session['settings'] = {}
    image_uid = request.args.get('image')
    vis_name = request.args.get('visualizer')
    vis = get_visualizations()[vis_name]

    if vis.ALLOWED_SETTINGS:
        # Pull each setting from the query string, falling back to the
        # visualizer's first allowed value when it was not supplied.
        for key, allowed in vis.ALLOWED_SETTINGS.items():
            value = request.args.get(key)
            session['settings'][key] = allowed[0] if value is None else value
    else:
        logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name))

    # Collect the (single) image matching the requested uid.
    inputs = []
    for image in session['image_list']:
        if image['uid'] != int(image_uid):
            continue
        full_path = os.path.join(session['img_input_dir'], image['filename'])
        inputs.append({
            'filename': image['filename'],
            'data': Image.open(full_path),
        })

    vis.update_settings(session['settings'])
    output = vis.make_visualization(
        inputs, output_dir=session['img_output_dir'])
    return jsonify(output[0])
def function[visualize, parameter[]]: constant[Trigger a visualization via the REST API Takes a single image and generates the visualization data, returning the output exactly as given by the target visualization. ] call[name[session]][constant[settings]] assign[=] dictionary[[], []] variable[image_uid] assign[=] call[name[request].args.get, parameter[constant[image]]] variable[vis_name] assign[=] call[name[request].args.get, parameter[constant[visualizer]]] variable[vis] assign[=] call[call[name[get_visualizations], parameter[]]][name[vis_name]] if name[vis].ALLOWED_SETTINGS begin[:] for taget[name[key]] in starred[call[name[vis].ALLOWED_SETTINGS.keys, parameter[]]] begin[:] if compare[call[name[request].args.get, parameter[name[key]]] is_not constant[None]] begin[:] call[call[name[session]][constant[settings]]][name[key]] assign[=] call[name[request].args.get, parameter[name[key]]] variable[inputs] assign[=] list[[]] for taget[name[image]] in starred[call[name[session]][constant[image_list]]] begin[:] if compare[call[name[image]][constant[uid]] equal[==] call[name[int], parameter[name[image_uid]]]] begin[:] variable[full_path] assign[=] call[name[os].path.join, parameter[call[name[session]][constant[img_input_dir]], call[name[image]][constant[filename]]]] variable[entry] assign[=] call[name[dict], parameter[]] call[name[entry]][constant[filename]] assign[=] call[name[image]][constant[filename]] call[name[entry]][constant[data]] assign[=] call[name[Image].open, parameter[name[full_path]]] call[name[inputs].append, parameter[name[entry]]] call[name[vis].update_settings, parameter[call[name[session]][constant[settings]]]] variable[output] assign[=] call[name[vis].make_visualization, parameter[name[inputs]]] return[call[name[jsonify], parameter[call[name[output]][constant[0]]]]]
keyword[def] identifier[visualize] (): literal[string] identifier[session] [ literal[string] ]={} identifier[image_uid] = identifier[request] . identifier[args] . identifier[get] ( literal[string] ) identifier[vis_name] = identifier[request] . identifier[args] . identifier[get] ( literal[string] ) identifier[vis] = identifier[get_visualizations] ()[ identifier[vis_name] ] keyword[if] identifier[vis] . identifier[ALLOWED_SETTINGS] : keyword[for] identifier[key] keyword[in] identifier[vis] . identifier[ALLOWED_SETTINGS] . identifier[keys] (): keyword[if] identifier[request] . identifier[args] . identifier[get] ( identifier[key] ) keyword[is] keyword[not] keyword[None] : identifier[session] [ literal[string] ][ identifier[key] ]= identifier[request] . identifier[args] . identifier[get] ( identifier[key] ) keyword[else] : identifier[session] [ literal[string] ][ identifier[key] ]= identifier[vis] . identifier[ALLOWED_SETTINGS] [ identifier[key] ][ literal[int] ] keyword[else] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[vis_name] )) identifier[inputs] =[] keyword[for] identifier[image] keyword[in] identifier[session] [ literal[string] ]: keyword[if] identifier[image] [ literal[string] ]== identifier[int] ( identifier[image_uid] ): identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[session] [ literal[string] ], identifier[image] [ literal[string] ]) identifier[entry] = identifier[dict] () identifier[entry] [ literal[string] ]= identifier[image] [ literal[string] ] identifier[entry] [ literal[string] ]= identifier[Image] . identifier[open] ( identifier[full_path] ) identifier[inputs] . identifier[append] ( identifier[entry] ) identifier[vis] . identifier[update_settings] ( identifier[session] [ literal[string] ]) identifier[output] = identifier[vis] . 
identifier[make_visualization] ( identifier[inputs] , identifier[output_dir] = identifier[session] [ literal[string] ]) keyword[return] identifier[jsonify] ( identifier[output] [ literal[int] ])
def visualize(): """Trigger a visualization via the REST API Takes a single image and generates the visualization data, returning the output exactly as given by the target visualization. """ session['settings'] = {} image_uid = request.args.get('image') vis_name = request.args.get('visualizer') vis = get_visualizations()[vis_name] if vis.ALLOWED_SETTINGS: for key in vis.ALLOWED_SETTINGS.keys(): if request.args.get(key) is not None: session['settings'][key] = request.args.get(key) # depends on [control=['if'], data=[]] else: session['settings'][key] = vis.ALLOWED_SETTINGS[key][0] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] else: logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name)) inputs = [] for image in session['image_list']: if image['uid'] == int(image_uid): full_path = os.path.join(session['img_input_dir'], image['filename']) entry = dict() entry['filename'] = image['filename'] entry['data'] = Image.open(full_path) inputs.append(entry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['image']] vis.update_settings(session['settings']) output = vis.make_visualization(inputs, output_dir=session['img_output_dir']) return jsonify(output[0])
def active_language(self):
    """
    Returns active language.
    """
    # An explicitly activated language (via activate_language()) wins
    # over everything else.
    if self._language is not None:
        return self._language
    # Fall back to the current site language when supported, otherwise
    # to the default language descriptor.
    current = utils.get_language()
    return current if current in self.supported_languages else self.default_language
def function[active_language, parameter[self]]: constant[ Returns active language. ] if compare[name[self]._language is_not constant[None]] begin[:] return[name[self]._language] variable[current] assign[=] call[name[utils].get_language, parameter[]] if compare[name[current] in name[self].supported_languages] begin[:] return[name[current]] return[name[self].default_language]
keyword[def] identifier[active_language] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_language] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[_language] identifier[current] = identifier[utils] . identifier[get_language] () keyword[if] identifier[current] keyword[in] identifier[self] . identifier[supported_languages] : keyword[return] identifier[current] keyword[return] identifier[self] . identifier[default_language]
def active_language(self): """ Returns active language. """ # Current instance language (if user uses activate_language() method) if self._language is not None: return self._language # depends on [control=['if'], data=[]] # Current site language (translation.get_language()) current = utils.get_language() if current in self.supported_languages: return current # depends on [control=['if'], data=['current']] # Default language descriptor return self.default_language
def register_trainable(name, trainable):
    """Register a trainable function or class.

    Args:
        name (str): Name to register.
        trainable (obj): Function or tune.Trainable class. Functions must
            take (config, status_reporter) as arguments and will be
            automatically converted into a class during registration.
    """
    # Imported lazily to avoid a circular import at module load time.
    from ray.tune.trainable import Trainable
    from ray.tune.function_runner import wrap_function

    if isinstance(trainable, type):
        # Already a class; used as-is (validated below).
        logger.debug("Detected class for trainable.")
    elif isinstance(trainable, FunctionType):
        logger.debug("Detected function for trainable.")
        trainable = wrap_function(trainable)
    elif callable(trainable):
        # Some other callable (e.g. an object with __call__): still
        # wrapped, but warn since we cannot verify its signature.
        logger.warning(
            "Detected unknown callable for trainable. Converting to class.")
        trainable = wrap_function(trainable)

    if not issubclass(trainable, Trainable):
        raise TypeError("Second argument must be convertable to Trainable",
                        trainable)
    _global_registry.register(TRAINABLE_CLASS, name, trainable)
def function[register_trainable, parameter[name, trainable]]: constant[Register a trainable function or class. Args: name (str): Name to register. trainable (obj): Function or tune.Trainable class. Functions must take (config, status_reporter) as arguments and will be automatically converted into a class during registration. ] from relative_module[ray.tune.trainable] import module[Trainable] from relative_module[ray.tune.function_runner] import module[wrap_function] if call[name[isinstance], parameter[name[trainable], name[type]]] begin[:] call[name[logger].debug, parameter[constant[Detected class for trainable.]]] if <ast.UnaryOp object at 0x7da18f09e170> begin[:] <ast.Raise object at 0x7da18f09e1a0> call[name[_global_registry].register, parameter[name[TRAINABLE_CLASS], name[name], name[trainable]]]
keyword[def] identifier[register_trainable] ( identifier[name] , identifier[trainable] ): literal[string] keyword[from] identifier[ray] . identifier[tune] . identifier[trainable] keyword[import] identifier[Trainable] keyword[from] identifier[ray] . identifier[tune] . identifier[function_runner] keyword[import] identifier[wrap_function] keyword[if] identifier[isinstance] ( identifier[trainable] , identifier[type] ): identifier[logger] . identifier[debug] ( literal[string] ) keyword[elif] identifier[isinstance] ( identifier[trainable] , identifier[FunctionType] ): identifier[logger] . identifier[debug] ( literal[string] ) identifier[trainable] = identifier[wrap_function] ( identifier[trainable] ) keyword[elif] identifier[callable] ( identifier[trainable] ): identifier[logger] . identifier[warning] ( literal[string] ) identifier[trainable] = identifier[wrap_function] ( identifier[trainable] ) keyword[if] keyword[not] identifier[issubclass] ( identifier[trainable] , identifier[Trainable] ): keyword[raise] identifier[TypeError] ( literal[string] , identifier[trainable] ) identifier[_global_registry] . identifier[register] ( identifier[TRAINABLE_CLASS] , identifier[name] , identifier[trainable] )
def register_trainable(name, trainable): """Register a trainable function or class. Args: name (str): Name to register. trainable (obj): Function or tune.Trainable class. Functions must take (config, status_reporter) as arguments and will be automatically converted into a class during registration. """ from ray.tune.trainable import Trainable from ray.tune.function_runner import wrap_function if isinstance(trainable, type): logger.debug('Detected class for trainable.') # depends on [control=['if'], data=[]] elif isinstance(trainable, FunctionType): logger.debug('Detected function for trainable.') trainable = wrap_function(trainable) # depends on [control=['if'], data=[]] elif callable(trainable): logger.warning('Detected unknown callable for trainable. Converting to class.') trainable = wrap_function(trainable) # depends on [control=['if'], data=[]] if not issubclass(trainable, Trainable): raise TypeError('Second argument must be convertable to Trainable', trainable) # depends on [control=['if'], data=[]] _global_registry.register(TRAINABLE_CLASS, name, trainable)
def delete_sourcesystem_cd(cls, tables: I2B2Tables, sourcesystem_cd: str) -> int:
    """
    Delete all records with the supplied sourcesystem_cd

    :param tables: i2b2 sql connection
    :param sourcesystem_cd: sourcesystem_cd to remove
    :return: number of records that were deleted
    """
    # Delegates to the private implementation, scoped to the
    # observation_fact table over the CRC connection.
    connection = tables.crc_connection
    fact_table = tables.observation_fact
    return cls._delete_sourcesystem_cd(connection, fact_table, sourcesystem_cd)
def function[delete_sourcesystem_cd, parameter[cls, tables, sourcesystem_cd]]: constant[ Delete all records with the supplied sourcesystem_cd :param tables: i2b2 sql connection :param sourcesystem_cd: sourcesystem_cd to remove :return: number or records that were deleted ] return[call[name[cls]._delete_sourcesystem_cd, parameter[name[tables].crc_connection, name[tables].observation_fact, name[sourcesystem_cd]]]]
keyword[def] identifier[delete_sourcesystem_cd] ( identifier[cls] , identifier[tables] : identifier[I2B2Tables] , identifier[sourcesystem_cd] : identifier[str] )-> identifier[int] : literal[string] keyword[return] identifier[cls] . identifier[_delete_sourcesystem_cd] ( identifier[tables] . identifier[crc_connection] , identifier[tables] . identifier[observation_fact] , identifier[sourcesystem_cd] )
def delete_sourcesystem_cd(cls, tables: I2B2Tables, sourcesystem_cd: str) -> int: """ Delete all records with the supplied sourcesystem_cd :param tables: i2b2 sql connection :param sourcesystem_cd: sourcesystem_cd to remove :return: number or records that were deleted """ return cls._delete_sourcesystem_cd(tables.crc_connection, tables.observation_fact, sourcesystem_cd)
def check_decade_apostrophes_short(text):
    """Check the text for dates of the form X0's.

    :param text: the text to lint
    :return: whatever ``existence_check`` reports for matches of the pattern
    """
    err = "dates_times.dates"
    msg = u"Apostrophes aren't needed for decades."
    # Raw string: ``\d`` must reach the regex engine as a digit class.  The
    # previous non-raw literal relied on Python passing the invalid "\d"
    # escape through, which emits a DeprecationWarning and is an error in
    # newer CPython releases.
    regex = r"\d0's"
    return existence_check(
        text,
        [regex],
        err,
        msg,
        excluded_topics=["50 Cent"])
def function[check_decade_apostrophes_short, parameter[text]]: constant[Check the text for dates of the form X0's.] variable[err] assign[=] constant[dates_times.dates] variable[msg] assign[=] constant[Apostrophes aren't needed for decades.] variable[regex] assign[=] constant[\d0's] return[call[name[existence_check], parameter[name[text], list[[<ast.Name object at 0x7da1b08a7f70>]], name[err], name[msg]]]]
keyword[def] identifier[check_decade_apostrophes_short] ( identifier[text] ): literal[string] identifier[err] = literal[string] identifier[msg] = literal[string] identifier[regex] = literal[string] keyword[return] identifier[existence_check] ( identifier[text] ,[ identifier[regex] ], identifier[err] , identifier[msg] , identifier[excluded_topics] =[ literal[string] ])
def check_decade_apostrophes_short(text): """Check the text for dates of the form X0's.""" err = 'dates_times.dates' msg = u"Apostrophes aren't needed for decades." regex = "\\d0's" return existence_check(text, [regex], err, msg, excluded_topics=['50 Cent'])
def repo_id(self, repo: str) -> str:
    """
    Returns an unique identifier from a repo URL for the folder the repo is
    gonna be pulled in.

    The identifier is a readable slug derived from the URL plus a SHA-256
    hex digest of the original URL, so distinct repos can never collide even
    if their slugs do.

    :param repo: repo URL (``http(s)://``, ``file://`` or a plain/``~`` path)
    :return: filesystem-safe, globally unique folder name
    """
    if repo.startswith("http"):
        # Strip the scheme plus an optional leading "www." host prefix.
        # (The previous pattern "(.www)?" could never match "www." — it
        # required some character *followed by* literal "www" — and could
        # even consume arbitrary characters before a literal "www".)
        repo_id = re.sub(r"https?://(www\.)?", "", repo)
        repo_id = re.sub(r"\.git/?$", "", repo_id)
    else:
        repo_id = repo.replace("file://", "")
        repo_id = re.sub(r"\.git/?$", "", repo_id)
        if repo_id.startswith("~"):
            # Expand "~" so the slug reflects the real location on disk.
            repo_id = str(Path(repo_id).resolve())
    # replaces everything that isn't alphanumeric, a dot or an underscore
    # to make sure it's a valid folder name and to keep it readable
    # multiple consecutive invalid characters replaced with a single underscore
    repo_id = re.sub(r"[^a-zA-Z0-9._]+", "_", repo_id)
    # and add a hash of the original to make it absolutely unique
    return repo_id + hashlib.sha256(repo.encode("utf-8")).hexdigest()
def function[repo_id, parameter[self, repo]]: constant[ Returns an unique identifier from a repo URL for the folder the repo is gonna be pulled in. ] if call[name[repo].startswith, parameter[constant[http]]] begin[:] variable[repo_id] assign[=] call[name[re].sub, parameter[constant[https?://(.www)?], constant[], name[repo]]] variable[repo_id] assign[=] call[name[re].sub, parameter[constant[\.git/?$], constant[], name[repo_id]]] variable[repo_id] assign[=] call[name[re].sub, parameter[constant[[^a-zA-Z0-9._]+], constant[_], name[repo_id]]] return[binary_operation[name[repo_id] + call[call[name[hashlib].sha256, parameter[call[name[repo].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]]]
keyword[def] identifier[repo_id] ( identifier[self] , identifier[repo] : identifier[str] )-> identifier[str] : literal[string] keyword[if] identifier[repo] . identifier[startswith] ( literal[string] ): identifier[repo_id] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[repo] ) identifier[repo_id] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[repo_id] ) keyword[else] : identifier[repo_id] = identifier[repo] . identifier[replace] ( literal[string] , literal[string] ) identifier[repo_id] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[repo_id] ) keyword[if] identifier[repo_id] . identifier[startswith] ( literal[string] ): identifier[repo_id] = identifier[str] ( identifier[Path] ( identifier[repo_id] ). identifier[resolve] ()) identifier[repo_id] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[repo_id] ) keyword[return] identifier[repo_id] + identifier[hashlib] . identifier[sha256] ( identifier[repo] . identifier[encode] ( literal[string] )). identifier[hexdigest] ()
def repo_id(self, repo: str) -> str: """ Returns an unique identifier from a repo URL for the folder the repo is gonna be pulled in. """ if repo.startswith('http'): repo_id = re.sub('https?://(.www)?', '', repo) repo_id = re.sub('\\.git/?$', '', repo_id) # depends on [control=['if'], data=[]] else: repo_id = repo.replace('file://', '') repo_id = re.sub('\\.git/?$', '', repo_id) if repo_id.startswith('~'): repo_id = str(Path(repo_id).resolve()) # depends on [control=['if'], data=[]] # replaces everything that isn't alphanumeric, a dot or an underscore # to make sure it's a valid folder name and to keep it readable # multiple consecutive invalid characters replaced with a single underscore repo_id = re.sub('[^a-zA-Z0-9._]+', '_', repo_id) # and add a hash of the original to make it absolutely unique return repo_id + hashlib.sha256(repo.encode('utf-8')).hexdigest()
def pairwise_alignment_stats(reference_seq_aln, other_seq_aln):
    """Get a report of a pairwise alignment.

    Args:
        reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form
        other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form

    Returns:
        dict: Dictionary of information on mutations, insertions, sequence
            identity, etc.

    Raises:
        ValueError: If the two aligned sequences differ in length.
    """
    if len(reference_seq_aln) != len(other_seq_aln):
        raise ValueError('Sequence lengths not equal - was an alignment run?')

    # Normalise both inputs to plain strings before analysing them.
    ref_str = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)
    other_str = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln)

    # Percent identity to the reference sequence
    percent_identity = get_percent_identity(a_aln_seq=ref_str, b_aln_seq=other_str)

    # Every remaining statistic is derived from the alignment dataframe.
    aln_df = get_alignment_df(a_aln_seq=ref_str, b_aln_seq=other_str)
    return {
        'percent_identity': percent_identity,
        'deletions': get_deletions(aln_df),
        'insertions': get_insertions(aln_df),
        'mutations': get_mutations(aln_df),
        'unresolved': get_unresolved(aln_df),
    }
def function[pairwise_alignment_stats, parameter[reference_seq_aln, other_seq_aln]]: constant[Get a report of a pairwise alignment. Args: reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form Returns: dict: Dictionary of information on mutations, insertions, sequence identity, etc. ] if compare[call[name[len], parameter[name[reference_seq_aln]]] not_equal[!=] call[name[len], parameter[name[other_seq_aln]]]] begin[:] <ast.Raise object at 0x7da1b0e2dae0> variable[reference_seq_aln] assign[=] call[name[ssbio].protein.sequence.utils.cast_to_str, parameter[name[reference_seq_aln]]] variable[other_seq_aln] assign[=] call[name[ssbio].protein.sequence.utils.cast_to_str, parameter[name[other_seq_aln]]] variable[infodict] assign[=] dictionary[[], []] variable[stats_percent_ident] assign[=] call[name[get_percent_identity], parameter[]] call[name[infodict]][constant[percent_identity]] assign[=] name[stats_percent_ident] variable[aln_df] assign[=] call[name[get_alignment_df], parameter[]] call[name[infodict]][constant[deletions]] assign[=] call[name[get_deletions], parameter[name[aln_df]]] call[name[infodict]][constant[insertions]] assign[=] call[name[get_insertions], parameter[name[aln_df]]] call[name[infodict]][constant[mutations]] assign[=] call[name[get_mutations], parameter[name[aln_df]]] call[name[infodict]][constant[unresolved]] assign[=] call[name[get_unresolved], parameter[name[aln_df]]] return[name[infodict]]
keyword[def] identifier[pairwise_alignment_stats] ( identifier[reference_seq_aln] , identifier[other_seq_aln] ): literal[string] keyword[if] identifier[len] ( identifier[reference_seq_aln] )!= identifier[len] ( identifier[other_seq_aln] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[reference_seq_aln] = identifier[ssbio] . identifier[protein] . identifier[sequence] . identifier[utils] . identifier[cast_to_str] ( identifier[reference_seq_aln] ) identifier[other_seq_aln] = identifier[ssbio] . identifier[protein] . identifier[sequence] . identifier[utils] . identifier[cast_to_str] ( identifier[other_seq_aln] ) identifier[infodict] ={} identifier[stats_percent_ident] = identifier[get_percent_identity] ( identifier[a_aln_seq] = identifier[reference_seq_aln] , identifier[b_aln_seq] = identifier[other_seq_aln] ) identifier[infodict] [ literal[string] ]= identifier[stats_percent_ident] identifier[aln_df] = identifier[get_alignment_df] ( identifier[a_aln_seq] = identifier[reference_seq_aln] , identifier[b_aln_seq] = identifier[other_seq_aln] ) identifier[infodict] [ literal[string] ]= identifier[get_deletions] ( identifier[aln_df] ) identifier[infodict] [ literal[string] ]= identifier[get_insertions] ( identifier[aln_df] ) identifier[infodict] [ literal[string] ]= identifier[get_mutations] ( identifier[aln_df] ) identifier[infodict] [ literal[string] ]= identifier[get_unresolved] ( identifier[aln_df] ) keyword[return] identifier[infodict]
def pairwise_alignment_stats(reference_seq_aln, other_seq_aln): """Get a report of a pairwise alignment. Args: reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form Returns: dict: Dictionary of information on mutations, insertions, sequence identity, etc. """ if len(reference_seq_aln) != len(other_seq_aln): raise ValueError('Sequence lengths not equal - was an alignment run?') # depends on [control=['if'], data=[]] reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln) other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln) infodict = {} # Percent identity to the reference sequence stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln) infodict['percent_identity'] = stats_percent_ident # Other alignment results aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln) infodict['deletions'] = get_deletions(aln_df) infodict['insertions'] = get_insertions(aln_df) infodict['mutations'] = get_mutations(aln_df) infodict['unresolved'] = get_unresolved(aln_df) return infodict
def _stream_out(self, outfile, append=False):
    '''
    Internal. Writes all stdout into outfile.

    :param outfile: Filename or file-like object for writing.
    :param append: Opens filename with append.
    :return: This command's returncode.
    '''
    # ``isinstance`` rather than ``type(...) in (...)`` so str/unicode
    # subclasses are accepted as filenames too.
    if isinstance(outfile, (str, unicode)):
        outfile = os.path.expanduser(os.path.expandvars(outfile))
        outfile = open(outfile, 'a' if append else 'w')
    self._run(outfile)
    # The docstring promises the returncode; previously the value from
    # wait() was silently discarded and the method returned None.
    return self._pop.wait()
def function[_stream_out, parameter[self, outfile, append]]: constant[ Internal. Writes all stdout into outfile. :param outfile: Filename or file-like object for writing. :param append: Opens filename with append. :return: This command's returncode. ] if compare[call[name[type], parameter[name[outfile]]] in tuple[[<ast.Name object at 0x7da20c6e79d0>, <ast.Name object at 0x7da20c6e70d0>]]] begin[:] variable[outfile] assign[=] call[name[os].path.expanduser, parameter[call[name[os].path.expandvars, parameter[name[outfile]]]]] variable[outfile] assign[=] call[name[open], parameter[name[outfile], <ast.IfExp object at 0x7da20c6e43a0>]] call[name[self]._run, parameter[name[outfile]]] call[name[self]._pop.wait, parameter[]]
keyword[def] identifier[_stream_out] ( identifier[self] , identifier[outfile] , identifier[append] = keyword[False] ): literal[string] keyword[if] identifier[type] ( identifier[outfile] ) keyword[in] ( identifier[str] , identifier[unicode] ): identifier[outfile] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[os] . identifier[path] . identifier[expandvars] ( identifier[outfile] )) identifier[outfile] = identifier[open] ( identifier[outfile] , literal[string] keyword[if] identifier[append] keyword[else] literal[string] ) identifier[self] . identifier[_run] ( identifier[outfile] ) identifier[self] . identifier[_pop] . identifier[wait] ()
def _stream_out(self, outfile, append=False): """ Internal. Writes all stdout into outfile. :param outfile: Filename or file-like object for writing. :param append: Opens filename with append. :return: This command's returncode. """ if type(outfile) in (str, unicode): outfile = os.path.expanduser(os.path.expandvars(outfile)) outfile = open(outfile, 'a' if append else 'w') # depends on [control=['if'], data=[]] self._run(outfile) self._pop.wait()
def update(self, callback=int): """Process all pending model modifications.""" # print(self._pending_modifications) add_var = self._pending_modifications.add_var if len(add_var) > 0: self._add_variables(add_var) self._pending_modifications.add_var = [] callback() add_constr = self._pending_modifications.add_constr if len(add_constr) > 0: self._add_constraints(add_constr) self._pending_modifications.add_constr = [] add_constr_sloppy = self._pending_modifications.add_constr_sloppy if len(add_constr_sloppy) > 0: self._add_constraints(add_constr_sloppy, sloppy=True) self._pending_modifications.add_constr_sloppy = [] var_lb = self._pending_modifications.var_lb var_ub = self._pending_modifications.var_ub if len(var_lb) > 0 or len(var_ub) > 0: self._set_variable_bounds_on_problem(var_lb, var_ub) self._pending_modifications.var_lb = [] self._pending_modifications.var_ub = [] rm_var = self._pending_modifications.rm_var if len(rm_var) > 0: self._remove_variables(rm_var) self._pending_modifications.rm_var = [] callback() rm_constr = self._pending_modifications.rm_constr if len(rm_constr) > 0: self._remove_constraints(rm_constr) self._pending_modifications.rm_constr = []
def function[update, parameter[self, callback]]: constant[Process all pending model modifications.] variable[add_var] assign[=] name[self]._pending_modifications.add_var if compare[call[name[len], parameter[name[add_var]]] greater[>] constant[0]] begin[:] call[name[self]._add_variables, parameter[name[add_var]]] name[self]._pending_modifications.add_var assign[=] list[[]] call[name[callback], parameter[]] variable[add_constr] assign[=] name[self]._pending_modifications.add_constr if compare[call[name[len], parameter[name[add_constr]]] greater[>] constant[0]] begin[:] call[name[self]._add_constraints, parameter[name[add_constr]]] name[self]._pending_modifications.add_constr assign[=] list[[]] variable[add_constr_sloppy] assign[=] name[self]._pending_modifications.add_constr_sloppy if compare[call[name[len], parameter[name[add_constr_sloppy]]] greater[>] constant[0]] begin[:] call[name[self]._add_constraints, parameter[name[add_constr_sloppy]]] name[self]._pending_modifications.add_constr_sloppy assign[=] list[[]] variable[var_lb] assign[=] name[self]._pending_modifications.var_lb variable[var_ub] assign[=] name[self]._pending_modifications.var_ub if <ast.BoolOp object at 0x7da1b0e3bb50> begin[:] call[name[self]._set_variable_bounds_on_problem, parameter[name[var_lb], name[var_ub]]] name[self]._pending_modifications.var_lb assign[=] list[[]] name[self]._pending_modifications.var_ub assign[=] list[[]] variable[rm_var] assign[=] name[self]._pending_modifications.rm_var if compare[call[name[len], parameter[name[rm_var]]] greater[>] constant[0]] begin[:] call[name[self]._remove_variables, parameter[name[rm_var]]] name[self]._pending_modifications.rm_var assign[=] list[[]] call[name[callback], parameter[]] variable[rm_constr] assign[=] name[self]._pending_modifications.rm_constr if compare[call[name[len], parameter[name[rm_constr]]] greater[>] constant[0]] begin[:] call[name[self]._remove_constraints, parameter[name[rm_constr]]] name[self]._pending_modifications.rm_constr 
assign[=] list[[]]
keyword[def] identifier[update] ( identifier[self] , identifier[callback] = identifier[int] ): literal[string] identifier[add_var] = identifier[self] . identifier[_pending_modifications] . identifier[add_var] keyword[if] identifier[len] ( identifier[add_var] )> literal[int] : identifier[self] . identifier[_add_variables] ( identifier[add_var] ) identifier[self] . identifier[_pending_modifications] . identifier[add_var] =[] identifier[callback] () identifier[add_constr] = identifier[self] . identifier[_pending_modifications] . identifier[add_constr] keyword[if] identifier[len] ( identifier[add_constr] )> literal[int] : identifier[self] . identifier[_add_constraints] ( identifier[add_constr] ) identifier[self] . identifier[_pending_modifications] . identifier[add_constr] =[] identifier[add_constr_sloppy] = identifier[self] . identifier[_pending_modifications] . identifier[add_constr_sloppy] keyword[if] identifier[len] ( identifier[add_constr_sloppy] )> literal[int] : identifier[self] . identifier[_add_constraints] ( identifier[add_constr_sloppy] , identifier[sloppy] = keyword[True] ) identifier[self] . identifier[_pending_modifications] . identifier[add_constr_sloppy] =[] identifier[var_lb] = identifier[self] . identifier[_pending_modifications] . identifier[var_lb] identifier[var_ub] = identifier[self] . identifier[_pending_modifications] . identifier[var_ub] keyword[if] identifier[len] ( identifier[var_lb] )> literal[int] keyword[or] identifier[len] ( identifier[var_ub] )> literal[int] : identifier[self] . identifier[_set_variable_bounds_on_problem] ( identifier[var_lb] , identifier[var_ub] ) identifier[self] . identifier[_pending_modifications] . identifier[var_lb] =[] identifier[self] . identifier[_pending_modifications] . identifier[var_ub] =[] identifier[rm_var] = identifier[self] . identifier[_pending_modifications] . identifier[rm_var] keyword[if] identifier[len] ( identifier[rm_var] )> literal[int] : identifier[self] . 
identifier[_remove_variables] ( identifier[rm_var] ) identifier[self] . identifier[_pending_modifications] . identifier[rm_var] =[] identifier[callback] () identifier[rm_constr] = identifier[self] . identifier[_pending_modifications] . identifier[rm_constr] keyword[if] identifier[len] ( identifier[rm_constr] )> literal[int] : identifier[self] . identifier[_remove_constraints] ( identifier[rm_constr] ) identifier[self] . identifier[_pending_modifications] . identifier[rm_constr] =[]
def update(self, callback=int): """Process all pending model modifications.""" # print(self._pending_modifications) add_var = self._pending_modifications.add_var if len(add_var) > 0: self._add_variables(add_var) self._pending_modifications.add_var = [] # depends on [control=['if'], data=[]] callback() add_constr = self._pending_modifications.add_constr if len(add_constr) > 0: self._add_constraints(add_constr) self._pending_modifications.add_constr = [] # depends on [control=['if'], data=[]] add_constr_sloppy = self._pending_modifications.add_constr_sloppy if len(add_constr_sloppy) > 0: self._add_constraints(add_constr_sloppy, sloppy=True) self._pending_modifications.add_constr_sloppy = [] # depends on [control=['if'], data=[]] var_lb = self._pending_modifications.var_lb var_ub = self._pending_modifications.var_ub if len(var_lb) > 0 or len(var_ub) > 0: self._set_variable_bounds_on_problem(var_lb, var_ub) self._pending_modifications.var_lb = [] self._pending_modifications.var_ub = [] # depends on [control=['if'], data=[]] rm_var = self._pending_modifications.rm_var if len(rm_var) > 0: self._remove_variables(rm_var) self._pending_modifications.rm_var = [] # depends on [control=['if'], data=[]] callback() rm_constr = self._pending_modifications.rm_constr if len(rm_constr) > 0: self._remove_constraints(rm_constr) self._pending_modifications.rm_constr = [] # depends on [control=['if'], data=[]]
def _getAnalysis(self, axis, analysis, ref=None):
    """
    gets the named analysis on the given axis and caches the result (or reads
    from the cache if data is available already)
    :param axis: the named axis ('x', 'y', 'z' or 'sum').
    :param analysis: the analysis name.
    :param ref: optional reference level; results are cached per ``ref``.
    :return: the analysis tuple, or None when the axis is unknown or the
        analysis cannot be computed for the 'sum' axis.
    """
    # One cache bucket per reference level; str(ref) makes None a valid key.
    cache = self.cache.get(str(ref))
    if cache is None:
        cache = {'x': {}, 'y': {}, 'z': {}, 'sum': {}}
        self.cache[str(ref)] = cache
    if axis in cache:
        data = self.cache['raw'].get(axis, None)
        cachedAxis = cache.get(axis)
        if cachedAxis.get(analysis) is None:
            if axis == 'sum':
                # 'sum' is derived from the three per-axis analyses; only
                # some analyses support being combined this way.
                if self._canSum(analysis):
                    # NOTE: the per-axis results are fetched at the default
                    # ref=None and only converted to dB afterwards (below).
                    fx, Pxx = self._getAnalysis('x', analysis)
                    fy, Pxy = self._getAnalysis('y', analysis)
                    fz, Pxz = self._getAnalysis('z', analysis)
                    # calculate the sum of the squares with an additional weighting for x and y
                    # (2.2 / 2.4 appear to be empirical axis weights — TODO confirm source)
                    Psum = (((Pxx * 2.2) ** 2) + ((Pxy * 2.4) ** 2) + (Pxz ** 2)) ** 0.5
                    if ref is not None:
                        Psum = librosa.amplitude_to_db(Psum, ref)
                    # frequency bins are assumed identical across axes, so
                    # the x-axis bins are reused for the sum.
                    cachedAxis[analysis] = (fx, Psum)
                else:
                    return None
            else:
                # Plain axis: run the analysis on the high-passed raw data.
                cachedAxis[analysis] = getattr(data.highPass(), analysis)(ref=ref)
        return cachedAxis[analysis]
    else:
        return None
def function[_getAnalysis, parameter[self, axis, analysis, ref]]: constant[ gets the named analysis on the given axis and caches the result (or reads from the cache if data is available already) :param axis: the named axis. :param analysis: the analysis name. :return: the analysis tuple. ] variable[cache] assign[=] call[name[self].cache.get, parameter[call[name[str], parameter[name[ref]]]]] if compare[name[cache] is constant[None]] begin[:] variable[cache] assign[=] dictionary[[<ast.Constant object at 0x7da1b0edd0c0>, <ast.Constant object at 0x7da1b0edcc10>, <ast.Constant object at 0x7da1b0ede620>, <ast.Constant object at 0x7da1b0edce50>], [<ast.Dict object at 0x7da1b0edd6f0>, <ast.Dict object at 0x7da1b0edfb80>, <ast.Dict object at 0x7da1b0edc6a0>, <ast.Dict object at 0x7da1b0edd7e0>]] call[name[self].cache][call[name[str], parameter[name[ref]]]] assign[=] name[cache] if compare[name[axis] in name[cache]] begin[:] variable[data] assign[=] call[call[name[self].cache][constant[raw]].get, parameter[name[axis], constant[None]]] variable[cachedAxis] assign[=] call[name[cache].get, parameter[name[axis]]] if compare[call[name[cachedAxis].get, parameter[name[analysis]]] is constant[None]] begin[:] if compare[name[axis] equal[==] constant[sum]] begin[:] if call[name[self]._canSum, parameter[name[analysis]]] begin[:] <ast.Tuple object at 0x7da1b0edfdf0> assign[=] call[name[self]._getAnalysis, parameter[constant[x], name[analysis]]] <ast.Tuple object at 0x7da1b0edcc40> assign[=] call[name[self]._getAnalysis, parameter[constant[y], name[analysis]]] <ast.Tuple object at 0x7da1b0e32b00> assign[=] call[name[self]._getAnalysis, parameter[constant[z], name[analysis]]] variable[Psum] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[Pxx] * constant[2.2]] ** constant[2]] + binary_operation[binary_operation[name[Pxy] * constant[2.4]] ** constant[2]]] + binary_operation[name[Pxz] ** constant[2]]] ** constant[0.5]] if compare[name[ref] 
is_not constant[None]] begin[:] variable[Psum] assign[=] call[name[librosa].amplitude_to_db, parameter[name[Psum], name[ref]]] call[name[cachedAxis]][name[analysis]] assign[=] tuple[[<ast.Name object at 0x7da1b0e62080>, <ast.Name object at 0x7da1b0e61e70>]] return[call[name[cachedAxis]][name[analysis]]]
keyword[def] identifier[_getAnalysis] ( identifier[self] , identifier[axis] , identifier[analysis] , identifier[ref] = keyword[None] ): literal[string] identifier[cache] = identifier[self] . identifier[cache] . identifier[get] ( identifier[str] ( identifier[ref] )) keyword[if] identifier[cache] keyword[is] keyword[None] : identifier[cache] ={ literal[string] :{}, literal[string] :{}, literal[string] :{}, literal[string] :{}} identifier[self] . identifier[cache] [ identifier[str] ( identifier[ref] )]= identifier[cache] keyword[if] identifier[axis] keyword[in] identifier[cache] : identifier[data] = identifier[self] . identifier[cache] [ literal[string] ]. identifier[get] ( identifier[axis] , keyword[None] ) identifier[cachedAxis] = identifier[cache] . identifier[get] ( identifier[axis] ) keyword[if] identifier[cachedAxis] . identifier[get] ( identifier[analysis] ) keyword[is] keyword[None] : keyword[if] identifier[axis] == literal[string] : keyword[if] identifier[self] . identifier[_canSum] ( identifier[analysis] ): identifier[fx] , identifier[Pxx] = identifier[self] . identifier[_getAnalysis] ( literal[string] , identifier[analysis] ) identifier[fy] , identifier[Pxy] = identifier[self] . identifier[_getAnalysis] ( literal[string] , identifier[analysis] ) identifier[fz] , identifier[Pxz] = identifier[self] . identifier[_getAnalysis] ( literal[string] , identifier[analysis] ) identifier[Psum] =((( identifier[Pxx] * literal[int] )** literal[int] )+(( identifier[Pxy] * literal[int] )** literal[int] )+( identifier[Pxz] ** literal[int] ))** literal[int] keyword[if] identifier[ref] keyword[is] keyword[not] keyword[None] : identifier[Psum] = identifier[librosa] . identifier[amplitude_to_db] ( identifier[Psum] , identifier[ref] ) identifier[cachedAxis] [ identifier[analysis] ]=( identifier[fx] , identifier[Psum] ) keyword[else] : keyword[return] keyword[None] keyword[else] : identifier[cachedAxis] [ identifier[analysis] ]= identifier[getattr] ( identifier[data] . 
identifier[highPass] (), identifier[analysis] )( identifier[ref] = identifier[ref] ) keyword[return] identifier[cachedAxis] [ identifier[analysis] ] keyword[else] : keyword[return] keyword[None]
def _getAnalysis(self, axis, analysis, ref=None): """ gets the named analysis on the given axis and caches the result (or reads from the cache if data is available already) :param axis: the named axis. :param analysis: the analysis name. :return: the analysis tuple. """ cache = self.cache.get(str(ref)) if cache is None: cache = {'x': {}, 'y': {}, 'z': {}, 'sum': {}} self.cache[str(ref)] = cache # depends on [control=['if'], data=['cache']] if axis in cache: data = self.cache['raw'].get(axis, None) cachedAxis = cache.get(axis) if cachedAxis.get(analysis) is None: if axis == 'sum': if self._canSum(analysis): (fx, Pxx) = self._getAnalysis('x', analysis) (fy, Pxy) = self._getAnalysis('y', analysis) (fz, Pxz) = self._getAnalysis('z', analysis) # calculate the sum of the squares with an additional weighting for x and y Psum = ((Pxx * 2.2) ** 2 + (Pxy * 2.4) ** 2 + Pxz ** 2) ** 0.5 if ref is not None: Psum = librosa.amplitude_to_db(Psum, ref) # depends on [control=['if'], data=['ref']] cachedAxis[analysis] = (fx, Psum) # depends on [control=['if'], data=[]] else: return None # depends on [control=['if'], data=[]] else: cachedAxis[analysis] = getattr(data.highPass(), analysis)(ref=ref) # depends on [control=['if'], data=[]] return cachedAxis[analysis] # depends on [control=['if'], data=['axis', 'cache']] else: return None
def write_metadata_to_filestream(filedir, filestream, max_bytes=MAX_FILE_DEFAULT):
    """
    Make metadata file for all files in a directory(helper function)

    :param filedir: This field is the filepath of the directory whose csv
        has to be made.
    :param filestream: This field is a stream for writing to the csv.
    :param max_bytes: This field is the maximum file size to consider. Its
        default value is 128m.
    """
    writer = csv.writer(filestream)
    subdirs = [os.path.join(filedir, entry) for entry in os.listdir(filedir)
               if os.path.isdir(os.path.join(filedir, entry))]

    if not subdirs:
        # Flat layout: one metadata row per file in the directory itself.
        writer.writerow(['filename', 'tags', 'description', 'md5',
                         'creation_date'])
        file_info = characterize_local_files(
            filedir=filedir, max_bytes=max_bytes)
        for filename in file_info:
            info = file_info[filename]
            writer.writerow([filename,
                             ', '.join(info['tags']),
                             info['description'],
                             info['md5'],
                             info['creation_date'],
                             ])
        return

    # Per-member layout: each subdir is an 8-digit project member ID.
    logging.info('Making metadata for subdirs of {}'.format(filedir))
    if not all([re.match('^[0-9]{8}$', os.path.basename(d))
                for d in subdirs]):
        raise ValueError("Subdirs not all project member ID format!")
    writer.writerow(['project_member_id', 'filename', 'tags',
                     'description', 'md5', 'creation_date'])
    for subdir in subdirs:
        proj_member_id = os.path.basename(subdir)
        file_info = characterize_local_files(
            filedir=subdir, max_bytes=max_bytes)
        if not file_info:
            # Empty member directory still gets a placeholder row.
            writer.writerow([proj_member_id, 'None', 'NA', 'NA', 'NA', 'NA'])
            continue
        for filename in file_info:
            info = file_info[filename]
            writer.writerow([proj_member_id,
                             filename,
                             ', '.join(info['tags']),
                             info['description'],
                             info['md5'],
                             info['creation_date'],
                             ])
def function[write_metadata_to_filestream, parameter[filedir, filestream, max_bytes]]: constant[ Make metadata file for all files in a directory(helper function) :param filedir: This field is the filepath of the directory whose csv has to be made. :param filestream: This field is a stream for writing to the csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m. ] variable[csv_out] assign[=] call[name[csv].writer, parameter[name[filestream]]] variable[subdirs] assign[=] <ast.ListComp object at 0x7da1b10a4d30> if name[subdirs] begin[:] call[name[logging].info, parameter[call[constant[Making metadata for subdirs of {}].format, parameter[name[filedir]]]]] if <ast.UnaryOp object at 0x7da1b0f1f6a0> begin[:] <ast.Raise object at 0x7da1b0f1de40> call[name[csv_out].writerow, parameter[list[[<ast.Constant object at 0x7da1b0f1f040>, <ast.Constant object at 0x7da1b0f1dab0>, <ast.Constant object at 0x7da1b0f1dc30>, <ast.Constant object at 0x7da1b0f1f3d0>, <ast.Constant object at 0x7da1b0f1e890>, <ast.Constant object at 0x7da1b0f1ea10>]]]] for taget[name[subdir]] in starred[name[subdirs]] begin[:] variable[file_info] assign[=] call[name[characterize_local_files], parameter[]] variable[proj_member_id] assign[=] call[name[os].path.basename, parameter[name[subdir]]] if <ast.UnaryOp object at 0x7da1b0f1ded0> begin[:] call[name[csv_out].writerow, parameter[list[[<ast.Name object at 0x7da1b0f1e290>, <ast.Constant object at 0x7da1b0f1d9f0>, <ast.Constant object at 0x7da1b0f1c730>, <ast.Constant object at 0x7da1b0f1c2e0>, <ast.Constant object at 0x7da1b0f1eec0>, <ast.Constant object at 0x7da1b0f1da50>]]]] continue for taget[name[filename]] in starred[name[file_info]] begin[:] call[name[csv_out].writerow, parameter[list[[<ast.Name object at 0x7da1b0f1f130>, <ast.Name object at 0x7da1b0f1d300>, <ast.Call object at 0x7da1b0f1d9c0>, <ast.Subscript object at 0x7da1b0f1e440>, <ast.Subscript object at 0x7da1b0f1f730>, <ast.Subscript object at 
0x7da1b0f1dfc0>]]]]
keyword[def] identifier[write_metadata_to_filestream] ( identifier[filedir] , identifier[filestream] , identifier[max_bytes] = identifier[MAX_FILE_DEFAULT] ): literal[string] identifier[csv_out] = identifier[csv] . identifier[writer] ( identifier[filestream] ) identifier[subdirs] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[filedir] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[os] . identifier[listdir] ( identifier[filedir] ) keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[filedir] , identifier[i] ))] keyword[if] identifier[subdirs] : identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[filedir] )) keyword[if] keyword[not] identifier[all] ([ identifier[re] . identifier[match] ( literal[string] , identifier[os] . identifier[path] . identifier[basename] ( identifier[d] )) keyword[for] identifier[d] keyword[in] identifier[subdirs] ]): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[csv_out] . identifier[writerow] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[for] identifier[subdir] keyword[in] identifier[subdirs] : identifier[file_info] = identifier[characterize_local_files] ( identifier[filedir] = identifier[subdir] , identifier[max_bytes] = identifier[max_bytes] ) identifier[proj_member_id] = identifier[os] . identifier[path] . identifier[basename] ( identifier[subdir] ) keyword[if] keyword[not] identifier[file_info] : identifier[csv_out] . identifier[writerow] ([ identifier[proj_member_id] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[continue] keyword[for] identifier[filename] keyword[in] identifier[file_info] : identifier[csv_out] . identifier[writerow] ([ identifier[proj_member_id] , identifier[filename] , literal[string] . 
identifier[join] ( identifier[file_info] [ identifier[filename] ][ literal[string] ]), identifier[file_info] [ identifier[filename] ][ literal[string] ], identifier[file_info] [ identifier[filename] ][ literal[string] ], identifier[file_info] [ identifier[filename] ][ literal[string] ], ]) keyword[else] : identifier[csv_out] . identifier[writerow] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) identifier[file_info] = identifier[characterize_local_files] ( identifier[filedir] = identifier[filedir] , identifier[max_bytes] = identifier[max_bytes] ) keyword[for] identifier[filename] keyword[in] identifier[file_info] : identifier[csv_out] . identifier[writerow] ([ identifier[filename] , literal[string] . identifier[join] ( identifier[file_info] [ identifier[filename] ][ literal[string] ]), identifier[file_info] [ identifier[filename] ][ literal[string] ], identifier[file_info] [ identifier[filename] ][ literal[string] ], identifier[file_info] [ identifier[filename] ][ literal[string] ], ])
def write_metadata_to_filestream(filedir, filestream, max_bytes=MAX_FILE_DEFAULT): """ Make metadata file for all files in a directory(helper function) :param filedir: This field is the filepath of the directory whose csv has to be made. :param filestream: This field is a stream for writing to the csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m. """ csv_out = csv.writer(filestream) subdirs = [os.path.join(filedir, i) for i in os.listdir(filedir) if os.path.isdir(os.path.join(filedir, i))] if subdirs: logging.info('Making metadata for subdirs of {}'.format(filedir)) if not all([re.match('^[0-9]{8}$', os.path.basename(d)) for d in subdirs]): raise ValueError('Subdirs not all project member ID format!') # depends on [control=['if'], data=[]] csv_out.writerow(['project_member_id', 'filename', 'tags', 'description', 'md5', 'creation_date']) for subdir in subdirs: file_info = characterize_local_files(filedir=subdir, max_bytes=max_bytes) proj_member_id = os.path.basename(subdir) if not file_info: csv_out.writerow([proj_member_id, 'None', 'NA', 'NA', 'NA', 'NA']) continue # depends on [control=['if'], data=[]] for filename in file_info: csv_out.writerow([proj_member_id, filename, ', '.join(file_info[filename]['tags']), file_info[filename]['description'], file_info[filename]['md5'], file_info[filename]['creation_date']]) # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=['subdir']] # depends on [control=['if'], data=[]] else: csv_out.writerow(['filename', 'tags', 'description', 'md5', 'creation_date']) file_info = characterize_local_files(filedir=filedir, max_bytes=max_bytes) for filename in file_info: csv_out.writerow([filename, ', '.join(file_info[filename]['tags']), file_info[filename]['description'], file_info[filename]['md5'], file_info[filename]['creation_date']]) # depends on [control=['for'], data=['filename']]
def google_text_emphasis(style): """return a list of all emphasis modifiers of the element""" emphasis = [] if 'text-decoration' in style: emphasis.append(style['text-decoration']) if 'font-style' in style: emphasis.append(style['font-style']) if 'font-weight' in style: emphasis.append(style['font-weight']) return emphasis
def function[google_text_emphasis, parameter[style]]: constant[return a list of all emphasis modifiers of the element] variable[emphasis] assign[=] list[[]] if compare[constant[text-decoration] in name[style]] begin[:] call[name[emphasis].append, parameter[call[name[style]][constant[text-decoration]]]] if compare[constant[font-style] in name[style]] begin[:] call[name[emphasis].append, parameter[call[name[style]][constant[font-style]]]] if compare[constant[font-weight] in name[style]] begin[:] call[name[emphasis].append, parameter[call[name[style]][constant[font-weight]]]] return[name[emphasis]]
keyword[def] identifier[google_text_emphasis] ( identifier[style] ): literal[string] identifier[emphasis] =[] keyword[if] literal[string] keyword[in] identifier[style] : identifier[emphasis] . identifier[append] ( identifier[style] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[style] : identifier[emphasis] . identifier[append] ( identifier[style] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[style] : identifier[emphasis] . identifier[append] ( identifier[style] [ literal[string] ]) keyword[return] identifier[emphasis]
def google_text_emphasis(style): """return a list of all emphasis modifiers of the element""" emphasis = [] if 'text-decoration' in style: emphasis.append(style['text-decoration']) # depends on [control=['if'], data=['style']] if 'font-style' in style: emphasis.append(style['font-style']) # depends on [control=['if'], data=['style']] if 'font-weight' in style: emphasis.append(style['font-weight']) # depends on [control=['if'], data=['style']] return emphasis
def log(self, level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) """ self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args, **kwargs)
def function[log, parameter[self, level, msg]]: constant[ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) ] call[name[self]._baseLogger.log, parameter[name[self], name[level], call[name[self].getExtendedMsg, parameter[name[msg]]], <ast.Starred object at 0x7da20c7caa40>]]
keyword[def] identifier[log] ( identifier[self] , identifier[level] , identifier[msg] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[_baseLogger] . identifier[log] ( identifier[self] , identifier[level] , identifier[self] . identifier[getExtendedMsg] ( identifier[msg] ),* identifier[args] , ** identifier[kwargs] )
def log(self, level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) """ self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args, **kwargs)
def _loads(self, response): """ Parse the BSER packet """ return bser.loads( response, True, value_encoding=encoding.get_local_encoding(), value_errors=encoding.default_local_errors, )
def function[_loads, parameter[self, response]]: constant[ Parse the BSER packet ] return[call[name[bser].loads, parameter[name[response], constant[True]]]]
keyword[def] identifier[_loads] ( identifier[self] , identifier[response] ): literal[string] keyword[return] identifier[bser] . identifier[loads] ( identifier[response] , keyword[True] , identifier[value_encoding] = identifier[encoding] . identifier[get_local_encoding] (), identifier[value_errors] = identifier[encoding] . identifier[default_local_errors] , )
def _loads(self, response): """ Parse the BSER packet """ return bser.loads(response, True, value_encoding=encoding.get_local_encoding(), value_errors=encoding.default_local_errors)
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"): """Return a unicode object representing 's'. Treats bytes using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. """ if isinstance(s, six.text_type): return s if strings_only and not isinstance(s, six.string_types): return s if not isinstance(s, six.string_types): if hasattr(s, "__unicode__"): s = s.__unicode__() else: if six.PY3: if isinstance(s, six.binary_type): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) else: s = six.text_type(six.binary_type(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, encoding, # errors), so that if s is a SafeBytes, it ends up being a # SafeText at the end. s = s.decode(encoding, errors) return s
def function[smart_text, parameter[s, encoding, strings_only, errors]]: constant[Return a unicode object representing 's'. Treats bytes using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. ] if call[name[isinstance], parameter[name[s], name[six].text_type]] begin[:] return[name[s]] if <ast.BoolOp object at 0x7da1b1196b60> begin[:] return[name[s]] if <ast.UnaryOp object at 0x7da1b1196440> begin[:] if call[name[hasattr], parameter[name[s], constant[__unicode__]]] begin[:] variable[s] assign[=] call[name[s].__unicode__, parameter[]] return[name[s]]
keyword[def] identifier[smart_text] ( identifier[s] , identifier[encoding] = literal[string] , identifier[strings_only] = keyword[False] , identifier[errors] = literal[string] ): literal[string] keyword[if] identifier[isinstance] ( identifier[s] , identifier[six] . identifier[text_type] ): keyword[return] identifier[s] keyword[if] identifier[strings_only] keyword[and] keyword[not] identifier[isinstance] ( identifier[s] , identifier[six] . identifier[string_types] ): keyword[return] identifier[s] keyword[if] keyword[not] identifier[isinstance] ( identifier[s] , identifier[six] . identifier[string_types] ): keyword[if] identifier[hasattr] ( identifier[s] , literal[string] ): identifier[s] = identifier[s] . identifier[__unicode__] () keyword[else] : keyword[if] identifier[six] . identifier[PY3] : keyword[if] identifier[isinstance] ( identifier[s] , identifier[six] . identifier[binary_type] ): identifier[s] = identifier[six] . identifier[text_type] ( identifier[s] , identifier[encoding] , identifier[errors] ) keyword[else] : identifier[s] = identifier[six] . identifier[text_type] ( identifier[s] ) keyword[else] : identifier[s] = identifier[six] . identifier[text_type] ( identifier[six] . identifier[binary_type] ( identifier[s] ), identifier[encoding] , identifier[errors] ) keyword[else] : identifier[s] = identifier[s] . identifier[decode] ( identifier[encoding] , identifier[errors] ) keyword[return] identifier[s]
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'): """Return a unicode object representing 's'. Treats bytes using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. """ if isinstance(s, six.text_type): return s # depends on [control=['if'], data=[]] if strings_only and (not isinstance(s, six.string_types)): return s # depends on [control=['if'], data=[]] if not isinstance(s, six.string_types): if hasattr(s, '__unicode__'): s = s.__unicode__() # depends on [control=['if'], data=[]] elif six.PY3: if isinstance(s, six.binary_type): s = six.text_type(s, encoding, errors) # depends on [control=['if'], data=[]] else: s = six.text_type(s) # depends on [control=['if'], data=[]] else: s = six.text_type(six.binary_type(s), encoding, errors) # depends on [control=['if'], data=[]] else: # Note: We use .decode() here, instead of six.text_type(s, encoding, # errors), so that if s is a SafeBytes, it ends up being a # SafeText at the end. s = s.decode(encoding, errors) return s
def filter_bool(n: Node, query: str) -> bool: """ Filter and ensure that the returned value is of type bool. """ return _scalariter2item(n, query, bool)
def function[filter_bool, parameter[n, query]]: constant[ Filter and ensure that the returned value is of type bool. ] return[call[name[_scalariter2item], parameter[name[n], name[query], name[bool]]]]
keyword[def] identifier[filter_bool] ( identifier[n] : identifier[Node] , identifier[query] : identifier[str] )-> identifier[bool] : literal[string] keyword[return] identifier[_scalariter2item] ( identifier[n] , identifier[query] , identifier[bool] )
def filter_bool(n: Node, query: str) -> bool: """ Filter and ensure that the returned value is of type bool. """ return _scalariter2item(n, query, bool)
def _build_final_method_name( method_name, dataset_name, dataprovider_name, repeat_suffix, ): """ Return a nice human friendly name, that almost looks like code. Example: a test called 'test_something' with a dataset of (5, 'hello') Return: "test_something(5, 'hello')" Example: a test called 'test_other_stuff' with dataset of (9) and repeats Return: "test_other_stuff(9) iteration_<X>" :param method_name: Base name of the method to add. :type method_name: `unicode` :param dataset_name: Base name of the data set. :type dataset_name: `unicode` or None :param dataprovider_name: If there's a dataprovider involved, then this is its name. :type dataprovider_name: `unicode` or None :param repeat_suffix: Suffix to append to the name of the generated method. :type repeat_suffix: `unicode` or None :return: The fully composed name of the generated test method. :rtype: `unicode` """ # For tests using a dataprovider, append "_<dataprovider_name>" to # the test method name suffix = '' if dataprovider_name: suffix = '_{0}'.format(dataprovider_name) if not dataset_name and not repeat_suffix: return '{0}{1}'.format(method_name, suffix) if dataset_name: # Nosetest multi-processing code parses the full test name # to discern package/module names. Thus any periods in the test-name # causes that code to fail. So replace any periods with the unicode # middle-dot character. Yes, this change is applied independent # of the test runner being used... and that's fine since there is # no real contract as to how the fabricated tests are named. dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR) # Place data_set info inside parens, as if it were a function call suffix = '{0}({1})'.format(suffix, dataset_name or "") if repeat_suffix: suffix = '{0} {1}'.format(suffix, repeat_suffix) test_method_name_for_dataset = "{0}{1}".format( method_name, suffix, ) return test_method_name_for_dataset
def function[_build_final_method_name, parameter[method_name, dataset_name, dataprovider_name, repeat_suffix]]: constant[ Return a nice human friendly name, that almost looks like code. Example: a test called 'test_something' with a dataset of (5, 'hello') Return: "test_something(5, 'hello')" Example: a test called 'test_other_stuff' with dataset of (9) and repeats Return: "test_other_stuff(9) iteration_<X>" :param method_name: Base name of the method to add. :type method_name: `unicode` :param dataset_name: Base name of the data set. :type dataset_name: `unicode` or None :param dataprovider_name: If there's a dataprovider involved, then this is its name. :type dataprovider_name: `unicode` or None :param repeat_suffix: Suffix to append to the name of the generated method. :type repeat_suffix: `unicode` or None :return: The fully composed name of the generated test method. :rtype: `unicode` ] variable[suffix] assign[=] constant[] if name[dataprovider_name] begin[:] variable[suffix] assign[=] call[constant[_{0}].format, parameter[name[dataprovider_name]]] if <ast.BoolOp object at 0x7da20e963610> begin[:] return[call[constant[{0}{1}].format, parameter[name[method_name], name[suffix]]]] if name[dataset_name] begin[:] variable[dataset_name] assign[=] call[name[dataset_name].replace, parameter[constant[.], name[REPLACE_FOR_PERIOD_CHAR]]] variable[suffix] assign[=] call[constant[{0}({1})].format, parameter[name[suffix], <ast.BoolOp object at 0x7da20e9634c0>]] if name[repeat_suffix] begin[:] variable[suffix] assign[=] call[constant[{0} {1}].format, parameter[name[suffix], name[repeat_suffix]]] variable[test_method_name_for_dataset] assign[=] call[constant[{0}{1}].format, parameter[name[method_name], name[suffix]]] return[name[test_method_name_for_dataset]]
keyword[def] identifier[_build_final_method_name] ( identifier[method_name] , identifier[dataset_name] , identifier[dataprovider_name] , identifier[repeat_suffix] , ): literal[string] identifier[suffix] = literal[string] keyword[if] identifier[dataprovider_name] : identifier[suffix] = literal[string] . identifier[format] ( identifier[dataprovider_name] ) keyword[if] keyword[not] identifier[dataset_name] keyword[and] keyword[not] identifier[repeat_suffix] : keyword[return] literal[string] . identifier[format] ( identifier[method_name] , identifier[suffix] ) keyword[if] identifier[dataset_name] : identifier[dataset_name] = identifier[dataset_name] . identifier[replace] ( literal[string] , identifier[REPLACE_FOR_PERIOD_CHAR] ) identifier[suffix] = literal[string] . identifier[format] ( identifier[suffix] , identifier[dataset_name] keyword[or] literal[string] ) keyword[if] identifier[repeat_suffix] : identifier[suffix] = literal[string] . identifier[format] ( identifier[suffix] , identifier[repeat_suffix] ) identifier[test_method_name_for_dataset] = literal[string] . identifier[format] ( identifier[method_name] , identifier[suffix] , ) keyword[return] identifier[test_method_name_for_dataset]
def _build_final_method_name(method_name, dataset_name, dataprovider_name, repeat_suffix): """ Return a nice human friendly name, that almost looks like code. Example: a test called 'test_something' with a dataset of (5, 'hello') Return: "test_something(5, 'hello')" Example: a test called 'test_other_stuff' with dataset of (9) and repeats Return: "test_other_stuff(9) iteration_<X>" :param method_name: Base name of the method to add. :type method_name: `unicode` :param dataset_name: Base name of the data set. :type dataset_name: `unicode` or None :param dataprovider_name: If there's a dataprovider involved, then this is its name. :type dataprovider_name: `unicode` or None :param repeat_suffix: Suffix to append to the name of the generated method. :type repeat_suffix: `unicode` or None :return: The fully composed name of the generated test method. :rtype: `unicode` """ # For tests using a dataprovider, append "_<dataprovider_name>" to # the test method name suffix = '' if dataprovider_name: suffix = '_{0}'.format(dataprovider_name) # depends on [control=['if'], data=[]] if not dataset_name and (not repeat_suffix): return '{0}{1}'.format(method_name, suffix) # depends on [control=['if'], data=[]] if dataset_name: # Nosetest multi-processing code parses the full test name # to discern package/module names. Thus any periods in the test-name # causes that code to fail. So replace any periods with the unicode # middle-dot character. Yes, this change is applied independent # of the test runner being used... and that's fine since there is # no real contract as to how the fabricated tests are named. 
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR) # depends on [control=['if'], data=[]] # Place data_set info inside parens, as if it were a function call suffix = '{0}({1})'.format(suffix, dataset_name or '') if repeat_suffix: suffix = '{0} {1}'.format(suffix, repeat_suffix) # depends on [control=['if'], data=[]] test_method_name_for_dataset = '{0}{1}'.format(method_name, suffix) return test_method_name_for_dataset
def query(self,query_string,toprint=False): """Prints words matching the given query. Eg: [-voice] (Syllable: (Onset: [+voice]) (Coda: [+voice]))""" qq=SearchTerm(query_string) matchcount=0 matches=[] for word in self.words(): for match in word.search(qq): matches.append(match) matchcount+=1 if "Word" in str(type(match)): matchstr="" else: matchstr=str(match) if toprint: word.om(makeminlength(str(matchcount),int(being.linelen/6))+"\t"+makeminlength(str(word),int(being.linelen))+"\t"+matchstr) return matches
def function[query, parameter[self, query_string, toprint]]: constant[Prints words matching the given query. Eg: [-voice] (Syllable: (Onset: [+voice]) (Coda: [+voice]))] variable[qq] assign[=] call[name[SearchTerm], parameter[name[query_string]]] variable[matchcount] assign[=] constant[0] variable[matches] assign[=] list[[]] for taget[name[word]] in starred[call[name[self].words, parameter[]]] begin[:] for taget[name[match]] in starred[call[name[word].search, parameter[name[qq]]]] begin[:] call[name[matches].append, parameter[name[match]]] <ast.AugAssign object at 0x7da20e955c00> if compare[constant[Word] in call[name[str], parameter[call[name[type], parameter[name[match]]]]]] begin[:] variable[matchstr] assign[=] constant[] if name[toprint] begin[:] call[name[word].om, parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[makeminlength], parameter[call[name[str], parameter[name[matchcount]]], call[name[int], parameter[binary_operation[name[being].linelen / constant[6]]]]]] + constant[ ]] + call[name[makeminlength], parameter[call[name[str], parameter[name[word]]], call[name[int], parameter[name[being].linelen]]]]] + constant[ ]] + name[matchstr]]]] return[name[matches]]
keyword[def] identifier[query] ( identifier[self] , identifier[query_string] , identifier[toprint] = keyword[False] ): literal[string] identifier[qq] = identifier[SearchTerm] ( identifier[query_string] ) identifier[matchcount] = literal[int] identifier[matches] =[] keyword[for] identifier[word] keyword[in] identifier[self] . identifier[words] (): keyword[for] identifier[match] keyword[in] identifier[word] . identifier[search] ( identifier[qq] ): identifier[matches] . identifier[append] ( identifier[match] ) identifier[matchcount] += literal[int] keyword[if] literal[string] keyword[in] identifier[str] ( identifier[type] ( identifier[match] )): identifier[matchstr] = literal[string] keyword[else] : identifier[matchstr] = identifier[str] ( identifier[match] ) keyword[if] identifier[toprint] : identifier[word] . identifier[om] ( identifier[makeminlength] ( identifier[str] ( identifier[matchcount] ), identifier[int] ( identifier[being] . identifier[linelen] / literal[int] ))+ literal[string] + identifier[makeminlength] ( identifier[str] ( identifier[word] ), identifier[int] ( identifier[being] . identifier[linelen] ))+ literal[string] + identifier[matchstr] ) keyword[return] identifier[matches]
def query(self, query_string, toprint=False): """Prints words matching the given query. Eg: [-voice] (Syllable: (Onset: [+voice]) (Coda: [+voice]))""" qq = SearchTerm(query_string) matchcount = 0 matches = [] for word in self.words(): for match in word.search(qq): matches.append(match) matchcount += 1 if 'Word' in str(type(match)): matchstr = '' # depends on [control=['if'], data=[]] else: matchstr = str(match) if toprint: word.om(makeminlength(str(matchcount), int(being.linelen / 6)) + '\t' + makeminlength(str(word), int(being.linelen)) + '\t' + matchstr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['match']] # depends on [control=['for'], data=['word']] return matches
def last(pipe, items=1): ''' this function simply returns the last item in an iterable ''' if items == 1: tmp=None for i in pipe: tmp=i return tmp else: return tuple(deque(pipe, maxlen=items))
def function[last, parameter[pipe, items]]: constant[ this function simply returns the last item in an iterable ] if compare[name[items] equal[==] constant[1]] begin[:] variable[tmp] assign[=] constant[None] for taget[name[i]] in starred[name[pipe]] begin[:] variable[tmp] assign[=] name[i] return[name[tmp]]
keyword[def] identifier[last] ( identifier[pipe] , identifier[items] = literal[int] ): literal[string] keyword[if] identifier[items] == literal[int] : identifier[tmp] = keyword[None] keyword[for] identifier[i] keyword[in] identifier[pipe] : identifier[tmp] = identifier[i] keyword[return] identifier[tmp] keyword[else] : keyword[return] identifier[tuple] ( identifier[deque] ( identifier[pipe] , identifier[maxlen] = identifier[items] ))
def last(pipe, items=1): """ this function simply returns the last item in an iterable """ if items == 1: tmp = None for i in pipe: tmp = i # depends on [control=['for'], data=['i']] return tmp # depends on [control=['if'], data=[]] else: return tuple(deque(pipe, maxlen=items))
def to_shape_list(region_list, coordinate_system='fk5'): """ Converts a list of regions into a `regions.ShapeList` object. Parameters ---------- region_list: python list Lists of `regions.Region` objects format_type: str ('DS9' or 'CRTF') The format type of the Shape object. Default is 'DS9'. coordinate_system: str The astropy coordinate system frame in which all the coordinates present in the `region_list` will be converted. Default is 'fk5'. Returns ------- shape_list: `regions.ShapeList` object list of `regions.Shape` objects. """ shape_list = ShapeList() for region in region_list: coord = [] if isinstance(region, SkyRegion): reg_type = region.__class__.__name__[:-9].lower() else: reg_type = region.__class__.__name__[:-11].lower() for val in regions_attributes[reg_type]: coord.append(getattr(region, val)) if reg_type == 'polygon': coord = [x for x in region.vertices] if coordinate_system: coordsys = coordinate_system else: if isinstance(region, SkyRegion): coordsys = coord[0].name else: coordsys = 'image' frame = coordinates.frame_transform_graph.lookup_name(coordsys) new_coord = [] for val in coord: if isinstance(val, Angle) or isinstance(val, u.Quantity) or isinstance(val, numbers.Number): new_coord.append(val) elif isinstance(val, PixCoord): new_coord.append(u.Quantity(val.x, u.dimensionless_unscaled)) new_coord.append(u.Quantity(val.y, u.dimensionless_unscaled)) else: new_coord.append(Angle(val.transform_to(frame).spherical.lon)) new_coord.append(Angle(val.transform_to(frame).spherical.lat)) meta = dict(region.meta) meta.update(region.visual) if reg_type == 'text': meta['text'] = meta.get('text', meta.pop('label', '')) include = region.meta.pop('include', True) shape_list.append(Shape(coordsys, reg_type, new_coord, meta, False, include)) return shape_list
def function[to_shape_list, parameter[region_list, coordinate_system]]: constant[ Converts a list of regions into a `regions.ShapeList` object. Parameters ---------- region_list: python list Lists of `regions.Region` objects format_type: str ('DS9' or 'CRTF') The format type of the Shape object. Default is 'DS9'. coordinate_system: str The astropy coordinate system frame in which all the coordinates present in the `region_list` will be converted. Default is 'fk5'. Returns ------- shape_list: `regions.ShapeList` object list of `regions.Shape` objects. ] variable[shape_list] assign[=] call[name[ShapeList], parameter[]] for taget[name[region]] in starred[name[region_list]] begin[:] variable[coord] assign[=] list[[]] if call[name[isinstance], parameter[name[region], name[SkyRegion]]] begin[:] variable[reg_type] assign[=] call[call[name[region].__class__.__name__][<ast.Slice object at 0x7da204963a60>].lower, parameter[]] for taget[name[val]] in starred[call[name[regions_attributes]][name[reg_type]]] begin[:] call[name[coord].append, parameter[call[name[getattr], parameter[name[region], name[val]]]]] if compare[name[reg_type] equal[==] constant[polygon]] begin[:] variable[coord] assign[=] <ast.ListComp object at 0x7da204961f30> if name[coordinate_system] begin[:] variable[coordsys] assign[=] name[coordinate_system] variable[frame] assign[=] call[name[coordinates].frame_transform_graph.lookup_name, parameter[name[coordsys]]] variable[new_coord] assign[=] list[[]] for taget[name[val]] in starred[name[coord]] begin[:] if <ast.BoolOp object at 0x7da204962650> begin[:] call[name[new_coord].append, parameter[name[val]]] variable[meta] assign[=] call[name[dict], parameter[name[region].meta]] call[name[meta].update, parameter[name[region].visual]] if compare[name[reg_type] equal[==] constant[text]] begin[:] call[name[meta]][constant[text]] assign[=] call[name[meta].get, parameter[constant[text], call[name[meta].pop, parameter[constant[label], constant[]]]]] variable[include] 
assign[=] call[name[region].meta.pop, parameter[constant[include], constant[True]]] call[name[shape_list].append, parameter[call[name[Shape], parameter[name[coordsys], name[reg_type], name[new_coord], name[meta], constant[False], name[include]]]]] return[name[shape_list]]
keyword[def] identifier[to_shape_list] ( identifier[region_list] , identifier[coordinate_system] = literal[string] ): literal[string] identifier[shape_list] = identifier[ShapeList] () keyword[for] identifier[region] keyword[in] identifier[region_list] : identifier[coord] =[] keyword[if] identifier[isinstance] ( identifier[region] , identifier[SkyRegion] ): identifier[reg_type] = identifier[region] . identifier[__class__] . identifier[__name__] [:- literal[int] ]. identifier[lower] () keyword[else] : identifier[reg_type] = identifier[region] . identifier[__class__] . identifier[__name__] [:- literal[int] ]. identifier[lower] () keyword[for] identifier[val] keyword[in] identifier[regions_attributes] [ identifier[reg_type] ]: identifier[coord] . identifier[append] ( identifier[getattr] ( identifier[region] , identifier[val] )) keyword[if] identifier[reg_type] == literal[string] : identifier[coord] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[region] . identifier[vertices] ] keyword[if] identifier[coordinate_system] : identifier[coordsys] = identifier[coordinate_system] keyword[else] : keyword[if] identifier[isinstance] ( identifier[region] , identifier[SkyRegion] ): identifier[coordsys] = identifier[coord] [ literal[int] ]. identifier[name] keyword[else] : identifier[coordsys] = literal[string] identifier[frame] = identifier[coordinates] . identifier[frame_transform_graph] . identifier[lookup_name] ( identifier[coordsys] ) identifier[new_coord] =[] keyword[for] identifier[val] keyword[in] identifier[coord] : keyword[if] identifier[isinstance] ( identifier[val] , identifier[Angle] ) keyword[or] identifier[isinstance] ( identifier[val] , identifier[u] . identifier[Quantity] ) keyword[or] identifier[isinstance] ( identifier[val] , identifier[numbers] . identifier[Number] ): identifier[new_coord] . identifier[append] ( identifier[val] ) keyword[elif] identifier[isinstance] ( identifier[val] , identifier[PixCoord] ): identifier[new_coord] . 
identifier[append] ( identifier[u] . identifier[Quantity] ( identifier[val] . identifier[x] , identifier[u] . identifier[dimensionless_unscaled] )) identifier[new_coord] . identifier[append] ( identifier[u] . identifier[Quantity] ( identifier[val] . identifier[y] , identifier[u] . identifier[dimensionless_unscaled] )) keyword[else] : identifier[new_coord] . identifier[append] ( identifier[Angle] ( identifier[val] . identifier[transform_to] ( identifier[frame] ). identifier[spherical] . identifier[lon] )) identifier[new_coord] . identifier[append] ( identifier[Angle] ( identifier[val] . identifier[transform_to] ( identifier[frame] ). identifier[spherical] . identifier[lat] )) identifier[meta] = identifier[dict] ( identifier[region] . identifier[meta] ) identifier[meta] . identifier[update] ( identifier[region] . identifier[visual] ) keyword[if] identifier[reg_type] == literal[string] : identifier[meta] [ literal[string] ]= identifier[meta] . identifier[get] ( literal[string] , identifier[meta] . identifier[pop] ( literal[string] , literal[string] )) identifier[include] = identifier[region] . identifier[meta] . identifier[pop] ( literal[string] , keyword[True] ) identifier[shape_list] . identifier[append] ( identifier[Shape] ( identifier[coordsys] , identifier[reg_type] , identifier[new_coord] , identifier[meta] , keyword[False] , identifier[include] )) keyword[return] identifier[shape_list]
def to_shape_list(region_list, coordinate_system='fk5'): """ Converts a list of regions into a `regions.ShapeList` object. Parameters ---------- region_list: python list Lists of `regions.Region` objects format_type: str ('DS9' or 'CRTF') The format type of the Shape object. Default is 'DS9'. coordinate_system: str The astropy coordinate system frame in which all the coordinates present in the `region_list` will be converted. Default is 'fk5'. Returns ------- shape_list: `regions.ShapeList` object list of `regions.Shape` objects. """ shape_list = ShapeList() for region in region_list: coord = [] if isinstance(region, SkyRegion): reg_type = region.__class__.__name__[:-9].lower() # depends on [control=['if'], data=[]] else: reg_type = region.__class__.__name__[:-11].lower() for val in regions_attributes[reg_type]: coord.append(getattr(region, val)) # depends on [control=['for'], data=['val']] if reg_type == 'polygon': coord = [x for x in region.vertices] # depends on [control=['if'], data=[]] if coordinate_system: coordsys = coordinate_system # depends on [control=['if'], data=[]] elif isinstance(region, SkyRegion): coordsys = coord[0].name # depends on [control=['if'], data=[]] else: coordsys = 'image' frame = coordinates.frame_transform_graph.lookup_name(coordsys) new_coord = [] for val in coord: if isinstance(val, Angle) or isinstance(val, u.Quantity) or isinstance(val, numbers.Number): new_coord.append(val) # depends on [control=['if'], data=[]] elif isinstance(val, PixCoord): new_coord.append(u.Quantity(val.x, u.dimensionless_unscaled)) new_coord.append(u.Quantity(val.y, u.dimensionless_unscaled)) # depends on [control=['if'], data=[]] else: new_coord.append(Angle(val.transform_to(frame).spherical.lon)) new_coord.append(Angle(val.transform_to(frame).spherical.lat)) # depends on [control=['for'], data=['val']] meta = dict(region.meta) meta.update(region.visual) if reg_type == 'text': meta['text'] = meta.get('text', meta.pop('label', '')) # depends on 
[control=['if'], data=[]] include = region.meta.pop('include', True) shape_list.append(Shape(coordsys, reg_type, new_coord, meta, False, include)) # depends on [control=['for'], data=['region']] return shape_list
def prepare_xml(args, parser):
    """Prepare XML files for stripping.

    Builds a corpus handler for the configured TEI source and tidies the
    input into a single, normalised TEI XML file per work.

    :param args: parsed command-line arguments (``source``, ``input``,
        ``output`` attributes are read)
    :param parser: argument parser (unused; kept for the CLI dispatch
        signature)
    :raises Exception: if ``args.source`` names an unsupported TEI source
    """
    if args.source != constants.TEI_SOURCE_CBETA_GITHUB:
        raise Exception('Unsupported TEI source option provided')
    # Only the CBETA GitHub source is currently supported.
    corpus = tacl.TEICorpusCBETAGitHub(args.input, args.output)
    corpus.tidy()
def function[prepare_xml, parameter[args, parser]]: constant[Prepares XML files for stripping. This process creates a single, normalised TEI XML file for each work. ] if compare[name[args].source equal[==] name[constants].TEI_SOURCE_CBETA_GITHUB] begin[:] variable[corpus_class] assign[=] name[tacl].TEICorpusCBETAGitHub variable[corpus] assign[=] call[name[corpus_class], parameter[name[args].input, name[args].output]] call[name[corpus].tidy, parameter[]]
keyword[def] identifier[prepare_xml] ( identifier[args] , identifier[parser] ): literal[string] keyword[if] identifier[args] . identifier[source] == identifier[constants] . identifier[TEI_SOURCE_CBETA_GITHUB] : identifier[corpus_class] = identifier[tacl] . identifier[TEICorpusCBETAGitHub] keyword[else] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[corpus] = identifier[corpus_class] ( identifier[args] . identifier[input] , identifier[args] . identifier[output] ) identifier[corpus] . identifier[tidy] ()
def prepare_xml(args, parser): """Prepares XML files for stripping. This process creates a single, normalised TEI XML file for each work. """ if args.source == constants.TEI_SOURCE_CBETA_GITHUB: corpus_class = tacl.TEICorpusCBETAGitHub # depends on [control=['if'], data=[]] else: raise Exception('Unsupported TEI source option provided') corpus = corpus_class(args.input, args.output) corpus.tidy()
def imagetransformer1d_base_8l_64by64():
  """Hparams for an 8-layer big 1d model for ImageNet 64x64.

  NOTE(review): the original docstring said "12 layer", which contradicts
  both the function name (``_8l_``) and ``num_decoder_layers = 8`` below;
  corrected to 8.
  """
  hparams = image_transformer_base()
  hparams.num_heads = 8
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 8
  hparams.batch_size = 1
  hparams.block_length = 512
  hparams.block_width = 768
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.max_length = 14000
  # Conditional generation; stored as an int flag (0) rather than a bool.
  hparams.unconditional = int(False)
  return hparams
def function[imagetransformer1d_base_8l_64by64, parameter[]]: constant[hparams fo 12 layer big 1d model for imagenet 64x64.] variable[hparams] assign[=] call[name[image_transformer_base], parameter[]] name[hparams].num_heads assign[=] constant[8] name[hparams].hidden_size assign[=] constant[512] name[hparams].filter_size assign[=] constant[2048] name[hparams].num_decoder_layers assign[=] constant[8] name[hparams].batch_size assign[=] constant[1] name[hparams].block_length assign[=] constant[512] name[hparams].block_width assign[=] constant[768] name[hparams].layer_prepostprocess_dropout assign[=] constant[0.1] name[hparams].max_length assign[=] constant[14000] name[hparams].unconditional assign[=] call[name[int], parameter[constant[False]]] return[name[hparams]]
keyword[def] identifier[imagetransformer1d_base_8l_64by64] (): literal[string] identifier[hparams] = identifier[image_transformer_base] () identifier[hparams] . identifier[num_heads] = literal[int] identifier[hparams] . identifier[hidden_size] = literal[int] identifier[hparams] . identifier[filter_size] = literal[int] identifier[hparams] . identifier[num_decoder_layers] = literal[int] identifier[hparams] . identifier[batch_size] = literal[int] identifier[hparams] . identifier[block_length] = literal[int] identifier[hparams] . identifier[block_width] = literal[int] identifier[hparams] . identifier[layer_prepostprocess_dropout] = literal[int] identifier[hparams] . identifier[max_length] = literal[int] identifier[hparams] . identifier[unconditional] = identifier[int] ( keyword[False] ) keyword[return] identifier[hparams]
def imagetransformer1d_base_8l_64by64(): """hparams fo 12 layer big 1d model for imagenet 64x64.""" hparams = image_transformer_base() hparams.num_heads = 8 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.num_decoder_layers = 8 hparams.batch_size = 1 hparams.block_length = 512 hparams.block_width = 768 hparams.layer_prepostprocess_dropout = 0.1 hparams.max_length = 14000 hparams.unconditional = int(False) return hparams
def swagger_ui_script_template(request, **kwargs):
    """
    :param request:
    :return: Generates the <script> code that bootstraps Swagger UI,
        it will be injected into index template
    """
    spec_url = request.route_url('cornice_swagger.open_api_path')
    raw = pkg_resources.resource_string(
        'cornice_swagger',
        'templates/index_script_template.html',
    )
    template = Template(raw.decode('utf8'))
    return template.safe_substitute(swagger_spec_url=spec_url)
def function[swagger_ui_script_template, parameter[request]]: constant[ :param request: :return: Generates the <script> code that bootstraps Swagger UI, it will be injected into index template ] variable[swagger_spec_url] assign[=] call[name[request].route_url, parameter[constant[cornice_swagger.open_api_path]]] variable[template] assign[=] call[call[name[pkg_resources].resource_string, parameter[constant[cornice_swagger], constant[templates/index_script_template.html]]].decode, parameter[constant[utf8]]] return[call[call[name[Template], parameter[name[template]]].safe_substitute, parameter[]]]
keyword[def] identifier[swagger_ui_script_template] ( identifier[request] ,** identifier[kwargs] ): literal[string] identifier[swagger_spec_url] = identifier[request] . identifier[route_url] ( literal[string] ) identifier[template] = identifier[pkg_resources] . identifier[resource_string] ( literal[string] , literal[string] ). identifier[decode] ( literal[string] ) keyword[return] identifier[Template] ( identifier[template] ). identifier[safe_substitute] ( identifier[swagger_spec_url] = identifier[swagger_spec_url] , )
def swagger_ui_script_template(request, **kwargs): """ :param request: :return: Generates the <script> code that bootstraps Swagger UI, it will be injected into index template """ swagger_spec_url = request.route_url('cornice_swagger.open_api_path') template = pkg_resources.resource_string('cornice_swagger', 'templates/index_script_template.html').decode('utf8') return Template(template).safe_substitute(swagger_spec_url=swagger_spec_url)
def get_username(self, claims):
    """Generate username based on claims."""
    # bluntly stolen from django-browserid
    # https://github.com/mozilla/django-browserid/blob/master/django_browserid/auth.py
    username_algo = self.get_settings('OIDC_USERNAME_ALGO', None)
    if not username_algo:
        # No custom algorithm configured; fall back to the default.
        return default_username_algo(claims.get('email'))
    # A dotted-path string is resolved to the actual callable first.
    if isinstance(username_algo, six.string_types):
        username_algo = import_string(username_algo)
    return username_algo(claims.get('email'))
def function[get_username, parameter[self, claims]]: constant[Generate username based on claims.] variable[username_algo] assign[=] call[name[self].get_settings, parameter[constant[OIDC_USERNAME_ALGO], constant[None]]] if name[username_algo] begin[:] if call[name[isinstance], parameter[name[username_algo], name[six].string_types]] begin[:] variable[username_algo] assign[=] call[name[import_string], parameter[name[username_algo]]] return[call[name[username_algo], parameter[call[name[claims].get, parameter[constant[email]]]]]] return[call[name[default_username_algo], parameter[call[name[claims].get, parameter[constant[email]]]]]]
keyword[def] identifier[get_username] ( identifier[self] , identifier[claims] ): literal[string] identifier[username_algo] = identifier[self] . identifier[get_settings] ( literal[string] , keyword[None] ) keyword[if] identifier[username_algo] : keyword[if] identifier[isinstance] ( identifier[username_algo] , identifier[six] . identifier[string_types] ): identifier[username_algo] = identifier[import_string] ( identifier[username_algo] ) keyword[return] identifier[username_algo] ( identifier[claims] . identifier[get] ( literal[string] )) keyword[return] identifier[default_username_algo] ( identifier[claims] . identifier[get] ( literal[string] ))
def get_username(self, claims): """Generate username based on claims.""" # bluntly stolen from django-browserid # https://github.com/mozilla/django-browserid/blob/master/django_browserid/auth.py username_algo = self.get_settings('OIDC_USERNAME_ALGO', None) if username_algo: if isinstance(username_algo, six.string_types): username_algo = import_string(username_algo) # depends on [control=['if'], data=[]] return username_algo(claims.get('email')) # depends on [control=['if'], data=[]] return default_username_algo(claims.get('email'))
def t_NOTEQUAL(self, t):
    r"!\="
    # NOTE: the raw-string docstring above is the token's regular
    # expression (PLY-style lexer rule, presumably — confirm against the
    # lexer framework in use); it must stay exactly as written.
    # Record the end position of the matched "!=" so later stages can
    # slice the source text for this token.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def function[t_NOTEQUAL, parameter[self, t]]: constant[!\=] name[t].endlexpos assign[=] binary_operation[name[t].lexpos + call[name[len], parameter[name[t].value]]] return[name[t]]
keyword[def] identifier[t_NOTEQUAL] ( identifier[self] , identifier[t] ): literal[string] identifier[t] . identifier[endlexpos] = identifier[t] . identifier[lexpos] + identifier[len] ( identifier[t] . identifier[value] ) keyword[return] identifier[t]
def t_NOTEQUAL(self, t): """!\\=""" t.endlexpos = t.lexpos + len(t.value) return t
def get_mac_address_table_output_mac_address_table_mac_address(self, **kwargs):
    """Auto Generated Code

    Build the ``get_mac_address_table`` request element with the given
    ``vlanid`` and ``mac_address`` values and pass it to the callback.

    :param kwargs: must contain ``vlanid`` and ``mac_address``; an optional
        ``callback`` overrides ``self._callback``.
    :return: whatever the callback returns for the built element tree.
    """
    # NOTE(review): the original first built ET.Element("config") and
    # immediately overwrote the binding — that dead assignment is removed.
    get_mac_address_table = ET.Element("get_mac_address_table")
    config = get_mac_address_table
    output = ET.SubElement(get_mac_address_table, "output")
    mac_address_table = ET.SubElement(output, "mac-address-table")
    vlanid_key = ET.SubElement(mac_address_table, "vlanid")
    vlanid_key.text = kwargs.pop('vlanid')
    mac_address = ET.SubElement(mac_address_table, "mac-address")
    mac_address.text = kwargs.pop('mac_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def function[get_mac_address_table_output_mac_address_table_mac_address, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[get_mac_address_table] assign[=] call[name[ET].Element, parameter[constant[get_mac_address_table]]] variable[config] assign[=] name[get_mac_address_table] variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_mac_address_table], constant[output]]] variable[mac_address_table] assign[=] call[name[ET].SubElement, parameter[name[output], constant[mac-address-table]]] variable[vlanid_key] assign[=] call[name[ET].SubElement, parameter[name[mac_address_table], constant[vlanid]]] name[vlanid_key].text assign[=] call[name[kwargs].pop, parameter[constant[vlanid]]] variable[mac_address] assign[=] call[name[ET].SubElement, parameter[name[mac_address_table], constant[mac-address]]] name[mac_address].text assign[=] call[name[kwargs].pop, parameter[constant[mac_address]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[get_mac_address_table_output_mac_address_table_mac_address] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[get_mac_address_table] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[get_mac_address_table] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_mac_address_table] , literal[string] ) identifier[mac_address_table] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[vlanid_key] = identifier[ET] . identifier[SubElement] ( identifier[mac_address_table] , literal[string] ) identifier[vlanid_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[mac_address] = identifier[ET] . identifier[SubElement] ( identifier[mac_address_table] , literal[string] ) identifier[mac_address] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def get_mac_address_table_output_mac_address_table_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') get_mac_address_table = ET.Element('get_mac_address_table') config = get_mac_address_table output = ET.SubElement(get_mac_address_table, 'output') mac_address_table = ET.SubElement(output, 'mac-address-table') vlanid_key = ET.SubElement(mac_address_table, 'vlanid') vlanid_key.text = kwargs.pop('vlanid') mac_address = ET.SubElement(mac_address_table, 'mac-address') mac_address.text = kwargs.pop('mac_address') callback = kwargs.pop('callback', self._callback) return callback(config)
def _timedelta_to_seconds(td): """Convert a datetime.timedelta object into a seconds interval for rotating file ouput. :param td: datetime.timedelta :return: time in seconds :rtype: int """ if isinstance(td, numbers.Real): td = datetime.timedelta(seconds=td) return td.total_seconds()
def function[_timedelta_to_seconds, parameter[td]]: constant[Convert a datetime.timedelta object into a seconds interval for rotating file ouput. :param td: datetime.timedelta :return: time in seconds :rtype: int ] if call[name[isinstance], parameter[name[td], name[numbers].Real]] begin[:] variable[td] assign[=] call[name[datetime].timedelta, parameter[]] return[call[name[td].total_seconds, parameter[]]]
keyword[def] identifier[_timedelta_to_seconds] ( identifier[td] ): literal[string] keyword[if] identifier[isinstance] ( identifier[td] , identifier[numbers] . identifier[Real] ): identifier[td] = identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[td] ) keyword[return] identifier[td] . identifier[total_seconds] ()
def _timedelta_to_seconds(td): """Convert a datetime.timedelta object into a seconds interval for rotating file ouput. :param td: datetime.timedelta :return: time in seconds :rtype: int """ if isinstance(td, numbers.Real): td = datetime.timedelta(seconds=td) # depends on [control=['if'], data=[]] return td.total_seconds()
def get_string(self, stringid):
    '''Returns the localized string from strings.xml for the given
    stringid, caching each lookup on the instance.
    '''
    key = int(stringid)
    # Lazily create the per-instance cache on first use.
    if not hasattr(self, '_strings'):
        self._strings = {}
    cache = self._strings
    if key not in cache:
        cache[key] = self.addon.getLocalizedString(key)
    return cache[key]
def function[get_string, parameter[self, stringid]]: constant[Returns the localized string from strings.xml for the given stringid. ] variable[stringid] assign[=] call[name[int], parameter[name[stringid]]] if <ast.UnaryOp object at 0x7da1b183ab30> begin[:] name[self]._strings assign[=] dictionary[[], []] if <ast.UnaryOp object at 0x7da1b183a470> begin[:] call[name[self]._strings][name[stringid]] assign[=] call[name[self].addon.getLocalizedString, parameter[name[stringid]]] return[call[name[self]._strings][name[stringid]]]
keyword[def] identifier[get_string] ( identifier[self] , identifier[stringid] ): literal[string] identifier[stringid] = identifier[int] ( identifier[stringid] ) keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_strings] ={} keyword[if] keyword[not] identifier[stringid] keyword[in] identifier[self] . identifier[_strings] : identifier[self] . identifier[_strings] [ identifier[stringid] ]= identifier[self] . identifier[addon] . identifier[getLocalizedString] ( identifier[stringid] ) keyword[return] identifier[self] . identifier[_strings] [ identifier[stringid] ]
def get_string(self, stringid): """Returns the localized string from strings.xml for the given stringid. """ stringid = int(stringid) if not hasattr(self, '_strings'): self._strings = {} # depends on [control=['if'], data=[]] if not stringid in self._strings: self._strings[stringid] = self.addon.getLocalizedString(stringid) # depends on [control=['if'], data=[]] return self._strings[stringid]
def _get_prompt_cursor(self): """ Convenience method that returns a cursor for the prompt position. """ cursor = self._control.textCursor() cursor.setPosition(self._prompt_pos) return cursor
def function[_get_prompt_cursor, parameter[self]]: constant[ Convenience method that returns a cursor for the prompt position. ] variable[cursor] assign[=] call[name[self]._control.textCursor, parameter[]] call[name[cursor].setPosition, parameter[name[self]._prompt_pos]] return[name[cursor]]
keyword[def] identifier[_get_prompt_cursor] ( identifier[self] ): literal[string] identifier[cursor] = identifier[self] . identifier[_control] . identifier[textCursor] () identifier[cursor] . identifier[setPosition] ( identifier[self] . identifier[_prompt_pos] ) keyword[return] identifier[cursor]
def _get_prompt_cursor(self): """ Convenience method that returns a cursor for the prompt position. """ cursor = self._control.textCursor() cursor.setPosition(self._prompt_pos) return cursor
def html_to_text(cls, html):
    """Return stripped HTML, keeping only MathML."""
    stripper = cls()
    stripper.feed(html)
    # Unescape the collected text, then re-escape it for XML while
    # preserving the MathML tags.
    text = stripper.unescape(stripper.get_data())
    return escape_for_xml(text, tags_to_keep=stripper.mathml_elements)
def function[html_to_text, parameter[cls, html]]: constant[Return stripped HTML, keeping only MathML.] variable[s] assign[=] call[name[cls], parameter[]] call[name[s].feed, parameter[name[html]]] variable[unescaped_data] assign[=] call[name[s].unescape, parameter[call[name[s].get_data, parameter[]]]] return[call[name[escape_for_xml], parameter[name[unescaped_data]]]]
keyword[def] identifier[html_to_text] ( identifier[cls] , identifier[html] ): literal[string] identifier[s] = identifier[cls] () identifier[s] . identifier[feed] ( identifier[html] ) identifier[unescaped_data] = identifier[s] . identifier[unescape] ( identifier[s] . identifier[get_data] ()) keyword[return] identifier[escape_for_xml] ( identifier[unescaped_data] , identifier[tags_to_keep] = identifier[s] . identifier[mathml_elements] )
def html_to_text(cls, html): """Return stripped HTML, keeping only MathML.""" s = cls() s.feed(html) unescaped_data = s.unescape(s.get_data()) return escape_for_xml(unescaped_data, tags_to_keep=s.mathml_elements)
def gold_parameter(time_residual):
    """stolen from thomas

    Return the mean Gaussian weight of the time residuals:
    ``sum_i exp(-t_i**2 / (2 * 1.5**2)) / N``.

    :param time_residual: array-like of time residuals
    :return: the gold parameter (a scalar)
    """
    gold = np.exp(-1 * time_residual * time_residual / (2 * 1.5 * 1.5)) / len(time_residual)
    # NOTE(review): the original computed this value and fell off the end,
    # always returning None — the result is now returned.
    return np.sum(gold)
def function[gold_parameter, parameter[time_residual]]: constant[stolen from thomas] variable[gold] assign[=] binary_operation[call[name[np].exp, parameter[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b2352f50> * name[time_residual]] * name[time_residual]] / binary_operation[binary_operation[constant[2] * constant[1.5]] * constant[1.5]]]]] / call[name[len], parameter[name[time_residual]]]] variable[gold] assign[=] call[name[np].sum, parameter[name[gold]]]
keyword[def] identifier[gold_parameter] ( identifier[time_residual] ): literal[string] identifier[gold] = identifier[np] . identifier[exp] (- literal[int] * identifier[time_residual] * identifier[time_residual] / ( literal[int] * literal[int] * literal[int] ))/ identifier[len] ( identifier[time_residual] ) identifier[gold] = identifier[np] . identifier[sum] ( identifier[gold] )
def gold_parameter(time_residual): """stolen from thomas""" gold = np.exp(-1 * time_residual * time_residual / (2 * 1.5 * 1.5)) / len(time_residual) gold = np.sum(gold)
def _request(self, method, *relative_path_parts, **kwargs):
    """Sends an HTTP request to the REST API and receives the requested data.

    :param str method: HTTP method name
    :param relative_path_parts: the relative paths for the request URI
    :param kwargs: argument keywords (``params`` is forwarded as the
        query string)
    :returns: requested data
    :raises APIError: for non-2xx responses
    """
    uri = self._create_api_uri(*relative_path_parts)
    params = kwargs.get('params', None)
    response = get(uri, params=params)
    handled = self._handle_response(response)
    return handled.json()
def function[_request, parameter[self, method]]: constant[Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses ] variable[uri] assign[=] call[name[self]._create_api_uri, parameter[<ast.Starred object at 0x7da18bcc99f0>]] variable[response] assign[=] call[name[get], parameter[name[uri]]] return[call[call[name[self]._handle_response, parameter[name[response]]].json, parameter[]]]
keyword[def] identifier[_request] ( identifier[self] , identifier[method] ,* identifier[relative_path_parts] ,** identifier[kwargs] ): literal[string] identifier[uri] = identifier[self] . identifier[_create_api_uri] (* identifier[relative_path_parts] ) identifier[response] = identifier[get] ( identifier[uri] , identifier[params] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )) keyword[return] identifier[self] . identifier[_handle_response] ( identifier[response] ). identifier[json] ()
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=kwargs.get('params', None)) return self._handle_response(response).json()
def _bright_star_match( self, matchedObjects, catalogueName, magnitudeLimitFilter, lowerMagnitudeLimit): """*perform a bright star match on the crossmatch results if required by the catalogue search* **Key Arguments:** - ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch - ``catalogueName`` -- the name of the catalogue the crossmatch results from - ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on - ``lowerMagnitudeLimit`` -- the lower magnitude limit to match bright stars against **Return:** - ``brightStarMatches`` -- the trimmed matched sources (bright stars associations only) .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``_bright_star_match`` method') import decimal decimal.getcontext().prec = 10 # MATCH BRIGHT STAR ASSOCIATIONS brightStarMatches = [] for row in matchedObjects: mag = decimal.Decimal(row[magnitudeLimitFilter]) if mag and mag < lowerMagnitudeLimit: sep = decimal.Decimal(row["separationArcsec"]) if sep < decimal.Decimal(decimal.Decimal(10)**(-decimal.Decimal(0.2) * mag + decimal.Decimal(3.7))) and sep < 20.: brightStarMatches.append(row) self.log.debug('completed the ``_bright_star_match`` method') return brightStarMatches
def function[_bright_star_match, parameter[self, matchedObjects, catalogueName, magnitudeLimitFilter, lowerMagnitudeLimit]]: constant[*perform a bright star match on the crossmatch results if required by the catalogue search* **Key Arguments:** - ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch - ``catalogueName`` -- the name of the catalogue the crossmatch results from - ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on - ``lowerMagnitudeLimit`` -- the lower magnitude limit to match bright stars against **Return:** - ``brightStarMatches`` -- the trimmed matched sources (bright stars associations only) .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring ] call[name[self].log.debug, parameter[constant[starting the ``_bright_star_match`` method]]] import module[decimal] call[name[decimal].getcontext, parameter[]].prec assign[=] constant[10] variable[brightStarMatches] assign[=] list[[]] for taget[name[row]] in starred[name[matchedObjects]] begin[:] variable[mag] assign[=] call[name[decimal].Decimal, parameter[call[name[row]][name[magnitudeLimitFilter]]]] if <ast.BoolOp object at 0x7da1b1a74580> begin[:] variable[sep] assign[=] call[name[decimal].Decimal, parameter[call[name[row]][constant[separationArcsec]]]] if <ast.BoolOp object at 0x7da18bcc8850> begin[:] call[name[brightStarMatches].append, parameter[name[row]]] call[name[self].log.debug, parameter[constant[completed the ``_bright_star_match`` method]]] return[name[brightStarMatches]]
keyword[def] identifier[_bright_star_match] ( identifier[self] , identifier[matchedObjects] , identifier[catalogueName] , identifier[magnitudeLimitFilter] , identifier[lowerMagnitudeLimit] ): literal[string] identifier[self] . identifier[log] . identifier[debug] ( literal[string] ) keyword[import] identifier[decimal] identifier[decimal] . identifier[getcontext] (). identifier[prec] = literal[int] identifier[brightStarMatches] =[] keyword[for] identifier[row] keyword[in] identifier[matchedObjects] : identifier[mag] = identifier[decimal] . identifier[Decimal] ( identifier[row] [ identifier[magnitudeLimitFilter] ]) keyword[if] identifier[mag] keyword[and] identifier[mag] < identifier[lowerMagnitudeLimit] : identifier[sep] = identifier[decimal] . identifier[Decimal] ( identifier[row] [ literal[string] ]) keyword[if] identifier[sep] < identifier[decimal] . identifier[Decimal] ( identifier[decimal] . identifier[Decimal] ( literal[int] )**(- identifier[decimal] . identifier[Decimal] ( literal[int] )* identifier[mag] + identifier[decimal] . identifier[Decimal] ( literal[int] ))) keyword[and] identifier[sep] < literal[int] : identifier[brightStarMatches] . identifier[append] ( identifier[row] ) identifier[self] . identifier[log] . identifier[debug] ( literal[string] ) keyword[return] identifier[brightStarMatches]
def _bright_star_match(self, matchedObjects, catalogueName, magnitudeLimitFilter, lowerMagnitudeLimit): """*perform a bright star match on the crossmatch results if required by the catalogue search* **Key Arguments:** - ``matchedObjects`` -- the list of matched sources from the catalogue crossmatch - ``catalogueName`` -- the name of the catalogue the crossmatch results from - ``magnitudeLimitFilter`` -- the name of the column containing the magnitude to filter on - ``lowerMagnitudeLimit`` -- the lower magnitude limit to match bright stars against **Return:** - ``brightStarMatches`` -- the trimmed matched sources (bright stars associations only) .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``_bright_star_match`` method') import decimal decimal.getcontext().prec = 10 # MATCH BRIGHT STAR ASSOCIATIONS brightStarMatches = [] for row in matchedObjects: mag = decimal.Decimal(row[magnitudeLimitFilter]) if mag and mag < lowerMagnitudeLimit: sep = decimal.Decimal(row['separationArcsec']) if sep < decimal.Decimal(decimal.Decimal(10) ** (-decimal.Decimal(0.2) * mag + decimal.Decimal(3.7))) and sep < 20.0: brightStarMatches.append(row) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] self.log.debug('completed the ``_bright_star_match`` method') return brightStarMatches
def pinyin_syllable_to_ipa(s):
    """Convert Pinyin syllable *s* to an IPA syllable."""
    pinyin_syllable, tone = _parse_pinyin_syllable(s)
    key = pinyin_syllable.lower()
    try:
        ipa_base = _PINYIN_MAP[key]['IPA']
    except KeyError:
        # Unknown syllable (or entry without an IPA mapping).
        raise ValueError('Not a valid syllable: %s' % s)
    return ipa_base + _IPA_TONES[tone]
def function[pinyin_syllable_to_ipa, parameter[s]]: constant[Convert Pinyin syllable *s* to an IPA syllable.] <ast.Tuple object at 0x7da18fe90100> assign[=] call[name[_parse_pinyin_syllable], parameter[name[s]]] <ast.Try object at 0x7da18fe93f40> return[binary_operation[name[ipa_syllable] + call[name[_IPA_TONES]][name[tone]]]]
keyword[def] identifier[pinyin_syllable_to_ipa] ( identifier[s] ): literal[string] identifier[pinyin_syllable] , identifier[tone] = identifier[_parse_pinyin_syllable] ( identifier[s] ) keyword[try] : identifier[ipa_syllable] = identifier[_PINYIN_MAP] [ identifier[pinyin_syllable] . identifier[lower] ()][ literal[string] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[s] ) keyword[return] identifier[ipa_syllable] + identifier[_IPA_TONES] [ identifier[tone] ]
def pinyin_syllable_to_ipa(s): """Convert Pinyin syllable *s* to an IPA syllable.""" (pinyin_syllable, tone) = _parse_pinyin_syllable(s) try: ipa_syllable = _PINYIN_MAP[pinyin_syllable.lower()]['IPA'] # depends on [control=['try'], data=[]] except KeyError: raise ValueError('Not a valid syllable: %s' % s) # depends on [control=['except'], data=[]] return ipa_syllable + _IPA_TONES[tone]
def uninstall(self, package):
    """Uninstalls the given package (given in pip's package syntax or a
    tuple of ('name', 'ver')) from this virtual environment."""
    # Normalise ('name', 'ver') tuples to pip's "name==ver" syntax.
    name = '=='.join(package) if isinstance(package, tuple) else package
    if not self.is_installed(name):
        self._write_to_log('%s is not installed, skipping' % name)
        return
    try:
        self._execute_pip(['uninstall', '-y', name])
    except subprocess.CalledProcessError as e:
        raise PackageRemovalException((e.returncode, e.output, name))
def function[uninstall, parameter[self, package]]: constant[Uninstalls the given package (given in pip's package syntax or a tuple of ('name', 'ver')) from this virtual environment.] if call[name[isinstance], parameter[name[package], name[tuple]]] begin[:] variable[package] assign[=] call[constant[==].join, parameter[name[package]]] if <ast.UnaryOp object at 0x7da1b26141f0> begin[:] call[name[self]._write_to_log, parameter[binary_operation[constant[%s is not installed, skipping] <ast.Mod object at 0x7da2590d6920> name[package]]]] return[None] <ast.Try object at 0x7da1b26158d0>
keyword[def] identifier[uninstall] ( identifier[self] , identifier[package] ): literal[string] keyword[if] identifier[isinstance] ( identifier[package] , identifier[tuple] ): identifier[package] = literal[string] . identifier[join] ( identifier[package] ) keyword[if] keyword[not] identifier[self] . identifier[is_installed] ( identifier[package] ): identifier[self] . identifier[_write_to_log] ( literal[string] % identifier[package] ) keyword[return] keyword[try] : identifier[self] . identifier[_execute_pip] ([ literal[string] , literal[string] , identifier[package] ]) keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[e] : keyword[raise] identifier[PackageRemovalException] (( identifier[e] . identifier[returncode] , identifier[e] . identifier[output] , identifier[package] ))
def uninstall(self, package): """Uninstalls the given package (given in pip's package syntax or a tuple of ('name', 'ver')) from this virtual environment.""" if isinstance(package, tuple): package = '=='.join(package) # depends on [control=['if'], data=[]] if not self.is_installed(package): self._write_to_log('%s is not installed, skipping' % package) return # depends on [control=['if'], data=[]] try: self._execute_pip(['uninstall', '-y', package]) # depends on [control=['try'], data=[]] except subprocess.CalledProcessError as e: raise PackageRemovalException((e.returncode, e.output, package)) # depends on [control=['except'], data=['e']]
def validate(self, data, schema, **kwargs): """Validate data using schema with ``JSONResolver``.""" if not isinstance(schema, dict): schema = {'$ref': schema} return validate( data, schema, resolver=self.ref_resolver_cls.from_schema(schema), types=self.app.config.get('RECORDS_VALIDATION_TYPES', {}), **kwargs )
def function[validate, parameter[self, data, schema]]: constant[Validate data using schema with ``JSONResolver``.] if <ast.UnaryOp object at 0x7da1b0c88040> begin[:] variable[schema] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c88220>], [<ast.Name object at 0x7da1b0c88fd0>]] return[call[name[validate], parameter[name[data], name[schema]]]]
keyword[def] identifier[validate] ( identifier[self] , identifier[data] , identifier[schema] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[schema] , identifier[dict] ): identifier[schema] ={ literal[string] : identifier[schema] } keyword[return] identifier[validate] ( identifier[data] , identifier[schema] , identifier[resolver] = identifier[self] . identifier[ref_resolver_cls] . identifier[from_schema] ( identifier[schema] ), identifier[types] = identifier[self] . identifier[app] . identifier[config] . identifier[get] ( literal[string] ,{}), ** identifier[kwargs] )
def validate(self, data, schema, **kwargs): """Validate data using schema with ``JSONResolver``.""" if not isinstance(schema, dict): schema = {'$ref': schema} # depends on [control=['if'], data=[]] return validate(data, schema, resolver=self.ref_resolver_cls.from_schema(schema), types=self.app.config.get('RECORDS_VALIDATION_TYPES', {}), **kwargs)
def remove_bucket(self, bucket_name): """ Remove a bucket. :param bucket_name: Bucket to remove """ is_valid_bucket_name(bucket_name) self._url_open('DELETE', bucket_name=bucket_name) # Make sure to purge bucket_name from region cache. self._delete_bucket_region(bucket_name)
def function[remove_bucket, parameter[self, bucket_name]]: constant[ Remove a bucket. :param bucket_name: Bucket to remove ] call[name[is_valid_bucket_name], parameter[name[bucket_name]]] call[name[self]._url_open, parameter[constant[DELETE]]] call[name[self]._delete_bucket_region, parameter[name[bucket_name]]]
keyword[def] identifier[remove_bucket] ( identifier[self] , identifier[bucket_name] ): literal[string] identifier[is_valid_bucket_name] ( identifier[bucket_name] ) identifier[self] . identifier[_url_open] ( literal[string] , identifier[bucket_name] = identifier[bucket_name] ) identifier[self] . identifier[_delete_bucket_region] ( identifier[bucket_name] )
def remove_bucket(self, bucket_name): """ Remove a bucket. :param bucket_name: Bucket to remove """ is_valid_bucket_name(bucket_name) self._url_open('DELETE', bucket_name=bucket_name) # Make sure to purge bucket_name from region cache. self._delete_bucket_region(bucket_name)
def on_next_button(self, event): """ update figures and text when a next button is selected """ self.do_auto_save() self.selected_meas = [] index = self.specimens.index(self.s) try: fit_index = self.pmag_results_data['specimens'][self.s].index( self.current_fit) except KeyError: fit_index = None except ValueError: fit_index = None if index == len(self.specimens)-1: index = 0 else: index += 1 # sets self.s calculates params etc. self.initialize_CART_rot(str(self.specimens[index])) self.specimens_box.SetStringSelection(str(self.s)) if fit_index != None and self.s in self.pmag_results_data['specimens']: try: self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index] except IndexError: self.current_fit = None else: self.current_fit = None if self.ie_open: self.ie.change_selected(self.current_fit) self.update_selection()
def function[on_next_button, parameter[self, event]]: constant[ update figures and text when a next button is selected ] call[name[self].do_auto_save, parameter[]] name[self].selected_meas assign[=] list[[]] variable[index] assign[=] call[name[self].specimens.index, parameter[name[self].s]] <ast.Try object at 0x7da2041dae30> if compare[name[index] equal[==] binary_operation[call[name[len], parameter[name[self].specimens]] - constant[1]]] begin[:] variable[index] assign[=] constant[0] call[name[self].initialize_CART_rot, parameter[call[name[str], parameter[call[name[self].specimens][name[index]]]]]] call[name[self].specimens_box.SetStringSelection, parameter[call[name[str], parameter[name[self].s]]]] if <ast.BoolOp object at 0x7da2041d8d90> begin[:] <ast.Try object at 0x7da2041d8310> if name[self].ie_open begin[:] call[name[self].ie.change_selected, parameter[name[self].current_fit]] call[name[self].update_selection, parameter[]]
keyword[def] identifier[on_next_button] ( identifier[self] , identifier[event] ): literal[string] identifier[self] . identifier[do_auto_save] () identifier[self] . identifier[selected_meas] =[] identifier[index] = identifier[self] . identifier[specimens] . identifier[index] ( identifier[self] . identifier[s] ) keyword[try] : identifier[fit_index] = identifier[self] . identifier[pmag_results_data] [ literal[string] ][ identifier[self] . identifier[s] ]. identifier[index] ( identifier[self] . identifier[current_fit] ) keyword[except] identifier[KeyError] : identifier[fit_index] = keyword[None] keyword[except] identifier[ValueError] : identifier[fit_index] = keyword[None] keyword[if] identifier[index] == identifier[len] ( identifier[self] . identifier[specimens] )- literal[int] : identifier[index] = literal[int] keyword[else] : identifier[index] += literal[int] identifier[self] . identifier[initialize_CART_rot] ( identifier[str] ( identifier[self] . identifier[specimens] [ identifier[index] ])) identifier[self] . identifier[specimens_box] . identifier[SetStringSelection] ( identifier[str] ( identifier[self] . identifier[s] )) keyword[if] identifier[fit_index] != keyword[None] keyword[and] identifier[self] . identifier[s] keyword[in] identifier[self] . identifier[pmag_results_data] [ literal[string] ]: keyword[try] : identifier[self] . identifier[current_fit] = identifier[self] . identifier[pmag_results_data] [ literal[string] ][ identifier[self] . identifier[s] ][ identifier[fit_index] ] keyword[except] identifier[IndexError] : identifier[self] . identifier[current_fit] = keyword[None] keyword[else] : identifier[self] . identifier[current_fit] = keyword[None] keyword[if] identifier[self] . identifier[ie_open] : identifier[self] . identifier[ie] . identifier[change_selected] ( identifier[self] . identifier[current_fit] ) identifier[self] . identifier[update_selection] ()
def on_next_button(self, event): """ update figures and text when a next button is selected """ self.do_auto_save() self.selected_meas = [] index = self.specimens.index(self.s) try: fit_index = self.pmag_results_data['specimens'][self.s].index(self.current_fit) # depends on [control=['try'], data=[]] except KeyError: fit_index = None # depends on [control=['except'], data=[]] except ValueError: fit_index = None # depends on [control=['except'], data=[]] if index == len(self.specimens) - 1: index = 0 # depends on [control=['if'], data=['index']] else: index += 1 # sets self.s calculates params etc. self.initialize_CART_rot(str(self.specimens[index])) self.specimens_box.SetStringSelection(str(self.s)) if fit_index != None and self.s in self.pmag_results_data['specimens']: try: self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index] # depends on [control=['try'], data=[]] except IndexError: self.current_fit = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: self.current_fit = None if self.ie_open: self.ie.change_selected(self.current_fit) # depends on [control=['if'], data=[]] self.update_selection()
def set_vo(vo): """ NAME: set_vo PURPOSE: set the global configuration value of vo (velocity scale) INPUT: vo - scale in km/s or astropy Quantity OUTPUT: (none) HISTORY: 2016-01-05 - Written - Bovy (UofT) """ if _APY_LOADED and isinstance(vo,units.Quantity): vo= vo.to(units.km/units.s).value __config__.set('normalization','vo',str(vo))
def function[set_vo, parameter[vo]]: constant[ NAME: set_vo PURPOSE: set the global configuration value of vo (velocity scale) INPUT: vo - scale in km/s or astropy Quantity OUTPUT: (none) HISTORY: 2016-01-05 - Written - Bovy (UofT) ] if <ast.BoolOp object at 0x7da204566c80> begin[:] variable[vo] assign[=] call[name[vo].to, parameter[binary_operation[name[units].km / name[units].s]]].value call[name[__config__].set, parameter[constant[normalization], constant[vo], call[name[str], parameter[name[vo]]]]]
keyword[def] identifier[set_vo] ( identifier[vo] ): literal[string] keyword[if] identifier[_APY_LOADED] keyword[and] identifier[isinstance] ( identifier[vo] , identifier[units] . identifier[Quantity] ): identifier[vo] = identifier[vo] . identifier[to] ( identifier[units] . identifier[km] / identifier[units] . identifier[s] ). identifier[value] identifier[__config__] . identifier[set] ( literal[string] , literal[string] , identifier[str] ( identifier[vo] ))
def set_vo(vo): """ NAME: set_vo PURPOSE: set the global configuration value of vo (velocity scale) INPUT: vo - scale in km/s or astropy Quantity OUTPUT: (none) HISTORY: 2016-01-05 - Written - Bovy (UofT) """ if _APY_LOADED and isinstance(vo, units.Quantity): vo = vo.to(units.km / units.s).value # depends on [control=['if'], data=[]] __config__.set('normalization', 'vo', str(vo))
def append_tz_timestamp(self, tag, timestamp=None, precision=3, header=False): """Append a field with a TZTimestamp value, derived from local time. :param tag: Integer or string FIX tag number. :param timestamp: Time value, see below. :param precision: Number of decimal places: 0, 3 (ms) or 6 (us). :param header: Append to FIX header if True; default to body. The `timestamp` value should be a local datetime, such as created by datetime.datetime.now(); a float, being the number of seconds since midnight 1 Jan 1970 UTC, such as returned by time.time(); or, None, in which case datetime.datetime.now() is used to get the current local time. Precision values other than zero (seconds), 3 (milliseconds), or 6 (microseconds) will raise an exception. Note that prior to FIX 5.0, only values of 0 or 3 comply with the standard.""" # Get float offset from Unix epoch. if timestamp is None: now = time.time() elif type(timestamp) is float: now = timestamp else: now = time.mktime(timestamp.timetuple()) + \ (timestamp.microsecond * 1e-6) # Get offset of local timezone east of UTC. utc = datetime.datetime.utcfromtimestamp(now) local = datetime.datetime.fromtimestamp(now) td = local - utc offset = int(((td.days * 86400) + td.seconds) / 60) s = local.strftime("%Y%m%d-%H:%M:%S") if precision == 3: s += ".%03u" % (local.microsecond / 1000) elif precision == 6: s += ".%06u" % local.microsecond elif precision != 0: raise ValueError("Precision (%u) should be one of " "0, 3 or 6 digits" % precision) s += self._tz_offset_string(offset) return self.append_pair(tag, s, header=header)
def function[append_tz_timestamp, parameter[self, tag, timestamp, precision, header]]: constant[Append a field with a TZTimestamp value, derived from local time. :param tag: Integer or string FIX tag number. :param timestamp: Time value, see below. :param precision: Number of decimal places: 0, 3 (ms) or 6 (us). :param header: Append to FIX header if True; default to body. The `timestamp` value should be a local datetime, such as created by datetime.datetime.now(); a float, being the number of seconds since midnight 1 Jan 1970 UTC, such as returned by time.time(); or, None, in which case datetime.datetime.now() is used to get the current local time. Precision values other than zero (seconds), 3 (milliseconds), or 6 (microseconds) will raise an exception. Note that prior to FIX 5.0, only values of 0 or 3 comply with the standard.] if compare[name[timestamp] is constant[None]] begin[:] variable[now] assign[=] call[name[time].time, parameter[]] variable[utc] assign[=] call[name[datetime].datetime.utcfromtimestamp, parameter[name[now]]] variable[local] assign[=] call[name[datetime].datetime.fromtimestamp, parameter[name[now]]] variable[td] assign[=] binary_operation[name[local] - name[utc]] variable[offset] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[name[td].days * constant[86400]] + name[td].seconds] / constant[60]]]] variable[s] assign[=] call[name[local].strftime, parameter[constant[%Y%m%d-%H:%M:%S]]] if compare[name[precision] equal[==] constant[3]] begin[:] <ast.AugAssign object at 0x7da1b1d06f20> <ast.AugAssign object at 0x7da1b1d05630> return[call[name[self].append_pair, parameter[name[tag], name[s]]]]
keyword[def] identifier[append_tz_timestamp] ( identifier[self] , identifier[tag] , identifier[timestamp] = keyword[None] , identifier[precision] = literal[int] , identifier[header] = keyword[False] ): literal[string] keyword[if] identifier[timestamp] keyword[is] keyword[None] : identifier[now] = identifier[time] . identifier[time] () keyword[elif] identifier[type] ( identifier[timestamp] ) keyword[is] identifier[float] : identifier[now] = identifier[timestamp] keyword[else] : identifier[now] = identifier[time] . identifier[mktime] ( identifier[timestamp] . identifier[timetuple] ())+( identifier[timestamp] . identifier[microsecond] * literal[int] ) identifier[utc] = identifier[datetime] . identifier[datetime] . identifier[utcfromtimestamp] ( identifier[now] ) identifier[local] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[now] ) identifier[td] = identifier[local] - identifier[utc] identifier[offset] = identifier[int] ((( identifier[td] . identifier[days] * literal[int] )+ identifier[td] . identifier[seconds] )/ literal[int] ) identifier[s] = identifier[local] . identifier[strftime] ( literal[string] ) keyword[if] identifier[precision] == literal[int] : identifier[s] += literal[string] %( identifier[local] . identifier[microsecond] / literal[int] ) keyword[elif] identifier[precision] == literal[int] : identifier[s] += literal[string] % identifier[local] . identifier[microsecond] keyword[elif] identifier[precision] != literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] % identifier[precision] ) identifier[s] += identifier[self] . identifier[_tz_offset_string] ( identifier[offset] ) keyword[return] identifier[self] . identifier[append_pair] ( identifier[tag] , identifier[s] , identifier[header] = identifier[header] )
def append_tz_timestamp(self, tag, timestamp=None, precision=3, header=False): """Append a field with a TZTimestamp value, derived from local time. :param tag: Integer or string FIX tag number. :param timestamp: Time value, see below. :param precision: Number of decimal places: 0, 3 (ms) or 6 (us). :param header: Append to FIX header if True; default to body. The `timestamp` value should be a local datetime, such as created by datetime.datetime.now(); a float, being the number of seconds since midnight 1 Jan 1970 UTC, such as returned by time.time(); or, None, in which case datetime.datetime.now() is used to get the current local time. Precision values other than zero (seconds), 3 (milliseconds), or 6 (microseconds) will raise an exception. Note that prior to FIX 5.0, only values of 0 or 3 comply with the standard.""" # Get float offset from Unix epoch. if timestamp is None: now = time.time() # depends on [control=['if'], data=[]] elif type(timestamp) is float: now = timestamp # depends on [control=['if'], data=[]] else: now = time.mktime(timestamp.timetuple()) + timestamp.microsecond * 1e-06 # Get offset of local timezone east of UTC. utc = datetime.datetime.utcfromtimestamp(now) local = datetime.datetime.fromtimestamp(now) td = local - utc offset = int((td.days * 86400 + td.seconds) / 60) s = local.strftime('%Y%m%d-%H:%M:%S') if precision == 3: s += '.%03u' % (local.microsecond / 1000) # depends on [control=['if'], data=[]] elif precision == 6: s += '.%06u' % local.microsecond # depends on [control=['if'], data=[]] elif precision != 0: raise ValueError('Precision (%u) should be one of 0, 3 or 6 digits' % precision) # depends on [control=['if'], data=['precision']] s += self._tz_offset_string(offset) return self.append_pair(tag, s, header=header)
def get_reference_template(self, ref_type): """Return the reference template for the type as an ordered dictionary. Zotero.item_template() caches data after the first API call. """ template = self._zotero_lib.item_template(ref_type) return OrderedDict(sorted(template.items(), key=lambda x: x[0]))
def function[get_reference_template, parameter[self, ref_type]]: constant[Return the reference template for the type as an ordered dictionary. Zotero.item_template() caches data after the first API call. ] variable[template] assign[=] call[name[self]._zotero_lib.item_template, parameter[name[ref_type]]] return[call[name[OrderedDict], parameter[call[name[sorted], parameter[call[name[template].items, parameter[]]]]]]]
keyword[def] identifier[get_reference_template] ( identifier[self] , identifier[ref_type] ): literal[string] identifier[template] = identifier[self] . identifier[_zotero_lib] . identifier[item_template] ( identifier[ref_type] ) keyword[return] identifier[OrderedDict] ( identifier[sorted] ( identifier[template] . identifier[items] (), identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]))
def get_reference_template(self, ref_type): """Return the reference template for the type as an ordered dictionary. Zotero.item_template() caches data after the first API call. """ template = self._zotero_lib.item_template(ref_type) return OrderedDict(sorted(template.items(), key=lambda x: x[0]))
def create_window(width, height, title, monitor, share): """ Creates a window and its associated context. Wrapper for: GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share); """ return _glfw.glfwCreateWindow(width, height, _to_char_p(title), monitor, share)
def function[create_window, parameter[width, height, title, monitor, share]]: constant[ Creates a window and its associated context. Wrapper for: GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share); ] return[call[name[_glfw].glfwCreateWindow, parameter[name[width], name[height], call[name[_to_char_p], parameter[name[title]]], name[monitor], name[share]]]]
keyword[def] identifier[create_window] ( identifier[width] , identifier[height] , identifier[title] , identifier[monitor] , identifier[share] ): literal[string] keyword[return] identifier[_glfw] . identifier[glfwCreateWindow] ( identifier[width] , identifier[height] , identifier[_to_char_p] ( identifier[title] ), identifier[monitor] , identifier[share] )
def create_window(width, height, title, monitor, share): """ Creates a window and its associated context. Wrapper for: GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share); """ return _glfw.glfwCreateWindow(width, height, _to_char_p(title), monitor, share)
def _validate_jpx_box_sequence(self, boxes): """Run through series of tests for JPX box legality.""" self._validate_label(boxes) self._validate_jpx_compatibility(boxes, boxes[1].compatibility_list) self._validate_singletons(boxes) self._validate_top_level(boxes)
def function[_validate_jpx_box_sequence, parameter[self, boxes]]: constant[Run through series of tests for JPX box legality.] call[name[self]._validate_label, parameter[name[boxes]]] call[name[self]._validate_jpx_compatibility, parameter[name[boxes], call[name[boxes]][constant[1]].compatibility_list]] call[name[self]._validate_singletons, parameter[name[boxes]]] call[name[self]._validate_top_level, parameter[name[boxes]]]
keyword[def] identifier[_validate_jpx_box_sequence] ( identifier[self] , identifier[boxes] ): literal[string] identifier[self] . identifier[_validate_label] ( identifier[boxes] ) identifier[self] . identifier[_validate_jpx_compatibility] ( identifier[boxes] , identifier[boxes] [ literal[int] ]. identifier[compatibility_list] ) identifier[self] . identifier[_validate_singletons] ( identifier[boxes] ) identifier[self] . identifier[_validate_top_level] ( identifier[boxes] )
def _validate_jpx_box_sequence(self, boxes): """Run through series of tests for JPX box legality.""" self._validate_label(boxes) self._validate_jpx_compatibility(boxes, boxes[1].compatibility_list) self._validate_singletons(boxes) self._validate_top_level(boxes)
def _verify_params(self): """Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used. """ reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params) if reserved_in_use: raise ValueError("Using a reserved parameter", reserved_in_use)
def function[_verify_params, parameter[self]]: constant[Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used. ] variable[reserved_in_use] assign[=] call[name[self]._RESERVED_PARAMS.intersection, parameter[name[self].extra_params]] if name[reserved_in_use] begin[:] <ast.Raise object at 0x7da2041da380>
keyword[def] identifier[_verify_params] ( identifier[self] ): literal[string] identifier[reserved_in_use] = identifier[self] . identifier[_RESERVED_PARAMS] . identifier[intersection] ( identifier[self] . identifier[extra_params] ) keyword[if] identifier[reserved_in_use] : keyword[raise] identifier[ValueError] ( literal[string] , identifier[reserved_in_use] )
def _verify_params(self): """Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used. """ reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params) if reserved_in_use: raise ValueError('Using a reserved parameter', reserved_in_use) # depends on [control=['if'], data=[]]
def gather(self): """Fetch results from separate queue """ limit = self.op_sequence - self.op_sequence_start results = MessageIterator(self.sqs, self.reduce_queue, limit) for m in results: # sequence_id from above msg_id = int(m['MessageAttributes']['sequence_id']['StringValue']) if (not msg_id > self.op_sequence_start or not msg_id <= self.op_sequence or msg_id not in self.futures): raise RuntimeError( "Concurrent queue user from different " "process or previous results") f = self.futures[msg_id] f.set_result(m) results.ack(m)
def function[gather, parameter[self]]: constant[Fetch results from separate queue ] variable[limit] assign[=] binary_operation[name[self].op_sequence - name[self].op_sequence_start] variable[results] assign[=] call[name[MessageIterator], parameter[name[self].sqs, name[self].reduce_queue, name[limit]]] for taget[name[m]] in starred[name[results]] begin[:] variable[msg_id] assign[=] call[name[int], parameter[call[call[call[name[m]][constant[MessageAttributes]]][constant[sequence_id]]][constant[StringValue]]]] if <ast.BoolOp object at 0x7da18c4ccac0> begin[:] <ast.Raise object at 0x7da1b1c3fac0> variable[f] assign[=] call[name[self].futures][name[msg_id]] call[name[f].set_result, parameter[name[m]]] call[name[results].ack, parameter[name[m]]]
keyword[def] identifier[gather] ( identifier[self] ): literal[string] identifier[limit] = identifier[self] . identifier[op_sequence] - identifier[self] . identifier[op_sequence_start] identifier[results] = identifier[MessageIterator] ( identifier[self] . identifier[sqs] , identifier[self] . identifier[reduce_queue] , identifier[limit] ) keyword[for] identifier[m] keyword[in] identifier[results] : identifier[msg_id] = identifier[int] ( identifier[m] [ literal[string] ][ literal[string] ][ literal[string] ]) keyword[if] ( keyword[not] identifier[msg_id] > identifier[self] . identifier[op_sequence_start] keyword[or] keyword[not] identifier[msg_id] <= identifier[self] . identifier[op_sequence] keyword[or] identifier[msg_id] keyword[not] keyword[in] identifier[self] . identifier[futures] ): keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] ) identifier[f] = identifier[self] . identifier[futures] [ identifier[msg_id] ] identifier[f] . identifier[set_result] ( identifier[m] ) identifier[results] . identifier[ack] ( identifier[m] )
def gather(self): """Fetch results from separate queue """ limit = self.op_sequence - self.op_sequence_start results = MessageIterator(self.sqs, self.reduce_queue, limit) for m in results: # sequence_id from above msg_id = int(m['MessageAttributes']['sequence_id']['StringValue']) if not msg_id > self.op_sequence_start or not msg_id <= self.op_sequence or msg_id not in self.futures: raise RuntimeError('Concurrent queue user from different process or previous results') # depends on [control=['if'], data=[]] f = self.futures[msg_id] f.set_result(m) results.ack(m) # depends on [control=['for'], data=['m']]
def get_line_in_facet(self, facet): """ Returns the sorted pts in a facet used to draw a line """ lines = list(facet.outer_lines) pt = [] prev = None while len(lines) > 0: if prev is None: l = lines.pop(0) else: for i, l in enumerate(lines): if prev in l: l = lines.pop(i) if l[1] == prev: l.reverse() break # make sure the lines are connected one by one. # find the way covering all pts and facets pt.append(self.wulff_pt_list[l[0]].tolist()) pt.append(self.wulff_pt_list[l[1]].tolist()) prev = l[1] return pt
def function[get_line_in_facet, parameter[self, facet]]: constant[ Returns the sorted pts in a facet used to draw a line ] variable[lines] assign[=] call[name[list], parameter[name[facet].outer_lines]] variable[pt] assign[=] list[[]] variable[prev] assign[=] constant[None] while compare[call[name[len], parameter[name[lines]]] greater[>] constant[0]] begin[:] if compare[name[prev] is constant[None]] begin[:] variable[l] assign[=] call[name[lines].pop, parameter[constant[0]]] call[name[pt].append, parameter[call[call[name[self].wulff_pt_list][call[name[l]][constant[0]]].tolist, parameter[]]]] call[name[pt].append, parameter[call[call[name[self].wulff_pt_list][call[name[l]][constant[1]]].tolist, parameter[]]]] variable[prev] assign[=] call[name[l]][constant[1]] return[name[pt]]
keyword[def] identifier[get_line_in_facet] ( identifier[self] , identifier[facet] ): literal[string] identifier[lines] = identifier[list] ( identifier[facet] . identifier[outer_lines] ) identifier[pt] =[] identifier[prev] = keyword[None] keyword[while] identifier[len] ( identifier[lines] )> literal[int] : keyword[if] identifier[prev] keyword[is] keyword[None] : identifier[l] = identifier[lines] . identifier[pop] ( literal[int] ) keyword[else] : keyword[for] identifier[i] , identifier[l] keyword[in] identifier[enumerate] ( identifier[lines] ): keyword[if] identifier[prev] keyword[in] identifier[l] : identifier[l] = identifier[lines] . identifier[pop] ( identifier[i] ) keyword[if] identifier[l] [ literal[int] ]== identifier[prev] : identifier[l] . identifier[reverse] () keyword[break] identifier[pt] . identifier[append] ( identifier[self] . identifier[wulff_pt_list] [ identifier[l] [ literal[int] ]]. identifier[tolist] ()) identifier[pt] . identifier[append] ( identifier[self] . identifier[wulff_pt_list] [ identifier[l] [ literal[int] ]]. identifier[tolist] ()) identifier[prev] = identifier[l] [ literal[int] ] keyword[return] identifier[pt]
def get_line_in_facet(self, facet): """ Returns the sorted pts in a facet used to draw a line """ lines = list(facet.outer_lines) pt = [] prev = None while len(lines) > 0: if prev is None: l = lines.pop(0) # depends on [control=['if'], data=[]] else: for (i, l) in enumerate(lines): if prev in l: l = lines.pop(i) if l[1] == prev: l.reverse() # depends on [control=['if'], data=[]] break # depends on [control=['if'], data=['prev', 'l']] # depends on [control=['for'], data=[]] # make sure the lines are connected one by one. # find the way covering all pts and facets pt.append(self.wulff_pt_list[l[0]].tolist()) pt.append(self.wulff_pt_list[l[1]].tolist()) prev = l[1] # depends on [control=['while'], data=[]] return pt
def iter_settings(mixed: Settings) -> Iterator[Tuple[str, Any]]: """Iterate over settings values from settings module or dict-like instance. :param mixed: Settings instance to iterate. """ if isinstance(mixed, types.ModuleType): for attr in dir(mixed): if not is_setting_key(attr): continue yield (attr, getattr(mixed, attr)) else: yield from filter(lambda item: is_setting_key(item[0]), mixed.items())
def function[iter_settings, parameter[mixed]]: constant[Iterate over settings values from settings module or dict-like instance. :param mixed: Settings instance to iterate. ] if call[name[isinstance], parameter[name[mixed], name[types].ModuleType]] begin[:] for taget[name[attr]] in starred[call[name[dir], parameter[name[mixed]]]] begin[:] if <ast.UnaryOp object at 0x7da1b1a3e6b0> begin[:] continue <ast.Yield object at 0x7da1b1a3d000>
keyword[def] identifier[iter_settings] ( identifier[mixed] : identifier[Settings] )-> identifier[Iterator] [ identifier[Tuple] [ identifier[str] , identifier[Any] ]]: literal[string] keyword[if] identifier[isinstance] ( identifier[mixed] , identifier[types] . identifier[ModuleType] ): keyword[for] identifier[attr] keyword[in] identifier[dir] ( identifier[mixed] ): keyword[if] keyword[not] identifier[is_setting_key] ( identifier[attr] ): keyword[continue] keyword[yield] ( identifier[attr] , identifier[getattr] ( identifier[mixed] , identifier[attr] )) keyword[else] : keyword[yield] keyword[from] identifier[filter] ( keyword[lambda] identifier[item] : identifier[is_setting_key] ( identifier[item] [ literal[int] ]), identifier[mixed] . identifier[items] ())
def iter_settings(mixed: Settings) -> Iterator[Tuple[str, Any]]: """Iterate over settings values from settings module or dict-like instance. :param mixed: Settings instance to iterate. """ if isinstance(mixed, types.ModuleType): for attr in dir(mixed): if not is_setting_key(attr): continue # depends on [control=['if'], data=[]] yield (attr, getattr(mixed, attr)) # depends on [control=['for'], data=['attr']] # depends on [control=['if'], data=[]] else: yield from filter(lambda item: is_setting_key(item[0]), mixed.items())
def text_length(elem): """Returns length of the content in this element. Return value is not correct but it is **good enough***. """ if not elem: return 0 value = elem.value() try: value = len(value) except: value = 0 try: for a in elem.elements: value += len(a.value()) except: pass return value
def function[text_length, parameter[elem]]: constant[Returns length of the content in this element. Return value is not correct but it is **good enough***. ] if <ast.UnaryOp object at 0x7da18f58d960> begin[:] return[constant[0]] variable[value] assign[=] call[name[elem].value, parameter[]] <ast.Try object at 0x7da18f58dab0> <ast.Try object at 0x7da18f58f340> return[name[value]]
keyword[def] identifier[text_length] ( identifier[elem] ): literal[string] keyword[if] keyword[not] identifier[elem] : keyword[return] literal[int] identifier[value] = identifier[elem] . identifier[value] () keyword[try] : identifier[value] = identifier[len] ( identifier[value] ) keyword[except] : identifier[value] = literal[int] keyword[try] : keyword[for] identifier[a] keyword[in] identifier[elem] . identifier[elements] : identifier[value] += identifier[len] ( identifier[a] . identifier[value] ()) keyword[except] : keyword[pass] keyword[return] identifier[value]
def text_length(elem): """Returns length of the content in this element. Return value is not correct but it is **good enough***. """ if not elem: return 0 # depends on [control=['if'], data=[]] value = elem.value() try: value = len(value) # depends on [control=['try'], data=[]] except: value = 0 # depends on [control=['except'], data=[]] try: for a in elem.elements: value += len(a.value()) # depends on [control=['for'], data=['a']] # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] return value
def _try_reconnect(self): """Try to recover an interrupted connection.""" try: if self.connection_interrupted: self.connect_direct(self.connection_string, force=True) self.connection_interrupted = False self.connected = True # Reenable streaming interface if that was open before as well if self._reports is not None: self._loop.run_coroutine(self.adapter.open_interface(0, 'streaming')) # Reenable tracing interface if that was open before as well if self._traces is not None: self._loop.run_coroutine(self.adapter.open_interface(0, 'tracing')) except HardwareError as exc: self._logger.exception("Error reconnecting to device after an unexpected disconnect") raise HardwareError("Device disconnected unexpectedly and we could not reconnect", reconnect_error=exc) from exc
def function[_try_reconnect, parameter[self]]: constant[Try to recover an interrupted connection.] <ast.Try object at 0x7da20c76d0c0>
keyword[def] identifier[_try_reconnect] ( identifier[self] ): literal[string] keyword[try] : keyword[if] identifier[self] . identifier[connection_interrupted] : identifier[self] . identifier[connect_direct] ( identifier[self] . identifier[connection_string] , identifier[force] = keyword[True] ) identifier[self] . identifier[connection_interrupted] = keyword[False] identifier[self] . identifier[connected] = keyword[True] keyword[if] identifier[self] . identifier[_reports] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_loop] . identifier[run_coroutine] ( identifier[self] . identifier[adapter] . identifier[open_interface] ( literal[int] , literal[string] )) keyword[if] identifier[self] . identifier[_traces] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_loop] . identifier[run_coroutine] ( identifier[self] . identifier[adapter] . identifier[open_interface] ( literal[int] , literal[string] )) keyword[except] identifier[HardwareError] keyword[as] identifier[exc] : identifier[self] . identifier[_logger] . identifier[exception] ( literal[string] ) keyword[raise] identifier[HardwareError] ( literal[string] , identifier[reconnect_error] = identifier[exc] ) keyword[from] identifier[exc]
def _try_reconnect(self): """Try to recover an interrupted connection.""" try: if self.connection_interrupted: self.connect_direct(self.connection_string, force=True) self.connection_interrupted = False self.connected = True # Reenable streaming interface if that was open before as well if self._reports is not None: self._loop.run_coroutine(self.adapter.open_interface(0, 'streaming')) # depends on [control=['if'], data=[]] # Reenable tracing interface if that was open before as well if self._traces is not None: self._loop.run_coroutine(self.adapter.open_interface(0, 'tracing')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except HardwareError as exc: self._logger.exception('Error reconnecting to device after an unexpected disconnect') raise HardwareError('Device disconnected unexpectedly and we could not reconnect', reconnect_error=exc) from exc # depends on [control=['except'], data=['exc']]
def strip_stop(tokens, start, result): """Remove trailing full stop from tokens.""" for e in result: for child in e.iter(): if child.text.endswith('.'): child.text = child.text[:-1] return result
def function[strip_stop, parameter[tokens, start, result]]: constant[Remove trailing full stop from tokens.] for taget[name[e]] in starred[name[result]] begin[:] for taget[name[child]] in starred[call[name[e].iter, parameter[]]] begin[:] if call[name[child].text.endswith, parameter[constant[.]]] begin[:] name[child].text assign[=] call[name[child].text][<ast.Slice object at 0x7da20e961780>] return[name[result]]
keyword[def] identifier[strip_stop] ( identifier[tokens] , identifier[start] , identifier[result] ): literal[string] keyword[for] identifier[e] keyword[in] identifier[result] : keyword[for] identifier[child] keyword[in] identifier[e] . identifier[iter] (): keyword[if] identifier[child] . identifier[text] . identifier[endswith] ( literal[string] ): identifier[child] . identifier[text] = identifier[child] . identifier[text] [:- literal[int] ] keyword[return] identifier[result]
def strip_stop(tokens, start, result): """Remove trailing full stop from tokens.""" for e in result: for child in e.iter(): if child.text.endswith('.'): child.text = child.text[:-1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['for'], data=['e']] return result
def constant_tuples_ordered_by_id(self): """ Returns ------- constants: [(str, Constant)] A list of tuples mapping strings to constants constants ordered by id """ return sorted(list(self.constant_tuple_dict), key=lambda constant_tuple: constant_tuple.constant.id)
def function[constant_tuples_ordered_by_id, parameter[self]]: constant[ Returns ------- constants: [(str, Constant)] A list of tuples mapping strings to constants constants ordered by id ] return[call[name[sorted], parameter[call[name[list], parameter[name[self].constant_tuple_dict]]]]]
keyword[def] identifier[constant_tuples_ordered_by_id] ( identifier[self] ): literal[string] keyword[return] identifier[sorted] ( identifier[list] ( identifier[self] . identifier[constant_tuple_dict] ), identifier[key] = keyword[lambda] identifier[constant_tuple] : identifier[constant_tuple] . identifier[constant] . identifier[id] )
def constant_tuples_ordered_by_id(self): """ Returns ------- constants: [(str, Constant)] A list of tuples mapping strings to constants constants ordered by id """ return sorted(list(self.constant_tuple_dict), key=lambda constant_tuple: constant_tuple.constant.id)
def commit_handler(self, cmd): """Process a CommitCommand.""" self.cmd_counts[cmd.name] += 1 self.committers.add(cmd.committer) if cmd.author is not None: self.separate_authors_found = True for fc in cmd.iter_files(): self.file_cmd_counts[fc.name] += 1 if isinstance(fc, commands.FileModifyCommand): if fc.mode & 0o111: self.executables_found = True if stat.S_ISLNK(fc.mode): self.symlinks_found = True if fc.dataref is not None: if fc.dataref[0] == ':': self._track_blob(fc.dataref) else: self.sha_blob_references = True elif isinstance(fc, commands.FileRenameCommand): self.rename_old_paths.setdefault(cmd.id, set()).add(fc.old_path) elif isinstance(fc, commands.FileCopyCommand): self.copy_source_paths.setdefault(cmd.id, set()).add(fc.src_path) # Track the heads parents = self.reftracker.track_heads(cmd) # Track the parent counts parent_count = len(parents) try: self.parent_counts[parent_count] += 1 except KeyError: self.parent_counts[parent_count] = 1 if parent_count > self.max_parent_count: self.max_parent_count = parent_count # Remember the merges if cmd.merges: #self.merges.setdefault(cmd.ref, set()).update(cmd.merges) for merge in cmd.merges: if merge in self.merges: self.merges[merge] += 1 else: self.merges[merge] = 1
def function[commit_handler, parameter[self, cmd]]: constant[Process a CommitCommand.] <ast.AugAssign object at 0x7da1b0ac8f10> call[name[self].committers.add, parameter[name[cmd].committer]] if compare[name[cmd].author is_not constant[None]] begin[:] name[self].separate_authors_found assign[=] constant[True] for taget[name[fc]] in starred[call[name[cmd].iter_files, parameter[]]] begin[:] <ast.AugAssign object at 0x7da1b0ac88e0> if call[name[isinstance], parameter[name[fc], name[commands].FileModifyCommand]] begin[:] if binary_operation[name[fc].mode <ast.BitAnd object at 0x7da2590d6b60> constant[73]] begin[:] name[self].executables_found assign[=] constant[True] if call[name[stat].S_ISLNK, parameter[name[fc].mode]] begin[:] name[self].symlinks_found assign[=] constant[True] if compare[name[fc].dataref is_not constant[None]] begin[:] if compare[call[name[fc].dataref][constant[0]] equal[==] constant[:]] begin[:] call[name[self]._track_blob, parameter[name[fc].dataref]] variable[parents] assign[=] call[name[self].reftracker.track_heads, parameter[name[cmd]]] variable[parent_count] assign[=] call[name[len], parameter[name[parents]]] <ast.Try object at 0x7da1b0aef520> if name[cmd].merges begin[:] for taget[name[merge]] in starred[name[cmd].merges] begin[:] if compare[name[merge] in name[self].merges] begin[:] <ast.AugAssign object at 0x7da1b0a703d0>
keyword[def] identifier[commit_handler] ( identifier[self] , identifier[cmd] ): literal[string] identifier[self] . identifier[cmd_counts] [ identifier[cmd] . identifier[name] ]+= literal[int] identifier[self] . identifier[committers] . identifier[add] ( identifier[cmd] . identifier[committer] ) keyword[if] identifier[cmd] . identifier[author] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[separate_authors_found] = keyword[True] keyword[for] identifier[fc] keyword[in] identifier[cmd] . identifier[iter_files] (): identifier[self] . identifier[file_cmd_counts] [ identifier[fc] . identifier[name] ]+= literal[int] keyword[if] identifier[isinstance] ( identifier[fc] , identifier[commands] . identifier[FileModifyCommand] ): keyword[if] identifier[fc] . identifier[mode] & literal[int] : identifier[self] . identifier[executables_found] = keyword[True] keyword[if] identifier[stat] . identifier[S_ISLNK] ( identifier[fc] . identifier[mode] ): identifier[self] . identifier[symlinks_found] = keyword[True] keyword[if] identifier[fc] . identifier[dataref] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[fc] . identifier[dataref] [ literal[int] ]== literal[string] : identifier[self] . identifier[_track_blob] ( identifier[fc] . identifier[dataref] ) keyword[else] : identifier[self] . identifier[sha_blob_references] = keyword[True] keyword[elif] identifier[isinstance] ( identifier[fc] , identifier[commands] . identifier[FileRenameCommand] ): identifier[self] . identifier[rename_old_paths] . identifier[setdefault] ( identifier[cmd] . identifier[id] , identifier[set] ()). identifier[add] ( identifier[fc] . identifier[old_path] ) keyword[elif] identifier[isinstance] ( identifier[fc] , identifier[commands] . identifier[FileCopyCommand] ): identifier[self] . identifier[copy_source_paths] . identifier[setdefault] ( identifier[cmd] . identifier[id] , identifier[set] ()). identifier[add] ( identifier[fc] . 
identifier[src_path] ) identifier[parents] = identifier[self] . identifier[reftracker] . identifier[track_heads] ( identifier[cmd] ) identifier[parent_count] = identifier[len] ( identifier[parents] ) keyword[try] : identifier[self] . identifier[parent_counts] [ identifier[parent_count] ]+= literal[int] keyword[except] identifier[KeyError] : identifier[self] . identifier[parent_counts] [ identifier[parent_count] ]= literal[int] keyword[if] identifier[parent_count] > identifier[self] . identifier[max_parent_count] : identifier[self] . identifier[max_parent_count] = identifier[parent_count] keyword[if] identifier[cmd] . identifier[merges] : keyword[for] identifier[merge] keyword[in] identifier[cmd] . identifier[merges] : keyword[if] identifier[merge] keyword[in] identifier[self] . identifier[merges] : identifier[self] . identifier[merges] [ identifier[merge] ]+= literal[int] keyword[else] : identifier[self] . identifier[merges] [ identifier[merge] ]= literal[int]
def commit_handler(self, cmd): """Process a CommitCommand.""" self.cmd_counts[cmd.name] += 1 self.committers.add(cmd.committer) if cmd.author is not None: self.separate_authors_found = True # depends on [control=['if'], data=[]] for fc in cmd.iter_files(): self.file_cmd_counts[fc.name] += 1 if isinstance(fc, commands.FileModifyCommand): if fc.mode & 73: self.executables_found = True # depends on [control=['if'], data=[]] if stat.S_ISLNK(fc.mode): self.symlinks_found = True # depends on [control=['if'], data=[]] if fc.dataref is not None: if fc.dataref[0] == ':': self._track_blob(fc.dataref) # depends on [control=['if'], data=[]] else: self.sha_blob_references = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif isinstance(fc, commands.FileRenameCommand): self.rename_old_paths.setdefault(cmd.id, set()).add(fc.old_path) # depends on [control=['if'], data=[]] elif isinstance(fc, commands.FileCopyCommand): self.copy_source_paths.setdefault(cmd.id, set()).add(fc.src_path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fc']] # Track the heads parents = self.reftracker.track_heads(cmd) # Track the parent counts parent_count = len(parents) try: self.parent_counts[parent_count] += 1 # depends on [control=['try'], data=[]] except KeyError: self.parent_counts[parent_count] = 1 if parent_count > self.max_parent_count: self.max_parent_count = parent_count # depends on [control=['if'], data=['parent_count']] # depends on [control=['except'], data=[]] # Remember the merges if cmd.merges: #self.merges.setdefault(cmd.ref, set()).update(cmd.merges) for merge in cmd.merges: if merge in self.merges: self.merges[merge] += 1 # depends on [control=['if'], data=['merge']] else: self.merges[merge] = 1 # depends on [control=['for'], data=['merge']] # depends on [control=['if'], data=[]]
def to_planar(self, to_2D=None, normal=None, check=True): """ Check to see if current vectors are all coplanar. If they are, return a Path2D and a transform which will transform the 2D representation back into 3 dimensions Parameters ----------- to_2D: (4,4) float Homogenous transformation matrix to apply, If not passed a plane will be fitted to vertices. normal: (3,) float, or None Approximate normal of direction of plane If to_2D is not specified sign will be applied to fit plane normal check: bool If True: Raise a ValueError if points aren't coplanar Returns ----------- planar : trimesh.path.Path2D Current path transformed onto plane to_3D : (4,4) float Homeogenous transformation to move planar back into 3D space """ # which vertices are actually referenced referenced = self.referenced_vertices # if nothing is referenced return an empty path if len(referenced) == 0: return Path2D(), np.eye(4) # no explicit transform passed if to_2D is None: # fit a plane to our vertices C, N = plane_fit(self.vertices[referenced]) # apply the normal sign hint if normal is not None: normal = np.asanyarray(normal, dtype=np.float64) if normal.shape == (3,): N *= np.sign(np.dot(N, normal)) N = normal else: log.warning( "passed normal not used: {}".format( normal.shape)) # create a transform from fit plane to XY to_2D = plane_transform(origin=C, normal=N) # make sure we've extracted a transform to_2D = np.asanyarray(to_2D, dtype=np.float64) if to_2D.shape != (4, 4): raise ValueError('unable to create transform!') # transform all vertices to 2D plane flat = transformations.transform_points(self.vertices, to_2D) # Z values of vertices which are referenced heights = flat[referenced][:, 2] # points are not on a plane because Z varies if heights.ptp() > tol.planar: # since Z is inconsistent set height to zero height = 0.0 if check: raise ValueError('points are not flat!') else: # if the points were planar store the height height = heights.mean() # the transform from 2D to 3D to_3D = 
np.linalg.inv(to_2D) # if the transform didn't move the path to # exactly Z=0 adjust it so the returned transform does if np.abs(height) > tol.planar: # adjust to_3D transform by height adjust = transformations.translation_matrix( [0, 0, height]) # apply the height adjustment to_3D to_3D = np.dot(to_3D, adjust) # copy metadata to new object metadata = copy.deepcopy(self.metadata) # store transform we used to move it onto the plane metadata['to_3D'] = to_3D # create the Path2D with the same entities # and XY values of vertices projected onto the plane planar = Path2D(entities=copy.deepcopy(self.entities), vertices=flat[:, :2], metadata=metadata, process=False) return planar, to_3D
def function[to_planar, parameter[self, to_2D, normal, check]]: constant[ Check to see if current vectors are all coplanar. If they are, return a Path2D and a transform which will transform the 2D representation back into 3 dimensions Parameters ----------- to_2D: (4,4) float Homogenous transformation matrix to apply, If not passed a plane will be fitted to vertices. normal: (3,) float, or None Approximate normal of direction of plane If to_2D is not specified sign will be applied to fit plane normal check: bool If True: Raise a ValueError if points aren't coplanar Returns ----------- planar : trimesh.path.Path2D Current path transformed onto plane to_3D : (4,4) float Homeogenous transformation to move planar back into 3D space ] variable[referenced] assign[=] name[self].referenced_vertices if compare[call[name[len], parameter[name[referenced]]] equal[==] constant[0]] begin[:] return[tuple[[<ast.Call object at 0x7da2047ebd90>, <ast.Call object at 0x7da2047ea470>]]] if compare[name[to_2D] is constant[None]] begin[:] <ast.Tuple object at 0x7da2047e8df0> assign[=] call[name[plane_fit], parameter[call[name[self].vertices][name[referenced]]]] if compare[name[normal] is_not constant[None]] begin[:] variable[normal] assign[=] call[name[np].asanyarray, parameter[name[normal]]] if compare[name[normal].shape equal[==] tuple[[<ast.Constant object at 0x7da2047eb4f0>]]] begin[:] <ast.AugAssign object at 0x7da2047eb010> variable[N] assign[=] name[normal] variable[to_2D] assign[=] call[name[plane_transform], parameter[]] variable[to_2D] assign[=] call[name[np].asanyarray, parameter[name[to_2D]]] if compare[name[to_2D].shape not_equal[!=] tuple[[<ast.Constant object at 0x7da18ede70d0>, <ast.Constant object at 0x7da18ede4bb0>]]] begin[:] <ast.Raise object at 0x7da18ede6e90> variable[flat] assign[=] call[name[transformations].transform_points, parameter[name[self].vertices, name[to_2D]]] variable[heights] assign[=] call[call[name[flat]][name[referenced]]][tuple[[<ast.Slice object at 
0x7da18ede5420>, <ast.Constant object at 0x7da18ede4610>]]] if compare[call[name[heights].ptp, parameter[]] greater[>] name[tol].planar] begin[:] variable[height] assign[=] constant[0.0] if name[check] begin[:] <ast.Raise object at 0x7da18ede7010> variable[to_3D] assign[=] call[name[np].linalg.inv, parameter[name[to_2D]]] if compare[call[name[np].abs, parameter[name[height]]] greater[>] name[tol].planar] begin[:] variable[adjust] assign[=] call[name[transformations].translation_matrix, parameter[list[[<ast.Constant object at 0x7da20c76ff40>, <ast.Constant object at 0x7da20c76ed70>, <ast.Name object at 0x7da20c76e050>]]]] variable[to_3D] assign[=] call[name[np].dot, parameter[name[to_3D], name[adjust]]] variable[metadata] assign[=] call[name[copy].deepcopy, parameter[name[self].metadata]] call[name[metadata]][constant[to_3D]] assign[=] name[to_3D] variable[planar] assign[=] call[name[Path2D], parameter[]] return[tuple[[<ast.Name object at 0x7da20c76e680>, <ast.Name object at 0x7da20c76f7f0>]]]
keyword[def] identifier[to_planar] ( identifier[self] , identifier[to_2D] = keyword[None] , identifier[normal] = keyword[None] , identifier[check] = keyword[True] ): literal[string] identifier[referenced] = identifier[self] . identifier[referenced_vertices] keyword[if] identifier[len] ( identifier[referenced] )== literal[int] : keyword[return] identifier[Path2D] (), identifier[np] . identifier[eye] ( literal[int] ) keyword[if] identifier[to_2D] keyword[is] keyword[None] : identifier[C] , identifier[N] = identifier[plane_fit] ( identifier[self] . identifier[vertices] [ identifier[referenced] ]) keyword[if] identifier[normal] keyword[is] keyword[not] keyword[None] : identifier[normal] = identifier[np] . identifier[asanyarray] ( identifier[normal] , identifier[dtype] = identifier[np] . identifier[float64] ) keyword[if] identifier[normal] . identifier[shape] ==( literal[int] ,): identifier[N] *= identifier[np] . identifier[sign] ( identifier[np] . identifier[dot] ( identifier[N] , identifier[normal] )) identifier[N] = identifier[normal] keyword[else] : identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[normal] . identifier[shape] )) identifier[to_2D] = identifier[plane_transform] ( identifier[origin] = identifier[C] , identifier[normal] = identifier[N] ) identifier[to_2D] = identifier[np] . identifier[asanyarray] ( identifier[to_2D] , identifier[dtype] = identifier[np] . identifier[float64] ) keyword[if] identifier[to_2D] . identifier[shape] !=( literal[int] , literal[int] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[flat] = identifier[transformations] . identifier[transform_points] ( identifier[self] . identifier[vertices] , identifier[to_2D] ) identifier[heights] = identifier[flat] [ identifier[referenced] ][:, literal[int] ] keyword[if] identifier[heights] . identifier[ptp] ()> identifier[tol] . 
identifier[planar] : identifier[height] = literal[int] keyword[if] identifier[check] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : identifier[height] = identifier[heights] . identifier[mean] () identifier[to_3D] = identifier[np] . identifier[linalg] . identifier[inv] ( identifier[to_2D] ) keyword[if] identifier[np] . identifier[abs] ( identifier[height] )> identifier[tol] . identifier[planar] : identifier[adjust] = identifier[transformations] . identifier[translation_matrix] ( [ literal[int] , literal[int] , identifier[height] ]) identifier[to_3D] = identifier[np] . identifier[dot] ( identifier[to_3D] , identifier[adjust] ) identifier[metadata] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[metadata] ) identifier[metadata] [ literal[string] ]= identifier[to_3D] identifier[planar] = identifier[Path2D] ( identifier[entities] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[entities] ), identifier[vertices] = identifier[flat] [:,: literal[int] ], identifier[metadata] = identifier[metadata] , identifier[process] = keyword[False] ) keyword[return] identifier[planar] , identifier[to_3D]
def to_planar(self, to_2D=None, normal=None, check=True): """ Check to see if current vectors are all coplanar. If they are, return a Path2D and a transform which will transform the 2D representation back into 3 dimensions Parameters ----------- to_2D: (4,4) float Homogenous transformation matrix to apply, If not passed a plane will be fitted to vertices. normal: (3,) float, or None Approximate normal of direction of plane If to_2D is not specified sign will be applied to fit plane normal check: bool If True: Raise a ValueError if points aren't coplanar Returns ----------- planar : trimesh.path.Path2D Current path transformed onto plane to_3D : (4,4) float Homeogenous transformation to move planar back into 3D space """ # which vertices are actually referenced referenced = self.referenced_vertices # if nothing is referenced return an empty path if len(referenced) == 0: return (Path2D(), np.eye(4)) # depends on [control=['if'], data=[]] # no explicit transform passed if to_2D is None: # fit a plane to our vertices (C, N) = plane_fit(self.vertices[referenced]) # apply the normal sign hint if normal is not None: normal = np.asanyarray(normal, dtype=np.float64) if normal.shape == (3,): N *= np.sign(np.dot(N, normal)) N = normal # depends on [control=['if'], data=[]] else: log.warning('passed normal not used: {}'.format(normal.shape)) # depends on [control=['if'], data=['normal']] # create a transform from fit plane to XY to_2D = plane_transform(origin=C, normal=N) # depends on [control=['if'], data=['to_2D']] # make sure we've extracted a transform to_2D = np.asanyarray(to_2D, dtype=np.float64) if to_2D.shape != (4, 4): raise ValueError('unable to create transform!') # depends on [control=['if'], data=[]] # transform all vertices to 2D plane flat = transformations.transform_points(self.vertices, to_2D) # Z values of vertices which are referenced heights = flat[referenced][:, 2] # points are not on a plane because Z varies if heights.ptp() > tol.planar: # since Z is 
inconsistent set height to zero height = 0.0 if check: raise ValueError('points are not flat!') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # if the points were planar store the height height = heights.mean() # the transform from 2D to 3D to_3D = np.linalg.inv(to_2D) # if the transform didn't move the path to # exactly Z=0 adjust it so the returned transform does if np.abs(height) > tol.planar: # adjust to_3D transform by height adjust = transformations.translation_matrix([0, 0, height]) # apply the height adjustment to_3D to_3D = np.dot(to_3D, adjust) # depends on [control=['if'], data=[]] # copy metadata to new object metadata = copy.deepcopy(self.metadata) # store transform we used to move it onto the plane metadata['to_3D'] = to_3D # create the Path2D with the same entities # and XY values of vertices projected onto the plane planar = Path2D(entities=copy.deepcopy(self.entities), vertices=flat[:, :2], metadata=metadata, process=False) return (planar, to_3D)
def sg_train(**kwargs): r"""Trains the model. Args: **kwargs: optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', 'RMSProp' or 'sgd'. loss: A 0-D `Tensor` containing the value to minimize. lr: A Python Scalar (optional). Learning rate. Default is .001. beta1: A Python Scalar (optional). Default is .9. beta2: A Python Scalar (optional). Default is .99. save_dir: A string. The root path to which checkpoint and log files are saved. Default is `asset/train`. max_ep: A positive integer. Maximum number of epochs. Default is 1000. ep_size: A positive integer. Number of Total batches in an epoch. For proper display of log. Default is 1e5. save_interval: A Python scalar. The interval of saving checkpoint files. By default, for every 600 seconds, a checkpoint file is written. log_interval: A Python scalar. The interval of recoding logs. By default, for every 60 seconds, logging is executed. max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5. keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour. category: Scope name or list to train eval_metric: A list of tensors containing the value to evaluate. Default is []. tqdm: Boolean. If True (Default), progress bars are shown. If False, a series of loss will be shown on the console. """ opt = tf.sg_opt(kwargs) assert opt.loss is not None, 'loss is mandatory.' # default training options opt += tf.sg_opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='', ep_size=100000) # get optimizer train_op = sg_optim(opt.loss, optim=opt.optim, lr=0.001, beta1=opt.beta1, beta2=opt.beta2, category=opt.category) # for console logging loss_ = opt.loss # use only first loss when multiple GPU case if isinstance(opt.loss, (tuple, list)): loss_ = opt.loss[0] # define train function # noinspection PyUnusedLocal @sg_train_func def train_func(sess, arg): return sess.run([loss_, train_op])[0] # run train function train_func(**opt)
def function[sg_train, parameter[]]: constant[Trains the model. Args: **kwargs: optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', 'RMSProp' or 'sgd'. loss: A 0-D `Tensor` containing the value to minimize. lr: A Python Scalar (optional). Learning rate. Default is .001. beta1: A Python Scalar (optional). Default is .9. beta2: A Python Scalar (optional). Default is .99. save_dir: A string. The root path to which checkpoint and log files are saved. Default is `asset/train`. max_ep: A positive integer. Maximum number of epochs. Default is 1000. ep_size: A positive integer. Number of Total batches in an epoch. For proper display of log. Default is 1e5. save_interval: A Python scalar. The interval of saving checkpoint files. By default, for every 600 seconds, a checkpoint file is written. log_interval: A Python scalar. The interval of recoding logs. By default, for every 60 seconds, logging is executed. max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5. keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour. category: Scope name or list to train eval_metric: A list of tensors containing the value to evaluate. Default is []. tqdm: Boolean. If True (Default), progress bars are shown. If False, a series of loss will be shown on the console. 
] variable[opt] assign[=] call[name[tf].sg_opt, parameter[name[kwargs]]] assert[compare[name[opt].loss is_not constant[None]]] <ast.AugAssign object at 0x7da2041daf50> variable[train_op] assign[=] call[name[sg_optim], parameter[name[opt].loss]] variable[loss_] assign[=] name[opt].loss if call[name[isinstance], parameter[name[opt].loss, tuple[[<ast.Name object at 0x7da1b1234f70>, <ast.Name object at 0x7da1b1235ab0>]]]] begin[:] variable[loss_] assign[=] call[name[opt].loss][constant[0]] def function[train_func, parameter[sess, arg]]: return[call[call[name[sess].run, parameter[list[[<ast.Name object at 0x7da1b1234730>, <ast.Name object at 0x7da1b1234fd0>]]]]][constant[0]]] call[name[train_func], parameter[]]
keyword[def] identifier[sg_train] (** identifier[kwargs] ): literal[string] identifier[opt] = identifier[tf] . identifier[sg_opt] ( identifier[kwargs] ) keyword[assert] identifier[opt] . identifier[loss] keyword[is] keyword[not] keyword[None] , literal[string] identifier[opt] += identifier[tf] . identifier[sg_opt] ( identifier[optim] = literal[string] , identifier[lr] = literal[int] , identifier[beta1] = literal[int] , identifier[beta2] = literal[int] , identifier[category] = literal[string] , identifier[ep_size] = literal[int] ) identifier[train_op] = identifier[sg_optim] ( identifier[opt] . identifier[loss] , identifier[optim] = identifier[opt] . identifier[optim] , identifier[lr] = literal[int] , identifier[beta1] = identifier[opt] . identifier[beta1] , identifier[beta2] = identifier[opt] . identifier[beta2] , identifier[category] = identifier[opt] . identifier[category] ) identifier[loss_] = identifier[opt] . identifier[loss] keyword[if] identifier[isinstance] ( identifier[opt] . identifier[loss] ,( identifier[tuple] , identifier[list] )): identifier[loss_] = identifier[opt] . identifier[loss] [ literal[int] ] @ identifier[sg_train_func] keyword[def] identifier[train_func] ( identifier[sess] , identifier[arg] ): keyword[return] identifier[sess] . identifier[run] ([ identifier[loss_] , identifier[train_op] ])[ literal[int] ] identifier[train_func] (** identifier[opt] )
def sg_train(**kwargs): """Trains the model. Args: **kwargs: optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', 'RMSProp' or 'sgd'. loss: A 0-D `Tensor` containing the value to minimize. lr: A Python Scalar (optional). Learning rate. Default is .001. beta1: A Python Scalar (optional). Default is .9. beta2: A Python Scalar (optional). Default is .99. save_dir: A string. The root path to which checkpoint and log files are saved. Default is `asset/train`. max_ep: A positive integer. Maximum number of epochs. Default is 1000. ep_size: A positive integer. Number of Total batches in an epoch. For proper display of log. Default is 1e5. save_interval: A Python scalar. The interval of saving checkpoint files. By default, for every 600 seconds, a checkpoint file is written. log_interval: A Python scalar. The interval of recoding logs. By default, for every 60 seconds, logging is executed. max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5. keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour. category: Scope name or list to train eval_metric: A list of tensors containing the value to evaluate. Default is []. tqdm: Boolean. If True (Default), progress bars are shown. If False, a series of loss will be shown on the console. """ opt = tf.sg_opt(kwargs) assert opt.loss is not None, 'loss is mandatory.' # default training options opt += tf.sg_opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='', ep_size=100000) # get optimizer train_op = sg_optim(opt.loss, optim=opt.optim, lr=0.001, beta1=opt.beta1, beta2=opt.beta2, category=opt.category) # for console logging loss_ = opt.loss # use only first loss when multiple GPU case if isinstance(opt.loss, (tuple, list)): loss_ = opt.loss[0] # depends on [control=['if'], data=[]] # define train function # noinspection PyUnusedLocal @sg_train_func def train_func(sess, arg): return sess.run([loss_, train_op])[0] # run train function train_func(**opt)
def _expopts(self, opts): """ :param opts: a dictionary containing any of the following Proc Export options(delimiter, putnames): - delimiter is a character - putnames is a boolean .. code-block:: python {'delimiter' : ','' 'putnames' : True } :return: str """ optstr = '' if len(opts): for key in opts: if len(str(opts[key])): if key == 'delimiter': optstr += 'delimiter=' optstr += "'" + '%02x' % ord(opts[key].encode(self._io.sascfg.encoding)) + "'x; " elif key == 'putnames': optstr += 'putnames=' if opts[key]: optstr += 'YES; ' else: optstr += 'NO; ' return optstr
def function[_expopts, parameter[self, opts]]: constant[ :param opts: a dictionary containing any of the following Proc Export options(delimiter, putnames): - delimiter is a character - putnames is a boolean .. code-block:: python {'delimiter' : ','' 'putnames' : True } :return: str ] variable[optstr] assign[=] constant[] if call[name[len], parameter[name[opts]]] begin[:] for taget[name[key]] in starred[name[opts]] begin[:] if call[name[len], parameter[call[name[str], parameter[call[name[opts]][name[key]]]]]] begin[:] if compare[name[key] equal[==] constant[delimiter]] begin[:] <ast.AugAssign object at 0x7da20c7cb370> <ast.AugAssign object at 0x7da20c7c9660> return[name[optstr]]
keyword[def] identifier[_expopts] ( identifier[self] , identifier[opts] ): literal[string] identifier[optstr] = literal[string] keyword[if] identifier[len] ( identifier[opts] ): keyword[for] identifier[key] keyword[in] identifier[opts] : keyword[if] identifier[len] ( identifier[str] ( identifier[opts] [ identifier[key] ])): keyword[if] identifier[key] == literal[string] : identifier[optstr] += literal[string] identifier[optstr] += literal[string] + literal[string] % identifier[ord] ( identifier[opts] [ identifier[key] ]. identifier[encode] ( identifier[self] . identifier[_io] . identifier[sascfg] . identifier[encoding] ))+ literal[string] keyword[elif] identifier[key] == literal[string] : identifier[optstr] += literal[string] keyword[if] identifier[opts] [ identifier[key] ]: identifier[optstr] += literal[string] keyword[else] : identifier[optstr] += literal[string] keyword[return] identifier[optstr]
def _expopts(self, opts): """ :param opts: a dictionary containing any of the following Proc Export options(delimiter, putnames): - delimiter is a character - putnames is a boolean .. code-block:: python {'delimiter' : ','' 'putnames' : True } :return: str """ optstr = '' if len(opts): for key in opts: if len(str(opts[key])): if key == 'delimiter': optstr += 'delimiter=' optstr += "'" + '%02x' % ord(opts[key].encode(self._io.sascfg.encoding)) + "'x; " # depends on [control=['if'], data=['key']] elif key == 'putnames': optstr += 'putnames=' if opts[key]: optstr += 'YES; ' # depends on [control=['if'], data=[]] else: optstr += 'NO; ' # depends on [control=['if'], data=['key']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] return optstr
def R_isrk(self, k): """ Function returns the inverse square root of R matrix on step k. """ ind = int(self.index[self.R_time_var_index, k]) R = self.R[:, :, ind] if (R.shape[0] == 1): # 1-D case handle simplier. No storage # of the result, just compute it each time. inv_square_root = np.sqrt(1.0/R) else: if self.svd_each_time: (U, S, Vh) = sp.linalg.svd(R, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=True) inv_square_root = U * 1.0/np.sqrt(S) else: if ind in self.R_square_root: inv_square_root = self.R_square_root[ind] else: (U, S, Vh) = sp.linalg.svd(R, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=True) inv_square_root = U * 1.0/np.sqrt(S) self.R_square_root[ind] = inv_square_root return inv_square_root
def function[R_isrk, parameter[self, k]]: constant[ Function returns the inverse square root of R matrix on step k. ] variable[ind] assign[=] call[name[int], parameter[call[name[self].index][tuple[[<ast.Attribute object at 0x7da18fe93e20>, <ast.Name object at 0x7da18fe93460>]]]]] variable[R] assign[=] call[name[self].R][tuple[[<ast.Slice object at 0x7da18fe91de0>, <ast.Slice object at 0x7da18fe90a30>, <ast.Name object at 0x7da18fe92410>]]] if compare[call[name[R].shape][constant[0]] equal[==] constant[1]] begin[:] variable[inv_square_root] assign[=] call[name[np].sqrt, parameter[binary_operation[constant[1.0] / name[R]]]] return[name[inv_square_root]]
keyword[def] identifier[R_isrk] ( identifier[self] , identifier[k] ): literal[string] identifier[ind] = identifier[int] ( identifier[self] . identifier[index] [ identifier[self] . identifier[R_time_var_index] , identifier[k] ]) identifier[R] = identifier[self] . identifier[R] [:,:, identifier[ind] ] keyword[if] ( identifier[R] . identifier[shape] [ literal[int] ]== literal[int] ): identifier[inv_square_root] = identifier[np] . identifier[sqrt] ( literal[int] / identifier[R] ) keyword[else] : keyword[if] identifier[self] . identifier[svd_each_time] : ( identifier[U] , identifier[S] , identifier[Vh] )= identifier[sp] . identifier[linalg] . identifier[svd] ( identifier[R] , identifier[full_matrices] = keyword[False] , identifier[compute_uv] = keyword[True] , identifier[overwrite_a] = keyword[False] , identifier[check_finite] = keyword[True] ) identifier[inv_square_root] = identifier[U] * literal[int] / identifier[np] . identifier[sqrt] ( identifier[S] ) keyword[else] : keyword[if] identifier[ind] keyword[in] identifier[self] . identifier[R_square_root] : identifier[inv_square_root] = identifier[self] . identifier[R_square_root] [ identifier[ind] ] keyword[else] : ( identifier[U] , identifier[S] , identifier[Vh] )= identifier[sp] . identifier[linalg] . identifier[svd] ( identifier[R] , identifier[full_matrices] = keyword[False] , identifier[compute_uv] = keyword[True] , identifier[overwrite_a] = keyword[False] , identifier[check_finite] = keyword[True] ) identifier[inv_square_root] = identifier[U] * literal[int] / identifier[np] . identifier[sqrt] ( identifier[S] ) identifier[self] . identifier[R_square_root] [ identifier[ind] ]= identifier[inv_square_root] keyword[return] identifier[inv_square_root]
def R_isrk(self, k): """ Function returns the inverse square root of R matrix on step k. """ ind = int(self.index[self.R_time_var_index, k]) R = self.R[:, :, ind] if R.shape[0] == 1: # 1-D case handle simplier. No storage # of the result, just compute it each time. inv_square_root = np.sqrt(1.0 / R) # depends on [control=['if'], data=[]] elif self.svd_each_time: (U, S, Vh) = sp.linalg.svd(R, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=True) inv_square_root = U * 1.0 / np.sqrt(S) # depends on [control=['if'], data=[]] elif ind in self.R_square_root: inv_square_root = self.R_square_root[ind] # depends on [control=['if'], data=['ind']] else: (U, S, Vh) = sp.linalg.svd(R, full_matrices=False, compute_uv=True, overwrite_a=False, check_finite=True) inv_square_root = U * 1.0 / np.sqrt(S) self.R_square_root[ind] = inv_square_root return inv_square_root
def set_config( self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs ): """ Create a configuration file for MAGICC. Writes a fortran namelist in run_dir. Parameters ---------- filename : str Name of configuration file to write top_level_key : str Name of namelist to be written in the configuration file kwargs Other parameters to pass to the configuration file. No validation on the parameters is performed. Returns ------- dict The contents of the namelist which was written to file """ kwargs = self._format_config(kwargs) fname = join(self.run_dir, filename) conf = {top_level_key: kwargs} f90nml.write(conf, fname, force=True) return conf
def function[set_config, parameter[self, filename, top_level_key]]: constant[ Create a configuration file for MAGICC. Writes a fortran namelist in run_dir. Parameters ---------- filename : str Name of configuration file to write top_level_key : str Name of namelist to be written in the configuration file kwargs Other parameters to pass to the configuration file. No validation on the parameters is performed. Returns ------- dict The contents of the namelist which was written to file ] variable[kwargs] assign[=] call[name[self]._format_config, parameter[name[kwargs]]] variable[fname] assign[=] call[name[join], parameter[name[self].run_dir, name[filename]]] variable[conf] assign[=] dictionary[[<ast.Name object at 0x7da18fe92650>], [<ast.Name object at 0x7da18fe913c0>]] call[name[f90nml].write, parameter[name[conf], name[fname]]] return[name[conf]]
keyword[def] identifier[set_config] ( identifier[self] , identifier[filename] = literal[string] , identifier[top_level_key] = literal[string] ,** identifier[kwargs] ): literal[string] identifier[kwargs] = identifier[self] . identifier[_format_config] ( identifier[kwargs] ) identifier[fname] = identifier[join] ( identifier[self] . identifier[run_dir] , identifier[filename] ) identifier[conf] ={ identifier[top_level_key] : identifier[kwargs] } identifier[f90nml] . identifier[write] ( identifier[conf] , identifier[fname] , identifier[force] = keyword[True] ) keyword[return] identifier[conf]
def set_config(self, filename='MAGTUNE_PYMAGICC.CFG', top_level_key='nml_allcfgs', **kwargs): """ Create a configuration file for MAGICC. Writes a fortran namelist in run_dir. Parameters ---------- filename : str Name of configuration file to write top_level_key : str Name of namelist to be written in the configuration file kwargs Other parameters to pass to the configuration file. No validation on the parameters is performed. Returns ------- dict The contents of the namelist which was written to file """ kwargs = self._format_config(kwargs) fname = join(self.run_dir, filename) conf = {top_level_key: kwargs} f90nml.write(conf, fname, force=True) return conf
def pupatizeElements(self) : """Transform all raba object into pupas""" for i in range(len(self)) : self[i] = self[i].pupa()
def function[pupatizeElements, parameter[self]]: constant[Transform all raba object into pupas] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]]]]]] begin[:] call[name[self]][name[i]] assign[=] call[call[name[self]][name[i]].pupa, parameter[]]
keyword[def] identifier[pupatizeElements] ( identifier[self] ): literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] )): identifier[self] [ identifier[i] ]= identifier[self] [ identifier[i] ]. identifier[pupa] ()
def pupatizeElements(self): """Transform all raba object into pupas""" for i in range(len(self)): self[i] = self[i].pupa() # depends on [control=['for'], data=['i']]
def options(self, *args, **kwargs): """Implement the HTTP OPTIONS method :param list args: Positional arguments :param dict kwargs: Keyword arguments """ self.set_header('Allow', ', '.join(self.ALLOW)) self.set_status(204) self.finish()
def function[options, parameter[self]]: constant[Implement the HTTP OPTIONS method :param list args: Positional arguments :param dict kwargs: Keyword arguments ] call[name[self].set_header, parameter[constant[Allow], call[constant[, ].join, parameter[name[self].ALLOW]]]] call[name[self].set_status, parameter[constant[204]]] call[name[self].finish, parameter[]]
keyword[def] identifier[options] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[set_header] ( literal[string] , literal[string] . identifier[join] ( identifier[self] . identifier[ALLOW] )) identifier[self] . identifier[set_status] ( literal[int] ) identifier[self] . identifier[finish] ()
def options(self, *args, **kwargs): """Implement the HTTP OPTIONS method :param list args: Positional arguments :param dict kwargs: Keyword arguments """ self.set_header('Allow', ', '.join(self.ALLOW)) self.set_status(204) self.finish()
def _on_error(self, exc): """Helper for :meth:`__iter__`.""" # restart the read scan from AFTER the last successfully read row retry_request = self.request if self.last_scanned_row_key: retry_request = self._create_retry_request() self.response_iterator = self.read_method(retry_request)
def function[_on_error, parameter[self, exc]]: constant[Helper for :meth:`__iter__`.] variable[retry_request] assign[=] name[self].request if name[self].last_scanned_row_key begin[:] variable[retry_request] assign[=] call[name[self]._create_retry_request, parameter[]] name[self].response_iterator assign[=] call[name[self].read_method, parameter[name[retry_request]]]
keyword[def] identifier[_on_error] ( identifier[self] , identifier[exc] ): literal[string] identifier[retry_request] = identifier[self] . identifier[request] keyword[if] identifier[self] . identifier[last_scanned_row_key] : identifier[retry_request] = identifier[self] . identifier[_create_retry_request] () identifier[self] . identifier[response_iterator] = identifier[self] . identifier[read_method] ( identifier[retry_request] )
def _on_error(self, exc): """Helper for :meth:`__iter__`.""" # restart the read scan from AFTER the last successfully read row retry_request = self.request if self.last_scanned_row_key: retry_request = self._create_retry_request() # depends on [control=['if'], data=[]] self.response_iterator = self.read_method(retry_request)
def easy_time_extrator(self, string): """简单时间抽取,即年月日同时出现 Keyword arguments: string -- 含有时间的文本,str类型 """ try: if not self.year_check and not self.month_check and not self.day_check: str_all = re.search('([\u4e00-\u9fa5〇○]{4})年([\u4e00-\u9fa5]{1,3})月([\u4e00-\u9fa5]{1,3})日', string) str_year = self.str_to_num(str_all.group(1)) str_month = self.str_to_num(str_all.group(2)) str_day = self.str_to_num(str_all.group(3)) check_year = datetime.datetime.now().year if str_year in range(1970, check_year + 1) and str_month in range(1, 13) and str_day in range(1, 32): self.year = str_year self.month = str_month self.day = str_day self.year_check = True self.month_check = True self.day_check = True except: pass try: if not self.year_check and not self.month_check and not self.day_check: str_all = re.search('(\d{4})[-._年](\d{1,2})[-._月](\d{1,2})', string) str_year = int(str_all.group(1)) str_month = int(str_all.group(2)) str_day = int(str_all.group(3)) check_year = datetime.datetime.now().year if str_year in range(1970, check_year + 1) and str_month in range(1, 13) and str_day in range(1, 32): self.year = str_year self.month = str_month self.day = str_day self.year_check = True self.month_check = True self.day_check = True except: pass
def function[easy_time_extrator, parameter[self, string]]: constant[简单时间抽取,即年月日同时出现 Keyword arguments: string -- 含有时间的文本,str类型 ] <ast.Try object at 0x7da18bcc8e80> <ast.Try object at 0x7da18bcc9e40>
keyword[def] identifier[easy_time_extrator] ( identifier[self] , identifier[string] ): literal[string] keyword[try] : keyword[if] keyword[not] identifier[self] . identifier[year_check] keyword[and] keyword[not] identifier[self] . identifier[month_check] keyword[and] keyword[not] identifier[self] . identifier[day_check] : identifier[str_all] = identifier[re] . identifier[search] ( literal[string] , identifier[string] ) identifier[str_year] = identifier[self] . identifier[str_to_num] ( identifier[str_all] . identifier[group] ( literal[int] )) identifier[str_month] = identifier[self] . identifier[str_to_num] ( identifier[str_all] . identifier[group] ( literal[int] )) identifier[str_day] = identifier[self] . identifier[str_to_num] ( identifier[str_all] . identifier[group] ( literal[int] )) identifier[check_year] = identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[year] keyword[if] identifier[str_year] keyword[in] identifier[range] ( literal[int] , identifier[check_year] + literal[int] ) keyword[and] identifier[str_month] keyword[in] identifier[range] ( literal[int] , literal[int] ) keyword[and] identifier[str_day] keyword[in] identifier[range] ( literal[int] , literal[int] ): identifier[self] . identifier[year] = identifier[str_year] identifier[self] . identifier[month] = identifier[str_month] identifier[self] . identifier[day] = identifier[str_day] identifier[self] . identifier[year_check] = keyword[True] identifier[self] . identifier[month_check] = keyword[True] identifier[self] . identifier[day_check] = keyword[True] keyword[except] : keyword[pass] keyword[try] : keyword[if] keyword[not] identifier[self] . identifier[year_check] keyword[and] keyword[not] identifier[self] . identifier[month_check] keyword[and] keyword[not] identifier[self] . identifier[day_check] : identifier[str_all] = identifier[re] . identifier[search] ( literal[string] , identifier[string] ) identifier[str_year] = identifier[int] ( identifier[str_all] . 
identifier[group] ( literal[int] )) identifier[str_month] = identifier[int] ( identifier[str_all] . identifier[group] ( literal[int] )) identifier[str_day] = identifier[int] ( identifier[str_all] . identifier[group] ( literal[int] )) identifier[check_year] = identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[year] keyword[if] identifier[str_year] keyword[in] identifier[range] ( literal[int] , identifier[check_year] + literal[int] ) keyword[and] identifier[str_month] keyword[in] identifier[range] ( literal[int] , literal[int] ) keyword[and] identifier[str_day] keyword[in] identifier[range] ( literal[int] , literal[int] ): identifier[self] . identifier[year] = identifier[str_year] identifier[self] . identifier[month] = identifier[str_month] identifier[self] . identifier[day] = identifier[str_day] identifier[self] . identifier[year_check] = keyword[True] identifier[self] . identifier[month_check] = keyword[True] identifier[self] . identifier[day_check] = keyword[True] keyword[except] : keyword[pass]
def easy_time_extrator(self, string): """简单时间抽取,即年月日同时出现 Keyword arguments: string -- 含有时间的文本,str类型 """ try: if not self.year_check and (not self.month_check) and (not self.day_check): str_all = re.search('([一-龥〇○]{4})年([一-龥]{1,3})月([一-龥]{1,3})日', string) str_year = self.str_to_num(str_all.group(1)) str_month = self.str_to_num(str_all.group(2)) str_day = self.str_to_num(str_all.group(3)) check_year = datetime.datetime.now().year if str_year in range(1970, check_year + 1) and str_month in range(1, 13) and (str_day in range(1, 32)): self.year = str_year self.month = str_month self.day = str_day self.year_check = True self.month_check = True self.day_check = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] try: if not self.year_check and (not self.month_check) and (not self.day_check): str_all = re.search('(\\d{4})[-._年](\\d{1,2})[-._月](\\d{1,2})', string) str_year = int(str_all.group(1)) str_month = int(str_all.group(2)) str_day = int(str_all.group(3)) check_year = datetime.datetime.now().year if str_year in range(1970, check_year + 1) and str_month in range(1, 13) and (str_day in range(1, 32)): self.year = str_year self.month = str_month self.day = str_day self.year_check = True self.month_check = True self.day_check = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]]
def update_colorbox(): """Update Colorbox code from vendor tree""" base_name = os.path.dirname(__file__) destination = os.path.join(base_name, "armstrong", "apps", "images", "static", "colorbox") colorbox_source = os.path.join(base_name, "vendor", "colorbox") colorbox_files = [ os.path.join(colorbox_source, "example1", "colorbox.css"), os.path.join(colorbox_source, "example1", "images"), os.path.join(colorbox_source, "colorbox", "jquery.colorbox-min.js"), ] local("cp -R %s %s" % (" ".join(colorbox_files), destination)) # We're not supporting IE6, so we can drop the backfill local("rm -rf %s" % (os.path.join(destination, "images", "ie6")))
def function[update_colorbox, parameter[]]: constant[Update Colorbox code from vendor tree] variable[base_name] assign[=] call[name[os].path.dirname, parameter[name[__file__]]] variable[destination] assign[=] call[name[os].path.join, parameter[name[base_name], constant[armstrong], constant[apps], constant[images], constant[static], constant[colorbox]]] variable[colorbox_source] assign[=] call[name[os].path.join, parameter[name[base_name], constant[vendor], constant[colorbox]]] variable[colorbox_files] assign[=] list[[<ast.Call object at 0x7da204960f40>, <ast.Call object at 0x7da2049629b0>, <ast.Call object at 0x7da204961720>]] call[name[local], parameter[binary_operation[constant[cp -R %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da204960b20>, <ast.Name object at 0x7da204961780>]]]]] call[name[local], parameter[binary_operation[constant[rm -rf %s] <ast.Mod object at 0x7da2590d6920> call[name[os].path.join, parameter[name[destination], constant[images], constant[ie6]]]]]]
keyword[def] identifier[update_colorbox] (): literal[string] identifier[base_name] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ) identifier[destination] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_name] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) identifier[colorbox_source] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_name] , literal[string] , literal[string] ) identifier[colorbox_files] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[colorbox_source] , literal[string] , literal[string] ), identifier[os] . identifier[path] . identifier[join] ( identifier[colorbox_source] , literal[string] , literal[string] ), identifier[os] . identifier[path] . identifier[join] ( identifier[colorbox_source] , literal[string] , literal[string] ), ] identifier[local] ( literal[string] %( literal[string] . identifier[join] ( identifier[colorbox_files] ), identifier[destination] )) identifier[local] ( literal[string] %( identifier[os] . identifier[path] . identifier[join] ( identifier[destination] , literal[string] , literal[string] )))
def update_colorbox(): """Update Colorbox code from vendor tree""" base_name = os.path.dirname(__file__) destination = os.path.join(base_name, 'armstrong', 'apps', 'images', 'static', 'colorbox') colorbox_source = os.path.join(base_name, 'vendor', 'colorbox') colorbox_files = [os.path.join(colorbox_source, 'example1', 'colorbox.css'), os.path.join(colorbox_source, 'example1', 'images'), os.path.join(colorbox_source, 'colorbox', 'jquery.colorbox-min.js')] local('cp -R %s %s' % (' '.join(colorbox_files), destination)) # We're not supporting IE6, so we can drop the backfill local('rm -rf %s' % os.path.join(destination, 'images', 'ie6'))
def from_similars(cls, learn, layer_ls:list=[0, 7, 2], **kwargs): "Gets the indices for the most similar images." train_ds, train_idxs = cls.get_similars_idxs(learn, layer_ls, **kwargs) return train_ds, train_idxs
def function[from_similars, parameter[cls, learn, layer_ls]]: constant[Gets the indices for the most similar images.] <ast.Tuple object at 0x7da1b1e9ab00> assign[=] call[name[cls].get_similars_idxs, parameter[name[learn], name[layer_ls]]] return[tuple[[<ast.Name object at 0x7da1b1e9a890>, <ast.Name object at 0x7da1b1e9b4c0>]]]
keyword[def] identifier[from_similars] ( identifier[cls] , identifier[learn] , identifier[layer_ls] : identifier[list] =[ literal[int] , literal[int] , literal[int] ],** identifier[kwargs] ): literal[string] identifier[train_ds] , identifier[train_idxs] = identifier[cls] . identifier[get_similars_idxs] ( identifier[learn] , identifier[layer_ls] ,** identifier[kwargs] ) keyword[return] identifier[train_ds] , identifier[train_idxs]
def from_similars(cls, learn, layer_ls: list=[0, 7, 2], **kwargs): """Gets the indices for the most similar images.""" (train_ds, train_idxs) = cls.get_similars_idxs(learn, layer_ls, **kwargs) return (train_ds, train_idxs)
def performApplicationPrelaunchCheck(self, pchAppKey): """ Returns errors that would prevent the specified application from launching immediately. Calling this function will cause the current scene application to quit, so only call it when you are actually about to launch something else. What the caller should do about these failures depends on the failure: VRApplicationError_OldApplicationQuitting - An existing application has been told to quit. Wait for a VREvent_ProcessQuit and try again. VRApplicationError_ApplicationAlreadyStarting - This application is already starting. This is a permanent failure. VRApplicationError_LaunchInProgress - A different application is already starting. This is a permanent failure. VRApplicationError_None - Go ahead and launch. Everything is clear. """ fn = self.function_table.performApplicationPrelaunchCheck result = fn(pchAppKey) return result
def function[performApplicationPrelaunchCheck, parameter[self, pchAppKey]]: constant[ Returns errors that would prevent the specified application from launching immediately. Calling this function will cause the current scene application to quit, so only call it when you are actually about to launch something else. What the caller should do about these failures depends on the failure: VRApplicationError_OldApplicationQuitting - An existing application has been told to quit. Wait for a VREvent_ProcessQuit and try again. VRApplicationError_ApplicationAlreadyStarting - This application is already starting. This is a permanent failure. VRApplicationError_LaunchInProgress - A different application is already starting. This is a permanent failure. VRApplicationError_None - Go ahead and launch. Everything is clear. ] variable[fn] assign[=] name[self].function_table.performApplicationPrelaunchCheck variable[result] assign[=] call[name[fn], parameter[name[pchAppKey]]] return[name[result]]
keyword[def] identifier[performApplicationPrelaunchCheck] ( identifier[self] , identifier[pchAppKey] ): literal[string] identifier[fn] = identifier[self] . identifier[function_table] . identifier[performApplicationPrelaunchCheck] identifier[result] = identifier[fn] ( identifier[pchAppKey] ) keyword[return] identifier[result]
def performApplicationPrelaunchCheck(self, pchAppKey): """ Returns errors that would prevent the specified application from launching immediately. Calling this function will cause the current scene application to quit, so only call it when you are actually about to launch something else. What the caller should do about these failures depends on the failure: VRApplicationError_OldApplicationQuitting - An existing application has been told to quit. Wait for a VREvent_ProcessQuit and try again. VRApplicationError_ApplicationAlreadyStarting - This application is already starting. This is a permanent failure. VRApplicationError_LaunchInProgress - A different application is already starting. This is a permanent failure. VRApplicationError_None - Go ahead and launch. Everything is clear. """ fn = self.function_table.performApplicationPrelaunchCheck result = fn(pchAppKey) return result
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False): """Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.""" train_op = self.optimize(loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu) if use_tpu: if self._hparams.warm_start_from: def scaffold_fn(): self.initialize_from_ckpt(self._hparams.warm_start_from) return tf.train.Scaffold() else: scaffold_fn = None # Note: important to call this before remove_summaries() if self.hparams.tpu_enable_host_call: host_call = self.create_train_host_call() else: host_call = None remove_summaries() return tf.contrib.tpu.TPUEstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) else: if self._hparams.warm_start_from: self.initialize_from_ckpt(self._hparams.warm_start_from) # When loading weights from a pre-trained model, you want to be able to # load separate weights into the encoder and decoder. if self._hparams.warm_start_from_second: self.initialize_from_ckpt(self._hparams.warm_start_from_second) return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
def function[estimator_spec_train, parameter[self, loss, num_async_replicas, use_tpu]]: constant[Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.] variable[train_op] assign[=] call[name[self].optimize, parameter[name[loss]]] if name[use_tpu] begin[:] if name[self]._hparams.warm_start_from begin[:] def function[scaffold_fn, parameter[]]: call[name[self].initialize_from_ckpt, parameter[name[self]._hparams.warm_start_from]] return[call[name[tf].train.Scaffold, parameter[]]] if name[self].hparams.tpu_enable_host_call begin[:] variable[host_call] assign[=] call[name[self].create_train_host_call, parameter[]] call[name[remove_summaries], parameter[]] return[call[name[tf].contrib.tpu.TPUEstimatorSpec, parameter[name[tf].estimator.ModeKeys.TRAIN]]]
keyword[def] identifier[estimator_spec_train] ( identifier[self] , identifier[loss] , identifier[num_async_replicas] = literal[int] , identifier[use_tpu] = keyword[False] ): literal[string] identifier[train_op] = identifier[self] . identifier[optimize] ( identifier[loss] , identifier[num_async_replicas] = identifier[num_async_replicas] , identifier[use_tpu] = identifier[use_tpu] ) keyword[if] identifier[use_tpu] : keyword[if] identifier[self] . identifier[_hparams] . identifier[warm_start_from] : keyword[def] identifier[scaffold_fn] (): identifier[self] . identifier[initialize_from_ckpt] ( identifier[self] . identifier[_hparams] . identifier[warm_start_from] ) keyword[return] identifier[tf] . identifier[train] . identifier[Scaffold] () keyword[else] : identifier[scaffold_fn] = keyword[None] keyword[if] identifier[self] . identifier[hparams] . identifier[tpu_enable_host_call] : identifier[host_call] = identifier[self] . identifier[create_train_host_call] () keyword[else] : identifier[host_call] = keyword[None] identifier[remove_summaries] () keyword[return] identifier[tf] . identifier[contrib] . identifier[tpu] . identifier[TPUEstimatorSpec] ( identifier[tf] . identifier[estimator] . identifier[ModeKeys] . identifier[TRAIN] , identifier[loss] = identifier[loss] , identifier[train_op] = identifier[train_op] , identifier[host_call] = identifier[host_call] , identifier[scaffold_fn] = identifier[scaffold_fn] ) keyword[else] : keyword[if] identifier[self] . identifier[_hparams] . identifier[warm_start_from] : identifier[self] . identifier[initialize_from_ckpt] ( identifier[self] . identifier[_hparams] . identifier[warm_start_from] ) keyword[if] identifier[self] . identifier[_hparams] . identifier[warm_start_from_second] : identifier[self] . identifier[initialize_from_ckpt] ( identifier[self] . identifier[_hparams] . identifier[warm_start_from_second] ) keyword[return] identifier[tf] . identifier[estimator] . identifier[EstimatorSpec] ( identifier[tf] . 
identifier[estimator] . identifier[ModeKeys] . identifier[TRAIN] , identifier[loss] = identifier[loss] , identifier[train_op] = identifier[train_op] )
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False): """Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.""" train_op = self.optimize(loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu) if use_tpu: if self._hparams.warm_start_from: def scaffold_fn(): self.initialize_from_ckpt(self._hparams.warm_start_from) return tf.train.Scaffold() # depends on [control=['if'], data=[]] else: scaffold_fn = None # Note: important to call this before remove_summaries() if self.hparams.tpu_enable_host_call: host_call = self.create_train_host_call() # depends on [control=['if'], data=[]] else: host_call = None remove_summaries() return tf.contrib.tpu.TPUEstimatorSpec(tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) # depends on [control=['if'], data=[]] else: if self._hparams.warm_start_from: self.initialize_from_ckpt(self._hparams.warm_start_from) # depends on [control=['if'], data=[]] # When loading weights from a pre-trained model, you want to be able to # load separate weights into the encoder and decoder. if self._hparams.warm_start_from_second: self.initialize_from_ckpt(self._hparams.warm_start_from_second) # depends on [control=['if'], data=[]] return tf.estimator.EstimatorSpec(tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
def get_cache_context(self): ''' Retrieve a context cache from disk ''' with salt.utils.files.fopen(self.cache_path, 'rb') as cache: return salt.utils.data.decode(self.serial.load(cache))
def function[get_cache_context, parameter[self]]: constant[ Retrieve a context cache from disk ] with call[name[salt].utils.files.fopen, parameter[name[self].cache_path, constant[rb]]] begin[:] return[call[name[salt].utils.data.decode, parameter[call[name[self].serial.load, parameter[name[cache]]]]]]
keyword[def] identifier[get_cache_context] ( identifier[self] ): literal[string] keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[self] . identifier[cache_path] , literal[string] ) keyword[as] identifier[cache] : keyword[return] identifier[salt] . identifier[utils] . identifier[data] . identifier[decode] ( identifier[self] . identifier[serial] . identifier[load] ( identifier[cache] ))
def get_cache_context(self): """ Retrieve a context cache from disk """ with salt.utils.files.fopen(self.cache_path, 'rb') as cache: return salt.utils.data.decode(self.serial.load(cache)) # depends on [control=['with'], data=['cache']]
def handle_triple(self, lhs, relation, rhs): """ Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object. """ relation = relation.replace(':', '', 1) # remove leading : if self.is_relation_inverted(relation): # deinvert source, target, inverted = rhs, lhs, True relation = self.invert_relation(relation) else: source, target, inverted = lhs, rhs, False source = _default_cast(source) target = _default_cast(target) if relation == '': # set empty relations to None relation = None return Triple(source, relation, target, inverted)
def function[handle_triple, parameter[self, lhs, relation, rhs]]: constant[ Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object. ] variable[relation] assign[=] call[name[relation].replace, parameter[constant[:], constant[], constant[1]]] if call[name[self].is_relation_inverted, parameter[name[relation]]] begin[:] <ast.Tuple object at 0x7da18dc05f30> assign[=] tuple[[<ast.Name object at 0x7da18dc05f90>, <ast.Name object at 0x7da18dc04610>, <ast.Constant object at 0x7da18dc05d80>]] variable[relation] assign[=] call[name[self].invert_relation, parameter[name[relation]]] variable[source] assign[=] call[name[_default_cast], parameter[name[source]]] variable[target] assign[=] call[name[_default_cast], parameter[name[target]]] if compare[name[relation] equal[==] constant[]] begin[:] variable[relation] assign[=] constant[None] return[call[name[Triple], parameter[name[source], name[relation], name[target], name[inverted]]]]
keyword[def] identifier[handle_triple] ( identifier[self] , identifier[lhs] , identifier[relation] , identifier[rhs] ): literal[string] identifier[relation] = identifier[relation] . identifier[replace] ( literal[string] , literal[string] , literal[int] ) keyword[if] identifier[self] . identifier[is_relation_inverted] ( identifier[relation] ): identifier[source] , identifier[target] , identifier[inverted] = identifier[rhs] , identifier[lhs] , keyword[True] identifier[relation] = identifier[self] . identifier[invert_relation] ( identifier[relation] ) keyword[else] : identifier[source] , identifier[target] , identifier[inverted] = identifier[lhs] , identifier[rhs] , keyword[False] identifier[source] = identifier[_default_cast] ( identifier[source] ) identifier[target] = identifier[_default_cast] ( identifier[target] ) keyword[if] identifier[relation] == literal[string] : identifier[relation] = keyword[None] keyword[return] identifier[Triple] ( identifier[source] , identifier[relation] , identifier[target] , identifier[inverted] )
def handle_triple(self, lhs, relation, rhs): """ Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object. """ relation = relation.replace(':', '', 1) # remove leading : if self.is_relation_inverted(relation): # deinvert (source, target, inverted) = (rhs, lhs, True) relation = self.invert_relation(relation) # depends on [control=['if'], data=[]] else: (source, target, inverted) = (lhs, rhs, False) source = _default_cast(source) target = _default_cast(target) if relation == '': # set empty relations to None relation = None # depends on [control=['if'], data=['relation']] return Triple(source, relation, target, inverted)
def update_constants(): """Recreate channel name constants with changed settings. This kludge is mostly needed due to the way Django settings are patched for testing and how modules need to be imported throughout the project. On import time, settings are not patched yet, but some of the code needs static values immediately. Updating functions such as this one are then needed to fix dummy values. """ global MANAGER_CONTROL_CHANNEL, MANAGER_EXECUTOR_CHANNELS # pylint: disable=global-statement global MANAGER_LISTENER_STATS, MANAGER_STATE_PREFIX # pylint: disable=global-statement redis_prefix = getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_PREFIX', '') MANAGER_CONTROL_CHANNEL = '{}.control'.format(redis_prefix) MANAGER_EXECUTOR_CHANNELS = ManagerChannelPair( '{}.result_queue'.format(redis_prefix), '{}.result_queue_response'.format(redis_prefix), ) MANAGER_STATE_PREFIX = '{}.state'.format(redis_prefix) MANAGER_LISTENER_STATS = '{}.listener_stats'.format(redis_prefix)
def function[update_constants, parameter[]]: constant[Recreate channel name constants with changed settings. This kludge is mostly needed due to the way Django settings are patched for testing and how modules need to be imported throughout the project. On import time, settings are not patched yet, but some of the code needs static values immediately. Updating functions such as this one are then needed to fix dummy values. ] <ast.Global object at 0x7da1b19b4640> <ast.Global object at 0x7da1b19b6f50> variable[redis_prefix] assign[=] call[call[name[getattr], parameter[name[settings], constant[FLOW_MANAGER], dictionary[[], []]]].get, parameter[constant[REDIS_PREFIX], constant[]]] variable[MANAGER_CONTROL_CHANNEL] assign[=] call[constant[{}.control].format, parameter[name[redis_prefix]]] variable[MANAGER_EXECUTOR_CHANNELS] assign[=] call[name[ManagerChannelPair], parameter[call[constant[{}.result_queue].format, parameter[name[redis_prefix]]], call[constant[{}.result_queue_response].format, parameter[name[redis_prefix]]]]] variable[MANAGER_STATE_PREFIX] assign[=] call[constant[{}.state].format, parameter[name[redis_prefix]]] variable[MANAGER_LISTENER_STATS] assign[=] call[constant[{}.listener_stats].format, parameter[name[redis_prefix]]]
keyword[def] identifier[update_constants] (): literal[string] keyword[global] identifier[MANAGER_CONTROL_CHANNEL] , identifier[MANAGER_EXECUTOR_CHANNELS] keyword[global] identifier[MANAGER_LISTENER_STATS] , identifier[MANAGER_STATE_PREFIX] identifier[redis_prefix] = identifier[getattr] ( identifier[settings] , literal[string] ,{}). identifier[get] ( literal[string] , literal[string] ) identifier[MANAGER_CONTROL_CHANNEL] = literal[string] . identifier[format] ( identifier[redis_prefix] ) identifier[MANAGER_EXECUTOR_CHANNELS] = identifier[ManagerChannelPair] ( literal[string] . identifier[format] ( identifier[redis_prefix] ), literal[string] . identifier[format] ( identifier[redis_prefix] ), ) identifier[MANAGER_STATE_PREFIX] = literal[string] . identifier[format] ( identifier[redis_prefix] ) identifier[MANAGER_LISTENER_STATS] = literal[string] . identifier[format] ( identifier[redis_prefix] )
def update_constants(): """Recreate channel name constants with changed settings. This kludge is mostly needed due to the way Django settings are patched for testing and how modules need to be imported throughout the project. On import time, settings are not patched yet, but some of the code needs static values immediately. Updating functions such as this one are then needed to fix dummy values. """ global MANAGER_CONTROL_CHANNEL, MANAGER_EXECUTOR_CHANNELS # pylint: disable=global-statement global MANAGER_LISTENER_STATS, MANAGER_STATE_PREFIX # pylint: disable=global-statement redis_prefix = getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_PREFIX', '') MANAGER_CONTROL_CHANNEL = '{}.control'.format(redis_prefix) MANAGER_EXECUTOR_CHANNELS = ManagerChannelPair('{}.result_queue'.format(redis_prefix), '{}.result_queue_response'.format(redis_prefix)) MANAGER_STATE_PREFIX = '{}.state'.format(redis_prefix) MANAGER_LISTENER_STATS = '{}.listener_stats'.format(redis_prefix)
def express_route_circuit_connections(self): """Instance depends on the API version: * 2018-02-01: :class:`ExpressRouteCircuitConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCircuitConnectionsOperations>` * 2018-04-01: :class:`ExpressRouteCircuitConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCircuitConnectionsOperations>` """ api_version = self._get_api_version('express_route_circuit_connections') if api_version == '2018-02-01': from .v2018_02_01.operations import ExpressRouteCircuitConnectionsOperations as OperationClass elif api_version == '2018-04-01': from .v2018_04_01.operations import ExpressRouteCircuitConnectionsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def function[express_route_circuit_connections, parameter[self]]: constant[Instance depends on the API version: * 2018-02-01: :class:`ExpressRouteCircuitConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCircuitConnectionsOperations>` * 2018-04-01: :class:`ExpressRouteCircuitConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCircuitConnectionsOperations>` ] variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[express_route_circuit_connections]]] if compare[name[api_version] equal[==] constant[2018-02-01]] begin[:] from relative_module[v2018_02_01.operations] import module[ExpressRouteCircuitConnectionsOperations] return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]]
keyword[def] identifier[express_route_circuit_connections] ( identifier[self] ): literal[string] identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] ) keyword[if] identifier[api_version] == literal[string] : keyword[from] . identifier[v2018_02_01] . identifier[operations] keyword[import] identifier[ExpressRouteCircuitConnectionsOperations] keyword[as] identifier[OperationClass] keyword[elif] identifier[api_version] == literal[string] : keyword[from] . identifier[v2018_04_01] . identifier[operations] keyword[import] identifier[ExpressRouteCircuitConnectionsOperations] keyword[as] identifier[OperationClass] keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] )) keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )))
def express_route_circuit_connections(self): """Instance depends on the API version: * 2018-02-01: :class:`ExpressRouteCircuitConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCircuitConnectionsOperations>` * 2018-04-01: :class:`ExpressRouteCircuitConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCircuitConnectionsOperations>` """ api_version = self._get_api_version('express_route_circuit_connections') if api_version == '2018-02-01': from .v2018_02_01.operations import ExpressRouteCircuitConnectionsOperations as OperationClass # depends on [control=['if'], data=[]] elif api_version == '2018-04-01': from .v2018_04_01.operations import ExpressRouteCircuitConnectionsOperations as OperationClass # depends on [control=['if'], data=[]] else: raise NotImplementedError('APIVersion {} is not available'.format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def metric_calculator(tpf, fpf, P, N, metric_list, anal=True): """ Calculates VS metrics & (optionally) their 95% conficdence limits """ metrics = {} # build dictionary to store performance metrics for vs_metric in [float(x) for x in metric_list if '.' in x]: if N >= 1 / vs_metric: # enrichment factor (ef) dictionary {#_of_req_decoys : [FPF,EF]} metrics[round(vs_metric * N)] = [vs_metric, 0] # non enrichment factor dictionary {'name' : ['name',value]} for vs_metric in [x for x in metric_list if '.' not in x]: metrics[vs_metric] = [vs_metric, 0, 0, 0] # calculate enrichment factors & auc metrics['AUC'][1] = 0 for index in range(len(tpf)): # calculate and assign enrichment factors if N * fpf[index] in metrics.keys() and N * fpf[index] > 0: ef = tpf[index] / fpf[index] metrics[N * fpf[index]][1] = ef # calculate auc if fpf[index] != fpf[index - 1]: metrics['AUC'][1] = tpf[index] + metrics['AUC'][1] metrics['AUC'][1] = metrics['AUC'][1] / N # reshape metrics dictionary, discarding the #_of_req_decoys, i.e: {'fpf',[value,low,high]} reshaped = {} for k,v in metrics.iteritems(): if type(v) is float or type(v) is int: reshaped[k] = v else: reshaped[v[0]] = v[1:] return reshaped
def function[metric_calculator, parameter[tpf, fpf, P, N, metric_list, anal]]: constant[ Calculates VS metrics & (optionally) their 95% conficdence limits ] variable[metrics] assign[=] dictionary[[], []] for taget[name[vs_metric]] in starred[<ast.ListComp object at 0x7da18fe90430>] begin[:] if compare[name[N] greater_or_equal[>=] binary_operation[constant[1] / name[vs_metric]]] begin[:] call[name[metrics]][call[name[round], parameter[binary_operation[name[vs_metric] * name[N]]]]] assign[=] list[[<ast.Name object at 0x7da18fe92cb0>, <ast.Constant object at 0x7da18fe909a0>]] for taget[name[vs_metric]] in starred[<ast.ListComp object at 0x7da18fe935b0>] begin[:] call[name[metrics]][name[vs_metric]] assign[=] list[[<ast.Name object at 0x7da18fe91000>, <ast.Constant object at 0x7da18fe90f40>, <ast.Constant object at 0x7da18fe90eb0>, <ast.Constant object at 0x7da18fe90400>]] call[call[name[metrics]][constant[AUC]]][constant[1]] assign[=] constant[0] for taget[name[index]] in starred[call[name[range], parameter[call[name[len], parameter[name[tpf]]]]]] begin[:] if <ast.BoolOp object at 0x7da18fe92bf0> begin[:] variable[ef] assign[=] binary_operation[call[name[tpf]][name[index]] / call[name[fpf]][name[index]]] call[call[name[metrics]][binary_operation[name[N] * call[name[fpf]][name[index]]]]][constant[1]] assign[=] name[ef] if compare[call[name[fpf]][name[index]] not_equal[!=] call[name[fpf]][binary_operation[name[index] - constant[1]]]] begin[:] call[call[name[metrics]][constant[AUC]]][constant[1]] assign[=] binary_operation[call[name[tpf]][name[index]] + call[call[name[metrics]][constant[AUC]]][constant[1]]] call[call[name[metrics]][constant[AUC]]][constant[1]] assign[=] binary_operation[call[call[name[metrics]][constant[AUC]]][constant[1]] / name[N]] variable[reshaped] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b16044c0>, <ast.Name object at 0x7da1b16054e0>]]] in starred[call[name[metrics].iteritems, parameter[]]] begin[:] if <ast.BoolOp 
object at 0x7da1b1604400> begin[:] call[name[reshaped]][name[k]] assign[=] name[v] return[name[reshaped]]
keyword[def] identifier[metric_calculator] ( identifier[tpf] , identifier[fpf] , identifier[P] , identifier[N] , identifier[metric_list] , identifier[anal] = keyword[True] ): literal[string] identifier[metrics] ={} keyword[for] identifier[vs_metric] keyword[in] [ identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[metric_list] keyword[if] literal[string] keyword[in] identifier[x] ]: keyword[if] identifier[N] >= literal[int] / identifier[vs_metric] : identifier[metrics] [ identifier[round] ( identifier[vs_metric] * identifier[N] )]=[ identifier[vs_metric] , literal[int] ] keyword[for] identifier[vs_metric] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[metric_list] keyword[if] literal[string] keyword[not] keyword[in] identifier[x] ]: identifier[metrics] [ identifier[vs_metric] ]=[ identifier[vs_metric] , literal[int] , literal[int] , literal[int] ] identifier[metrics] [ literal[string] ][ literal[int] ]= literal[int] keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[len] ( identifier[tpf] )): keyword[if] identifier[N] * identifier[fpf] [ identifier[index] ] keyword[in] identifier[metrics] . identifier[keys] () keyword[and] identifier[N] * identifier[fpf] [ identifier[index] ]> literal[int] : identifier[ef] = identifier[tpf] [ identifier[index] ]/ identifier[fpf] [ identifier[index] ] identifier[metrics] [ identifier[N] * identifier[fpf] [ identifier[index] ]][ literal[int] ]= identifier[ef] keyword[if] identifier[fpf] [ identifier[index] ]!= identifier[fpf] [ identifier[index] - literal[int] ]: identifier[metrics] [ literal[string] ][ literal[int] ]= identifier[tpf] [ identifier[index] ]+ identifier[metrics] [ literal[string] ][ literal[int] ] identifier[metrics] [ literal[string] ][ literal[int] ]= identifier[metrics] [ literal[string] ][ literal[int] ]/ identifier[N] identifier[reshaped] ={} keyword[for] identifier[k] , identifier[v] keyword[in] identifier[metrics] . 
identifier[iteritems] (): keyword[if] identifier[type] ( identifier[v] ) keyword[is] identifier[float] keyword[or] identifier[type] ( identifier[v] ) keyword[is] identifier[int] : identifier[reshaped] [ identifier[k] ]= identifier[v] keyword[else] : identifier[reshaped] [ identifier[v] [ literal[int] ]]= identifier[v] [ literal[int] :] keyword[return] identifier[reshaped]
def metric_calculator(tpf, fpf, P, N, metric_list, anal=True): """ Calculates VS metrics & (optionally) their 95% conficdence limits """ metrics = {} # build dictionary to store performance metrics for vs_metric in [float(x) for x in metric_list if '.' in x]: if N >= 1 / vs_metric: # enrichment factor (ef) dictionary {#_of_req_decoys : [FPF,EF]} metrics[round(vs_metric * N)] = [vs_metric, 0] # depends on [control=['if'], data=['N']] # depends on [control=['for'], data=['vs_metric']] # non enrichment factor dictionary {'name' : ['name',value]} for vs_metric in [x for x in metric_list if '.' not in x]: metrics[vs_metric] = [vs_metric, 0, 0, 0] # depends on [control=['for'], data=['vs_metric']] # calculate enrichment factors & auc metrics['AUC'][1] = 0 for index in range(len(tpf)): # calculate and assign enrichment factors if N * fpf[index] in metrics.keys() and N * fpf[index] > 0: ef = tpf[index] / fpf[index] metrics[N * fpf[index]][1] = ef # depends on [control=['if'], data=[]] # calculate auc if fpf[index] != fpf[index - 1]: metrics['AUC'][1] = tpf[index] + metrics['AUC'][1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']] metrics['AUC'][1] = metrics['AUC'][1] / N # reshape metrics dictionary, discarding the #_of_req_decoys, i.e: {'fpf',[value,low,high]} reshaped = {} for (k, v) in metrics.iteritems(): if type(v) is float or type(v) is int: reshaped[k] = v # depends on [control=['if'], data=[]] else: reshaped[v[0]] = v[1:] # depends on [control=['for'], data=[]] return reshaped
def ImpulseNoise(p=0, name=None, deterministic=False, random_state=None): """ Creates an augmenter to apply impulse noise to an image. This is identical to ``SaltAndPepper``, except that per_channel is always set to True. dtype support:: See ``imgaug.augmenters.arithmetic.SaltAndPepper``. """ return SaltAndPepper(p=p, per_channel=True, name=name, deterministic=deterministic, random_state=random_state)
def function[ImpulseNoise, parameter[p, name, deterministic, random_state]]: constant[ Creates an augmenter to apply impulse noise to an image. This is identical to ``SaltAndPepper``, except that per_channel is always set to True. dtype support:: See ``imgaug.augmenters.arithmetic.SaltAndPepper``. ] return[call[name[SaltAndPepper], parameter[]]]
keyword[def] identifier[ImpulseNoise] ( identifier[p] = literal[int] , identifier[name] = keyword[None] , identifier[deterministic] = keyword[False] , identifier[random_state] = keyword[None] ): literal[string] keyword[return] identifier[SaltAndPepper] ( identifier[p] = identifier[p] , identifier[per_channel] = keyword[True] , identifier[name] = identifier[name] , identifier[deterministic] = identifier[deterministic] , identifier[random_state] = identifier[random_state] )
def ImpulseNoise(p=0, name=None, deterministic=False, random_state=None): """ Creates an augmenter to apply impulse noise to an image. This is identical to ``SaltAndPepper``, except that per_channel is always set to True. dtype support:: See ``imgaug.augmenters.arithmetic.SaltAndPepper``. """ return SaltAndPepper(p=p, per_channel=True, name=name, deterministic=deterministic, random_state=random_state)
def p_partselect_minus(self, p): 'partselect : identifier LBRACKET expression MINUSCOLON expression RBRACKET' p[0] = Partselect(p[1], p[3], Minus( p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def function[p_partselect_minus, parameter[self, p]]: constant[partselect : identifier LBRACKET expression MINUSCOLON expression RBRACKET] call[name[p]][constant[0]] assign[=] call[name[Partselect], parameter[call[name[p]][constant[1]], call[name[p]][constant[3]], call[name[Minus], parameter[call[name[p]][constant[3]], call[name[p]][constant[5]]]]]] call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]]
keyword[def] identifier[p_partselect_minus] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[Partselect] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[Minus] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )), identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] ))
def p_partselect_minus(self, p): """partselect : identifier LBRACKET expression MINUSCOLON expression RBRACKET""" p[0] = Partselect(p[1], p[3], Minus(p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def load_document(self, document, file): """ Loads given document into a new **Script_Editor_tabWidget** Widget tab Model editor. :param document: Document to load. :type document: QTextDocument :param file: Document file. :type file: unicode :return: Method success. :rtype: bool """ if not foundations.common.path_exists(file): raise foundations.exceptions.FileExistsError("{0} | '{1}' file doesn't exists!".format( self.__class__.__name__, file)) if self.get_editor(file): LOGGER.info("{0} | '{1}' is already loaded!".format(self.__class__.__name__, file)) return True self.close_first_file() language = self.__languages_model.get_language(self.__default_language) editor = Editor(parent=self, language=language) if not editor.new_file(): return False LOGGER.info("{0} | Loading '{1}' file document!".format(self.__class__.__name__, file)) language = self.__languages_model.get_file_language( file) or self.__languages_model.get_language(self.__default_language) if not editor.load_document(document, file, language): return False if self.__model.set_authoring_nodes(editor): self.__store_recent_file(file) self.file_loaded.emit(file) return True
def function[load_document, parameter[self, document, file]]: constant[ Loads given document into a new **Script_Editor_tabWidget** Widget tab Model editor. :param document: Document to load. :type document: QTextDocument :param file: Document file. :type file: unicode :return: Method success. :rtype: bool ] if <ast.UnaryOp object at 0x7da1b090e4d0> begin[:] <ast.Raise object at 0x7da1b090e470> if call[name[self].get_editor, parameter[name[file]]] begin[:] call[name[LOGGER].info, parameter[call[constant[{0} | '{1}' is already loaded!].format, parameter[name[self].__class__.__name__, name[file]]]]] return[constant[True]] call[name[self].close_first_file, parameter[]] variable[language] assign[=] call[name[self].__languages_model.get_language, parameter[name[self].__default_language]] variable[editor] assign[=] call[name[Editor], parameter[]] if <ast.UnaryOp object at 0x7da1b09d2980> begin[:] return[constant[False]] call[name[LOGGER].info, parameter[call[constant[{0} | Loading '{1}' file document!].format, parameter[name[self].__class__.__name__, name[file]]]]] variable[language] assign[=] <ast.BoolOp object at 0x7da1b09d03a0> if <ast.UnaryOp object at 0x7da1b09d0610> begin[:] return[constant[False]] if call[name[self].__model.set_authoring_nodes, parameter[name[editor]]] begin[:] call[name[self].__store_recent_file, parameter[name[file]]] call[name[self].file_loaded.emit, parameter[name[file]]] return[constant[True]]
keyword[def] identifier[load_document] ( identifier[self] , identifier[document] , identifier[file] ): literal[string] keyword[if] keyword[not] identifier[foundations] . identifier[common] . identifier[path_exists] ( identifier[file] ): keyword[raise] identifier[foundations] . identifier[exceptions] . identifier[FileExistsError] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[file] )) keyword[if] identifier[self] . identifier[get_editor] ( identifier[file] ): identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[file] )) keyword[return] keyword[True] identifier[self] . identifier[close_first_file] () identifier[language] = identifier[self] . identifier[__languages_model] . identifier[get_language] ( identifier[self] . identifier[__default_language] ) identifier[editor] = identifier[Editor] ( identifier[parent] = identifier[self] , identifier[language] = identifier[language] ) keyword[if] keyword[not] identifier[editor] . identifier[new_file] (): keyword[return] keyword[False] identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[file] )) identifier[language] = identifier[self] . identifier[__languages_model] . identifier[get_file_language] ( identifier[file] ) keyword[or] identifier[self] . identifier[__languages_model] . identifier[get_language] ( identifier[self] . identifier[__default_language] ) keyword[if] keyword[not] identifier[editor] . identifier[load_document] ( identifier[document] , identifier[file] , identifier[language] ): keyword[return] keyword[False] keyword[if] identifier[self] . identifier[__model] . identifier[set_authoring_nodes] ( identifier[editor] ): identifier[self] . identifier[__store_recent_file] ( identifier[file] ) identifier[self] . identifier[file_loaded] . 
identifier[emit] ( identifier[file] ) keyword[return] keyword[True]
def load_document(self, document, file): """ Loads given document into a new **Script_Editor_tabWidget** Widget tab Model editor. :param document: Document to load. :type document: QTextDocument :param file: Document file. :type file: unicode :return: Method success. :rtype: bool """ if not foundations.common.path_exists(file): raise foundations.exceptions.FileExistsError("{0} | '{1}' file doesn't exists!".format(self.__class__.__name__, file)) # depends on [control=['if'], data=[]] if self.get_editor(file): LOGGER.info("{0} | '{1}' is already loaded!".format(self.__class__.__name__, file)) return True # depends on [control=['if'], data=[]] self.close_first_file() language = self.__languages_model.get_language(self.__default_language) editor = Editor(parent=self, language=language) if not editor.new_file(): return False # depends on [control=['if'], data=[]] LOGGER.info("{0} | Loading '{1}' file document!".format(self.__class__.__name__, file)) language = self.__languages_model.get_file_language(file) or self.__languages_model.get_language(self.__default_language) if not editor.load_document(document, file, language): return False # depends on [control=['if'], data=[]] if self.__model.set_authoring_nodes(editor): self.__store_recent_file(file) self.file_loaded.emit(file) return True # depends on [control=['if'], data=[]]
def format_explanation(explanation, original_msg=None):
    """Format an assertion explanation for display.

    Embedded newlines are normally escaped; the three markers ``\\n{``,
    ``\\n}`` and ``\\n~`` are exceptions.  The first two delimit nested
    explanations (see function/attribute handling in ``visit_Call()`` and
    ``visit_Attribute()``); the last lets one explanation span several
    lines, e.g. when displaying diffs.

    When message introspection is disabled and ``original_msg`` is
    available, that message is returned verbatim instead.
    """
    if not conf.is_message_introspection_enabled() and original_msg:
        return original_msg
    split_lines = _split_explanation(ecu(explanation))
    formatted = _format_lines(split_lines)
    return u('\n').join(formatted)
def function[format_explanation, parameter[explanation, original_msg]]: constant[This formats an explanation Normally all embedded newlines are escaped, however there are three exceptions: {, } and ~. The first two are intended cover nested explanations, see function and attribute explanations for examples (.visit_Call(), visit_Attribute()). The last one is for when one explanation needs to span multiple lines, e.g. when displaying diffs. ] if <ast.BoolOp object at 0x7da1b1605330> begin[:] return[name[original_msg]] variable[explanation] assign[=] call[name[ecu], parameter[name[explanation]]] variable[lines] assign[=] call[name[_split_explanation], parameter[name[explanation]]] variable[result] assign[=] call[name[_format_lines], parameter[name[lines]]] return[call[call[name[u], parameter[constant[ ]]].join, parameter[name[result]]]]
keyword[def] identifier[format_explanation] ( identifier[explanation] , identifier[original_msg] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[conf] . identifier[is_message_introspection_enabled] () keyword[and] identifier[original_msg] : keyword[return] identifier[original_msg] identifier[explanation] = identifier[ecu] ( identifier[explanation] ) identifier[lines] = identifier[_split_explanation] ( identifier[explanation] ) identifier[result] = identifier[_format_lines] ( identifier[lines] ) keyword[return] identifier[u] ( literal[string] ). identifier[join] ( identifier[result] )
def format_explanation(explanation, original_msg=None): """This formats an explanation Normally all embedded newlines are escaped, however there are three exceptions: {, } and ~. The first two are intended cover nested explanations, see function and attribute explanations for examples (.visit_Call(), visit_Attribute()). The last one is for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ if not conf.is_message_introspection_enabled() and original_msg: return original_msg # depends on [control=['if'], data=[]] explanation = ecu(explanation) lines = _split_explanation(explanation) result = _format_lines(lines) return u('\n').join(result)
def python_file_with_version(self):
    """Return the Python filename carrying the ``__version__`` marker.

    Enable this by adding a ``python-file-with-version`` option::

        [zest.releaser]
        python-file-with-version = reinout/maurits.py

    Returns None when nothing has been configured (no config at all, a
    missing section/option, or an unreadable value).
    """
    if self.config is None:
        return None
    try:
        return self.config.get('zest.releaser',
                               'python-file-with-version')
    except (NoSectionError, NoOptionError, ValueError):
        return None
def function[python_file_with_version, parameter[self]]: constant[Return Python filename with ``__version__`` marker, if configured. Enable this by adding a ``python-file-with-version`` option:: [zest.releaser] python-file-with-version = reinout/maurits.py Return None when nothing has been configured. ] variable[default] assign[=] constant[None] if compare[name[self].config is constant[None]] begin[:] return[name[default]] <ast.Try object at 0x7da18bcc97e0> return[name[result]]
keyword[def] identifier[python_file_with_version] ( identifier[self] ): literal[string] identifier[default] = keyword[None] keyword[if] identifier[self] . identifier[config] keyword[is] keyword[None] : keyword[return] identifier[default] keyword[try] : identifier[result] = identifier[self] . identifier[config] . identifier[get] ( literal[string] , literal[string] ) keyword[except] ( identifier[NoSectionError] , identifier[NoOptionError] , identifier[ValueError] ): keyword[return] identifier[default] keyword[return] identifier[result]
def python_file_with_version(self): """Return Python filename with ``__version__`` marker, if configured. Enable this by adding a ``python-file-with-version`` option:: [zest.releaser] python-file-with-version = reinout/maurits.py Return None when nothing has been configured. """ default = None if self.config is None: return default # depends on [control=['if'], data=[]] try: result = self.config.get('zest.releaser', 'python-file-with-version') # depends on [control=['try'], data=[]] except (NoSectionError, NoOptionError, ValueError): return default # depends on [control=['except'], data=[]] return result
def is_a_string(var, allow_none=False):
    """
    Return True if ``var`` is a string (ascii or unicode).

        Result             py-2   py-3
        -----------------  -----  -----
        b'bytes literal'   True   False
        'string literal'   True   True
        u'unicode literal' True   True

    Numpy strings (``numpy.string_``, ``numpy.unicode_``) also count,
    since they subclass the native string types.

    When ``allow_none`` is truthy, ``None`` is accepted as well.
    """
    if var is None:
        # Mirrors the original ``var is None and allow_none`` branch:
        # None is accepted only when the caller opted in.
        return allow_none
    return isinstance(var, six.string_types)
def function[is_a_string, parameter[var, allow_none]]: constant[ Returns True if var is a string (ascii or unicode) Result py-2 py-3 ----------------- ----- ----- b'bytes literal' True False 'string literal' True True u'unicode literal' True True Also returns True if the var is a numpy string (numpy.string_, numpy.unicode_). ] return[<ast.BoolOp object at 0x7da1b0415ba0>]
keyword[def] identifier[is_a_string] ( identifier[var] , identifier[allow_none] = keyword[False] ): literal[string] keyword[return] identifier[isinstance] ( identifier[var] , identifier[six] . identifier[string_types] ) keyword[or] ( identifier[var] keyword[is] keyword[None] keyword[and] identifier[allow_none] )
def is_a_string(var, allow_none=False): """ Returns True if var is a string (ascii or unicode) Result py-2 py-3 ----------------- ----- ----- b'bytes literal' True False 'string literal' True True u'unicode literal' True True Also returns True if the var is a numpy string (numpy.string_, numpy.unicode_). """ return isinstance(var, six.string_types) or (var is None and allow_none)
def elcm_profile_create(irmc_info, param_path):
    """Send an eLCM request to create a profile.

    Creating a profile spawns a new session with status 'running'; the
    session ends once the profile is completely created.

    :param irmc_info: node info
    :param param_path: path of profile
    :returns: dict object of session info if succeed
        {
            'Session': {
                'Id': id
                'Status': 'activated'
                ...
            }
        }
    :raises: SCCIClientError if SCCI failed
    """
    # Profile creation can take a while, so use a copy of the node info
    # with a longer client timeout instead of mutating the caller's dict.
    timed_info = dict(irmc_info)
    timed_info['irmc_client_timeout'] = PROFILE_CREATE_TIMEOUT
    response = elcm_request(timed_info,
                            method='POST',
                            path=URL_PATH_PROFILE_MGMT + 'get',
                            params={'PARAM_PATH': param_path})
    # 202 Accepted means the server spawned the creation session.
    if response.status_code != 202:
        raise scci.SCCIClientError(
            ('Failed to create profile for path '
             '"%(param_path)s" with error code '
             '%(error)s' % {'param_path': param_path,
                            'error': response.status_code}))
    return _parse_elcm_response_body_as_json(response)
def function[elcm_profile_create, parameter[irmc_info, param_path]]: constant[send an eLCM request to create profile To create a profile, a new session is spawned with status 'running'. When profile is created completely, the session ends. :param irmc_info: node info :param param_path: path of profile :returns: dict object of session info if succeed { 'Session': { 'Id': id 'Status': 'activated' ... } } :raises: SCCIClientError if SCCI failed ] variable[_irmc_info] assign[=] call[name[dict], parameter[name[irmc_info]]] call[name[_irmc_info]][constant[irmc_client_timeout]] assign[=] name[PROFILE_CREATE_TIMEOUT] variable[resp] assign[=] call[name[elcm_request], parameter[name[_irmc_info]]] if compare[name[resp].status_code equal[==] constant[202]] begin[:] return[call[name[_parse_elcm_response_body_as_json], parameter[name[resp]]]]
keyword[def] identifier[elcm_profile_create] ( identifier[irmc_info] , identifier[param_path] ): literal[string] identifier[_irmc_info] = identifier[dict] ( identifier[irmc_info] ) identifier[_irmc_info] [ literal[string] ]= identifier[PROFILE_CREATE_TIMEOUT] identifier[resp] = identifier[elcm_request] ( identifier[_irmc_info] , identifier[method] = literal[string] , identifier[path] = identifier[URL_PATH_PROFILE_MGMT] + literal[string] , identifier[params] ={ literal[string] : identifier[param_path] }) keyword[if] identifier[resp] . identifier[status_code] == literal[int] : keyword[return] identifier[_parse_elcm_response_body_as_json] ( identifier[resp] ) keyword[else] : keyword[raise] identifier[scci] . identifier[SCCIClientError] (( literal[string] literal[string] literal[string] % { literal[string] : identifier[param_path] , literal[string] : identifier[resp] . identifier[status_code] }))
def elcm_profile_create(irmc_info, param_path): """send an eLCM request to create profile To create a profile, a new session is spawned with status 'running'. When profile is created completely, the session ends. :param irmc_info: node info :param param_path: path of profile :returns: dict object of session info if succeed { 'Session': { 'Id': id 'Status': 'activated' ... } } :raises: SCCIClientError if SCCI failed """ # Send POST request to the server # NOTE: This task may take time, so set a timeout _irmc_info = dict(irmc_info) _irmc_info['irmc_client_timeout'] = PROFILE_CREATE_TIMEOUT resp = elcm_request(_irmc_info, method='POST', path=URL_PATH_PROFILE_MGMT + 'get', params={'PARAM_PATH': param_path}) if resp.status_code == 202: return _parse_elcm_response_body_as_json(resp) # depends on [control=['if'], data=[]] else: raise scci.SCCIClientError('Failed to create profile for path "%(param_path)s" with error code %(error)s' % {'param_path': param_path, 'error': resp.status_code})
def drop_columns(self, col):
    """
    Drop a column from the cleaned dataframe in place.

    Parameters
    ----------
    col : str
        Column to drop.

    Raises
    ------
    KeyError
        If ``col`` is not present in ``self.cleaned_data``.
    """
    # The previous ``try/except Exception as e: raise e`` re-raised the
    # exact same exception without adding any context, so it was pure
    # noise; let pandas errors propagate unchanged.
    self.cleaned_data.drop(col, axis=1, inplace=True)
def function[drop_columns, parameter[self, col]]: constant[ Drop columns in dataframe. Parameters ---------- col : str Column to drop. ] <ast.Try object at 0x7da18bc70220>
keyword[def] identifier[drop_columns] ( identifier[self] , identifier[col] ): literal[string] keyword[try] : identifier[self] . identifier[cleaned_data] . identifier[drop] ( identifier[col] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[raise] identifier[e]
def drop_columns(self, col): """ Drop columns in dataframe. Parameters ---------- col : str Column to drop. """ try: self.cleaned_data.drop(col, axis=1, inplace=True) # depends on [control=['try'], data=[]] except Exception as e: raise e # depends on [control=['except'], data=['e']]
def to_dict(self):
    """Return a dictionary representation of the dataset.

    The base keys (``individual_doses`` and ``responses``) are merged with
    ``self.kwargs``; keyword entries win on key collisions, exactly as
    ``dict.update`` behaves.
    """
    result = {
        'individual_doses': self.individual_doses,
        'responses': self.responses,
    }
    result.update(self.kwargs)
    return result
def function[to_dict, parameter[self]]: constant[ Return a dictionary representation of the dataset. ] variable[d] assign[=] call[name[dict], parameter[]] call[name[d].update, parameter[name[self].kwargs]] return[name[d]]
keyword[def] identifier[to_dict] ( identifier[self] ): literal[string] identifier[d] = identifier[dict] ( identifier[individual_doses] = identifier[self] . identifier[individual_doses] , identifier[responses] = identifier[self] . identifier[responses] ) identifier[d] . identifier[update] ( identifier[self] . identifier[kwargs] ) keyword[return] identifier[d]
def to_dict(self): """ Return a dictionary representation of the dataset. """ d = dict(individual_doses=self.individual_doses, responses=self.responses) d.update(self.kwargs) return d
def create_folder(self, name, parent):
    '''Create a new folder.

    Args:
        name (str): The name of the folder.
        parent (str): The UUID of the parent entity. The parent must be
            a project or a folder.

    Returns:
        A dictionary of details of the created folder::

            {
            u'created_by': u'303447',
            u'created_on': u'2017-03-21T14:06:32.293902Z',
            u'description': u'',
            u'entity_type': u'folder',
            u'modified_by': u'303447',
            u'modified_on': u'2017-03-21T14:06:32.293967Z',
            u'name': u'myfolder',
            u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',
            u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'
            }

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    # Validate early so an obviously malformed parent never reaches the
    # server.
    if not is_valid_uuid(parent):
        raise StorageArgumentException(
            'Invalid UUID for parent: {0}'.format(parent))
    # NOTE: ``locals()`` captures ``self``, ``name`` and ``parent`` by their
    # current names, so the parameter names double as the request payload
    # keys (``_prep_params`` presumably drops ``self`` -- verify there).
    # Do not rename locals in this function.
    return self._authenticated_request \
        .to_endpoint('folder/') \
        .with_json_body(self._prep_params(locals())) \
        .return_body() \
        .post()
def function[create_folder, parameter[self, name, parent]]: constant[Create a new folder. Args: name (srt): The name of the folder. parent (str): The UUID of the parent entity. The parent must be a project or a folder. Returns: A dictionary of details of the created folder:: { u'created_by': u'303447', u'created_on': u'2017-03-21T14:06:32.293902Z', u'description': u'', u'entity_type': u'folder', u'modified_by': u'303447', u'modified_on': u'2017-03-21T14:06:32.293967Z', u'name': u'myfolder', u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682', u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40' } Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes ] if <ast.UnaryOp object at 0x7da18eb572b0> begin[:] <ast.Raise object at 0x7da18eb547c0> return[call[call[call[call[name[self]._authenticated_request.to_endpoint, parameter[constant[folder/]]].with_json_body, parameter[call[name[self]._prep_params, parameter[call[name[locals], parameter[]]]]]].return_body, parameter[]].post, parameter[]]]
keyword[def] identifier[create_folder] ( identifier[self] , identifier[name] , identifier[parent] ): literal[string] keyword[if] keyword[not] identifier[is_valid_uuid] ( identifier[parent] ): keyword[raise] identifier[StorageArgumentException] ( literal[string] . identifier[format] ( identifier[parent] )) keyword[return] identifier[self] . identifier[_authenticated_request] . identifier[to_endpoint] ( literal[string] ). identifier[with_json_body] ( identifier[self] . identifier[_prep_params] ( identifier[locals] ())). identifier[return_body] (). identifier[post] ()
def create_folder(self, name, parent): """Create a new folder. Args: name (srt): The name of the folder. parent (str): The UUID of the parent entity. The parent must be a project or a folder. Returns: A dictionary of details of the created folder:: { u'created_by': u'303447', u'created_on': u'2017-03-21T14:06:32.293902Z', u'description': u'', u'entity_type': u'folder', u'modified_by': u'303447', u'modified_on': u'2017-03-21T14:06:32.293967Z', u'name': u'myfolder', u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682', u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40' } Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes """ if not is_valid_uuid(parent): raise StorageArgumentException('Invalid UUID for parent: {0}'.format(parent)) # depends on [control=['if'], data=[]] return self._authenticated_request.to_endpoint('folder/').with_json_body(self._prep_params(locals())).return_body().post()
def _handle_message(self, data):
    """Dispatch one incoming message by its ``type`` field.

    Side effects on client state:
      * MSG_SAMPLE_FORMAT marks the connection as established.
      * MSG_HEADER pushes the codec header to the audio source and
        starts playback.
      * MSG_WIRE_CHUNK queues audio chunks and may set the buffered flag.
      * Regardless of type, an empty queue clears the buffered flag.
    """
    if data.type == MSG_SERVER_SETTINGS:
        # Nothing to act on yet; just record the settings payload.
        _LOGGER.info(data.payload)
    elif data.type == MSG_SAMPLE_FORMAT:
        _LOGGER.info(data.payload)
        # The first sample-format message doubles as the "connected" signal.
        self._connected = True
    elif data.type == MSG_TIME:
        if not self._buffered:
            _LOGGER.info('Buffering')
    elif data.type == MSG_HEADER:
        # Push to app source and start playing.
        _LOGGER.info(data.payload.codec.decode('ascii'))
        self._source.push(data.payload.header)
        self._source.play()
    elif data.type == MSG_WIRE_CHUNK:
        # Add chunks to play queue; consider ourselves buffered once the
        # queue is deeper than BUFFER_SIZE.
        self._buffer.put(data.payload.chunk)
        if self._buffer.qsize() > BUFFER_SIZE:
            self._buffered = True
    # Intentionally outside the elif chain: runs for every message type,
    # so an empty queue always downgrades us to "not buffered".
    if self._buffer.empty():
        self._buffered = False
def function[_handle_message, parameter[self, data]]: constant[ Handle messages. ] if compare[name[data].type equal[==] name[MSG_SERVER_SETTINGS]] begin[:] call[name[_LOGGER].info, parameter[name[data].payload]]
keyword[def] identifier[_handle_message] ( identifier[self] , identifier[data] ): literal[string] keyword[if] identifier[data] . identifier[type] == identifier[MSG_SERVER_SETTINGS] : identifier[_LOGGER] . identifier[info] ( identifier[data] . identifier[payload] ) keyword[elif] identifier[data] . identifier[type] == identifier[MSG_SAMPLE_FORMAT] : identifier[_LOGGER] . identifier[info] ( identifier[data] . identifier[payload] ) identifier[self] . identifier[_connected] = keyword[True] keyword[elif] identifier[data] . identifier[type] == identifier[MSG_TIME] : keyword[if] keyword[not] identifier[self] . identifier[_buffered] : identifier[_LOGGER] . identifier[info] ( literal[string] ) keyword[elif] identifier[data] . identifier[type] == identifier[MSG_HEADER] : identifier[_LOGGER] . identifier[info] ( identifier[data] . identifier[payload] . identifier[codec] . identifier[decode] ( literal[string] )) identifier[self] . identifier[_source] . identifier[push] ( identifier[data] . identifier[payload] . identifier[header] ) identifier[self] . identifier[_source] . identifier[play] () keyword[elif] identifier[data] . identifier[type] == identifier[MSG_WIRE_CHUNK] : identifier[self] . identifier[_buffer] . identifier[put] ( identifier[data] . identifier[payload] . identifier[chunk] ) keyword[if] identifier[self] . identifier[_buffer] . identifier[qsize] ()> identifier[BUFFER_SIZE] : identifier[self] . identifier[_buffered] = keyword[True] keyword[if] identifier[self] . identifier[_buffer] . identifier[empty] (): identifier[self] . identifier[_buffered] = keyword[False]
def _handle_message(self, data): """ Handle messages. """ if data.type == MSG_SERVER_SETTINGS: _LOGGER.info(data.payload) # depends on [control=['if'], data=[]] elif data.type == MSG_SAMPLE_FORMAT: _LOGGER.info(data.payload) self._connected = True # depends on [control=['if'], data=[]] elif data.type == MSG_TIME: if not self._buffered: _LOGGER.info('Buffering') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif data.type == MSG_HEADER: # Push to app source and start playing. _LOGGER.info(data.payload.codec.decode('ascii')) self._source.push(data.payload.header) self._source.play() # depends on [control=['if'], data=[]] elif data.type == MSG_WIRE_CHUNK: # Add chunks to play queue. self._buffer.put(data.payload.chunk) if self._buffer.qsize() > BUFFER_SIZE: self._buffered = True # depends on [control=['if'], data=[]] if self._buffer.empty(): self._buffered = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def limit_mem(limit=(4 * 1024**3)):
    """Set the soft RLIMIT_DATA of the current process.

    ``limit`` defaults to 4 GiB; the hard limit is left untouched.
    """
    log = logging.getLogger(__name__)
    rlimit_kind = resource.RLIMIT_DATA
    old_soft, hard = resource.getrlimit(rlimit_kind)
    resource.setrlimit(rlimit_kind, (limit, hard))
    new_soft = resource.getrlimit(rlimit_kind)[0]
    # Post-condition sanity check (stripped under ``python -O``).
    assert new_soft == limit
    log.debug('Set soft memory limit: %s => %s', old_soft, new_soft)
def function[limit_mem, parameter[limit]]: constant[Set soft memory limit] variable[rsrc] assign[=] name[resource].RLIMIT_DATA <ast.Tuple object at 0x7da1b1b7de40> assign[=] call[name[resource].getrlimit, parameter[name[rsrc]]] call[name[resource].setrlimit, parameter[name[rsrc], tuple[[<ast.Name object at 0x7da1b1b7e500>, <ast.Name object at 0x7da1b1b7c310>]]]] <ast.Tuple object at 0x7da1b1b7f250> assign[=] call[name[resource].getrlimit, parameter[name[rsrc]]] assert[compare[name[softnew] equal[==] name[limit]]] variable[_log] assign[=] call[name[logging].getLogger, parameter[name[__name__]]] call[name[_log].debug, parameter[constant[Set soft memory limit: %s => %s], name[soft], name[softnew]]]
keyword[def] identifier[limit_mem] ( identifier[limit] =( literal[int] * literal[int] ** literal[int] )): literal[string] identifier[rsrc] = identifier[resource] . identifier[RLIMIT_DATA] identifier[soft] , identifier[hard] = identifier[resource] . identifier[getrlimit] ( identifier[rsrc] ) identifier[resource] . identifier[setrlimit] ( identifier[rsrc] ,( identifier[limit] , identifier[hard] )) identifier[softnew] , identifier[_] = identifier[resource] . identifier[getrlimit] ( identifier[rsrc] ) keyword[assert] identifier[softnew] == identifier[limit] identifier[_log] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[_log] . identifier[debug] ( literal[string] , identifier[soft] , identifier[softnew] )
def limit_mem(limit=4 * 1024 ** 3): """Set soft memory limit""" rsrc = resource.RLIMIT_DATA (soft, hard) = resource.getrlimit(rsrc) resource.setrlimit(rsrc, (limit, hard)) # 4GB (softnew, _) = resource.getrlimit(rsrc) assert softnew == limit _log = logging.getLogger(__name__) _log.debug('Set soft memory limit: %s => %s', soft, softnew)
def get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
        self, assertion_tag):
    """
    Makes an encrypted assertion only containing self contained namespaces.

    :param assertion_tag: Tag for the assertion to be transformed.
    :return: A new samlp.Response in string representation.
    """
    # Collect the namespace prefixes used inside the embedded assertion so
    # they can be declared locally on that element.
    prefix_map = self.get_prefix_map(
        [self.encrypted_assertion._to_element_tree().find(assertion_tag)])
    # Build a fresh element tree of the whole response; note that
    # ``_to_element_tree()`` is called again below on the encrypted
    # assertion only to obtain its tag for the lookup in this new tree.
    tree = self._to_element_tree()
    self.set_prefixes(
        tree.find(
            self.encrypted_assertion._to_element_tree().tag).find(
            assertion_tag),
        prefix_map)
    # Serialize as bytes first (encoding="UTF-8"), then decode to text.
    return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8')
def function[get_xml_string_with_self_contained_assertion_within_encrypted_assertion, parameter[self, assertion_tag]]: constant[ Makes a encrypted assertion only containing self contained namespaces. :param assertion_tag: Tag for the assertion to be transformed. :return: A new samlp.Resonse in string representation. ] variable[prefix_map] assign[=] call[name[self].get_prefix_map, parameter[list[[<ast.Call object at 0x7da18f812b60>]]]] variable[tree] assign[=] call[name[self]._to_element_tree, parameter[]] call[name[self].set_prefixes, parameter[call[call[name[tree].find, parameter[call[name[self].encrypted_assertion._to_element_tree, parameter[]].tag]].find, parameter[name[assertion_tag]]], name[prefix_map]]] return[call[call[name[ElementTree].tostring, parameter[name[tree]]].decode, parameter[constant[utf-8]]]]
keyword[def] identifier[get_xml_string_with_self_contained_assertion_within_encrypted_assertion] ( identifier[self] , identifier[assertion_tag] ): literal[string] identifier[prefix_map] = identifier[self] . identifier[get_prefix_map] ( [ identifier[self] . identifier[encrypted_assertion] . identifier[_to_element_tree] (). identifier[find] ( identifier[assertion_tag] )]) identifier[tree] = identifier[self] . identifier[_to_element_tree] () identifier[self] . identifier[set_prefixes] ( identifier[tree] . identifier[find] ( identifier[self] . identifier[encrypted_assertion] . identifier[_to_element_tree] (). identifier[tag] ). identifier[find] ( identifier[assertion_tag] ), identifier[prefix_map] ) keyword[return] identifier[ElementTree] . identifier[tostring] ( identifier[tree] , identifier[encoding] = literal[string] ). identifier[decode] ( literal[string] )
def get_xml_string_with_self_contained_assertion_within_encrypted_assertion(self, assertion_tag): """ Makes a encrypted assertion only containing self contained namespaces. :param assertion_tag: Tag for the assertion to be transformed. :return: A new samlp.Resonse in string representation. """ prefix_map = self.get_prefix_map([self.encrypted_assertion._to_element_tree().find(assertion_tag)]) tree = self._to_element_tree() self.set_prefixes(tree.find(self.encrypted_assertion._to_element_tree().tag).find(assertion_tag), prefix_map) return ElementTree.tostring(tree, encoding='UTF-8').decode('utf-8')
def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
    """
    Get all filenames under ``dirname`` that match ``match_regex`` or have
    file extension equal to ``extension``, optionally prepending the full
    path.

    Args:
        dirname (str): /path/to/dir on disk where files to read are saved
        full_path (bool): if falsy, yield bare filenames; if truthy, yield
            filenames joined with the path, as ``os.path.join(dirname, fname)``
        match_regex (str): include files whose names match this regex pattern
        extension (str): if files only of a certain type are wanted,
            specify the file extension (e.g. ".txt")

    Yields:
        str: next matching filename (sorted order)

    Raises:
        OSError: if ``dirname`` does not exist
    """
    if not os.path.exists(dirname):
        raise OSError('directory "{}" does not exist'.format(dirname))
    pattern = re.compile(match_regex) if match_regex else None
    for filename in sorted(os.listdir(dirname)):
        if extension and os.path.splitext(filename)[-1] != extension:
            continue
        if pattern and not pattern.search(filename):
            continue
        # Accept any truthy value for full_path: the old ``is True``
        # identity check silently ignored truthy non-bool arguments.
        if full_path:
            yield os.path.join(dirname, filename)
        else:
            yield filename
def function[get_filenames, parameter[dirname, full_path, match_regex, extension]]: constant[ Get all filenames under ``dirname`` that match ``match_regex`` or have file extension equal to ``extension``, optionally prepending the full path. Args: dirname (str): /path/to/dir on disk where files to read are saved full_path (bool): if False, return filenames without path; if True, return filenames with path, as ``os.path.join(dirname, fname)`` match_regex (str): include files whose names match this regex pattern extension (str): if files only of a certain type are wanted, specify the file extension (e.g. ".txt") Yields: str: next matching filename ] if <ast.UnaryOp object at 0x7da1b1eb6260> begin[:] <ast.Raise object at 0x7da1b1eb6380> variable[match_regex] assign[=] <ast.IfExp object at 0x7da1b1eb6a70> for taget[name[filename]] in starred[call[name[sorted], parameter[call[name[os].listdir, parameter[name[dirname]]]]]] begin[:] if <ast.BoolOp object at 0x7da1b1eb65f0> begin[:] continue if <ast.BoolOp object at 0x7da1b1eb5ff0> begin[:] continue if compare[name[full_path] is constant[True]] begin[:] <ast.Yield object at 0x7da1b1eb5a50>
keyword[def] identifier[get_filenames] ( identifier[dirname] , identifier[full_path] = keyword[False] , identifier[match_regex] = keyword[None] , identifier[extension] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[dirname] ): keyword[raise] identifier[OSError] ( literal[string] . identifier[format] ( identifier[dirname] )) identifier[match_regex] = identifier[re] . identifier[compile] ( identifier[match_regex] ) keyword[if] identifier[match_regex] keyword[else] keyword[None] keyword[for] identifier[filename] keyword[in] identifier[sorted] ( identifier[os] . identifier[listdir] ( identifier[dirname] )): keyword[if] identifier[extension] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[- literal[int] ]== identifier[extension] : keyword[continue] keyword[if] identifier[match_regex] keyword[and] keyword[not] identifier[match_regex] . identifier[search] ( identifier[filename] ): keyword[continue] keyword[if] identifier[full_path] keyword[is] keyword[True] : keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , identifier[filename] ) keyword[else] : keyword[yield] identifier[filename]
def get_filenames(dirname, full_path=False, match_regex=None, extension=None): """ Get all filenames under ``dirname`` that match ``match_regex`` or have file extension equal to ``extension``, optionally prepending the full path. Args: dirname (str): /path/to/dir on disk where files to read are saved full_path (bool): if False, return filenames without path; if True, return filenames with path, as ``os.path.join(dirname, fname)`` match_regex (str): include files whose names match this regex pattern extension (str): if files only of a certain type are wanted, specify the file extension (e.g. ".txt") Yields: str: next matching filename """ if not os.path.exists(dirname): raise OSError('directory "{}" does not exist'.format(dirname)) # depends on [control=['if'], data=[]] match_regex = re.compile(match_regex) if match_regex else None for filename in sorted(os.listdir(dirname)): if extension and (not os.path.splitext(filename)[-1] == extension): continue # depends on [control=['if'], data=[]] if match_regex and (not match_regex.search(filename)): continue # depends on [control=['if'], data=[]] if full_path is True: yield os.path.join(dirname, filename) # depends on [control=['if'], data=[]] else: yield filename # depends on [control=['for'], data=['filename']]
def load(patterns, full_reindex):
    '''Load one or more CADA CSV files matching the given glob patterns.

    Each matched file is parsed row by row into advice records; progress
    is echoed as dots (skipped rows as "s").  Unless ``full_reindex`` is
    set, each record is indexed immediately; otherwise a single reindex
    is run at the end.
    '''
    header('Loading CSV files')
    for pattern in patterns:
        for filename in iglob(pattern):
            echo('Loading {}'.format(white(filename)))
            with open(filename) as f:
                # NOTE: ``csv`` here is the project's CSV helper module
                # (it also provides ``from_row`` below), not just stdlib.
                reader = csv.reader(f)
                # Skip header
                # NOTE(review): ``reader.next()`` is Python 2 only; on
                # Python 3 this would need ``next(reader)`` -- confirm the
                # supported interpreter version.
                reader.next()
                # NOTE(review): if a file contains only a header row the
                # loop never runs and ``idx``/``skipped`` below raise
                # NameError -- worth guarding.
                for idx, row in enumerate(reader, 1):
                    try:
                        advice = csv.from_row(row)
                        skipped = False
                        if not full_reindex:
                            index(advice)
                        # Print a dot per row, the row count every 50th row.
                        echo('.' if idx % 50 else white(idx), nl=False)
                    except Exception:
                        # Broad catch: a bad row is reported ("s") and
                        # skipped rather than aborting the whole load.
                        echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
                        skipped = True
                if skipped:
                    echo(white('{}(s)'.format(idx)) if idx % 50 else '')
                else:
                    echo(white(idx) if idx % 50 else '')
                success('Processed {0} rows'.format(idx))
    if full_reindex:
        reindex()
def function[load, parameter[patterns, full_reindex]]: constant[ Load one or more CADA CSV files matching patterns ] call[name[header], parameter[constant[Loading CSV files]]] for taget[name[pattern]] in starred[name[patterns]] begin[:] for taget[name[filename]] in starred[call[name[iglob], parameter[name[pattern]]]] begin[:] call[name[echo], parameter[call[constant[Loading {}].format, parameter[call[name[white], parameter[name[filename]]]]]]] with call[name[open], parameter[name[filename]]] begin[:] variable[reader] assign[=] call[name[csv].reader, parameter[name[f]]] call[name[reader].next, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0b3bca0>, <ast.Name object at 0x7da1b0b39030>]]] in starred[call[name[enumerate], parameter[name[reader], constant[1]]]] begin[:] <ast.Try object at 0x7da1b0b39db0> if name[skipped] begin[:] call[name[echo], parameter[<ast.IfExp object at 0x7da20c6aab90>]] call[name[success], parameter[call[constant[Processed {0} rows].format, parameter[name[idx]]]]] if name[full_reindex] begin[:] call[name[reindex], parameter[]]
keyword[def] identifier[load] ( identifier[patterns] , identifier[full_reindex] ): literal[string] identifier[header] ( literal[string] ) keyword[for] identifier[pattern] keyword[in] identifier[patterns] : keyword[for] identifier[filename] keyword[in] identifier[iglob] ( identifier[pattern] ): identifier[echo] ( literal[string] . identifier[format] ( identifier[white] ( identifier[filename] ))) keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[f] : identifier[reader] = identifier[csv] . identifier[reader] ( identifier[f] ) identifier[reader] . identifier[next] () keyword[for] identifier[idx] , identifier[row] keyword[in] identifier[enumerate] ( identifier[reader] , literal[int] ): keyword[try] : identifier[advice] = identifier[csv] . identifier[from_row] ( identifier[row] ) identifier[skipped] = keyword[False] keyword[if] keyword[not] identifier[full_reindex] : identifier[index] ( identifier[advice] ) identifier[echo] ( literal[string] keyword[if] identifier[idx] % literal[int] keyword[else] identifier[white] ( identifier[idx] ), identifier[nl] = keyword[False] ) keyword[except] identifier[Exception] : identifier[echo] ( identifier[cyan] ( literal[string] ) keyword[if] identifier[idx] % literal[int] keyword[else] identifier[white] ( literal[string] . identifier[format] ( identifier[idx] )), identifier[nl] = keyword[False] ) identifier[skipped] = keyword[True] keyword[if] identifier[skipped] : identifier[echo] ( identifier[white] ( literal[string] . identifier[format] ( identifier[idx] )) keyword[if] identifier[idx] % literal[int] keyword[else] literal[string] ) keyword[else] : identifier[echo] ( identifier[white] ( identifier[idx] ) keyword[if] identifier[idx] % literal[int] keyword[else] literal[string] ) identifier[success] ( literal[string] . identifier[format] ( identifier[idx] )) keyword[if] identifier[full_reindex] : identifier[reindex] ()
def load(patterns, full_reindex): """ Load one or more CADA CSV files matching patterns """ header('Loading CSV files') for pattern in patterns: for filename in iglob(pattern): echo('Loading {}'.format(white(filename))) with open(filename) as f: reader = csv.reader(f) # Skip header reader.next() for (idx, row) in enumerate(reader, 1): try: advice = csv.from_row(row) skipped = False if not full_reindex: index(advice) # depends on [control=['if'], data=[]] echo('.' if idx % 50 else white(idx), nl=False) # depends on [control=['try'], data=[]] except Exception: echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False) skipped = True # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] if skipped: echo(white('{}(s)'.format(idx)) if idx % 50 else '') # depends on [control=['if'], data=[]] else: echo(white(idx) if idx % 50 else '') success('Processed {0} rows'.format(idx)) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=['pattern']] if full_reindex: reindex() # depends on [control=['if'], data=[]]
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1): r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." 
Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007. ''' G_tp = m/(pi/4*D**2) # Actual Liquid flow v_l = m*(1-x)/rhol/(pi/4*D**2) Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D) fd_l = friction_factor(Re=Re_l, eD=roughness/D) dP_l = fd_l*L/D*(0.5*rhol*v_l**2) # Actual gas flow v_g = m*x/rhog/(pi/4*D**2) Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D) fd_g = friction_factor(Re=Re_g, eD=roughness/D) dP_g = fd_g*L/D*(0.5*rhog*v_g**2) X = (dP_l/dP_g)**0.5 if G_tp >= 200: phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45 else: # Liquid-only flow; Re_lo is oddly needed v_lo = m/rhol/(pi/4*D**2) Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D) C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1 phi_g2 = 1 + C*X + X**2 return dP_g*phi_g2
def function[Wang_Chiang_Lu, parameter[m, x, rhol, rhog, mul, mug, D, roughness, L]]: constant[Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{g} \phi_g^2 .. math:: \phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s .. math:: \phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g} \right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1} .. math:: X^2 = \frac{\Delta P_l}{\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." 
Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007. ] variable[G_tp] assign[=] binary_operation[name[m] / binary_operation[binary_operation[name[pi] / constant[4]] * binary_operation[name[D] ** constant[2]]]] variable[v_l] assign[=] binary_operation[binary_operation[binary_operation[name[m] * binary_operation[constant[1] - name[x]]] / name[rhol]] / binary_operation[binary_operation[name[pi] / constant[4]] * binary_operation[name[D] ** constant[2]]]] variable[Re_l] assign[=] call[name[Reynolds], parameter[]] variable[fd_l] assign[=] call[name[friction_factor], parameter[]] variable[dP_l] assign[=] binary_operation[binary_operation[binary_operation[name[fd_l] * name[L]] / name[D]] * binary_operation[binary_operation[constant[0.5] * name[rhol]] * binary_operation[name[v_l] ** constant[2]]]] variable[v_g] assign[=] binary_operation[binary_operation[binary_operation[name[m] * name[x]] / name[rhog]] / binary_operation[binary_operation[name[pi] / constant[4]] * binary_operation[name[D] ** constant[2]]]] variable[Re_g] assign[=] call[name[Reynolds], parameter[]] variable[fd_g] assign[=] call[name[friction_factor], parameter[]] variable[dP_g] assign[=] binary_operation[binary_operation[binary_operation[name[fd_g] * name[L]] / name[D]] * binary_operation[binary_operation[constant[0.5] * name[rhog]] * binary_operation[name[v_g] ** constant[2]]]] variable[X] assign[=] binary_operation[binary_operation[name[dP_l] / name[dP_g]] ** constant[0.5]] if compare[name[G_tp] greater_or_equal[>=] constant[200]] begin[:] variable[phi_g2] assign[=] binary_operation[binary_operation[constant[1] + binary_operation[constant[9.397] * binary_operation[name[X] ** constant[0.62]]]] + binary_operation[constant[0.564] * binary_operation[name[X] ** constant[2.45]]]] return[binary_operation[name[dP_g] * name[phi_g2]]]
keyword[def] identifier[Wang_Chiang_Lu] ( identifier[m] , identifier[x] , identifier[rhol] , identifier[rhog] , identifier[mul] , identifier[mug] , identifier[D] , identifier[roughness] = literal[int] , identifier[L] = literal[int] ): literal[string] identifier[G_tp] = identifier[m] /( identifier[pi] / literal[int] * identifier[D] ** literal[int] ) identifier[v_l] = identifier[m] *( literal[int] - identifier[x] )/ identifier[rhol] /( identifier[pi] / literal[int] * identifier[D] ** literal[int] ) identifier[Re_l] = identifier[Reynolds] ( identifier[V] = identifier[v_l] , identifier[rho] = identifier[rhol] , identifier[mu] = identifier[mul] , identifier[D] = identifier[D] ) identifier[fd_l] = identifier[friction_factor] ( identifier[Re] = identifier[Re_l] , identifier[eD] = identifier[roughness] / identifier[D] ) identifier[dP_l] = identifier[fd_l] * identifier[L] / identifier[D] *( literal[int] * identifier[rhol] * identifier[v_l] ** literal[int] ) identifier[v_g] = identifier[m] * identifier[x] / identifier[rhog] /( identifier[pi] / literal[int] * identifier[D] ** literal[int] ) identifier[Re_g] = identifier[Reynolds] ( identifier[V] = identifier[v_g] , identifier[rho] = identifier[rhog] , identifier[mu] = identifier[mug] , identifier[D] = identifier[D] ) identifier[fd_g] = identifier[friction_factor] ( identifier[Re] = identifier[Re_g] , identifier[eD] = identifier[roughness] / identifier[D] ) identifier[dP_g] = identifier[fd_g] * identifier[L] / identifier[D] *( literal[int] * identifier[rhog] * identifier[v_g] ** literal[int] ) identifier[X] =( identifier[dP_l] / identifier[dP_g] )** literal[int] keyword[if] identifier[G_tp] >= literal[int] : identifier[phi_g2] = literal[int] + literal[int] * identifier[X] ** literal[int] + literal[int] * identifier[X] ** literal[int] keyword[else] : identifier[v_lo] = identifier[m] / identifier[rhol] /( identifier[pi] / literal[int] * identifier[D] ** literal[int] ) identifier[Re_lo] = identifier[Reynolds] ( identifier[V] = 
identifier[v_lo] , identifier[rho] = identifier[rhol] , identifier[mu] = identifier[mul] , identifier[D] = identifier[D] ) identifier[C] = literal[int] * identifier[X] ** literal[int] * identifier[Re_lo] ** literal[int] *( identifier[rhol] / identifier[rhog] )**- literal[int] *( identifier[mul] / identifier[mug] )** literal[int] identifier[phi_g2] = literal[int] + identifier[C] * identifier[X] + identifier[X] ** literal[int] keyword[return] identifier[dP_g] * identifier[phi_g2]
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1): """Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997) correlation given in [1]_ and reviewed in [2]_ and [3]_. .. math:: \\Delta P = \\Delta P_{g} \\phi_g^2 .. math:: \\phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \\text{ for } G >= 200 kg/m^2/s .. math:: \\phi_g^2 = 1 + CX + X^2 \\text{ for lower mass fluxes} .. math:: C = 0.000004566X^{0.128}Re_{lo}^{0.938}\\left(\\frac{\\rho_l}{\\rho_g} \\right)^{-2.15}\\left(\\frac{\\mu_l}{\\mu_g}\\right)^{5.1} .. math:: X^2 = \\frac{\\Delta P_l}{\\Delta P_g} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Examples -------- >>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6, ... mug=14E-6, D=0.05, roughness=0, L=1) 448.29981978639154 References ---------- .. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a 6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4 (November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11-12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen. "Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow in Pipes." 
Nuclear Engineering and Design, SI\u202f: CFD4NRS-3, 253 (December 2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007. """ G_tp = m / (pi / 4 * D ** 2) # Actual Liquid flow v_l = m * (1 - x) / rhol / (pi / 4 * D ** 2) Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D) fd_l = friction_factor(Re=Re_l, eD=roughness / D) dP_l = fd_l * L / D * (0.5 * rhol * v_l ** 2) # Actual gas flow v_g = m * x / rhog / (pi / 4 * D ** 2) Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D) fd_g = friction_factor(Re=Re_g, eD=roughness / D) dP_g = fd_g * L / D * (0.5 * rhog * v_g ** 2) X = (dP_l / dP_g) ** 0.5 if G_tp >= 200: phi_g2 = 1 + 9.397 * X ** 0.62 + 0.564 * X ** 2.45 # depends on [control=['if'], data=[]] else: # Liquid-only flow; Re_lo is oddly needed v_lo = m / rhol / (pi / 4 * D ** 2) Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D) C = 4.566e-06 * X ** 0.128 * Re_lo ** 0.938 * (rhol / rhog) ** (-2.15) * (mul / mug) ** 5.1 phi_g2 = 1 + C * X + X ** 2 return dP_g * phi_g2
def get_logger(cls): """ Initializes and returns our logger instance. """ if cls.logger is None: class NullHandler(logging.Handler): def emit(self, record): pass cls.logger = logging.getLogger('django_auth_ldap') cls.logger.addHandler(NullHandler()) return cls.logger
def function[get_logger, parameter[cls]]: constant[ Initializes and returns our logger instance. ] if compare[name[cls].logger is constant[None]] begin[:] class class[NullHandler, parameter[]] begin[:] def function[emit, parameter[self, record]]: pass name[cls].logger assign[=] call[name[logging].getLogger, parameter[constant[django_auth_ldap]]] call[name[cls].logger.addHandler, parameter[call[name[NullHandler], parameter[]]]] return[name[cls].logger]
keyword[def] identifier[get_logger] ( identifier[cls] ): literal[string] keyword[if] identifier[cls] . identifier[logger] keyword[is] keyword[None] : keyword[class] identifier[NullHandler] ( identifier[logging] . identifier[Handler] ): keyword[def] identifier[emit] ( identifier[self] , identifier[record] ): keyword[pass] identifier[cls] . identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] ) identifier[cls] . identifier[logger] . identifier[addHandler] ( identifier[NullHandler] ()) keyword[return] identifier[cls] . identifier[logger]
def get_logger(cls): """ Initializes and returns our logger instance. """ if cls.logger is None: class NullHandler(logging.Handler): def emit(self, record): pass cls.logger = logging.getLogger('django_auth_ldap') cls.logger.addHandler(NullHandler()) # depends on [control=['if'], data=[]] return cls.logger
def reduce_list_size(li): """Return two lists - the last N items of li whose total size is less than MAX_SIZE - the rest of the original list li """ # sys.getsizeof is nearly useless. All our data is stringable so rather # use that as a measure of size. size = len(repr(li)) keep = li toss = [] n = len(li) decrement_by = max(n / 10, 10) while (size >= MAX_SIZE) and (n > 0): n -= decrement_by toss = li[:-n] keep = li[-n:] size = len(repr(keep)) return keep, toss
def function[reduce_list_size, parameter[li]]: constant[Return two lists - the last N items of li whose total size is less than MAX_SIZE - the rest of the original list li ] variable[size] assign[=] call[name[len], parameter[call[name[repr], parameter[name[li]]]]] variable[keep] assign[=] name[li] variable[toss] assign[=] list[[]] variable[n] assign[=] call[name[len], parameter[name[li]]] variable[decrement_by] assign[=] call[name[max], parameter[binary_operation[name[n] / constant[10]], constant[10]]] while <ast.BoolOp object at 0x7da204344460> begin[:] <ast.AugAssign object at 0x7da204347dc0> variable[toss] assign[=] call[name[li]][<ast.Slice object at 0x7da204344ac0>] variable[keep] assign[=] call[name[li]][<ast.Slice object at 0x7da204344940>] variable[size] assign[=] call[name[len], parameter[call[name[repr], parameter[name[keep]]]]] return[tuple[[<ast.Name object at 0x7da204344790>, <ast.Name object at 0x7da204345c30>]]]
keyword[def] identifier[reduce_list_size] ( identifier[li] ): literal[string] identifier[size] = identifier[len] ( identifier[repr] ( identifier[li] )) identifier[keep] = identifier[li] identifier[toss] =[] identifier[n] = identifier[len] ( identifier[li] ) identifier[decrement_by] = identifier[max] ( identifier[n] / literal[int] , literal[int] ) keyword[while] ( identifier[size] >= identifier[MAX_SIZE] ) keyword[and] ( identifier[n] > literal[int] ): identifier[n] -= identifier[decrement_by] identifier[toss] = identifier[li] [:- identifier[n] ] identifier[keep] = identifier[li] [- identifier[n] :] identifier[size] = identifier[len] ( identifier[repr] ( identifier[keep] )) keyword[return] identifier[keep] , identifier[toss]
def reduce_list_size(li): """Return two lists - the last N items of li whose total size is less than MAX_SIZE - the rest of the original list li """ # sys.getsizeof is nearly useless. All our data is stringable so rather # use that as a measure of size. size = len(repr(li)) keep = li toss = [] n = len(li) decrement_by = max(n / 10, 10) while size >= MAX_SIZE and n > 0: n -= decrement_by toss = li[:-n] keep = li[-n:] size = len(repr(keep)) # depends on [control=['while'], data=[]] return (keep, toss)
def ReadTag(buffer, pos): """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. We return the raw bytes of the tag rather than decoding them. The raw bytes can then be used to look up the proper decoder. This effectively allows us to trade some work that would be done in pure-python (decoding a varint) for work that is done in C (searching for a byte string in a hash table). In a low-level language it would be much cheaper to decode the varint and use that, but not in Python. """ start = pos while six.indexbytes(buffer, pos) & 0x80: pos += 1 pos += 1 return (buffer[start:pos], pos)
def function[ReadTag, parameter[buffer, pos]]: constant[Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. We return the raw bytes of the tag rather than decoding them. The raw bytes can then be used to look up the proper decoder. This effectively allows us to trade some work that would be done in pure-python (decoding a varint) for work that is done in C (searching for a byte string in a hash table). In a low-level language it would be much cheaper to decode the varint and use that, but not in Python. ] variable[start] assign[=] name[pos] while binary_operation[call[name[six].indexbytes, parameter[name[buffer], name[pos]]] <ast.BitAnd object at 0x7da2590d6b60> constant[128]] begin[:] <ast.AugAssign object at 0x7da1b20666e0> <ast.AugAssign object at 0x7da204344e80> return[tuple[[<ast.Subscript object at 0x7da204347760>, <ast.Name object at 0x7da2044c1780>]]]
keyword[def] identifier[ReadTag] ( identifier[buffer] , identifier[pos] ): literal[string] identifier[start] = identifier[pos] keyword[while] identifier[six] . identifier[indexbytes] ( identifier[buffer] , identifier[pos] )& literal[int] : identifier[pos] += literal[int] identifier[pos] += literal[int] keyword[return] ( identifier[buffer] [ identifier[start] : identifier[pos] ], identifier[pos] )
def ReadTag(buffer, pos): """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple. We return the raw bytes of the tag rather than decoding them. The raw bytes can then be used to look up the proper decoder. This effectively allows us to trade some work that would be done in pure-python (decoding a varint) for work that is done in C (searching for a byte string in a hash table). In a low-level language it would be much cheaper to decode the varint and use that, but not in Python. """ start = pos while six.indexbytes(buffer, pos) & 128: pos += 1 # depends on [control=['while'], data=[]] pos += 1 return (buffer[start:pos], pos)
def remove_cgc_attachments(self): """ Remove CGC attachments. :return: True if CGC attachments are found and removed, False otherwise :rtype: bool """ cgc_package_list = None cgc_extended_application = None for data in self.data: if data.sort == 'cgc-package-list': cgc_package_list = data elif data.sort == 'cgc-extended-application': cgc_extended_application = data if not cgc_package_list or not cgc_extended_application: return False if cgc_package_list.skip or cgc_extended_application.skip: # they have already been removed # so we still return True to indicate that CGC attachments have been removed return True # there is a single function referencing them cgcpl_memory_data = self.cfg.memory_data.get(cgc_package_list.addr, None) cgcea_memory_data = self.cfg.memory_data.get(cgc_extended_application.addr, None) refs = self.cfg.model.references if cgcpl_memory_data is None or cgcea_memory_data is None: return False if len(refs.data_addr_to_ref[cgcpl_memory_data.addr]) != 1: return False if len(refs.data_addr_to_ref[cgcea_memory_data.addr]) != 1: return False # check if the irsb addresses are the same if next(iter(refs.data_addr_to_ref[cgcpl_memory_data.addr])).block_addr != \ next(iter(refs.data_addr_to_ref[cgcea_memory_data.addr])).block_addr: return False insn_addr = next(iter(refs.data_addr_to_ref[cgcpl_memory_data.addr])).insn_addr # get the basic block cfg_node = self.cfg.get_any_node(insn_addr, anyaddr=True) if not cfg_node: return False func_addr = cfg_node.function_address # this function should be calling another function sub_func_addr = None if func_addr not in self.cfg.functions: return False function = self.cfg.functions[func_addr] # traverse the graph and make sure there is only one call edge calling_targets = [ ] for _, dst, data in function.transition_graph.edges(data=True): if 'type' in data and data['type'] == 'call': calling_targets.append(dst.addr) if len(calling_targets) != 1: return False sub_func_addr = calling_targets[0] # alright. 
We want to nop this function, as well as the subfunction proc = next((p for p in self.procedures if p.addr == func_addr), None) if proc is None: return False subproc = next((p for p in self.procedures if p.addr == sub_func_addr), None) if subproc is None: return False # if those two data entries have any label, we should properly modify them # at this point, we are fairly confident that none of those labels are direct data references to either package # list or extended application has_label = True lowest_address = min(cgc_package_list.addr, cgc_extended_application.addr) for obj in (cgc_package_list, cgc_extended_application): labels = obj.labels for addr, label in labels: if addr != lowest_address: label.base_addr = lowest_address if has_label: # is there any memory data entry that ends right at the lowest address? data = next((d for d in self.data if d.addr is not None and d.addr + d.size == lowest_address), None) if data is None: # since there is no gap between memory data entries (we guarantee that), this can only be that no other # data resides in the same memory region that CGC attachments are in pass else: lbl = self.symbol_manager.addr_to_label[lowest_address][0] if lbl not in data.end_labels: data.end_labels.append(lbl) # practically nop the function proc.asm_code = "\tret\n" subproc.asm_code = "\tret\n" # remove those two data entries cgc_package_list.skip = True cgc_extended_application.skip = True l.info('CGC attachments are removed.') return True
def function[remove_cgc_attachments, parameter[self]]: constant[ Remove CGC attachments. :return: True if CGC attachments are found and removed, False otherwise :rtype: bool ] variable[cgc_package_list] assign[=] constant[None] variable[cgc_extended_application] assign[=] constant[None] for taget[name[data]] in starred[name[self].data] begin[:] if compare[name[data].sort equal[==] constant[cgc-package-list]] begin[:] variable[cgc_package_list] assign[=] name[data] if <ast.BoolOp object at 0x7da2044c1330> begin[:] return[constant[False]] if <ast.BoolOp object at 0x7da2044c1030> begin[:] return[constant[True]] variable[cgcpl_memory_data] assign[=] call[name[self].cfg.memory_data.get, parameter[name[cgc_package_list].addr, constant[None]]] variable[cgcea_memory_data] assign[=] call[name[self].cfg.memory_data.get, parameter[name[cgc_extended_application].addr, constant[None]]] variable[refs] assign[=] name[self].cfg.model.references if <ast.BoolOp object at 0x7da2044c1540> begin[:] return[constant[False]] if compare[call[name[len], parameter[call[name[refs].data_addr_to_ref][name[cgcpl_memory_data].addr]]] not_equal[!=] constant[1]] begin[:] return[constant[False]] if compare[call[name[len], parameter[call[name[refs].data_addr_to_ref][name[cgcea_memory_data].addr]]] not_equal[!=] constant[1]] begin[:] return[constant[False]] if compare[call[name[next], parameter[call[name[iter], parameter[call[name[refs].data_addr_to_ref][name[cgcpl_memory_data].addr]]]]].block_addr not_equal[!=] call[name[next], parameter[call[name[iter], parameter[call[name[refs].data_addr_to_ref][name[cgcea_memory_data].addr]]]]].block_addr] begin[:] return[constant[False]] variable[insn_addr] assign[=] call[name[next], parameter[call[name[iter], parameter[call[name[refs].data_addr_to_ref][name[cgcpl_memory_data].addr]]]]].insn_addr variable[cfg_node] assign[=] call[name[self].cfg.get_any_node, parameter[name[insn_addr]]] if <ast.UnaryOp object at 0x7da2044c2320> begin[:] return[constant[False]] 
variable[func_addr] assign[=] name[cfg_node].function_address variable[sub_func_addr] assign[=] constant[None] if compare[name[func_addr] <ast.NotIn object at 0x7da2590d7190> name[self].cfg.functions] begin[:] return[constant[False]] variable[function] assign[=] call[name[self].cfg.functions][name[func_addr]] variable[calling_targets] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1c320b0>, <ast.Name object at 0x7da1b1c30490>, <ast.Name object at 0x7da1b1c33d00>]]] in starred[call[name[function].transition_graph.edges, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b1c32950> begin[:] call[name[calling_targets].append, parameter[name[dst].addr]] if compare[call[name[len], parameter[name[calling_targets]]] not_equal[!=] constant[1]] begin[:] return[constant[False]] variable[sub_func_addr] assign[=] call[name[calling_targets]][constant[0]] variable[proc] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b1c31a50>, constant[None]]] if compare[name[proc] is constant[None]] begin[:] return[constant[False]] variable[subproc] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b1c33ee0>, constant[None]]] if compare[name[subproc] is constant[None]] begin[:] return[constant[False]] variable[has_label] assign[=] constant[True] variable[lowest_address] assign[=] call[name[min], parameter[name[cgc_package_list].addr, name[cgc_extended_application].addr]] for taget[name[obj]] in starred[tuple[[<ast.Name object at 0x7da1b1c305b0>, <ast.Name object at 0x7da1b1c32800>]]] begin[:] variable[labels] assign[=] name[obj].labels for taget[tuple[[<ast.Name object at 0x7da1b1c33e50>, <ast.Name object at 0x7da1b1c33070>]]] in starred[name[labels]] begin[:] if compare[name[addr] not_equal[!=] name[lowest_address]] begin[:] name[label].base_addr assign[=] name[lowest_address] if name[has_label] begin[:] variable[data] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1b1c305e0>, constant[None]]] if 
compare[name[data] is constant[None]] begin[:] pass name[proc].asm_code assign[=] constant[ ret ] name[subproc].asm_code assign[=] constant[ ret ] name[cgc_package_list].skip assign[=] constant[True] name[cgc_extended_application].skip assign[=] constant[True] call[name[l].info, parameter[constant[CGC attachments are removed.]]] return[constant[True]]
keyword[def] identifier[remove_cgc_attachments] ( identifier[self] ): literal[string] identifier[cgc_package_list] = keyword[None] identifier[cgc_extended_application] = keyword[None] keyword[for] identifier[data] keyword[in] identifier[self] . identifier[data] : keyword[if] identifier[data] . identifier[sort] == literal[string] : identifier[cgc_package_list] = identifier[data] keyword[elif] identifier[data] . identifier[sort] == literal[string] : identifier[cgc_extended_application] = identifier[data] keyword[if] keyword[not] identifier[cgc_package_list] keyword[or] keyword[not] identifier[cgc_extended_application] : keyword[return] keyword[False] keyword[if] identifier[cgc_package_list] . identifier[skip] keyword[or] identifier[cgc_extended_application] . identifier[skip] : keyword[return] keyword[True] identifier[cgcpl_memory_data] = identifier[self] . identifier[cfg] . identifier[memory_data] . identifier[get] ( identifier[cgc_package_list] . identifier[addr] , keyword[None] ) identifier[cgcea_memory_data] = identifier[self] . identifier[cfg] . identifier[memory_data] . identifier[get] ( identifier[cgc_extended_application] . identifier[addr] , keyword[None] ) identifier[refs] = identifier[self] . identifier[cfg] . identifier[model] . identifier[references] keyword[if] identifier[cgcpl_memory_data] keyword[is] keyword[None] keyword[or] identifier[cgcea_memory_data] keyword[is] keyword[None] : keyword[return] keyword[False] keyword[if] identifier[len] ( identifier[refs] . identifier[data_addr_to_ref] [ identifier[cgcpl_memory_data] . identifier[addr] ])!= literal[int] : keyword[return] keyword[False] keyword[if] identifier[len] ( identifier[refs] . identifier[data_addr_to_ref] [ identifier[cgcea_memory_data] . identifier[addr] ])!= literal[int] : keyword[return] keyword[False] keyword[if] identifier[next] ( identifier[iter] ( identifier[refs] . identifier[data_addr_to_ref] [ identifier[cgcpl_memory_data] . identifier[addr] ])). 
identifier[block_addr] != identifier[next] ( identifier[iter] ( identifier[refs] . identifier[data_addr_to_ref] [ identifier[cgcea_memory_data] . identifier[addr] ])). identifier[block_addr] : keyword[return] keyword[False] identifier[insn_addr] = identifier[next] ( identifier[iter] ( identifier[refs] . identifier[data_addr_to_ref] [ identifier[cgcpl_memory_data] . identifier[addr] ])). identifier[insn_addr] identifier[cfg_node] = identifier[self] . identifier[cfg] . identifier[get_any_node] ( identifier[insn_addr] , identifier[anyaddr] = keyword[True] ) keyword[if] keyword[not] identifier[cfg_node] : keyword[return] keyword[False] identifier[func_addr] = identifier[cfg_node] . identifier[function_address] identifier[sub_func_addr] = keyword[None] keyword[if] identifier[func_addr] keyword[not] keyword[in] identifier[self] . identifier[cfg] . identifier[functions] : keyword[return] keyword[False] identifier[function] = identifier[self] . identifier[cfg] . identifier[functions] [ identifier[func_addr] ] identifier[calling_targets] =[] keyword[for] identifier[_] , identifier[dst] , identifier[data] keyword[in] identifier[function] . identifier[transition_graph] . identifier[edges] ( identifier[data] = keyword[True] ): keyword[if] literal[string] keyword[in] identifier[data] keyword[and] identifier[data] [ literal[string] ]== literal[string] : identifier[calling_targets] . identifier[append] ( identifier[dst] . identifier[addr] ) keyword[if] identifier[len] ( identifier[calling_targets] )!= literal[int] : keyword[return] keyword[False] identifier[sub_func_addr] = identifier[calling_targets] [ literal[int] ] identifier[proc] = identifier[next] (( identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[procedures] keyword[if] identifier[p] . 
identifier[addr] == identifier[func_addr] ), keyword[None] ) keyword[if] identifier[proc] keyword[is] keyword[None] : keyword[return] keyword[False] identifier[subproc] = identifier[next] (( identifier[p] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[procedures] keyword[if] identifier[p] . identifier[addr] == identifier[sub_func_addr] ), keyword[None] ) keyword[if] identifier[subproc] keyword[is] keyword[None] : keyword[return] keyword[False] identifier[has_label] = keyword[True] identifier[lowest_address] = identifier[min] ( identifier[cgc_package_list] . identifier[addr] , identifier[cgc_extended_application] . identifier[addr] ) keyword[for] identifier[obj] keyword[in] ( identifier[cgc_package_list] , identifier[cgc_extended_application] ): identifier[labels] = identifier[obj] . identifier[labels] keyword[for] identifier[addr] , identifier[label] keyword[in] identifier[labels] : keyword[if] identifier[addr] != identifier[lowest_address] : identifier[label] . identifier[base_addr] = identifier[lowest_address] keyword[if] identifier[has_label] : identifier[data] = identifier[next] (( identifier[d] keyword[for] identifier[d] keyword[in] identifier[self] . identifier[data] keyword[if] identifier[d] . identifier[addr] keyword[is] keyword[not] keyword[None] keyword[and] identifier[d] . identifier[addr] + identifier[d] . identifier[size] == identifier[lowest_address] ), keyword[None] ) keyword[if] identifier[data] keyword[is] keyword[None] : keyword[pass] keyword[else] : identifier[lbl] = identifier[self] . identifier[symbol_manager] . identifier[addr_to_label] [ identifier[lowest_address] ][ literal[int] ] keyword[if] identifier[lbl] keyword[not] keyword[in] identifier[data] . identifier[end_labels] : identifier[data] . identifier[end_labels] . identifier[append] ( identifier[lbl] ) identifier[proc] . identifier[asm_code] = literal[string] identifier[subproc] . identifier[asm_code] = literal[string] identifier[cgc_package_list] . 
identifier[skip] = keyword[True] identifier[cgc_extended_application] . identifier[skip] = keyword[True] identifier[l] . identifier[info] ( literal[string] ) keyword[return] keyword[True]
def remove_cgc_attachments(self): """ Remove CGC attachments. :return: True if CGC attachments are found and removed, False otherwise :rtype: bool """ cgc_package_list = None cgc_extended_application = None for data in self.data: if data.sort == 'cgc-package-list': cgc_package_list = data # depends on [control=['if'], data=[]] elif data.sort == 'cgc-extended-application': cgc_extended_application = data # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data']] if not cgc_package_list or not cgc_extended_application: return False # depends on [control=['if'], data=[]] if cgc_package_list.skip or cgc_extended_application.skip: # they have already been removed # so we still return True to indicate that CGC attachments have been removed return True # depends on [control=['if'], data=[]] # there is a single function referencing them cgcpl_memory_data = self.cfg.memory_data.get(cgc_package_list.addr, None) cgcea_memory_data = self.cfg.memory_data.get(cgc_extended_application.addr, None) refs = self.cfg.model.references if cgcpl_memory_data is None or cgcea_memory_data is None: return False # depends on [control=['if'], data=[]] if len(refs.data_addr_to_ref[cgcpl_memory_data.addr]) != 1: return False # depends on [control=['if'], data=[]] if len(refs.data_addr_to_ref[cgcea_memory_data.addr]) != 1: return False # depends on [control=['if'], data=[]] # check if the irsb addresses are the same if next(iter(refs.data_addr_to_ref[cgcpl_memory_data.addr])).block_addr != next(iter(refs.data_addr_to_ref[cgcea_memory_data.addr])).block_addr: return False # depends on [control=['if'], data=[]] insn_addr = next(iter(refs.data_addr_to_ref[cgcpl_memory_data.addr])).insn_addr # get the basic block cfg_node = self.cfg.get_any_node(insn_addr, anyaddr=True) if not cfg_node: return False # depends on [control=['if'], data=[]] func_addr = cfg_node.function_address # this function should be calling another function sub_func_addr = None if func_addr not in 
self.cfg.functions: return False # depends on [control=['if'], data=[]] function = self.cfg.functions[func_addr] # traverse the graph and make sure there is only one call edge calling_targets = [] for (_, dst, data) in function.transition_graph.edges(data=True): if 'type' in data and data['type'] == 'call': calling_targets.append(dst.addr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if len(calling_targets) != 1: return False # depends on [control=['if'], data=[]] sub_func_addr = calling_targets[0] # alright. We want to nop this function, as well as the subfunction proc = next((p for p in self.procedures if p.addr == func_addr), None) if proc is None: return False # depends on [control=['if'], data=[]] subproc = next((p for p in self.procedures if p.addr == sub_func_addr), None) if subproc is None: return False # depends on [control=['if'], data=[]] # if those two data entries have any label, we should properly modify them # at this point, we are fairly confident that none of those labels are direct data references to either package # list or extended application has_label = True lowest_address = min(cgc_package_list.addr, cgc_extended_application.addr) for obj in (cgc_package_list, cgc_extended_application): labels = obj.labels for (addr, label) in labels: if addr != lowest_address: label.base_addr = lowest_address # depends on [control=['if'], data=['lowest_address']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['obj']] if has_label: # is there any memory data entry that ends right at the lowest address? 
data = next((d for d in self.data if d.addr is not None and d.addr + d.size == lowest_address), None) if data is None: # since there is no gap between memory data entries (we guarantee that), this can only be that no other # data resides in the same memory region that CGC attachments are in pass # depends on [control=['if'], data=[]] else: lbl = self.symbol_manager.addr_to_label[lowest_address][0] if lbl not in data.end_labels: data.end_labels.append(lbl) # depends on [control=['if'], data=['lbl']] # depends on [control=['if'], data=[]] # practically nop the function proc.asm_code = '\tret\n' subproc.asm_code = '\tret\n' # remove those two data entries cgc_package_list.skip = True cgc_extended_application.skip = True l.info('CGC attachments are removed.') return True
def get_comments(self):
    """Gets all comments.

    return: (osid.commenting.CommentList) - a list of comments
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources
    # NOTE: This implementation currently ignores plenary view
    comment_collection = JSONClientValidated(
        'commenting',
        collection='Comment',
        runtime=self._runtime)
    # Apply the session's view filter, newest documents first.
    cursor = comment_collection.find(self._view_filter())
    cursor = cursor.sort('_id', DESCENDING)
    return objects.CommentList(
        cursor,
        runtime=self._runtime,
        proxy=self._proxy)
def function[get_comments, parameter[self]]: constant[Gets all comments. return: (osid.commenting.CommentList) - a list of comments raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ] variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[commenting]]] variable[result] assign[=] call[call[name[collection].find, parameter[call[name[self]._view_filter, parameter[]]]].sort, parameter[constant[_id], name[DESCENDING]]] return[call[name[objects].CommentList, parameter[name[result]]]]
keyword[def] identifier[get_comments] ( identifier[self] ): literal[string] identifier[collection] = identifier[JSONClientValidated] ( literal[string] , identifier[collection] = literal[string] , identifier[runtime] = identifier[self] . identifier[_runtime] ) identifier[result] = identifier[collection] . identifier[find] ( identifier[self] . identifier[_view_filter] ()). identifier[sort] ( literal[string] , identifier[DESCENDING] ) keyword[return] identifier[objects] . identifier[CommentList] ( identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] )
def get_comments(self): """Gets all comments. return: (osid.commenting.CommentList) - a list of comments raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('commenting', collection='Comment', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.CommentList(result, runtime=self._runtime, proxy=self._proxy)
def build_query(self, **filters):
    """
    Build queries for geo spatial filtering.

    Expected query parameters are:
    - a `unit=value` parameter where the unit is a valid UNIT in the
      `django.contrib.gis.measure.Distance` class.
    - `from` which must be a comma separated latitude and longitude.

    Example query:
        /api/v1/search/?km=10&from=59.744076,10.152045

    Will perform a `dwithin` query within 10 km from the point with
    latitude 59.744076 and longitude 10.152045.
    """
    applicable_filters = None

    # Keep only the query parameters we understand: distance units plus
    # the spatial `from` parameter.
    unit_names = self.D.UNITS.keys()
    recognized = list(unit_names) + [constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM]
    filters = {name: filters[name] for name in recognized if name in filters}
    distance = {name: value for name, value in filters.items() if name in unit_names}

    try:
        raw_from = filters[constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM]
        latitude, longitude = map(float, self.tokenize(raw_from, self.view.lookup_sep))
        point = self.Point(longitude, latitude, srid=constants.GEO_SRID)
    except ValueError:
        raise ValueError("Cannot convert `from=latitude,longitude` query parameter to "
                         "float values. Make sure to provide numerical values only!")
    except KeyError:
        # If the user has not provided any `from` query string parameter,
        # just return.
        pass
    else:
        for unit in distance.keys():
            if not len(distance[unit]) == 1:
                raise ValueError("Each unit must have exactly one value.")
            distance[unit] = float(distance[unit][0])

        if point and distance:
            applicable_filters = {
                "dwithin": {
                    "field": self.backend.point_field,
                    "point": point,
                    "distance": self.D(**distance)
                },
                "distance": {
                    "field": self.backend.point_field,
                    "point": point
                }
            }

    return applicable_filters
def function[build_query, parameter[self]]: constant[ Build queries for geo spatial filtering. Expected query parameters are: - a `unit=value` parameter where the unit is a valid UNIT in the `django.contrib.gis.measure.Distance` class. - `from` which must be a comma separated latitude and longitude. Example query: /api/v1/search/?km=10&from=59.744076,10.152045 Will perform a `dwithin` query within 10 km from the point with latitude 59.744076 and longitude 10.152045. ] variable[applicable_filters] assign[=] constant[None] variable[filters] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b110e7d0>]] variable[distance] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b110f7c0>]] <ast.Try object at 0x7da1b110eec0> return[name[applicable_filters]]
keyword[def] identifier[build_query] ( identifier[self] ,** identifier[filters] ): literal[string] identifier[applicable_filters] = keyword[None] identifier[filters] = identifier[dict] (( identifier[k] , identifier[filters] [ identifier[k] ]) keyword[for] identifier[k] keyword[in] identifier[chain] ( identifier[self] . identifier[D] . identifier[UNITS] . identifier[keys] (), [ identifier[constants] . identifier[DRF_HAYSTACK_SPATIAL_QUERY_PARAM] ]) keyword[if] identifier[k] keyword[in] identifier[filters] ) identifier[distance] = identifier[dict] (( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[filters] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[self] . identifier[D] . identifier[UNITS] . identifier[keys] ()) keyword[try] : identifier[latitude] , identifier[longitude] = identifier[map] ( identifier[float] , identifier[self] . identifier[tokenize] ( identifier[filters] [ identifier[constants] . identifier[DRF_HAYSTACK_SPATIAL_QUERY_PARAM] ], identifier[self] . identifier[view] . identifier[lookup_sep] )) identifier[point] = identifier[self] . identifier[Point] ( identifier[longitude] , identifier[latitude] , identifier[srid] = identifier[constants] . identifier[GEO_SRID] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[except] identifier[KeyError] : keyword[pass] keyword[else] : keyword[for] identifier[unit] keyword[in] identifier[distance] . identifier[keys] (): keyword[if] keyword[not] identifier[len] ( identifier[distance] [ identifier[unit] ])== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[distance] [ identifier[unit] ]= identifier[float] ( identifier[distance] [ identifier[unit] ][ literal[int] ]) keyword[if] identifier[point] keyword[and] identifier[distance] : identifier[applicable_filters] ={ literal[string] :{ literal[string] : identifier[self] . 
identifier[backend] . identifier[point_field] , literal[string] : identifier[point] , literal[string] : identifier[self] . identifier[D] (** identifier[distance] ) }, literal[string] :{ literal[string] : identifier[self] . identifier[backend] . identifier[point_field] , literal[string] : identifier[point] } } keyword[return] identifier[applicable_filters]
def build_query(self, **filters): """ Build queries for geo spatial filtering. Expected query parameters are: - a `unit=value` parameter where the unit is a valid UNIT in the `django.contrib.gis.measure.Distance` class. - `from` which must be a comma separated latitude and longitude. Example query: /api/v1/search/?km=10&from=59.744076,10.152045 Will perform a `dwithin` query within 10 km from the point with latitude 59.744076 and longitude 10.152045. """ applicable_filters = None filters = dict(((k, filters[k]) for k in chain(self.D.UNITS.keys(), [constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM]) if k in filters)) distance = dict(((k, v) for (k, v) in filters.items() if k in self.D.UNITS.keys())) try: (latitude, longitude) = map(float, self.tokenize(filters[constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM], self.view.lookup_sep)) point = self.Point(longitude, latitude, srid=constants.GEO_SRID) # depends on [control=['try'], data=[]] except ValueError: raise ValueError('Cannot convert `from=latitude,longitude` query parameter to float values. Make sure to provide numerical values only!') # depends on [control=['except'], data=[]] except KeyError: # If the user has not provided any `from` query string parameter, # just return. pass # depends on [control=['except'], data=[]] else: for unit in distance.keys(): if not len(distance[unit]) == 1: raise ValueError('Each unit must have exactly one value.') # depends on [control=['if'], data=[]] distance[unit] = float(distance[unit][0]) # depends on [control=['for'], data=['unit']] if point and distance: applicable_filters = {'dwithin': {'field': self.backend.point_field, 'point': point, 'distance': self.D(**distance)}, 'distance': {'field': self.backend.point_field, 'point': point}} # depends on [control=['if'], data=[]] return applicable_filters
def patch(self, *args, **kwargs):
    """
    Executes an HTTP PATCH.

    :Parameters:
      - `args`: Non-keyword arguments
      - `kwargs`: Keyword arguments
    """
    # Merge session-level defaults into the per-call keyword arguments,
    # then delegate the actual request to the underlying session.
    merged_kwargs = self.get_kwargs(**kwargs)
    return self.session.patch(*args, **merged_kwargs)
def function[patch, parameter[self]]: constant[ Executes an HTTP PATCH. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments ] return[call[name[self].session.patch, parameter[<ast.Starred object at 0x7da1b06fda50>]]]
keyword[def] identifier[patch] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[session] . identifier[patch] (* identifier[args] ,** identifier[self] . identifier[get_kwargs] (** identifier[kwargs] ))
def patch(self, *args, **kwargs): """ Executes an HTTP PATCH. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments """ return self.session.patch(*args, **self.get_kwargs(**kwargs))