text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def fetch_data_table(api_key, show_progress, retries): """ Fetch WIKI Prices data table from Quandl """ for _ in range(retries): try: if show_progress: log.info('Downloading WIKI metadata.') metadata = pd.read_csv( format_metadata_url(api_key) ) # Extract link from metadata and download zip file. table_url = metadata.loc[0, 'file.link'] if show_progress: raw_file = download_with_progress( table_url, chunk_size=ONE_MEGABYTE, label="Downloading WIKI Prices table from Quandl" ) else: raw_file = download_without_progress(table_url) return load_data_table( file=raw_file, index_col=None, show_progress=show_progress, ) except Exception: log.exception("Exception raised reading Quandl data. Retrying.") else: raise ValueError( "Failed to download Quandl data after %d attempts." % (retries) )
[ "def", "fetch_data_table", "(", "api_key", ",", "show_progress", ",", "retries", ")", ":", "for", "_", "in", "range", "(", "retries", ")", ":", "try", ":", "if", "show_progress", ":", "log", ".", "info", "(", "'Downloading WIKI metadata.'", ")", "metadata", ...
31.621622
18.189189
def deprecated(since_or_msg, old=None, new=None, extra=None): """ Issue a nicely formatted deprecation warning. """ if isinstance(since_or_msg, tuple): if old is None or new is None: raise ValueError("deprecated entity and a replacement are required") if len(since_or_msg) != 3 or not all(isinstance(x, int) and x >=0 for x in since_or_msg): raise ValueError("invalid version tuple: %r" % (since_or_msg,)) since = "%d.%d.%d" % since_or_msg message = "%(old)s was deprecated in Bokeh %(since)s and will be removed, use %(new)s instead." message = message % dict(old=old, since=since, new=new) if extra is not None: message += " " + extra.strip() elif isinstance(since_or_msg, six.string_types): if not (old is None and new is None and extra is None): raise ValueError("deprecated(message) signature doesn't allow extra arguments") message = since_or_msg else: raise ValueError("expected a version tuple or string message") warn(message)
[ "def", "deprecated", "(", "since_or_msg", ",", "old", "=", "None", ",", "new", "=", "None", ",", "extra", "=", "None", ")", ":", "if", "isinstance", "(", "since_or_msg", ",", "tuple", ")", ":", "if", "old", "is", "None", "or", "new", "is", "None", ...
44.041667
26.5
def context_chain(self) -> List['Context']: """Return a list of contexts starting from this one, its parent and so on.""" contexts = [] ctx = self # type: Optional[Context] while ctx is not None: contexts.append(ctx) ctx = ctx.parent return contexts
[ "def", "context_chain", "(", "self", ")", "->", "List", "[", "'Context'", "]", ":", "contexts", "=", "[", "]", "ctx", "=", "self", "# type: Optional[Context]", "while", "ctx", "is", "not", "None", ":", "contexts", ".", "append", "(", "ctx", ")", "ctx", ...
34.111111
12.666667
def get_file_contents(self, pointer=False): ''' Gets any file contents you care about. Defaults to the main file @param pointer: The the contents of the file pointer, not the pointed at file @return: A string of the contents ''' if self.pointer: if pointer: return self.old_pointed else: return self.old_data else: return self.old_data
[ "def", "get_file_contents", "(", "self", ",", "pointer", "=", "False", ")", ":", "if", "self", ".", "pointer", ":", "if", "pointer", ":", "return", "self", ".", "old_pointed", "else", ":", "return", "self", ".", "old_data", "else", ":", "return", "self",...
32.428571
18
def _setup_py_run_from_dir(root_dir, py_interpreter): """run the extractmeta command via the setup.py in the given root_dir. the output of extractmeta is json and is stored in a tempfile which is then read in and returned as data""" data = {} with _enter_single_subdir(root_dir) as single_subdir: if not os.path.exists("setup.py"): raise Exception("'setup.py' does not exist in '%s'" % ( single_subdir)) # generate a temporary json file which contains the metadata output_json = tempfile.NamedTemporaryFile() cmd = "%s setup.py -q --command-packages metaextract " \ "metaextract -o %s " % (py_interpreter, output_json.name) try: subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) except subprocess.CalledProcessError: # try again with a encoding in setup.py _set_file_encoding_utf8("setup.py") subprocess.check_output(cmd, shell=True) # read json file and return data with open(output_json.name, "r") as f: data = json.loads(f.read()) # sort some of the keys if the dict values are lists for key in ['data_files', 'entry_points', 'extras_require', 'install_requires', 'setup_requires', 'scripts', 'tests_require', 'tests_suite']: if key in data['data'] and isinstance(data['data'][key], list): data['data'][key] = sorted(data['data'][key]) return data
[ "def", "_setup_py_run_from_dir", "(", "root_dir", ",", "py_interpreter", ")", ":", "data", "=", "{", "}", "with", "_enter_single_subdir", "(", "root_dir", ")", "as", "single_subdir", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "\"setup.py\"", ")...
48.806452
18.387097
def default_logging(grab_log=None, # '/tmp/grab.log', network_log=None, # '/tmp/grab.network.log', level=logging.DEBUG, mode='a', propagate_network_logger=False): """ Customize logging output to display all log messages except grab network logs. Redirect grab network logs into file. """ logging.basicConfig(level=level) network_logger = logging.getLogger('grab.network') network_logger.propagate = propagate_network_logger if network_log: hdl = logging.FileHandler(network_log, mode) network_logger.addHandler(hdl) network_logger.setLevel(level) grab_logger = logging.getLogger('grab') if grab_log: hdl = logging.FileHandler(grab_log, mode) grab_logger.addHandler(hdl) grab_logger.setLevel(level)
[ "def", "default_logging", "(", "grab_log", "=", "None", ",", "# '/tmp/grab.log',", "network_log", "=", "None", ",", "# '/tmp/grab.network.log',", "level", "=", "logging", ".", "DEBUG", ",", "mode", "=", "'a'", ",", "propagate_network_logger", "=", "False", ")", ...
33.16
14.52
def process_task_topic_list(app, doctree, fromdocname): """Process the ``task_topic_list`` node to generate a rendered listing of Task, Configurable, or Config topics (as determined by the types key of the ``task_topic_list`` node). This is called during the "doctree-resolved" phase so that the ``lsst_task_topcs`` environment attribute is fully set. """ logger = getLogger(__name__) logger.debug('Started process_task_list') env = app.builder.env for node in doctree.traverse(task_topic_list): try: topics = env.lsst_task_topics except AttributeError: message = ( "Environment does not have 'lsst_task_topics', " "can't process the listing." ) logger.warning(message) node.replace_self(nodes.paragraph(text=message)) continue root = node['root_namespace'] # Sort tasks by the topic's class name. # NOTE: if the presentation of the link is changed to the fully # qualified name, with full Python namespace, then the topic_names # should be changed to match that. topic_keys = [k for k, topic in topics.items() if topic['type'] in node['types'] if topic['fully_qualified_name'].startswith(root)] topic_names = [topics[k]['fully_qualified_name'].split('.')[-1] for k in topic_keys] topic_keys = [ k for k, _ in sorted(zip(topic_keys, topic_names), key=lambda pair: pair[1])] if len(topic_keys) == 0: # Fallback if no topics are found p = nodes.paragraph(text='No topics.') node.replace_self(p) continue dl = nodes.definition_list() for key in topic_keys: topic = topics[key] class_name = topic['fully_qualified_name'].split('.')[-1] summary_text = topic['summary_node'][0].astext() # Each topic in the listing is a definition list item. The term is # the linked class name and the description is the summary # sentence from the docstring _or_ the content of the # topic directive dl_item = nodes.definition_list_item() # Can insert an actual reference since the doctree is resolved. 
ref_node = nodes.reference('', '') ref_node['refdocname'] = topic['docname'] ref_node['refuri'] = app.builder.get_relative_uri( fromdocname, topic['docname']) # NOTE: Not appending an anchor to the URI because task topics # are designed to occupy an entire page. link_label = nodes.Text(class_name, class_name) ref_node += link_label term = nodes.term() term += ref_node dl_item += term # We're degrading the summary to plain text to avoid syntax issues # and also because it may be distracting def_node = nodes.definition() def_node += nodes.paragraph(text=summary_text) dl_item += def_node dl += dl_item # Replace the task_list node (a placeholder) with this renderable # content node.replace_self(dl)
[ "def", "process_task_topic_list", "(", "app", ",", "doctree", ",", "fromdocname", ")", ":", "logger", "=", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "'Started process_task_list'", ")", "env", "=", "app", ".", "builder", ".", "env", "for...
39.54878
19.353659
def run_step(self): """ Defines what to do in one iteration. The default is: ``self.hooked_sess.run(self.train_op)``. The behavior of each iteration can be changed by either setting ``trainer.train_op``, or overriding this method. """ if not hasattr(self, 'train_op'): raise NotImplementedError( "Please either set `Trainer.train_op` or provide an implementation " "of Trainer.run_step()!") self.hooked_sess.run(self.train_op)
[ "def", "run_step", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'train_op'", ")", ":", "raise", "NotImplementedError", "(", "\"Please either set `Trainer.train_op` or provide an implementation \"", "\"of Trainer.run_step()!\"", ")", "self", ".", "ho...
40.230769
15.307692
def __get_by_info(post_id, catalog_id): ''' Geo the record by post and catalog. ''' recs = TabPost2Tag.select().where( (TabPost2Tag.post_id == post_id) & (TabPost2Tag.tag_id == catalog_id) ) if recs.count() == 1: return recs.get() elif recs.count() > 1: # return the first one, and delete others. out_rec = None for rec in recs: if out_rec: entry = TabPost2Tag.delete().where( TabPost2Tag.uid == rec.uid ) entry.execute() else: out_rec = rec return out_rec return None
[ "def", "__get_by_info", "(", "post_id", ",", "catalog_id", ")", ":", "recs", "=", "TabPost2Tag", ".", "select", "(", ")", ".", "where", "(", "(", "TabPost2Tag", ".", "post_id", "==", "post_id", ")", "&", "(", "TabPost2Tag", ".", "tag_id", "==", "catalog_...
30.416667
14.25
def put_file(self, target, path, file_data=None, server_file=None, offset=None, truncate=False): """Put data into a file on the device :param target: The device(s) to be targeted with this request :type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances :param path: The path on the target to the file to write to. If the file already exists it will be overwritten. :param file_data: A `six.binary_type` containing the data to put into the file :param server_file: The path to a file on the devicecloud server containing the data to put into the file on the device :param offset: Start writing bytes to the file at this position, if None start at the beginning :param truncate: Boolean, if True after bytes are done being written end the file their even if previous data exists beyond it. If False, leave any existing data in place. :return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo` if the operation failed on that device :raises: :class:`~.FileSystemServiceException` if either both file_data and server_file are specified or neither are specified :raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting """ command_block = FileSystemServiceCommandBlock() command_block.add_command(PutCommand(path, file_data, server_file, offset, truncate)) root = _parse_command_response(self._sci_api.send_sci("file_system", target, command_block.get_command_string())) out_dict = {} for device in root.findall('./file_system/device'): device_id = device.get('id') error = device.find('./error') if error is not None: out_dict[device_id] = _parse_error_tree(error) else: out_dict[device_id] = PutCommand.parse_response(device.find('./commands/put_file')) return out_dict
[ "def", "put_file", "(", "self", ",", "target", ",", "path", ",", "file_data", "=", "None", ",", "server_file", "=", "None", ",", "offset", "=", "None", ",", "truncate", "=", "False", ")", ":", "command_block", "=", "FileSystemServiceCommandBlock", "(", ")"...
61.909091
38.090909
def export_data_dir(target_path): """ Exports the media files of the application and bundles a zip archive :return: the target path of the zip archive """ from django_productline import utils from django.conf import settings utils.zipdir(settings.PRODUCT_CONTEXT.DATA_DIR, target_path, wrapdir='__data__') print('... wrote {target_path}'.format(target_path=target_path)) return target_path
[ "def", "export_data_dir", "(", "target_path", ")", ":", "from", "django_productline", "import", "utils", "from", "django", ".", "conf", "import", "settings", "utils", ".", "zipdir", "(", "settings", ".", "PRODUCT_CONTEXT", ".", "DATA_DIR", ",", "target_path", ",...
37.818182
16.363636
def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with io.open(vocab_file, 'r') as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index index += 1 return vocab
[ "def", "load_vocab", "(", "vocab_file", ")", ":", "vocab", "=", "collections", ".", "OrderedDict", "(", ")", "index", "=", "0", "with", "io", ".", "open", "(", "vocab_file", ",", "'r'", ")", "as", "reader", ":", "while", "True", ":", "token", "=", "r...
29.076923
12.461538
def byte_to_unitcode(bytecode): """Return an X10 unitcode value from a byte value.""" return list(UC_LOOKUP.keys())[list(UC_LOOKUP.values()).index(bytecode)]
[ "def", "byte_to_unitcode", "(", "bytecode", ")", ":", "return", "list", "(", "UC_LOOKUP", ".", "keys", "(", ")", ")", "[", "list", "(", "UC_LOOKUP", ".", "values", "(", ")", ")", ".", "index", "(", "bytecode", ")", "]" ]
54.333333
14.666667
def resolved_ok(self): """ Shortcut to testing unresolved_symbols and count_braces separately. Returns false if there are unresolved symbols or {{ or }} braces remaining, true otherwise """ left_braces, right_braces = self.count_braces() return len(self.unresolved_symbols()) == left_braces == right_braces == 0
[ "def", "resolved_ok", "(", "self", ")", ":", "left_braces", ",", "right_braces", "=", "self", ".", "count_braces", "(", ")", "return", "len", "(", "self", ".", "unresolved_symbols", "(", ")", ")", "==", "left_braces", "==", "right_braces", "==", "0" ]
47
21.571429
def get_next_environment(env): """ Given an environment, return the next environment in the promotion hierarchy """ config = _config_file() juicer.utils.Log.log_debug("Finding next environment...") if env not in config.sections(): raise JuicerConfigError("%s is not a server configured in juicer.conf", env) section = dict(config.items(env)) if 'promotes_to' not in section.keys(): err = "Environment `%s` has no entry for `promotes_to`\nCheck man 5 juicer.conf." % env raise JuicerConfigError(err) return section['promotes_to']
[ "def", "get_next_environment", "(", "env", ")", ":", "config", "=", "_config_file", "(", ")", "juicer", ".", "utils", ".", "Log", ".", "log_debug", "(", "\"Finding next environment...\"", ")", "if", "env", "not", "in", "config", ".", "sections", "(", ")", ...
30.473684
21.105263
def getVariants(self, referenceName, startPosition, endPosition, callSetIds=[]): """ Returns an iterator over the specified variants. The parameters correspond to the attributes of a GASearchVariantsRequest object. """ if callSetIds is None: callSetIds = self._callSetIds else: for callSetId in callSetIds: if callSetId not in self._callSetIds: raise exceptions.CallSetNotInVariantSetException( callSetId, self.getId()) for record in self.getPysamVariants( referenceName, startPosition, endPosition): yield self.convertVariant(record, callSetIds)
[ "def", "getVariants", "(", "self", ",", "referenceName", ",", "startPosition", ",", "endPosition", ",", "callSetIds", "=", "[", "]", ")", ":", "if", "callSetIds", "is", "None", ":", "callSetIds", "=", "self", ".", "_callSetIds", "else", ":", "for", "callSe...
44.9375
13.8125
def get_inchi(self): '''Returns inchi''' inchi = parsers.get_inchi(self.__chebi_id) if inchi is None: inchi = parsers.get_inchi(self.get_parent_id()) if inchi is None: for parent_or_child_id in self.__get_all_ids(): inchi = parsers.get_inchi(parent_or_child_id) if inchi is not None: break return inchi
[ "def", "get_inchi", "(", "self", ")", ":", "inchi", "=", "parsers", ".", "get_inchi", "(", "self", ".", "__chebi_id", ")", "if", "inchi", "is", "None", ":", "inchi", "=", "parsers", ".", "get_inchi", "(", "self", ".", "get_parent_id", "(", ")", ")", ...
27.2
22
def replaceNode(oldNode, newNode): # type: (_RuleConnectable, _RuleConnectable) -> _RuleConnectable """ Replace instance of Nonterminal or Terminal in the tree with another one. :param oldNode: Old nonterminal or terminal already in the tree. :param newNode: Instance of nonterminal or terminal to replace with. :return: Instance `newNode` in the tree. """ if oldNode.from_rule is not None and len(oldNode.from_rule.to_symbols) > 0: indexParent = oldNode.from_rule.to_symbols.index(oldNode) oldNode.from_rule.to_symbols[indexParent] = newNode newNode._set_from_rule(oldNode.from_rule) if oldNode.to_rule is not None and len(oldNode.to_rule.from_symbols) > 0: indexChild = oldNode.to_rule.from_symbols.index(oldNode) oldNode.to_rule._from_symbols[indexChild] = newNode newNode._set_to_rule(oldNode.to_rule) return newNode
[ "def", "replaceNode", "(", "oldNode", ",", "newNode", ")", ":", "# type: (_RuleConnectable, _RuleConnectable) -> _RuleConnectable", "if", "oldNode", ".", "from_rule", "is", "not", "None", "and", "len", "(", "oldNode", ".", "from_rule", ".", "to_symbols", ")", ">", ...
56.235294
22.470588
def match_template(template, image, options=None): """ Multi channel template matching using simple correlation distance :param template: Template image :param image: Search image :param options: Other options: - distance: Distance measure to use. Default: 'correlation' - normalize: Heatmap values will be in the range of 0 to 1. Default: True - retain_size: Whether to retain the same size as input image. Default: True :return: Heatmap """ # If the input has max of 3 channels, use the faster OpenCV matching if len(image.shape) <= 3 and image.shape[2] <= 3: return match_template_opencv(template, image, options) op = _DEF_TM_OPT.copy() if options is not None: op.update(options) template = img_utils.gray3(template) image = img_utils.gray3(image) h, w, d = template.shape im_h, im_w = image.shape[:2] template_v = template.flatten() heatmap = np.zeros((im_h - h, im_w - w)) for col in range(0, im_w - w): for row in range(0, im_h - h): cropped_im = image[row:row + h, col:col + w, :] cropped_v = cropped_im.flatten() if op['distance'] == 'euclidean': heatmap[row, col] = scipy.spatial.distance.euclidean(template_v, cropped_v) elif op['distance'] == 'correlation': heatmap[row, col] = scipy.spatial.distance.correlation(template_v, cropped_v) # normalize if op['normalize']: heatmap /= heatmap.max() # size if op['retain_size']: hmap = np.ones(image.shape[:2]) * heatmap.max() h, w = heatmap.shape hmap[:h, :w] = heatmap heatmap = hmap return heatmap
[ "def", "match_template", "(", "template", ",", "image", ",", "options", "=", "None", ")", ":", "# If the input has max of 3 channels, use the faster OpenCV matching", "if", "len", "(", "image", ".", "shape", ")", "<=", "3", "and", "image", ".", "shape", "[", "2"...
32.843137
20.686275
def vectors_between_pts(pts=[]): '''Return vectors between points on N dimensions. Last vector is the path between the first and last point, creating a loop. ''' assert isinstance(pts, list) and len(pts) > 0 l_pts = len(pts) l_pt_prev = None for pt in pts: assert isinstance(pt, tuple) l_pt = len(pt) assert l_pt > 1 for i in pt: assert isinstance(i, float) if l_pt_prev is not None: assert l_pt == l_pt_prev l_pt_prev = l_pt return [tuple([pts[(i+1) % l_pts][j] - pts[i][j] for j in range(l_pt)]) \ for i in range(l_pts)]
[ "def", "vectors_between_pts", "(", "pts", "=", "[", "]", ")", ":", "assert", "isinstance", "(", "pts", ",", "list", ")", "and", "len", "(", "pts", ")", ">", "0", "l_pts", "=", "len", "(", "pts", ")", "l_pt_prev", "=", "None", "for", "pt", "in", "...
32.473684
17.315789
def version(): """Wrapper for opj_version library routine.""" try: OPENJP2.opj_version.restype = ctypes.c_char_p except: return "0.0.0" v = OPENJP2.opj_version() return v.decode('utf-8') if sys.hexversion >= 0x03000000 else v
[ "def", "version", "(", ")", ":", "try", ":", "OPENJP2", ".", "opj_version", ".", "restype", "=", "ctypes", ".", "c_char_p", "except", ":", "return", "\"0.0.0\"", "v", "=", "OPENJP2", ".", "opj_version", "(", ")", "return", "v", ".", "decode", "(", "'ut...
28.222222
21.777778
def _generate_read_callable(name, display_name, arguments, regex, doc, supported): """ Returns a callable which conjures the URL for the resource and GETs a response """ def f(self, *args, **kwargs): url = self._generate_url(regex, args) if 'params' in kwargs: url += "?" + urllib.urlencode(kwargs['params']) return self._get_data(url, accept=(kwargs.get('accept'))) f.__name__ = str('read_%s' % name) f.__doc__ = doc f._resource_uri = regex f._get_args = arguments f._put_or_post_args = None f.resource_name = display_name f.is_api_call = True f.is_supported_api = supported return f
[ "def", "_generate_read_callable", "(", "name", ",", "display_name", ",", "arguments", ",", "regex", ",", "doc", ",", "supported", ")", ":", "def", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "_gener...
36.444444
14.666667
def plot_recurrence_models( configs, area, slip, msr, rake, shear_modulus=30.0, disp_length_ratio=1.25E-5, msr_sigma=0., figure_size=(8, 6), filename=None, filetype='png', dpi=300, ax=None): """ Plots a set of recurrence models :param list configs: List of configuration dictionaries """ if ax is None: fig, ax = plt.subplots(figsize=figure_size) else: fig = ax.get_figure() for config in configs: model = RecurrenceBranch(area, slip, msr, rake, shear_modulus, disp_length_ratio, msr_sigma, weight=1.0) model.get_recurrence(config) occurrence = model.recurrence.occur_rates cumulative = np.array([np.sum(occurrence[iloc:]) for iloc in range(0, len(occurrence))]) if 'AndersonLuco' in config['Model_Name']: flt_label = config['Model_Name'] + ' - ' + config['Model_Type'] +\ ' Type' else: flt_label = config['Model_Name'] flt_color = np.random.uniform(0.1, 1.0, 3) ax.semilogy(model.magnitudes, cumulative, '-', label=flt_label, color=flt_color, linewidth=2.) ax.semilogy(model.magnitudes, model.recurrence.occur_rates, '--', color=flt_color, linewidth=2.) ax.set_xlabel('Magnitude') ax.set_ylabel('Annual Rate') ax.legend(bbox_to_anchor=(1.1, 1.0)) _save_image(fig, filename, filetype, dpi)
[ "def", "plot_recurrence_models", "(", "configs", ",", "area", ",", "slip", ",", "msr", ",", "rake", ",", "shear_modulus", "=", "30.0", ",", "disp_length_ratio", "=", "1.25E-5", ",", "msr_sigma", "=", "0.", ",", "figure_size", "=", "(", "8", ",", "6", ")"...
39.567568
17.459459
def repository_contributors(self, **kwargs): """Return a list of contributors for the project. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server failed to perform the request Returns: list: The contributors """ path = '/projects/%s/repository/contributors' % self.get_id() return self.manager.gitlab.http_list(path, **kwargs)
[ "def", "repository_contributors", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'/projects/%s/repository/contributors'", "%", "self", ".", "get_id", "(", ")", "return", "self", ".", "manager", ".", "gitlab", ".", "http_list", "(", "path", ","...
43.55
24.35
def validate(self, folder): """Validate files and folders contained in this folder It validates all of the files and folders contained in this folder if some observers are interested in them. """ for observer in list(self.observers): observer.validate(folder)
[ "def", "validate", "(", "self", ",", "folder", ")", ":", "for", "observer", "in", "list", "(", "self", ".", "observers", ")", ":", "observer", ".", "validate", "(", "folder", ")" ]
33.888889
16
def run(create_app, config, description=None, args=None, namespace=None, options=None): """Parses commandline options, updates config, creates and runs the application. Supports listing and selecting environment configurations if `Flask-Config`_ configuration class is used. .. _Flask-Config: http://pypi.python.org/pypi/Flask-Config """ import argparse class HelpFormatter( # argparse.RawTextHelpFormatter, argparse.RawDescriptionHelpFormatter, # argparse.ArgumentDefaultsHelpFormatter, # argparse.HelpFormatter, ): pass # Get configurations data if envName = getattr(config, 'ENV', None) envMap = getattr(config, 'envMap', None) envHelp = getattr(config, 'envHelp', {}) envSelectVar = getattr(config, 'envSelectVar', None) configurations = getattr(config, 'configurations', None) envSelectable = callable(getattr(config, 'select', None)) envPrintable = config.__str__ is not object.__str__ parser = argparse.ArgumentParser( description = description or getattr(config, 'description', 'runs Flask-based web application using Python WSGI reference server'), epilog = ''.join(( '\n\noptional environment variables:\n{}'.format( '\n'.join(sorted(' {:<20} {}'.format(envMap[key], envHelp.get(key, '')) for key in envMap))) if envMap else '', '\n\navailable environment configurations (*: active):\n{}'.format( '\n'.join('{} {}'.format( '*' if envName in c.names else ' ', ' | '.join(c.names) ) for c in configurations)) if configurations else '', )), formatter_class = HelpFormatter) parser.add_argument('-b', '--bind', metavar='[HOST|:PORT]', default='127.0.0.1:5000', help = 'bind to HOST:PORT (default: 127.0.0.1:5000)') debug = config.DEBUG debugMsg = ' (default in {})'.format(config.ENV) debugTrueMsg = debugMsg if debug else '' debugFalseMsg = debugMsg if not debug else '' parser.add_argument('-r', '--reload', action='store_true', default=debug, help = 'reload server on code change' + debugTrueMsg) parser.add_argument('-R', '--no-reload', action='store_false', dest='reload', help = 'do not 
reload server on code change' + debugFalseMsg) parser.add_argument('-d', '--debug', action='store_true', default=debug, help = 'show debugger on exception' + debugTrueMsg) parser.add_argument('-D', '--no-debug', action='store_false', dest='debug', help = 'do not show debugger on exception' + debugFalseMsg) if envSelectable: parser.add_argument('-e', '--env', default=config.ENV, help = 'select environment config (default: {})'.format(config.ENV)) if envPrintable: if envSelectable: parser.add_argument('-E', '--show-env', nargs='?', const=True, help = 'show environment config and exit ({}default: {})' .format('*: all, ' if configurations else '', config.ENV)) else: parser.add_argument('-E', '--show-env', action='store_true', help = 'show environment config and exit') parser.add_argument('-g', '--gen-key', action='store_true', help = 'generate a good secret key and exit') args = parser.parse_args(args, namespace) if args.gen_key: # See http://flask.pocoo.org/docs/0.10/quickstart/#sessions import os key = os.urandom(24) return print(key) # return print('{0}\n{0!r}'.format(key)) if envSelectable and args.env: config = config.select(args.env) if envPrintable and args.show_env: if configurations and args.show_env == '*': print('\n\n'.join('# {}\n{}'.format(' | '.join(c.names), c) for c in configurations)) elif args.show_env is True: print(config) else: print(config.select(args.show_env)) return host, port = (args.bind + ':').split(':')[:2] host = host or '127.0.0.1' port = int(port) if port else 5000 return create_app(config).run(host, port, args.debug, use_reloader=args.reload, **(options or {}))
[ "def", "run", "(", "create_app", ",", "config", ",", "description", "=", "None", ",", "args", "=", "None", ",", "namespace", "=", "None", ",", "options", "=", "None", ")", ":", "import", "argparse", "class", "HelpFormatter", "(", "# argparse.RawTextHelpForma...
40.495146
24.019417
def get_response_attribute_filter(self, template_filter, template_model=None): """ Prestans-Response-Attribute-List can contain a client's requested definition for attributes required in the response. This should match the response_attribute_filter_template? :param template_filter: :param template_model: the expected model that this filter corresponds to :return: :rtype: None | AttributeFilter """ if template_filter is None: return None if 'Prestans-Response-Attribute-List' not in self.headers: return None # header not set results in a None attribute_list_str = self.headers['Prestans-Response-Attribute-List'] # deserialize the header contents json_deserializer = deserializer.JSON() attribute_list_dictionary = json_deserializer.loads(attribute_list_str) # construct an AttributeFilter attribute_filter = AttributeFilter( from_dictionary=attribute_list_dictionary, template_model=template_model ) #: Check template? Do this even through we might have template_model #: in case users have made a custom filter evaluated_filter = attribute_filter.conforms_to_template_filter(template_filter) return evaluated_filter
[ "def", "get_response_attribute_filter", "(", "self", ",", "template_filter", ",", "template_model", "=", "None", ")", ":", "if", "template_filter", "is", "None", ":", "return", "None", "if", "'Prestans-Response-Attribute-List'", "not", "in", "self", ".", "headers", ...
36.833333
22.666667
def pprofile(line, cell=None): """ Profile line execution. """ if cell is None: # TODO: detect and use arguments (statistical profiling, ...) ? return run(line) return _main( ['%%pprofile', '-m', '-'] + shlex.split(line), io.StringIO(cell), )
[ "def", "pprofile", "(", "line", ",", "cell", "=", "None", ")", ":", "if", "cell", "is", "None", ":", "# TODO: detect and use arguments (statistical profiling, ...) ?", "return", "run", "(", "line", ")", "return", "_main", "(", "[", "'%%pprofile'", ",", "'-m'", ...
26.181818
16
def write_dir_tree(self, tree): """ Recur through dir tree data structure and write it as a set of objects """ dirs = tree['dirs']; files = tree['files'] child_dirs = {name : self.write_dir_tree(contents) for name, contents in dirs.iteritems()} return self.write_index_object('tree', {'files' : files, 'dirs': child_dirs})
[ "def", "write_dir_tree", "(", "self", ",", "tree", ")", ":", "dirs", "=", "tree", "[", "'dirs'", "]", "files", "=", "tree", "[", "'files'", "]", "child_dirs", "=", "{", "name", ":", "self", ".", "write_dir_tree", "(", "contents", ")", "for", "name", ...
58.5
27.166667
def retrieve_tx(self, txid): """Returns rawtx for <txid>.""" txid = deserialize.txid(txid) tx = self.service.get_tx(txid) return serialize.tx(tx)
[ "def", "retrieve_tx", "(", "self", ",", "txid", ")", ":", "txid", "=", "deserialize", ".", "txid", "(", "txid", ")", "tx", "=", "self", ".", "service", ".", "get_tx", "(", "txid", ")", "return", "serialize", ".", "tx", "(", "tx", ")" ]
34.6
5.2
def encryption_mode(self):
    """
    Returns the name of the encryption mode to use.

    :return:
        A unicode string from one of the following: "cbc", "ecb", "ofb",
        "cfb", "wrap", "gcm", "ccm", "wrap_pad"
    """
    algo = self['algorithm'].native

    # AES identifiers carry the mode after an "aesNNN_" prefix.
    if algo.startswith(('aes128_', 'aes192_', 'aes256_')):
        return algo[7:]

    # PBES1 and PKCS#12 schemes always operate in CBC mode.
    if algo.startswith('pbes1_') or algo.startswith('pkcs12_'):
        return 'cbc'

    # Bare block ciphers default to CBC as well.
    if algo in ('des', 'tripledes_3key', 'rc2', 'rc5'):
        return 'cbc'

    # PBES2 delegates to its nested encryption scheme.
    if algo == 'pbes2':
        return self['parameters']['encryption_scheme'].encryption_mode

    raise ValueError(unwrap(
        '''
        Unrecognized encryption algorithm "%s"
        ''',
        algo
    ))
[ "def", "encryption_mode", "(", "self", ")", ":", "encryption_algo", "=", "self", "[", "'algorithm'", "]", ".", "native", "if", "encryption_algo", "[", "0", ":", "7", "]", "in", "set", "(", "[", "'aes128_'", ",", "'aes192_'", ",", "'aes256_'", "]", ")", ...
28.3125
22
def get_nonlocal_ip(host, subnet=None):
    """
    Search result of getaddrinfo() for a non-localhost-net address
    """
    try:
        addr_info = socket.getaddrinfo(host, None)
    except socket.gaierror:
        raise exc.UnableToResolveError(host)

    for info in addr_info:
        # each entry is a 5-tuple; the last element is (ip, port)
        ip = info[4][0]
        if subnet and ip_in_subnet(ip, subnet):
            LOG.info('found ip (%s) for host (%s) to be in cluster subnet (%s)' % (
                ip,
                host,
                subnet,)
            )
            return ip
        if not ip.startswith('127.'):
            # not loopback: usable, but note when it misses the cluster subnet
            if subnet:
                LOG.warning('could not match ip (%s) for host (%s) for cluster subnet (%s)' % (
                    ip,
                    host,
                    subnet,)
                )
            return ip
    raise exc.UnableToResolveError(host)
[ "def", "get_nonlocal_ip", "(", "host", ",", "subnet", "=", "None", ")", ":", "try", ":", "ailist", "=", "socket", ".", "getaddrinfo", "(", "host", ",", "None", ")", "except", "socket", ".", "gaierror", ":", "raise", "exc", ".", "UnableToResolveError", "(...
30.448276
18.448276
def _entry_must_exist(df, k1, k2): """Evaluate key-subkey existence. Checks that the key-subkey combo exists in the configuration options. """ count = df[(df['k1'] == k1) & (df['k2'] == k2)].shape[0] if count == 0: raise NotRegisteredError( "Option {0}.{1} not registered".format(k1, k2))
[ "def", "_entry_must_exist", "(", "df", ",", "k1", ",", "k2", ")", ":", "count", "=", "df", "[", "(", "df", "[", "'k1'", "]", "==", "k1", ")", "&", "(", "df", "[", "'k2'", "]", "==", "k2", ")", "]", ".", "shape", "[", "0", "]", "if", "count"...
30.727273
11.454545
def set_triggered_by_event(self, value):
    """ Setter for 'triggered_by_event' field.

    :param value - a new value of 'triggered_by_event' field.
    Must be a boolean type. Does not accept None value.
    """
    # Accept only a plain bool; None and everything else is rejected.
    if isinstance(value, bool):
        self.__triggered_by_event = value
    else:
        raise TypeError("TriggeredByEvent must be set to a bool")
[ "def", "set_triggered_by_event", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", "or", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"TriggeredByEvent must be set to a bool\"", ")", "else", ":", "self"...
45.333333
17.777778
def apply_gates_to_fd(stilde_dict, gates):
    """Applies the given dictionary of gates to the given dictionary of
    strain in the frequency domain.

    Gates are applied by IFFT-ing the strain data to the time domain,
    applying the gate, then FFT-ing back to the frequency domain.

    Parameters
    ----------
    stilde_dict : dict
        Dictionary of frequency-domain strain, keyed by the ifos.
    gates : dict
        Dictionary of gates. Keys should be the ifo to apply the data to,
        values are a tuple giving the central time of the gate, the half
        duration, and the taper duration.

    Returns
    -------
    dict
        Dictionary of frequency-domain strain with the gates applied.
    """
    # Shallow copy so ifos without gates pass through untouched.
    outdict = dict(stilde_dict.items())
    # IFFT only the ifos that actually have gates defined.
    strain_dict = {ifo: outdict[ifo].to_timeseries() for ifo in gates}
    # Gate in the time domain, then FFT back to the frequency domain.
    gated = apply_gates_to_td(strain_dict, gates)
    for ifo, series in gated.items():
        outdict[ifo] = series.to_frequencyseries()
    return outdict
[ "def", "apply_gates_to_fd", "(", "stilde_dict", ",", "gates", ")", ":", "# copy data to new dictionary", "outdict", "=", "dict", "(", "stilde_dict", ".", "items", "(", ")", ")", "# create a time-domin strain dictionary to apply the gates to", "strain_dict", "=", "dict", ...
37.931034
21.37931
def register_backend(mimetype, module, extensions=None):
    """Register a backend.

    `mimetype`: a mimetype string (e.g. 'text/plain')
    `module`: an import string (e.g. path.to.my.module)
    `extensions`: a list of extensions (e.g. ['txt', 'text'])
    """
    if mimetype in MIMETYPE_TO_BACKENDS:
        warn("overwriting %r mimetype which was already set" % mimetype)
    MIMETYPE_TO_BACKENDS[mimetype] = module

    if extensions is None:
        # Fall back to the mimetypes module for the canonical extension.
        try:
            ext = _MIMETYPES_TO_EXT[mimetype]
        except KeyError:
            raise KeyError(
                "mimetypes module has no extension associated "
                "with %r mimetype; use 'extensions' arg yourself" % mimetype)
        assert ext, ext
        EXTS_TO_MIMETYPES[ext] = mimetype
        return

    if not isinstance(extensions, (list, tuple, set, frozenset)):
        raise TypeError("invalid extensions type (got %r)" % extensions)
    for ext in set(extensions):
        # Normalize each extension to carry a leading dot.
        if not ext.startswith('.'):
            ext = '.' + ext
        assert ext, ext
        EXTS_TO_MIMETYPES[ext] = mimetype
[ "def", "register_backend", "(", "mimetype", ",", "module", ",", "extensions", "=", "None", ")", ":", "if", "mimetype", "in", "MIMETYPE_TO_BACKENDS", ":", "warn", "(", "\"overwriting %r mimetype which was already set\"", "%", "mimetype", ")", "MIMETYPE_TO_BACKENDS", "[...
42.84
15.68
def fetchall(self):
    """Fetch all available rows from select result set.

    :returns: list of row tuples
    """
    block = self.FETCHALL_BLOCKSIZE
    rows = self.fetchmany(size=block)
    chunk = rows
    # Keep pulling blocks until a short read arrives after the final
    # resultset part has been received.
    while len(chunk) == block or not self._received_last_resultset_part:
        chunk = self.fetchmany(size=block)
        rows.extend(chunk)
    return rows
[ "def", "fetchall", "(", "self", ")", ":", "result", "=", "r", "=", "self", ".", "fetchmany", "(", "size", "=", "self", ".", "FETCHALL_BLOCKSIZE", ")", "while", "len", "(", "r", ")", "==", "self", ".", "FETCHALL_BLOCKSIZE", "or", "not", "self", ".", "...
43.222222
16.777778
def _filter_namespaces_by_route_whitelist(self):
    """
    Given a parsed API in IR form, filter the user-defined datatypes
    so that they include only the route datatypes and their direct dependencies.

    Mutates ``self.api`` in place: each namespace's data types and routes
    are replaced with the whitelisted subset plus dependencies found by
    ``self._find_dependencies``.
    """
    assert self._routes is not None, "Missing route whitelist"
    assert 'route_whitelist' in self._routes
    assert 'datatype_whitelist' in self._routes

    # Get route whitelist in canonical form
    # '*' expands to every route in the namespace; otherwise each entry is
    # normalized to "name" (v1) or "name:version" (v>1).
    route_whitelist = {}
    for namespace_name, route_reprs in self._routes['route_whitelist'].items():
        new_route_reprs = []
        if route_reprs == ['*']:
            namespace = self.api.namespaces[namespace_name]
            new_route_reprs = [route.name_with_version() for route in namespace.routes]
        else:
            for route_repr in route_reprs:
                route_name, version = parse_route_name_and_version(route_repr)
                if version > 1:
                    new_route_reprs.append('{}:{}'.format(route_name, version))
                else:
                    new_route_reprs.append(route_name)
        route_whitelist[namespace_name] = new_route_reprs

    # Parse the route whitelist and populate any starting data types
    route_data_types = []
    for namespace_name, route_reprs in route_whitelist.items():
        # Error out if user supplied nonexistent namespace
        if namespace_name not in self.api.namespaces:
            raise AssertionError('Namespace %s is not defined!' % namespace_name)
        namespace = self.api.namespaces[namespace_name]
        # Parse namespace doc refs and add them to the starting data types
        if namespace.doc is not None:
            route_data_types.extend(
                parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name))
        # Parse user-specified routes and add them to the starting data types
        # Note that this may add duplicates, but that's okay, as the recursion
        # keeps track of visited data types.
        assert '*' not in route_reprs
        for routes_repr in route_reprs:
            route_name, version = parse_route_name_and_version(routes_repr)
            if route_name not in namespace.routes_by_name or \
                    version not in namespace.routes_by_name[route_name].at_version:
                raise AssertionError('Route %s at version %d is not defined!'
                                     % (route_name, version))
            route = namespace.routes_by_name[route_name].at_version[version]
            route_data_types.extend(namespace.get_route_io_data_types_for_route(route))
            if route.doc is not None:
                route_data_types.extend(
                    parse_data_types_from_doc_ref(self.api, route.doc, namespace_name))

    # Parse the datatype whitelist and populate any starting data types
    for namespace_name, datatype_names in self._routes['datatype_whitelist'].items():
        if namespace_name not in self.api.namespaces:
            raise AssertionError('Namespace %s is not defined!' % namespace_name)
        # Parse namespace doc refs and add them to the starting data types
        namespace = self.api.namespaces[namespace_name]
        if namespace.doc is not None:
            route_data_types.extend(
                parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name))
        for datatype_name in datatype_names:
            if datatype_name not in self.api.namespaces[namespace_name].data_type_by_name:
                raise AssertionError('Datatype %s is not defined!' % datatype_name)
            data_type = self.api.namespaces[namespace_name].data_type_by_name[datatype_name]
            route_data_types.append(data_type)

    # Recurse on dependencies
    output_types_by_ns, output_routes_by_ns = self._find_dependencies(route_data_types)

    # Update the IR representation. This involves editing the data types and
    # routes for each namespace.
    for namespace in self.api.namespaces.values():
        data_types = list(set(output_types_by_ns[namespace.name]))  # defaults to empty list
        namespace.data_types = data_types
        namespace.data_type_by_name = {d.name: d for d in data_types}

        # Keep explicitly whitelisted routes plus any routes pulled in as
        # dependencies; de-duplicate via set().
        output_route_reprs = [output_route.name_with_version()
                              for output_route in output_routes_by_ns[namespace.name]]
        if namespace.name in route_whitelist:
            whitelisted_route_reprs = route_whitelist[namespace.name]
            route_reprs = list(set(whitelisted_route_reprs + output_route_reprs))
        else:
            route_reprs = output_route_reprs

        routes = []
        for route_repr in route_reprs:
            route_name, version = parse_route_name_and_version(route_repr)
            route = namespace.routes_by_name[route_name].at_version[version]
            routes.append(route)

        # Rebuild the namespace's route indexes from scratch via add_route.
        namespace.routes = []
        namespace.route_by_name = {}
        namespace.routes_by_name = {}
        for route in routes:
            namespace.add_route(route)
[ "def", "_filter_namespaces_by_route_whitelist", "(", "self", ")", ":", "assert", "self", ".", "_routes", "is", "not", "None", ",", "\"Missing route whitelist\"", "assert", "'route_whitelist'", "in", "self", ".", "_routes", "assert", "'datatype_whitelist'", "in", "self...
52
25.80198
def make_gaussian_kernel(sigma, npix=501, cdelt=0.01, xpix=None, ypix=None):
    """Make kernel for a 2D gaussian.

    Parameters
    ----------
    sigma : float
        Standard deviation in degrees.
    """
    # work in pixel units
    sigma_pix = sigma / cdelt

    def gauss(r, s):
        return np.exp(-r ** 2 / (2.0 * s ** 2)) / (2 * np.pi * s ** 2)

    dxy = make_pixel_distance(npix, xpix, ypix)
    kernel = gauss(dxy, sigma_pix)
    # normalize so the kernel integrates to unity over solid angle
    kernel /= np.sum(kernel) * np.radians(cdelt) ** 2
    return kernel
[ "def", "make_gaussian_kernel", "(", "sigma", ",", "npix", "=", "501", ",", "cdelt", "=", "0.01", ",", "xpix", "=", "None", ",", "ypix", "=", "None", ")", ":", "sigma", "/=", "cdelt", "def", "fn", "(", "t", ",", "s", ")", ":", "return", "1.", "/",...
23.052632
22.210526
def from_short_lines_text(self, text: str):
    """
    Split *text* into short lines without imposing any long-line structure.

    Populates ``self.short_lines`` with one :class:`ShortLine` per
    non-empty line of the input and resets ``self.long_lines`` to None.

    :param text: stanza text, lines separated by newlines
    :return: None
    """
    Metre.from_short_lines_text(self, text)
    non_empty = [line for line in text.split("\n") if line]
    self.short_lines = [ShortLine(line) for line in non_empty]
    self.long_lines = None
[ "def", "from_short_lines_text", "(", "self", ",", "text", ":", "str", ")", ":", "Metre", ".", "from_short_lines_text", "(", "self", ",", "text", ")", "self", ".", "short_lines", "=", "[", "ShortLine", "(", "line", ")", "for", "line", "in", "text", ".", ...
64.1875
43.0625
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
    """Apply the gradient for transformation to cartesian space onto zmat_dist.

    Args:
        grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
            The mathematical details of the index layout is explained in
            :meth:`~chemcoord.Cartesian.get_grad_zmat()`.
        zmat_dist (:class:`~chemcoord.Zmat`):
            Distortions in Zmatrix space.

    Returns:
        :class:`~chemcoord.Cartesian`: Distortions in cartesian space.
    """
    internal_coords = ['bond', 'angle', 'dihedral']
    C_dist = zmat_dist.loc[:, internal_coords].values.T
    try:
        # numeric case: convert angle rows (degrees) to radians with numpy
        C_dist = C_dist.astype('f8')
        C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
    except (TypeError, AttributeError):
        # symbolic entries: fall back to sympy's degree->radian conversion
        C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
    cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
    return Cartesian(atoms=zmat_dist['atom'], coords=cart_dist,
                     index=zmat_dist.index)
[ "def", "apply_grad_cartesian_tensor", "(", "grad_X", ",", "zmat_dist", ")", ":", "columns", "=", "[", "'bond'", ",", "'angle'", ",", "'dihedral'", "]", "C_dist", "=", "zmat_dist", ".", "loc", "[", ":", ",", "columns", "]", ".", "values", ".", "T", "try",...
43.666667
18.083333
def stellar_luminosity2(self, steps=10000):
    """
    DEPRECATED: ADW 2017-09-20

    Compute the stellar luminosity (L_Sol; average per star). Uses
    "sample" to generate mass sample and pdf.  The range of integration
    only covers the input isochrone data (no extrapolation used), but
    this seems like a sub-percent effect if the isochrone goes to 0.15
    Msun for the old and metal-poor stellar populations of interest.

    Note that the stellar luminosity is very sensitive to the post-AGB
    population.

    :param steps: number of mass samples to draw
    :return: PDF-weighted mean luminosity of the sampled population
    """
    import warnings
    msg = "'%s.stellar_luminosity2': ADW 2017-09-20"%self.__class__.__name__
    # BUG FIX: the original code only *constructed* a DeprecationWarning
    # instance without ever issuing it; actually emit the warning.
    warnings.warn(msg, DeprecationWarning)
    mass_init, mass_pdf, mass_act, mag_1, mag_2 = self.sample(mass_steps=steps)
    # Interpolate luminosity onto the sampled initial masses; outside the
    # isochrone's mass range the luminosity contributes zero.
    luminosity_interpolation = scipy.interpolate.interp1d(
        self.mass_init, self.luminosity, fill_value=0, bounds_error=False)
    luminosity = luminosity_interpolation(mass_init)
    return np.sum(luminosity * mass_pdf)
[ "def", "stellar_luminosity2", "(", "self", ",", "steps", "=", "10000", ")", ":", "msg", "=", "\"'%s.stellar_luminosity2': ADW 2017-09-20\"", "%", "self", ".", "__class__", ".", "__name__", "DeprecationWarning", "(", "msg", ")", "mass_init", ",", "mass_pdf", ",", ...
49.15
22.75
def sample_stats_prior_to_xarray(self):
    """Extract sample_stats_prior from prior."""
    # Pull the sampler statistics out of the prior fit and wrap them in an
    # xarray Dataset using this converter's coords/dims.
    stats = get_sample_stats(self.prior)
    return dict_to_dataset(
        stats,
        library=self.pystan,
        coords=self.coords,
        dims=self.dims,
    )
[ "def", "sample_stats_prior_to_xarray", "(", "self", ")", ":", "prior", "=", "self", ".", "prior", "data", "=", "get_sample_stats", "(", "prior", ")", "return", "dict_to_dataset", "(", "data", ",", "library", "=", "self", ".", "pystan", ",", "coords", "=", ...
50.4
13.4
def _has_flaky_attributes(cls, test):
    """
    Returns True if the test callable in question is marked as flaky.

    :param test:
        The test that is being prepared to run
    :type test:
        :class:`nose.case.Test` or :class:`Function`
    :return:
    :rtype:
        `bool`
    """
    # A flaky-decorated test always carries a CURRENT_RUNS attribute.
    return cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS) is not None
[ "def", "_has_flaky_attributes", "(", "cls", ",", "test", ")", ":", "current_runs", "=", "cls", ".", "_get_flaky_attribute", "(", "test", ",", "FlakyNames", ".", "CURRENT_RUNS", ")", "return", "current_runs", "is", "not", "None" ]
31.642857
18.071429
def load():
    """Loads the libdmtx shared library.
    """
    if platform.system() != 'Windows':
        # Assume a shared library on the path
        path = find_library('dmtx')
        if not path:
            raise ImportError('Unable to find dmtx shared library')
        return cdll.LoadLibrary(path)

    # Possible scenarios here
    #   1. Run from source, DLLs are in pylibdmtx directory
    #       cdll.LoadLibrary() imports DLLs in repo root directory
    #   2. Wheel install into CPython installation
    #       cdll.LoadLibrary() imports DLLs in package directory
    #   3. Wheel install into virtualenv
    #       cdll.LoadLibrary() imports DLLs in package directory
    #   4. Frozen
    #       cdll.LoadLibrary() imports DLLs alongside executable
    fname = _windows_fname()
    try:
        return cdll.LoadLibrary(fname)
    except OSError:
        # Fall back to the copy that ships next to this module.
        return cdll.LoadLibrary(
            str(Path(__file__).parent.joinpath(fname))
        )
[ "def", "load", "(", ")", ":", "if", "'Windows'", "==", "platform", ".", "system", "(", ")", ":", "# Possible scenarios here", "# 1. Run from source, DLLs are in pylibdmtx directory", "# cdll.LoadLibrary() imports DLLs in repo root directory", "# 2. Wheel install into CPyt...
35.896552
17.448276
def read(self, line, f, data):
    """See :meth:`PunchParser.read`"""
    # One mass per atom, spread over an unknown number of input lines.
    num_atoms = len(data["symbols"])
    masses = np.zeros(num_atoms, float)
    index = 0
    while index < num_atoms:
        for token in f.readline().split():
            # values are given in amu; convert to internal units
            masses[index] = float(token) * amu
            index += 1
    data["masses"] = masses
[ "def", "read", "(", "self", ",", "line", ",", "f", ",", "data", ")", ":", "N", "=", "len", "(", "data", "[", "\"symbols\"", "]", ")", "masses", "=", "np", ".", "zeros", "(", "N", ",", "float", ")", "counter", "=", "0", "while", "counter", "<", ...
32.909091
8.909091
def camera_status_send(self, time_usec, target_system, cam_idx, img_idx, event_id, p1, p2, p3, p4, force_mavlink1=False):
    '''
    Camera Event

    time_usec                 : Image timestamp (microseconds since UNIX epoch, according to camera clock) (uint64_t)
    target_system             : System ID (uint8_t)
    cam_idx                   : Camera ID (uint8_t)
    img_idx                   : Image index (uint16_t)
    event_id                  : See CAMERA_STATUS_TYPES enum for definition of the bitmask (uint8_t)
    p1                        : Parameter 1 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float)
    p2                        : Parameter 2 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float)
    p3                        : Parameter 3 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float)
    p4                        : Parameter 4 (meaning depends on event, see CAMERA_STATUS_TYPES enum) (float)
    '''
    # Encode the message first, then hand it to the transport layer.
    encoded = self.camera_status_encode(
        time_usec, target_system, cam_idx, img_idx, event_id, p1, p2, p3, p4)
    return self.send(encoded, force_mavlink1=force_mavlink1)
[ "def", "camera_status_send", "(", "self", ",", "time_usec", ",", "target_system", ",", "cam_idx", ",", "img_idx", ",", "event_id", ",", "p1", ",", "p2", ",", "p3", ",", "p4", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", ...
78.75
55.5
def hotspots(self):
    '''
    Get lines sampled across all threads, in order from most to least
    sampled.
    '''
    rooted_leaf_samples, _ = self.live_data_copy()
    # Merge the per-thread counters into a single per-line tally.
    totals = {}
    for counts in rooted_leaf_samples.values():
        for key, count in counts.items():
            totals[key] = totals.get(key, 0) + count
    return sorted(totals.items(), key=lambda item: item[1], reverse=True)
[ "def", "hotspots", "(", "self", ")", ":", "rooted_leaf_samples", ",", "_", "=", "self", ".", "live_data_copy", "(", ")", "line_samples", "=", "{", "}", "for", "_", ",", "counts", "in", "rooted_leaf_samples", ".", "items", "(", ")", ":", "for", "key", "...
37.384615
15.384615
def get(self, service, path, **kwargs):
    """ Make a get request (this returns a coroutine)"""
    # Thin convenience wrapper: delegates to make_request with the GET
    # method; extra keyword arguments are passed straight through.
    return self.make_request(Methods.GET, service, path, **kwargs)
[ "def", "get", "(", "self", ",", "service", ",", "path", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "make_request", "(", "Methods", ".", "GET", ",", "service", ",", "path", ",", "*", "*", "kwargs", ")" ]
56.333333
10.333333
def win32_refresh_window(cls):
    """
    Call win32 API to refresh the whole Window.

    This is sometimes necessary when the application paints background
    for completion menus. When the menu disappears, it leaves traces due
    to a bug in the Windows Console. Sending a repaint request solves it.
    """
    RDW_INVALIDATE = 0x0001
    # Get console handle
    console_handle = windll.kernel32.GetConsoleWindow()
    # Ask Windows to repaint the entire console window.
    windll.user32.RedrawWindow(console_handle, None, None,
                               c_uint(RDW_INVALIDATE))
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ...
39.846154
21.384615
def score_for_task(properties, category, result):
    """
    Return the possible score of task, depending on whether the result
    is correct or not.
    """
    assert result is not None
    # Scoring only applies to SV-COMP properties; everything else is unscored.
    if not properties:
        return None
    if Property.create_from_names(properties).is_svcomp:
        return _svcomp_score(category, result)
    return None
[ "def", "score_for_task", "(", "properties", ",", "category", ",", "result", ")", ":", "assert", "result", "is", "not", "None", "if", "properties", "and", "Property", ".", "create_from_names", "(", "properties", ")", ".", "is_svcomp", ":", "return", "_svcomp_sc...
39.125
16.375
def DbDeleteDevice(self, argin):
    """ Delete a device from database

    :param argin: device name
    :type: tango.DevString
    :return:
    :rtype: tango.DevVoid """
    self._log.debug("In DbDeleteDevice()")
    ok, dev_name, dfm = check_device_name(argin)
    if not ok:
        # Report the malformed name, then raise a Tango exception.
        self.warn_stream("DataBase::db_delete_device(): device name " + argin + " incorrect ")
        th_exc(DB_IncorrectDeviceName,
               "failed to delete device, device name incorrect",
               "DataBase::DeleteDevice()")
    self.db.delete_device(dev_name)
[ "def", "DbDeleteDevice", "(", "self", ",", "argin", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"In DbDeleteDevice()\"", ")", "ret", ",", "dev_name", ",", "dfm", "=", "check_device_name", "(", "argin", ")", "if", "not", "ret", ":", "self", ".", ...
37.25
16.5
def writeConfig(self):
    """
    Persists the value of the :attr:`AbstractJobStore.config` attribute to the
    job store, so that it can be retrieved later by other instances of this class.
    """
    # Pickle the config into the shared 'config.pickle' file in the store.
    with self.writeSharedFileStream('config.pickle', isProtected=False) as handle:
        pickle.dump(self.__config, handle, pickle.HIGHEST_PROTOCOL)
[ "def", "writeConfig", "(", "self", ")", ":", "with", "self", ".", "writeSharedFileStream", "(", "'config.pickle'", ",", "isProtected", "=", "False", ")", "as", "fileHandle", ":", "pickle", ".", "dump", "(", "self", ".", "__config", ",", "fileHandle", ",", ...
53.857143
27.285714
def wrap(scope, lines, format=BARE_FORMAT):
    """Wrap a stream of lines in armour.

    Takes a stream of lines, e.g. a single Line(1, "Lorem ipsum dolor.") or
    several Line(1, "Lorem ipsum"), Line(2, "dolor"), Line(3, "sit amet."),
    and yields each line wrapped according to ``format``:

    * a lone line gets ``format.single`` prefix/suffix;
    * in a multi-line run, the first line gets ``format.multiple.prefix``,
      the last gets ``format.multiple.suffix``, and every other boundary
      uses ``format.intra``;
    * every line after the first is indented by ``format.indent``.
    """
    for line in iterate(lines):
        if line.first and line.last:
            # lone line: single-line armour
            prefix, suffix = format.single.prefix, format.single.suffix
        else:
            prefix = format.multiple.prefix if line.first else format.intra.prefix
            suffix = format.multiple.suffix if line.last else format.intra.suffix
        indent = 0 if line.first else format.indent
        yield line.value.clone(line=prefix + line.value.stripped + suffix,
                               scope=scope + indent)
[ "def", "wrap", "(", "scope", ",", "lines", ",", "format", "=", "BARE_FORMAT", ")", ":", "for", "line", "in", "iterate", "(", "lines", ")", ":", "prefix", "=", "suffix", "=", "''", "if", "line", ".", "first", "and", "line", ".", "last", ":", "prefix...
33.114286
26.514286
def graph_query(kind, source, target=None, neighbor_limit=1,
                database_filter=None):
    """Perform a graph query on PathwayCommons.

    For more information on these queries, see
    http://www.pathwaycommons.org/pc2/#graph

    Parameters
    ----------
    kind : str
        The kind of graph query to perform. Currently 3 options are
        implemented, 'neighborhood', 'pathsbetween' and 'pathsfromto'.
    source : list[str]
        A list of gene names which are the source set for the graph query.
    target : Optional[list[str]]
        A list of gene names which are the target set for the graph query.
        Only needed for 'pathsfromto' queries.
    neighbor_limit : Optional[int]
        This limits the length of the longest path considered in
        the graph query. Default: 1

    Returns
    -------
    model : org.biopax.paxtools.model.Model
        A BioPAX model (java object).
    """
    default_databases = ['wp', 'smpdb', 'reconx', 'reactome', 'psp', 'pid',
                         'panther', 'netpath', 'msigdb', 'mirtarbase',
                         'kegg', 'intact', 'inoh', 'humancyc', 'hprd',
                         'drugbank', 'dip', 'corum']
    if not database_filter:
        query_databases = default_databases
    else:
        query_databases = database_filter
    # excluded: ctd
    params = {}
    params['format'] = 'BIOPAX'
    params['organism'] = '9606'
    params['datasource'] = query_databases
    # Get the "kind" string
    kind_str = kind.lower()
    # NOTE(review): the membership test below checks the original `kind`
    # rather than the lowercased `kind_str`, so e.g. 'Neighborhood' would
    # be rejected here — confirm whether that is intended.
    if kind not in ['neighborhood', 'pathsbetween', 'pathsfromto']:
        logger.warn('Invalid query type %s' % kind_str)
        return None
    params['kind'] = kind_str
    # Get the source string
    if isinstance(source, basestring):
        source_str = source
    else:
        source_str = ','.join(source)
    params['source'] = source_str
    # Coerce the neighbor limit to an int; bail out on anything unparsable.
    try:
        neighbor_limit = int(neighbor_limit)
        params['limit'] = neighbor_limit
    except (TypeError, ValueError):
        logger.warn('Invalid neighborhood limit %s' % neighbor_limit)
        return None
    if target is not None:
        if isinstance(target, basestring):
            target_str = target
        else:
            target_str = ','.join(target)
        params['target'] = target_str
    logger.info('Sending Pathway Commons query with parameters: ')
    for k, v in params.items():
        logger.info(' %s: %s' % (k, v))
    logger.info('Sending Pathway Commons query...')
    res = requests.get(pc2_url + 'graph', params=params)
    if not res.status_code == 200:
        logger.error('Response is HTTP code %d.' % res.status_code)
        if res.status_code == 500:
            logger.error('Note: HTTP code 500 can mean empty '
                         'results for a valid query.')
        return None
    # We don't decode to Unicode here because owl_str_to_model expects
    # a byte stream
    model = owl_str_to_model(res.content)
    if model is not None:
        logger.info('Pathway Commons query returned a model...')
    return model
[ "def", "graph_query", "(", "kind", ",", "source", ",", "target", "=", "None", ",", "neighbor_limit", "=", "1", ",", "database_filter", "=", "None", ")", ":", "default_databases", "=", "[", "'wp'", ",", "'smpdb'", ",", "'reconx'", ",", "'reactome'", ",", ...
35.86747
17.072289
def from_bytes(cls, bitstream, decode_payload=True):
    r'''
    Parse the given packet and update properties accordingly

    :param bitstream: raw packet bytes, a :class:`Bits`, or a
        :class:`ConstBitStream` positioned at the packet start
    :param decode_payload: forwarded to the IPv4/IPv6 payload parsers

    >>> data_hex = ('c033d3c10000000745c0005835400000'
    ...             'ff06094a254d38204d45d1a30016f597'
    ...             'a1c3c7406718bf1b50180ff0793f0000'
    ...             'b555e59ff5ba6aad33d875c600fd8c1f'
    ...             'c5268078f365ee199179fbd09d09d690'
    ...             '193622a6b70bcbc7bf5f20dda4258801')
    >>> data = data_hex.decode('hex')
    >>> message = DataPacket.from_bytes(data)
    >>> message.echo_nonce_request
    False
    >>> message.nonce
    '3\xd3\xc1'
    >>> message.source_map_version
    >>> message.destination_map_version
    >>> message.lsb
    ... # doctest: +ELLIPSIS
    [True, True, True, False, False, ..., False, False, False, False]
    >>> message.instance_id
    >>> bytes(message.payload)
    ... # doctest: +ELLIPSIS
    'E\xc0\x00X5@\x00\x00\xff\x06\tJ%M8...\xdd\xa4%\x88\x01'
    '''
    packet = cls()

    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)

    # Read the flags
    (nonce_present,
     lsb_enabled,
     packet.echo_nonce_request,
     map_version_present,
     instance_id_present) = bitstream.readlist('5*bool')

    # Skip over reserved bits
    bitstream.read(3)

    # Parse nonce or map versions (mutually exclusive per the flag bits)
    if nonce_present:
        # Nonce: yes, versions: no
        packet.nonce = bitstream.read('bytes:3')
        packet.source_map_version = None
        packet.destination_map_version = None
    elif map_version_present:
        # Nonce: no, versions: yes
        packet.nonce = None
        (packet.source_map_version,
         packet.destination_map_version) = bitstream.readlist('2*uint:12')
    else:
        # Nonce: no, versions: no
        packet.nonce = None
        packet.source_map_version = None
        packet.destination_map_version = None

        # Skip over the nonce/map-version bits
        bitstream.read(24)

    # Parse instance-id
    if instance_id_present:
        packet.instance_id = bitstream.read('uint:24')

        # 8 bits remaining for LSB
        lsb_bits = 8
    else:
        # 32 bits remaining for LSB
        lsb_bits = 32

    # Parse LSBs
    if lsb_enabled:
        packet.lsb = bitstream.readlist('%d*bool' % lsb_bits)

        # Reverse for readability: least significant locator-bit first
        packet.lsb.reverse()
    else:
        # Skip over the LSBs
        bitstream.read(lsb_bits)

    # The rest of the packet is payload
    remaining = bitstream[bitstream.pos:]

    # Parse IP packet: dispatch on the IP version nibble without consuming it
    if len(remaining):
        ip_version = remaining.peek('uint:4')
        if ip_version == 4:
            packet.payload = IPv4Packet.from_bytes(remaining,
                                                   decode_payload=decode_payload)
        elif ip_version == 6:
            packet.payload = IPv6Packet.from_bytes(remaining,
                                                   decode_payload=decode_payload)
        else:
            # Unknown version: keep the raw bytes
            packet.payload = remaining.bytes

    # Verify that the properties make sense
    packet.sanitize()

    return packet
[ "def", "from_bytes", "(", "cls", ",", "bitstream", ",", "decode_payload", "=", "True", ")", ":", "packet", "=", "cls", "(", ")", "# Convert to ConstBitStream (if not already provided)", "if", "not", "isinstance", "(", "bitstream", ",", "ConstBitStream", ")", ":", ...
34.078431
18
def gen_challenge(self, state):
    """returns the next challenge and increments the seed and index
    in the state.

    :param state: the state to use for generating the challenge.  the
    state's signature is verified before use; the state is then mutated
    (seed advanced, index incremented) and re-signed so the server can
    store it again.
    """
    # Refuse to use a state we did not sign.
    state.checksig(self.key)
    if state.index >= state.n:
        raise HeartbeatError("Out of challenges.")
    # Advance the seed chain, then hand out the challenge at this index.
    state.seed = MerkleHelper.get_next_seed(self.key, state.seed)
    challenge = Challenge(state.seed, state.index)
    state.index += 1
    # Re-sign so the server can persist the updated state.
    state.sign(self.key)
    return challenge
[ "def", "gen_challenge", "(", "self", ",", "state", ")", ":", "state", ".", "checksig", "(", "self", ".", "key", ")", "if", "(", "state", ".", "index", ">=", "state", ".", "n", ")", ":", "raise", "HeartbeatError", "(", "\"Out of challenges.\"", ")", "st...
42.166667
19.166667
def request_anime(aid: int) -> 'Anime':
    """Make an anime API request."""
    # Fetch raw info via the shared client, then wrap it in an Anime tuple.
    return Anime._make(alib.request_anime(_CLIENT, aid))
[ "def", "request_anime", "(", "aid", ":", "int", ")", "->", "'Anime'", ":", "anime_info", "=", "alib", ".", "request_anime", "(", "_CLIENT", ",", "aid", ")", "return", "Anime", ".", "_make", "(", "anime_info", ")" ]
39.5
4
def connect_all(state):
    '''
    Connect to all the configured servers in parallel. Reads/writes state.inventory.

    Args:
        state (``pyinfra.api.State`` obj): the state containing an inventory to connect to
    '''
    hosts = [host for host in state.inventory if state.is_host_in_limit(host)]

    # Spawn one greenlet per host so connections happen concurrently.
    greenlet_to_host = {
        state.pool.spawn(host.connect, state): host
        for host in hosts
    }

    # Tick the spinner as each connection attempt completes.
    with progress_spinner(greenlet_to_host.values()) as progress:
        for greenlet in gevent.iwait(greenlet_to_host.keys()):
            progress(greenlet_to_host[greenlet])

    # Get/set the results
    failed_hosts = set()
    for greenlet, host in six.iteritems(greenlet_to_host):
        # Raise any unexpected exception
        greenlet.get()

        if host.connection:
            state.activate_host(host)
        else:
            failed_hosts.add(host)

    # Remove those that failed, triggering FAIL_PERCENT check
    state.fail_hosts(failed_hosts, activated_count=len(hosts))
[ "def", "connect_all", "(", "state", ")", ":", "hosts", "=", "[", "host", "for", "host", "in", "state", ".", "inventory", "if", "state", ".", "is_host_in_limit", "(", "host", ")", "]", "greenlet_to_host", "=", "{", "state", ".", "pool", ".", "spawn", "(...
27.648649
24.135135
def imatch(pattern, name): # type: (Text, Text) -> bool """Test whether a name matches a wildcard pattern (case insensitive). Arguments: pattern (str): A wildcard pattern, e.g. ``"*.py"``. name (bool): A filename. Returns: bool: `True` if the filename matches the pattern. """ try: re_pat = _PATTERN_CACHE[(pattern, False)] except KeyError: res = "(?ms)" + _translate(pattern, case_sensitive=False) + r'\Z' _PATTERN_CACHE[(pattern, False)] = re_pat = re.compile(res, re.IGNORECASE) return re_pat.match(name) is not None
[ "def", "imatch", "(", "pattern", ",", "name", ")", ":", "# type: (Text, Text) -> bool", "try", ":", "re_pat", "=", "_PATTERN_CACHE", "[", "(", "pattern", ",", "False", ")", "]", "except", "KeyError", ":", "res", "=", "\"(?ms)\"", "+", "_translate", "(", "p...
32.5
20.944444
def _set_get_flexports(self, v, load=False): """ Setter method for get_flexports, mapped from YANG variable /brocade_hardware_rpc/get_flexports (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_flexports is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_flexports() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_flexports.get_flexports, is_leaf=True, yang_name="get-flexports", rest_name="get-flexports", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'connector_group_show'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_flexports must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_flexports.get_flexports, is_leaf=True, yang_name="get-flexports", rest_name="get-flexports", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'connector_group_show'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='rpc', is_config=True)""", }) self.__get_flexports = t if hasattr(self, '_set'): self._set()
[ "def", "_set_get_flexports", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "...
73.727273
34.590909
def enrich_pull_requests(self, ocean_backend, enrich_backend, raw_issues_index="github_issues_raw"): """ The purpose of this Study is to add additional fields to the pull_requests only index. Basically to calculate some of the metrics from Code Development under GMD metrics: https://github.com/chaoss/wg-gmd/blob/master/2_Growth-Maturity-Decline.md#code-development When data from the pull requests category is fetched using perceval, some additional fields such as "number_of_comments" that are made on the PR cannot be calculated as the data related to comments is not fetched. When data from the issues category is fetched, then every item is considered as an issue and PR specific data such as "review_comments" are not fetched. Items (pull requests) from the raw issues index are queried and data from those items are used to add fields in the corresponding pull request in the pull requests only index. The ids are matched in both the indices. :param ocean_backend: backend from which to read the raw items :param enrich_backend: backend from which to read the enriched items :param raw_issues_index: the raw issues index from which the data for PRs is to be extracted :return: None """ HEADER_JSON = {"Content-Type": "application/json"} # issues raw index from which the data will be extracted github_issues_raw_index = ocean_backend.elastic_url + "/" + raw_issues_index issues_index_search_url = github_issues_raw_index + "/_search" # pull_requests index search url in which the data is to be updated enrich_index_search_url = self.elastic.index_url + "/_search" logger.info("Doing enrich_pull_request study for index {}" .format(self.elastic.anonymize_url(self.elastic.index_url))) time.sleep(1) # HACK: Wait until git enrich index has been written def make_request(url, error_msg, data=None, req_type="GET"): """ Make a request to the given url. The request can be of type GET or a POST. If the request raises an error, display that error using the custom error msg. 
:param url: URL to make the GET request to :param error_msg: custom error message for logging purposes :param data: data to be sent with the POST request optional if type="GET" else compulsory :param req_type: the type of request to be made: GET or POST default: GET :return r: requests object """ r = None if req_type == "GET": r = self.requests.get(url, headers=HEADER_JSON, verify=False) elif req_type == "POST" and data is not None: r = self.requests.post(url, data=data, headers=HEADER_JSON, verify=False) try: r.raise_for_status() except requests.exceptions.HTTPError as ex: logger.error(error_msg) logger.error(ex) return return r # Check if the github issues raw index exists, if not raise an error and abort error_msg = "Invalid index provided for enrich_pull_requests study. Aborting." make_request(issues_index_search_url, error_msg) # get the number of pull requests in the pull_requests index # https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html # Example: # epoch timestamp count # 1533454641 13:07:21 276 count_url = enrich_backend.elastic_url + "/_cat/count/" + enrich_backend.elastic.index + "?v" error_msg = "Cannot fetch number of items in {} Aborting.".format(enrich_backend.elastic.index) r = make_request(count_url, error_msg) num_pull_requests = int(r.text.split()[-1]) # get all the ids that are in the enriched pull requests index which will be used later # to pull requests data from the issue having the same id in the raw_issues_index pull_requests_ids = [] size = 10000 # Default number of items that can be queried from elasticsearch at a time i = 0 # counter while num_pull_requests > 0: fetch_id_in_repo_query = """ { "_source": ["id_in_repo"], "from": %s, "size": %s } """ % (i, size) error_msg = "Error extracting id_in_repo from {}. 
Aborting.".format(self.elastic.index_url) r = make_request(enrich_index_search_url, error_msg, fetch_id_in_repo_query, "POST") id_in_repo_json = r.json()["hits"]["hits"] pull_requests_ids.extend([item["_source"]["id_in_repo"] for item in id_in_repo_json]) i += size num_pull_requests -= size # get pull requests data from the github_issues_raw and pull_requests only # index using specific id for each of the item query = """ { "query": { "bool": { "must": [{ "match": { %s: %s } }] } } } """ num_enriched = 0 # counter to count the number of PRs enriched pull_requests = [] for pr_id in pull_requests_ids: # retrieve the data from the issues index issue_query = query % ('"data.number"', pr_id) error_msg = "Id {} doesnot exists in {}. Aborting.".format(pr_id, github_issues_raw_index) r = make_request(issues_index_search_url, error_msg, issue_query, "POST") issue = r.json()["hits"]["hits"][0]["_source"]["data"] # retrieve the data from the pull_requests index pr_query = query % ('"id_in_repo"', pr_id) error_msg = "Id {} doesnot exists in {}. Aborting.".format(pr_id, self.elastic.index_url) r = make_request(enrich_index_search_url, error_msg, pr_query, "POST") pull_request_data = r.json()["hits"]["hits"][0] pull_request = pull_request_data['_source'] pull_request["_item_id"] = pull_request_data['_id'] # Add the necessary fields reaction_time = get_time_diff_days(str_to_datetime(issue['created_at']), self.get_time_to_first_attention(issue)) if not reaction_time: reaction_time = 0 if pull_request["time_to_merge_request_response"]: reaction_time = min(pull_request["time_to_merge_request_response"], reaction_time) pull_request["time_to_merge_request_response"] = reaction_time pull_request['num_comments'] = issue['comments'] # should latest reviews be considered as well? 
pull_request['pr_comment_duration'] = get_time_diff_days(str_to_datetime(issue['created_at']), self.get_latest_comment_date(issue)) pull_request['pr_comment_diversity'] = self.get_num_commenters(issue) pull_requests.append(pull_request) if len(pull_requests) >= self.elastic.max_items_bulk: self.elastic.bulk_upload(pull_requests, "_item_id") pull_requests = [] num_enriched += 1 logger.info("pull_requests processed %i/%i", num_enriched, len(pull_requests_ids)) self.elastic.bulk_upload(pull_requests, "_item_id")
[ "def", "enrich_pull_requests", "(", "self", ",", "ocean_backend", ",", "enrich_backend", ",", "raw_issues_index", "=", "\"github_issues_raw\"", ")", ":", "HEADER_JSON", "=", "{", "\"Content-Type\"", ":", "\"application/json\"", "}", "# issues raw index from which the data w...
48.607595
28.056962
def check_inputs(self): """ Check for the existence of input files """ self.inputs = self.expand_filenames(self.inputs) result = False if len(self.inputs) == 0 or self.files_exist(self.inputs): result = True else: print("Not executing task. Input file(s) do not exist.") return result
[ "def", "check_inputs", "(", "self", ")", ":", "self", ".", "inputs", "=", "self", ".", "expand_filenames", "(", "self", ".", "inputs", ")", "result", "=", "False", "if", "len", "(", "self", ".", "inputs", ")", "==", "0", "or", "self", ".", "files_exi...
38.666667
18.444444
def got_arbiter_module_type_defined(self, module_type): """Check if a module type is defined in one of the arbiters Also check the module name :param module_type: module type to search for :type module_type: str :return: True if mod_type is found else False :rtype: bool TODO: Factorize it with got_broker_module_type_defined: """ for arbiter in self.arbiters: # Do like the linkify will do after.... for module in getattr(arbiter, 'modules', []): # So look at what the arbiter try to call as module module_name = module.get_name() # Ok, now look in modules... for mod in self.modules: # try to see if this module is the good type if getattr(mod, 'python_name', '').strip() == module_type.strip(): # if so, the good name? if getattr(mod, 'name', '').strip() == module_name: return True return False
[ "def", "got_arbiter_module_type_defined", "(", "self", ",", "module_type", ")", ":", "for", "arbiter", "in", "self", ".", "arbiters", ":", "# Do like the linkify will do after....", "for", "module", "in", "getattr", "(", "arbiter", ",", "'modules'", ",", "[", "]",...
46.130435
14.913043
def parse_localnamespacepath(parser, event, node): #pylint: disable=unused-argument """Parse LOCALNAMESPACEPATH for Namespace. Return assembled namespace <!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)> """ (next_event, next_node) = six.next(parser) namespaces = [] if not _is_start(next_event, next_node, 'NAMESPACE'): print(next_event, next_node) raise ParseError('Expecting NAMESPACE') namespaces.append(parse_namespace(parser, next_event, next_node)) while 1: (next_event, next_node) = six.next(parser) if _is_end(next_event, next_node, 'LOCALNAMESPACEPATH'): break if _is_start(next_event, next_node, 'NAMESPACE'): namespaces.append(parse_namespace(parser, next_event, next_node)) else: raise ParseError('Expecting NAMESPACE') return '/'.join(namespaces)
[ "def", "parse_localnamespacepath", "(", "parser", ",", "event", ",", "node", ")", ":", "#pylint: disable=unused-argument", "(", "next_event", ",", "next_node", ")", "=", "six", ".", "next", "(", "parser", ")", "namespaces", "=", "[", "]", "if", "not", "_is_s...
29.862069
22.689655
def verified_approved(pronac, dt): """ This metric compare budgetary items of SALIC projects in terms of verified versus approved value Items that have vlComprovacao > vlAprovacao * 1.5 are considered outliers output: is_outlier: True if any item is outlier valor: Absolute number of items that are outliers outlier_items: Outlier items detail """ items_df = data.approved_verified_items items_df = items_df.loc[items_df['PRONAC'] == pronac] items_df[[APPROVED_COLUMN, VERIFIED_COLUMN]] = items_df[ [APPROVED_COLUMN, VERIFIED_COLUMN] ].astype(float) items_df["Item"] = items_df["Item"].str.replace("\r", "") items_df["Item"] = items_df["Item"].str.replace("\n", "") items_df["Item"] = items_df["Item"].str.replace('"', "") items_df["Item"] = items_df["Item"].str.replace("'", "") items_df["Item"] = items_df["Item"].str.replace("\\", "") THRESHOLD = 1.5 bigger_than_approved = items_df[VERIFIED_COLUMN] > ( items_df[APPROVED_COLUMN] * THRESHOLD ) features = items_df[bigger_than_approved] outlier_items = outlier_items_(features) features_size = features.shape[0] is_outlier = features_size > 0 return { "is_outlier": is_outlier, "valor": features_size, "maximo_esperado": MIN_EXPECTED_ITEMS, "minimo_esperado": MAX_EXPECTED_ITEMS, "lista_de_comprovantes": outlier_items, "link_da_planilha": "http://salic.cultura.gov.br/projeto/#/{0}/relacao-de-pagamento".format(pronac) }
[ "def", "verified_approved", "(", "pronac", ",", "dt", ")", ":", "items_df", "=", "data", ".", "approved_verified_items", "items_df", "=", "items_df", ".", "loc", "[", "items_df", "[", "'PRONAC'", "]", "==", "pronac", "]", "items_df", "[", "[", "APPROVED_COLU...
40.368421
17.157895
def _getStrippedValue(value, strip): """Like the strip() string method, except the strip argument describes different behavior: If strip is None, whitespace is stripped. If strip is a string, the characters in the string are stripped. If strip is False, nothing is stripped.""" if strip is None: value = value.strip() # Call strip() with no arguments to strip whitespace. elif isinstance(strip, str): value = value.strip(strip) # Call strip(), passing the strip argument. elif strip is False: pass # Don't strip anything. return value
[ "def", "_getStrippedValue", "(", "value", ",", "strip", ")", ":", "if", "strip", "is", "None", ":", "value", "=", "value", ".", "strip", "(", ")", "# Call strip() with no arguments to strip whitespace.", "elif", "isinstance", "(", "strip", ",", "str", ")", ":"...
36.375
20.375
def encode (text): """Encode text with default encoding if its Unicode.""" if isinstance(text, unicode): return text.encode(i18n.default_encoding, 'ignore') return text
[ "def", "encode", "(", "text", ")", ":", "if", "isinstance", "(", "text", ",", "unicode", ")", ":", "return", "text", ".", "encode", "(", "i18n", ".", "default_encoding", ",", "'ignore'", ")", "return", "text" ]
36.8
14.6
def get_common_complete_suffix(document, completions): """ Return the common prefix for all completions. """ # Take only completions that don't change the text before the cursor. def doesnt_change_before_cursor(completion): end = completion.text[:-completion.start_position] return document.text_before_cursor.endswith(end) completions2 = [c for c in completions if doesnt_change_before_cursor(c)] # When there is at least one completion that changes the text before the # cursor, don't return any common part. if len(completions2) != len(completions): return '' # Return the common prefix. def get_suffix(completion): return completion.text[-completion.start_position:] return _commonprefix([get_suffix(c) for c in completions2])
[ "def", "get_common_complete_suffix", "(", "document", ",", "completions", ")", ":", "# Take only completions that don't change the text before the cursor.", "def", "doesnt_change_before_cursor", "(", "completion", ")", ":", "end", "=", "completion", ".", "text", "[", ":", ...
37.809524
20.095238
def order_assets(self, asset_ids, composition_id): """Reorders a set of assets in a composition. arg: asset_ids (osid.id.Id[]): ``Ids`` for a set of ``Assets`` arg: composition_id (osid.id.Id): ``Id`` of the ``Composition`` raise: NotFound - ``composition_id`` not found or, an ``asset_id`` not related to ``composition_id`` raise: NullArgument - ``instruction_ids`` or ``agenda_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ if (not isinstance(composition_id, ABCId) and composition_id.get_identifier_namespace() != 'repository.Composition'): raise errors.InvalidArgument('the argument is not a valid OSID Id') composition_map, collection = self._get_composition_collection(composition_id) composition_map['assetIds'] = order_ids(asset_ids, composition_map['assetIds']) collection.save(composition_map)
[ "def", "order_assets", "(", "self", ",", "asset_ids", ",", "composition_id", ")", ":", "if", "(", "not", "isinstance", "(", "composition_id", ",", "ABCId", ")", "and", "composition_id", ".", "get_identifier_namespace", "(", ")", "!=", "'repository.Composition'", ...
51.090909
22.636364
def map_query(self, variables=None, evidence=None, elimination_order=None): """ Computes the MAP Query over the variables given the evidence. Note: When multiple variables are passed, it returns the map_query for each of them individually. Parameters ---------- variables: list list of variables over which we want to compute the max-marginal. evidence: dict a dict key, value pair as {var: state_of_var_observed} None if no evidence elimination_order: list order of variable eliminations (if nothing is provided) order is computed automatically Examples -------- >>> from pgmpy.inference import VariableElimination >>> from pgmpy.models import BayesianModel >>> import numpy as np >>> import pandas as pd >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')]) >>> model.fit(values) >>> inference = VariableElimination(model) >>> phi_query = inference.map_query(['A', 'B']) """ # TODO:Check the note in docstring. Change that behavior to return the joint MAP final_distribution = self._variable_elimination(variables, 'marginalize', evidence=evidence, elimination_order=elimination_order) argmax = np.argmax(final_distribution.values) assignment = final_distribution.assignment([argmax])[0] map_query_results = {} for var_assignment in assignment: var, value = var_assignment map_query_results[var] = value if not variables: return map_query_results else: return_dict = {} for var in variables: return_dict[var] = map_query_results[var] return return_dict
[ "def", "map_query", "(", "self", ",", "variables", "=", "None", ",", "evidence", "=", "None", ",", "elimination_order", "=", "None", ")", ":", "# TODO:Check the note in docstring. Change that behavior to return the joint MAP", "final_distribution", "=", "self", ".", "_v...
41.38
21.66
def save(self, description=None, raiseError=True, ntrials=3): """ Save repository '.pyreprepo' to disk and create (if missing) or update (if description is not None) '.pyrepdirinfo'. :Parameters: #. description (None, str): Repository main directory information. If given will be replaced. #. raiseError (boolean): Whether to raise encountered error instead of returning failure. #. ntrials (int): After aquiring all locks, ntrials is the maximum number of trials allowed before failing. In rare cases, when multiple processes are accessing the same repository components, different processes can alter repository components between successive lock releases of some other process. Bigger number of trials lowers the likelyhood of failure due to multiple processes same time alteration. :Returns: #. success (bool): Whether saving was successful. #. error (None, string): Fail to save repository message in case saving is not successful. If success is True, error will be None. """ assert isinstance(raiseError, bool), "raiseError must be boolean" assert isinstance(ntrials, int), "ntrials must be integer" assert ntrials>0, "ntrials must be >0" # get description if description is not None: assert isinstance(description, basestring), "description must be None or a string" dirInfoPath = os.path.join(self.__path, self.__dirInfo) if description is None and not os.path.isfile(dirInfoPath): description = '' # create and acquire lock LR = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock)) acquired, code = LR.acquire_lock() # check if acquired. m = "code %s. Unable to aquire the lock when calling 'save'. 
You may try again!"%(code,) if not acquired: assert not raiseError, Exception(m) return False, m # save repository for _trial in range(ntrials): try: # open file repoInfoPath = os.path.join(self.__path, self.__repoFile) error = None self.__save_dirinfo(description=description, dirInfoPath=dirInfoPath) # load and update repository info if existing if os.path.isfile(repoInfoPath): with open(repoInfoPath, 'rb') as fd: repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile)) self.__repo['walk_repo'] = repo['walk_repo'] # create repository with open(repoInfoPath, 'wb') as fd: self.__repo["last_update_utctime"] = time.time() pickle.dump( self.__repo,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL ) fd.flush() os.fsync(fd.fileno()) except Exception as err: error = "Unable to save repository (%s)"%err if self.DEBUG_PRINT_FAILED_TRIALS: print("Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute"%(_trial, inspect.stack()[1][3], str(error))) else: break # release lock LR.release_lock() # return assert error is None or not raiseError, error return error is None, error
[ "def", "save", "(", "self", ",", "description", "=", "None", ",", "raiseError", "=", "True", ",", "ntrials", "=", "3", ")", ":", "assert", "isinstance", "(", "raiseError", ",", "bool", ")", ",", "\"raiseError must be boolean\"", "assert", "isinstance", "(", ...
51.594203
24.347826
def multivariate_ess(samples, batch_size_generator=None): r"""Estimate the multivariate Effective Sample Size for the samples of every problem. This essentially applies :func:`estimate_multivariate_ess` to every problem. Args: samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally, a generator function that yields sample arrays of shape (p, n). batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many batches and of which size we use in estimating the minimum ESS. Returns: ndarray: the multivariate ESS per problem """ samples_generator = _get_sample_generator(samples) return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))
[ "def", "multivariate_ess", "(", "samples", ",", "batch_size_generator", "=", "None", ")", ":", "samples_generator", "=", "_get_sample_generator", "(", "samples", ")", "return", "np", ".", "array", "(", "multiprocess_mapping", "(", "_MultivariateESSMultiProcessing", "(...
55.882353
36
def discover_config_path(self, config_filename: str) -> str: """ Search for config file in a number of places. If there is no config file found, will return None. :param config_filename: Config file name or custom path to filename with config. :return: Path to the discovered config file or None. """ if config_filename and os.path.isfile(config_filename): return config_filename for place in _common_places: config_path = os.path.join(place, config_filename) if os.path.isfile(config_path): return config_path return
[ "def", "discover_config_path", "(", "self", ",", "config_filename", ":", "str", ")", "->", "str", ":", "if", "config_filename", "and", "os", ".", "path", ".", "isfile", "(", "config_filename", ")", ":", "return", "config_filename", "for", "place", "in", "_co...
34.888889
20.555556
def add_cookies_to_web_driver(driver, cookies): """ Sets cookies in an existing WebDriver session. """ for cookie in cookies: driver.add_cookie(convert_cookie_to_dict(cookie)) return driver
[ "def", "add_cookies_to_web_driver", "(", "driver", ",", "cookies", ")", ":", "for", "cookie", "in", "cookies", ":", "driver", ".", "add_cookie", "(", "convert_cookie_to_dict", "(", "cookie", ")", ")", "return", "driver" ]
30.142857
10.142857
def get_card(self): ''' Get card this checklist is on. ''' card_id = self.get_checklist_information().get('idCard', None) if card_id: return self.client.get_card(card_id)
[ "def", "get_card", "(", "self", ")", ":", "card_id", "=", "self", ".", "get_checklist_information", "(", ")", ".", "get", "(", "'idCard'", ",", "None", ")", "if", "card_id", ":", "return", "self", ".", "client", ".", "get_card", "(", "card_id", ")" ]
30.857143
20
def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """ try: syslog_msg = self.build_msg(record) self.transport.transmit(syslog_msg) except Exception: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "syslog_msg", "=", "self", ".", "build_msg", "(", "record", ")", "self", ".", "transport", ".", "transmit", "(", "syslog_msg", ")", "except", "Exception", ":", "self", ".", "handleError", ...
31.333333
16.5
def _set_exp_dscp(self, v, load=False): """ Setter method for exp_dscp, mapped from YANG variable /qos_mpls/map/exp_dscp (list) If this variable is read-only (config: false) in the source YANG file, then _set_exp_dscp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_exp_dscp() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("exp_dscp_map_name",exp_dscp.exp_dscp, yang_name="exp-dscp", rest_name="exp-dscp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='exp-dscp-map-name', extensions={u'tailf-common': {u'info': u'Configure Exp dscp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd4Callpoint', u'cli-mode-name': u'exp-dscp-$(exp-dscp-map-name)'}}), is_container='list', yang_name="exp-dscp", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Exp dscp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd4Callpoint', u'cli-mode-name': u'exp-dscp-$(exp-dscp-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """exp_dscp must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("exp_dscp_map_name",exp_dscp.exp_dscp, yang_name="exp-dscp", rest_name="exp-dscp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='exp-dscp-map-name', extensions={u'tailf-common': {u'info': u'Configure Exp dscp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd4Callpoint', u'cli-mode-name': u'exp-dscp-$(exp-dscp-map-name)'}}), is_container='list', yang_name="exp-dscp", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, 
extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Exp dscp', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCmd4Callpoint', u'cli-mode-name': u'exp-dscp-$(exp-dscp-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""", }) self.__exp_dscp = t if hasattr(self, '_set'): self._set()
[ "def", "_set_exp_dscp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base"...
113.727273
54.318182
def GroupEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a group field.""" start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP) end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP) assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(start_tag) element._InternalSerialize(write) write(end_tag) return EncodeRepeatedField else: def EncodeField(write, value): write(start_tag) value._InternalSerialize(write) return write(end_tag) return EncodeField
[ "def", "GroupEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "start_tag", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_START_GROUP", ")", "end_tag", "=", "TagBytes", "(", "field_number", ",", "wire_format"...
32.105263
15.947368
def _handle_subscribed(self, *args, chanId=None, channel=None, **kwargs): """ Handles responses to subscribe() commands - registers a channel id with the client and assigns a data handler to it. :param chanId: int, represent channel id as assigned by server :param channel: str, represents channel name """ log.debug("_handle_subscribed: %s - %s - %s", chanId, channel, kwargs) if chanId in self.channels: raise AlreadyRegisteredError() self._heartbeats[chanId] = time.time() try: channel_key = ('raw_'+channel if kwargs['prec'].startswith('R') and channel == 'book' else channel) except KeyError: channel_key = channel try: self.channels[chanId] = self._data_handlers[channel_key] except KeyError: raise UnknownChannelError() # prep kwargs to be used as secondary value in dict key try: kwargs.pop('event') except KeyError: pass try: kwargs.pop('len') except KeyError: pass try: kwargs.pop('chanId') except KeyError: pass self.channel_labels[chanId] = (channel_key, kwargs)
[ "def", "_handle_subscribed", "(", "self", ",", "*", "args", ",", "chanId", "=", "None", ",", "channel", "=", "None", ",", "*", "*", "kwargs", ")", ":", "log", ".", "debug", "(", "\"_handle_subscribed: %s - %s - %s\"", ",", "chanId", ",", "channel", ",", ...
30.833333
21.404762
def _GetDataStreams(self): """Retrieves the data streams. Returns: list[DataStream]: data streams. """ if self._data_streams is None: if self._directory is None: self._directory = self._GetDirectory() self._data_streams = [] # It is assumed that directory and link file entries typically # do not have data streams. if not self._directory and not self.link: data_stream = DataStream() self._data_streams.append(data_stream) return self._data_streams
[ "def", "_GetDataStreams", "(", "self", ")", ":", "if", "self", ".", "_data_streams", "is", "None", ":", "if", "self", ".", "_directory", "is", "None", ":", "self", ".", "_directory", "=", "self", ".", "_GetDirectory", "(", ")", "self", ".", "_data_stream...
27.105263
15.789474
def h2i(self, pkt, h): """human x.x.x.x/y to internal""" ip,mask = re.split( '/', h) return int(mask), ip
[ "def", "h2i", "(", "self", ",", "pkt", ",", "h", ")", ":", "ip", ",", "mask", "=", "re", ".", "split", "(", "'/'", ",", "h", ")", "return", "int", "(", "mask", ")", ",", "ip" ]
27.25
11.5
def normal(target, seeds, scale, loc):
    r"""
    Produces values from a Normal distribution given a set of random numbers.

    Parameters
    ----------
    target : OpenPNM Object
        The object with which this function as associated.  This argument
        is required to (1) set number of values to generate (geom.Np or
        geom.Nt) and (2) provide access to other necessary values
        (i.e. geom['pore.seed']).

    seeds : string, optional
        The dictionary key on the Geometry object containing random seed
        values (between 0 and 1) to use in the statistical distribution.

    scale : float
        The standard deviation of the Normal distribution

    loc : float
        The mean of the Normal distribution

    Examples
    --------
    The following code illustrates the inner workings of this function,
    which uses the 'norm' method of the scipy.stats module.  This can be
    used to find suitable values of 'scale' and 'loc'.

    >>> import scipy
    >>> func = scipy.stats.norm(scale=.0001, loc=0.001)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)

    """
    # Map uniform seeds in (0, 1) through the Normal inverse CDF (ppf).
    seeds = target[seeds]
    value = spts.norm.ppf(q=seeds, scale=scale, loc=loc)
    return value
[ "def", "normal", "(", "target", ",", "seeds", ",", "scale", ",", "loc", ")", ":", "seeds", "=", "target", "[", "seeds", "]", "value", "=", "spts", ".", "norm", ".", "ppf", "(", "q", "=", "seeds", ",", "scale", "=", "scale", ",", "loc", "=", "lo...
33.135135
23.540541
def error_class_for_http_status(status):
    """Return the appropriate `ResponseError` subclass for the given
    HTTP status code."""
    if status in error_classes:
        return error_classes[status]

    # Unknown status: hand back a factory that picks a generic error
    # class based on the 4xx / 5xx range.
    def new_status_error(xml_response):
        if 400 < status < 500:
            return UnexpectedClientError(status, xml_response)
        if 500 < status < 600:
            return UnexpectedServerError(status, xml_response)
        return UnexpectedStatusError(status, xml_response)

    return new_status_error
[ "def", "error_class_for_http_status", "(", "status", ")", ":", "try", ":", "return", "error_classes", "[", "status", "]", "except", "KeyError", ":", "def", "new_status_error", "(", "xml_response", ")", ":", "if", "(", "status", ">", "400", "and", "status", "...
42.923077
12
def lookup(self, name, version=None):
    """If version is omitted, max version is used"""
    versions = self.get(name)
    if not versions:
        return None
    # Fall back to the highest available version when none is requested.
    key = version if version else max(versions)
    return versions[key]
[ "def", "lookup", "(", "self", ",", "name", ",", "version", "=", "None", ")", ":", "versions", "=", "self", ".", "get", "(", "name", ")", "if", "not", "versions", ":", "return", "None", "if", "version", ":", "return", "versions", "[", "version", "]", ...
33.25
8.75
def proton_hydroxide_free_energy(temperature, pressure, pH):
    """Returns the Gibbs free energy of proton in bulk solution.

    Parameters
    ----------
    pH : pH of bulk solution
    temperature : numeric
        temperature in K
    pressure : numeric
        pressure in mbar

    Returns
    -------
    G_H, G_OH : Gibbs free energy of proton and hydroxide.
    """
    g_h2 = GasMolecule('H2').get_free_energy(temperature=temperature,
                                             pressure=pressure)
    g_h2o = GasMolecule('H2O').get_free_energy(temperature=temperature)
    # Half of H2 free energy shifted by the Nernstian pH term.
    g_proton = 0.5 * g_h2 - ((R * temperature) / (z * F)) * ln10 * pH
    # Water is equilibrated, so Kw is not needed for the hydroxide term.
    g_hydroxide = g_h2o - g_proton
    return (g_proton, g_hydroxide)
[ "def", "proton_hydroxide_free_energy", "(", "temperature", ",", "pressure", ",", "pH", ")", ":", "H2", "=", "GasMolecule", "(", "'H2'", ")", "H2O", "=", "GasMolecule", "(", "'H2O'", ")", "G_H2", "=", "H2", ".", "get_free_energy", "(", "temperature", "=", "...
31.318182
20.181818
def newDocText(self, content):
    """Creation of a new text node within a document. """
    ret = libxml2mod.xmlNewDocText(self._o, content)
    if ret is None:
        raise treeError('xmlNewDocText() failed')
    # Wrap the raw C-level node in the Python xmlNode proxy.
    return xmlNode(_obj=ret)
[ "def", "newDocText", "(", "self", ",", "content", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNewDocText", "(", "self", ".", "_o", ",", "content", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewDocText() failed'", ")", "__tmp", ...
44
12.833333
def wb020(self, value=None):
    """ Corresponds to IDD Field `wb020`
    Wet-bulb temperature corresponding to 02.0% annual cumulative
    frequency of occurrence

    Args:
        value (float): value for IDD Field `wb020`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and is stored as-is.
        self._wb020 = None
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `wb020`'.format(value))
    self._wb020 = value
[ "def", "wb020", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '"...
35.52381
21.714286
def parse_css(self):
    """
    Take a .css file (classes only please) and parse it into a
    dictionary of class/style pairs.
    """
    # todo: save the prefs for use later
    # orig_prefs = cssutils.ser.prefs
    cssutils.ser.prefs.useMinified()
    # Skip comment rules; every remaining rule maps selector -> style text.
    return {
        rule.selectorText: rule.style.cssText
        for rule in self.get_stylesheet()
        if not isinstance(rule, cssutils.css.CSSComment)
    }
[ "def", "parse_css", "(", "self", ")", ":", "# todo: save the prefs for use later", "# orig_prefs = cssutils.ser.prefs", "cssutils", ".", "ser", ".", "prefs", ".", "useMinified", "(", ")", "pairs", "=", "(", "(", "r", ".", "selectorText", ",", "r", ".", "style", ...
27.285714
13.285714
def run(itf):
    """Run optimize functions.

    :param itf: parsed user interface/options; a falsy value aborts
        immediately with return code 1.
    """
    if not itf:
        return 1

    # access user input
    options = SplitInput(itf)

    # read input
    inputpath = os.path.abspath(options.inputpath)
    print(" Reading input file ...")
    molecules = csv_interface.read_csv(inputpath, options)
    if not molecules:
        print("\n '{flag}' was unable to be parsed\n".format(flag=os.path.basename(options.inputpath)))
        sys.exit(1)

    # determine the sort order & ensemble_size
    #sort_order = classification.get_sort_order(molecules)
    sort_order = 'asc'
    ensemble_size = options.ensemble_size

    # loop over all ensemble sizes 1..ensemble_size, collecting AUC and EF
    # values for later histogram construction
    auc_list = []
    ef_list = []
    for size in range(1, ensemble_size + 1):
        auc, ef = optimizor(molecules, sort_order, size, options)
        auc_list += auc
        ef_list += ef

    # write AUC/EF lists out for subsequent post-processing; `with`
    # guarantees the files are closed even if a write raises
    with open('auc_histogram.csv', 'w') as f:
        for value in auc_list:
            f.write('%f\n' % value)

    with open('ef_histogram.csv', 'w') as f:
        for value in ef_list:
            f.write('%f\n' % value)
[ "def", "run", "(", "itf", ")", ":", "if", "not", "itf", ":", "return", "1", "# access user input", "options", "=", "SplitInput", "(", "itf", ")", "# read input", "inputpath", "=", "os", ".", "path", ".", "abspath", "(", "options", ".", "inputpath", ")", ...
30.214286
20.714286
def relpath(self, path):
    """ Return a relative filepath to path from Dir path.
    """
    return os.path.relpath(path, self.path)
[ "def", "relpath", "(", "self", ",", "path", ")", ":", "return", "os", ".", "path", ".", "relpath", "(", "path", ",", "start", "=", "self", ".", "path", ")" ]
47.333333
9.666667
def find(self, binding_id, instance):
    """find an instance

    Create a new instance and populate it with data stored if it exists.

    Args:
        binding_id (string): UUID of the binding
        instance (AtlasServiceInstance.Instance): instance

    Returns:
        AtlasServiceBinding: A binding
    """
    result = AtlasServiceBinding.Binding(binding_id, instance)
    # Fill in persisted state for this binding, if any was stored.
    self.backend.storage.populate(result)
    return result
[ "def", "find", "(", "self", ",", "binding_id", ",", "instance", ")", ":", "binding", "=", "AtlasServiceBinding", ".", "Binding", "(", "binding_id", ",", "instance", ")", "self", ".", "backend", ".", "storage", ".", "populate", "(", "binding", ")", "return"...
33.266667
17.933333
# PLY (yacc) parser rule: the docstring below is the grammar production and
# is read by the parser generator at runtime -- it is not documentation and
# must not be reworded.
def p_ConstValue_float(p):
    """ConstValue : FLOAT"""
    # Wrap the matched FLOAT token in a typed model.Value node.
    p[0] = model.Value(type=model.Value.FLOAT, value=p[1])
[ "def", "p_ConstValue_float", "(", "p", ")", ":", "p", "[", "0", "]", "=", "model", ".", "Value", "(", "type", "=", "model", ".", "Value", ".", "FLOAT", ",", "value", "=", "p", "[", "1", "]", ")" ]
36
10
def u_grade_ipix(ipix, nside_in, nside_out, nest=False):
    """
    Return the indices of the sub-pixels (resolution nside_out) contained
    within the input super-pixel(s) (resolution nside_in).

    Parameters:
    -----------
    ipix      : index of the input superpixel(s)
    nside_in  : nside of the input superpixel
    nside_out : nside of the desired subpixels (must be > nside_in)
    nest      : if True, `ipix` is in NESTED ordering; otherwise RING

    Returns:
    --------
    ipix_out : subpixels for each superpixel, in the same ordering scheme
               as the input; array input gains a trailing axis of length
               (nside_out//nside_in)**2
    """
    if nside_in==nside_out: return ipix
    if not (nside_in < nside_out):
        raise ValueError("nside_in must be less than nside_out")

    # Work in NESTED ordering, where a superpixel's children are contiguous.
    if nest:
        nest_ipix = ipix
    else:
        nest_ipix = hp.ring2nest(nside_in, ipix)

    # Number of subpixels per superpixel.
    # NOTE(review): assumes nside_out is an integer multiple (power of 2)
    # of nside_in, as HEALPix requires -- confirm callers guarantee this.
    factor = (nside_out//nside_in)**2

    if np.isscalar(ipix):
        nest_ipix_out = factor*nest_ipix + np.arange(factor)
    else:
        # Broadcast: one row of `factor` subpixels per input superpixel.
        nest_ipix_out = factor*np.asarray(nest_ipix)[:,np.newaxis]+np.arange(factor)

    if nest:
        return nest_ipix_out
    else:
        return hp.nest2ring(nside_out, nest_ipix_out)
[ "def", "u_grade_ipix", "(", "ipix", ",", "nside_in", ",", "nside_out", ",", "nest", "=", "False", ")", ":", "if", "nside_in", "==", "nside_out", ":", "return", "ipix", "if", "not", "(", "nside_in", "<", "nside_out", ")", ":", "raise", "ValueError", "(", ...
30.870968
19.774194
def get_context_data(self, **kwargs):
    """Add the user's social friends and connected providers to the
    template context.

    Populates:
        context['friends']: flat list of existing social friends drawn
            from every list in ``self.social_friend_lists``.
        context['connected_providers']: provider name for each of the
            user's social auth records.
    """
    context = super(FriendListView, self).get_context_data(**kwargs)

    # Flatten existing social friends from every friend list
    # (comprehension replaces the nested append loops).
    context['friends'] = [
        friend
        for friend_list in self.social_friend_lists
        for friend in friend_list.existing_social_friends()
    ]

    context['connected_providers'] = [sa.provider for sa in self.social_auths]

    return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "super", "(", "FriendListView", ",", "self", ")", ".", "get_context_data", "(", "*", "*", "kwargs", ")", "friends", "=", "[", "]", "for", "friend_list", "in", "s...
31.73913
16.173913
def _copy_attachment(self, name, data, mimetype, mfg_event):
  """Copies an attachment to mfg_event.

  Args:
    name: attachment name recorded on the new attachment proto.
    data: attachment payload; Python 2 `unicode` text is encoded to UTF-8
        bytes before being stored in the binary field.
    mimetype: source mimetype used to select the proto attachment type.
    mfg_event: proto whose repeated `attachment` field receives the copy.
  """
  attachment = mfg_event.attachment.add()
  attachment.name = name
  # Python 2 text type: the value_binary field needs bytes.
  if isinstance(data, unicode):
    data = data.encode('utf8')
  attachment.value_binary = data
  # Map the mimetype to the proto enum; anything unrecognized falls back
  # to the generic BINARY type.
  if mimetype in test_runs_converter.MIMETYPE_MAP:
    attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
  elif mimetype == test_runs_pb2.MULTIDIM_JSON:
    attachment.type = mimetype
  else:
    attachment.type = test_runs_pb2.BINARY
[ "def", "_copy_attachment", "(", "self", ",", "name", ",", "data", ",", "mimetype", ",", "mfg_event", ")", ":", "attachment", "=", "mfg_event", ".", "attachment", ".", "add", "(", ")", "attachment", ".", "name", "=", "name", "if", "isinstance", "(", "data...
40.307692
11.384615
def exists(cls, excludes_, **filters):
    """ Return `True` if objects matching the provided filters and excludes
    exist if not return false.

    Calls the `filter` method by default, but can be overridden for better and
    quicker implementations that may be supported by a database.

    :param excludes_: entities without this combination of field name and
        values will be returned
    """
    # Truthiness of the query result set answers the existence question.
    return bool(cls.query.filter(**filters).exclude(**excludes_))
[ "def", "exists", "(", "cls", ",", "excludes_", ",", "*", "*", "filters", ")", ":", "results", "=", "cls", ".", "query", ".", "filter", "(", "*", "*", "filters", ")", ".", "exclude", "(", "*", "*", "excludes_", ")", "return", "bool", "(", "results",...
43.5
20.166667
def relabel_variables(self, mapping, inplace=True):
    """Relabel variables of a binary polynomial as specified by mapping.

    Args:
        mapping (dict):
            Dict mapping current variable labels to new ones. If an
            incomplete mapping is provided, unmapped variables retain
            their current labels.

        inplace (bool, optional, default=True):
            If True, the binary polynomial is updated in-place; otherwise, a
            new binary polynomial is returned.

    Returns:
        :class:`.BinaryPolynomial`: A binary polynomial with the variables
        relabeled. If `inplace` is set to True, returns itself.

    """
    # Copy-then-recurse keeps the in-place path as the single implementation.
    if not inplace:
        return self.copy().relabel_variables(mapping, inplace=True)

    try:
        old_labels = set(mapping)
        new_labels = set(mapping.values())
    except TypeError:
        raise ValueError("mapping targets must be hashable objects")

    # A target label may not collide with an existing, un-relabeled variable.
    variables = self.variables
    for v in new_labels:
        if v in variables and v not in old_labels:
            raise ValueError(('A variable cannot be relabeled "{}" without also relabeling '
                              "the existing variable of the same name").format(v))

    # Labels appearing on both sides of the mapping would clobber each
    # other; route through intermediate labels in two passes instead.
    shared = old_labels & new_labels
    if shared:
        old_to_intermediate, intermediate_to_new = resolve_label_conflict(mapping, old_labels, new_labels)

        self.relabel_variables(old_to_intermediate, inplace=True)
        self.relabel_variables(intermediate_to_new, inplace=True)
        return self

    # Rewrite each term's key; iterate over a snapshot (list) because the
    # mapping is mutated (insert new key, delete old) during the loop.
    for oldterm, bias in list(self.items()):
        newterm = frozenset((mapping.get(v, v) for v in oldterm))

        if newterm != oldterm:
            self[newterm] = bias
            del self[oldterm]

    return self
[ "def", "relabel_variables", "(", "self", ",", "mapping", ",", "inplace", "=", "True", ")", ":", "if", "not", "inplace", ":", "return", "self", ".", "copy", "(", ")", ".", "relabel_variables", "(", "mapping", ",", "inplace", "=", "True", ")", "try", ":"...
37.816327
25.061224
def _parallel_tfa_worker(task):
    '''
    This is a parallel worker for the function below.

    task[0] = lcfile
    task[1] = timecol
    task[2] = magcol
    task[3] = errcol
    task[4] = templateinfo
    task[5] = lcformat
    task[6] = lcformatdir
    task[7] = interp
    task[8] = sigclip
    task[9] = mintemplatedist_arcmin

    '''
    # The docstring index list previously repeated task[6] and omitted the
    # final element; it now matches this unpacking exactly.
    (lcfile, timecol, magcol, errcol,
     templateinfo, lcformat, lcformatdir,
     interp, sigclip, mintemplatedist_arcmin) = task

    try:

        res = apply_tfa_magseries(
            lcfile, timecol, magcol, errcol,
            templateinfo,
            lcformat=lcformat,
            lcformatdir=lcformatdir,
            interp=interp,
            sigclip=sigclip,
            mintemplatedist_arcmin=mintemplatedist_arcmin
        )
        if res:
            LOGINFO('%s -> %s TFA OK' % (lcfile, res))
        return res

    # The bound exception variable was unused; LOGEXCEPTION captures the
    # active traceback itself.
    except Exception:
        LOGEXCEPTION('TFA failed for %s' % lcfile)
        return None
[ "def", "_parallel_tfa_worker", "(", "task", ")", ":", "(", "lcfile", ",", "timecol", ",", "magcol", ",", "errcol", ",", "templateinfo", ",", "lcformat", ",", "lcformatdir", ",", "interp", ",", "sigclip", ",", "mintemplatedist_arcmin", ")", "=", "task", "try"...
23.128205
20.512821
def Indicator(pos, size, dtype):
  """
  Returns an array of length size and type dtype that is everywhere 0,
  except in the index in pos.

  :param pos:   (int) specifies the position of the one entry that will be set.
  :param size:  (int) The total size of the array to be returned.
  :param dtype: The element type (compatible with NumPy array())
                of the array to be returned.
  :returns: (list) of length ``size`` and element type ``dtype``.
  """
  indicator = numpy.zeros(size, dtype=dtype)
  indicator[pos] = 1
  return indicator
[ "def", "Indicator", "(", "pos", ",", "size", ",", "dtype", ")", ":", "x", "=", "numpy", ".", "zeros", "(", "size", ",", "dtype", "=", "dtype", ")", "x", "[", "pos", "]", "=", "1", "return", "x" ]
36.142857
18.857143