positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when every tracked field is still unset.
    tracked_fields = (
        self._id_,
        self._created,
        self._updated,
        self._status,
        self._sub_status,
        self._type_,
        self._counterparty_alias,
        self._amount_reward,
    )
    return all(field is None for field in tracked_fields)
:rtype: bool
def reset_from_xml_string(self, xml_string): """Reloads the environment from an XML description of the environment.""" # if there is an active viewer window, destroy it self.close() # load model from xml self.mjpy_model = load_model_from_xml(xml_string) self.sim = MjSim(self.mjpy_model) self.initialize_time(self.control_freq) if self.has_renderer and self.viewer is None: self.viewer = MujocoPyRenderer(self.sim) self.viewer.viewer.vopt.geomgroup[0] = ( 1 if self.render_collision_mesh else 0 ) self.viewer.viewer.vopt.geomgroup[1] = 1 if self.render_visual_mesh else 0 # hiding the overlay speeds up rendering significantly self.viewer.viewer._hide_overlay = True elif self.has_offscreen_renderer: render_context = MjRenderContextOffscreen(self.sim) render_context.vopt.geomgroup[0] = 1 if self.render_collision_mesh else 0 render_context.vopt.geomgroup[1] = 1 if self.render_visual_mesh else 0 self.sim.add_render_context(render_context) self.sim_state_initial = self.sim.get_state() self._get_reference() self.cur_time = 0 self.timestep = 0 self.done = False # necessary to refresh MjData self.sim.forward()
Reloads the environment from an XML description of the environment.
def submit(self, **kwargs):
    """
    Submit a job script that will run the schedulers with `abirun.py`.

    Args:
        verbose: Verbosity level
        dry_run: Don't submit the script if dry_run. Default: False

    Returns:
        namedtuple with attributes:
            retcode: Return code as returned by the submission script.
            qjob: :class:`QueueJob` object.
            num_flows_inbatch: Number of flows executed by the batch script

        Return code of the job script submission.
    """
    verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False)

    if not self.flows:
        print("Cannot submit an empty list of flows!")
        return 0

    if hasattr(self, "qjob"):
        # This usually happens when we have loaded the object from pickle
        # and we have already submitted the batch script to the queue.
        # At this point we need to understand if the previous batch job
        # is still running before trying to submit it again. There are three cases:
        #
        # 1) The batch script has completed within the time limit and therefore
        #    the pid_file has been removed by the script. In this case, we
        #    should not try to submit it again.
        # 2) The batch script has been killed due to the time limit (other reasons
        #    are possible but we neglect them). In this case the pid_file exists
        #    but there's no job with this pid running and we can resubmit it again.
        # 3) The batch script is still running.
        print("BatchLauncher has qjob %s" % self.qjob)

        if not self.batch_pid_file.exists:
            print("It seems that the batch script reached the end. Wont' try to submit it again")
            return 0

        msg = ("Here I have to understand if qjob is in the queue."
               " but I need an abstract API that can retrieve info from the queue id")
        raise RuntimeError(msg)

        # TODO: Tentative API (unreachable until the RuntimeError above is removed)
        if self.qjob.in_status("Running|Queued"):
            print("Job is still running. Cannot submit")
        else:
            del self.qjob

    script, num_flows_inbatch = self._get_script_nflows()

    if num_flows_inbatch == 0:
        print("All flows have reached all_ok! Batch script won't be submitted")
        return 0

    if verbose:
        print("*** submission script ***")
        print(script)

    # Write the script.
    self.script_file.write(script)
    self.script_file.chmod(0o740)

    # Build the flow.
    for flow in self.flows:
        flow.build_and_pickle_dump()

    # Submit the task and save the queue id.
    if dry_run:
        return -1

    print("Will submit %s flows in batch script" % len(self.flows))
    self.qjob, process = self.qadapter.submit_to_queue(self.script_file.path)

    # Save the queue id in the pid file
    # The file will be removed by the job script if execution is completed.
    # NOTE(review): `batch_pidfile` here vs `batch_pid_file` above looks
    # inconsistent — confirm both attributes exist or one is a typo.
    self.batch_pidfile.write(str(self.qjob.qid))

    self.pickle_dump()
    process.wait()

    return dict2namedtuple(retcode=process.returncode, qjob=self.qjob,
                           num_flows_inbatch=num_flows_inbatch)
Submit a job script that will run the schedulers with `abirun.py`. Args: verbose: Verbosity level dry_run: Don't submit the script if dry_run. Default: False Returns: namedtuple with attributes: retcode: Return code as returned by the submission script. qjob: :class:`QueueJob` object. num_flows_inbatch: Number of flows executed by the batch script Return code of the job script submission.
def merge_vertices(self):
    """call reduce_vertex on all vertices with identical values."""
    # groupby expects sorted data
    value_hash = lambda item: hash(item[1])
    ordered = sorted(self.vertices.items(), key=value_hash)
    for _, members in groupby(ordered, value_hash):
        members = list(members)
        if len(members) > 1:
            # collapse every vertex in this hash-group into one
            self.reduce_vertex(*[name for name, _ in members])
call reduce_vertex on all vertices with identical values.
def list_all_brands(cls, **kwargs):
    """List Brands

    Return a list of Brands
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_brands(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Brand]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the bare payload; both the sync and async paths
    # delegate to the same helper and return its result unchanged.
    kwargs['_return_http_data_only'] = True
    result = cls._list_all_brands_with_http_info(**kwargs)
    return result
List Brands Return a list of Brands This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_brands(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Brand] If the method is called asynchronously, returns the request thread.
def parse_xml(self, node):
    """
    Parse an Image Layer from ElementTree xml node

    :param node: ElementTree xml node
    :return: self
    """
    self._set_properties(node)

    # Missing attributes leave opacity/visible at their current values;
    # name/source/trans fall back to None.
    get = node.get
    self.name = get('name')
    self.opacity = get('opacity', self.opacity)
    self.visible = get('visible', self.visible)

    img = node.find('image')
    self.source = img.get('source')
    self.trans = img.get('trans')
    return self
Parse an Image Layer from ElementTree xml node :param node: ElementTree xml node :return: self
def _minigui_report_search_status(self, leaves):
    """Prints the current MCTS search status to stderr.

    Reports the current search path, root node's child_Q, root node's
    child_N, the most visited path in a format that can be parsed by
    one of the STDERR_HANDLERS in minigui.ts.

    Args:
        leaves: list of leaf MCTSNodes returned by tree_search().
    """
    root = self._player.get_root()

    # Root summary; id() serves as a stable per-node key for the frontend.
    msg = {
        "id": hex(id(root)),
        "n": int(root.N),
        "q": float(root.Q),
    }

    # Per-child stats; Q is scaled to integer milli-units to keep the
    # JSON payload compact.
    msg["childQ"] = [int(round(q * 1000)) for q in root.child_Q]
    msg["childN"] = [int(n) for n in root.child_N]

    ranked_children = root.rank_children()
    variations = {}
    # Report at most the top 15 children; stop at the first child that is
    # unvisited or not yet expanded.
    for i in ranked_children[:15]:
        if root.child_N[i] == 0 or i not in root.children:
            break
        c = coords.to_gtp(coords.from_flat(i))
        child = root.children[i]
        nodes = child.most_visited_path_nodes()
        moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes]
        variations[c] = {
            "n": int(root.child_N[i]),
            "q": float(root.child_Q[i]),
            "moves": [c] + moves,
        }

    if leaves:
        # Walk from the first leaf back up to the root to recover the
        # current ("live") search path, then reverse to root-first order.
        path = []
        leaf = leaves[0]
        while leaf != root:
            path.append(leaf.fmove)
            leaf = leaf.parent
        if path:
            path.reverse()
            variations["live"] = {
                "n": int(root.child_N[path[0]]),
                "q": float(root.child_Q[path[0]]),
                "moves": [coords.to_gtp(coords.from_flat(m)) for m in path]
            }

    if variations:
        msg["variations"] = variations

    # sort_keys keeps the output deterministic for the minigui parser.
    dbg("mg-update:%s" % json.dumps(msg, sort_keys=True))
Prints the current MCTS search status to stderr. Reports the current search path, root node's child_Q, root node's child_N, the most visited path in a format that can be parsed by one of the STDERR_HANDLERS in minigui.ts. Args: leaves: list of leaf MCTSNodes returned by tree_search().
def get_collection_filename(self, language):
    """
    Returns the filename containing the stop words collection for a
    specific language.
    """
    # Collections are stored as one "<language>.txt" file per language.
    return os.path.join(self.data_directory, '%s.txt' % language)
Returns the filename containing the stop words collection for a specific language.
def _check_running_services(services):
    """Check that the services dict provided is actually running and provide
    a list of (service, boolean) tuples for each service.

    Returns both a zipped list of (service, boolean) and a list of booleans
    in the same order as the services.

    @param services: OrderedDict of strings: [ports], one for each service to
                     check.
    @returns [(service, boolean), ...], : results for checks
             [boolean]                  : just the result of the service checks
    """
    # Probe each service once, preserving the dict's iteration order.
    states = [service_running(name) for name in services]
    return list(zip(services, states)), states
Check that the services dict provided is actually running and provide a list of (service, boolean) tuples for each service. Returns both a zipped list of (service, boolean) and a list of booleans in the same order as the services. @param services: OrderedDict of strings: [ports], one for each service to check. @returns [(service, boolean), ...], : results for checks [boolean] : just the result of the service checks
def tabulate(self, format='html', syntax=''):
    ''' a function to create a table from the class model keyMap

    :param format: string with format for table output
    :param syntax: [optional] string with linguistic syntax
    :return: string with table
    '''
    from tabulate import tabulate as _tabulate

    # define headers
    headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description']
    rows = []
    # Track which optional columns are actually populated so that unused
    # columns can be stripped at the end.
    default_values = False
    additional_conditions = False
    field_description = False

    # construct rows
    for key, value in self.keyMap.items():
        key_segments = _segment_path(key)
        if key_segments:
            row = []
            # add field column (indent nested fields with nbsp padding)
            field_name = ''
            if len(key_segments) > 1:
                for i in range(1, len(key_segments)):
                    field_name += '&nbsp;&nbsp;&nbsp;&nbsp;'
            if key_segments[-1] == '0':
                field_name += '<i>item</i>'
            else:
                field_name += key_segments[-1]
            row.append(field_name)
            # add datatype column, translating names for javascript syntax
            value_datatype = value['value_datatype']
            if 'integer_data' in value.keys():
                if value['integer_data'] and syntax != 'javascript':
                    value_datatype = 'integer'
            elif value['value_datatype'] == 'map':
                if syntax == 'javascript':
                    value_datatype = 'object'
            elif value['value_datatype'] == 'list':
                if syntax == 'javascript':
                    value_datatype = 'array'
                # retrieve datatype of item in list
                item_key = key + '[0]'
                item_datatype = self.keyMap[item_key]['value_datatype']
                if syntax == 'javascript':
                    if item_datatype == 'list':
                        item_datatype = 'array'
                    elif item_datatype == 'map':
                        item_datatype = 'object'
                elif 'integer_data' in self.keyMap[item_key].keys():
                    if self.keyMap[item_key]['integer_data']:
                        item_datatype = 'integer'
                value_datatype += ' of %ss' % item_datatype
            row.append(value_datatype)
            # add required column
            if value['required_field']:
                row.append('yes')
            else:
                row.append('')
            # add default column (strings quoted, booleans lower-cased)
            if 'default_value' in value.keys():
                default_values = True
                if isinstance(value['default_value'], str):
                    row.append('"%s"' % value['default_value'])
                elif isinstance(value['default_value'], bool):
                    row.append(str(value['default_value']).lower())
                else:
                    row.append(str(value['default_value']))
            else:
                row.append('')

            # define recursive example constructor
            def determine_example(k, v):
                example_value = ''
                if 'example_values' in v.keys():
                    for i in v['example_values']:
                        if example_value:
                            example_value += ', '
                        if isinstance(i, str):
                            example_value += '"%s"' % i
                        else:
                            # NOTE(review): appends the outer `value` dict
                            # rather than `i` — looks like a bug; confirm
                            # intended behavior for non-string examples.
                            example_value += value
                elif 'declared_value' in v.keys():
                    if isinstance(v['declared_value'], str):
                        example_value = '"%s"' % v['declared_value']
                    elif isinstance(v['declared_value'], bool):
                        example_value = str(v['declared_value']).lower()
                    else:
                        example_value = v['declared_value']
                else:
                    if v['value_datatype'] == 'map':
                        example_value = '{...}'
                    elif v['value_datatype'] == 'list':
                        example_value = '[...]'
                    elif v['value_datatype'] == 'null':
                        example_value = 'null'
                return example_value

            # add examples column
            row.append(determine_example(key, value))

            # add additional conditions and description columns
            conditions = ''
            description = ''
            for k, v in value.items():
                extra_integer = False
                if k == 'integer_data' and syntax == 'javascript':
                    extra_integer = True
                if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer:
                    add_extra = False
                    if k == 'extra_fields':
                        if v:
                            add_extra = True
                    if k in ('field_description', 'field_title'):
                        field_description = True
                        # field_description wins over field_title
                        if k == 'field_description':
                            description = v
                        elif not description:
                            description = v
                    elif k != 'extra_fields' or add_extra:
                        additional_conditions = True
                        if conditions:
                            conditions += '<br>'
                        condition_value = v
                        if isinstance(v, str):
                            condition_value = '"%s"' % v
                        elif isinstance(v, bool):
                            condition_value = str(v).lower()
                        conditions += '%s: %s' % (k, condition_value)
            row.append(conditions)
            row.append(description)

            # add row to rows
            rows.append(row)

    # add rows for top field
    top_dict = self.keyMap['.']
    if top_dict['extra_fields']:
        rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', ''])
    if 'max_bytes' in top_dict.keys():
        rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', ''])

    # eliminate unused columns
    # NOTE(review): pop() removes from the tail, so the removal order
    # (Description, then Conditionals, then Default at index 3) matters.
    if not field_description:
        headers.pop()
    if not additional_conditions:
        headers.pop()
    if not default_values:
        headers.pop(3)
    for row in rows:
        if not field_description:
            row.pop()
        if not additional_conditions:
            row.pop()
        if not default_values:
            row.pop(3)

    # construct table html
    table_html = _tabulate(rows, headers, tablefmt='html')

    # add links to urls in text
    # markdown_url = re.compile('\[(.*?)\]\((.*)\)')
    table_html = _add_links(table_html)

    return table_html
a function to create a table from the class model keyMap :param format: string with format for table output :param syntax: [optional] string with linguistic syntax :return: string with table
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    """
    Retrieves the release notes, from the RELEASE_NOTES file (if in a
    package) or generates it from the git history.

    Args:
        project_dir(str): Path to the git repo of the project.
        bugtracker_url(str): Url to the bug tracker for the issues.

    Returns:
        str: release notes

    Raises:
        RuntimeError: If the release notes could not be retrieved
    """
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES')

    # A packaged sdist ships both PKG-INFO and RELEASE_NOTES; prefer the
    # shipped file, otherwise regenerate from the git history.
    if not (os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file)):
        return api.get_releasenotes(
            repo_path=project_dir,
            bugtracker_url=bugtracker_url,
        )
    with open(releasenotes_file) as releasenotes_fd:
        return releasenotes_fd.read()
Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: RuntimeError: If the release notes could not be retrieved
def count(self) -> "CountQuery":
    """
    Return count of objects in queryset instead of objects.
    """
    # Carry the full query state over to the count query unchanged.
    query_state = dict(
        db=self._db,
        model=self.model,
        q_objects=self._q_objects,
        annotations=self._annotations,
        custom_filters=self._custom_filters,
    )
    return CountQuery(**query_state)
Return count of objects in queryset instead of objects.
def move(self, path, raise_if_exists=False):
    """
    Alias for ``rename()``
    """
    # Pure delegation keeps move() and rename() behavior in lockstep.
    self.rename(path, raise_if_exists=raise_if_exists)
Alias for ``rename()``
def _validate_config(self, folder, validate_folder=True):
    '''validate config is the primary validation function that checks
    for presence and format of required fields.

    Parameters
    ==========
    :folder: full path to folder with config.json
    :validate_folder: if True, check the exp_id field against the folder name
    '''
    config = "%s/config.json" % folder
    name = os.path.basename(folder)
    if not os.path.exists(config):
        return notvalid("%s: config.json not found." % (folder))

    # Load the config
    try:
        config = read_json(config)
    except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        return notvalid("%s: cannot load json, invalid." % (name))

    # Config.json should be single dict
    if isinstance(config, list):
        return notvalid("%s: config.json is a list, not valid." % (name))

    # Check over required fields
    fields = self.get_validation_fields()
    for field, value, ftype in fields:
        bot.verbose('field: %s, required: %s' % (field, value))

        # Field must be in the keys if required
        if field not in config.keys():
            if value == 1:
                return notvalid("%s: config.json is missing required field %s" % (name, field))
        # Field is present, check type
        else:
            if not isinstance(config[field], ftype):
                return notvalid("%s: invalid type, must be %s." % (name, str(ftype)))

            # Expid gets special treatment
            if field == "exp_id" and validate_folder is True:
                if config[field] != name:
                    return notvalid("%s: exp_id parameter %s does not match folder name." % (name, config[field]))

                # name cannot have special characters, only _ and letters/numbers
                if not re.match("^[a-z0-9_-]*$", config[field]):
                    # BUG FIX: the two message parts were concatenated without a
                    # separator ("...charactersonly lowercase..."); add "; ".
                    message = "%s: exp_id parameter %s has invalid characters; "
                    message += "only lowercase [a-z],[0-9], -, and _ allowed."
                    return notvalid(message % (name, config[field]))

    return True
validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :name: if provided, the folder name to check against exp_id
def get_shape_mask(self, shape_obj):
    """
    Return full mask where True marks pixels within the given shape.

    :param shape_obj: object exposing ``contains_pts(points)`` used to
        test point membership; its return value is passed through.
    """
    wd, ht = self.get_size()
    # Column vector of y indices and row vector of x indices covering
    # the full image grid.
    yi = np.mgrid[:ht].reshape(-1, 1)
    xi = np.mgrid[:wd].reshape(1, -1)
    # NOTE(review): np.asarray((xi, yi)) stacks arrays of shapes (1, wd)
    # and (ht, 1); on recent NumPy versions mismatched shapes raise
    # instead of broadcasting — confirm the expected pts layout against
    # contains_pts() before relying on this.
    pts = np.asarray((xi, yi)).T
    contains = shape_obj.contains_pts(pts)
    return contains
Return full mask where True marks pixels within the given shape.
def geom_length(geom):
    """
    Calculates the length of coordinates in a shapely geometry.

    Args:
        geom: shapely geometry (Point, LineString, Polygon, Multi*, ...)

    Returns:
        int: number of coordinate pairs in the geometry
    """
    if geom.geom_type == 'Point':
        return 1
    # For polygons, count the exterior ring coordinates.
    if hasattr(geom, 'exterior'):
        geom = geom.exterior
    if not geom.geom_type.startswith('Multi'):
        # Legacy shapely (<2.0) exposes the raw coordinate buffer.
        if hasattr(geom, 'array_interface_base'):
            return len(geom.array_interface_base['data']) // 2
        # shapely >= 2.0: fall back to the coords sequence.
        if hasattr(geom, 'coords'):
            return len(geom.coords)
    # Multi-part geometry: sum over the parts. BUG FIX: iterating the
    # geometry object directly was removed in shapely 2.0; use .geoms
    # when available (falls back to direct iteration for old versions).
    parts = getattr(geom, 'geoms', geom)
    return sum(geom_length(part) for part in parts)
Calculates the length of coordinates in a shapely geometry.
def remove_root_family(self, family_id):
    """Removes a root family.

    arg:    family_id (osid.id.Id): the ``Id`` of a family
    raise:  NotFound - ``family_id`` not a root
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_root_bin_template
    if self._catalog_session is None:
        return self._hierarchy_session.remove_root(id_=family_id)
    return self._catalog_session.remove_root_catalog(catalog_id=family_id)
Removes a root family. arg: family_id (osid.id.Id): the ``Id`` of a family raise: NotFound - ``family_id`` not a root raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def submissions(self):
    """List job submissions in workspace."""
    # Fetch, verify the HTTP status, then decode the JSON payload.
    response = fapi.get_submissions(self.namespace, self.name, self.api_url)
    fapi._check_response_code(response, 200)
    return response.json()
List job submissions in workspace.
def _dms_formatter(latitude, longitude, mode, unistr=False):
    """Generate a human readable DM/DMS location string.

    Args:
        latitude (float): Location's latitude
        longitude (float): Location's longitude
        mode (str): Coordinate formatting system to use ('dms' for
            degrees/minutes/seconds, otherwise degrees/decimal minutes)
        unistr (bool): Whether to use extended character set

    Returns:
        list: text fragments (formatted coordinates plus hemisphere
            letters); callers are expected to join them.
    """
    if unistr:
        # Unicode degree / prime / double-prime symbols
        chars = ('°', '′', '″')
    else:
        chars = ('°', "'", '"')

    # Work on absolute values; sign is reported via the N/S and E/W
    # letters appended below.
    latitude_dms = tuple(map(abs, utils.to_dms(latitude, mode)))
    longitude_dms = tuple(map(abs, utils.to_dms(longitude, mode)))

    text = []
    # The %% escapes survive the first interpolation (of the symbol
    # chars), leaving a numeric format string for the DMS tuple.
    if mode == 'dms':
        text.append('%%02i%s%%02i%s%%02i%s' % chars % latitude_dms)
    else:
        text.append('%%02i%s%%05.2f%s' % chars[:2] % latitude_dms)
    text.append('S' if latitude < 0 else 'N')
    if mode == 'dms':
        text.append(', %%03i%s%%02i%s%%02i%s' % chars % longitude_dms)
    else:
        text.append(', %%03i%s%%05.2f%s' % chars[:2] % longitude_dms)
    text.append('W' if longitude < 0 else 'E')
    return text
Generate a human readable DM/DMS location string. Args: latitude (float): Location's latitude longitude (float): Location's longitude mode (str): Coordinate formatting system to use unistr (bool): Whether to use extended character set
def _train(self, trial):
    """Start one iteration of training and save remote id."""
    # Only trials already marked RUNNING may be stepped.
    assert trial.status == Trial.RUNNING, trial.status
    remote = trial.runner.train.remote()

    # Local Mode: the call returns the result dict directly instead of a
    # remote object id, so wrap it to keep a uniform "future" interface.
    if isinstance(remote, dict):
        remote = _LocalWrapper(remote)

    # Map the in-flight remote object back to its trial.
    self._running[remote] = trial
Start one iteration of training and save remote id.
def main_bigg(args=None, urlopen=urlopen):
    """Entry point for BiGG import program.

    If the ``args`` are provided, these should be a list of strings that
    will be used instead of ``sys.argv[1:]``. This is mostly useful for
    testing.
    """
    parser = argparse.ArgumentParser(
        description='Import from BiGG database')
    parser.add_argument('--dest', metavar='path', default='.',
                        help='Destination directory (default is ".")')
    parser.add_argument('--no-exchange', action='store_true',
                        help=('Disable importing exchange reactions as'
                              ' exchange compound file.'))
    parser.add_argument('--split-subsystem', action='store_true',
                        help='Enable splitting reaction files by subsystem')
    parser.add_argument('--merge-compounds', action='store_true',
                        help=('Merge identical compounds occuring in various'
                              ' compartments.'))
    parser.add_argument('--force', action='store_true',
                        help='Enable overwriting model files')
    parser.add_argument('id', help='BiGG model to import ("list" to see all)')

    args = parser.parse_args(args)

    # Set up logging for the command line interface
    if 'PSAMM_DEBUG' in os.environ:
        level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None)
        if level is not None:
            logging.basicConfig(level=level)
    else:
        logging.basicConfig(
            level=logging.INFO, format='%(levelname)s: %(message)s')

    # Print list of available models
    if args.id == 'list':
        print('Available models:')
        f = urlopen('http://bigg.ucsd.edu/api/v2/models')
        doc = json.loads(f.read().decode('utf-8'))
        results = doc['results']
        # Pad model ids to a common width, capped at 16 columns.
        id_width = min(max(len(result['bigg_id']) for result in results), 16)
        for result in sorted(results, key=lambda x: x.get('organism')):
            print('{} {}'.format(
                result.get('bigg_id').ljust(id_width),
                result.get('organism')))
        return 0

    # Locate the COBRA JSON importer plugin via entry points.
    importer_entry = None
    try:
        importer_entry = next(
            pkg_resources.iter_entry_points('psamm.importer', 'JSON'))
    except StopIteration:
        logger.error('Failed to locate the COBRA JSON model importer!')
        sys.exit(-1)

    importer_class = importer_entry.load()
    importer = importer_class()

    # Download and parse the requested model.
    try:
        f = urlopen(
            'http://bigg.ucsd.edu/api/v2/models/{}/download'.format(
                url_quote(args.id)))
        model = importer.import_model(codecs.getreader('utf-8')(f))
    except ModelLoadError as e:
        logger.error('Failed to load model!', exc_info=True)
        importer.help()
        parser.error(text_type(e))
    except ParseError as e:
        logger.error('Failed to parse model!', exc_info=True)
        logger.error(text_type(e))
        sys.exit(-1)

    if args.merge_compounds:
        compounds_before = len(model.compounds)
        sbml.merge_equivalent_compounds(model)
        if len(model.compounds) < compounds_before:
            logger.info(
                'Merged {} compound entries into {} entries by'
                ' removing duplicates in various compartments'.format(
                    compounds_before, len(model.compounds)))

    print('Model: {}'.format(model.name))
    print('- Biomass reaction: {}'.format(model.biomass_reaction))
    print('- Compartments: {}'.format(len(model.compartments)))
    print('- Compounds: {}'.format(len(model.compounds)))
    print('- Reactions: {}'.format(len(model.reactions)))
    print('- Genes: {}'.format(count_genes(model)))

    # Check if dest directory is empty. If we get an error assume that the
    # directory does not exist.
    dest_is_empty = False
    try:
        dest_is_empty = len(os.listdir(args.dest)) == 0
    except OSError:
        dest_is_empty = True

    if not dest_is_empty:
        if not args.force:
            logger.error('Destination directory is not empty. Use --force'
                         ' option to proceed anyway, overwriting any existing'
                         ' files in {}'.format(args.dest))
            return 1
        else:
            logger.warning('Destination directory is not empty, overwriting'
                           ' existing files in {}'.format(args.dest))

    # Create destination directory if not exists
    dest = args.dest
    mkdir_p(dest)

    convert_exchange = not args.no_exchange
    write_yaml_model(model, dest, convert_exchange=convert_exchange,
                     split_subsystem=args.split_subsystem)
Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing.
def convert_to_file(cgi_input, output_file, twobit_ref, twobit_name,
                    var_only=False):
    """Convert a CGI var file and output VCF-formatted data to file"""
    # Accept either an open file object or a path; a path is opened for
    # writing (with compression auto-detected by auto_zip_open).
    if isinstance(output_file, str):
        output_file = auto_zip_open(output_file, 'w')

    for vcf_line in convert(cgi_input=cgi_input, twobit_ref=twobit_ref,
                            twobit_name=twobit_name, var_only=var_only):
        output_file.write(vcf_line + "\n")
    output_file.close()
Convert a CGI var file and output VCF-formatted data to file
def _match(self, x, op, y): """Compare the given `x` and `y` based on `op` :@param x, y, op :@type x, y: mixed :@type op: string :@return bool :@throws ValueError """ if (op not in self.condition_mapper): raise ValueError('Invalid where condition given') func = getattr(self, self.condition_mapper.get(op)) return func(x, y)
Compare the given `x` and `y` based on `op` :@param x, y, op :@type x, y: mixed :@type op: string :@return bool :@throws ValueError
def _unfiltered_jvm_dependency_map(self, fully_transitive=False):
    """Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing.

    Unfiltered because the keys in the resulting map include non-JvmTargets.

    See the explanation in the jvm_dependency_map() docs for what this method produces.

    :param fully_transitive: if true, the elements of the map will be the full set of
      transitive JvmTarget dependencies, not just the "direct" ones. (see
      jvm_dependency_map for the definition of "direct")
    :return: map of target -> set of JvmTarget "direct" dependencies.
    """
    targets = self.jvm_targets
    jvm_deps = defaultdict(set)

    def accumulate_jvm_deps(target):
        for dep in target.dependencies:
            if self._is_jvm_target(dep):
                jvm_deps[target].add(dep)
                if not fully_transitive:
                    continue
            # If 'dep' isn't in jvm_deps, that means that it isn't in the `targets` list at all
            # (since this is a post-order traversal). If it's not in the targets list at all,
            # that means it cannot have any JvmTargets as transitive dependencies. In which case
            # we don't care about it, so it's fine that the line below is a no-op.
            #
            # Otherwise, we add in any transitive dependencies that were previously collected.
            jvm_deps[target].update(jvm_deps[dep])

    # Vanilla DFS runs in O(|V|+|E|), and the code inside the loop in accumulate_jvm_deps ends up
    # being run once for each in the graph over the course of the entire search, which means that
    # the total asymptotic runtime complexity is O(|V|+2|E|), which is still O(|V|+|E|).
    self.context.build_graph.walk_transitive_dependency_graph(
        addresses=[t.address for t in targets],
        work=accumulate_jvm_deps,
        postorder=True
    )

    return jvm_deps
Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing. Unfiltered because the keys in the resulting map include non-JvmTargets. See the explanation in the jvm_dependency_map() docs for what this method produces. :param fully_transitive: if true, the elements of the map will be the full set of transitive JvmTarget dependencies, not just the "direct" ones. (see jvm_dependency_map for the definition of "direct") :return: map of target -> set of JvmTarget "direct" dependencies.
def ref_context_from_geoloc(geoloc):
    """Return a RefContext object given a geoloc entry."""
    # The entry's display text becomes the name; the geoID becomes the
    # grounding reference.
    return RefContext(
        name=geoloc.get('text'),
        db_refs={'GEOID': geoloc.get('geoID')},
    )
Return a RefContext object given a geoloc entry.
def accountSummary(self, account: str = '') -> List[AccountValue]:
    """
    List of account values for the given account,
    or of all accounts if account is left blank.

    This method is blocking on first run, non-blocking after that.

    Args:
        account: If specified, filter for this account name.
    """
    if not self.wrapper.acctSummary:
        # loaded on demand since it takes ca. 250 ms
        self.reqAccountSummary()
    values = list(self.wrapper.acctSummary.values())
    if not account:
        return values
    return [v for v in values if v.account == account]
List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name.
def agm(x, y, context=None):
    """
    Return the arithmetic geometric mean of x and y.

    :param x: value convertible to BigFloat
    :param y: value convertible to BigFloat
    :param context: optional context overriding the current one
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_agm,
        (
            # implicit conversion accepts plain numbers as well
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
Return the arithmetic geometric mean of x and y.
async def close(self) -> None:
    """
    Explicit exit. If so configured, populate cache to prove for any creds on schemata,
    cred defs, and rev regs marked of interest in configuration at initialization,
    archive cache, and purge prior cache archives.

    :return: current object

    NOTE(review): docstring says "current object" but the annotation and
    body return None — confirm which is intended.
    """
    LOGGER.debug('OrgHubAnchor.close >>>')

    # Archive caches only if either role's on-close archiving is configured.
    archive_caches = False
    if self.config.get('archive-holder-prover-caches-on-close', False):
        archive_caches = True
        await self.load_cache_for_proof(False)
    if self.config.get('archive-verifier-caches-on-close', {}):
        archive_caches = True
        await self.load_cache_for_verification(False)
    if archive_caches:
        ArchivableCaches.archive(self.dir_cache)
        ArchivableCaches.purge_archives(self.dir_cache, True)

    # Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes
    # await self.wallet.close()  # 1.7.8
    # Do not close pool independently: let relying party decide when to go on-line and off-line

    # Best-effort sync of revocation data for every linked tails file.
    for path_rr_id in Tails.links(self._dir_tails):
        rr_id = basename(path_rr_id)
        try:
            await HolderProver._sync_revoc_for_proof(self, rr_id)
        except ClosedPool:
            LOGGER.warning('OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed', rr_id)

    LOGGER.debug('OrgHubAnchor.close <<<')
Explicit exit. If so configured, populate cache to prove for any creds on schemata, cred defs, and rev regs marked of interest in configuration at initialization, archive cache, and purge prior cache archives. :return: current object
def get_known_host_entries(user,
                           hostname,
                           config=None,
                           port=None,
                           fingerprint_hash_type=None):
    '''
    .. versionadded:: 2018.3.0

    Return information about known host entries from the configfile, if any.
    If there are no entries for a matching hostname, return None.

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.get_known_host_entries <user> <hostname>
    '''
    full = _get_known_hosts_file(config=config, user=user)

    # The helper returns an error dict on failure; propagate it unchanged.
    if isinstance(full, dict):
        return full

    # Non-default ports are encoded into the [host]:port lookup form.
    ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
    cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full]
    lines = __salt__['cmd.run'](cmd,
                                ignore_retcode=True,
                                python_shell=False).splitlines()
    known_host_entries = list(
        _parse_openssh_output(lines,
                              fingerprint_hash_type=fingerprint_hash_type)
    )
    return known_host_entries if known_host_entries else None
.. versionadded:: 2018.3.0 Return information about known host entries from the configfile, if any. If there are no entries for a matching hostname, return None. CLI Example: .. code-block:: bash salt '*' ssh.get_known_host_entries <user> <hostname>
def setup_versioned_routes(routes, version=None):
    """Register each (path, method) -> handler mapping, optionally
    prefixing every path with '/<version>'."""
    prefix = '/' + version if version else ""
    for (path, method), handler in routes.items():
        route(prefix + path, method, handler)
Set up routes with a version prefix.
def control(self):
    """
    control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block)
    """
    # Mandatory leading 'if' branch.
    self.eat(TokenTypes.IF)
    branches = [If(self.expression(), self.block())]

    # Zero or more 'elif' branches.
    while self.cur_token.type == TokenTypes.ELIF:
        self.eat(TokenTypes.ELIF)
        branches.append(If(self.expression(), self.block()))

    # Optional trailing 'else'; defaults to an empty block.
    fallback = Block()
    if self.cur_token.type == TokenTypes.ELSE:
        self.eat(TokenTypes.ELSE)
        fallback = self.block()

    return ControlBlock(branches, fallback)
control : 'if' ctrl_exp block ('elif' ctrl_exp block)* ('else' block)
def _try_to_clean_garbage(self, writer_spec, exclude_list=()): """Tries to remove any files created by this shard that aren't needed. Args: writer_spec: writer_spec for the MR. exclude_list: A list of filenames (strings) that should not be removed. """ # Try to remove garbage (if any). Note that listbucket is not strongly # consistent so something might survive. tmpl = string.Template(self._TMPFILE_PREFIX) prefix = tmpl.substitute( id=self.status.mapreduce_id, shard=self.status.shard) bucket = self._get_tmp_gcs_bucket(writer_spec) account_id = self._get_tmp_account_id(writer_spec) for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix), _account_id=account_id): if f.filename not in exclude_list: self._remove_tmpfile(f.filename, self.status.writer_spec)
Tries to remove any files created by this shard that aren't needed. Args: writer_spec: writer_spec for the MR. exclude_list: A list of filenames (strings) that should not be removed.
def remove_line_breaks(text):
    """Remove line breaks from a UTF-8 byte string.

    Strips form feeds, newlines, carriage returns, and the Unicode
    'line separator' (U+2028), 'paragraph separator' (U+2029), and
    'next line' (U+0085) characters.  Returns a UTF-8 byte string.
    """
    decoded = unicode(text, 'utf-8')
    # BUG FIX: the separators must be removed as *decoded code points*
    # (u'\u2028', u'\u2029', u'\x85').  The previous escapes
    # (u'\xe2\x80\xa8', ...) were the raw UTF-8 byte sequences spelled as
    # Unicode escapes, which never occur in correctly decoded text.  The
    # legacy mojibake forms are still stripped for backward compatibility
    # with doubly-encoded input.
    separators = (
        u'\f', u'\n', u'\r',
        u'\u2028', u'\u2029', u'\x85',                       # correct code points
        u'\xe2\x80\xa8', u'\xe2\x80\xa9', u'\xc2\x85',       # legacy mojibake forms
    )
    for sep in separators:
        decoded = decoded.replace(sep, u'')
    return decoded.encode('utf-8')
Remove line breaks from input. Including unicode 'line separator', 'paragraph separator', and 'next line' characters.
def populateFromFile(self, dataUrl):
    """
    Populates the instance variables of this RnaQuantificationSet from
    the specified data URL.
    """
    # Remember the source, open the SQLite backend on it, then load the
    # quantifications it contains.
    self._dbFilePath = dataUrl
    self._db = SqliteRnaBackend(dataUrl)
    self.addRnaQuants()
Populates the instance variables of this RnaQuantificationSet from the specified data URL.
def doActionFor(instance, action_id, idxs=None):
    """Tries to perform the transition to the instance.
    Object is reindexed after the transition takes place, but only if succeeds.
    If idxs is set, only these indexes will be reindexed. Otherwise, will try
    to use the indexes defined in ACTIONS_TO_INDEXES mapping if any.
    :param instance: Object to be transitioned
    :param action_id: transition id
    :param idxs: indexes to be reindexed after the transition
    :returns: True if the transition has been performed, together with message
    :rtype: tuple (bool,str)
    """
    if not instance:
        return False, ""

    if isinstance(instance, list):
        # TODO Workflow . Check if this is strictly necessary
        # This check is here because sometimes Plone creates a list
        # from submitted form elements.
        logger.warn("Got a list of obj in doActionFor!")
        if len(instance) > 1:
            logger.warn(
                "doActionFor is getting an instance parameter which is a list "
                "with more than one item. Instance: '{}', action_id: '{}'"
                .format(instance, action_id)
            )
        # Only the first element is transitioned; extras are ignored (warned).
        return doActionFor(instance=instance[0], action_id=action_id,
                           idxs=idxs)

    # Since a given transition can cascade or promote to other objects, we want
    # to reindex all objects for which the transition succeed at once, at the
    # end of process. Otherwise, same object will be reindexed multiple times
    # unnecessarily. Also, ActionHandlerPool ensures the same transition is not
    # applied twice to the same object due to cascade/promote recursions.
    pool = ActionHandlerPool.get_instance()
    if pool.succeed(instance, action_id):
        return False, "Transition {} for {} already done"\
            .format(action_id, instance.getId())

    # Return False if transition is not permitted
    if not isTransitionAllowed(instance, action_id):
        return False, "Transition {} for {} is not allowed"\
            .format(action_id, instance.getId())

    # Add this batch process to the queue
    pool.queue_pool()
    succeed = False
    message = ""
    workflow = getToolByName(instance, "portal_workflow")
    try:
        workflow.doActionFor(instance, action_id)
        succeed = True
    except WorkflowException as e:
        # Transition failed: record the reason but do not raise, so callers
        # always get a (bool, message) result.
        message = str(e)
        curr_state = getCurrentState(instance)
        clazz_name = instance.__class__.__name__
        logger.warning(
            "Transition '{0}' not allowed: {1} '{2}' ({3})"\
            .format(action_id, clazz_name, instance.getId(), curr_state))
        logger.error(message)

    # If no indexes to reindex have been defined, try to use those defined in
    # the ACTIONS_TO_INDEXES mapping. Reindexing only those indexes that might
    # be affected by the transition boosts the overall performance!.
    if idxs is None:
        portal_type = instance.portal_type
        idxs = ACTIONS_TO_INDEXES.get(portal_type, {}).get(action_id, [])

    # Add the current object to the pool and resume
    pool.push(instance, action_id, succeed, idxs=idxs)
    pool.resume()

    return succeed, message
Tries to perform the transition to the instance. Object is reindexed after the transition takes place, but only if succeeds. If idxs is set, only these indexes will be reindexed. Otherwise, will try to use the indexes defined in ACTIONS_TO_INDEXES mapping if any. :param instance: Object to be transitioned :param action_id: transition id :param idxs: indexes to be reindexed after the transition :returns: True if the transition has been performed, together with message :rtype: tuple (bool,str)
def assert_has_permission(self, scope_required):
    """
    Assert that the required scope is among the scopes granted to the
    currently authenticated client/user.

    Raises ``ValueError`` if not yet authenticated or if the scope is
    missing (after logging the granted scopes); returns ``True`` otherwise.

    ::

        # The admin user should have client admin permissions
        uaa.assert_has_permission('clients.admin')
    """
    if not self.authenticated:
        raise ValueError("Must first authenticate()")

    # Fetch the granted scopes once and reuse them for the check and logging.
    granted = self.get_scopes()
    if scope_required not in granted:
        # Lazy %-style arguments: the message is only built if it is emitted.
        logging.warning("Authenticated as %s", self.client['id'])
        logging.warning("Have scopes: %s", ",".join(granted))
        logging.warning("Insufficient scope %s for operation", scope_required)
        raise ValueError("Client does not have permission.")

    return True
Warn that the required scope is not found in the scopes granted to the currently authenticated user. :: # The admin user should have client admin permissions uaa.assert_has_permission('admin', 'clients.admin')
def cache_makedirs(self, subdir=None):
    '''Make necessary directories to hold cache value'''
    if subdir is None:
        # No subdir given: ensure the parent directory of the cache file.
        target = os.path.dirname(self.cache_path)
    else:
        # Subdir given (possibly empty): build under the cache path itself.
        target = self.cache_path
        if subdir:
            target = os.path.join(target, subdir)
    os.makedirs(target, exist_ok=True)
Make necessary directories to hold cache value
def extract(self, item, list_article_candidate):
    """Compares the extracted publish dates.

    :param item: The corresponding NewscrawlerItem
    :param list_article_candidate: A list of ArticleCandidate objects which
        have been extracted
    :return: A string, the most likely publish date, or None if no candidate
        carries one
    """
    # Keep only candidates that actually found a date, remembering which
    # extractor produced each one.
    candidates = [(cand.publish_date, cand.extractor)
                  for cand in list_article_candidate
                  if cand.publish_date is not None]

    if not candidates:
        return None

    # Prefer the dedicated date_extractor's result when it produced one.
    for publish_date, extractor in candidates:
        if extractor == "date_extractor":
            return publish_date

    # Otherwise fall back to the first extracted date.
    return candidates[0][0]
Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date
def agenda_changed(self):
    """True if any rule activation changes have occurred."""
    changed = bool(lib.EnvGetAgendaChanged(self._env))
    # Reading is destructive: clear the flag so the next call reports
    # only changes that happened after this one.
    lib.EnvSetAgendaChanged(self._env, 0)
    return changed
True if any rule activation changes have occurred.
def _get_controller_type(self): """Returns the current node's controller type""" if self.node_type == self.NODE_CONTROLLER_ROOT and self.name in self.CONTROLLERS: return self.name elif self.parent: return self.parent.controller_type else: return None
Returns the current node's controller type
def ancestor_paths(start=None, limit=None):
    """
    All paths above you

    Yields *start* (default: the current working directory) and each of
    its ancestor directories in turn, stopping at the filesystem root or
    when one of the paths in *limit* has been yielded.

    :param start: path to start from; defaults to ``os.getcwd()``
    :param limit: a path or iterable of paths at which to stop walking up
    """
    import utool as ut
    # BUG FIX: default was the mutable ``limit={}``; use None as the
    # sentinel instead (behavior for callers is unchanged).
    limit = [] if limit is None else ut.ensure_iterable(limit)
    # Match both the expanded (~ resolved) and literal forms of each limit.
    limit = {expanduser(p) for p in limit}.union(set(limit))
    if start is None:
        start = os.getcwd()
    path = start
    prev = None
    # dirname() is a fixed point at the root, which terminates the walk.
    while path != prev and prev not in limit:
        yield path
        prev = path
        path = dirname(path)
All paths above you
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0,
                save_results=True, show=False):
    """Run locally-weighted linear regression prediction over Disco."""
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job, result_iterator
    from disco.core import Disco
    """
    training_data - training samples
    fitting_data - dataset to be fitted to training data.
    tau - controls how quickly the weight of a training sample falls off with
        distance of its x(i) from the query point x.
    samples_per_job - define a number of samples that will be processed in
        single mapreduce job. If 0, algorithm will calculate number of
        samples per job.
    """

    # Validate tau: must be a positive number.
    try:
        tau = float(tau)
        if tau <= 0:
            raise Exception("Parameter tau should be >= 0.")
    except ValueError:
        raise Exception("Parameter tau should be numerical.")

    # Each prediction sample must carry an identifier column.
    if fitting_data.params["id_index"] == -1:
        raise Exception("Predict data should have id_index set.")

    # First job: read the fitting data and emit (id, sample) pairs.
    job = Job(worker=Worker(save_results=save_results))
    job.pipeline = [
        ("split",
         Stage("map", input_chain=fitting_data.params["input_chain"],
               init=simple_init, process=map_predict))]

    job.params = fitting_data.params
    job.run(name="lwlr_read_data", input=fitting_data.params["data_tag"])

    samples = {}
    results = []
    tau = float(2 * tau ** 2)  # calculate tau once
    counter = 0
    # Batch the fitting samples and launch one _fit_predict job per batch.
    for test_id, x in result_iterator(job.wait(show=show)):
        if samples_per_job == 0:
            # calculate number of samples per job
            if len(x) <= 100:  # if there is less than 100 attributes
                samples_per_job = 100  # 100 samples is max per on job
            else:
                # there is more than 100 attributes
                samples_per_job = len(x) * -25 / 900. + 53  # linear function

        samples[test_id] = x
        if counter == samples_per_job:
            results.append(_fit_predict(training_data, samples, tau,
                                        save_results, show))
            counter = 0
            samples = {}
        counter += 1

    if len(samples) > 0:
        # if there is some samples left in the the dictionary
        results.append(_fit_predict(training_data, samples, tau,
                                    save_results, show))

    # merge results of every iteration into a single tag
    ddfs = Disco().ddfs
    ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results])

    return ["tag://" + job.name]
training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job.
def new(self, fname=None, editorstack=None, text=None):
    """
    Create a new file - Untitled

    fname=None --> fname will be 'untitledXX.py' but do not create file
    fname=<basestring> --> create file
    """
    # If no text is provided, create default content from the template file.
    empty = False
    try:
        if text is None:
            default_content = True
            text, enc = encoding.read(self.TEMPLATE_PATH)
            # Honor an explicit coding cookie in the template, if present.
            enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-', text)
            if enc_match:
                enc = enc_match.group(1)
            # Initialize template variables
            # Windows
            username = encoding.to_unicode_from_fs(
                os.environ.get('USERNAME', ''))
            # Linux, Mac OS X
            if not username:
                username = encoding.to_unicode_from_fs(
                    os.environ.get('USER', '-'))
            VARS = {
                'date': time.ctime(),
                'username': username,
            }
            try:
                text = text % VARS
            except Exception:
                # Template substitution is best-effort; keep raw text on error.
                pass
        else:
            default_content = False
            enc = encoding.read(self.TEMPLATE_PATH)[1]
    except (IOError, OSError):
        # Template unreadable: fall back to an empty utf-8 buffer.
        text = ''
        enc = 'utf-8'
        default_content = True
        empty = True

    create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n)
    # Creating editor widget
    if editorstack is None:
        current_es = self.get_current_editorstack()
    else:
        current_es = editorstack
    created_from_here = fname is None
    if created_from_here:
        # Pick the first unused untitledXX.py name.
        while True:
            fname = create_fname(self.untitled_num)
            self.untitled_num += 1
            if not osp.isfile(fname):
                break
        # Base directory: active project if any, else the current file's dir.
        basedir = getcwd_or_home()
        if self.main.projects.get_active_project() is not None:
            basedir = self.main.projects.get_active_project_path()
        else:
            c_fname = self.get_current_filename()
            if c_fname is not None and c_fname != self.TEMPFILE_PATH:
                basedir = osp.dirname(c_fname)
        fname = osp.abspath(osp.join(basedir, fname))
    else:
        # QString when triggered by a Qt signal
        fname = osp.abspath(to_text_string(fname))
        index = current_es.has_filename(fname)
        # Abort if the file is already open and the user refuses to close it.
        if index is not None and not current_es.close_file(index):
            return

    # Creating the editor widget in the first editorstack (the one that
    # can't be destroyed), then cloning this editor widget in all other
    # editorstacks:
    finfo = self.editorstacks[0].new(fname, enc, text, default_content,
                                     empty)
    finfo.path = self.main.get_spyder_pythonpath()
    self._clone_file_everywhere(finfo)
    current_editor = current_es.set_current_filename(finfo.filename)
    self.register_widget_shortcuts(current_editor)
    if not created_from_here:
        # An explicit fname was given: create the file on disk immediately.
        self.save(force=True)
Create a new file - Untitled fname=None --> fname will be 'untitledXX.py' but do not create file fname=<basestring> --> create file
def _poly_eval_0(self, u, ids): """Evaluate internal polynomial.""" return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids]
Evaluate internal polynomial.
def from_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', dtype=None):
    """Read a feature table from a GFF3 format file.

    Parameters
    ----------
    path : string
        File path.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    dtype : numpy dtype, optional
        Manually specify a dtype.

    Returns
    -------
    ft : FeatureTable

    """
    records = gff3_to_recarray(path, attributes=attributes, region=region,
                               score_fill=score_fill, phase_fill=phase_fill,
                               attributes_fill=attributes_fill, dtype=dtype)
    # The parser returns None when nothing was read; propagate that.
    return None if records is None else FeatureTable(records, copy=False)
Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable
def confdate(self):
    """Date range of the conference the abstract belongs to represented
    by two tuples in the form (YYYY, MM, DD).
    """
    info = self._confevent.get('confdate', {})
    if len(info) == 0:
        # No conference date recorded.
        return ((None, None, None), (None, None, None))

    def _ymd(raw):
        # Each part maps '@year'/'@month'/'@day' to numeric strings.
        parts = {key: int(val) for key, val in raw.items()}
        return (parts['@year'], parts['@month'], parts['@day'])

    return (_ymd(info['startdate']), _ymd(info['enddate']))
Date range of the conference the abstract belongs to represented by two tuples in the form (YYYY, MM, DD).
def heat_setting(self, value):
    """Verifies that the heat setting is between 0 and 3."""
    # Only the discrete levels 0..3 are valid.
    if value not in (0, 1, 2, 3):
        raise exceptions.RoasterValueError
    self._heat_setting.value = value
Verifies that the heat setting is between 0 and 3.
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    orig = msg.get_payload()
    if orig is None:
        # There's no payload. For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # Fast probe: if the payload round-trips through ASCII it is 7bit,
    # otherwise treat it as 8bit.
    try:
        if isinstance(orig, str):
            orig.encode('ascii')
        else:
            orig.decode('ascii')
    except UnicodeError:
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # iso-2022-* is non-ASCII but encodes to a 7-bit representation
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            msg['Content-Transfer-Encoding'] = '7bit'
        else:
            msg['Content-Transfer-Encoding'] = '8bit'
        return
    msg['Content-Transfer-Encoding'] = '7bit'
    if not isinstance(orig, str):
        # Normalize an ASCII bytes payload to str, preserving any
        # surrogate-escaped bytes.
        msg.set_payload(orig.decode('ascii', 'surrogateescape'))
Set the Content-Transfer-Encoding header to 7bit or 8bit.
def compile_suffix_regex(entries):
    """Compile a sequence of suffix rules into a regex object.

    entries (tuple): The suffix rules, e.g.
        spacy.lang.punctuation.TOKENIZER_SUFFIXES.
    RETURNS (regex object): The regex object, to be used for
        Tokenizer.suffix_search.
    """
    # Anchor every non-blank rule at the end of the string, then join
    # the alternatives into a single pattern.
    anchored = [piece + "$" for piece in entries if piece.strip()]
    return re.compile("|".join(anchored))
Compile a sequence of suffix rules into a regex object. entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES. RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search.
def call(self, input_data=None, *args, **kwargs):
    """
    Calls the request with input data using given configuration
    (retry, timeout, ...).

    :param input_data: payload handed to ``build_request``
    :param args: unused, accepted for interface compatibility
    :param kwargs: unused, accepted for interface compatibility
    :return: the decrypted result, or None if the call raised
    """
    self.build_request(input_data)
    self.caller = RequestCall(self.request)
    self.exception = None
    try:
        self.caller.call()
        self.response = self.caller.response
        self.decrypt_result()
        return self.decrypted
    except Exception as exc:
        # Best-effort: remember and log the failure, then return None.
        self.exception = exc
        logger.info("Exception throw %s", exc)
        return None
Calls the request with input data using given configuration (retry, timeout, ...). :param input_data: :param args: :param kwargs: :return:
def synchronize_files(inputpaths, outpath, database=None, tqdm_bar=None, report_file=None, ptee=None, verbose=False):
    ''' Main function to synchronize files contents by majority vote
    The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one.
    The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures.

    :param inputpaths: list of input folders to synchronize
    :param outpath: output folder receiving the merged files
    :param database: optional rfigc database used to verify files
    :param tqdm_bar: optional progress bar, updated once per file group
    :param report_file: optional path for a CSV report of the merge
    :param ptee: output stream for messages (defaults to sys.stdout)
    :param verbose: if True, print each processed relative filepath
    :return: 0 on success, 1 if any file could not be fully repaired
    '''
    # NOTE: this is Python 2 code (xrange, generator .next()).
    # (Generator) Files Synchronization Algorithm:
    # Needs a function stable_dir_walking, which will walk through directories recursively but in always the same order on all platforms (same order for files but also for folders), whatever order it is, as long as it is stable.
    # Until there's no file in any of the input folders to be processed:
    # - curfiles <- load first file for each folder by using stable_dir_walking on each input folder.
    # - curfiles_grouped <- group curfiles_ordered:
    #    * curfiles_ordered <- order curfiles alphabetically (need to separate the relative parent directory and the filename, to account for both without ambiguity)
    #    * curfiles_grouped <- empty list
    #    * curfiles_grouped[0] = add first element in curfiles_ordered
    #    * last_group = 0
    #    * for every subsequent element nextelt in curfiles_ordered:
    #        . if nextelt == curfiles_grouped[last_group][0]: add nextelt into curfiles_grouped[last_group] (the latest group in curfiles_grouped)
    #        . else: create a new group in curfiles_grouped (last_group += 1) and add nextelt into curfiles_grouped[last_group]
    # At this stage, curfiles_grouped[0] should contain a group of files with the same relative filepath from different input folders, and since we used stable_dir_walking, we are guaranteed that this file is the next to be processed in alphabetical order.
    # - Majority vote byte-by-byte for each of curfiles_grouped[0], and output winning byte to the output file.
    # - Update files list alignment: we will now ditch files in curfiles_grouped[0] from curfiles, and replace by the next files respectively from each respective folder. Since we processed in alphabetical (or whatever) order, the next loaded files will match the files in other curfiles_grouped groups that we could not process before.
    # At this point (after the loop), all input files have been processed in order, without maintaining the whole files list in memory, just one file per input folder.

    # Init files walking generator for each inputpaths
    recgen = [recwalk(path, sorting=True) for path in inputpaths]
    curfiles = {}
    recgen_exhausted = {}
    recgen_exhausted_count = 0
    nbpaths = len(inputpaths)
    retcode = 0

    if not ptee:
        ptee = sys.stdout

    # Open report file and write header
    if report_file is not None:
        rfile = open(report_file, 'wb')
        r_writer = csv.writer(rfile, delimiter='|', lineterminator='\n', quotechar='"')
        r_header = ["filepath"] + ["dir%i" % (i+1) for i in xrange(nbpaths)] + ["hash-correct", "error_code", "errors"]
        r_length = len(r_header)
        r_writer.writerow(r_header)

    # Initialization: load the first batch of files, one for each folder
    for i in xrange(len(recgen)):
        recgen_exhausted[i] = False
        try:
            if curfiles.get(i, None) is None:
                curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
        except StopIteration:
            recgen_exhausted[i] = True
            recgen_exhausted_count += 1

    # Files lists alignment loop
    while recgen_exhausted_count < nbpaths:
        errcode = 0
        errmsg = None

        # Init a new report's row
        if report_file:
            r_row = ["-"] * r_length

        # -- Group equivalent relative filepaths together
        #print curfiles # debug
        curfiles_grouped = sort_group(curfiles, True)

        # -- Extract first group of equivalent filepaths (this allows us to process with the same alphabetical order on all platforms)
        # Note that the remaining files in other groups will be processed later, because their alphabetical order is higher to the first group, this means that the first group is to be processed now
        to_process = curfiles_grouped[0]
        #print to_process # debug

        # -- Byte-by-byte majority vote on the first group of files
        # Need the relative filepath also (note that there's only one since it's a group of equivalent relative filepaths, only the absolute path is different between files of a same group)
        relfilepath = path2unix(os.path.join(*to_process[0][1]))
        if report_file:
            r_row[0] = relfilepath
        if verbose:
            ptee.write("- Processing file %s." % relfilepath)
        # Generate output path
        outpathfull = os.path.join(outpath, relfilepath)
        create_dir_if_not_exist(os.path.dirname(outpathfull))
        # Initialize the list of absolute filepaths
        fileslist = []
        for elt in to_process:
            i = elt[0]
            fileslist.append(os.path.join(inputpaths[i], os.path.join(*elt[1])))
            if report_file:
                r_row[i+1] = 'X' # put an X in the report file below each folder that contains this file

        # If there's only one file, just copy it over
        if len(to_process) == 1:
            shutil.copyfile(fileslist[0], outpathfull)
            id = to_process[0][0]
            if report_file:
                r_row[id+1] = 'O'
        # Else, merge by majority vote
        else:
            # Before-merge check using rfigc database, if provided
            # If one of the files in the input folders is already correct, just copy it over
            correct_file = None
            if database:
                for id, filepath in enumerate(fileslist):
                    if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (filepath, database)) == 0:
                        correct_file = filepath
                        correct_id = to_process[id][0]
                        break

            # If one correct file was found, copy it over
            if correct_file:
                create_dir_if_not_exist(os.path.dirname(outpathfull))
                shutil.copyfile(correct_file, outpathfull)
                if report_file:
                    r_row[correct_id+1] = "O"
                    r_row[-3] = "OK"
            # Else, we need to do the majority vote merge
            else:
                # Do the majority vote merge
                errcode, errmsg = majority_vote_byte_scan(relfilepath, fileslist, outpath)

        # After-merge/move check using rfigc database, if provided
        if database:
            if rfigc.main("-i \"%s\" -d \"%s\" -m --silent" % (outpathfull, database)) == 1:
                errcode = 1
                # NOTE(review): r_row is only defined when report_file is set;
                # using a database without a report file would raise a
                # NameError here -- confirm whether callers ever do that.
                r_row[-3] = "KO"
                if not errmsg:
                    errmsg = ''
                errmsg += " File could not be totally repaired according to rfigc database."
            else:
                if report_file:
                    r_row[-3] = "OK"
                    if errmsg:
                        errmsg += " But merged file is correct according to rfigc database."

        # Display errors if any
        if errcode:
            if report_file:
                r_row[-2] = "KO"
                r_row[-1] = errmsg
            ptee.write(errmsg)
            retcode = 1
        else:
            if report_file:
                r_row[-2] = "OK"

        # Save current report's row
        if report_file:
            r_writer.writerow(r_row)

        # -- Update files lists alignment (ie, retrieve new files but while trying to keep the alignment)
        for elt in to_process:  # for files of the first group (the ones we processed)
            i = elt[0]
            # Walk their respective folders and load up the next file
            try:
                if not recgen_exhausted.get(i, False):
                    curfiles[i] = relpath_posix(recgen[i].next(), inputpaths[i])[1]
            # If there's no file left in this folder, mark this input folder as exhausted and continue with the others
            except StopIteration:
                curfiles[i] = None
                recgen_exhausted[i] = True
                recgen_exhausted_count += 1

        if tqdm_bar:
            tqdm_bar.update()
    if tqdm_bar:
        tqdm_bar.close()

    # Closing report file
    if report_file:
        # Write list of directories and legend
        rfile.write("\n=> Input directories:")
        for id, ipath in enumerate(inputpaths):
            rfile.write("\n\t- dir%i = %s" % ((id+1), ipath))
        rfile.write("\n=> Output directory: %s" % outpath)
        rfile.write("\n=> Legend: X=existing/selected for majority vote, O=only used this file, - = not existing, OK = check correct, KO = check incorrect (file was not recovered)\n")
        # Close the report file handle
        rfile.close()

    return retcode
Main function to synchronize files contents by majority vote The main job of this function is to walk through the input folders and align the files, so that we can compare every files across every folders, one by one. The whole trick here is to align files, so that we don't need to memorize all the files in memory and we compare all equivalent files together: to do that, we ensure that we walk through the input directories in alphabetical order, and we pick the relative filepath at the top of the alphabetical order, this ensures the alignment of files between different folders, without memorizing the whole trees structures.
def channels_archive(self, room_id, **kwargs):
    """Archives a channel."""
    # Delegate to the generic POST helper; any extra keyword arguments
    # are forwarded under the 'kwargs' parameter.
    endpoint = 'channels.archive'
    return self.__call_api_post(endpoint, roomId=room_id, kwargs=kwargs)
Archives a channel.
def get_qualifier_id(self):
    """Gets the ``Qualifier Id`` for this authorization.

    return: (osid.id.Id) - the qualifier ``Id``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.learning.Activity.get_objective_id
    qualifier_id = self._my_map['qualifierId']
    if not qualifier_id:
        # Empty/missing qualifier is an illegal state per the OSID contract.
        raise errors.IllegalState('qualifier empty')
    return Id(qualifier_id)
Gets the ``Qualifier Id`` for this authorization. return: (osid.id.Id) - the qualifier ``Id`` *compliance: mandatory -- This method must be implemented.*
def add_duplicate_analysis(self, src_analysis, destination_slot,
                           ref_gid=None):
    """
    Creates a duplicate of the src_analysis passed in. If the analysis
    passed in is not an IRoutineAnalysis, is retracted or has dependent
    services, returns None. If no reference analyses group id (ref_gid) is
    set, the value will be generated automatically.
    :param src_analysis: analysis to create a duplicate from
    :param destination_slot: slot where duplicate analysis must be stored
    :param ref_gid: the reference analysis group id to be set
    :return: the duplicate analysis or None
    """
    if not src_analysis:
        return None

    # Only routine analyses may be duplicated.
    if not IRoutineAnalysis.providedBy(src_analysis):
        logger.warning('Cannot create duplicate analysis from a non '
                       'routine analysis: {}'.format(src_analysis.getId()))
        return None

    # Retracted analyses cannot serve as duplication sources.
    if api.get_review_status(src_analysis) == 'retracted':
        logger.warning('Cannot create duplicate analysis from a retracted'
                       'analysis: {}'.format(src_analysis.getId()))
        return None

    # TODO Workflow - Duplicate Analyses - Consider duplicates with deps
    # Removing this check from here and ensuring that duplicate.getSiblings
    # returns the analyses sorted by priority (duplicates from same
    # AR > routine analyses from same AR > duplicates from same WS >
    # routine analyses from same WS) should be almost enough
    calc = src_analysis.getCalculation()
    if calc and calc.getDependentServices():
        logger.warning('Cannot create duplicate analysis from an'
                       'analysis with dependent services: {}'
                       .format(src_analysis.getId()))
        return None

    # Create the duplicate
    duplicate = _createObjectByType("DuplicateAnalysis", self, tmpID())
    duplicate.setAnalysis(src_analysis)

    # Set ReferenceAnalysesGroupID (same id for the analyses from
    # the same Reference Sample and same Worksheet)
    if not ref_gid:
        ref_gid = self.nextRefAnalysesGroupID(duplicate.getRequest())
    duplicate.setReferenceAnalysesGroupID(ref_gid)

    # Add the duplicate into the worksheet
    self.addToLayout(duplicate, destination_slot)
    self.setAnalyses(self.getAnalyses() + [duplicate, ])

    # Reindex
    duplicate.reindexObject(idxs=["getAnalyst", "getWorksheetUID",
                                  "getReferenceAnalysesGroupID"])
    self.reindexObject(idxs=["getAnalysesUIDs"])
    return duplicate
Creates a duplicate of the src_analysis passed in. If the analysis passed in is not an IRoutineAnalysis, is retracted or has dependent services, returns None.If no reference analyses group id (ref_gid) is set, the value will be generated automatically. :param src_analysis: analysis to create a duplicate from :param destination_slot: slot where duplicate analysis must be stored :param ref_gid: the reference analysis group id to be set :return: the duplicate analysis or None
def get_composition(source, *fxns):
    """Compose several extractors together, on a source."""
    # Thread the source value through each extractor, left to right.
    acc = source
    for extractor in fxns:
        acc = extractor(acc)
    return acc
Compose several extractors together, on a source.
def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
    """Add an after websocket function.

    This is designed to be used as a decorator. An example usage,

    .. code-block:: python

        @app.after_websocket
        def func(response):
            return response

    Arguments:
        func: The after websocket function itself.
        name: Optional blueprint key name.
    """
    # Store the coroutine-wrapped handler; hand back the original func
    # so decorator usage is transparent.
    self.after_websocket_funcs[name].append(ensure_coroutine(func))
    return func
Add an after websocket function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_websocket def func(response): return response Arguments: func: The after websocket function itself. name: Optional blueprint key name.
def tablelib_binary_features(span1, span2): """ Table-/structure-related features for a pair of spans """ binary_features = settings["featurization"]["table"]["binary_features"] if span1.sentence.is_tabular() and span2.sentence.is_tabular(): if span1.sentence.table == span2.sentence.table: yield "SAME_TABLE", DEF_VALUE if span1.sentence.cell is not None and span2.sentence.cell is not None: row_diff = min_row_diff( span1.sentence, span2.sentence, absolute=binary_features["min_row_diff"]["absolute"], ) col_diff = min_col_diff( span1.sentence, span2.sentence, absolute=binary_features["min_col_diff"]["absolute"], ) yield f"SAME_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE yield f"SAME_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE yield ( f"SAME_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]" ), DEF_VALUE if span1.sentence.cell == span2.sentence.cell: yield "SAME_CELL", DEF_VALUE yield ( f"WORD_DIFF_[" f"{span1.get_word_start_index() - span2.get_word_start_index()}" f"]" ), DEF_VALUE yield ( f"CHAR_DIFF_[{span1.char_start - span2.char_start}]" ), DEF_VALUE if span1.sentence == span2.sentence: yield "SAME_SENTENCE", DEF_VALUE else: if span1.sentence.cell is not None and span2.sentence.cell is not None: yield "DIFF_TABLE", DEF_VALUE row_diff = min_row_diff( span1.sentence, span2.sentence, absolute=binary_features["min_row_diff"]["absolute"], ) col_diff = min_col_diff( span1.sentence, span2.sentence, absolute=binary_features["min_col_diff"]["absolute"], ) yield f"DIFF_TABLE_ROW_DIFF_[{row_diff}]", DEF_VALUE yield f"DIFF_TABLE_COL_DIFF_[{col_diff}]", DEF_VALUE yield ( f"DIFF_TABLE_MANHATTAN_DIST_[{abs(row_diff) + abs(col_diff)}]" ), DEF_VALUE
Table-/structure-related features for a pair of spans
def file_sign( blockchain_id, hostname, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ): """ Sign a file with the current blockchain ID's host's public key. @config_path should be for the *client*, not blockstack-file Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success, and write ciphertext to output_path Return {'error': ...} on error """ config_dir = os.path.dirname(config_path) # find our encryption key key_info = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path, wallet_keys=wallet_keys ) if 'error' in key_info: return {'error': 'Failed to lookup encryption key'} # sign res = blockstack_gpg.gpg_sign( input_path, key_info, config_dir=config_dir ) if 'error' in res: log.error("Failed to encrypt: %s" % res['error']) return {'error': 'Failed to encrypt'} return {'status': True, 'sender_key_id': key_info['key_id'], 'sig': res['sig']}
Sign a file with the current blockchain ID's host's public key. @config_path should be for the *client*, not blockstack-file Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success, and write ciphertext to output_path Return {'error': ...} on error
def next(self): """Returns the next item in the cursor.""" if self._current_index < len(self._collection): value = self._collection[self._current_index] self._current_index += 1 return value elif self._next_cursor: self.__fetch_next() return self.next() else: self._current_index = 0 raise StopIteration
Returns the next item in the cursor.
def start_update(self, draw=None, queues=None): """ Conduct the formerly registered updates This method conducts the updates that have been registered via the :meth:`update` method. You can call this method if the :attr:`auto_update` attribute of this instance is True and the `auto_update` parameter in the :meth:`update` method has been set to False Parameters ---------- %(InteractiveBase.start_update.parameters)s Returns ------- %(InteractiveBase.start_update.returns)s See Also -------- :attr:`no_auto_update`, update """ if queues is not None: queues[0].get() try: for arr in self: arr.psy.start_update(draw=False) self.onupdate.emit() except Exception: self._finish_all(queues) raise if queues is not None: queues[0].task_done() return InteractiveBase.start_update(self, draw=draw, queues=queues)
Conduct the formerly registered updates This method conducts the updates that have been registered via the :meth:`update` method. You can call this method if the :attr:`auto_update` attribute of this instance is True and the `auto_update` parameter in the :meth:`update` method has been set to False Parameters ---------- %(InteractiveBase.start_update.parameters)s Returns ------- %(InteractiveBase.start_update.returns)s See Also -------- :attr:`no_auto_update`, update
def segwit_encode(hrp, witver, witprog): """Encode a segwit address.""" ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5)) if segwit_decode(hrp, ret) == (None, None): return None return ret
Encode a segwit address.
def _partition_all_internal(s, sep): """ Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep """ parts = list(s.partition(sep)) # if sep found if parts[1] == sep: new_parts = partition_all(parts[2], sep) parts.pop() parts.extend(new_parts) return [p for p in parts if p] else: if parts[0]: return [parts[0]] else: return []
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep
def state_summary(value, name, state_list, helper, ok_value = 'ok', info = None): """ Always add the status to the long output, and if the status is not ok (or ok_value), we show it in the summary and set the status to critical """ # translate the value (integer) we receive to a human readable value (e.g. ok, critical etc.) with the given state_list state_value = state_list[int(value)] summary_output = '' long_output = '' if not info: info = '' if state_value != ok_value: summary_output += ('%s status: %s %s ' % (name, state_value, info)) helper.status(pynag.Plugins.critical) long_output += ('%s status: %s %s\n' % (name, state_value, info)) return (summary_output, long_output)
Always add the status to the long output, and if the status is not ok (or ok_value), we show it in the summary and set the status to critical
def _graphic(self): """ Adds the correct graphic options depending of the OS """ if sys.platform.startswith("win"): return [] if len(os.environ.get("DISPLAY", "")) > 0: return [] if "-nographic" not in self._options: return ["-nographic"] return []
Adds the correct graphic options depending of the OS
def picard_sort(picard, align_bam, sort_order="coordinate", out_file=None, compression_level=None, pipe=False): """Sort a BAM file by coordinates. """ base, ext = os.path.splitext(align_bam) if out_file is None: out_file = "%s-sort%s" % (base, ext) if not file_exists(out_file): with tx_tmpdir(picard._config) as tmp_dir: with file_transaction(picard._config, out_file) as tx_out_file: opts = [("INPUT", align_bam), ("OUTPUT", out_file if pipe else tx_out_file), ("TMP_DIR", tmp_dir), ("SORT_ORDER", sort_order)] if compression_level: opts.append(("COMPRESSION_LEVEL", compression_level)) picard.run("SortSam", opts, pipe=pipe) return out_file
Sort a BAM file by coordinates.
def roll_out_and_store(self, batch_info): """ Roll out environment and store result in the replay buffer """ self.model.train() if self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) # Store some information about the rollout, no training phase batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information() else: frames = 0 episode_infos = [] with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) new_frames = rollout.frames() frames += new_frames episode_infos.extend(rollout.episode_information()) pbar.update(new_frames) # Store some information about the rollout, no training phase batch_info['frames'] = frames batch_info['episode_infos'] = episode_infos
Roll out environment and store result in the replay buffer
def close_node(self, node_id, *args, **kwargs): """ Closes a VPCS VM. :returns: VPCSVM instance """ node = self.get_node(node_id) if node_id in self._used_mac_ids: i = self._used_mac_ids[node_id] self._free_mac_ids[node.project.id].insert(0, i) del self._used_mac_ids[node_id] yield from super().close_node(node_id, *args, **kwargs) return node
Closes a VPCS VM. :returns: VPCSVM instance
def date_to_epiweek(date=datetime.date.today()) -> Epiweek: """ Convert python date to Epiweek """ year = date.year start_dates = list(map(_start_date_of_year, [year - 1, year, year + 1])) start_date = start_dates[1] if start_dates[1] > date: start_date = start_dates[0] elif date >= start_dates[2]: start_date = start_dates[2] return Epiweek( year=(start_date + datetime.timedelta(days=7)).year, week=((date - start_date).days // 7) + 1, day=(date.isoweekday() % 7) + 1 )
Convert python date to Epiweek
def impact_path(self, value): """Setter to impact path. :param value: The impact path. :type value: str """ self._impact_path = value if value is None: self.action_show_report.setEnabled(False) self.action_show_log.setEnabled(False) self.report_path = None self.log_path = None else: self.action_show_report.setEnabled(True) self.action_show_log.setEnabled(True) self.log_path = '%s.log.html' % self.impact_path self.report_path = '%s.report.html' % self.impact_path self.save_report_to_html() self.save_log_to_html() self.show_report()
Setter to impact path. :param value: The impact path. :type value: str
def set_rate(rate): """Defines the ideal rate at which computation is to be performed :arg rate: the frequency in Hertz :type rate: int or float :raises: TypeError: if argument 'rate' is not int or float """ if not (isinstance(rate, int) or isinstance(rate, float)): raise TypeError("argument to set_rate is expected to be int or float") global loop_duration loop_duration = 1.0/rate
Defines the ideal rate at which computation is to be performed :arg rate: the frequency in Hertz :type rate: int or float :raises: TypeError: if argument 'rate' is not int or float
def classify(self, text=u''): """ Predicts the Language of a given text. :param text: Unicode text to be classified. """ result = self.calculate(doc_terms=self.tokenize(text)) #return (result['calc_id'], result) return (result['calc_id'], self.karbasa(result))
Predicts the Language of a given text. :param text: Unicode text to be classified.
def W(self,value): """ set fixed effect design """ if value is None: value = sp.zeros((self._N, 0)) assert value.shape[0]==self._N, 'Dimension mismatch' self._K = value.shape[1] self._W = value self._notify() self.clear_cache('predict_in_sample','Yres')
set fixed effect design
def render_sync(self): 'plots points and lines and text onto the Plotter' self.setZoom() bb = self.visibleBox xmin, ymin, xmax, ymax = bb.xmin, bb.ymin, bb.xmax, bb.ymax xfactor, yfactor = self.xScaler, self.yScaler plotxmin, plotymin = self.plotviewBox.xmin, self.plotviewBox.ymin for vertexes, attr, row in Progress(self.polylines, 'rendering'): if len(vertexes) == 1: # single point x1, y1 = vertexes[0] x1, y1 = float(x1), float(y1) if xmin <= x1 <= xmax and ymin <= y1 <= ymax: x = plotxmin+(x1-xmin)*xfactor y = plotymin+(y1-ymin)*yfactor self.plotpixel(round(x), round(y), attr, row) continue prev_x, prev_y = vertexes[0] for x, y in vertexes[1:]: r = clipline(prev_x, prev_y, x, y, xmin, ymin, xmax, ymax) if r: x1, y1, x2, y2 = r x1 = plotxmin+float(x1-xmin)*xfactor y1 = plotymin+float(y1-ymin)*yfactor x2 = plotxmin+float(x2-xmin)*xfactor y2 = plotymin+float(y2-ymin)*yfactor self.plotline(x1, y1, x2, y2, attr, row) prev_x, prev_y = x, y for x, y, text, attr, row in Progress(self.gridlabels, 'labeling'): self.plotlabel(self.scaleX(x), self.scaleY(y), text, attr, row)
plots points and lines and text onto the Plotter
def parse_featurecounts_report (self, f): """ Parse the featureCounts log file. """ file_names = list() parsed_data = dict() for l in f['f'].splitlines(): thisrow = list() s = l.split("\t") if len(s) < 2: continue if s[0] == 'Status': for f_name in s[1:]: file_names.append(f_name) else: k = s[0] if k not in self.featurecounts_keys: self.featurecounts_keys.append(k) for val in s[1:]: try: thisrow.append(int(val)) except ValueError: pass if len(thisrow) > 0: parsed_data[k] = thisrow # Check that this actually is a featureCounts file, as format and parsing is quite general if 'Assigned' not in parsed_data.keys(): return None for idx, f_name in enumerate(file_names): # Clean up sample name s_name = self.clean_s_name(f_name, f['root']) # Reorganised parsed data for this sample # Collect total count number data = dict() data['Total'] = 0 for k in parsed_data: data[k] = parsed_data[k][idx] data['Total'] += parsed_data[k][idx] # Calculate the percent aligned if we can try: data['percent_assigned'] = (float(data['Assigned'])/float(data['Total'])) * 100.0 except (KeyError, ZeroDivisionError): pass # Add to the main dictionary if len(data) > 1: if s_name in self.featurecounts_data: log.debug("Duplicate sample name found! Overwriting: {}".format(s_name)) self.add_data_source(f, s_name) self.featurecounts_data[s_name] = data
Parse the featureCounts log file.
def lazy_map(initial={}, pre_size=0): ''' lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps. ''' if is_lazy_map(initial): return initial if not initial: return _EMPTY_LMAP return _lazy_turbo_mapping(initial, pre_size)
lazy_map is a blatant copy of the pyrsistent.pmap function, and is used to create lazy maps.
def get(self, table_name): """Load table class by name, class not yet initialized""" assert table_name in self.tabs, \ "Table not avaiable. Avaiable tables: {}".format( ", ".join(self.tabs.keys()) ) return self.tabs[table_name]
Load table class by name, class not yet initialized
def process_once(self, timeout=0): """Process data from connections once. Arguments: timeout -- How long the select() call should wait if no data is available. This method should be called periodically to check and process incoming data, if there are any. If that seems boring, look at the process_forever method. """ log.log(logging.DEBUG - 2, "process_once()") sockets = self.sockets if sockets: in_, out, err = select.select(sockets, [], [], timeout) self.process_data(in_) else: time.sleep(timeout) self.process_timeout()
Process data from connections once. Arguments: timeout -- How long the select() call should wait if no data is available. This method should be called periodically to check and process incoming data, if there are any. If that seems boring, look at the process_forever method.
def _add_node(self, node): """Add a new node to node_list and give the node an ID. Args: node: An instance of Node. Returns: node_id: An integer. """ node_id = len(self.node_list) self.node_to_id[node] = node_id self.node_list.append(node) self.adj_list[node_id] = [] self.reverse_adj_list[node_id] = [] return node_id
Add a new node to node_list and give the node an ID. Args: node: An instance of Node. Returns: node_id: An integer.
def run(juttle, deployment_name, program_name=None, persist=False, token_manager=None, app_url=defaults.APP_URL): """ run a juttle program through the juttle streaming API and return the various events that are part of running a Juttle program which include: * Initial job status details including information to associate multiple flowgraphs with their individual outputs (sinks): { "status": "ok", "job": { "channel_id": "56bde5f0", "_start_time": "2015-10-03T06:59:49.233Z", "alias": "jut-tools program 1443855588", "_ms_begin": 1443855589233, "user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510", "timeout": 5, "id": "b973bce6" }, "now": "2015-10-03T06:59:49.230Z", "stats": ... "sinks": [ { "location": { "start": { "column": 17, "line": 1, "offset": 16 }, "end": { "column": 24, "line": 1, "offset": 23 }, "filename": "main" }, "name": "table", "channel": "sink237", "options": { "_jut_time_bounds": [] } }, ... as many sinks as there are flowgrpahs in your program ] } * Each set of points returned along with the indication of which sink they belong to: { "points": [ array of points ], "sink": sink_id } * Error event indicating where in your program the error occurred { "error": true, payload with "info" and "context" explaining exact error } * Warning event indicating where in your program the error occurred { "warning": true, payload with "info" and "context" explaining exact warning } * ... juttle: juttle program to execute deployment_name: the deployment name to execute the program on persist: if set to True then we won't wait for response data and will disconnect from the websocket leaving the program running in the background if it is uses a background output (http://docs.jut.io/juttle-guide/#background_outputs) and therefore becomes a persistent job. 
token_manager: auth.TokenManager object app_url: optional argument used primarily for internal Jut testing """ headers = token_manager.get_access_token_headers() data_url = get_juttle_data_url(deployment_name, app_url=app_url, token_manager=token_manager) websocket = __wss_connect(data_url, token_manager) data = websocket.recv() channel_id_obj = json.loads(data) if is_debug_enabled(): debug('got channel response %s', json.dumps(channel_id_obj)) channel_id = channel_id_obj['channel_id'] juttle_job = { 'channel_id': channel_id, 'alias': program_name, 'program': juttle } response = requests.post('%s/api/v1/jobs' % data_url, data=json.dumps(juttle_job), headers=headers) if response.status_code != 200: yield { "error": True, "context": response.json() } return job_info = response.json() # yield job_info so the caller to this method can figure out which sinks # correlate to which flowgraphs yield job_info job_id = job_info['job']['id'] if is_debug_enabled(): debug('started job %s', json.dumps(job_info)) for data in connect_job(job_id, deployment_name, token_manager=token_manager, app_url=app_url, persist=persist, websocket=websocket, data_url=data_url): yield data
run a juttle program through the juttle streaming API and return the various events that are part of running a Juttle program which include: * Initial job status details including information to associate multiple flowgraphs with their individual outputs (sinks): { "status": "ok", "job": { "channel_id": "56bde5f0", "_start_time": "2015-10-03T06:59:49.233Z", "alias": "jut-tools program 1443855588", "_ms_begin": 1443855589233, "user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510", "timeout": 5, "id": "b973bce6" }, "now": "2015-10-03T06:59:49.230Z", "stats": ... "sinks": [ { "location": { "start": { "column": 17, "line": 1, "offset": 16 }, "end": { "column": 24, "line": 1, "offset": 23 }, "filename": "main" }, "name": "table", "channel": "sink237", "options": { "_jut_time_bounds": [] } }, ... as many sinks as there are flowgrpahs in your program ] } * Each set of points returned along with the indication of which sink they belong to: { "points": [ array of points ], "sink": sink_id } * Error event indicating where in your program the error occurred { "error": true, payload with "info" and "context" explaining exact error } * Warning event indicating where in your program the error occurred { "warning": true, payload with "info" and "context" explaining exact warning } * ... juttle: juttle program to execute deployment_name: the deployment name to execute the program on persist: if set to True then we won't wait for response data and will disconnect from the websocket leaving the program running in the background if it is uses a background output (http://docs.jut.io/juttle-guide/#background_outputs) and therefore becomes a persistent job. token_manager: auth.TokenManager object app_url: optional argument used primarily for internal Jut testing
def _get_spyderplugins(plugin_path, is_io, modnames, modlist): """Scan the directory `plugin_path` for plugin packages and loads them.""" if not osp.isdir(plugin_path): return for name in os.listdir(plugin_path): # This is needed in order to register the spyder_io_hdf5 plugin. # See issue 4487 # Is this a Spyder plugin? if not name.startswith(PLUGIN_PREFIX): continue # Ensure right type of plugin if is_io != name.startswith(IO_PREFIX): continue # Skip names that end in certain suffixes forbidden_suffixes = ['dist-info', 'egg.info', 'egg-info', 'egg-link', 'kernels'] if any([name.endswith(s) for s in forbidden_suffixes]): continue # Import the plugin _import_plugin(name, plugin_path, modnames, modlist)
Scan the directory `plugin_path` for plugin packages and loads them.
def make_reverse_dict(in_dict, warn=True): """ Build a reverse dictionary from a cluster dictionary Parameters ---------- in_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster. Returns ------- out_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. Note that the key does not point to itself. """ out_dict = {} for k, v in in_dict.items(): for vv in v: if vv in out_dict: if warn: print("Dictionary collision %i" % vv) out_dict[vv] = k return out_dict
Build a reverse dictionary from a cluster dictionary Parameters ---------- in_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster. Returns ------- out_dict : dict(int:int) A single valued dictionary pointing from source index to cluster key for each source in a cluster. Note that the key does not point to itself.
def remove_domain_user_role(request, user, role, domain=None): """Removes a given single role for a user from a domain.""" manager = keystoneclient(request, admin=True).roles return manager.revoke(role, user=user, domain=domain)
Removes a given single role for a user from a domain.
def validate(self, value=None, model=None, context=None): """ Sequentially apply each validator to value and collect errors. :param value: a value to validate :param model: parent entity :param context: validation context, usually parent entity :return: list of errors (if any) """ errors = [] for validator in self.validators: if value is None and not isinstance(validator, Required): continue error = validator.run( value=value, model=model, context=context if self.use_context else None ) if error: errors.append(error) return errors
Sequentially apply each validator to value and collect errors. :param value: a value to validate :param model: parent entity :param context: validation context, usually parent entity :return: list of errors (if any)
def gallery_section(images, title): """Create detail section with gallery. Args: title (str): Title to be displayed for detail section. images: stream of marv image files Returns One detail section. """ # pull all images imgs = [] while True: img = yield marv.pull(images) if img is None: break imgs.append({'src': img.relpath}) if not imgs: return # create gallery widget and section containing it widget = {'title': images.title, 'gallery': {'images': imgs}} section = {'title': title, 'widgets': [widget]} yield marv.push(section)
Create detail section with gallery. Args: title (str): Title to be displayed for detail section. images: stream of marv image files Returns One detail section.
def open_unknown_proxy(self, proxy, fullurl, data=None): """Overridable interface to open unknown URL type.""" type, url = splittype(fullurl) raise IOError('url error', 'invalid proxy for %s' % type, proxy)
Overridable interface to open unknown URL type.
def _get_magnitudes_from_spacing(self, magnitudes, delta_m): '''If a single magnitude spacing is input then create the bins :param numpy.ndarray magnitudes: Vector of earthquake magnitudes :param float delta_m: Magnitude bin width :returns: Vector of magnitude bin edges (numpy.ndarray) ''' min_mag = np.min(magnitudes) max_mag = np.max(magnitudes) if (max_mag - min_mag) < delta_m: raise ValueError('Bin width greater than magnitude range!') mag_bins = np.arange(np.floor(min_mag), np.ceil(max_mag), delta_m) # Check to see if there are magnitudes in lower and upper bins is_mag = np.logical_and(mag_bins - max_mag < delta_m, min_mag - mag_bins < delta_m) mag_bins = mag_bins[is_mag] return mag_bins
If a single magnitude spacing is input then create the bins :param numpy.ndarray magnitudes: Vector of earthquake magnitudes :param float delta_m: Magnitude bin width :returns: Vector of magnitude bin edges (numpy.ndarray)
def force_move(source, destination): """ Force the move of the source inside the destination even if the destination has already a folder with the name inside. In the case, the folder will be replaced. :param string source: path of the source to move. :param string destination: path of the folder to move the source to. """ if not os.path.exists(destination): raise RuntimeError( 'The code could not be moved to {destination} ' 'because the folder does not exist'.format(destination=destination)) destination_folder = os.path.join(destination, os.path.split(source)[-1]) if os.path.exists(destination_folder): shutil.rmtree(destination_folder) shutil.move(source, destination)
Force the move of the source inside the destination even if the destination has already a folder with the name inside. In the case, the folder will be replaced. :param string source: path of the source to move. :param string destination: path of the folder to move the source to.
def toc_html(self, depth=6, lowest_level=6): """ Get TOC of currently fed HTML string in form of HTML string. :param depth: the depth of TOC :param lowest_level: the allowed lowest level of header tag :return: an HTML string """ toc = self.toc(depth=depth, lowest_level=lowest_level) if not toc: return '' def map_toc_list(toc_list): result = '' if toc_list: result += '<ul>\n' result += ''.join( map(lambda x: '<li>' '<a href="#{}">{}</a>{}' '</li>\n'.format( x['id'], x['inner_html'], map_toc_list(x['children'])), toc_list) ) result += '</ul>' return result return map_toc_list(toc)
Get TOC of currently fed HTML string in form of HTML string. :param depth: the depth of TOC :param lowest_level: the allowed lowest level of header tag :return: an HTML string
def on_connect(self, connection): "Called when the socket connects" self._sock = connection._sock self._buffer = SocketBuffer(self._sock, self.socket_read_size) self.encoder = connection.encoder
Called when the socket connects
def set_document_unit(self, unit): """Use specified unit for width and height of generated SVG file. See ``SVG_UNIT_*`` enumerated values for a list of available unit values that can be used here. This function can be called at any time before generating the SVG file. However to minimize the risk of ambiguities it's recommended to call it before any drawing operations have been performed on the given surface, to make it clearer what the unit used in the drawing operations is. The simplest way to do this is to call this function immediately after creating the SVG surface. Note if this function is never called, the default unit for SVG documents generated by cairo will be "pt". This is for historical reasons. :param unit: SVG unit. *New in cairo 1.16.* *New in cairocffi 0.9.* """ cairo.cairo_svg_surface_set_document_unit(self._pointer, unit) self._check_status()
Use specified unit for width and height of generated SVG file. See ``SVG_UNIT_*`` enumerated values for a list of available unit values that can be used here. This function can be called at any time before generating the SVG file. However to minimize the risk of ambiguities it's recommended to call it before any drawing operations have been performed on the given surface, to make it clearer what the unit used in the drawing operations is. The simplest way to do this is to call this function immediately after creating the SVG surface. Note if this function is never called, the default unit for SVG documents generated by cairo will be "pt". This is for historical reasons. :param unit: SVG unit. *New in cairo 1.16.* *New in cairocffi 0.9.*
def save_state(self): """Save current state of GUI to configuration file.""" set_setting('lastSourceDir', self.source_directory.text()) set_setting('lastOutputDir', self.output_directory.text()) set_setting( 'useDefaultOutputDir', self.scenario_directory_radio.isChecked())
Save current state of GUI to configuration file.
def gen_code_api(self): """TODO: Docstring for gen_code_api.""" # edit config file conf_editor = Editor(self.conf_fpath) # insert code path for searching conf_editor.editline_with_regex(r'^# import os', 'import os') conf_editor.editline_with_regex(r'^# import sys', 'import sys') conf_editor.editline_with_regex( r'^# sys\.path\.insert', 'sys.path.insert(0, "{}")'.format(self.code_fdpath)) conf_editor.editline_with_regex( r"""html_theme = 'alabaster'""", 'html_theme = \'default\''.format(self.code_fdpath)) conf_editor.finish_writing() # sphinx-apidoc to generate rst from source code # force regenerate subprocess.call(self._sphinx_apidoc_cmd) pass
TODO: Docstring for gen_code_api.
def delete_orderrun(backend, orderrun_id): """ Delete the orderrun specified by the argument. """ click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green') check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip()))
Delete the orderrun specified by the argument.
def handle_string_response(self, call_id, payload): """Handler for response `StringResponse`. This is the response for the following requests: 1. `DocUriAtPointReq` or `DocUriForSymbolReq` 2. `DebugToStringReq` """ self.log.debug('handle_string_response: in [typehint: %s, call ID: %s]', payload['typehint'], call_id) # :EnDocBrowse or :EnDocUri url = payload['text'] if not url.startswith('http'): port = self.ensime.http_port() url = gconfig['localhost'].format(port, url) options = self.call_options.get(call_id) if options and options.get('browse'): self._browse_doc(url) del self.call_options[call_id] else: # TODO: make this return value of a Vim function synchronously, how? self.log.debug('EnDocUri %s', url) return url
Handler for response `StringResponse`. This is the response for the following requests: 1. `DocUriAtPointReq` or `DocUriForSymbolReq` 2. `DebugToStringReq`
def _parse_record(data, duration_format='seconds'): """ Parse a raw data dictionary and return a Record object. """ def _map_duration(s): if s == '': return None elif duration_format.lower() == 'seconds': return int(s) else: t = time.strptime(s, duration_format) return 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec def _map_position(data): antenna = Position() if 'antenna_id' in data and data['antenna_id']: antenna.antenna = data['antenna_id'] if 'place_id' in data: raise NameError("Use field name 'antenna_id' in input files. " "'place_id' is deprecated.") if 'latitude' in data and 'longitude' in data: latitude = data['latitude'] longitude = data['longitude'] # latitude and longitude should not be empty strings. if latitude and longitude: antenna.location = float(latitude), float(longitude) return antenna return Record(interaction=data['interaction'] if data['interaction'] else None, direction=data['direction'], correspondent_id=data['correspondent_id'], datetime=_tryto( lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"), data['datetime']), call_duration=_tryto(_map_duration, data['call_duration']), position=_tryto(_map_position, data))
Parse a raw data dictionary and return a Record object.
def subtract_months(self, months: int) -> datetime: """ Subtracts a number of months from the current value """ self.value = self.value - relativedelta(months=months) return self.value
Subtracts a number of months from the current value
def get_reaction(self, reactants, products):
    """Fetch a reaction from the Materials Project.

    Args:
        reactants ([str]): List of formulas.
        products ([str]): List of formulas.

    Returns:
        rxn
    """
    # The REST endpoint expects PHP-style array keys ("reactants[]").
    payload = {"reactants[]": reactants, "products[]": products}
    return self._make_request("/reaction", payload=payload, mp_decode=False)
Gets a reaction from the Materials Project. Args: reactants ([str]): List of formulas products ([str]): List of formulas Returns: rxn
def install_gem(name, version=None, install_args=None, override_args=False):
    '''
    Instructs Chocolatey to install a package via Ruby's Gems.

    name
        The name of the package to be installed. Only accepts a single
        argument.

    version
        Install a specific version of the package. Defaults to the latest
        version available.

    install_args
        A list of install arguments you want to pass to the installation
        process, i.e. product key or feature list.

    override_args
        Set to True if you want to override the original install arguments
        (for the native installer) in the package and use your own. When
        this is set to False, install_args will be appended to the end of
        the default arguments.

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_gem <package name>
        salt '*' chocolatey.install_gem <package name> version=<package version>
        salt '*' chocolatey.install_gem <package name> install_args=<args> override_args=True
    '''
    # Thin wrapper: delegate to the generic installer with the gems source.
    return install(name,
                   version=version,
                   source='ruby',
                   install_args=install_args,
                   override_args=override_args)
Instructs Chocolatey to install a package via Ruby's Gems. name The name of the package to be installed. Only accepts a single argument. version Install a specific version of the package. Defaults to latest version available. install_args A list of install arguments you want to pass to the installation process, i.e. product key or feature list override_args Set to true if you want to override the original install arguments (for the native installer) in the package and use your own. When this is set to False install_args will be appended to the end of the default arguments CLI Example: .. code-block:: bash salt '*' chocolatey.install_gem <package name> salt '*' chocolatey.install_gem <package name> version=<package version> salt '*' chocolatey.install_gem <package name> install_args=<args> override_args=True
def endpoint_show(endpoint_id):
    """
    Executor for `globus endpoint show`
    """
    client = get_client()
    endpoint = client.get_endpoint(endpoint_id)
    # Globus Connect Personal endpoints expose a different field set.
    fields = GCP_FIELDS if endpoint["is_globus_connect"] else STANDARD_FIELDS
    formatted_print(endpoint, text_format=FORMAT_TEXT_RECORD, fields=fields)
Executor for `globus endpoint show`
def query(self, url, method="GET", params=None, headers=None):
    """
    Request an API endpoint at ``url`` with ``params`` being either the
    POST or GET data.

    :param url: Full URL of the API endpoint.
    :param method: HTTP verb; must name a ``requests`` module function
        (``GET``, ``POST``, ...).
    :param params: Query-string or form parameters for the request.
    :param headers: Extra HTTP headers to send.
    :raises OAuthError: If the endpoint does not respond with status 200.
    :returns: The response body as text.
    """
    # Fix: the original used mutable default arguments (params=dict(),
    # headers=dict()), which are created once at function definition and
    # shared across calls. Use None sentinels and build fresh dicts.
    params = {} if params is None else params
    headers = {} if headers is None else headers

    access_token = self._get_at_from_session()
    oauth = OAuth1(
        self.consumer_key,
        client_secret=self.secret_key,
        resource_owner_key=access_token['oauth_token'],
        resource_owner_secret=access_token['oauth_token_secret'])
    response = getattr(requests, method.lower())(url,
                                                 auth=oauth,
                                                 headers=headers,
                                                 params=params)
    if response.status_code != 200:
        raise OAuthError(
            _('No access to private resources at "%s".')
            % get_token_prefix(self.request_token_url))

    return response.text
Request an API endpoint at ``url`` with ``params`` being either the POST or GET data.