code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_experiment_in_group(self, group, bucketing_id):
    """Determine which experiment in the group the user is bucketed into.

    Args:
        group: The group to bucket the user into.
        bucketing_id: ID to be used for bucketing the user.

    Returns:
        Experiment if the user is bucketed into an experiment in the
        specified group, None otherwise.
    """
    experiment = None
    matched_id = self.bucketer.find_bucket(bucketing_id, group.id, group.trafficAllocation)
    if matched_id:
        experiment = self.config.get_experiment_from_id(matched_id)
    if experiment:
        self.logger.info('User with bucketing ID "%s" is in experiment %s of group %s.' % (
            bucketing_id, experiment.key, group.id
        ))
        return experiment
    self.logger.info('User with bucketing ID "%s" is not in any experiments of group %s.' % (
        bucketing_id, group.id
    ))
    return None
Determine which experiment in the group the user is bucketed into. Args: group: The group to bucket the user into. bucketing_id: ID to be used for bucketing the user. Returns: Experiment if the user is bucketed into an experiment in the specified group. None otherwise.
def parseOneGame(self):
    """Parse a single game from 'self.data'.

    Returns a 'GameTree' containing one game, or 'None' once the end of
    'self.data' has been reached.
    """
    if self.index >= self.datalen:
        return None
    match = self.reGameTreeStart.match(self.data, self.index)
    if not match:
        return None
    # Advance the cursor past the game-tree opener before recursing.
    self.index = match.end()
    return self.parseGameTree()
Parses one game from 'self.data'. Returns a 'GameTree' containing one game, or 'None' if the end of 'self.data' has been reached.
def delete_dataset(dataset_id, **kwargs):
    """Remove a piece of data from the DB.

    CAUTION! Use with care, as this cannot be undone easily.

    Raises:
        HydraError: if the dataset does not exist, or is still referenced
            by one or more resource scenarios.
    """
    try:
        dataset = db.DBSession.query(Dataset).filter(Dataset.id == dataset_id).one()
    except NoResultFound:
        raise HydraError("Dataset %s does not exist." % dataset_id)

    # Refuse to delete while any resource scenario still references this dataset.
    usages = db.DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == dataset_id).all()
    if usages:
        raise HydraError(
            "Cannot delete %s. Dataset is used by one or more resource scenarios." % dataset_id)

    db.DBSession.delete(dataset)
    db.DBSession.flush()
    db.DBSession.expunge_all()
Removes a piece of data from the DB. CAUTION! Use with care, as this cannot be undone easily.
def to_pandas(self):
    """Return a pandas DataFrame representation of the condensed tree.

    Each row corresponds to an edge of the tree; the columns come straight
    from the fields of ``self._raw_tree``.

    Raises:
        ImportError: if pandas is not installed.
    """
    try:
        from pandas import DataFrame, Series
    except ImportError:
        raise ImportError('You must have pandas installed to export pandas DataFrames')
    return DataFrame(self._raw_tree)
Return a pandas dataframe representation of the condensed tree. Each row of the dataframe corresponds to an edge in the tree. The columns of the dataframe are `parent`, `child`, `lambda_val` and `child_size`. The `parent` and `child` are the ids of the parent and child nodes in the tree. Node ids less than the number of points in the original dataset represent individual points, while ids greater than the number of points are clusters. The `lambda_val` value is the value (1/distance) at which the `child` node leaves the cluster. The `child_size` is the number of points in the `child` node.
def register(self, key, **kwargs):
    """Register metadata for a metric and return its composite key.

    All dimension values are stringified before being stored.
    """
    dims = {name: str(value) for name, value in kwargs.items()}
    composite = self._composite_name(key, dims)
    self._metadata[composite] = {'metric': key, 'dimensions': dims}
    return composite
Registers metadata for a metric and returns a composite key
def _check_id_validity(self, p_ids): errors = [] valid_ids = self.todolist.ids() if len(p_ids) == 0: errors.append('No todo item was selected') else: errors = ["Invalid todo ID: {}".format(todo_id) for todo_id in p_ids - valid_ids] errors = '\n'.join(errors) if errors else None return errors
Checks if there are any invalid todo IDs in p_ids list. Returns proper error message if any ID is invalid and None otherwise.
def replace_static_libraries(self, exclusions=None):
    """Replace library references with full paths to their static versions
    when a static version can be found on the library path.

    Libraries named in ``exclusions`` are left untouched.
    """
    if "stdc++" not in self.libraries:
        self.libraries.append("stdc++")
    if exclusions is None:
        exclusions = []
    for name in set(self.libraries) - set(exclusions):
        static_path = find_static_library(name, self.library_dirs)
        if not static_path:
            continue
        # Swap the -l style reference for the archive's full path.
        self.libraries.remove(name)
        self.extra_objects.append(static_path)
Replaces references to libraries with full paths to their static versions if the static version is to be found on the library path.
def get_attachment(self, file_path):
    """Return the file at ``file_path`` as a MIMEBase attachment.

    The file is read as binary, base64-encoded, and given a
    Content-Disposition header carrying its basename.

    Exits the process with status 6 if the file cannot be read.
    """
    try:
        # Context manager closes the handle even if reading fails
        # (the original leaked it).
        with open(file_path, 'rb') as file_:
            attachment = MIMEBase('application', 'octet-stream')
            attachment.set_payload(file_.read())
        encoders.encode_base64(attachment)
        attachment.add_header('Content-Disposition', 'attachment',
                              filename=os.path.basename(file_path))
        return attachment
    except IOError:
        traceback.print_exc()
        message = ('The requested file could not be read. Maybe wrong '
                   'permissions?')
        # BUG FIX: original used the Python 2 statement
        # ``print >> sys.stderr, message`` — a SyntaxError on Python 3.
        print(message, file=sys.stderr)
        sys.exit(6)
Get file as MIMEBase message
def get_detection_results(url, timeout, metadata=False, save_har=False):
    """Return results from the detector.

    Loads the plugins, fetches the response and passes it to the detector.
    Errors are raised to be handled externally.

    Raises:
        NoPluginsError: when no plugins could be loaded.
    """
    plugins = load_plugins()
    if not plugins:
        raise NoPluginsError('No plugins found')

    logger.debug('[+] Starting detection with %(n)d plugins', {'n': len(plugins)})
    response = get_response(url, plugins, timeout)

    if save_har:
        # Persist the HAR capture to a fresh temp file for later inspection.
        fd, path = tempfile.mkstemp(suffix='.har')
        logger.info(f'Saving HAR file to {path}')
        with open(fd, 'w') as f:
            json.dump(response['har'], f)

    detector = Detector(response, plugins, url)
    return {
        'url': url,
        'softwares': detector.get_results(metadata=metadata),
    }
Return results from detector. This function prepares the environment loading the plugins, getting the response and passing it to the detector. In case of errors, it raises exceptions to be handled externally.
def overview(index, start, end):
    """Compute metrics in the overview section for enriched git indexes.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data upto
    :return: dictionary mapping metric-category name to a list of metric
        objects (several categories are intentionally empty for git)
    """
    return {
        "activity_metrics": [Commits(index, start, end)],
        "author_metrics": [Authors(index, start, end)],
        "bmi_metrics": [],
        "time_to_close_metrics": [],
        "projects_metrics": [],
    }
Compute metrics in the overview section for enriched git indexes. Returns a dictionary. Each key in the dictionary is the name of a metric, the value is the value of that metric. Value can be a complex object (eg, a time series). :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics
def fix_e701(self, result):
    """Put a colon-separated compound statement on separate lines.

    ``result`` carries the 1-based ``line`` number and the ``column`` of
    the colon; returns the 1-based range of lines touched.
    """
    line_number = result['line']
    column = result['column']
    target = self.source[line_number - 1]
    head = target[:column]
    tail = target[column:].lstrip('\n\r \t\\')
    # Re-indent the statement body one level deeper than the header.
    self.source[line_number - 1] = (head + '\n' +
                                    _get_indentation(target) +
                                    self.indent_word + tail)
    return [line_number, line_number + 1]
Put colon-separated compound statement on separate lines.
def user_exists(username, domain='', database=None, **kwargs):
    """Find if a user exists in a specific database on the MS SQL server.

    ``domain``, if provided, will be prepended to ``username``.

    CLI Example:

    .. code-block:: bash

        salt minion mssql.user_exists 'USERNAME' [database='DBNAME']
    """
    if domain:
        username = '{0}\\{1}'.format(domain, username)
    if database:
        kwargs['database'] = database
    # NOTE(review): username is interpolated directly into the T-SQL string;
    # confirm callers never pass untrusted input, or parameterize the query.
    query = "SELECT name FROM sysusers WHERE name='{0}'".format(username)
    return len(tsql_query(query=query, **kwargs)) == 1
Find if a user exists in a specific database on the MS SQL server. domain, if provided, will be prepended to username CLI Example: .. code-block:: bash salt minion mssql.user_exists 'USERNAME' [database='DBNAME']
def StoreCSRFCookie(user, response):
    """Generate a CSRF token for ``user`` and store it as a cookie on
    ``response``.
    """
    csrf_token = GenerateCSRFToken(user, None)
    # BUG FIX: timedelta.seconds only yields the sub-day remainder
    # (0..86399), so any duration of one day or more produced a wrong
    # (possibly zero) max_age. Use total_seconds() instead.
    # NOTE(review): assumes CSRF_TOKEN_DURATION is a datetime.timedelta —
    # the original's use of `.seconds` implies it; confirm.
    response.set_cookie(
        "csrftoken",
        csrf_token,
        max_age=int(CSRF_TOKEN_DURATION.total_seconds()))
Generates a CSRF token for the user and inserts it as a cookie into the response.
def getEmpTraitCovar(self):
    """Return the empirical trait covariance matrix.

    For a single trait (``P == 1``) this is the scalar variance of the
    valid observations; otherwise the trait-by-trait covariance of the
    rows selected by ``self.Iok``.
    """
    valid = self.Y[self.Iok]
    if self.P == 1:
        return valid.var()
    return SP.cov(valid.T)
Returns the empirical trait covariance matrix
def _has_level_handler(logger): level = logger.getEffectiveLevel() current = logger while current: if any(handler.level <= level for handler in current.handlers): return True if not current.propagate: break current = current.parent return False
Check if there is a handler in the logging chain that will handle the given logger's effective level.
def update(self, data):
    """Update the metadata of one workgroup document.

    ``data`` must carry 'workGroup' and 'uuid' keys identifying the node.
    """
    self.debug(data)
    self._check(data)
    wg_uuid = data.get('workGroup')
    self.log.debug("wg_uuid : %s ", wg_uuid)
    node_uuid = data.get('uuid')
    url = "{base}/{wg}/nodes/{node}".format(
        base=self.local_base_url, wg=wg_uuid, node=node_uuid)
    return self.core.update(url, data)
Update meta of one document.
def _parse_price(html_chunk):
    """Parse the price of the book.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with
            details.

    Returns:
        str/None: Price as string with currency, or None if not found.
    """
    raw = get_first_content(html_chunk.find("div", {"class": "prices"}))
    if not raw:
        return None
    # Once markup is stripped, the price sits on the last line.
    return dhtmlparser.removeTags(raw).split("\n")[-1]
Parse price of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Price as string with currency or None if not found.
def add_chain(self, *nodes, _input=BEGIN, _output=None, _name=None):
    """Add a chain in this graph.

    Each node is appended and wired so that the output of one feeds the
    input of the next, starting from ``_input`` (BEGIN by default) and
    optionally ending at ``_output``. When ``_name`` is given, the first
    node of the chain is registered under that name.

    Returns a GraphRange covering the first and last nodes added (a
    (None, None) range when no nodes were given).
    """
    if len(nodes):
        _input = self._resolve_index(_input)
        _output = self._resolve_index(_output)
        _first = None
        _last = None
        for i, node in enumerate(nodes):
            _last = self.add_node(node)
            # Only the chain's first node gets the user-visible name.
            if not i and _name:
                if _name in self.named:
                    raise KeyError("Duplicate name {!r} in graph.".format(_name))
                self.named[_name] = _last
            if _first is None:
                _first = _last
            # Wire the previous node (or the chain input) to this one.
            self.outputs_of(_input, create=True).add(_last)
            _input = _last
        if _output is not None:
            self.outputs_of(_input, create=True).add(_output)
        # Invalidate the memoized topological sort, if it exists.
        # (Attribute name is misspelled but presumably matches the rest
        # of the class — do not "fix" it here.)
        if hasattr(self, "_topologcally_sorted_indexes_cache"):
            del self._topologcally_sorted_indexes_cache
        return GraphRange(self, _first, _last)
    return GraphRange(self, None, None)
Add a chain in this graph.
def edit(self, name, description=None, homepage=None, private=None,
         has_issues=None, has_wiki=None, has_downloads=None,
         default_branch=None):
    """Edit this repository.

    :param str name: (required), name of the repository
    :param str description: (optional), If not ``None``, change the
        description for this repository. API default: ``None`` - leave
        value unchanged.
    :param str homepage: (optional), If not ``None``, change the homepage
        for this repository. API default: ``None`` - leave value unchanged.
    :param bool private: (optional), If ``True``, make the repository
        private. If ``False``, make the repository public. API default:
        ``None`` - leave value unchanged.
    :param bool has_issues: (optional), enable/disable issues. API
        default: ``None`` - leave value unchanged.
    :param bool has_wiki: (optional), enable/disable the wiki. API
        default: ``None`` - leave value unchanged.
    :param bool has_downloads: (optional), enable/disable downloads. API
        default: ``None`` - leave value unchanged.
    :param str default_branch: (optional), If not ``None``, change the
        default branch. API default: ``None`` - leave value unchanged.
    :returns: bool -- True if successful, False otherwise
    """
    edit = {'name': name, 'description': description, 'homepage': homepage,
            'private': private, 'has_issues': has_issues,
            'has_wiki': has_wiki, 'has_downloads': has_downloads,
            'default_branch': default_branch}
    # Drop unchanged (None) values so the API leaves them untouched.
    self._remove_none(edit)
    json = None
    if edit:
        json = self._json(self._patch(self._api, data=dumps(edit)), 200)
        self._update_(json)
        return True
    return False
Edit this repository. :param str name: (required), name of the repository :param str description: (optional), If not ``None``, change the description for this repository. API default: ``None`` - leave value unchanged. :param str homepage: (optional), If not ``None``, change the homepage for this repository. API default: ``None`` - leave value unchanged. :param bool private: (optional), If ``True``, make the repository private. If ``False``, make the repository public. API default: ``None`` - leave value unchanged. :param bool has_issues: (optional), If ``True``, enable issues for this repository. If ``False``, disable issues for this repository. API default: ``None`` - leave value unchanged. :param bool has_wiki: (optional), If ``True``, enable the wiki for this repository. If ``False``, disable the wiki for this repository. API default: ``None`` - leave value unchanged. :param bool has_downloads: (optional), If ``True``, enable downloads for this repository. If ``False``, disable downloads for this repository. API default: ``None`` - leave value unchanged. :param str default_branch: (optional), If not ``None``, change the default branch for this repository. API default: ``None`` - leave value unchanged. :returns: bool -- True if successful, False otherwise
def draw_img_button(width=200, height=50, text='This is a button', color=rgb(200, 100, 50)):
    """Draw a simple image button and write it to 'button.png'."""
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    ctx = cairo.Context(surface)
    # Filled background rectangle in the requested colour (components 0-255).
    ctx.rectangle(0, 0, width - 1, height - 1)
    ctx.set_source_rgb(color.red/255.0, color.green/255.0, color.blue/255.0)
    ctx.fill()
    # White bold Helvetica label, positioned roughly two-thirds of the way down.
    ctx.set_source_rgb(1.0, 1.0, 1.0)
    ctx.select_font_face(
        "Helvetica",
        cairo.FONT_SLANT_NORMAL,
        cairo.FONT_WEIGHT_BOLD
    )
    ctx.set_font_size(15.0)
    ctx.move_to(15, 2 * height / 3)
    ctx.show_text(text)
    surface.write_to_png('button.png')
Draws a simple image button.
def auth_db_connect(db_path):
    """Open the SQLite database used to store authentication transients.

    Holds tokens (strings of random data signed by the client) and
    session_tokens (which identify authenticated users).
    """
    def dict_factory(cursor, row):
        # Expose rows as {column_name: value} dicts instead of tuples.
        return {col[0] : row[idx] for idx,col in enumerate(cursor.description)}
    conn = db.connect(db_path)
    conn.row_factory = dict_factory
    # One-time schema creation, tracked via a function attribute that is
    # presumably initialised to False elsewhere in this module — confirm.
    if not auth_db_connect.init:
        conn.execute('create table if not exists tokens (expires int, token text, ip text)')
        conn.execute('create table if not exists session_tokens (expires int, token text, ip text, username text)')
        auth_db_connect.init = True
    return conn
An SQLite database is used to store authentication transient data, this is tokens, strings of random data which are signed by the client, and session_tokens which identify authenticated users
def read(self, size=None):
    """Read at most ``size`` bytes from this slice.

    If the underlying stream returns no bytes while this slice still
    expects more, exceptions.StreamExhausted is raised.

    Args:
        size: If provided, read no more than size bytes.

    Returns:
        The bytes read from this slice.
    """
    if size is None:
        to_read = self.__remaining_bytes
    else:
        to_read = min(size, self.__remaining_bytes)
    data = self.__stream.read(to_read)
    if to_read > 0 and not data:
        raise exceptions.StreamExhausted(
            'Not enough bytes in stream; expected %d, exhausted '
            'after %d' % (
                self.__max_bytes,
                self.__max_bytes - self.__remaining_bytes))
    self.__remaining_bytes -= len(data)
    return data
Read at most size bytes from this slice. Compared to other streams, there is one case where we may unexpectedly raise an exception on read: if the underlying stream is exhausted (i.e. returns no bytes on read), and the size of this slice indicates we should still be able to read more bytes, we raise exceptions.StreamExhausted. Args: size: If provided, read no more than size bytes from the stream. Returns: The bytes read from this slice. Raises: exceptions.StreamExhausted
def _register_make(cls):
    """Class decorator registering ``cls.make`` for each NXM header.

    Maps every header in ``cls.nxm_headers`` to ``cls.make`` in the
    module-level _MF_FIELDS registry; duplicate registrations are
    rejected.
    """
    assert cls.nxm_headers is not None
    # BUG FIX: the original asserted ``cls.nxm_headers is not []``, an
    # identity comparison against a fresh list that is ALWAYS true, so an
    # empty header list slipped through. Compare by equality instead.
    assert cls.nxm_headers != []
    for nxm_header in cls.nxm_headers:
        assert nxm_header not in _MF_FIELDS
        _MF_FIELDS[nxm_header] = cls.make
    return cls
class decorator to Register mf make
def export(self, fn: "PathOrStr", **kwargs):
    "Export the minimal state and save it in `fn` to load an empty version for inference."
    # BUG FIX: the original passed a bare open() handle to pickle.dump,
    # leaking it; the with-block guarantees the file is closed/flushed.
    # (The annotation is quoted so the module stays importable when
    # PathOrStr is not in scope.)
    with open(fn, 'wb') as f:
        pickle.dump(self.get_state(**kwargs), f)
Export the minimal state and save it in `fn` to load an empty version for inference.
def join(self, *args, **kwargs):
    """Join the thread and return its result.

    Args:
        args: forwarded to ``threading.Thread.join``.
        kwargs: forwarded to ``threading.Thread.join``.

    Returns:
        The value captured in ``self._return`` (presumably set by an
        overridden ``run`` — confirm against the class definition).
    """
    super(ThreadReturn, self).join(*args, **kwargs)
    return self._return
Joins the thread. Args: self (ThreadReturn): the ``ThreadReturn`` instance args: optional list of arguments kwargs: optional key-word arguments Returns: The return value of the exited thread.
def tag_implications(self, name_matches=None, antecedent_name=None, tag_id=None):
    """Get tag implications.

    Parameters:
        name_matches (str): Match antecedent or consequent name.
        antecedent_name (str): Match antecedent name (exact match).
        tag_id (int): Tag implication id.
    """
    return self._get('tag_implications.json', {
        'search[name_matches]': name_matches,
        'search[antecedent_name]': antecedent_name,
        'search[id]': tag_id,
    })
Get tag implications. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): Tag implication id.
def delete(ctx, slot, force):
    """Delete the configuration of a slot.

    Refuses to operate on an empty slot and asks for confirmation unless
    ``force`` is set.
    """
    controller = ctx.obj['controller']
    if not force and not controller.slot_status[slot - 1]:
        ctx.fail('Not possible to delete an empty slot.')
    if not force:
        click.confirm(
            'Do you really want to delete'
            ' the configuration of slot {}?'.format(slot),
            abort=True, err=True)
    click.echo('Deleting the configuration of slot {}...'.format(slot))
    try:
        controller.zap_slot(slot)
    except YkpersError as e:
        _failed_to_write_msg(ctx, e)
Deletes the configuration of a slot.
def naturalday(value, format='%b %d'):
    """For date values that are tomorrow, today or yesterday compared to
    the present day, return a representing string; otherwise format the
    date according to ``format``.

    Non-date values (and dates outside the supported range) are returned
    unchanged.
    """
    try:
        value = date(value.year, value.month, value.day)
    except AttributeError:
        # Not date-like at all: pass through untouched.
        return value
    except (OverflowError, ValueError):
        # e.g. datetime.max — date arithmetic would be meaningless.
        return value
    days_away = (value - date.today()).days
    if days_away == 0:
        return _('today')
    if days_away == 1:
        return _('tomorrow')
    if days_away == -1:
        return _('yesterday')
    return value.strftime(format)
For date values that are tomorrow, today or yesterday compared to present day returns representing string. Otherwise, returns a string formatted according to ``format``.
def parse(self, input):
    """Parse a time delta from the input.

    ISO 8601 is tried first, then the simple format. Raises
    ParameterException when neither parser yields a result.
    """
    result = self._parseIso8601(input) or self._parseSimple(input)
    if result is not None:
        return result
    raise ParameterException("Invalid time delta - could not parse %s" % input)
Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats.
def requirements(requirements_file):
    """Return packages mentioned in the given file.

    Args:
        requirements_file (str): path to the requirements file to be parsed.

    Returns:
        (list): 3rd-party package dependencies contained in the file.
    """
    session = pip_download.PipSession()
    return [str(pkg.req)
            for pkg in parse_requirements(requirements_file, session=session)
            if pkg.req is not None]
Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file.
def get_brandings(self):
    """Get all account brandings.

    @return List of brandings
    """
    conn = Connection(self.token)
    conn.set_url(self.production, self.BRANDINGS_URL)
    return conn.get_request()
Get all account brandings @return List of brandings
def get_array_indices(self):
    """Return an iterator over the token lists found inside each pair of
    square brackets."""
    for tok in self.tokens:
        if isinstance(tok, SquareBrackets):
            # Strip the '[' and ']' delimiter tokens, keep the interior.
            yield tok.tokens[1:-1]
Returns an iterator of index token lists
def save_config(
        self, cmd="configuration write", confirm=False, confirm_response=""
):
    """Save the configuration on Mellanox devices.

    Enters config mode, issues ``cmd`` and leaves config mode again,
    returning the combined device output. ``confirm``/``confirm_response``
    are accepted for API compatibility but unused here.
    """
    return ''.join([
        self.enable(),
        self.config_mode(),
        self.send_command(cmd),
        self.exit_config_mode(),
    ])
Save Config on Mellanox devices Enters and Leaves Config Mode
def add_node(self, name, desc, layout, node_x, node_y):
    """Add a node to a network.

    Raises HydraError when a node with the same name already exists in
    this network.
    """
    duplicate = get_session().query(Node).filter(
        Node.name == name, Node.network_id == self.id).first()
    if duplicate is not None:
        raise HydraError("A node with name %s is already in network %s" % (name, self.id))

    node = Node()
    node.name = name
    node.description = desc
    node.layout = str(layout) if layout is not None else None
    node.x = node_x
    node.y = node_y

    get_session().add(node)
    self.nodes.append(node)
    return node
Add a node to a network.
def translate_alias(self, alias, namespace=None, target_namespaces=None,
                    translate_ncbi_namespace=None):
    """Given an alias and optional namespace, return a list of all other
    aliases for the same sequence, optionally restricted to
    ``target_namespaces``.
    """
    if translate_ncbi_namespace is None:
        translate_ncbi_namespace = self.translate_ncbi_namespace
    seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
    records = self.aliases.fetch_aliases(
        seq_id=seq_id, translate_ncbi_namespace=translate_ncbi_namespace)
    if not target_namespaces:
        return records
    return [rec for rec in records if rec["namespace"] in target_namespaces]
given an alias and optional namespace, return a list of all other aliases for same sequence
def create_binding(self, key, shard=None, public=False, special_lobby_binding=False):
    """Used by Interest instances.

    Creates a messaging binding for ``key`` on ``shard`` (defaulting to
    this agent's shard), broadcast when ``public`` is set.
    """
    shard = shard or self.get_shard_id()
    recp_factory = recipient.Broadcast if public else recipient.Agent
    binding = self._messaging.create_binding(recp_factory(key, shard))
    if special_lobby_binding:
        # Mark the binding so it survives shard changes.
        self._bindings_preserved_on_shard_change[binding] = True
    return binding
Used by Interest instances.
def clean_egginfo(self):
    """Clean the .egg-info directory under the project root."""
    self._clean_directory(os.path.join(self.root, self.get_egginfo_dir()))
Clean .egginfo directory
def modifie(self, key: str, value: Any) -> None:
    """Store the modification. `value` should be dumped in DB compatible
    format.

    Option fields are routed to ``modifie_options``; everything else is
    recorded in ``self.modifications``.
    """
    if key in self.FIELDS_OPTIONS:
        self.modifie_options(key, value)
        return
    self.modifications[key] = value
Store the modification. `value` should be dumped in DB compatible format.
def _coords(shape): assert shape.geom_type == 'Polygon' coords = [list(shape.exterior.coords)] for interior in shape.interiors: coords.append(list(interior.coords)) return coords
Return a list of lists of coordinates of the polygon. The list consists firstly of the list of exterior coordinates followed by zero or more lists of any interior coordinates.
def save(self):
    """Save the current configuration values to the configuration file.

    Writes one entry per key known to ``self.defaults``, taking the live
    value from ``self.data``.
    """
    for key in self.defaults.__dict__:
        self.cfg_file.Write(key, getattr(self.data, key))
Saves configuration file
def hdr(data, filename):
    """Write an ENVI header file.

    Parameters
    ----------
    data: str or dict
        the file or dictionary to get the info from
    filename: str
        the HDR file to write

    Returns
    -------
    None
    """
    source = data if isinstance(data, HDRobject) else HDRobject(data)
    source.write(filename)
write ENVI header files Parameters ---------- data: str or dict the file or dictionary to get the info from filename: str the HDR file to write Returns ------- None
def add_child(self, child):
    """Add a branch to the current tree and refresh the upward depth."""
    child.parent = self
    self.children.append(child)
    # Depth is one more than the deepest direct child.
    self.udepth = 1 + max(node.udepth for node in self.children)
Adds a branch to the current tree.
def add_writable_file_volume(self, runtime, volume, host_outdir_tgt, tmpdir_prefix):
    """Append a writable file mapping to the runtime option list.

    With inplace updates enabled the original file is mounted writable;
    otherwise a private copy is made (either at host_outdir_tgt or in a
    fresh temp dir) and that copy is mounted/marked writable instead.
    """
    if self.inplace_update:
        self.append_volume(runtime, volume.resolved, volume.target, writable=True)
    else:
        if host_outdir_tgt:
            # Copy into the designated host output location.
            if not os.path.exists(os.path.dirname(host_outdir_tgt)):
                os.makedirs(os.path.dirname(host_outdir_tgt))
            shutil.copy(volume.resolved, host_outdir_tgt)
        else:
            # No host target: copy into a fresh temp dir and mount the copy.
            tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
            tmpdir = tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
            file_copy = os.path.join(
                tmpdir, os.path.basename(volume.resolved))
            shutil.copy(volume.resolved, file_copy)
            self.append_volume(runtime, file_copy, volume.target, writable=True)
        # file_copy is only bound on the tmpdir path, but that is exactly
        # the path where host_outdir_tgt is falsy, so the `or` is safe.
        ensure_writable(host_outdir_tgt or file_copy)
Append a writable file mapping to the runtime option list.
def zip(self, store=False, store_params=None):
    """Return a zip file of the current transformation.

    This is different from the zip function that lives on the Filestack
    Client.

    *returns* [Filestack.Transform]
    """
    # locals() must be captured before any other local is created: it
    # snapshots the call arguments to forward as task parameters.
    # NOTE(review): the snapshot also contains 'self' — presumably
    # add_transform_task tolerates/strips it; confirm.
    params = locals()
    params.pop('store')
    params.pop('store_params')
    new_transform = self.add_transform_task('zip', params)
    if store:
        return new_transform.store(**store_params) if store_params else new_transform.store()
    return utils.make_call(CDN_URL, 'get', transform_url=new_transform.url)
Returns a zip file of the current transformation. This is different from the zip function that lives on the Filestack Client *returns* [Filestack.Transform]
def _instantiate_task(api, kwargs):
    """Create a Task object from raw kwargs returned by the API.

    Normalizes camelCase keys to snake_case, converts timestamps to UTC
    datetimes, and attaches transferred tasks to the downloads directory.
    NOTE: mutates ``kwargs`` in place.
    """
    file_id = kwargs['file_id']
    # A blank file_id means the task has no associated file yet.
    kwargs['file_id'] = file_id if str(file_id).strip() else None
    kwargs['cid'] = kwargs['file_id'] or None
    kwargs['rate_download'] = kwargs['rateDownload']
    kwargs['percent_done'] = kwargs['percentDone']
    kwargs['add_time'] = get_utcdatetime(kwargs['add_time'])
    kwargs['last_update'] = get_utcdatetime(kwargs['last_update'])
    # status == 2 with move == 1 marks a finished download that has been
    # moved into the downloads directory.
    is_transferred = (kwargs['status'] == 2 and kwargs['move'] == 1)
    if is_transferred:
        kwargs['pid'] = api.downloads_directory.cid
    else:
        kwargs['pid'] = None
    # Drop the camelCase originals now that they have been renamed.
    del kwargs['rateDownload']
    del kwargs['percentDone']
    # Normalize a missing or empty url to None.
    if 'url' in kwargs:
        if not kwargs['url']:
            kwargs['url'] = None
    else:
        kwargs['url'] = None
    task = Task(api, **kwargs)
    if is_transferred:
        task._parent = api.downloads_directory
    return task
Create a Task object from raw kwargs
def search(self):
    """Handle the search request.

    Builds the filtered, ordered and permission-checked search, guarding
    against result sets larger than ELASTICSEARCH_SIZE.
    """
    search = self.document_class().search()
    for refine in (self.custom_filter, self.filter_search,
                   self.order_search, self.filter_permissions):
        search = refine(search)
    if search.count() > ELASTICSEARCH_SIZE:
        limit = self.paginator.get_limit(self.request)
        if not limit or limit > ELASTICSEARCH_SIZE:
            raise TooManyResults()
        # Cap the window so Elasticsearch doesn't reject the query.
        search = search.extra(size=ELASTICSEARCH_SIZE)
    return search
Handle the search request.
def get_session(username, password, pin, cookie_path=COOKIE_PATH):
    """Get a new session, reusing a saved cookie jar when one exists."""
    class MoparAuth(AuthBase):
        # Carries the credentials on the session; the outgoing request
        # itself is returned unmodified.
        def __init__(self, username, password, pin, cookie_path):
            self.username = username
            self.password = password
            self.pin = pin
            self.cookie_path = cookie_path

        def __call__(self, r):
            return r

    session = requests.session()
    session.auth = MoparAuth(username, password, pin, cookie_path)
    session.headers.update({'User-Agent': USER_AGENT})
    if os.path.exists(cookie_path):
        # Reuse the saved cookies instead of logging in again.
        _LOGGER.info("cookie found at: %s", cookie_path)
        session.cookies = _load_cookies(cookie_path)
    else:
        _login(session)
    return session
Get a new session.
def multigraph_collect(G, traversal, attrib=None):
    """Given a MultiDiGraph traversal, collect attributes along it.

    Parameters
    -------------
    G:         networkx.MultiDiGraph
    traversal: (n) list of (node, instance) tuples
    attrib:    key to collect; if None, the whole attribute dict is kept

    Returns
    -------------
    collected: (len(traversal) - 1) list of attributes
    """
    if attrib is None:
        return [G[u[0]][v[0]][v[1]] for u, v in util.pairwise(traversal)]
    return [G[u[0]][v[0]][v[1]][attrib] for u, v in util.pairwise(traversal)]
Given a MultiDiGraph traversal, collect attributes along it. Parameters ------------- G: networkx.MultiDiGraph traversal: (n) list of (node, instance) tuples attrib: dict key, name to collect. If None, will return all Returns ------------- collected: (len(traversal) - 1) list of attributes
def advance_page(self):
    """Force moving the cursor to the next azure call.

    This method is for advanced usage, iterator protocol is preferred.

    :raises: StopIteration if no further page
    :return: The current page list
    :rtype: list
    """
    if self.next_link is None:
        raise StopIteration("End of paging")
    self._current_page_iter_index = 0
    self._response = self._get_next(self.next_link)
    # NOTE(review): attribute name is misspelled ("derserializer") but
    # presumably matches the deserializer set on this class — confirm.
    self._derserializer(self, self._response)
    return self.current_page
Force moving the cursor to the next azure call. This method is for advanced usage, iterator protocol is preferred. :raises: StopIteration if no further page :return: The current page list :rtype: list
def get_push_pop_stack():
    """Create pop and push nodes for substacks that are linked.

    Returns:
        A (push, pop, op_id) triple. The push node carries `pop` and
        `gen_push` annotations and the pop node a `push` annotation,
        linking each to its counterpart.
    """
    push = copy.deepcopy(PUSH_STACK)
    pop = copy.deepcopy(POP_STACK)
    # Cross-link the pair so later passes can match them up.
    anno.setanno(push, 'pop', pop)
    anno.setanno(push, 'gen_push', True)
    anno.setanno(pop, 'push', push)
    return push, pop, _generate_op_id()
Create pop and push nodes for substacks that are linked. Returns: A push and pop node which have `push_func` and `pop_func` annotations respectively, identifying them as such. They also have a `pop` and `push` annotation respectively, which links the push node to the pop node and vice versa.
def get_role(self, request: Request):
    """Return the role stored for the request's identifier DID.

    None roles are stored as empty strings, so a None return from this
    function means the corresponding DID is not stored in a ledger.
    """
    return self._get_role(request.identifier)
None roles are stored as empty strings, so the role returned as None by this function means that corresponding DID is not stored in a ledger.
def get(self, sched_rule_id):
    """Retrieve the information for a scheduleRule entity."""
    return self.rachio.get('/'.join(['schedulerule', sched_rule_id]))
Retrieve the information for a scheduleRule entity.
def connect(port, baud=115200, user='micro', password='python', wait=0):
    """Try to connect automagically via network or serial.

    If ``port`` resolves as a hostname, connect over telnet; any name
    resolution failure (including one raised from connect_telnet) falls
    back to a serial connection.
    """
    try:
        ip_address = socket.gethostbyname(port)
        connect_telnet(port, ip_address, user=user, password=password)
    except socket.gaierror:
        # Not resolvable as a host: treat `port` as a serial device.
        connect_serial(port, baud=baud, wait=wait)
Tries to connect automagically via network or serial.
def line(ax, p1, p2, permutation=None, **kwargs):
    """Draw a line on `ax` from p1 to p2.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot
        The subplot to draw on.
    p1, p2: 2-tuple
        The (x, y) starting and ending coordinates (projected before
        drawing).
    kwargs:
        Any kwargs to pass through to Matplotlib.
    """
    start = project_point(p1, permutation=permutation)
    end = project_point(p2, permutation=permutation)
    ax.add_line(Line2D((start[0], end[0]), (start[1], end[1]), **kwargs))
Draws a line on `ax` from p1 to p2. Parameters ---------- ax: Matplotlib AxesSubplot, None The subplot to draw on. p1: 2-tuple The (x,y) starting coordinates p2: 2-tuple The (x,y) ending coordinates kwargs: Any kwargs to pass through to Matplotlib.
def write_zip(self, resources=None, dumpfile=None):
    """Write a ZIP format dump file.

    Writes a ZIP containing the resources in the iterable ``resources``
    along with a manifest file manifest.xml (written first). No checks on
    file or total size are performed; this is expected to have been done
    beforehand. NOTE: each resource's ``path`` is rewritten in place to
    its archive path.
    """
    compression = (ZIP_DEFLATED if self.compress else ZIP_STORED)
    zf = ZipFile(
        dumpfile, mode="w", compression=compression, allowZip64=True)
    rdm = ResourceDumpManifest(resources=resources)
    real_path = {}
    for resource in resources:
        archive_path = self.archive_path(resource.path)
        # Remember where the real file lives, then point the resource at
        # its in-archive path so the manifest records archive paths.
        real_path[archive_path] = resource.path
        resource.path = archive_path
    zf.writestr('manifest.xml', rdm.as_xml())
    for resource in resources:
        zf.write(real_path[resource.path], arcname=resource.path)
    zf.close()
    zipsize = os.path.getsize(dumpfile)
    self.logger.info(
        "Wrote ZIP file dump %s with size %d bytes" % (dumpfile, zipsize))
Write a ZIP format dump file. Writes a ZIP file containing the resources in the iterable resources along with a manifest file manifest.xml (written first). No checks on the size of files or total size are performed, this is expected to have been done beforehand.
def gettempdir():
    """Accessor for tempfile.tempdir; computes and caches the default
    directory on first use.

    Uses double-checked locking so concurrent first calls compute the
    default only once.
    """
    global tempdir
    if tempdir is None:
        _once_lock.acquire()
        try:
            # Re-check under the lock: another thread may have set it
            # between our first check and acquiring the lock.
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
Accessor for tempfile.tempdir.
def get_structure_from_mp(formula):
    """Convenience method to get a crystal from the Materials Project
    database via the API. Requires PMG_MAPI_KEY to be set.

    Args:
        formula (str): A formula

    Returns:
        (Structure) The lowest energy structure in Materials Project
        with that formula.
    """
    entries = MPRester().get_entries(formula, inc_structure="final")
    if not entries:
        raise ValueError("No structure with formula %s in Materials Project!" %
                         formula)
    if len(entries) > 1:
        warnings.warn("%d structures with formula %s found in Materials "
                      "Project. The lowest energy structure will be returned." %
                      (len(entries), formula))
    return min(entries, key=lambda e: e.energy_per_atom).structure
Convenience method to get a crystal from the Materials Project database via the API. Requires PMG_MAPI_KEY to be set. Args: formula (str): A formula Returns: (Structure) The lowest energy structure in Materials Project with that formula.
def tryPrepare(self, pp: PrePrepare):
    """Try to send the Prepare message if the PrePrepare message is ready
    to be passed into the Prepare phase; otherwise log why it cannot.
    """
    can_send, reason = self.canPrepare(pp)
    if can_send:
        self.doPrepare(pp)
    else:
        self.logger.debug("{} cannot send PREPARE since {}".format(self, reason))
Try to send the Prepare message if the PrePrepare message is ready to be passed into the Prepare phase.
def lookUpReportsByCountry(self, countryName):
    """Look up a country by its name and fetch its reports list.

    Inputs:
        countryName - name of the country to get the reports list.
    """
    code = self.findCountryTwoDigitCode(countryName)
    if code is None:
        raise Exception("Invalid country name.")
    url = "%s%s/%s" % (self._base_url, self._url_list_reports, code)
    return self._post(url=url,
                      param_dict={"f": "json"},
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
Looks up a country by its name. Inputs: countryName - name of the country to get the reports list.
def push_object(self, channel_id, obj):
    """Push ``obj`` for ``channel_id``.

    ``obj`` is encoded as JSON, with double quotes escaped for the
    transport, before being pushed.
    """
    payload = json.dumps(obj).replace('"', '\\"')
    return self.push(channel_id, payload)
Push ``obj`` for ``channel_id``. ``obj`` will be encoded as JSON in the request.
def _list_syntax_error(): _, e, _ = sys.exc_info() if isinstance(e, SyntaxError) and hasattr(e, 'filename'): yield path.dirname(e.filename)
If we're going through a syntax error, add the directory of the error to the watchlist.
def spots_at(self, x, y):
    """Iterate over spots that collide with the given point."""
    for candidate in self.spot.values():
        if candidate.collide_point(x, y):
            yield candidate
Iterate over spots that collide the given point.
def read(self, memory, addr, length):
    """Read the specified amount of bytes from the given memory at the
    given address.

    Returns False when a read for this memory is already in flight,
    True once the new request has been started.
    """
    if memory.id in self._read_requests:
        logger.warning('There is already a read operation ongoing for '
                       'memory id {}'.format(memory.id))
        return False
    request = _ReadRequest(memory, addr, length, self.cf)
    self._read_requests[memory.id] = request
    request.start()
    return True
Read the specified amount of bytes from the given memory at the given address
def crop(self, start_timestamp, end_timestamp):
    """Return a new TimeSeries object containing all the timestamps and
    values within the specified (inclusive) range.

    :param int start_timestamp: the start timestamp value
    :param int end_timestamp: the end timestamp value
    :return: :class:`TimeSeries` object.
    :raises ValueError: when nothing falls inside the range.
    """
    selected = {ts: value for ts, value in self.items()
                if start_timestamp <= ts <= end_timestamp}
    if not selected:
        raise ValueError('TimeSeries data was empty or invalid.')
    return TimeSeries(selected)
Return a new TimeSeries object contains all the timstamps and values within the specified range. :param int start_timestamp: the start timestamp value :param int end_timestamp: the end timestamp value :return: :class:`TimeSeries` object.
def _check_email_match(self, user, email): if user.email != email: raise CommandError( _( 'Skipping user "{}" because the specified and existing email ' 'addresses do not match.' ).format(user.username) )
DRY helper. Requiring the user to specify both username and email will help catch certain issues, for example if the expected username has already been taken by someone else.
def type(self, mpath):
    """Return the manta type for the given manta path.

    @param mpath {str} The manta path for which to get the type.
    @returns {str|None} The manta type, e.g. "object" or "directory",
        or None if the path doesn't exist.
    """
    try:
        return self.stat(mpath)["type"]
    except errors.MantaResourceNotFoundError:
        return None
    except errors.MantaAPIError:
        ex = sys.exc_info()[1]
        # Treat "not found" flavours of API errors as a missing path too.
        if ex.code in ('ResourceNotFound', 'DirectoryDoesNotExist'):
            return None
        raise
Return the manta type for the given manta path. @param mpath {str} The manta path for which to get the type. @returns {str|None} The manta type, e.g. "object" or "directory", or None if the path doesn't exist.
def create(sld, tld, nameserver, ip):
    """Create a new nameserver. Returns ``True`` if the nameserver was
    created successfully.

    sld         SLD of the domain name
    tld         TLD of the domain name
    nameserver  Nameserver to create
    ip          Nameserver IP address

    CLI Example:

    .. code-block:: bash

        salt '*' namecheap_domains_ns.create sld tld nameserver ip
    """
    opts = salt.utils.namecheap.get_opts('namecheap.domains.ns.create')
    opts.update({'SLD': sld, 'TLD': tld, 'Nameserver': nameserver, 'IP': ip})
    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return False
    result = response_xml.getElementsByTagName('DomainNSCreateResult')[0]
    return salt.utils.namecheap.string_to_value(result.getAttribute('IsSuccess'))
Creates a new nameserver. Returns ``True`` if the nameserver was created successfully. sld SLD of the domain name tld TLD of the domain name nameserver Nameserver to create ip Nameserver IP address CLI Example: .. code-block:: bash salt '*' namecheap_domains_ns.create sld tld nameserver ip
def write_table_to_file(self, dtype, custom_name=None, append=False, dir_path=None):
    """Write out a MagIC table to file, using custom filename as
    specified in self.filenames.

    Parameters
    ----------
    dtype : str
        magic table name
    """
    fname = custom_name if custom_name else self.filenames[dtype]
    if not dir_path:
        dir_path = self.directory
    if dtype not in self.tables:
        # Nothing to write for an unknown table.
        return None
    write_df = self.remove_names(dtype)
    return self.tables[dtype].write_magic_file(custom_name=fname,
                                               dir_path=dir_path,
                                               append=append, df=write_df)
Write out a MagIC table to file, using custom filename as specified in self.filenames. Parameters ---------- dtype : str magic table name
def tangent_surface_single_list(obj, param_list, normalize):
    """Evaluate the surface tangent vectors at the given list of parameter
    values.

    :param obj: input surface
    :type obj: abstract.Surface
    :param param_list: parameter list
    :type param_list: list or tuple
    :param normalize: if True, the returned vector is converted to a unit
        vector
    :type normalize: bool
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    return tuple(tangent_surface_single(obj, param, normalize)
                 for param in param_list)
Evaluates the surface tangent vectors at the given list of parameter values. :param obj: input surface :type obj: abstract.Surface :param param_list: parameter list :type param_list: list or tuple :param normalize: if True, the returned vector is converted to a unit vector :type normalize: bool :return: a list containing "point" and "vector" pairs :rtype: tuple
def list_metering_labels(self, retrieve_all=True, **_params):
    """Fetch a list of all metering labels for a project."""
    return self.list('metering_labels', self.metering_labels_path,
                     retrieve_all, **_params)
Fetches a list of all metering labels for a project.
def serialize_compound(self, tag):
    """Return the literal representation of a compound tag.

    Joins ``key: value`` pairs with the serializer's comma separator and
    wraps them in braces; switches to the expanded (multi-line) form when
    ``should_expand`` says so at the current nesting depth.
    """
    separator, fmt = self.comma, '{{{}}}'

    with self.depth():
        if self.should_expand(tag):
            # Expanded form: separator/format are rewritten for the
            # indented multi-line layout.
            separator, fmt = self.expand(separator, fmt)

        return fmt.format(separator.join(
            f'{self.stringify_compound_key(key)}{self.colon}{self.serialize(value)}'
            for key, value in tag.items()
        ))
Return the literal representation of a compound tag.
def emulate_abs(self, x_val, y_val, timeval):
    """Emulate the absolute coordinates of the mouse cursor.

    Builds one "Absolute" event per axis (0x00 = X, 0x01 = Y) at the
    given timestamp and returns them as an (x_event, y_event) pair.
    """
    x_event, y_event = (
        self.create_event_object("Absolute", axis, value, timeval)
        for axis, value in ((0x00, x_val), (0x01, y_val))
    )
    return x_event, y_event
Emulate the absolute co-ordinates of the mouse cursor.
def copy_data(data_length, blocksize, infp, outfp):
    """Copy ``data_length`` bytes from ``infp`` to ``outfp``.

    Uses os.sendfile when both objects expose real file descriptors (the
    fast, kernel-side path); otherwise falls back to a read/write loop in
    ``blocksize`` chunks.

    Parameters:
     data_length - The amount of data to copy.
     blocksize - How much data to copy per iteration.
     infp - The file object to copy data from.
     outfp - The file object to copy data to.
    Returns:
     Nothing.
    """
    use_sendfile = False
    if have_sendfile:
        # Only objects backed by a real fd can use sendfile; in-memory
        # file-likes raise AttributeError or io.UnsupportedOperation.
        try:
            x_unused = infp.fileno()
            y_unused = outfp.fileno()
            use_sendfile = True
        except (AttributeError, io.UnsupportedOperation):
            pass

    if use_sendfile:
        # sendfile bypasses the Python-level file objects, so their
        # positions must be advanced manually afterwards (hence the seeks).
        in_offset = infp.tell()
        out_offset = outfp.tell()
        sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
        infp.seek(in_offset + data_length)
        outfp.seek(out_offset + data_length)
    else:
        left = data_length
        readsize = blocksize
        while left > 0:
            if left < readsize:
                readsize = left
            data = infp.read(readsize)
            data_len = len(data)
            if data_len != readsize:
                # Short read: treat it as the final chunk so the loop
                # terminates instead of spinning on a truncated source.
                data_len = left
            outfp.write(data)
            left -= data_len
A utility function to copy data from the input file object to the output file object. This function will use the most efficient copy method available, which is often sendfile. Parameters: data_length - The amount of data to copy. blocksize - How much data to copy per iteration. infp - The file object to copy data from. outfp - The file object to copy data to. Returns: Nothing.
def execute_go_cmd(self, cmd, gopath=None, args=None, env=None,
                   workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs):
    """Run a Go command, optionally inside a work unit context.

    :param string cmd: Go subcommand, e.g. 'test' for `go test`.
    :param string gopath: optional $GOPATH workspace to run within.
    :param list args: extra arguments/flags for the Go command.
    :param dict env: environment for the subprocess (current env if None).
    :param workunit_factory: optional callable producing a WorkUnit context.
    :param string workunit_name: work unit name; defaults to ``cmd``.
    :param list workunit_labels: extra labels for the work unit.
    :param kwargs: passed through to subprocess.Popen.
    :returns: (returncode, go_cmd) when a workunit_factory is given.
        NOTE(review): the no-factory branch returns only the exit code,
        not the documented tuple -- confirm which shape callers expect.
    """
    go_cmd = self.create_go_cmd(cmd, gopath=gopath, args=args)
    if workunit_factory is None:
        # No work unit requested: run, wait, and return the exit code.
        return go_cmd.spawn(**kwargs).wait()
    else:
        name = workunit_name or cmd
        labels = [WorkUnitLabel.TOOL] + (workunit_labels or [])
        with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit:
            # Wire the subprocess's stdout/stderr into the work unit's
            # output streams so the UI can show them live.
            process = go_cmd.spawn(env=env,
                                   stdout=workunit.output('stdout'),
                                   stderr=workunit.output('stderr'),
                                   **kwargs)
            returncode = process.wait()
            workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
            return returncode, go_cmd
Runs a Go command that is optionally targeted to a Go workspace. If a `workunit_factory` is supplied the command will run in a work unit context. :param string cmd: Go command to execute, e.g. 'test' for `go test` :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run the command. :param list args: An optional list of arguments and flags to pass to the Go command. :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param workunit_factory: An optional callable that can produce a `WorkUnit` context :param string workunit_name: An optional name for the work unit; defaults to the `cmd` :param list workunit_labels: An optional sequence of labels for the work unit. :param kwargs: Keyword arguments to pass through to `subprocess.Popen`. :returns: A tuple of the exit code and the go command that was run. :rtype: (int, :class:`GoDistribution.GoCommand`)
def filter_model_items(index_instance, model_items, model_name, start_date, end_date):
    """Restrict ``model_items`` to the [start_date, end_date] window.

    Filters on the index's ``updated_field``; when the index declares no
    such field the queryset is returned unfiltered and a warning is
    logged.  Either bound may be falsy to leave that side open.
    """
    updated_field = index_instance.updated_field
    if updated_field is None:
        logger.warning("No updated date field found for {} - not restricting with start and end date".format(model_name))
        return model_items

    if start_date:
        model_items = model_items.filter(
            **{'{}__gte'.format(updated_field): __str_to_tzdate__(start_date)})
    if end_date:
        model_items = model_items.filter(
            **{'{}__lte'.format(updated_field): __str_to_tzdate__(end_date)})
    return model_items
Filters the model items queryset based on start and end date.
def set_locale(request):
    """Return the locale named by the ``lang`` query parameter, falling
    back to the locale auto-selected from the request."""
    # The fallback is computed eagerly (dict.get evaluates its default
    # argument unconditionally), matching the original behavior.
    fallback = app.ps.babel.select_locale_by_request(request)
    return request.query.get('lang', fallback)
Return the locale named by the ``lang`` GET query parameter, or, when the parameter is absent, the locale selected automatically from the request.
def postprocess(self, tempname, filename):
    """Perform platform-specific postprocessing of an extracted file.

    On POSIX, adds world read/execute bits (0o555) to ``tempname``,
    masked to permission bits only; other platforms need nothing.
    ``filename`` is the name the caller will rename the file to.
    """
    if os.name != 'posix':
        return
    current_mode = os.stat(tempname).st_mode
    os.chmod(tempname, (current_mode | 0o555) & 0o7777)
Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns.
def get(self):
    """Construct a StepContextContext for this step.

    :returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextContext
    :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextContext
    """
    solution = self._solution
    return StepContextContext(
        self._version,
        flow_sid=solution['flow_sid'],
        engagement_sid=solution['engagement_sid'],
        step_sid=solution['step_sid'],
    )
Constructs a StepContextContext :returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextContext :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextContext
def write_bytes(self, data, n):
    """Write the first ``n`` bytes of ``data`` into the payload at the
    current write position, then advance the position by ``n``.

    :param data: indexable byte source, at least ``n`` elements long
    :param n: number of bytes to write
    """
    # `range` instead of the Python-2-only `xrange`: identical semantics,
    # but the method now also works under Python 3.
    for offset in range(n):
        self.payload[self.pos + offset] = data[offset]
    self.pos += n
Write n number of bytes to this packet.
def naive(
    year, month, day,
    hour=0, minute=0, second=0, microsecond=0
):
    """Build and return a naive (timezone-less) DateTime from its
    individual components; time parts default to midnight."""
    components = (year, month, day, hour, minute, second, microsecond)
    return DateTime(*components)
Return a naive DateTime.
def _deserialize(self, value, attr, data):
    """Deserialize a string value, sanitizing it on the way in.

    Runs the parent deserialization, applies ftfy text repair, drops any
    characters invalid in XML, and strips the field's
    UNWANTED_CHARACTERS.
    """
    cleaned = super(SanitizedUnicode, self)._deserialize(value, attr, data)
    cleaned = fix_text(cleaned)
    cleaned = ''.join(ch for ch in cleaned if self.is_valid_xml_char(ch))
    for unwanted in self.UNWANTED_CHARACTERS:
        cleaned = cleaned.replace(unwanted, '')
    return cleaned
Deserialize sanitized string value.
def _get_model_table(self, part):
    """Build a list-of-rows model for one section of a table.

    :param part: The table header, table footer or table body.
    :type part: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: The list that represents the table.
    :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
    """
    rows = self.parser.find(part).find_children('tr').list_results()
    table = []
    for row in rows:
        # Each row becomes the model of its td/th cell elements.
        table.append(self._get_model_row(self.parser.find(
            row
        ).find_children('td,th').list_results()))
    # Normalize the raw grid into a valid model (see
    # _get_valid_model_table for what "valid" entails).
    return self._get_valid_model_table(table)
Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
def save_file(self, data, dfile):
    """Save an Instances object to the specified file via the wrapped
    weka.core.converters.FileSourcedConverter.

    :param data: the data to save
    :type data: Instances
    :param dfile: the file to save the data to (path string or a
        java.io.File instance)
    :type dfile: str
    """
    self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
    # Wrap a plain path string into a java.io.File when one wasn't given.
    if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
        dfile = javabridge.make_instance(
            "Ljava/io/File;", "(Ljava/lang/String;)V",
            javabridge.get_env().new_string_utf(str(dfile)))
    javabridge.call(self.jobject, "setFile", "(Ljava/io/File;)V", dfile)
    javabridge.call(self.jobject, "setInstances", "(Lweka/core/Instances;)V", data.jobject)
    # writeBatch performs the actual write to disk.
    javabridge.call(self.jobject, "writeBatch", "()V")
Saves the Instances object in the specified file. :param data: the data to save :type data: Instances :param dfile: the file to save the data to :type dfile: str
def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False, force_colorize=False):
    """Tail this function's logs.

    Fetches log events newer than ``since`` and prints them.  When
    ``keep_open`` is True, keeps polling once per second, printing only
    events strictly newer than the last one shown, until interrupted.

    :param since: human-readable start time, converted via
        string_to_timestamp.
    :param filter_pattern: log filter pattern forwarded to the fetch.
    :param limit: maximum events per fetch.
    :param colorize/http/non_http/force_colorize: display options
        forwarded to print_logs.
    """
    try:
        since_stamp = string_to_timestamp(since)
        last_since = since_stamp
        while True:
            new_logs = self.zappa.fetch_logs(
                self.lambda_name,
                start_time=since_stamp,
                limit=limit,
                filter_pattern=filter_pattern,
            )
            # Drop anything already printed in a previous iteration.
            new_logs = [ e for e in new_logs if e['timestamp'] > last_since ]
            self.print_logs(new_logs, colorize, http, non_http, force_colorize)
            if not keep_open:
                break
            if new_logs:
                last_since = new_logs[-1]['timestamp']
            time.sleep(1)
    except KeyboardInterrupt:
        # On Ctrl-C exit immediately; 130 is the conventional status for
        # termination by SIGINT.
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(130)
Tail this function's logs. if keep_open, do so repeatedly, printing any new logs
def neuron(layer_name, channel_n, x=None, y=None, batch=None):
    """Visualize a single neuron of a single channel.

    When ``x``/``y`` are omitted, the spatial center is used (for even
    dimensions, the bottom-right of the center 2x2).  When ``batch`` is
    None the objective spans the whole batch dimension.
    """
    def inner(T):
        layer = T(layer_name)
        shape = tf.shape(layer)
        # Fall back to the spatial center for any omitted coordinate.
        col = x if x is not None else shape[1] // 2
        row = y if y is not None else shape[2] // 2
        if batch is not None:
            return layer[batch, col, row, channel_n]
        return layer[:, col, row, channel_n]
    return inner
Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+
def gpu_r2c_fft(in1, is_gpuarray=False, store_on_gpu=False):
    """Real-to-complex FFT on the GPU via the scikits CUDA FFT.

    INPUTS:
    in1             (no default):    array on which the FFT is performed.
    is_gpuarray     (default=False): whether `in1` already lives on the GPU.
    store_on_gpu    (default=False): whether to leave the result on the GPU.

    OUTPUTS:
    gpu_out1 (or gpu_out1.get()): the complex64 half-spectrum, on the GPU
    or copied back to the host respectively.
    """
    if is_gpuarray:
        gpu_in1 = in1
    else:
        # Asynchronous host->device copy; computation is single precision.
        gpu_in1 = gpuarray.to_gpu_async(in1.astype(np.float32))

    # An R2C FFT output has N//2 + 1 columns (Hermitian symmetry); the
    # float expression is truncated on assignment into the int array.
    output_size = np.array(in1.shape)
    output_size[1] = 0.5*output_size[1] + 1

    gpu_out1 = gpuarray.empty([output_size[0], output_size[1]], np.complex64)
    gpu_plan = Plan(gpu_in1.shape, np.float32, np.complex64)

    fft(gpu_in1, gpu_out1, gpu_plan)

    if store_on_gpu:
        return gpu_out1
    else:
        return gpu_out1.get()
This function makes use of the scikits implementation of the FFT for GPUs to take the real to complex FFT. INPUTS: in1 (no default): The array on which the FFT is to be performed. is_gpuarray (default=False): Boolean specifier for whether or not input is on the gpu. store_on_gpu (default=False): Boolean specifier for whether the result is to be left on the gpu or not. OUTPUTS: gpu_out1 The gpu array containing the result. OR gpu_out1.get() The result from the gpu array.
def compute_regressor(exp_condition, hrf_model, frame_times, con_id='cond',
                      oversampling=50, fir_delays=None, min_onset=-24):
    """Convolve one experimental condition with an HRF model.

    Parameters
    ----------
    exp_condition : array-like of shape (3, n_events)
        (onsets, durations, amplitudes) triplet describing the events.
    hrf_model : str or None
        name of the HRF model ('spm', 'glover', their derivative
        variants, 'fir', ...).
    frame_times : array of shape (n_scans,)
        desired sampling times.
    con_id : string
        identifier of the condition, used to build regressor names.
    oversampling : int
        oversampling factor used during convolution.
    fir_delays : 1D array-like, optional
        delays (in scans) for the FIR model.
    min_onset : float
        events starting before frame_times[0] + min_onset are dropped.

    Returns
    -------
    computed_regressors : array of shape (n_scans, n_reg)
        regressors sampled at the frame times.
    reg_names : list of strings
        corresponding regressor names.
    """
    # TR inferred assuming regularly spaced frame times.
    tr = float(frame_times.max()) / (np.size(frame_times) - 1)

    # 1. Sample the condition on a finer (oversampled) time grid.
    hr_regressor, hr_frame_times = _sample_condition(
        exp_condition, frame_times, oversampling, min_onset)

    # 2. Build the HRF kernel(s) for the requested model.
    hkernel = _hrf_kernel(hrf_model, tr, oversampling, fir_delays)

    # 3. Convolve the high-resolution regressor with each kernel,
    # truncating to the original length.
    conv_reg = np.array([np.convolve(hr_regressor, h)[:hr_regressor.size]
                         for h in hkernel])

    # 4. Resample back down to the acquisition frame times.
    computed_regressors = _resample_regressor(
        conv_reg, hr_frame_times, frame_times)

    # 5. Orthogonalize derivative regressors w.r.t. the main one
    # (not meaningful for FIR, whose columns are distinct delays).
    if hrf_model != 'fir':
        computed_regressors = _orthogonalize(computed_regressors)

    reg_names = _regressor_names(con_id, hrf_model, fir_delays=fir_delays)
    return computed_regressors, reg_names
This is the main function to convolve regressors with hrf model Parameters ---------- exp_condition : array-like of shape (3, n_events) yields description of events for this condition as a (onsets, durations, amplitudes) triplet hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion', 'glover', 'glover + derivative', 'fir', None} Name of the hrf model to be used frame_times : array of shape (n_scans) the desired sampling times con_id : string optional identifier of the condition oversampling : int, optional oversampling factor to perform the convolution fir_delays : 1D-array-like, optional delays (in seconds) used in case of a finite impulse reponse model min_onset : float, optional minimal onset relative to frame_times[0] (in seconds) events that start before frame_times[0] + min_onset are not considered Returns ------- computed_regressors: array of shape(n_scans, n_reg) computed regressors sampled at frame times reg_names: list of strings corresponding regressor names Notes ----- The different hemodynamic models can be understood as follows: 'spm': this is the hrf model used in SPM 'spm + derivative': SPM model plus its time derivative (2 regressors) 'spm + time + dispersion': idem, plus dispersion derivative (3 regressors) 'glover': this one corresponds to the Glover hrf 'glover + derivative': the Glover hrf + time derivative (2 regressors) 'glover + derivative + dispersion': idem + dispersion derivative (3 regressors) 'fir': finite impulse response basis, a set of delayed dirac models with arbitrary length. This one currently assumes regularly spaced frame times (i.e. fixed time of repetition). It is expected that spm standard and Glover model would not yield large differences in most cases. In case of glover and spm models, the derived regressors are orthogonalized wrt the main one.
def last_requestline(sent_data):
    """Return the last line in ``sent_data`` that parses as an HTTP
    request line (None when no line qualifies).

    Lines are scanned from the end; the first candidate accepted by
    ``parse_requestline`` wins.
    """
    for candidate in reversed(sent_data):
        try:
            parse_requestline(decode_utf8(candidate))
        except ValueError:
            continue
        return candidate
Find the last line in sent_data that can be parsed with parse_requestline
def get_time_variables(ds):
    """Return the set of variable names describing the time coordinate.

    A variable qualifies when it has ``standard_name='time'``,
    ``axis='T'``, or string units matching "<unit> since <epoch>".

    :param netCDF4.Dataset ds: An open netCDF4 Dataset
    :return: set of time-variable names
    """
    time_variables = set()
    # set.add is idempotent, so no pre-membership checks are needed for
    # any of the three passes below.
    for variable in ds.get_variables_by_attributes(standard_name='time'):
        time_variables.add(variable.name)
    for variable in ds.get_variables_by_attributes(axis='T'):
        time_variables.add(variable.name)
    # Units of the form "seconds since <epoch>" and common abbreviations;
    # compiled once instead of re-matching the raw pattern per variable.
    time_units = re.compile(r'^(?:day|d|hour|hr|h|minute|min|second|s)s? since .*$')
    # NOTE(review): `basestring` is Python-2-only; under Python 3 this
    # needs `str` (or a compat shim) -- confirm the project's target.
    for variable in ds.get_variables_by_attributes(units=lambda x: isinstance(x, basestring)):
        if time_units.match(variable.units):
            time_variables.add(variable.name)
    return time_variables
Returns a list of variables describing the time coordinate :param netCDF4.Dataset ds: An open netCDF4 Dataset
def mixed_parcel(p, temperature, dewpt, parcel_start_pressure=None,
                 heights=None, bottom=None, depth=100 * units.hPa,
                 interpolate=True):
    r"""Calculate the properties of a parcel mixed from a layer.

    Determines the properties of an air parcel that is the result of
    complete mixing of a given atmospheric layer.

    Parameters
    ----------
    p : `pint.Quantity`
        Atmospheric pressure profile
    temperature : `pint.Quantity`
        Atmospheric temperature profile
    dewpt : `pint.Quantity`
        Atmospheric dewpoint profile
    parcel_start_pressure : `pint.Quantity`, optional
        Pressure at which the mixed parcel should begin; defaults to p[0]
    heights : `pint.Quantity`, optional
        Atmospheric heights corresponding to the given pressures
    bottom : `pint.Quantity`, optional
        Bottom of the layer (pressure or height above surface pressure)
    depth : `pint.Quantity`, optional
        Thickness of the layer (default 100 hPa)
    interpolate : bool, optional
        Interpolate the layer endpoints if absent from the data

    Returns
    -------
    `pint.Quantity, pint.Quantity, pint.Quantity`
        The pressure, temperature, and dewpoint of the mixed parcel.
    """
    # BUG FIX: a stray bare `r` (the orphaned prefix of this raw
    # docstring) previously sat here and raised NameError at runtime.
    if not parcel_start_pressure:
        parcel_start_pressure = p[0]

    # Mix the conserved quantities -- potential temperature and mixing
    # ratio -- over the layer, then convert back to temperature and
    # dewpoint at the parcel's starting pressure.
    theta = potential_temperature(p, temperature)
    mixing_ratio = saturation_mixing_ratio(p, dewpt)
    mean_theta, mean_mixing_ratio = mixed_layer(p, theta, mixing_ratio,
                                                bottom=bottom, heights=heights,
                                                depth=depth,
                                                interpolate=interpolate)
    mean_temperature = (mean_theta / potential_temperature(parcel_start_pressure,
                                                           1 * units.kelvin)) * units.kelvin
    mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
    mean_dewpoint = dewpoint(mean_vapor_pressure)

    return (parcel_start_pressure, mean_temperature.to(temperature.units),
            mean_dewpoint.to(dewpt.units))
r"""Calculate the properties of a parcel mixed from a layer. Determines the properties of an air parcel that is the result of complete mixing of a given atmospheric layer. Parameters ---------- p : `pint.Quantity` Atmospheric pressure profile temperature : `pint.Quantity` Atmospheric temperature profile dewpt : `pint.Quantity` Atmospheric dewpoint profile parcel_start_pressure : `pint.Quantity`, optional Pressure at which the mixed parcel should begin (default None) heights: `pint.Quantity`, optional Atmospheric heights corresponding to the given pressures (default None) bottom : `pint.Quantity`, optional The bottom of the layer as a pressure or height above the surface pressure (default None) depth : `pint.Quantity`, optional The thickness of the layer as a pressure or height above the bottom of the layer (default 100 hPa) interpolate : bool, optional Interpolate the top and bottom points if they are not in the given data Returns ------- `pint.Quantity, pint.Quantity, pint.Quantity` The pressure, temperature, and dewpoint of the mixed parcel.
def all_dependencies(self, target):
    """Yield all transitive dependencies of ``target``, traversed
    breadth-first with the context's closure kwargs applied."""
    yield from target.closure(bfs=True, **self.target_closure_kwargs)
All transitive dependencies of the context's target.
def _request(self, endpoint, method, data=None, **kwargs): final_url = self.url + endpoint if not self._is_authenticated: raise LoginRequired rq = self.session if method == 'get': request = rq.get(final_url, **kwargs) else: request = rq.post(final_url, data, **kwargs) request.raise_for_status() request.encoding = 'utf_8' if len(request.text) == 0: data = json.loads('{}') else: try: data = json.loads(request.text) except ValueError: data = request.text return data
Method to hanle both GET and POST requests. :param endpoint: Endpoint of the API. :param method: Method of HTTP request. :param data: POST DATA for the request. :param kwargs: Other keyword arguments. :return: Response for the request.
def name2rgb(hue):
    """Map a hue (in degrees) to an RGB triple at fixed saturation (0.8)
    and value (0.7).  Originally used to derive a color from a module
    name."""
    rgb = colorsys.hsv_to_rgb(hue / 360.0, .8, .7)
    return tuple(int(channel * 256) for channel in rgb)
Originally used to calculate color based on module name.
def write_to_path(self, path, suffix='', format='png', overwrite=False):
    """Write each row's 'image' to a directory tree structured
    project_name/sample_name, with files named by frame_name.

    Args:
        path (str): directory to write into (created if missing)
        suffix (str): optional label appended to each file name
        format (str): image format/extension, default 'png'
        overwrite (bool): allow writing into an existing ``path``

    Raises:
        ValueError: when ``path`` already exists and overwrite is False.

    Modifies:
        Creates ``path`` (and per-sample subfolders) as needed and writes
        one image file per row.
    """
    if os.path.exists(path) and overwrite is False:
        # Fixed typo in the user-facing message: 'ovewrite' -> 'overwrite'.
        raise ValueError("Error: use overwrite=True to overwrite images")
    if not os.path.exists(path):
        os.makedirs(path)
    for i, r in self.iterrows():
        spath = os.path.join(path, r['project_name'], r['sample_name'])
        if not os.path.exists(spath):
            os.makedirs(spath)
        if suffix == '':
            fname = os.path.join(spath, r['frame_name'] + '.' + format)
        else:
            fname = os.path.join(spath, r['frame_name'] + '_' + suffix + '.' + format)
        imageio.imwrite(fname, r['image'], format=format)
Output the data in the dataframe's 'image' column to a directory structured by project->sample and named by frame Args: path (str): Where to write the directory of images suffix (str): for labeling the images you write format (str): default 'png' format to write the file overwrite (bool): default False. If True, can overwrite files in the path Modifies: Creates path folder if necessary and writes images to path
def get_names_and_paths(compiler_output: Dict[str, Any]) -> Dict[str, str]:
    """Map each contract name to the relative form of the source path it
    was compiled from.

    Note: a contract name appearing under several paths keeps the path
    encountered last, matching dict-comprehension semantics.
    """
    mapping: Dict[str, str] = {}
    for source_path, contracts in compiler_output.items():
        relative = make_path_relative(source_path)
        for contract_name in contracts:
            mapping[contract_name] = relative
    return mapping
Return a mapping of contract name to relative path as defined in compiler output.
def supports(cls, *functionalities):
    """Build a view decorator declaring the functionalities an xBlock
    view supports.

    Arguments:
        functionalities: String identifiers for the supported
            functionalities, e.g. "multi_device".

    The returned decorator records each identifier in the view's
    ``_supports`` set (created on first use) and returns the view.
    """
    def _decorator(view):
        if not hasattr(view, "_supports"):
            view._supports = set()
        view._supports.update(functionalities)
        return view
    return _decorator
A view decorator to indicate that an xBlock view has support for the given functionalities. Arguments: functionalities: String identifiers for the functionalities of the view. For example: "multi_device".
def convert_from_file(cls, input_file=None, output_file=None, output_format='json', indent=2, compact=False):
    """Convert a HOCON file to json, properties or yaml.

    :param input_file: input file path; stdin when None
    :param output_file: output file path; stdout when None
    :param output_format: 'json', 'properties' or 'yaml'
    :param indent: indentation width for the output
    :param compact: whether to emit the compact form
    """
    if input_file is None:
        config = ConfigFactory.parse_string(sys.stdin.read())
    else:
        config = ConfigFactory.parse_file(input_file)

    converted = cls.convert(config, output_format, indent, compact)
    if output_file is None:
        print(converted)
        return
    with open(output_file, "w") as fd:
        fd.write(converted)
Convert to json, properties or yaml :param input_file: input file, if not specified stdin :param output_file: output file, if not specified stdout :param output_format: json, properties or yaml :return: json, properties or yaml string representation
def points(self, size=1.0, highlight=None, colorlist=None, opacity=1.0):
    """Display the system as points.

    :param float size: the size of every point.
    :param highlight: atom index, or list/array of indices, to paint red.
    :param colorlist: per-atom colors; defaults to colors derived from
        the topology's atom types.
    :param float opacity: point opacity.
    """
    if colorlist is None:
        colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
    if highlight is not None:
        # Accept either a single index or a collection of indices.
        if isinstance(highlight, int):
            colorlist[highlight] = 0xff0000
        if isinstance(highlight, (list, np.ndarray)):
            for i in highlight:
                colorlist[i] = 0xff0000
    sizes = [size] * len(self.topology['atom_types'])

    points = self.add_representation('points', {'coordinates': self.coordinates.astype('float32'),
                                                'colors': colorlist,
                                                'sizes': sizes,
                                                'opacity': opacity})
    # Re-push coordinates into this representation whenever the viewer
    # requests an update (e.g. during trajectory playback).
    def update(self=self, points=points):
        self.update_representation(points, {'coordinates': self.coordinates.astype('float32')})
    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
Display the system as points. :param float size: the size of the points.
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
    """Load CSV data from a file or in-memory contents.

    Args:
        file_name: local file name, or just the extension holder when
            ``file_contents`` is supplied.
        encoding: cell encoding passed to the CSV reader.
        file_contents: optional file bytes; when None, ``file_name`` is
            opened directly.
        on_demand: when True, return a lazy generator of rows instead of
            a fully materialized list.

    Returns:
        A one-element list containing the table (a list of rows, or a
        generator of rows when ``on_demand`` is set).
    """
    def yield_csv(csv_contents, csv_file):
        # Stream rows, guaranteeing the underlying file is closed even if
        # the consumer abandons the generator early.
        try:
            for line in csv_contents:
                yield line
        finally:
            try:
                csv_file.close()
            except Exception:
                # Best-effort close, narrowed from a bare `except:` so
                # that SystemExit/KeyboardInterrupt are not swallowed.
                pass

    if file_contents:
        csv_file = BytesIO(file_contents)
    else:
        csv_file = open(file_name, 'rb')

    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
    if on_demand:
        table = yield_csv(reader, csv_file)
    else:
        # Materialize through the same generator so the file still gets
        # closed; replaces the redundant `process_csv` helper.
        table = list(yield_csv(reader, csv_file))
    return [table]
Gets good old csv data from a file. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. encoding: Loads the file with the specified cell encoding. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
def __grabHotkeysForWindow(self, window):
    """Grab all hotkeys relevant to a newly created window.

    Combines configured hotkey items and hotkey folders, then queues an
    X key grab for each one whose window-title filter matches this
    window; hotkeys needing the Mutter workaround are grabbed regardless
    of a title match.
    """
    c = self.app.configManager
    hotkeys = c.hotKeys + c.hotKeyFolders
    window_info = self.get_window_info(window)
    for item in hotkeys:
        # Grabs are queued (not executed inline) on the event thread.
        if item.get_applicable_regex() is not None and item._should_trigger_window_title(window_info):
            self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
        elif self.__needsMutterWorkaround(item):
            self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
Grab all hotkeys relevant to the window Used when a new window is created