code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def hash(self):
    """Return a string uniquely identifying this entry within a date.

    Entries sharing the same alias/description/ignored/flags regroup together.
    """
    identity = (self.alias, self.description, str(self.ignored), str(self.flags))
    return u''.join(identity)
Return a value that's used to uniquely identify an entry in a date so we can regroup all entries that share the same hash.
def run_task_class(self, class_path, **options):
    """Run a CumulusCI task class (dotted path) with options passed as kwargs.

    Useful for exercising task logic that is not configured as a named task
    in the project's cumulusci.yml.
    """
    logger.console("\n")
    cls, config = self._init_task(class_path, options, TaskConfig())
    return self._run_task(cls, config)
Runs a CumulusCI task class with task options via kwargs. Use this keyword to run logic from CumulusCI tasks which have not been configured in the project's cumulusci.yml file. This is most useful when a test needs task logic that is unique to the test and therefore not worth adding as a named task in the project. Examples: | =Keyword= | =task_class= | =task_options= | | Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
def stop():
    """Stop lazarus, regardless of which mode it was started in.

    :raises RuntimeWarning: if lazarus is not currently active.
    """
    global _active
    if not _active:
        raise RuntimeWarning('lazarus is not active')
    _observer.stop()
    _observer.join()
    _deactivate()
Stops lazarus, regardless of which mode it was started in. For example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop()
def hash_file(path, hashobj, conn=None):
    """Return the hexdigest of the file at *path*; '' when *path* is a directory."""
    if os.path.isdir(path):
        return ''
    with salt.utils.files.fopen(path, 'r') as handle:
        hashobj.update(salt.utils.stringutils.to_bytes(handle.read()))
    return hashobj.hexdigest()
Get the hexdigest hash value of a file
def _write_iterate(self, values):
    """Generate lines for one pass through the group.

    Returns a list of rendered lines, or the bare line when there is only one.
    """
    rendered = [self.lines[key].write(values) for key in self.order]
    return rendered if len(rendered) > 1 else rendered[0]
Generates the lines for a single pass through the group.
def datapoint(self, ind, field_names=None):
    """Load the TensorDatapoint at global index *ind*.

    :param ind: global index into the dataset.
    :param field_names: field names to load; defaults to all fields.
    :raises ValueError: if *ind* is out of range.
    """
    # Flush buffered writes so the read sees a consistent on-disk state.
    if self._has_unsaved_data:
        self.flush()
    if ind >= self._num_datapoints:
        raise ValueError('Index %d larger than the number of datapoints in the dataset (%d)' %(ind, self._num_datapoints))
    if field_names is None:
        field_names = self.field_names
    datapoint = TensorDatapoint(field_names)
    # Map the global index to its containing file, then to the offset inside it.
    file_num = self._index_to_file_num[ind]
    for field_name in field_names:
        tensor = self.tensor(field_name, file_num)
        tensor_index = ind % self._datapoints_per_file
        datapoint[field_name] = tensor.datapoint(tensor_index)
    return datapoint
Loads a tensor datapoint for a given global index. Parameters ---------- ind : int global index in the tensor field_names : :obj:`list` of str field names to load Returns ------- :obj:`TensorDatapoint` the desired tensor datapoint
def write_raw(path, raw):
    """POST *raw* to the given vault path.

    Returns the response data on 200, True on 204 (no content), or False on
    any failure.  The vault policy in use must allow writes to *path*.
    """
    log.debug('Writing vault secrets for %s at %s', __grains__['id'], path)
    try:
        url = 'v1/{0}'.format(path)
        response = __utils__['vault.make_request']('POST', url, json=raw)
        if response.status_code == 200:
            return response.json()['data']
        elif response.status_code != 204:
            response.raise_for_status()
        return True
    except Exception as err:
        # Best-effort: any failure is logged and reported as False.
        log.error('Failed to write secret! %s: %s', type(err).__name__, err)
        return False
Set raw data at the path in vault. The vault policy used must allow this. CLI Example: .. code-block:: bash salt '*' vault.write_raw "secret/my/secret" '{"user":"foo","password": "bar"}'
def add_clause(self, clause, no_return=True):
    """Add a single clause to the underlying solver.

    :param clause: iterable of integer literals.
    :param no_return: when False, return the solver's satisfiability result.
    """
    if not self.solver:
        return None
    outcome = self.solver.add_clause(clause, no_return)
    if no_return:
        return None
    return outcome
This method is used to add a single clause to the solver. An optional argument ``no_return`` controls whether or not to check the formula's satisfiability after adding the new clause. :param clause: an iterable over literals. :param no_return: check solver's internal formula and return the result, if set to ``False``. :type clause: iterable(int) :type no_return: bool :rtype: bool if ``no_return`` is set to ``False``. Note that a clause can be either a ``list`` of integers or another iterable type over integers, e.g. ``tuple`` or ``set`` among others. A usage example is the following: .. code-block:: python >>> s = Solver(bootstrap_with=[[-1, 2], [-1, -2]]) >>> s.add_clause([1], no_return=False) False
def sql(self, input_string, *args):
    """Execute a SQL statement via psycopg2 with autocommit.

    Connection parameters are taken from self.  *args* are interpolated into
    %s / %(name)s placeholders by the driver.  Returns fetchall(), or None
    when the statement produced no result set.
    """
    psycopg2 = importlib.import_module('psycopg2')
    importlib.import_module('psycopg2.extensions')
    connection = psycopg2.connect(
        user=self.user,
        host=self.host,
        port=self.port,
        database=self.db_name)
    connection.set_isolation_level(
        psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    try:
        cursor = connection.cursor()
        cursor.execute(input_string, args)
        try:
            return cursor.fetchall()
        except psycopg2.ProgrammingError:
            # No result set (e.g. DDL or INSERT without RETURNING).
            return None
    finally:
        connection.close()
Execute a SQL command using the Python DBI directly. Connection parameters are taken from self. Autocommit is in effect. Example: .sql('SELECT %s FROM %s WHERE age > %s', 'name', 'table1', '45') @param input_string: A string of SQL. May contain %s or %(name)s format specifiers; they are replaced with corresponding values taken from args. @param args: zero or more parameters to interpolate into the string. Note that they're passed individually, not as a single tuple. @return: Whatever .fetchall() returns.
def distance(latitude_1, longitude_1, latitude_2, longitude_2):
    """Approximate distance between two lat/lon points (equirectangular projection)."""
    lon_scale = mod_math.cos(latitude_1 / 180. * mod_math.pi)
    d_lat = latitude_1 - latitude_2
    d_lon = (longitude_1 - longitude_2) * lon_scale
    return mod_math.sqrt(d_lat * d_lat + d_lon * d_lon) * ONE_DEGREE
Distance between two points.
def set_config(self, payload):
    """Apply one option from *payload* to the default config section, persist it,
    and return a status message dict."""
    option, value = payload['option'], payload['value']
    self.config['default'][option] = str(value)
    if option == 'maxProcesses':
        self.process_handler.set_max(value)
    if option == 'customShell':
        path = value
        if os.path.isfile(path) and os.access(path, os.X_OK):
            self.process_handler.set_shell(path)
        elif path == 'default':
            # Reset to the built-in default shell.
            self.process_handler.set_shell()
        else:
            return {'message': "File in path doesn't exist or is not executable.", 'status': 'error'}
    self.write_config()
    return {'message': 'Configuration successfully updated.', 'status': 'success'}
Update the current config depending on the payload and save it.
def split_every(n, iterable):
    """Return a generator yielding lists of up to *n* items from *iterable*.

    The last chunk may be shorter than *n*.
    """
    it = iter(iterable)
    chunks = (list(itertools.islice(it, n)) for _ in itertools.count())
    # takewhile(bool, ...) stops at the first empty chunk.
    return itertools.takewhile(bool, chunks)
Returns a generator that splits an iterable into n-sized chunks. The last chunk may have fewer than n elements. See http://stackoverflow.com/a/22919323/503377.
def git_available(func):
    """Decorator: run *func* only when APISettings.GIT_DIR holds a git repository."""
    def wrapper(*args):
        os.chdir(APISettings.GIT_DIR)
        if call(['git', 'rev-parse']) != 0:
            Shell.fail('There is no git repository!')
            return exit(1)
        return func(*args)
    return wrapper
Check, if a git repository exists in the given folder.
def get_version(module='spyder_notebook'):
    """Read the package version from <module>/_version.py via its VERSION_INFO tuple."""
    version_path = os.path.join(HERE, module, '_version.py')
    with open(version_path, 'r') as fh:
        contents = fh.read()
    for line in contents.split('\n'):
        if line.startswith('VERSION_INFO'):
            info = ast.literal_eval(line.split('=')[-1].strip())
            version = '.'.join(map(str, info))
            break
    return version
Get version.
async def wait_stream(aiterable):
    """Drain *aiterable* inside a safe stream context and return its last item.

    :raises StreamEmpty: if the iterable yields nothing.
    """
    async with streamcontext(aiterable) as streamer:
        async for item in streamer:
            item  # no-op: the loop only keeps the most recent item bound
        try:
            return item
        except NameError:
            # 'item' was never bound, i.e. the stream produced no items.
            raise StreamEmpty()
Wait for an asynchronous iterable to finish and return the last item. The iterable is executed within a safe stream context. A StreamEmpty exception is raised if the sequence is empty.
def _get_sub_package_provider_session(self, sub_package, session_name, proxy=None):
    """Return a provider session from *sub_package*, creating and caching it on demand.

    Sessions are cached per agent unless session management is DISABLED.
    """
    agent_key = self._get_agent_key()
    if session_name in self._provider_sessions[agent_key]:
        return self._provider_sessions[agent_key][session_name]
    else:
        manager = self._get_sub_package_provider_manager(sub_package)
        # NOTE(review): the 'proxy' parameter is ignored — self._proxy is used
        # instead.  Confirm whether that is intentional.
        session = self._instantiate_session('get_' + session_name + '_for_bank',
                                            proxy=self._proxy,
                                            manager=manager)
        # Configure the standard catalog/object views on the new session.
        self._set_bank_view(session)
        self._set_object_view(session)
        self._set_operable_view(session)
        self._set_containable_view(session)
        if self._session_management != DISABLED:
            self._provider_sessions[agent_key][session_name] = session
        return session
Gets the session from a sub-package
def correct_word(word_string):
    """Return the most probable spelling correction for *word_string* ('' when None).

    :raises InputError: if *word_string* is neither a str nor None.
    """
    if word_string is None:
        return ""
    if not isinstance(word_string, str):
        raise InputError("string or none type variable not passed as argument to correct_word")
    return max(find_candidates(word_string), key=find_word_prob)
Finds all valid one and two letter corrections for word_string, returning the word with the highest relative probability as type str.
def ArtifactsFromYaml(self, yaml_content):
    """Parse *yaml_content* into a list of rdf Artifact objects.

    :raises ArtifactDefinitionError: for any dict that fails to build an Artifact.
    """
    raw_list = yaml.ParseMany(yaml_content)
    # Unwrap a doubly-nested list produced by some YAML layouts.
    if (isinstance(raw_list, list) and len(raw_list) == 1 and isinstance(raw_list[0], list)):
        raw_list = raw_list[0]
    valid_artifacts = []
    for artifact_dict in raw_list:
        try:
            artifact_value = rdf_artifacts.Artifact(**artifact_dict)
            valid_artifacts.append(artifact_value)
        except (TypeError, AttributeError, type_info.TypeValueError) as e:
            name = artifact_dict.get("name")
            raise rdf_artifacts.ArtifactDefinitionError(
                name, "invalid definition", cause=e)
    return valid_artifacts
Get a list of Artifacts from yaml.
def _fwhm_side_lineal(uu, vv):
    """Locate the zero crossing of *vv* along *uu* by linear interpolation.

    Returns (r12, status): status 1 with r12=0 when vv never goes negative,
    otherwise status 0 with the interpolated crossing position.
    """
    negatives, = np.nonzero(vv < 0)
    if negatives.size == 0:
        return 0, 1
    hi = negatives[0]
    lo = hi - 1
    run = uu[hi] - uu[lo]
    rise = vv[hi] - vv[lo]
    crossing = uu[lo] - vv[lo] * run / rise
    return crossing, 0
Compute r12 using linear interpolation.
def make_request(self, data, is_json=True):
    """POST the constructed payload to the GCM endpoint.

    :param data: return value from construct_payload.
    :raises GCMMalformedJsonException: on HTTP 400.
    :raises GCMAuthenticationException: on HTTP 401.
    :raises GCMUnavailableException: on HTTP 503 or any other error status.
    """
    if is_json:
        self.headers['Content-Type'] = 'application/json'
    if not is_json:
        # Plain-text mode: form-encode the payload.
        data = urlencode_utf8(data)
    response = requests.post(
        self.url, data=data, headers=self.headers,
        proxies=self.proxy
    )
    if response.status_code == 200:
        if is_json:
            response = response.json()
        else:
            response = response.content
        return response
    if response.status_code == 400:
        raise GCMMalformedJsonException(
            "The request could not be parsed as JSON")
    elif response.status_code == 401:
        raise GCMAuthenticationException(
            "There was an error authenticating the sender account")
    elif response.status_code == 503:
        raise GCMUnavailableException("GCM service is unavailable")
    else:
        error = "GCM service error: %d" % response.status_code
        raise GCMUnavailableException(error)
Makes a HTTP request to GCM servers with the constructed payload :param data: return value from construct_payload method :raises GCMMalformedJsonException: if malformed JSON request found :raises GCMAuthenticationException: if there was a problem with authentication, invalid api key :raises GCMUnavailableException: if the GCM service is unavailable
def get_object(self, queryset=None):
    """Return the Episode for this view with 1-based index, index_next and
    index_previous attributes attached."""
    if settings.PODCAST_SINGULAR:
        # Single-podcast install: the show is fixed by settings.
        show = get_object_or_404(Show, id=settings.PODCAST_ID)
    else:
        show = get_object_or_404(Show, slug=self.kwargs['show_slug'])
    obj = get_object_or_404(Episode, show=show, slug=self.kwargs['slug'])
    # Episode number = count of earlier public/secret episodes in the show, plus one.
    index = Episode.objects.is_public_or_secret().filter(show=show, pub_date__lt=obj.pub_date).count()
    obj.index = index + 1
    obj.index_next = obj.index + 1
    obj.index_previous = obj.index - 1
    return obj
Return object with episode number attached to episode.
def _evaluate(self, message):
    """Evaluate the stored expression with *message* bound as ``J`` in its locals.

    :param message: a decoded JSON input.
    :return: the resulting object.

    NOTE(review): ``eval`` of self.code executes arbitrary Python; this is
    only safe if self.code comes from a trusted source — confirm.
    """
    return eval(
        self.code,
        globals(),
        {
            "J": message,
            "timedelta": timedelta,
            "datetime": datetime,
            "SKIP": self._SKIP})
Evaluate the expression with the given Python object in its locals. @param message: A decoded JSON input. @return: The resulting object.
def makefile(self, mode='r', bufsize=-1):
    'return a file-like object that operates on the ssl connection'
    # Build a SocketFile without running its own __init__ (which would expect
    # a plain socket); initialize only the FileBase layer.
    sockfile = gsock.SocketFile.__new__(gsock.SocketFile)
    gfiles.FileBase.__init__(sockfile)
    sockfile._sock = self
    sockfile.mode = mode
    # Non-positive bufsize keeps the class default chunk size.
    if bufsize > 0:
        sockfile.CHUNKSIZE = bufsize
    return sockfile
return a file-like object that operates on the ssl connection
def __add_document_structure(self, docgraph, remove_redundant_layers=True):
    """Return an Exmaralda XML etree for *docgraph*: header, common timeline,
    token tiers, annotation tiers and coreference chain tiers.

    :param remove_redundant_layers: when True, only informative annotation
        layers are exported.
    """
    E = self.E
    root = self.__create_document_header()
    body = E('basic-body')
    timeline = E('common-timeline')
    # One timeline point per token boundary (tokens + 1).  Python 2 ``xrange``.
    for i in xrange(len(docgraph.tokens)+1):
        idx = str(i)
        timeline.append(E('tli', {'id': 'T'+idx, 'time': idx}))
    body.append(timeline)
    body = self.__add_token_tiers(docgraph, body)
    annotation_layers = get_annotation_layers(docgraph)
    for layer in annotation_layers:
        if not remove_redundant_layers:
            # Export every layer unconditionally.
            self.__add_annotation_tier(docgraph, body, layer)
        elif is_informative(layer):
            self.__add_annotation_tier(docgraph, body, layer)
    self.__add_coreference_chain_tiers(docgraph, body)
    root.append(body)
    return root
return an Exmaralda XML etree representation of a docgraph
def _set_virtual(self, key, value):
    """Recursively set or update a virtual key on this dict and all overlays.

    Does nothing when *key* already exists as a non-virtual (real) value.
    """
    if key in self and key not in self._virtual_keys:
        return  # a real (non-virtual) value shadows virtual updates
    self._virtual_keys.add(key)
    if key in self and self[key] is not value:
        self._on_change(key, value)
    # Bypass any overridden __setitem__ so the change stays "virtual".
    dict.__setitem__(self, key, value)
    for overlay in self._iter_overlays():
        overlay._set_virtual(key, value)
Recursively set or update virtual keys. Do nothing if non-virtual value is present.
def find_collection_ids(self, search_pattern="", identifier="", fetched=0, page=1):
    """Return resource URIs for every resource matching the given filters.

    :param str search_pattern: substring to match against resource titles
        ("" fetches all resources).
    :param str identifier: identifier to match; wildcards supported, no
        substring matching.
    :param int fetched: number of results already retrieved (pagination).
    :param int page: 1-based results page to start from.
    :return: list of matched resource URIs.
    """
    params = {"page": page, "q": "primary_type:resource"}
    if search_pattern != "":
        escaped_title = self._escape_solr_query(search_pattern, field="title")
        params["q"] = params["q"] + " AND title:{}".format(escaped_title)
    if identifier != "":
        escaped_id = self._escape_solr_query(identifier, field="identifier")
        params["q"] = params["q"] + " AND identifier:{}".format(escaped_id)
    results = []
    # Iterate pages instead of recursing: the original recursive call dropped
    # search_pattern/identifier, so pages after the first were unfiltered.
    while True:
        response = self._get(self.repository + "/search", params=params)
        hits = response.json()
        results.extend(r["uri"] for r in hits["results"])
        fetched += hits["this_page"]
        if hits["total_hits"] <= fetched:
            return results
        params["page"] += 1
Fetches a list of resource URLs for every resource in the database. :param string search_pattern: A search pattern to use in looking up resources by title or resourceid. The search will match any title containing this string; for example, "text" will match "this title has this text in it". If omitted, then all resources will be fetched. :param string identifier: Only records containing this identifier will be returned. Substring matching will not be performed; however, wildcards are supported. For example, searching "F1" will only return records with the identifier "F1", while searching "F*" will return "F1", "F2", etc. :return: A list containing every matched resource's URL. :rtype list:
def to_text(self, name, **kw):
    """Convert the node to text format (Python 2 code: ``print >>`` and the
    ``StringIO`` module).

    Each rdataset at the node is printed; keyword arguments are forwarded to
    each rdataset's to_text().  The trailing newline is stripped.

    @param name: the owner name of the rdatasets
    @rtype: string
    """
    s = StringIO.StringIO()
    for rds in self.rdatasets:
        print >> s, rds.to_text(name, **kw)
    # Drop the final newline added by the last print.
    return s.getvalue()[:-1]
Convert a node to text format. Each rdataset at the node is printed. Any keyword arguments to this method are passed on to the rdataset's to_text() method. @param name: the owner name of the rdatasets @type name: dns.name.Name object @rtype: string
def find(name, path=(), parent=None):
    """Return a Module describing the first match for (possibly dotted) *name*
    on the search path, or *parent* when the module cannot be found.

    Uses the deprecated ``imp`` module (removed in Python 3.12).

    :param str name: module name.
    :param path: tuple of directory names to search.
    :param Module parent: optional parent module.
    """
    assert isinstance(path, tuple)
    head, _, tail = name.partition('.')
    try:
        tup = imp.find_module(head, list(path))
    except ImportError:
        return parent
    fp, modpath, (suffix, mode, kind) = tup
    if fp:
        fp.close()
    # Avoid resolving a module back to its own parent package.
    if parent and modpath == parent.path:
        return None
    if kind == imp.PKG_DIRECTORY:
        modpath = os.path.join(modpath, '__init__.py')
    module = Module(head, modpath, kind, parent)
    # Recurse into the package for any remaining dotted components.
    if tail and kind == imp.PKG_DIRECTORY:
        return find_relative(module, tail, path)
    return module
Return a Module instance describing the first matching module found on the search path. :param str name: Module name. :param list path: List of directory names to search for the module. :param Module parent: Optional module parent.
def _compose_custom_getters(getter_a, getter_b):
    """Compose two custom variable getters into one.

    Either getter may be falsy, in which case the other is returned unchanged.
    The composition behaves like nesting variable scopes with each getter,
    without creating a new scope.
    """
    if not getter_a:
        return getter_b
    if not getter_b:
        return getter_a
    def composed(getter, *args, **kwargs):
        chained = functools.partial(getter_a, getter)
        return getter_b(chained, *args, **kwargs)
    return composed
Compose two custom getters. Example use: tf.get_variable_scope().set_custom_getter( compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter)) This composes getters in the same way as creating a new variable scope with the new_getter, but it does not actually create a new variable scope. Args: getter_a: a custom getter - generally from the existing variable scope. getter_b: a custom getter Returns: a custom getter
def __unset_binding(self, dependency, service, reference):
    """Unbind *service* from the component.

    Fires the field- and method-level unbind callbacks, restores the injected
    field to the dependency's current value, then releases the service.

    :param dependency: the dependency handler
    :param service: the injected service
    :param reference: the reference of the injected service
    """
    self.__safe_field_callback(
        dependency.get_field(),
        constants.IPOPO_CALLBACK_UNBIND_FIELD,
        service,
        reference,
    )
    self.safe_callback(constants.IPOPO_CALLBACK_UNBIND, service, reference)
    # Re-inject whatever the dependency now resolves to (may be None).
    setattr(self.instance, dependency.get_field(), dependency.get_value())
    self.bundle_context.unget_service(reference)
Removes a service from the component :param dependency: The dependency handler :param service: The injected service :param reference: The reference of the injected service
def start_batch(job, input_args):
    """Root job: parse the SRA manifest and schedule the download/transfer child job."""
    sample_list = parse_sra(input_args['sra'])
    job.addChildJobFn(download_and_transfer_sample, input_args, sample_list,
                      cores=1, disk='30')
This function will administer 5 jobs at a time then recursively call itself until subset is empty
def from_file(xmu_dat_file="xmu.dat", feff_inp_file="feff.inp"):
    """Build an Xmu object from an xmu.dat spectrum and its feff.inp input.

    Args:
        xmu_dat_file (str): filename and path for xmu.dat
        feff_inp_file (str): filename and path of the feff.inp input file

    Returns:
        Xmu object
    """
    data = np.loadtxt(xmu_dat_file)
    header = Header.from_file(feff_inp_file)
    parameters = Tags.from_file(feff_inp_file)
    pots = Potential.pot_string_from_file(feff_inp_file)
    # Reciprocal-space runs name the absorber via TARGET; otherwise it is
    # read from the potentials block (4th line, 3rd column).
    if "RECIPROCAL" in parameters:
        absorbing_atom = parameters["TARGET"]
    else:
        absorbing_atom = pots.splitlines()[3].split()[2]
    return Xmu(header, parameters, absorbing_atom, data)
Get Xmu from file. Args: xmu_dat_file (str): filename and path for xmu.dat feff_inp_file (str): filename and path of feff.inp input file Returns: Xmu object
def merge_xml(first_doc, second_doc):
    """Merge *second_doc* into *first_doc*, matching children by tag.

    Leaf elements overwrite the matching element's text; non-leaf elements
    are merged recursively; unmatched elements are appended.

    Args:
        first_doc: first XML document; *second_doc* is merged into it.
        second_doc: second XML document.

    Returns:
        The merged document as an lxml element.

    NOTE(review): only the first child per tag is tracked in ``mapping`` —
    duplicate sibling tags are collapsed.
    """
    # Normalize both inputs to xml.etree elements for uniform handling.
    if isinstance(first_doc, lxml.etree._Element):
        first_doc = ET.fromstring(lxml.etree.tostring(first_doc))
    if isinstance(second_doc, lxml.etree._Element):
        second_doc = ET.fromstring(lxml.etree.tostring(second_doc))
    mapping = {element.tag: element for element in first_doc}
    for element in second_doc:
        if not len(element):
            # Leaf node: copy text onto the matching element, or append it.
            try:
                mapping[element.tag].text = element.text
            except KeyError:
                mapping[element.tag] = element
                first_doc.append(element)
        else:
            try:
                merge_xml(mapping[element.tag], element)
            except KeyError:
                mapping[element.tag] = element
                first_doc.append(element)
    return lxml.etree.fromstring(ET.tostring(first_doc))
Merges two XML documents. Args: first_doc (str): First XML document. `second_doc` is merged into this document. second_doc (str): Second XML document. It is merged into the first. Returns: XML Document: The merged document. Raises: None Example: >>> import pynos.utilities >>> import lxml >>> import xml >>> x = xml.etree.ElementTree.fromstring('<config />') >>> y = lxml.etree.fromstring('<config><hello /></config>') >>> x = pynos.utilities.merge_xml(x, y)
def keep_types_s(s, types):
    """Return only the entries of a returns-like section *s* for the given *types*.

    Each entry starts with a type identifier on its own line followed by an
    indented description.

    Parameters
    ----------
    s: str
        The string of the returns-like section
    types: list of str
        The type identifiers to keep

    Returns
    -------
    str
        The modified string `s` with only the descriptions of `types`
    """
    # Use a scoped (?s:...) group: a global (?s) flag in mid-pattern raises
    # re.error on Python >= 3.11.
    patt = '|'.join(
        '(?<=\n)' + t + '\n(?s:.+?)\n(?=\S+|$)' for t in types)
    return ''.join(re.findall(patt, '\n' + s.strip() + '\n')).rstrip()
Keep the given types from a string Same as :meth:`keep_types` but does not use the :attr:`params` dictionary Parameters ---------- s: str The string of the returns like section types: list of str The type identifiers to keep Returns ------- str The modified string `s` with only the descriptions of `types`
def get_chr2idx(self):
    """Return a dict mapping each ASCII-art character to its index in self.all_chrints."""
    mapping = {}
    for position, code_point in enumerate(self.all_chrints):
        mapping[chr(code_point)] = position
    return mapping
Return a dict with the ASCII art character as key and its index as value.
def name(self):
    """Return the nicest available name: alias if set, else caption, else the id."""
    for attr in ('alias', 'caption'):
        candidate = getattr(self, attr, None)
        if candidate:
            return candidate
    return self.id
Provides a nice name for the field which is derived from the alias, caption, or the id. The name resolves as either the alias if it's defined, or the caption if alias is not defined, and finally the id which is the underlying name if neither of the fields exist.
def _set_frequency_spacing(self, min_freq, max_freq):
    """Store an evenly spaced grid over [min_freq, max_freq] on self.frequency_spacing.

    The grid has one more point than the sheet has rows, so each row maps to
    an interval of the frequency range.
    """
    num_points = self._sheet_dimensions[0] + 1
    self.frequency_spacing = np.linspace(min_freq, max_freq,
                                         num=num_points, endpoint=True)
Frequency spacing to use, i.e. how to map the available frequency range to the discrete sheet rows. NOTE: We're calculating the spacing of a range between the highest and lowest frequencies, the actual segmentation and averaging of the frequencies to fit this spacing occurs in _getAmplitudes(). This method is here solely to provide a minimal overload if custom spacing is required.
def has_parent_families(self, family_id):
    """Return True when the ``Family`` has any parents.

    Delegates to the catalog session when one is configured, otherwise to
    the hierarchy session.
    """
    if self._catalog_session is None:
        return self._hierarchy_session.has_parents(id_=family_id)
    return self._catalog_session.has_parent_catalogs(catalog_id=family_id)
Tests if the ``Family`` has any parents. arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if the family has parents, ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def get_syllabus(self, site):
    """Return the syllabus HTML for a course site ('' when it has no syllabus tool).

    The syllabus may or may not contain HTML, depending on the site; TSquare
    does not enforce whether pages contain HTML.
    """
    tools = self.get_tools(site)
    syllabus_urls = [x.href for x in tools if x.name == 'syllabus']
    if not syllabus_urls:
        return ''
    response = self._session.get(syllabus_urls[0])
    response.raise_for_status()
    iframes = self._html_iface.get_iframes(response.text)
    iframe_url = ''
    for frame in iframes:
        # The trailing space in 'Syllabus ' matches the page markup.
        if frame['title'] == 'Syllabus ':
            iframe_url = frame['src']
    if iframe_url == '':
        # Fixed: Python 2 print statement (py3 syntax error) and 'WARHING' typo.
        print("WARNING: NO SYLLABUS IFRAME FOUND")
    response = self._session.get(iframe_url)
    response.raise_for_status()
    syllabus_html = self._html_iface.get_syllabus(response.text)
    return syllabus_html
Gets the syllabus for a course. The syllabus may or may not contain HTML, depending on the site. TSquare does not enforce whether or not pages are allowed to have HTML, so it is impossible to tell.
def _assign_entity_to_pb(entity_pb, entity):
    """Copy *entity* into *entity_pb*.  Helper for ``Batch.put``.

    :param entity_pb: the protobuf entity owned by a mutation.
    :param entity: the datastore entity being updated in the batch/transaction.
    """
    bare_entity_pb = helpers.entity_to_protobuf(entity)
    # Self-copy of the key: presumably forces the key field to be marked as
    # set on the protobuf even when empty — TODO confirm intent.
    bare_entity_pb.key.CopyFrom(bare_entity_pb.key)
    entity_pb.CopyFrom(bare_entity_pb)
Copy ``entity`` into ``entity_pb``. Helper method for ``Batch.put``. :type entity_pb: :class:`.entity_pb2.Entity` :param entity_pb: The entity owned by a mutation. :type entity: :class:`google.cloud.datastore.entity.Entity` :param entity: The entity being updated within the batch / transaction.
def _log_statistics(self):
    """Log row counts and rows-per-second rates for the transform/load phases."""
    transform_seconds = self._time1 - self._time0
    load_seconds = self._time2 - self._time1
    total_seconds = self._time3 - self._time0
    rate_transform = self._count_total / transform_seconds
    rate_load = self._count_transform / load_seconds
    rate_overall = self._count_total / total_seconds
    for message in ('Number of rows processed : {0:d}'.format(self._count_total),
                    'Number of rows transformed : {0:d}'.format(self._count_transform),
                    'Number of rows ignored : {0:d}'.format(self._count_ignore),
                    'Number of rows parked : {0:d}'.format(self._count_park),
                    'Number of errors : {0:d}'.format(self._count_error),
                    'Number of rows per second processed : {0:d}'.format(int(rate_transform)),
                    'Number of rows per second loaded : {0:d}'.format(int(rate_load)),
                    'Number of rows per second overall : {0:d}'.format(int(rate_overall))):
        self._log(message)
Log statistics about the number of rows and number of rows per second.
def _find_min_start(text, max_width, unicode_aware=True, at_end=False):
    """Return the offset into *text* to start at so the remainder fits in
    *max_width* display columns.

    :param text: the text to analyze.
    :param max_width: the required maximum display width.
    :param unicode_aware: measure wide characters with wcwidth instead of len.
    :param at_end: at the end of the editable line, so leave room for the cursor.
    """
    # Fast path: clearly short enough even if every char were double-width.
    if 2 * len(text) < max_width:
        return 0
    measure = wcswidth if unicode_aware else len
    char_width = wcwidth if unicode_aware else (lambda _ch: 1)
    offset = 0
    remaining = measure(text)
    while remaining > max_width:
        remaining -= char_width(text[offset])
        offset += 1
    if at_end and remaining == max_width:
        offset += 1
    return offset
Find the starting point in the string that will reduce it to be less than or equal to the specified width when displayed on screen. :param text: The text to analyze. :param max_width: The required maximum width :param at_end: At the end of the editable line, so allow spaced for cursor. :return: The offset within `text` to start at to reduce it to the required length.
def remove(self, child):
    """Remove the given child: detach and return it when it is an Element,
    remove it from self.attributes (returning None) when it is an Attribute."""
    if isinstance(child, Element):
        return child.detach()
    if isinstance(child, Attribute):
        self.attributes.remove(child)
    return None
Remove the specified child element or attribute. @param child: A child to remove. @type child: L{Element}|L{Attribute} @return: The detached I{child} when I{child} is an element, else None. @rtype: L{Element}|None
def get_version(package_name, ignore_cache=False):
    """Return the version currently configured in <package_name>/_version.py.

    :param package_name: package whose _version.py is inspected.
    :param ignore_cache: bypass microcache for this single lookup.
    :raises ProjectError: if __version__ is not defined in the file.
    """
    def _lookup():
        # Single definition of the lookup; the original duplicated this call
        # in both branches.
        return helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True)

    if ignore_cache:
        with microcache.temporarily_disabled():
            found = _lookup()
    else:
        found = _lookup()
    if found is None:
        # Fixed: the original message contained an unformatted '{}' placeholder.
        raise ProjectError(
            'found _version.py for {}, but __version__ is not defined'.format(package_name))
    return found['version']
Get the version which is currently configured by the package
def origin_north_africa(origin):
    """Return whether *origin* is located in North Africa
    (Algeria, Egypt, Libya, Morocco, Sudan, Tunisia)."""
    checks = (origin_egypt, origin_algeria, origin_libya,
              origin_morocco, origin_sudan, origin_tunisia)
    # Mirror the original or-chain: return the first truthy result, or the
    # last (falsy) result when none match.
    for check in checks:
        result = check(origin)
        if result:
            return result
    return result
Returns whether the origin is located in North Africa. Holds true for the following countries: * Algeria * Egypt * Libya * Morocco * Sudan * Tunisia `origin` The origin to check.
def main(self):
    """Return the default method of this module.

    Prefers a method named 'Main' or 'main'; falls back to the first method,
    or None when the module has no methods.
    """
    for method in self.methods:
        if method.name in ('Main', 'main'):
            return method
    if self.methods:
        return self.methods[0]
    return None
Return the default method in this module. :return: the default method in this module :rtype: ``boa.code.method.Method``
def sg_regularizer_loss(scale=1.0):
    r"""Return the mean of the collected regularization losses, scaled.

    Args:
        scale: A scalar weight applied to the regularizer loss.
    """
    # Removed a stray 'r' token (a mangled raw-docstring prefix) that made
    # the original body a syntax error.
    return scale * tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
r""" Get regularizer losss Args: scale: A scalar. A weight applied to regularizer loss
def contains_parent_dir(fpath, dirs):
    """Return True when any directory in *dirs* is a parent of *fpath*.

    Precondition: *dirs* and *fpath* are already normalized.
    """
    return any(_f(fpath, candidate) for candidate in dirs)
Returns true if paths in dirs start with fpath. Precondition: dirs and fpath should be normalized before calling this function.
def Sum(*args: BitVec) -> BitVec:
    """Return the sum of *args* as a BitVec, merging all operand annotations.

    With exactly one BitVecFunc operand its func identity is preserved; with
    two or more the result is an anonymous BitVecFunc.
    """
    raw = z3.Sum([a.raw for a in args])
    annotations = []
    bitvecfuncs = []
    for bv in args:
        annotations += bv.annotations
        if isinstance(bv, BitVecFunc):
            bitvecfuncs.append(bv)
    if len(bitvecfuncs) >= 2:
        # Mixing several functions: the combined value has no single origin.
        return BitVecFunc(raw=raw, func_name=None, input_=None, annotations=annotations)
    elif len(bitvecfuncs) == 1:
        return BitVecFunc(
            raw=raw,
            func_name=bitvecfuncs[0].func_name,
            input_=bitvecfuncs[0].input_,
            annotations=annotations,
        )
    return BitVec(raw, annotations)
Create sum expression. :return:
def permissions(self, course_id, permissions=None):
    """Return permission information for the given course and the current user.

    GET /api/v1/courses/{course_id}/permissions
    """
    path = {"course_id": course_id}
    data = {}
    params = {}
    if permissions is not None:
        params["permissions"] = permissions
    self.logger.debug("GET /api/v1/courses/{course_id}/permissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/permissions".format(**path), data=data, params=params, no_data=True)
Permissions. Returns permission information for provided course & current_user
def _update(self, data):
    """Update the object with new data.

    Dict values (and dicts nested inside lists) are wrapped in this class;
    everything is set as an attribute.
    """
    # Replaced six.iteritems(data) with data.items(): identical iteration
    # behavior on both Python 2 and 3, and drops the third-party dependency.
    for key, value in data.items():
        if isinstance(value, dict):
            value = type(self)(value)
        elif isinstance(value, list):
            value = [(type(self)(e) if isinstance(e, dict) else e) for e in value]
        setattr(self, key, value)
Update the object with new data.
def roster(team_id):
    """Return a Roster object containing roster info for the given team."""
    raw = mlbgame.info.roster(team_id)
    return mlbgame.info.Roster(raw)
Return Roster object that contains roster info for a team
def _find_errors_param(self):
    """Return the per-alpha error array from the wrapped estimator.

    Looks for ``mse_path_`` (averaged over folds) or ``cv_values_`` (averaged
    over samples); raises YellowbrickValueError when neither is present.
    """
    estimator = self.estimator
    if hasattr(estimator, 'mse_path_'):
        return estimator.mse_path_.mean(1)
    if hasattr(estimator, 'cv_values_'):
        return estimator.cv_values_.mean(0)
    raise YellowbrickValueError(
        "could not find errors param on {} estimator".format(
            type(estimator).__name__
        )
    )
Searches for the parameter on the estimator that contains the array of errors that was used to determine the optimal alpha. If it cannot find the parameter then a YellowbrickValueError is raised.
def intersect_sites_method(form):
    """Return a predicate telling whether a sibling page shares a site with *form*.

    When the site framework is not used, the predicate accepts every sibling.
    """
    if settings.PAGE_USE_SITE_ID:
        if settings.PAGE_HIDE_SITES:
            # Sites hidden from the form: restrict to the current site only.
            site_ids = [global_settings.SITE_ID]
        else:
            site_ids = [int(x) for x in form.data.getlist('sites')]
        def intersects_sites(sibling):
            return sibling.sites.filter(id__in=site_ids).count() > 0
    else:
        def intersects_sites(sibling):
            # Site framework unused: every sibling matches.
            return True
    return intersects_sites
Return a method to intersect sites.
def _command(self, event, command, *args, **kwargs):
    """Check that the transition for *event* is allowed, execute *command*,
    and fire the pre_/post_ hooks around it.

    :param event: (str) event generated by the command.
    :param command: state transition to impose.
    :raises RuntimeError: if the transition is not possible.
    """
    pre_hook, post_hook = 'pre_%s' % event, 'post_%s' % event
    self._assert_transition(event)
    self.trigger(pre_hook, **kwargs)
    self._execute_command(command, *args)
    self.trigger(post_hook, **kwargs)
Context state controller. Check whether the transition is possible or not, it executes it and triggers the Hooks with the pre_* and post_* events. @param event: (str) event generated by the command. @param command: (virDomain.method) state transition to impose. @raise: RuntimeError.
def image_format(value):
    """Validate that the uploaded file's image format is supported.

    Args:
        value (File): file with an `image` property containing the image.
    Raises:
        django.forms.ValidationError: when the format is not allowed.
    """
    detected = value.image.format.upper()
    if detected not in constants.ALLOWED_IMAGE_FORMATS:
        raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT)
Confirms that the uploaded image is of supported format. Args: value (File): The file with an `image` property containing the image Raises: django.forms.ValidationError
def tag_torsion_angles(self, force=False):
    """Tag each protein polymer's Monomers with torsion angles.

    Parameters
    ----------
    force : bool, optional
        If True, re-tag even Monomers that are already tagged.
    """
    protein_chains = (p for p in self._molecules if p.molecule_type == 'protein')
    for chain in protein_chains:
        chain.tag_torsion_angles(force=force)
Tags each `Monomer` in the `Assembly` with its torsion angles. Parameters ---------- force : bool, optional If `True`, the tag will be run even if `Monomers` are already tagged.
def process_response(self, request, response, spider):
    """Middleware hook: re-serialize response.body through BeautifulSoup using self.parser."""
    return response.replace(body=str(BeautifulSoup(response.body, self.parser)))
Overridden process_response would "pipe" response.body through BeautifulSoup.
def search_traceback(self, tb):
    """Inspect traceback *tb* for source files not yet in the watched path set
    and start watching any new ones."""
    discovered = []
    with self.lock:
        for frame_file, _line, _func, _text in traceback.extract_tb(tb):
            abs_path = os.path.abspath(frame_file)
            if abs_path in self.paths:
                continue
            self.paths.add(abs_path)
            discovered.append(abs_path)
    if discovered:
        self.watch_paths(discovered)
Inspect a traceback for new paths to add to our path set.
def main():
    """CLI: read two files line-by-line and print word-level edit distances
    between each pair of lines.

    Terminates at the end of the shorter file (zip semantics); exits with -1
    on bad usage.
    """
    if len(sys.argv) != 3:
        print('Usage: {} <file1> <file2>'.format(sys.argv[0]))
        exit(-1)
    file1 = sys.argv[1]
    file2 = sys.argv[2]
    with open(file1) as f1, open(file2) as f2:
        for line1, line2 in zip(f1, f2):
            print("Line 1: {}".format(line1.strip()))
            print("Line 2: {}".format(line2.strip()))
            # Word-level distance between the two lines.
            dist, _, _ = edit_distance_backpointer(line1.split(), line2.split())
            print('Distance: {}'.format(dist))
            print('=' * 80)
Read two files line-by-line and print edit distances between each pair of lines. Will terminate at the end of the shorter of the two files.
def parse_MML(self, mml):
    """Parse a MessageML string.

    Returns a tuple of (hashtag tags, mention uids, stripped message text).
    """
    hashes_c = []
    mentions_c = []
    soup = BeautifulSoup(mml, "lxml")
    # Only <hash> elements that carry a 'tag' attribute.
    hashes = soup.find_all('hash', {"tag": True})
    for hashe in hashes:
        hashes_c.append(hashe['tag'])
    # Only <mention> elements that carry a 'uid' attribute.
    mentions = soup.find_all('mention', {"uid": True})
    for mention in mentions:
        mentions_c.append(mention['uid'])
    msg_string = soup.messageml.text.strip()
    self.logger.debug('%s : %s : %s' % (hashes_c, mentions_c, msg_string))
    return hashes_c, mentions_c, msg_string
parse the MML structure
def _make_attr_tuple_class(cls_name, attr_names):
    """Create a tuple subclass named <cls_name>Attributes with one property
    per attribute name, e.g.::

        class MyClassAttributes(tuple):
            __slots__ = ()
            x = property(itemgetter(0))
    """
    attr_class_name = "{}Attributes".format(cls_name)
    attr_class_template = [
        "class {}(tuple):".format(attr_class_name),
        " __slots__ = ()",
    ]
    if attr_names:
        for i, attr_name in enumerate(attr_names):
            # _tuple_property_pat renders one "name = property(itemgetter(i))" line.
            attr_class_template.append(
                _tuple_property_pat.format(index=i, attr_name=attr_name)
            )
    else:
        attr_class_template.append(" pass")
    globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
    # Execute the generated class body; the new class lands in `globs`.
    eval(compile("\n".join(attr_class_template), "", "exec"), globs)
    return globs[attr_class_name]
Create a tuple subclass to hold `Attribute`s for an `attrs` class. The subclass is a bare tuple with properties for names. class MyClassAttributes(tuple): __slots__ = () x = property(itemgetter(0))
def attr_dict(self):
    """Recursively collect attributes from this symbol and its children.

    Returns
    -------
    ret : dict of str to dict
        One key per child with a non-empty attribute set; each value is that
        symbol's attribute dictionary.
    """
    size = mx_uint()
    pairs = ctypes.POINTER(ctypes.c_char_p)()
    f_handle = _LIB.MXSymbolListAttr
    check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
    ret = {}
    for i in range(size.value):
        # Entries arrive as flat (key, value) pairs; keys have the form "name$attr".
        name, key = py_str(pairs[i * 2]).split('$')
        val = py_str(pairs[i * 2 + 1])
        if name not in ret:
            ret[name] = {}
        ret[name][key] = val
    return ret
Recursively gets all attributes from the symbol and its children. Example ------- >>> a = mx.sym.Variable('a', attr={'a1':'a2'}) >>> b = mx.sym.Variable('b', attr={'b1':'b2'}) >>> c = a+b >>> c.attr_dict() {'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}} Returns ------- ret : Dict of str to dict There is a key in the returned dict for every child with non-empty attribute set. For each symbol, the name of the symbol is its key in the dict and the correspond value is that symbol's attribute list (itself a dictionary).
def _embedPayload(slaveaddress, mode, functioncode, payloaddata):
    """Build a raw Modbus request string.

    Args:
        * slaveaddress (int): The address of the slave.
        * mode (str): The Modbus protocol mode (MODE_RTU or MODE_ASCII).
        * functioncode (int): The function code for the command.
        * payloaddata (str): The byte string to be sent to the slave.

    Returns:
        The built (raw) request string, including the LRC/CRC checksum
        (and, in ASCII mode, the header and CRLF footer).

    Raises:
        ValueError, TypeError.
    """
    _checkSlaveaddress(slaveaddress)
    _checkMode(mode)
    _checkFunctioncode(functioncode, None)
    _checkString(payloaddata, description='payload')

    # The checksum is computed over address + functioncode + payload.
    core = _numToOneByteString(slaveaddress) + _numToOneByteString(functioncode) + payloaddata

    if mode == MODE_ASCII:
        return _ASCII_HEADER + _hexencode(core) + _hexencode(_calculateLrcString(core)) + _ASCII_FOOTER
    return core + _calculateCrcString(core)
Build a request from the slaveaddress, the function code and the payload data.

Args:
    * slaveaddress (int): The address of the slave.
    * mode (str): The modbus protocol mode (MODE_RTU or MODE_ASCII)
    * functioncode (int): The function code for the command to be performed. Can for example be 16 (Write register).
    * payloaddata (str): The byte string to be sent to the slave.

Returns:
    The built (raw) request string for sending to the slave (including CRC etc).

Raises:
    ValueError, TypeError.

The resulting request has the format:
 * RTU Mode: slaveaddress byte + functioncode byte + payloaddata + CRC (which is two bytes).
 * ASCII Mode: header (:) + slaveaddress (2 characters) + functioncode (2 characters) + payloaddata + LRC (which is two characters) + footer (CRLF)

The LRC or CRC is calculated from the byte string made up of slaveaddress + functioncode + payloaddata.
The header, LRC/CRC, and footer are excluded from the calculation.
def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):
    """Accumulate gradients of the psi2 statistic w.r.t. the variational
    mean and variance, in place, into `target_mu` and `target_S`.

    Think N, num_inducing, num_inducing, input_dim.

    NOTE(review): relies on cached intermediates (_psi2, _psi2_denom,
    _psi2_mudist, _psi2_mudist_sq) refreshed by _psi_computations below —
    confirm the cache covers all of them.
    """
    # Refresh cached psi-statistics for these inducing inputs and q(X).
    self._psi_computations(Z, mu, S)
    tmp = (self.inv_lengthscale2 * self._psi2[:, :, :, None]) / self._psi2_denom
    # Sum out both inducing-point axes (1 and 2), accumulating (N, input_dim).
    target_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)
    target_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)
Think N,num_inducing,num_inducing,input_dim
def extract_twin_values(triples, traits, gender=None):
    """Extract paired trait values for twins.

    Parameters
    ==========
    triples: (a, b, "Female/Male") triples. The sample IDs are used to
        query the traits dictionary.
    traits: sample_id => value dictionary
    gender: if given, only twin pairs with a matching gender are kept

    Returns
    =======
    list of 2-tuples containing the paired trait values of the twins
    """
    absent = 0
    nans = 0
    skipped = 0
    pairs = []
    for first, second, sex in triples:
        if gender is not None and sex != gender:
            skipped += 1
        elif first not in traits or second not in traits:
            absent += 1
        elif np.isnan(traits[first]) or np.isnan(traits[second]):
            nans += 1
        else:
            pairs.append((traits[first], traits[second]))
    print("A total of {} pairs extracted ({} absent; {} nan; {} genderSkipped)"
          .format(len(pairs), absent, nans, skipped))
    return pairs
Extract paired trait values for twins, optionally filtered by gender.

Parameters
==========
triples: (a, b, "Female/Male") triples. The sample IDs are then used
to query the traits dictionary.
traits: sample_id => value dictionary
gender: if given, only twin pairs whose recorded gender matches are kept

Returns
=======
tuples of size 2, that contain paired trait values of the twins
def cli(env, sortby, datacenter):
    """List number of file storage volumes per datacenter."""
    file_manager = SoftLayer.FileStorageManager(env.client)
    mask = "mask[serviceResource[datacenter[name]],"\
           "replicationPartners[serviceResource[datacenter[name]]]]"
    volumes = file_manager.list_file_volumes(datacenter=datacenter, mask=mask)

    # Count volumes per datacenter; volumes without a datacenter are skipped.
    counts = {}
    for volume in volumes:
        service_resource = volume['serviceResource']
        if 'datacenter' in service_resource:
            name = service_resource['datacenter']['name']
            counts[name] = counts.get(name, 0) + 1

    table = formatting.KeyValueTable(DEFAULT_COLUMNS)
    table.sortby = sortby
    for name, count in counts.items():
        table.add_row([name, count])
    env.fout(table)
List number of file storage volumes per datacenter.
def _remove_zeros(votes, fpl, cl, ranking): for v in votes: for r in v: if r not in fpl: v.remove(r) for c in cl: if c not in fpl: if c not in ranking: ranking.append((c, 0))
Remove zeros in IRV voting.
def ExtractCredentialsFromPathSpec(self, path_spec):
    """Extracts credentials from a path specification.

    Args:
      path_spec (PathSpec): path specification to extract credentials from.
    """
    credentials = manager.CredentialsManager.GetCredentials(path_spec)
    for identifier in credentials.CREDENTIALS:
        # Only credentials actually set on the path spec are copied over.
        value = getattr(path_spec, identifier, None)
        if value is not None:
            self.SetCredential(path_spec, identifier, value)
Extracts credentials from a path specification. Args: path_spec (PathSpec): path specification to extract credentials from.
def get_constant(self, const_name, context):
    """Return unrolled const.

    Plain (pre-folded) constants are returned as-is; AnnAssign constants
    are compiled to an LLL node, which requires a function context.
    """
    const = self._constants[const_name]
    if not isinstance(const, ast.AnnAssign):
        return const
    if not context:
        raise VariableDeclarationException(
            "ByteArray: Can not be used outside of a function context: %s"
            % const_name
        )
    return Expr(const.value, context).lll_node
Return unrolled const
def delete(self, *args, **kwargs):
    """Executes an HTTP DELETE.

    :Parameters:
        - `args`: Non-keyword arguments
        - `kwargs`: Keyword arguments
    """
    prepared_kwargs = self.get_kwargs(**kwargs)
    return self.session.delete(*args, **prepared_kwargs)
Executes an HTTP DELETE. :Parameters: - `args`: Non-keyword arguments - `kwargs`: Keyword arguments
def assign_to(self, obj):
    """Copy this object's `x` and `y` onto *obj*."""
    obj.x, obj.y = self.x, self.y
Assign `x` and `y` to an object that has properties `x` and `y`.
def get(self, **kwargs):
    """Find correct filterchain based on generic variables.

    Returns the first chain whose `get_<key>()` matches any given keyword
    value, or None when nothing matches.
    """
    for chain in self.chains:
        for key, wanted in kwargs.items():
            getter_name = "get_" + key
            if hasattr(chain, getter_name) and getattr(chain, getter_name)() == wanted:
                return chain
    return None
Find correct filterchain based on generic variables
def findSensor(self, sensors, sensor_name, device_type=None):
    """Find a sensor in the provided list of sensors.

    @param sensors (list) - List of sensors to search in
    @param sensor_name (string) - Name of sensor to find
    @param device_type (string) - Device type of sensor to find, can be None
    @return (string) - sensor_id of sensor or None if not found
    """
    # Original had two near-identical loops and tested `== None`; merged
    # into one loop. The short-circuit keeps the original behavior of not
    # touching sensor['device_type'] when no device_type filter is given.
    for sensor in sensors:
        if sensor['name'] == sensor_name and (
                device_type is None or sensor['device_type'] == device_type):
            return sensor['id']
    return None
Find a sensor in the provided list of sensors @param sensors (list) - List of sensors to search in @param sensor_name (string) - Name of sensor to find @param device_type (string) - Device type of sensor to find, can be None @return (string) - sensor_id of sensor or None if not found
def register_auth_system(self, auth_system):
    """Register a given authentication system with the framework.

    Returns `True` if the `auth_system` is registered as the active auth
    system, else `False`

    Args:
        auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register

    Returns:
        `bool`
    """
    auth_system_settings = dbconfig.get('auth_system')

    # Ensure the plugin is listed as an available choice in the DB config.
    if auth_system.name not in auth_system_settings['available']:
        auth_system_settings['available'].append(auth_system.name)
        dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))

    if auth_system.name != auth_system_settings['enabled'][0]:
        logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))
        return False

    self.active_auth_system = auth_system
    auth_system().bootstrap()
    logger.debug('Registered {} as the active auth system'.format(auth_system.name))
    return True
Register a given authentication system with the framework. Returns `True` if the `auth_system` is registered as the active auth system, else `False` Args: auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register Returns: `bool`
def _update_with_csrf_disabled(d=None):
    """Update the input dict with CSRF disabled depending on WTF-Form version.

    From Flask-WTF 0.14.0, `csrf_enabled` param has been deprecated in favor
    of `meta={csrf: True/False}`.
    """
    result = {} if d is None else d

    import flask_wtf
    from pkg_resources import parse_version

    if parse_version(flask_wtf.__version__) >= parse_version("0.14.0"):
        result.setdefault('meta', {})
        result['meta'].update({'csrf': False})
    else:
        result['csrf_enabled'] = False
    return result
Update the input dict with CSRF disabled depending on WTF-Form version. From Flask-WTF 0.14.0, `csrf_enabled` param has been deprecated in favor of `meta={csrf: True/False}`.
def _rshift_logical(self, shift_amount):
    """
    Logical shift right with a concrete shift amount

    :param int shift_amount: Number of bits to shift right.
    :return: The new StridedInterval after right shifting
    :rtype: StridedInterval
    """
    if self.is_empty:
        return self

    # Split the interval (presumably at the wrap-around point — see
    # _ssplit) so each half can be shifted as a plain unsigned interval.
    ssplit = self._ssplit()
    if len(ssplit) == 1:
        # Non-wrapping interval: shift the bounds directly.
        l = self.lower_bound >> shift_amount
        u = self.upper_bound >> shift_amount
        # The stride shrinks with the shift but must stay at least 1.
        stride = max(self.stride >> shift_amount, 1)
        return StridedInterval(bits=self.bits,
                               lower_bound=l,
                               upper_bound=u,
                               stride=stride,
                               uninitialized=self.uninitialized
                               )
    else:
        # Wrapping interval: shift each half independently, then union.
        a = ssplit[0]._rshift_logical(shift_amount)
        b = ssplit[1]._rshift_logical(shift_amount)
        return a.union(b)
Logical shift right with a concrete shift amount :param int shift_amount: Number of bits to shift right. :return: The new StridedInterval after right shifting :rtype: StridedInterval
def port_pair(self):
    """The port and its transport as a pair; defaults to "tcp" when the
    transport was not specified."""
    transport = "tcp" if self.transport is NotSpecified else self.transport
    return (self.port, transport)
The port and its transport as a pair
def collect_all_bucket_keys(self):
    """Collect the bucket keys of the whole subtree (leaves only)."""
    if not self.childs:
        return [self.bucket_key]
    keys = []
    for child in self.childs.values():
        keys.extend(child.collect_all_bucket_keys())
    return keys
Just collects all buckets keys from subtree
def remove_tree(self, dirname):
    """Remove whole directory tree

    Reimplemented in project explorer widget

    Retries while the directory still exists; on an OSError (e.g. a locked
    file on Windows) the offending path is force-removed and the loop tries
    again. Other exceptions are deliberately swallowed so the loop keeps
    retrying (preserves the original best-effort behavior).
    """
    while osp.exists(dirname):
        try:
            shutil.rmtree(dirname, onerror=misc.onerror)
        except Exception as e:
            # BUGFIX: comparing type(e).__name__ to "OSError" missed
            # subclasses such as PermissionError; isinstance covers them.
            if isinstance(e, OSError):
                error_path = to_text_string(e.filename)
                shutil.rmtree(error_path, ignore_errors=True)
Remove whole directory tree Reimplemented in project explorer widget
def remove(self, value):
    """Remove value from self.

    Args:
        value: Element to remove from self

    Raises:
        ValueError: if element is not present
    """
    try:
        index = self._dict[value]
    except KeyError:
        # BUGFIX: the original never interpolated `value` into the message,
        # so the error always read literally 'Value "%s" is not present.'.
        raise ValueError('Value "%s" is not present.' % (value,))
    else:
        del self[index]
Remove value from self.

Args:
    value: Element to remove from self

Raises:
    ValueError: if element is not present
def find_related(self, fullname):
    """
    Return a list of non-stdlib modules that are imported directly or
    indirectly by `fullname`, plus their parents.

    This method is like :py:meth:`find_related_imports`, but also
    recursively searches any modules which are imported by `fullname`.

    :param fullname:
        Fully qualified name of an _already imported_ module for which
        source code can be retrieved
    :type fullname: str
    """
    # Breadth-first walk over the import graph starting at `fullname`.
    queue = [fullname]
    seen = set()
    while queue:
        current = queue.pop(0)
        related = self.find_related_imports(current)
        pending = set(related) - seen - set(queue)
        queue.extend(pending)
        seen.update(related)
    seen.discard(fullname)
    return sorted(seen)
Return a list of non-stdlib modules that are imported directly or indirectly by `fullname`, plus their parents. This method is like :py:meth:`find_related_imports`, but also recursively searches any modules which are imported by `fullname`. :param fullname: Fully qualified name of an _already imported_ module for which source code can be retrieved :type fullname: str
def merge_dictionaries(base_dict, extra_dict):
    """Merge two dictionaries; on a shared key, the value from
    ``extra_dict`` wins.

    :param base_dict: first dictionary
    :type base_dict: dict
    :param extra_dict: second dictionary
    :type extra_dict: dict
    :return: a merge of the two dictionaries
    :rtype: dict
    """
    # .copy() (rather than dict()) preserves the concrete type of base_dict.
    merged = base_dict.copy()
    merged.update(extra_dict)
    return merged
Merge two dictionaries. If both have the same key, the value from extra_dict is taken.

:param base_dict: first dictionary
:type base_dict: dict
:param extra_dict: second dictionary
:type extra_dict: dict
:return: a merge of the two dictionaries
:rtype: dict
def softplus(attrs, inputs, proto_obj):
    """Applies the softplus activation function element-wise to the input
    (mapped to MXNet's 'softrelu' activation)."""
    updated_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'softrelu'})
    return 'Activation', updated_attrs, inputs
Applies the softplus activation function element-wise to the input.
def image_show(self, image_id):
    """Show image details and metadata for the given image id."""
    image = self.compute_conn.images.get(image_id)
    links = {link['rel']: link['href'] for link in image.links}
    ret = {
        'name': image.name,
        'id': image.id,
        'status': image.status,
        'progress': image.progress,
        'created': image.created,
        'updated': image.updated,
        'metadata': image.metadata,
        'links': links,
    }
    # Optional fields only some compute backends report.
    for optional in ('minDisk', 'minRam'):
        if hasattr(image, optional):
            ret[optional] = getattr(image, optional)
    return ret
Show image details and metadata
def bcr(eal_original, eal_retrofitted, interest_rate,
        asset_life_expectancy, asset_value, retrofitting_cost):
    """Compute the Benefit-Cost Ratio.

    BCR = (EALo - EALr)(1-exp(-r*t))/(r*C)

    Where:

    * BCR -- Benefit cost ratio
    * EALo -- Expected annual loss for original asset
    * EALr -- Expected annual loss for retrofitted asset
    * r -- Interest rate
    * t -- Life expectancy of the asset
    * C -- Retrofitting cost
    """
    annual_benefit = (eal_original - eal_retrofitted) * asset_value
    discount_factor = 1 - numpy.exp(- interest_rate * asset_life_expectancy)
    return annual_benefit * discount_factor / (interest_rate * retrofitting_cost)
Compute the Benefit-Cost Ratio. BCR = (EALo - EALr)(1-exp(-r*t))/(r*C) Where: * BCR -- Benefit cost ratio * EALo -- Expected annual loss for original asset * EALr -- Expected annual loss for retrofitted asset * r -- Interest rate * t -- Life expectancy of the asset * C -- Retrofitting cost
def inputAnalyzeCallback(self, *args, **kwargs):
    """Test callback that simulates per-directory analysis.

    Loops over the file list received from the read stage and optionally
    sleeps (self.f_sleepLength) to simulate work.

    NOTE(review): if called with no positional args, `d_read` is never
    bound and the body below raises NameError — presumably this callback
    is always invoked with the (path, read-result) tuple; confirm.
    """
    b_status = False
    filesRead = 0
    filesAnalyzed = 0
    # Keyword-style invocation: pick out recognized keys.
    for k, v in kwargs.items():
        if k == 'filesRead':
            d_DCMRead = v
        if k == 'path':
            str_path = v
    # Positional invocation: a (path, read-result-dict) tuple.
    if len(args):
        at_data = args[0]
        str_path = at_data[0]
        d_read = at_data[1]
    b_status = True
    self.dp.qprint("analyzing:\n%s" % self.pp.pformat(d_read['l_file']), level = 5)
    # Optional simulated work delay, controlled by the '--test <delay>' flag.
    if int(self.f_sleepLength):
        self.dp.qprint("sleeping for: %f" % self.f_sleepLength, level = 5)
        time.sleep(self.f_sleepLength)
    filesAnalyzed = len(d_read['l_file'])
    return {
        'status': b_status,
        'filesAnalyzed': filesAnalyzed,
        'l_file': d_read['l_file']
    }
Test method for inputAnalyzeCallback

This method loops over the passed number of files, and
optionally "delays" in each loop to simulate some analysis.
The delay length is specified by the '--test <delay>' flag.
def setup_shiny():
    """Manage a Shiny instance.

    Returns a server-process definition: a launch-command factory plus
    launcher-entry metadata (title and icon).
    """
    name = 'shiny'  # NOTE(review): unused — confirm whether it can be dropped

    def _get_shiny_cmd(port):
        # Write a throwaway shiny-server config bound to the given port.
        # NOTE(review): dedent() is called with no argument — the config
        # template string literal appears to be missing here, so this
        # raises TypeError at runtime; restore the template before use.
        conf = dedent(
        ).format(
            user=getpass.getuser(),
            port=str(port),
            site_dir=os.getcwd()
        )
        f = tempfile.NamedTemporaryFile(mode='w', delete=False)
        f.write(conf)
        f.close()
        return ['shiny-server', f.name]

    return {
        'command': _get_shiny_cmd,
        'launcher_entry': {
            'title': 'Shiny',
            'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg')
        }
    }
Manage a Shiny instance.
def re_flags_str(flags, custom_flags):
    """Convert regexp flags to string.

    Parameters
    ----------
    flags : `int`
        Flags.
    custom_flags : `int`
        Custom flags.

    Returns
    -------
    `str`
        Flag string.
    """
    standard = [flag for flag in RE_FLAGS if flags & getattr(re, flag)]
    custom = [flag for flag in RE_CUSTOM_FLAGS if custom_flags & getattr(ReFlags, flag)]
    return ''.join(standard + custom)
Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string.
def merge(self, other_roc):
    """Ingest the values of another DistributedROC object into this one and
    update the statistics inplace.

    Args:
        other_roc: another DistributedROC object.
    """
    same_size = other_roc.thresholds.size == self.thresholds.size
    if same_size and np.all(other_roc.thresholds == self.thresholds):
        self.contingency_tables += other_roc.contingency_tables
    else:
        print("Input table thresholds do not match.")
Ingest the values of another DistributedROC object into this one and update the statistics inplace. Args: other_roc: another DistributedROC object.
def reapply_sampling_strategies(self):
    """Reapply all sensor strategies using cached values.

    Sensors that no longer exist are skipped with a warning; per-sensor
    failures are logged without aborting the remaining reapplications.
    """
    check_sensor = self._inspecting_client.future_check_sensor
    # list() snapshots the cache so concurrent mutation during the yields
    # below cannot break iteration.
    for sensor_name, strategy in list(self._strategy_cache.items()):
        try:
            sensor_exists = yield check_sensor(sensor_name)
            if not sensor_exists:
                self._logger.warn('Did not set strategy for non-existing sensor {}'
                                  .format(sensor_name))
                continue

            result = yield self.set_sampling_strategy(sensor_name, strategy)
        except KATCPSensorError as e:
            self._logger.error('Error reapplying strategy for sensor {0}: {1!s}'
                               .format(sensor_name, e))
        except Exception:
            # Catch-all so one broken sensor cannot stop the whole loop.
            self._logger.exception('Unhandled exception reapplying strategy for '
                                   'sensor {}'.format(sensor_name), exc_info=True)
Reapply all sensor strategies using cached values
def segment_radial_dist(seg, pos):
    """Return the radial distance of a tree segment to a given point.

    The radial distance is the Euclidean distance between the mid-point of
    the segment and the point in question.

    Parameters:
        seg: tree segment
        pos: origin to which distances are measured. It must have at least
            3 components; the first 3 components are (x, y, z).
    """
    midpoint = np.divide(np.add(seg[0], seg[1]), 2.0)
    return point_dist(pos, midpoint)
Return the radial distance of a tree segment to a given point

The radial distance is the Euclidean distance between the mid-point of
the segment and the point in question.

Parameters:
    seg: tree segment

    pos: origin to which distances are measured. It must have at least
    3 components. The first 3 components are (x, y, z).
def is_in_team(self, team_id):
    """Test if user is in team (directly or via a child team).

    Super admins are considered members of every team.
    """
    if self.is_super_admin():
        return True
    # Accept UUID objects or their string form alike.
    normalized = uuid.UUID(str(team_id))
    return normalized in self.teams or normalized in self.child_teams_ids
Test if user is in team
def slugify(text, length_limit=0, delimiter=u'-'):
    """Generates an ASCII-only slug of a string, optionally truncated to
    `length_limit` characters."""
    words = []
    for chunk in _punctuation_regex.split(text.lower()):
        converted = _available_unicode_handlers[0](chunk)
        if converted:
            words.append(converted)
    slug = delimiter.join(words)
    return slug[0:length_limit] if length_limit > 0 else slug
Generates an ASCII-only slug of a string.
def parse_cookies(self, req: Request, name: str, field: Field) -> typing.Any:
    """Pull a value from the cookiejar."""
    cookie_jar = req.cookies
    return core.get_value(cookie_jar, name, field)
Pull a value from the cookiejar.
def invite_by_email(self, email, sender=None, request=None, **kwargs):
    """Creates an inactive user with the information we know and then sends
    an invitation email for that user to complete registration.

    If your project uses email in a different way then you should make to
    extend this method as it only checks the `email` attribute for Users.
    """
    try:
        user = self.user_model.objects.get(email=email)
    except self.user_model.DoesNotExist:
        # BUGFIX: inspect.getargspec was removed in Python 3.11; use
        # inspect.signature, which also handles keyword-only parameters.
        create_user_params = inspect.signature(
            self.user_model.objects.create_user
        ).parameters
        if "username" in create_user_params:
            user = self.user_model.objects.create(
                username=self.get_username(),
                email=email,
                password=self.user_model.objects.make_random_password(),
            )
        else:
            user = self.user_model.objects.create(
                email=email,
                password=self.user_model.objects.make_random_password(),
            )
        # Newly created invitees stay inactive until they complete signup.
        user.is_active = False
        user.save()
    self.send_invitation(user, sender, **kwargs)
    return user
Creates an inactive user with the information we know and then sends an invitation email for that user to complete registration. If your project uses email in a different way then you should make to extend this method as it only checks the `email` attribute for Users.
def ibis_schema_apply_to(schema, df):
    """Applies the Ibis schema to a pandas DataFrame

    Parameters
    ----------
    schema : ibis.schema.Schema
    df : pandas.DataFrame

    Returns
    -------
    df : pandas.DataFrame

    Notes
    -----
    Mutates `df`
    """
    for column, ibis_dtype in schema.items():
        target_dtype = ibis_dtype.to_pandas()
        series = df[column]
        try:
            needs_cast = target_dtype != series.dtype
        except TypeError:
            # Incomparable dtypes: be conservative and convert.
            needs_cast = True
        # Strings are always converted, even when dtypes already agree.
        if needs_cast or ibis_dtype == dt.string:
            df[column] = convert(series.dtype, ibis_dtype, series)
    return df
Applies the Ibis schema to a pandas DataFrame Parameters ---------- schema : ibis.schema.Schema df : pandas.DataFrame Returns ------- df : pandas.DataFrame Notes ----- Mutates `df`
def _raveled_index_for(self, param):
    """
    get the raveled index for a param
    that is an int array, containing the indexes for the flattened
    param inside this parameterized logic.

    !Warning! be sure to call this method on the highest parent of a
    hierarchy, as it uses the fixes to do its work
    """
    from ..param import ParamConcatenation
    if isinstance(param, ParamConcatenation):
        # BUGFIX: np.hstack was handed a generator expression; NumPy expects
        # a sequence of arrays and newer releases reject generators. Build a
        # list instead.
        return np.hstack([self._raveled_index_for(p) for p in param.params])
    return param._raveled_index() + self._offset_for(param)
get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work
def is_of_genus_type(self, genus_type=None):
    """Tests if this object is of the given genus Type.

    The given genus type may be supported by the object through the type
    hierarchy.

    | arg:    ``genus_type`` (``osid.type.Type``): a genus type
    | return: (``boolean``) - true if this object is of the given genus
              Type, false otherwise
    | raise:  ``NullArgument`` - ``genus_type`` is null
    | *compliance: mandatory - This method must be implemented.*
    """
    if genus_type is None:
        raise NullArgument()
    mine = self.get_genus_type()
    return (genus_type.get_authority() == mine.get_authority() and
            genus_type.get_identifier_namespace() == mine.get_identifier_namespace() and
            genus_type.get_identifier() == mine.get_identifier())
Tests if this object is of the given genus Type. The given genus type may be supported by the object through the type hierarchy. | arg: ``genus_type`` (``osid.type.Type``): a genus type | return: (``boolean``) - true if this object is of the given genus Type, false otherwise | raise: ``NullArgument`` - ``genus_type`` is null | *compliance: mandatory - This method must be implemented.*
def on_touch_move(self, touch):
    """Follow the grabbed touch: reposition by the stored drag offsets.

    Returns True when the touch was consumed, False otherwise.
    """
    if touch is not self._touch:
        return False
    self.pos = (touch.x + self.x_down, touch.y + self.y_down)
    return True
Follow the touch