code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def is_dental(c, lang):
    """Return True if character ``c`` is a dental consonant in ``lang``.

    The flattened source was syntactically invalid (the assignment and the
    return were fused on one line); this is the minimal corrected form.

    :param c: character to test
    :param lang: language identifier, passed through to ``get_offset``
    :return: True when the character's offset lies within DENTAL_RANGE
    """
    # get_offset and DENTAL_RANGE are defined elsewhere in this module.
    o = get_offset(c, lang)
    return DENTAL_RANGE[0] <= o <= DENTAL_RANGE[1]
Is the character a dental
def _read_hypocentre_from_ndk_string(self, linestring):
    """Read hypocentre data from the first line of an NDK event block.

    :param str linestring: fixed-width NDK hypocentre record line
    :returns: populated ``GCMTHypocentre`` instance
    """
    hypo = GCMTHypocentre()
    hypo.source = linestring[0:4]
    hypo.date = _read_date_from_string(linestring[5:15])
    hypo.time = _read_time_from_string(linestring[16:26])
    hypo.latitude = float(linestring[27:33])
    hypo.longitude = float(linestring[34:41])
    hypo.depth = float(linestring[42:47])
    # split() instead of split(' '): consecutive spaces in the fixed-width
    # magnitude field would otherwise yield empty strings and crash float('').
    magnitudes = [float(x) for x in linestring[48:55].split()]
    # A magnitude of 0.0 appears to mean "not reported" - TODO confirm
    # against the NDK format description.
    if magnitudes[0] > 0.:
        hypo.m_b = magnitudes[0]
    if magnitudes[1] > 0.:
        hypo.m_s = magnitudes[1]
    hypo.location = linestring[56:]
    return hypo
Reads the hypocentre data from the ndk string to return an instance of the GCMTHypocentre class
def removeContainer(tag):
    """Remove the container identified by ``tag``, if one exists.

    The container is force-removed. APIError is reported via ``eprint``
    and re-raised to the caller.
    """
    container = getContainerByTag(tag)
    if not container:
        return
    try:
        container.remove(force=True)
    except APIError as exc:
        eprint("Unhandled error while removing container", tag)
        raise exc
Check if a container with a given tag exists and remove it (force removal) if it does. No extra side effects. Reports and re-raises APIError exceptions.
def check_convert_string(obj, name=None, no_leading_trailing_whitespace=True,
                         no_whitespace=False, no_newline=True, whole_word=False,
                         min_len=1, max_len=0):
    """Ensure ``obj`` can be interpreted as a unicode string, optionally with
    additional restrictions imposed.

    By default this means a non-zero-length string which does not begin or
    end in whitespace. Raises ``ValueError`` on any violated restriction and
    returns the (converted) string otherwise.
    """
    name = name or 'Argument'
    obj = ensure_unicode(obj, name=name)

    # Whitespace rules: either forbid it everywhere, or only at the edges.
    if no_whitespace:
        if _PATTERN_WHITESPACE.match(obj):
            raise ValueError('%s cannot contain whitespace' % name)
    elif no_leading_trailing_whitespace and _PATTERN_LEAD_TRAIL_WHITESPACE.match(obj):
        raise ValueError('%s contains leading/trailing whitespace' % name)

    # Length limits: a limit of 0 disables the corresponding check.
    too_short = min_len and len(obj) < min_len
    too_long = max_len and len(obj) > max_len
    if too_short or too_long:
        raise ValueError('%s too short/long (%d/%d)' % (name, min_len, max_len))

    # Content rules: whole-word implies no newlines, so the checks exclude
    # each other.
    if whole_word:
        if not _PATTERN_WORD.match(obj):
            raise ValueError('%s can only contain alphanumeric (unicode) characters, numbers and the underscore' % name)
    elif no_newline and '\n' in obj:
        raise ValueError('%s cannot contain line breaks' % name)
    return obj
Ensures the provided object can be interpreted as a unicode string, optionally with additional restrictions imposed. By default this means a non-zero length string which does not begin or end in whitespace.
def get_seconds_description(self):
    """Generates a description for only the SECONDS portion of the expression.

    Returns:
        The SECONDS description
    """
    return self.get_segment_description(
        self._expression_parts[0],
        _("every second"),
        lambda s: s,
        lambda s: _("every {0} seconds").format(s),
        # NOTE(review): these two callbacks never interpolate ``s`` into the
        # template - presumably formatting happens downstream; confirm.
        lambda s: _("seconds {0} through {1} past the minute"),
        lambda s: _("at {0} seconds past the minute")
    )
Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description
def _ensure_append(self, new_items, append_to, index=0):
    """Insert ``new_items`` into ``append_to``, creating a fresh list when
    ``append_to`` is falsy.

    :param new_items: the item(s) to insert (inserted as a single element)
    :param append_to: the list on which to insert, or None for a new list
    :param index: position at which to insert (default 0)
    :return: the (possibly newly created) list
    """
    target = append_to if append_to else []
    target.insert(index, new_items)
    return target
Ensure an item is appended to a list or create a new empty list :param new_items: the item(s) to append :type new_items: list(obj) :param append_to: the list on which to append the items :type append_to: list() :param index: index of the list on which to append the items :type index: int
def available_providers(request):
    "Adds the list of enabled providers to the context."
    if APPENGINE:
        # Lazy evaluation on App Engine - the query runs only when accessed.
        providers = SimpleLazyObject(lambda: _get_enabled())
    else:
        providers = Provider.objects.filter(
            consumer_secret__isnull=False, consumer_key__isnull=False)
    return {'allaccess_providers': providers}
Adds the list of enabled providers to the context.
def find_users_by_email_starting_with(email_prefix=None, cursor=None, page_size=30):
    """Return a command that retrieves users whose email starts with
    ``email_prefix``, ordered by email.

    At most ``page_size`` users are returned; the next page can be fetched
    by passing the ``cursor`` attribute of the returned command.
    """
    prefix = email_prefix if email_prefix else ''
    query = MainUser.query_email_starts_with(prefix)
    return ModelSearchCommand(query, page_size, cursor, cache_begin=None)
Returns a command that retrieves users whose email starts with email_prefix, ordered by email. It returns at most page_size users. The next page of results can be retrieved in a subsequent call by passing the cursor provided in the command's cursor attribute.
def query_by_post(postid):
    """Query post-to-tag link records for a post, ordered by their
    ``order`` field.

    :param postid: id of the post whose tag links are fetched
    :return: peewee query over ``TabPost2Tag`` rows
    """
    return TabPost2Tag.select().where(
        TabPost2Tag.post_id == postid
    ).order_by(TabPost2Tag.order)
Query records by post.
def insert(self, index, *grids):
    """Return a copy with ``grids`` inserted before ``index``.

    The given grids are inserted (as a block) into ``self``, yielding a
    new grid whose number of dimensions is the sum of the numbers of
    dimensions of all involved grids. No changes are made in-place.

    Parameters
    ----------
    index : int
        The index of the dimension before which ``grids`` are to be
        inserted. Negative indices count backwards from ``self.ndim``.
    grid1, ..., gridN : `RectGrid`
        The grids to be inserted into ``self``.

    Returns
    -------
    newgrid : `RectGrid`
        The enlarged grid.
    """
    # Keep the raw input value for error reporting before conversion.
    index, index_in = safe_int_conv(index), index
    if not -self.ndim <= index <= self.ndim:
        raise IndexError('index {0} outside the valid range -{1} ... {1}'
                         ''.format(index_in, self.ndim))
    if index < 0:
        index += self.ndim

    if len(grids) == 0:
        # No grids to insert: return an identical copy.
        return RectGrid(*self.coord_vectors)
    elif len(grids) == 1:
        grid = grids[0]
        if not isinstance(grid, RectGrid):
            raise TypeError('{!r} is not a `RectGrid` instance'
                            ''.format(grid))
        # Splice the new coordinate vectors in at the insertion point.
        new_vecs = (self.coord_vectors[:index] + grid.coord_vectors +
                    self.coord_vectors[index:])
        return RectGrid(*new_vecs)
    else:
        # Insert the first grid, then recurse for the remaining ones after
        # the just-inserted dimensions.
        return self.insert(index, grids[0]).insert(
            index + grids[0].ndim, *(grids[1:]))
Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append
def create_at_path(self, asset_content, url_path, tags=''):
    """Create asset at a specific URL path on the server.

    The content is base64-encoded before being sent.
    """
    payload = {
        'asset': b64encode(asset_content),
        'url-path': url_path,
        'tags': tags,
        'type': 'base64',
    }
    return self._create_asset(payload)
Create asset at a specific URL path on the server
def is_notifying(cls, user_or_email, instance):
    """Check if the watch created by notify exists for ``instance``.

    Delegates to the parent implementation, keying the lookup on the
    instance's primary key.
    """
    return super(InstanceEvent, cls).is_notifying(user_or_email,
                                                  object_id=instance.pk)
Check if the watch created by notify exists.
def frame_to_seconds(self, frame_index, sr):
    """Return a (start, end) tuple for the frame, in seconds, using
    sampling rate ``sr``."""
    start, end = self.frame_to_sample(frame_index)
    return (sample_to_seconds(start, sampling_rate=sr),
            sample_to_seconds(end, sampling_rate=sr))
Return a tuple containing the start and end of the frame in seconds.
def __pop_top_frame(self):
    """Pop the top frame off the frame stack; when a parent frame remains,
    let it process the popped subframe."""
    finished = self.__stack.pop()
    parent = self.__stack[-1] if self.__stack else None
    if parent is not None:
        parent.process_subframe(finished)
Pops the top frame off the frame stack.
def merge_conf(to_hash, other_hash, path=None):
    """Merge ``other_hash`` into ``to_hash`` recursively, in place.

    Nested dicts are merged key by key; any other value from
    ``other_hash`` overwrites the corresponding value in ``to_hash``.

    :param dict to_hash: destination mapping (mutated and returned)
    :param dict other_hash: source mapping
    :param list path: key-path accumulator used during recursion; callers
        should not pass it
    :return: ``to_hash``
    """
    # A mutable default ([]) would be shared between calls; use None instead.
    if path is None:
        path = []
    for key in other_hash:
        if (key in to_hash and isinstance(to_hash[key], dict)
                and isinstance(other_hash[key], dict)):
            merge_conf(to_hash[key], other_hash[key], path + [str(key)])
        else:
            to_hash[key] = other_hash[key]
    return to_hash
merges other_hash into to_hash
def fetch_tweets(account_file, outfile, limit):
    """Fetch up to ``limit`` tweets for each account listed in
    ``account_file`` and write them, one JSON object per line, to
    ``outfile``."""
    print('fetching tweets for accounts in', account_file)
    out = io.open(outfile, 'wt')
    for screen_name in iter_lines(account_file):
        print('\nFetching tweets for %s' % screen_name)
        for tweet in twutil.collect.tweets_for_user(screen_name, limit):
            # Stamp the tweet with the account it was fetched for.
            tweet['user']['screen_name'] = screen_name
            out.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
            out.flush()
Fetch up to limit tweets for each account in account_file and write to outfile.
def max(self):
    """Returns the maximum value of the domain.

    :rtype: `float` or `np.inf`
    """
    upper = self._max
    if np.isinf(upper):
        return upper
    return int(upper)
Returns the maximum value of the domain. :rtype: `float` or `np.inf`
def validate(self):
    """Perform options validation.

    ``required_languages`` may be a sequence of language codes, or a dict
    mapping language codes (plus the special key ``'default'``) to lists
    of field names; in the dict form every listed field name must appear
    in ``self.fields``.
    """
    if self.required_languages:
        if isinstance(self.required_languages, (tuple, list)):
            self._check_languages(self.required_languages)
        else:
            # Dict form: keys are languages, values are fieldname lists.
            self._check_languages(self.required_languages.keys(),
                                  extra=('default',))
            for fieldnames in self.required_languages.values():
                if any(f not in self.fields for f in fieldnames):
                    raise ImproperlyConfigured(
                        'Fieldname in required_languages which is not in fields option.')
Perform options validation.
def iter_items(cls, repo, rev, paths='', **kwargs):
    """Find all commits matching the given criteria.

    :param repo: the Repo
    :param rev: revision specifier, see git-rev-parse for viable options
    :param paths: optional path (or paths); when set, only commits touching
        them are considered
    :param kwargs: extra keyword arguments forwarded to git rev-list
    :return: iterator yielding Commit items
    """
    if 'pretty' in kwargs:
        raise ValueError("--pretty cannot be used as parsing expects single sha's only")
    # '--' separates revision arguments from path arguments.
    args = ['--']
    if paths:
        args.append(paths)
    proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
    return cls._iter_from_process_or_stream(repo, proc)
Find all commits matching the given criteria. :param repo: is the Repo :param rev: revision specifier, see git-rev-parse for viable options :param paths: is an optional path or list of paths, if set only Commits that include the path or paths will be considered :param kwargs: optional keyword arguments to git rev-list where ``max_count`` is the maximum number of commits to fetch ``skip`` is the number of commits to skip ``since`` all commits since i.e. '1970-01-01' :return: iterator yielding Commit items
def count_series(y_true, y_score, countna=False):
    """Return a series whose i-th entry is the number of examples in the
    top i (by score), optionally counting NaN labels as well."""
    y_true, y_score = to_float(y_true, y_score)
    order = _argsort(y_score)
    if countna:
        counts = range(1, len(y_true) + 1)
    else:
        # Cumulative count of non-NaN labels in score order.
        counts = (~np.isnan(y_true[order])).cumsum()
    return pd.Series(counts, index=range(1, len(counts) + 1))
Returns series whose i-th entry is the number of examples in the top i
def insert(self, table_name, record, attr_names=None):
    """Send an INSERT query to the database for a single record.

    :param str table_name: Table name of executing the query.
    :param record: Record to be inserted
        (dict/namedtuple/list/tuple).
    :raises IOError: on missing write permission.
    :raises simplesqlite.NullDatabaseConnectionError: if not connected.
    :raises simplesqlite.OperationalError: on SQL errors.
    """
    # Delegate to the bulk path with a one-element batch.
    single_batch = [record]
    self.insert_many(table_name, records=single_batch, attr_names=attr_names)
Send an INSERT query to the database. :param str table_name: Table name of executing the query. :param record: Record to be inserted. :type record: |dict|/|namedtuple|/|list|/|tuple| :raises IOError: |raises_write_permission| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-insert-records`
def intervals_to_durations(intervals):
    """Convert an array of n intervals to their n durations.

    Parameters
    ----------
    intervals : np.ndarray, shape=(n, 2)
        Array of time intervals; the i-th interval spans
        ``intervals[i, 0]`` to ``intervals[i, 1]``.

    Returns
    -------
    durations : np.ndarray, shape=(n,)
        Duration of each interval.
    """
    validate_intervals(intervals)
    spans = np.diff(intervals, axis=-1)
    return np.abs(spans).flatten()
Converts an array of n intervals to their n durations. Parameters ---------- intervals : np.ndarray, shape=(n, 2) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. Returns ------- durations : np.ndarray, shape=(n,) Array of the duration of each interval.
def light(self):
    """Returns if object is augmenting or diminishing its light.

    Delegates to the module-level ``light`` function, comparing this
    object against the chart's Sun.
    """
    sun = self.chart.getObject(const.SUN)
    return light(self.obj, sun)
Returns if object is augmenting or diminishing its light.
def run_parallel(self, para_func):
    """Run parallel calculation.

    Runs ``para_func`` over ``self.args`` on ``self.num_processors``
    worker processes and merges the per-process result dicts by
    concatenating each key's arrays.

    Args:
        para_func (obj): Function object to be used in parallel.

    Returns:
        (dict): Dictionary with parallel results.
    """
    if self.timer:
        start_timer = time.time()
    with mp.Pool(self.num_processors) as pool:
        print('start pool with {} processors: {} total processes.\n'.format(
            self.num_processors, len(self.args)))
        results = [pool.apply_async(para_func, arg) for arg in self.args]
        # Blocks until every worker finishes.
        out = [r.get() for r in results]
        # Merge: assumes every worker returns a dict with the same keys of
        # concatenable numpy arrays - TODO confirm with para_func contract.
        out = {key: np.concatenate([out_i[key] for out_i in out])
               for key in out[0].keys()}
    if self.timer:
        print("SNR calculation time:", time.time()-start_timer)
    return out
Run parallel calculation This will run the parallel calculation on self.num_processors. Args: para_func (obj): Function object to be used in parallel. Returns: (dict): Dictionary with parallel results.
def get_hosts(self, group=None):
    """Get the hosts, grouped.

    :param group: optional group name; when given only that group's hosts
        are returned, or None if the group is unknown
    :return: list of dicts with a 'hostlist' key (plus a 'group' key in
        the all-groups form), or None when ``group`` is not found
    """
    def hostnames(groupobj):
        # Collect the host names of one inventory group object.
        return [host.name for host in groupobj.get_hosts()]

    hostlist = []
    if group:
        groupobj = self.inventory.groups.get(group)
        if not groupobj:
            # print() function form for Python 3 compatibility (the
            # original used the Python 2 print statement).
            print("Group [%s] not found in inventory" % group)
            return None
        hostlist.append({'hostlist': hostnames(groupobj)})
    else:
        for name in self.inventory.groups:
            groupobj = self.inventory.groups.get(name)
            hostlist.append({'group': name,
                             'hostlist': hostnames(groupobj)})
    return hostlist
Get the hosts
def split_ref_from_uri(uri):
    """Split an optional ``@ref`` suffix off a path or URI.

    :param AnyStr uri: The path or URI to split
    :returns: A 2-tuple of the path or URI (ref removed) and the ref,
        or None when no ref is present
    :rtype: Tuple[AnyStr, Optional[AnyStr]]
    """
    if not isinstance(uri, six.string_types):
        raise TypeError("Expected a string, received {0!r}".format(uri))
    parsed = urllib_parse.urlparse(uri)
    path, ref = parsed.path, None
    if "@" in path:
        # Split at the LAST '@' so user@host prefixes survive.
        path, _, ref = path.rpartition("@")
        parsed = parsed._replace(path=path)
    return (urllib_parse.urlunparse(parsed), ref)
Given a path or URI, check for a ref and split it from the path if it is present, returning a tuple of the original input and the ref or None. :param AnyStr uri: The path or URI to split :returns: A 2-tuple of the path or URI and the ref :rtype: Tuple[AnyStr, Optional[AnyStr]]
def zrem(self, key, *members):
    """Remove the specified members from the sorted set stored at ``key``.

    Non-existing members are ignored; an error is returned when ``key``
    exists but does not hold a sorted set.

    :param key: The key of the sorted set
    :param members: One or more member values to remove
    :rtype: int
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    command = [b'ZREM', key]
    command.extend(members)
    return self._execute(command)
Removes the specified members from the sorted set stored at key. Non existing members are ignored. An error is returned when key exists and does not hold a sorted set. .. note:: **Time complexity**: ``O(M*log(N))`` with ``N`` being the number of elements in the sorted set and ``M`` the number of elements to be removed. :param key: The key of the sorted set :type key: :class:`str`, :class:`bytes` :param members: One or more member values to remove :type members: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
    "Logs training loss, validation loss and custom metrics & log prediction samples & save model"
    if self.save_model:
        # Save only when the monitored metric improved per self.operator.
        current = self.get_monitor_value()
        if current is not None and self.operator(current, self.best):
            print(
                f'Better model found at epoch {epoch} with {self.monitor} value: {current}.'
            )
            self.best = current
            with self.model_path.open('wb') as model_file:
                self.learn.save(model_file)
    if self.show_results:
        self.learn.show_results()
        # commit=False defers the wandb step so the metric log below lands
        # in the same step as the prediction-sample figure.
        wandb.log({"Prediction Samples": plt}, commit=False)
    # Pair recorder names with values; [1:] skips the first pair -
    # presumably the epoch column duplicating the positional arg; confirm.
    logs = {
        name: stat
        for name, stat in list(
            zip(self.learn.recorder.names,
                [epoch, smooth_loss] + last_metrics))[1:]
    }
    wandb.log(logs)
    if self.show_results:
        plt.close('all')
Logs training loss, validation loss and custom metrics & log prediction samples & save model
def _write_info(self):
    """Write API Info file.

    Renders ``vspk/SdkInfo.cs`` from the ``sdkinfo.cs.tpl`` template,
    filling in version, product and naming metadata held on ``self``.
    """
    self.write(destination=self.output_directory,
               filename="vspk/SdkInfo.cs",
               template_name="sdkinfo.cs.tpl",
               version=self.api_version,
               product_accronym=self._product_accronym,
               class_prefix=self._class_prefix,
               root_api=self.api_root,
               api_prefix=self.api_prefix,
               product_name=self._product_name,
               name=self._name,
               header=self.header_content,
               version_string=self._api_version_string,
               package_name=self._package_name)
Write API Info file
def load_calibration_template(self, template):
    """Reloads calibration settings from saved template doc.

    :param template: Values for calibration stimuli (see
        calibration_template function); must contain 'tone_doc' and
        'noise_doc' keys
    :type template: dict
    """
    # Rebuild the tone calibration stimulus from its saved document.
    self.tone_calibrator.stimulus.clearComponents()
    self.tone_calibrator.stimulus.loadFromTemplate(template['tone_doc'],
                                                   self.tone_calibrator.stimulus)
    # Restore each noise calibration stimulus from its saved state, paired
    # positionally with the calibrator's current stimuli.
    comp_doc = template['noise_doc']
    for state, calstim in zip(comp_doc, self.bs_calibrator.get_stims()):
        calstim.loadState(state)
Reloads calibration settings from saved template doc :param template: Values for calibration stimuli (see calibration_template function) :type template: dict
def __check_focus(self, event):
    """Checks if the focus has changed and, if so, updates the cached
    focus and refreshes dependent widgets/windows.

    :param event: event object delivered by the caller (unused here)
    """
    changed = False
    if not self._curfocus:
        # No cached focus yet - treat as changed.
        changed = True
    elif self._curfocus != self.focus():
        # Focus moved: drop any in-place editing widgets first.
        self.__clear_inplace_widgets()
        changed = True
    newfocus = self.focus()
    if changed:
        if newfocus:
            self._curfocus= newfocus
            self.__focus(newfocus)
        self.__updateWnds()
Checks if the focus has changed
def _calc_resp(password_hash, server_challenge):
    """Generate the LM response given a 16-byte password hash and the
    challenge from the CHALLENGE_MESSAGE.

    :param password_hash: A 16-byte password hash
    :param server_challenge: A random 8-byte server challenge
    :return: A 24-byte LM response buffer
    """
    # Zero-pad the hash to 21 bytes, then DES-encrypt the challenge with
    # each of the three 7-byte key chunks and concatenate the results.
    padded = password_hash + b'\x00' * (21 - len(password_hash))
    response = b''
    for offset in (0, 7, 14):
        cipher = DES(DES.key56_to_key64(padded[offset:offset + 7]))
        response += cipher.encrypt(server_challenge[0:8])
    return response
Generate the LM response given a 16-byte password hash and the challenge from the CHALLENGE_MESSAGE :param password_hash: A 16-byte password hash :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE :return res: A 24-byte buffer to contain the LM response upon return
def image(self, raw_url, title='', alt=''):
    """Filter the ``src`` attribute of an image.

    When the URL passes ``check_url`` it is rewritten and rendered as an
    ``<img>`` tag; otherwise the raw markdown is returned escaped. Note
    that URL filtering is only a basic protection - restrict sources to
    trusted services for real mitigation.
    """
    if not self.check_url(raw_url, is_image_src=True):
        return escape_html("![%s](%s)" % (alt, raw_url))
    rewritten = self.rewrite_url(raw_url, is_image_src=True)
    alt_attr = ' alt="%s"' % escape_html(alt) if alt else ''
    title_attr = ' title="%s"' % escape_html(title) if title else ''
    return '<img src="%s"%s%s />' % (escape_html(rewritten), alt_attr, title_attr)
Filters the ``src`` attribute of an image. Note that filtering the source URL of an ``<img>`` tag is only a very basic protection, and it's mostly useless in modern browsers (they block JavaScript in there by default). An example of attack that filtering does not thwart is phishing based on HTTP Auth, see `this issue <https://github.com/liberapay/liberapay.com/issues/504>`_ for details. To mitigate this issue you should only allow images from trusted services, for example your own image store, or a proxy (see :meth:`rewrite_url`).
def line_nbr_from_position(self, y_pos):
    """Returns the line number from the y_pos.

    :param y_pos: Y pos in the editor
    :return: Line number (0 based), -1 if out of range
    """
    line_height = self._editor.fontMetrics().height()
    for block_top, line_number, _block in self._editor.visible_blocks:
        if block_top <= y_pos <= block_top + line_height:
            return line_number
    return -1
Returns the line number from the y_pos. :param y_pos: Y pos in the editor :return: Line number (0 based), -1 if out of range
def ext_pillar(minion_id, pillar, *args, **kwargs):
    """Execute queries against POSTGRES, merge and return as a dict."""
    pillar_source = POSTGRESExtPillar()
    return pillar_source.fetch(minion_id, pillar, *args, **kwargs)
Execute queries against POSTGRES, merge and return as a dict
def get_wrapped_instance(self, instance=None):
    """Returns a wrapped model instance.

    Looks up the wrapper class registered for the instance's model label;
    falls back to ``self.wrapper_cls``. When neither exists the instance
    is returned unchanged.

    :raises TypeError: if ``instance`` is None (the default exists only
        for signature compatibility; an instance is required)
    :raises ModelNotRegistered: if the model is not in the registry
    """
    if instance is None:
        # Fail fast with a clear message instead of an opaque
        # AttributeError on instance._meta below.
        raise TypeError("get_wrapped_instance() requires a model instance.")
    label = instance._meta.label_lower
    if label not in self.registry:
        raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
    wrapper_cls = self.registry.get(label) or self.wrapper_cls
    if wrapper_cls:
        return wrapper_cls(instance)
    return instance
Returns a wrapped model instance.
def unload(self, ):
    """If the reference is loaded, unload it.

    Not a delete: the reference stays in the scene but no data is read
    from it. Children Reftracks are thrown away and return after
    :meth:`Reftrack.load`. Unloading is refused when children would
    become orphans.

    :returns: None
    :rtype: None
    :raises: :class:`ReftrackIntegrityError`
    """
    assert self.status() == self.LOADED,\
        "Cannot unload if there is no loaded reference. \
Use delete if you want to get rid of a reference or import."
    # Refuse to unload when dependent children would be orphaned.
    childrentodelete = self.get_children_to_delete()
    if childrentodelete:
        raise ReftrackIntegrityError("Cannot unload because children of the reference would become orphans.",
                                     childrentodelete)
    self.get_refobjinter().unload(self._refobj)
    self.set_status(self.UNLOADED)
    # Children are discarded; they are recreated on the next load.
    self.throw_children_away()
    self.update_restrictions()
    self.emit_data_changed()
If the reference is loaded, unload it. .. Note:: Do not confuse this with a delete. This means, that the reference stays in the scene, but no data is read from the reference. This will call :meth:`RefobjInterface.unload` and set the status to :data:`Reftrack.UNLOADED`. It will also throw away all children :class:`Reftrack`. They will return after :meth:`Reftrack.load`. The problem might be that children depend on their parent, but will not get unloaded. E.g. you imported a child. It will stay in the scene after the unload and become an orphan. In this case an error is raised. It is not possible to unload such an entity. The orphan might get its parents back after you call load, but it will introduce bugs when wrapping children of unloaded entities. So we simply disable the feature in that case and raise an :class:`IntegrityError` :returns: None :rtype: None :raises: :class:`ReftrackIntegrityError`
def read_with_columns(func):
    """Decorate a Table read method to use the ``columns`` keyword."""
    def wrapper(*args, **kwargs):
        # Pull 'columns' out before delegating so the wrapped reader never
        # sees it.
        requested = kwargs.pop("columns", None)
        table = func(*args, **kwargs)
        if requested is None:
            return table
        return table[requested]
    return _safe_wraps(wrapper, func)
Decorate a Table read method to use the ``columns`` keyword
def _build_params(self):
    """Build the query parameters.

    No idea why the wrong parameter order fails to return data, so an
    ``OrderedDict`` preserves insertion order here.
    """
    params = OrderedDict()
    params['purpose_codes'] = 'ADULT'
    params['queryDate'] = self._valid_date
    params['from_station'] = self._from_station_telecode
    params['to_station'] = self._to_station_telecode
    return params
No idea why the wrong parameter order fails to return data, so an `OrderedDict` is used here to preserve insertion order.
def _mark_candidate_indexes(lines, candidate):
    """Mark candidate indexes with markers.

    Markers:
    * c - line that could be a signature line
    * l - long line
    * d - line that starts with dashes but has other chars as well

    >>> _mark_candidate_indexes(['Some text', '', '-', 'Bob'], [0, 2, 3])
    'cdc'
    """
    markers = list('c' * len(candidate))
    # Walk the candidates bottom-up; marker i corresponds to candidate i.
    for i, line_idx in reversed(list(enumerate(candidate))):
        if len(lines[line_idx].strip()) > TOO_LONG_SIGNATURE_LINE:
            markers[i] = 'l'
        else:
            line = lines[line_idx].strip()
            # strip("-") is truthy only when non-dash characters remain.
            if line.startswith('-') and line.strip("-"):
                markers[i] = 'd'
    return "".join(markers)
Mark candidate indexes with markers Markers: * c - line that could be a signature line * l - long line * d - line that starts with dashes but has other chars as well >>> _mark_candidate_indexes(['Some text', '', '-', 'Bob'], [0, 2, 3]) 'cdc'
def get_desktop_size(self):
    """Get the size of the desktop display as a (width, height) tuple."""
    mode = ffi.new('SDL_DisplayMode *')
    check_int_err(lib.SDL_GetDesktopDisplayMode(self._index, mode))
    return (mode.w, mode.h)
Get the size of the desktop display
def Henry_H_at_T(T, H, Tderiv, T0=None, units=None, backend=None):
    """Evaluate Henry's constant H at temperature T.

    Parameters
    ----------
    T : float
        Temperature (Kelvin assumed when ``units`` is None).
    H : float
        Henry's constant.
    Tderiv : float
        dln(H)/d(1/T) (Kelvin assumed when ``units`` is None).
    T0 : float, optional
        Reference temperature, default 298.15 K.
    units : object, optional
        Object with a ``Kelvin`` attribute.
    backend : module, optional
        Module providing ``exp``; default numpy/math via get_backend.
    """
    be = get_backend(backend)
    K = 1 if units is None else units.Kelvin
    if T0 is None:
        T0 = 298.15 * K
    # van 't Hoff style temperature correction.
    return H * be.exp(Tderiv * (1/T - 1/T0))
Evaluate Henry's constant H at temperature T Parameters ---------- T: float Temperature (with units), assumed to be in Kelvin if ``units == None`` H: float Henry's constant Tderiv: float (optional) dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``. T0: float Reference temperature, assumed to be in Kelvin if ``units == None`` default: 298.15 K units: object (optional) object with attributes: kelvin (e.g. chempy.units.default_units) backend : module (optional) module with "exp", default: numpy, math
def alphafilter(request, queryset, template):
    """Render the template with the filtered queryset.

    Uses the first ``__istartswith`` GET parameter found as the filter.
    """
    qs_filter = {}
    for key in list(request.GET.keys()):
        if '__istartswith' in key:
            qs_filter[str(key)] = request.GET[key]
            break
    context = {'objects': queryset.filter(**qs_filter),
               'unfiltered_objects': queryset}
    return render_to_response(template, context,
                              context_instance=RequestContext(request))
Render the template with the filtered queryset
def _resolve_time(value):
    """Resolve the time in seconds of a configuration value.

    Accepts None and ints (returned unchanged), plain numeric strings,
    simple "<N> <unit>" strings, and Gregorian keywords.

    :raises ValueError: for unrecognized formats
    """
    # NOTE: keeps Python 2 ``long`` for compatibility with this module.
    if value is None or isinstance(value, (int, long)):
        return value
    if NUMBER_TIME.match(value):
        return long(value)
    simple = SIMPLE_TIME.match(value)
    # Reuse the match computed above instead of running the regex twice.
    if simple:
        multiplier = long(simple.groups()[0])
        constant = SIMPLE_TIMES[simple.groups()[1]]
        return multiplier * constant
    if value in GREGORIAN_TIMES:
        return value
    raise ValueError('Unsupported time format %s'%value)
Resolve the time in seconds of a configuration value.
async def list_keys(request: web.Request) -> web.Response:
    """List the key files installed in the system.

    Responds 200 with ``{'keys': [{'uri', 'id', 'name'}, ...]}``, one
    entry per key directory containing a key file. Non-directories and
    empty key directories are skipped with a warning.
    """
    keys_dir = CONFIG['wifi_keys_dir']
    keys: List[Dict[str, str]] = []
    for path in os.listdir(keys_dir):
        full_path = os.path.join(keys_dir, path)
        if not os.path.isdir(full_path):
            log.warning("Garbage in wifi keys dir: {}".format(full_path))
            continue
        in_path = os.listdir(full_path)
        if not in_path:
            # The original raised IndexError on in_path[0] for an empty
            # key dir; skip it with a warning instead.
            log.warning("Empty key dir for key {}".format(path))
            continue
        if len(in_path) > 1:
            log.warning("Garbage in key dir for key {}".format(path))
        keys.append(
            {'uri': '/wifi/keys/{}'.format(path),
             'id': path,
             'name': os.path.basename(in_path[0])})
    return web.json_response({'keys': keys}, status=200)
List the key files installed in the system. This responds with a list of the same objects as key: ``` GET /wifi/keys -> 200 OK { keys: [ { uri: '/wifi/keys/some-hex-digest', id: 'some-hex-digest', name: 'keyfile.pem' }, ... ] } ```
def save_file(filename, data, mk_parents=True):
    """Save file to disk.

    Parameters
    ----------
    filename : pathlib.Path
        Path to the file.
    data : str
        File contents.
    mk_parents : bool, optional
        If to create parent directories.
    """
    parent = filename.parent
    if mk_parents and not parent.exists():
        logger.debug("Creating directory: %s", parent.as_posix())
        parent.mkdir(parents=True)
    with open(filename, mode="w") as f:
        logger.debug("Saving file: %s", filename.as_posix())
        f.write(data)
Save file to disk. Parameters ---------- filename : pathlib.Path Path to the file. data : str File contents. mk_parents : bool, optional If to create parent directories.
def gen_send_stdout_url(ip, port):
    """Generate send stdout url for the given NNI experiment endpoint."""
    base = BASE_URL.format(ip)
    return '{0}:{1}{2}{3}/{4}/{5}'.format(base, port, API_ROOT_URL,
                                          STDOUT_API, NNI_EXP_ID,
                                          NNI_TRIAL_JOB_ID)
Generate send stdout url
def get_objective_banks_by_activity(self, activity_id):
    """Gets the list of ``ObjectiveBanks`` mapped to a ``Activity``.

    arg:    activity_id (osid.id.Id): ``Id`` of a ``Activity``
    return: (osid.learning.ObjectiveBankList) - list of objective banks
    raise:  NotFound - ``activity_id`` is not found
    raise:  NullArgument - ``activity_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    mgr = self._get_provider_manager('LEARNING', local=True)
    lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
    # Resolve the bank Ids for the activity, then look the banks up in bulk.
    return lookup_session.get_objective_banks_by_ids(
        self.get_objective_bank_ids_by_activity(activity_id))
Gets the list of ``ObjectiveBanks`` mapped to a ``Activity``. arg: activity_id (osid.id.Id): ``Id`` of a ``Activity`` return: (osid.learning.ObjectiveBankList) - list of objective bank ``Ids`` raise: NotFound - ``activity_id`` is not found raise: NullArgument - ``activity_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
# Hoisted so the namedtuple class is created once, not on every call.
ExpressionTool = collections.namedtuple(
    "ExpressionTool", "name inputs outputs expression parallel")


def et(name, parallel, inputs, outputs, expression):
    """Represent an ExpressionTool that reorders inputs using javascript.

    Note the argument order differs from the field order: ``parallel`` is
    the second argument but the last field of the tuple.
    """
    return ExpressionTool(name, inputs, outputs, expression, parallel)
Represent an ExpressionTool that reorders inputs using javascript.
def get_as_datadict(self):
    """Get information about this object as a dictionary.

    Used by the WebSocket interface to pass relevant information to
    client applications.
    """
    return {'type': type(self).__name__, 'tags': list(self.tags)}
Get information about this object as a dictionary. Used by WebSocket interface to pass some relevant information to client applications.
def update_with(self, update_fn, *maps):
    """Return a new PMap with the items of ``maps`` inserted, combining
    values for duplicate keys with ``update_fn``, applied left to right.

    >>> from operator import add
    >>> m(a=1, b=2).update_with(add, m(a=2))
    pmap({'a': 3, 'b': 2})
    """
    evolver = self.evolver()
    for mapping in maps:
        for key, value in mapping.items():
            if key in evolver:
                evolver.set(key, update_fn(evolver[key], value))
            else:
                evolver.set(key, value)
    return evolver.persistent()
Return a new PMap with the items in Mappings maps inserted. If the same key is present in multiple maps the values will be merged using merge_fn going from left to right. >>> from operator import add >>> m1 = m(a=1, b=2) >>> m1.update_with(add, m(a=2)) pmap({'a': 3, 'b': 2}) The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost. >>> m1 = m(a=1) >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3}) pmap({'a': 1})
def _attend_process(self, proc, sleeptime):
    """Wait up to ``sleeptime`` seconds for ``proc`` to finish.

    :param psutil.Popen proc: process object opened by psutil.Popen()
    :param float sleeptime: time to wait
    :return bool: True if still running after the wait, False otherwise
    """
    still_running = False
    try:
        proc.wait(timeout=sleeptime)
    except psutil.TimeoutExpired:
        still_running = True
    return still_running
Waits on a process for a given time to see if it finishes, returns True if it's still running after the given time or False as soon as it returns. :param psutil.Popen proc: Process object opened by psutil.Popen() :param float sleeptime: Time to wait :return bool: True if process is still running; otherwise false
def run_exitfuncs():
    """Run registered exit handlers in registration order (unlike Python's
    atexit, which runs them reversed), re-raising the last exception."""
    exc_info = None
    for func, targs, kargs in _exithandlers:
        try:
            func(*targs, **kargs)
        except BaseException:
            # Capture everything (including SystemExit) but keep running
            # the remaining handlers; re-raise the last failure below.
            exc_info = sys.exc_info()
    if exc_info is not None:
        six.reraise(exc_info[0], exc_info[1], exc_info[2])
Function that behaves exactly like Python's atexit, but runs atexit functions in the order in which they were registered, not reversed.
def configurationChangeAcknowledge():
    """CONFIGURATION CHANGE ACKNOWLEDGE Section 9.1.12c"""
    header = TpPd(pd=0x6)
    msg_type = MessageType(mesType=0x31)
    mobile_id = MobileId()
    return header / msg_type / mobile_id
CONFIGURATION CHANGE ACKNOWLEDGE Section 9.1.12c
def create_dir(path):
    """Create directory specified by `path` if it doesn't already exist.

    Parameters
    ----------
    path : str
        path to directory

    Returns
    -------
    bool
        True if `path` exists
    """
    try:
        os.makedirs(path, exist_ok=True)
    except Exception as err:
        # Best-effort: report the problem and signal failure.
        print(err)
        return False
    return os.path.exists(path)
Create directory specified by `path` if it doesn't already exist. Parameters ---------- path : str path to directory Returns ------- bool True if `path` exists
def text_should_be_visible(self, text, exact_match=False, loglevel='INFO'):
    """Verifies that element identified with text is visible.

    New in AppiumLibrary 1.4.5
    """
    element = self._element_find_by_text(text, exact_match)
    if element.is_displayed():
        return
    # Dump the page source before failing so the log shows the state.
    self.log_source(loglevel)
    raise AssertionError("Text '%s' should be visible "
                         "but did not" % text)
Verifies that element identified with text is visible. New in AppiumLibrary 1.4.5
def download_file(self, remote_filename, local_filename=None):
    """Download file from github.

    Args:
        remote_filename (str): The name of the file as defined in the git
            repository.
        local_filename (str, optional): Defaults to None. The name of the
            file as it should be written to the local filesystem.
    """
    status = 'Failed'
    if local_filename is None:
        local_filename = remote_filename

    # Respect --force; otherwise prompt before overwriting an existing file.
    if not self.args.force and os.access(local_filename, os.F_OK):
        if not self._confirm_overwrite(local_filename):
            self._print_results(local_filename, 'Skipped')
            return

    url = '{}{}'.format(self.base_url, remote_filename)
    r = requests.get(url, allow_redirects=True)
    if r.ok:
        # Context manager closes the handle deterministically (the original
        # leaked the file object returned by open()).
        with open(local_filename, 'wb') as fh:
            fh.write(r.content)
        status = 'Success'
    else:
        self.handle_error('Error requesting: {}'.format(url), False)

    self._print_results(local_filename, status)
Download file from github. Args: remote_filename (str): The name of the file as defined in git repository. local_filename (str, optional): Defaults to None. The name of the file as it should be be written to local filesystem.
def register_blueprint(self, blueprint):
    """Register given blueprint on the current app, at most once.

    Provided for use inside a plugin's module-level ``register_plugin``
    functions.

    :param blueprint: blueprint object with plugin endpoints
    :type blueprint: flask.Blueprint
    """
    if blueprint in self._blueprint_known:
        return
    self.app.register_blueprint(blueprint)
    self._blueprint_known.add(blueprint)
Register given blueprint on curren app. This method is provided for using inside plugin's module-level :func:`register_plugin` functions. :param blueprint: blueprint object with plugin endpoints :type blueprint: flask.Blueprint
def run_interactive_command(command, env=None, **kwargs):
    """Runs a command interactively, reusing the current stdin, stdout and
    stderr.

    Args:
        command(list of str): args of the command to execute, including
            the command itself as command[0], e.g. `['ls', '-l']`
        env(dict of str:str): optional env for the subprocess
        **kwargs: forwarded to the underlying Popen call

    Returns:
        lago.utils.CommandStatus: result of the interactive execution
    """
    return _run_command(
        command=command,
        out_pipe=sys.stdout,
        err_pipe=sys.stderr,
        stdin=sys.stdin,
        env=env,
        **kwargs
    )
Runs a command interactively, reusing the current stdin, stdout and stderr Args: command(list of str): args of the command to execute, including the command itself as command[0] as `['ls', '-l']` env(dict of str:str): If set, will use the given dict as env for the subprocess **kwargs: Any other keyword args passed will be passed to the :ref:subprocess.Popen call Returns: lago.utils.CommandStatus: result of the interactive execution
def confirm(prompt, default=None, show_default=True, abort=False,
            input_function=None):
    """Prompts for confirmation from the user.

    Loops until a yes/no answer is given; optionally raises an abort when
    the answer is negative and ``abort`` is set.
    """
    valid = {'yes': True, 'y': True, 'no': False, 'n': False}
    input_function = get_input_fn(input_function)
    if default not in ('yes', 'no', None):
        default = None
    if show_default:
        # Capitalize the hint matching the default answer.
        yes_hint = 'Y' if default == 'yes' else 'y'
        no_hint = 'N' if default == 'no' else 'n'
        prompt = '{} [{}/{}]: '.format(prompt, yes_hint, no_hint)
    while True:
        choice = prompt_fn(input_function, prompt, default).lower()
        if choice not in valid:
            echo('Please respond with "yes" or "no" (or "y" or "n").')
            continue
        answer = valid[choice]
        if abort and not answer:
            raise_abort()
        return answer
Prompts for confirmation from the user.
def process_cbn_jgif_file(file_name):
    """Return a PybelProcessor by processing a CBN JGIF JSON file.

    Parameters
    ----------
    file_name : str
        The path to a CBN JGIF JSON file.

    Returns
    -------
    bp : PybelProcessor
        Contains INDRA Statements in bp.statements.
    """
    with open(file_name, 'r') as jgif_fh:
        jgif = json.load(jgif_fh)
    return process_pybel_graph(pybel.from_cbn_jgif(jgif))
Return a PybelProcessor by processing a CBN JGIF JSON file. Parameters ---------- file_name : str The path to a CBN JGIF JSON file. Returns ------- bp : PybelProcessor A PybelProcessor object which contains INDRA Statements in bp.statements.
def domain_create(self, domain, master=True, **kwargs):
    """Registers a new Domain on the acting user's account.

    :param domain: The domain to register to Linode's DNS manager.
    :type domain: str
    :param master: Whether this is a master (defaults to true)
    :type master: bool
    :returns: The new Domain object.
    :rtype: Domain
    """
    params = {
        'domain': domain,
        'type': 'master' if master else 'slave',
    }
    params.update(kwargs)

    result = self.post('/domains', data=params)
    if 'id' not in result:
        raise UnexpectedResponseError('Unexpected response when creating Domain!',
                                      json=result)
    return Domain(self, result['id'], result)
Registers a new Domain on the acting user's account. Make sure to point your registrar to Linode's nameservers so that Linode's DNS manager will correctly serve your domain. :param domain: The domain to register to Linode's DNS manager. :type domain: str :param master: Whether this is a master (defaults to true) :type master: bool :returns: The new Domain object. :rtype: Domain
def get_description(self):
    """Creates a description DisplayText for this agent."""
    text = 'Agent representing ' + str(self.id_)
    return DisplayText(text=text,
                       language_type=DEFAULT_LANGUAGE_TYPE,
                       script_type=DEFAULT_SCRIPT_TYPE,
                       format_type=DEFAULT_FORMAT_TYPE)
Creates a description
def _tscube_app(self, xmlfile):
    """Run gttscube as an application.

    Builds the gttscube argument dict from the current configuration and
    ROI center, writing the TS cube to the work directory.

    :param xmlfile: model XML file path (resolved via get_model_path)
    """
    xmlfile = self.get_model_path(xmlfile)
    outfile = os.path.join(self.config['fileio']['workdir'],
                           'tscube%s.fits' % (self.config['file_suffix']))
    kw = dict(cmap=self.files['ccube'],
              expcube=self.files['ltcube'],
              bexpmap=self.files['bexpmap'],
              irfs=self.config['gtlike']['irfs'],
              evtype=self.config['selection']['evtype'],
              srcmdl=xmlfile,
              nxpix=self.npix,
              nypix=self.npix,
              binsz=self.config['binning']['binsz'],
              # Map reference is the ROI sky direction in degrees.
              xref=float(self.roi.skydir.ra.deg),
              yref=float(self.roi.skydir.dec.deg),
              proj=self.config['binning']['proj'],
              stlevel=0,
              coordsys=self.config['binning']['coordsys'],
              outfile=outfile)
    run_gtapp('gttscube', self.logger, kw)
Run gttscube as an application.
def query(self, domain):
    """The actual query happens here. Times in the results are replaced
    with their isoformat.

    :param domain: The domain which should get queried.
    :type domain: str
    :returns: list of dicts containing the search results
    :rtype: [list, dict]
    """
    result = {}
    try:
        result = self.pdns.query(domain)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; self.error reports the failure to the caller.
        self.error('Exception while querying passiveDNS. Check the domain format.')
    clean_result = []
    for resultset in result:
        # Convert datetime objects to 'YYYY-MM-DD HH:MM:SS' strings.
        if resultset.get('time_first', None):
            resultset['time_first'] = resultset.get('time_first').isoformat(' ')
        if resultset.get('time_last', None):
            resultset['time_last'] = resultset.get('time_last').isoformat(' ')
        clean_result.append(resultset)
    return clean_result
The actual query happens here. Time from queries is replaced with isoformat. :param domain: The domain which should gets queried. :type domain: str :returns: List of dicts containing the search results. :rtype: [list, dict]
def process_frames_face(self, frames):
    """Preprocess frames: detect the face and extract mouth frames."""
    face_detector = dlib.get_frontal_face_detector()
    landmark_predictor = dlib.shape_predictor(self.face_predictor_path)

    mouth_frames = self.get_frames_mouth(face_detector, landmark_predictor, frames)
    self.face = np.array(frames)
    self.mouth = np.array(mouth_frames)

    # Only persist the data when a mouth was found in the first frame.
    if mouth_frames[0] is not None:
        self.set_data(mouth_frames)
Preprocess from frames using face detector
def request_single(self, name, content=None):
    """Simple wrapper around ``request`` to extract a single response.

    :param name: request name passed through to ``request``.
    :param content: optional request body; defaults to an empty dict.
    :returns: the first tag in the response body, or None when the
        response contains no list or dict values.
    """
    # Fix: the original used a mutable default argument (``content={}``),
    # which is shared across calls; callers see identical behavior.
    if content is None:
        content = {}
    resp = self.request(name, content)
    for value in resp.values():
        # Fix: use isinstance() instead of ``type(x) == list/dict``.
        if isinstance(value, list):
            return value[0]
        if isinstance(value, dict):
            return value
    return None
Simple wrapper around request to extract a single response :returns: the first tag in the response body
def users(self, query, page=1, per_page=10):
    """Get a single page of user results for a query.

    :param query [string]: Search terms.
    :param page [integer]: Page number to retrieve. (Optional; default: 1)
    :param per_page [integer]: Number of items per page. (Optional; default: 10)
    :return: [dict]: {u'total': 0, u'total_pages': 0, u'results': [User]}
    """
    data = self._search("/search/users", query, page=page, per_page=per_page)
    raw_results = data.get("results")
    data["results"] = UserModel.parse_list(raw_results)
    return data
Get a single page of user results for a query. :param query [string]: Search terms. :param page [integer]: Page number to retrieve. (Optional; default: 1) :param per_page [integer]: Number of items per page. (Optional; default: 10) :return: [dict]: {u'total': 0, u'total_pages': 0, u'results': [User]}
def reverse_func(apps, schema_editor):
    """Reverse for 'manage migrate backup_app 0003_auto_20160127_2002':
    remove the config file belonging to every stored BackupRun."""
    print("\n")
    BackupRun = apps.get_model("backup_app", "BackupRun")
    remove_count = 0
    for backup_run in BackupRun.objects.all():
        origin = OriginBackupRun(
            name=backup_run.name,
            backup_datetime=backup_run.backup_datetime,
        )
        config_path = origin.get_config_path()
        try:
            config_path.unlink()
        except OSError as err:
            print("ERROR removing config file: %s" % err)
        else:
            remove_count += 1
    print("%i config files removed.\n" % remove_count)
manage migrate backup_app 0003_auto_20160127_2002
def fetch_points_of_sales(self, ticket=None):
    """Fetch all point of sales objects.

    Fetch all points of sales from the WS and store (or update) them
    locally.

    Returns a list of tuples with the format (pos, created,).
    """
    ticket = ticket or self.get_or_create_ticket('wsfe')

    client = clients.get_client('wsfe', self.is_sandboxed)
    response = client.service.FEParamGetPtosVenta(
        serializers.serialize_ticket(ticket),
    )
    check_response(response)

    return [
        PointOfSales.objects.update_or_create(
            number=pos_data.Nro,
            issuance_type=pos_data.EmisionTipo,
            owner=self,
            defaults={
                'blocked': pos_data.Bloqueado == 'N',
                'drop_date': parsers.parse_date(pos_data.FchBaja),
            },
        )
        for pos_data in response.ResultGet.PtoVenta
    ]
Fetch all point of sales objects. Fetch all point of sales from the WS and store (or update) them locally. Returns a list of tuples with the format (pos, created,).
def _go_install(self, target, gopath, build_flags):
    """Create and execute a `go install` command for *target*."""
    import_path = target.import_path
    result, go_cmd = self.go_dist.execute_go_cmd(
        'install',
        gopath=gopath,
        args=build_flags + [import_path],
        workunit_factory=self.context.new_workunit,
        workunit_name='install {}'.format(import_path),
        workunit_labels=[WorkUnitLabel.COMPILER])
    if result != 0:
        raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
Create and execute a `go install` command.
def _array_type_std_res(self, counts, total, colsum, rowsum): if self.mr_dim_ind == 0: total = total[:, np.newaxis] rowsum = rowsum[:, np.newaxis] expected_counts = rowsum * colsum / total variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3 return (counts - expected_counts) / np.sqrt(variance)
Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array.
def connect(self, format, *args):
    """Connect a socket to a formatted endpoint.

    Thin wrapper around the native ``zsock_connect``; *format* and
    *args* are passed straight through as a printf-style endpoint spec.

    Returns 0 if OK, -1 if the endpoint was invalid.
    """
    return lib.zsock_connect(self._as_parameter_, format, *args)
Connect a socket to a formatted endpoint Returns 0 if OK, -1 if the endpoint was invalid.
def hex_to_hsv(color):
    """Convert a color from hex to hsv.

    Parameters:
    -----------
    color : string
        Hex color representation, e.g. ``hex_to_hsv('#ff9933')``
    """
    # Normalize, then drop the leading '#'.
    hexdigits = normalize(color)[1:]
    rgb = tuple(
        int(hexdigits[i:i + 2], base=16) / 255.0
        for i in (0, 2, 4)
    )
    return colorsys.rgb_to_hsv(*rgb)
Converts from hex to hsv Parameters: ----------- color : string Hex representation of the color Example: hex_to_hsv('#ff9933')
def serialize(self, buf, offset):
    """Write the wire-protocol form of this flow match into *buf*.

    The OXM match header is written first, each field after it, and the
    whole match is zero-padded to an 8-byte boundary.

    Returns the output length (payload plus padding).
    """
    # Convert user-level (name, value) pairs into OXM field triples.
    fields = [ofproto.oxm_from_user(k, uv) for (k, uv) in self._fields2]
    # Reserve room for the (type, length) header; fields follow it.
    hdr_pack_str = '!HH'
    field_offset = offset + struct.calcsize(hdr_pack_str)
    for (n, value, mask) in fields:
        field_offset += ofproto.oxm_serialize(n, value, mask, buf, field_offset)
    # Total length is only known after serializing every field, so the
    # header is back-filled now.
    length = field_offset - offset
    msg_pack_into(hdr_pack_str, buf, offset, ofproto.OFPMT_OXM, length)
    self.length = length
    # Pad the match up to a multiple of 8 bytes.
    pad_len = utils.round_up(length, 8) - length
    msg_pack_into("%dx" % pad_len, buf, field_offset)
    return length + pad_len
Outputs the expression of the wire protocol of the flow match into the buf. Returns the output length.
def read_excitation_energies(self):
    """Read the excitation energies after a TD-DFT calculation.

    Returns:
        A list of tuples, one per transition, such as
        [(energy (eV), lambda (nm), oscillator strength), ... ]
    """
    transitions = []
    inside_td_block = False
    with zopen(self.filename, "r") as f:
        # Read until EOF (readline returns "" at end of file).
        for line in iter(f.readline, ""):
            if re.search(r"^\sExcitation energies and oscillator strengths:", line):
                inside_td_block = True
            if inside_td_block and re.search(r"^\sExcited State\s*\d", line):
                values = [float(v) for v in float_patt.findall(line)]
                transitions.append(tuple(values[:3]))
    return transitions
Read the excitation energies after a TD-DFT calculation. Returns: A list of tuples, one per transition, such as [(energy (eV), lambda (nm), oscillator strength), ... ]
def format_datetime(d: PotentialDatetimeType,
                    fmt: str,
                    default: str = None) -> Optional[str]:
    """Format a datetime with a ``strftime`` format specification
    string, or return ``default`` if the input is ``None``."""
    coerced = coerce_to_pendulum(d)
    if coerced is None:
        return default
    return coerced.strftime(fmt)
Format a datetime with a ``strftime`` format specification string, or return ``default`` if the input is ``None``.
def _setup_transport(self):
    """Wrap ``self.sock`` in an SSL object.

    Uses the Python 2.6+ ``ssl`` module when available (honouring any
    ``sslopts`` keyword options set on the instance), otherwise falls
    back to the older ``socket.ssl`` of Python 2.5 and lower.
    """
    if HAVE_PY26_SSL:
        if hasattr(self, 'sslopts'):
            # Caller provided wrap_socket() options (certs, ciphers, ...).
            self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
        else:
            self.sslobj = ssl.wrap_socket(self.sock)
        # The new-style wrapper needs an explicit handshake here.
        self.sslobj.do_handshake()
    else:
        # Legacy pre-2.6 API: handshake happens inside socket.ssl().
        self.sslobj = socket.ssl(self.sock)
Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.
def new(self, filename=None):
    """Start a session as an independent process."""
    argv = (self.exec_path,)
    if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
        # Python scripts are launched through the interpreter binary.
        interpreter = find_executable("python")
        argv = (interpreter, 'python') + argv
    else:
        # spawnl takes the image path followed by argv[0].
        argv += (self.exec_path,)
    if filename:
        argv += ('-o', filename)
    os.spawnl(os.P_NOWAIT, *argv)
Start a session as an independent process
def read_lease(self, lease_id):
    """Retrieve lease metadata.

    Supported methods:
        PUT: /sys/leases/lookup. Produces: 200 application/json

    :param lease_id: the ID of the lease to lookup.
    :type lease_id: str | unicode
    :return: Parsed JSON response from the leases PUT request
    :rtype: dict.
    """
    response = self._adapter.put(
        url='/v1/sys/leases/lookup',
        json={'lease_id': lease_id},
    )
    return response.json()
Retrieve lease metadata. Supported methods: PUT: /sys/leases/lookup. Produces: 200 application/json :param lease_id: the ID of the lease to lookup. :type lease_id: str | unicode :return: Parsed JSON response from the leases PUT request :rtype: dict.
def maybeparens(lparen, item, rparen):
    """Wrap an item in optional parentheses, only applying them if
    necessary.

    The bare item is the first alternative; the parenthesized form
    (with both parens suppressed) is the second.
    """
    parenthesized = lparen.suppress() + item + rparen.suppress()
    return item | parenthesized
Wrap an item in optional parentheses, only applying them if necessary.
def get_search_fields(self):
    """Return the search field names mapped to weights as a dict.

    Resolution order: fields handed to ``__init__`` (stored in
    ``_search_fields``); then any ``search_fields`` attributes defined
    on the model and its base classes (built up over the inheritance
    chain); and finally, as a fallback, every ``CharField`` or
    ``TextField`` on the model.
    """
    search_fields = self._search_fields.copy()

    # Collect explicit search_fields from the model's MRO, base-first
    # so subclasses override their ancestors.
    if not search_fields:
        for cls in reversed(self.model.__mro__):
            super_fields = getattr(cls, "search_fields", {})
            search_fields.update(search_fields_to_dict(super_fields))

    # Last resort: all textual fields on the model.
    if not search_fields:
        text_field_names = [
            f.name for f in self.model._meta.fields
            if isinstance(f, (CharField, TextField))
        ]
        search_fields = search_fields_to_dict(text_field_names)

    return search_fields
Returns the search field names mapped to weights as a dict. Used in ``get_queryset`` below to tell ``SearchableQuerySet`` which search fields to use. Also used by ``DisplayableAdmin`` to populate Django admin's ``search_fields`` attribute. Search fields can be populated via ``SearchableManager.__init__``, which then get stored in ``SearchableManager._search_fields``, which serves as an approach for defining an explicit set of fields to be used. Alternatively and more commonly, ``search_fields`` can be defined on models themselves. In this case, we look at the model and all its base classes, and build up the search fields from all of those, so the search fields are implicitly built up from the inheritance chain. Finally if no search fields have been defined at all, we fall back to any fields that are ``CharField`` or ``TextField`` instances.
def extract_new(cls) -> DevicesTypeUnbound:
    """Gather all "new" |Node| or |Element| objects.

    Builds a device container from the class's current selection
    registry and then clears that registry, so each call only returns
    objects registered since the previous call.

    See the main documentation on module |devicetools| for further
    information.
    """
    devices = cls.get_handlerclass()(*_selection[cls])
    _selection[cls].clear()
    return devices
Gather all "new" |Node| or |Element| objects. See the main documentation on module |devicetools| for further information.
def reset_component(self, component):
    """Unset a component in this URI.

    :param component: component name (str) or component type
        (``WURI.Component``) to reset.
    :return: None
    """
    # Fix: ``isinstance(...) is True`` was redundant — isinstance
    # already returns a bool.
    if isinstance(component, str):
        component = WURI.Component(component)
    self.__components[component] = None
Unset component in this URI :param component: component name (or component type) to reset :return: None
def add_histogram_summary(self, x, tag=None):
    """Add a summary operation to visualize the histogram of x's values.

    Returns the summary op, or None when no summary collections are
    configured.
    """
    if not self.summary_collections:
        return
    with self.g.as_default():
        resolved_tag = tag or _tag_for(x.name)
        return tf.summary.histogram(
            resolved_tag, x, collections=self.summary_collections)
Add a summary operation to visualize the histogram of x's values.
def set_baselines(self):
    """Modify coords to shift tree position for the xbaseline style
    argument.

    Useful for arranging trees onto a Canvas with other plots while
    sharing common cartesian axes coordinates.
    """
    shift = self.style.xbaseline
    if not shift:
        return
    # Vertical orientations shift along x (column 0); otherwise the
    # shift applies along y (column 1).
    axis = 0 if self.style.orient in ("up", "down") else 1
    self.coords.coords[:, axis] += shift
    self.coords.verts[:, axis] += shift
Modify coords to shift tree position for x,y baseline arguments. This is useful for arranging trees onto a Canvas with other plots while still sharing common cartesian axes coordinates.
def deliver_tx(self, raw_transaction):
    """Validate the transaction before mutating the state.

    Args:
        raw_transaction: a raw string (in bytes) transaction.
    """
    self.abort_if_abci_chain_is_not_synced()

    logger.debug('deliver_tx: %s', raw_transaction)
    transaction = self.bigchaindb.is_valid_transaction(
        decode_transaction(raw_transaction), self.block_transactions)

    # Invalid transactions are rejected; valid ones are queued for the
    # current block.
    if not transaction:
        logger.debug('deliver_tx: INVALID')
        return ResponseDeliverTx(code=CodeTypeError)

    logger.debug('storing tx')
    self.block_txn_ids.append(transaction.id)
    self.block_transactions.append(transaction)
    return ResponseDeliverTx(code=CodeTypeOk)
Validate the transaction before mutating the state. Args: raw_tx: a raw string (in bytes) transaction.
def solve_assignement(self, costs):
    """Solve the assignment problem for the given square cost matrix.

    @param costs: square cost matrix
    @return: assignment function (row index -> column index)
    @rtype: int->int
    """
    if costs is None or len(costs) == 0:
        return dict()

    n = costs.shape[0]
    rows, cols, costs_list = [], [], []
    # Only feed finite-cost pairs to the solver.
    for i in range(n):
        for j in range(n):
            if costs[i, j] < invalid_match:
                rows.append(i)
                cols.append(j)
                costs_list.append(costs[i, j])

    assignment = lapjv.lapjv(rows, cols, costs_list)
    return {row: col for row, col in enumerate(assignment[0])}
Solves assignment problem using Hungarian implementation by Brian M. Clapper. @param costs: square cost matrix @return: assignment function @rtype: int->int
def is_docker_reachable(self):
    """Check if the Docker daemon is running.

    This is required for us to invoke the function locally.

    Returns
    -------
    bool
        True, if Docker is available, False otherwise
    """
    try:
        self.docker_client.ping()
    except (docker.errors.APIError, requests.exceptions.ConnectionError):
        LOG.debug("Docker is not reachable", exc_info=True)
        return False
    return True
Checks if Docker daemon is running. This is required for us to invoke the function locally Returns ------- bool True, if Docker is available, False otherwise
def is_sock_ok(self, timeout_select):
    """Check if the socket is OK.

    Thread-safe: the socket lock is held for the duration of the check.

    :param timeout_select: timeout forwarded to the underlying
        select-based socket check.
    :return: whatever ``self._is_socket_ok`` returns.
    """
    # Idiom fix: use the lock as a context manager instead of a manual
    # acquire/try/finally/release sequence; release still happens on
    # any exception.
    with self._socket_lock:
        return self._is_socket_ok(timeout_select)
check if socket is OK
def set_meta(self, meta=None, **kwargs):
    """Assign a value to ``self._meta``.

    When *meta* is None it is fetched via ``get_meta(**kwargs)``.
    Meta is not returned.
    """
    resolved = self.get_meta(**kwargs) if meta is None else meta
    setattr(self, '_meta', resolved)
Assign values to self.meta. Meta is not returned
def calcRandW(self,aLvlNow,pLvlNow):
    """Calculate the interest factor and wage rate this period using
    each agent's capital stock to get the aggregate capital ratio.

    Parameters
    ----------
    aLvlNow : [np.array]
        Agents' current end-of-period assets.  Elements of the list
        correspond to types in the economy, entries within arrays to
        agents of that type.
    pLvlNow : [np.array]
        Agents' current permanent income levels, aligned with aLvlNow.

    Returns
    -------
    AggVarsNow : CobbDouglasAggVars
        An object containing the aggregate variables for the upcoming
        period: capital-to-labor ratio, interest factor, (normalized)
        wage rate, aggregate permanent and transitory shocks.
    """
    # Aggregate assets per unit of mean permanent income, last period.
    AaggPrev = np.mean(np.array(aLvlNow))/np.mean(pLvlNow)
    # Aggregate capital is the mean of individual asset holdings.
    AggregateK = np.mean(np.array(aLvlNow))
    # Draw this period's aggregate shocks from the pre-drawn histories
    # and advance the shock index.
    PermShkAggNow = self.PermShkAggHist[self.Shk_idx]
    TranShkAggNow = self.TranShkAggHist[self.Shk_idx]
    self.Shk_idx += 1
    # Effective aggregate labor scales with the permanent shock.
    AggregateL = np.mean(pLvlNow)*PermShkAggNow
    KtoLnow = AggregateK/AggregateL
    # Capital-to-output ratio: K/Y = (K/L)^(1-CapShare) under Cobb-Douglas.
    self.KtoYnow = KtoLnow**(1.0-self.CapShare)
    # Factor prices from the K/L ratio, adjusted by the transitory shock.
    RfreeNow = self.Rfunc(KtoLnow/TranShkAggNow)
    wRteNow = self.wFunc(KtoLnow/TranShkAggNow)
    # Market resources per unit of effective labor: capital income plus
    # labor income.
    MaggNow = KtoLnow*RfreeNow + wRteNow*TranShkAggNow
    self.KtoLnow = KtoLnow
    AggVarsNow = CobbDouglasAggVars(MaggNow,AaggPrev,KtoLnow,RfreeNow,wRteNow,PermShkAggNow,TranShkAggNow)
    return AggVarsNow
Calculates the interest factor and wage rate this period using each agent's capital stock to get the aggregate capital ratio. Parameters ---------- aLvlNow : [np.array] Agents' current end-of-period assets. Elements of the list correspond to types in the economy, entries within arrays to agents of that type. Returns ------- AggVarsNow : CobbDouglasAggVars An object containing the aggregate variables for the upcoming period: capital-to-labor ratio, interest factor, (normalized) wage rate, aggregate permanent and transitory shocks.
def _cleanup(self): self.device = None self.doc = None self.parser = None self.resmgr = None self.interpreter = None
Frees lots of non-textual information, such as the fonts and images and the objects that were needed to parse the PDF.
def _is_national_number_suffix_of_other(numobj1, numobj2): nn1 = str(numobj1.national_number) nn2 = str(numobj2.national_number) return nn1.endswith(nn2) or nn2.endswith(nn1)
Returns true when one national number is the suffix of the other or both are the same.
def write(text, path):
    """Write *text* to the file at *path* with utf-8 encoding."""
    encoded = text.encode("utf-8")
    with open(path, "wb") as stream:
        stream.write(encoded)
Write text to a file with utf-8 encoding. Usage:: >>> from angora.dataIO import textfile or >>> from angora.dataIO import * >>> textfile.write("hello world!", "test.txt")
def insertFromMimeData(self, data):
    """Paste the MIME *data* at the current cursor position.

    This method also adds another undo-object to the undo-stack, so the
    paste can be reverted.
    """
    # The undo object must capture the paste counter *before* it is
    # advanced for the next paste.
    undoObj = UndoPaste(self, data, self.pasteCnt)
    self.pasteCnt += 1
    self.qteUndoStack.push(undoObj)
Paste the MIME data at the current cursor position. This method also adds another undo-object to the undo-stack.
def get_model_class(self):
    """Return the model class for ``self.app`` / ``self.model``.

    Resolution goes through the ContentType framework first; when no
    ContentType row exists, falls back to the app registry on
    Django >= 1.7.

    :returns: the model class, or None when it cannot be resolved on
        Django < 1.7 without a ContentType entry.
    """
    try:
        c = ContentType.objects.get(app_label=self.app, model=self.model)
    except ContentType.DoesNotExist:
        # ``c`` is unbound on this path, so it must not be referenced
        # here (the original risked a NameError); use the registry.
        if django.VERSION >= (1, 7):
            return apps.get_model(self.app, self.model)
        return None
    return c.model_class()
Returns model class
def from_json_file(cls, path):
    """Return a template built from the JSON file at *path*.

    :param path: string
    :return: ServiceAgreementTemplate
    """
    with open(path) as json_file:
        parsed = json.load(json_file)
    return cls(template_json=parsed)
Return a template from a JSON file located at the given path. :param path: string :return: ServiceAgreementTemplate
def pad(self, pad_width, **kwargs):
    """Pad this series to a new size.

    Parameters
    ----------
    pad_width : `int`, pair of `ints`
        number of samples by which to pad each end of the array;
        a single int pads both ends by the same amount, or give a
        (before, after) `tuple` for uneven padding

    **kwargs
        see :meth:`numpy.pad` for kwarg documentation

    Returns
    -------
    series : `Series`
        the padded version of the input

    See also
    --------
    numpy.pad
        for details on the underlying functionality
    """
    kwargs.setdefault('mode', 'constant')
    if isinstance(pad_width, int):
        pad_width = (pad_width,)
    # pad, then re-view the result as this Series subclass and restore
    # its metadata and unit
    new = numpy.pad(self, pad_width, **kwargs).view(type(self))
    new.__metadata_finalize__(self)
    new._unit = self.unit
    # shift the x-axis origin back by the number of samples prepended
    new.x0 -= self.dx * pad_width[0]
    return new
Pad this series to a new size Parameters ---------- pad_width : `int`, pair of `ints` number of samples by which to pad each end of the array. Single int to pad both ends by the same amount, or (before, after) `tuple` to give uneven padding **kwargs see :meth:`numpy.pad` for kwarg documentation Returns ------- series : `Series` the padded version of the input See also -------- numpy.pad for details on the underlying functionality
def handle_pagination(self, page_num=None, page_size=None):
    """Handle retrieving and processing the next page of results."""
    next_page = self.get_next_page(page_num=page_num, page_size=page_size)
    self._response_json = next_page
    self.update_attrs()
    # Restart iteration at the beginning of the freshly processed page.
    self.position = 0
    self.values = self.process_page()
Handle retrieving and processing the next page of results.