code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def mimetype(self, value: str) -> None:
    """Set the Content-Type header, appending the charset for text-like types."""
    needs_charset = (
        value.startswith('text/')
        or value == 'application/xml'
        or (value.startswith('application/') and value.endswith('+xml'))
    )
    if needs_charset:
        content_type = f"{value}; charset={self.charset}"
    else:
        content_type = value
    self.headers['Content-Type'] = content_type
Set the mimetype to the value.
def get_sites_in_sphere(self, pt, r):
    """Find all sites within a sphere centered at ``pt`` with radius ``r``.

    Returns a list of ``(site, dist)`` tuples, since subsequent processing
    usually needs the distance as well.
    """
    measured = ((site, site.distance_from_point(pt)) for site in self._sites)
    return [(site, dist) for site, dist in measured if dist <= r]
Find all sites within a sphere from a point. Args: pt (3x1 array): Cartesian coordinates of center of sphere. r (float): Radius of sphere. Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
def overwrites_for(self, obj):
    """Return the channel-specific :class:`PermissionOverwrite` for a member or role.

    ``obj`` selects which overwrite type is matched: a ``User`` matches
    'member' overwrites, a ``Role`` matches 'role' overwrites, anything
    else matches every overwrite.  Falls back to an empty
    ``PermissionOverwrite`` when no overwrite id equals ``obj.id``.
    """
    if isinstance(obj, User):
        predicate = lambda p: p.type == 'member'
    elif isinstance(obj, Role):
        predicate = lambda p: p.type == 'role'
    else:
        predicate = lambda p: True
    for overwrite in filter(predicate, self._overwrites):
        if overwrite.id == obj.id:
            allow = Permissions(overwrite.allow)
            deny = Permissions(overwrite.deny)
            return PermissionOverwrite.from_pair(allow, deny)
    return PermissionOverwrite()
Returns the channel-specific overwrites for a member or a role. Parameters ----------- obj The :class:`Role` or :class:`abc.User` denoting whose overwrite to get. Returns --------- :class:`PermissionOverwrite` The permission overwrites for this object.
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    """Get the ISO3 code for the given country; only exact matches or None.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to use latest data from web rather than file
            in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise
            if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    countryupper = country.upper()
    len_countryupper = len(countryupper)
    if len_countryupper == 3:
        # already an ISO3 candidate; accept only if it is a known code
        if countryupper in countriesdata['countries']:
            return countryupper
    elif len_countryupper == 2:
        # try the ISO2 -> ISO3 mapping
        iso3 = countriesdata['iso2iso3'].get(countryupper)
        if iso3 is not None:
            return iso3
    # fall back to name lookup, then expanded abbreviations
    iso3 = countriesdata['countrynames2iso3'].get(countryupper)
    if iso3 is not None:
        return iso3
    for candidate in cls.expand_countryname_abbrevs(countryupper):
        iso3 = countriesdata['countrynames2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
Get ISO3 code for the given country. Only exact matches or None are returned. Args: country (str): Country for which to get ISO3 code use_live (bool): Try to use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO3 country code or None
def classify(self, classifier_id, text, **kwargs):
    """Classify a phrase; returns label information for the input.

    The classifier status must be `Available` before you can use it.

    :param str classifier_id: Classifier ID to use.
    :param str text: The submitted phrase (maximum 2048 characters).
    :param dict headers: A `dict` containing the request headers.
    :return: A `DetailedResponse` containing the result, headers and HTTP
        status code.
    :rtype: DetailedResponse
    """
    if classifier_id is None:
        raise ValueError('classifier_id must be provided')
    if text is None:
        raise ValueError('text must be provided')
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('natural_language_classifier', 'V1',
                                  'classify')
    headers.update(sdk_headers)
    data = {'text': text}
    url = '/v1/classifiers/{0}/classify'.format(
        *self._encode_path_vars(classifier_id))
    response = self.request(
        method='POST', url=url, headers=headers, json=data, accept_json=True)
    return response
Classify a phrase. Returns label information for the input. The status must be `Available` before you can use the classifier to classify text. :param str classifier_id: Classifier ID to use. :param str text: The submitted phrase. The maximum length is 2048 characters. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def wp_status(self):
    """Show status of waypoint download."""
    try:
        # preferred form, requires wploader.expected_count to exist
        print("Have %u of %u waypoints" % (self.wploader.count()+len(self.wp_received), self.wploader.expected_count))
    except Exception:
        # fall back to the simple count when expected_count is unavailable
        print("Have %u waypoints" % (self.wploader.count()+len(self.wp_received)))
show status of wp download
def update(self, **kwargs):
    """Update global configuration settings with the given values.

    Each value is validated first; the CONF_CHANGED_EVT change event is
    emitted only when at least one value actually differs from the
    current settings.  This method is idempotent.
    """
    super(CommonConf, self).update(**kwargs)
    conf_changed = False
    for conf_name, conf_value in kwargs.items():
        # the validator raises on an invalid value before anything is applied
        rtconf.base.get_validator(conf_name)(conf_value)
        item1 = self._settings.get(conf_name, None)
        item2 = kwargs.get(conf_name, None)
        if item1 != item2:
            conf_changed = True
    if conf_changed:
        for conf_name, conf_value in kwargs.items():
            self._settings[conf_name] = conf_value
        self._notify_listeners(CommonConf.CONF_CHANGED_EVT, self)
Updates global configuration settings with given values. First checks if given configuration values differ from current values. If any of the configuration values changed, generates a change event. Currently we generate change event for any configuration change. Note: This method is idempotent.
def confirmation(self, *args, **kwargs):
    """Upstream packet: forward a confirmation to the current terminal."""
    terminal = self.current_terminal
    if not terminal:
        raise RuntimeError("no active terminal")
    if not isinstance(terminal, Client):
        raise RuntimeError("current terminal not a client")
    terminal.confirmation(*args, **kwargs)
Upstream packet, send to current terminal.
def wsgi_app(self, environ, start_response):
    """A basic WSGI app: dispatch the request inside the local-manager
    middleware so request-local state is managed per call."""
    @_LOCAL_MANAGER.middleware
    def _wrapped_app(environ, start_response):
        request = Request(environ)
        # stash the current request in local storage for later retrieval
        setattr(_local, _CURRENT_REQUEST_KEY, request)
        response = self._dispatch_request(request)
        return response(environ, start_response)
    return _wrapped_app(environ, start_response)
A basic WSGI app
def layer_from_combo(combo):
    """Return the QgsMapLayer currently selected in *combo*, or None.

    The layer id is stored in the item's Qt.UserRole data and resolved
    against the current QgsProject.

    :rtype: QgsMapLayer
    """
    idx = combo.currentIndex()
    if idx < 0:
        return None
    selected_id = combo.itemData(idx, Qt.UserRole)
    return QgsProject.instance().mapLayer(selected_id)
Get the QgsMapLayer currently selected in a combo. Obtain QgsMapLayer id from the userrole of the QtCombo and return it as a QgsMapLayer. :returns: The currently selected map layer a combo. :rtype: QgsMapLayer
def client_view(client):
    """Show a client's details; handle delete and edit form submission."""
    # a POST with 'delete' removes the client and returns to the index
    if request.method == 'POST' and 'delete' in request.form:
        db.session.delete(client)
        db.session.commit()
        return redirect(url_for('.index'))
    form = ClientForm(request.form, obj=client)
    if form.validate_on_submit():
        form.populate_obj(client)
        db.session.commit()
    return render_template(
        'invenio_oauth2server/settings/client_view.html',
        client=client,
        form=form,
    )
Show client's detail.
def _contour_helper(self, args, kwargs):
    """Unify defaults and common functionality of ``density_contour`` and
    ``density_contourf``.

    Pops the contouring options out of *kwargs* and returns
    ``(lon, lat, totals, remaining_kwargs)``.
    """
    defaults = dict(
        measurement='poles',
        method='exponential_kamb',
        sigma=3,
        gridsize=100,
        weights=None,
    )
    contour_kwargs = {name: kwargs.pop(name, value)
                      for name, value in defaults.items()}
    lon, lat, totals = contouring.density_grid(*args, **contour_kwargs)
    return lon, lat, totals, kwargs
Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.
def convert_group(tokens):
    """Convert a parseResult to the ConfGroup type.

    Raises ParseFatalException when names in the group are not unique.
    """
    items = tokens.asList()
    mapping = dict(items)
    # a duplicate name collapses in the dict, shrinking its length
    if len(mapping) != len(items):
        raise ParseFatalException("Names in group must be unique: %s" % tokens)
    return ConfGroup(mapping)
Converts a parseResult to the ConfGroup type.
def prepare_value(value):
    """Prepare a value for pylint: collections are comma-joined, booleans
    become 'y'/'n', everything else is stringified."""
    if isinstance(value, bool):
        return "y" if value else "n"
    if isinstance(value, (list, tuple, set)):
        return ",".join(value)
    return str(value)
Prepare value to pylint.
def get_normalized_bdew_profile(self):
    """Calculate the normalized hourly heat demand (BDEW profile)."""
    self.df['temperature'] = self.temperature.values
    self.df['temperature_geo'] = self.weighted_temperature(
        how='geometric_series')
    sf = self.get_sf_values()
    [a, b, c, d] = self.get_sigmoid_parameters()
    f = self.get_weekday_parameters()
    # sigmoid heat-demand curve over the geometric-series temperature
    h = (a / (1 + (b / (self.df['temperature_geo'] - 40)) ** c) + d)
    # normalization so the daily mean of h * f equals one
    kw = 1.0 / (sum(h * f) / 24)
    heat_profile_normalized = (kw * h * f * sf)
    return heat_profile_normalized
Calculation of the normalized hourly heat demand
def _create_timezone_static(tz):
    """Create an icalendar vtimezone from a pytz.tzinfo.StaticTzInfo.

    :param tz: the timezone
    :type tz: pytz.tzinfo.StaticTzInfo
    :returns: timezone information
    :rtype: icalendar.Timezone
    """
    timezone = icalendar.Timezone()
    timezone.add('TZID', tz)
    subcomp = icalendar.TimezoneStandard()
    subcomp.add('TZNAME', tz)
    # static zone: one standard component starting at 1601-01-01 with
    # identical from/to offsets (no DST transitions)
    subcomp.add('DTSTART', dt.datetime(1601, 1, 1))
    subcomp.add('RDATE', dt.datetime(1601, 1, 1))
    subcomp.add('TZOFFSETTO', tz._utcoffset)
    subcomp.add('TZOFFSETFROM', tz._utcoffset)
    timezone.add_component(subcomp)
    return timezone
create an icalendar vtimezone from a pytz.tzinfo.StaticTzInfo :param tz: the timezone :type tz: pytz.tzinfo.StaticTzInfo :returns: timezone information :rtype: icalendar.Timezone()
def forward_moves(self, position):
    """Yield possible pawn moves one and two squares in front of this Pawn.

    :type position: Board
    :rtype: generator
    """
    if position.is_square_empty(self.square_in_front(self.location)):
        # single push; promotes when the move reaches the back rank
        if self.would_move_be_promotion():
            for move in self.create_promotion_moves(notation_const.PROMOTE):
                yield move
        else:
            yield self.create_move(end_loc=self.square_in_front(self.location),
                                   status=notation_const.MOVEMENT)
        # double push: only from the home row, and only when the second
        # square ahead is also empty (the first is empty per the outer if)
        if self.on_home_row() and \
                position.is_square_empty(self.two_squares_in_front(self.location)):
            yield self.create_move(
                end_loc=self.square_in_front(self.square_in_front(self.location)),
                status=notation_const.MOVEMENT
            )
Finds possible moves one step and two steps in front of Pawn. :type position: Board :rtype: generator
def _is_whitelisted(self, email):
    """Check if *email* matches a pattern in settings.SAFE_EMAIL_WHITELIST.

    When no whitelist is configured, nothing is considered whitelisted.
    """
    _missing = object()
    whitelist = getattr(settings, "SAFE_EMAIL_WHITELIST", _missing)
    if whitelist is _missing:
        return False
    return any(re.match(pattern, email) for pattern in whitelist)
Check if an email is in the whitelist. If there's no whitelist, it's assumed it's not whitelisted.
def generate_aliases_global(fieldfile, **kwargs):
    """A saved_file signal handler which generates thumbnails for all field,
    model, and app specific aliases matching the saved file's field, also
    generating thumbnails for each project-wide alias."""
    # NOTE(review): function-level import, presumably to defer the
    # easy_thumbnails dependency until the signal fires -- confirm
    from easy_thumbnails.files import generate_all_aliases
    generate_all_aliases(fieldfile, include_global=True)
A saved_file signal handler which generates thumbnails for all field, model, and app specific aliases matching the saved file's field, also generating thumbnails for each project-wide alias.
def copy_files(source_files, target_directory, source_directory=None):
    """Copy a list of files to *target_directory*.

    If *source_directory* is provided, it is prepended to each source
    file name.  The target directory is created when it does not exist.

    :param source_files: iterable of file names to copy
    :param target_directory: destination directory (created if missing)
    :param source_directory: optional directory prepended to each source
    """
    # exist_ok replaces the old bare try/except-pass, which silently
    # swallowed every makedirs error (including permission problems
    # unrelated to the directory already existing)
    os.makedirs(target_directory, exist_ok=True)
    for name in source_files:
        source = os.path.join(source_directory, name) if source_directory else name
        target = os.path.join(target_directory, name)
        shutil.copy2(source, target)
Copies a list of files to the specified directory. If source_directory is provided, it will be prepended to each source file.
def new_temp_file(directory=None, hint=''):
    """Return a new named temporary file (with a wpull-specific prefix)."""
    name_prefix = 'tmp-wpull-{0}-'.format(hint)
    return tempfile.NamedTemporaryFile(
        prefix=name_prefix,
        suffix='.tmp',
        dir=directory,
    )
Return a new temporary file.
def _get_request_token(self):
    """Fetch an OAuth request token from ``self.request_token_url``.

    Raises OAuthError when the provider does not answer with HTTP 200.
    NOTE(review): Python 2 era code (urllib.urlencode / urlparse.parse_qsl).
    """
    params = {
        'oauth_callback': self.get_callback_url()
    }
    response, content = self.client().request(self.request_token_url, "POST",
                                              body=urllib.urlencode(params))
    content = smart_unicode(content)
    if not response['status'] == '200':
        raise OAuthError(_(
            u"Invalid status code %s while obtaining request token from %s: %s") % (
            response['status'], self.request_token_url, content))
    # response body is a urlencoded query string with the token pair
    token = dict(urlparse.parse_qsl(content))
    return oauth.Token(token['oauth_token'], token['oauth_token_secret'])
Fetch a request token from `self.request_token_url`.
def connect(server, port, username, password):
    """Print the connection parameters (stand-in for a real ORM connect).

    NOTE(review): printing the password is unsafe outside demo code.
    """
    rule = "-" * 79
    print(rule)
    print("Connecting to: {}".format(server))
    print("At port: {}".format(port))
    print("Using username: {}".format(username))
    print("Using password: {}".format(password))
    print(rule)
This function might be something coming from your ORM
def get_connection(self, fail_silently=False):
    """Retrieve (and lazily cache) the connection used to send email."""
    # NOTE(review): local import, presumably to avoid a circular import
    # with the email service module -- confirm
    from protean.services.email import get_connection
    if not self.connection:
        self.connection = get_connection(fail_silently=fail_silently)
    return self.connection
Retrieve connection to send email
def get_metatab_doc(nb_path):
    """Read a notebook and extract the metatab document.

    Only the first '%%metatab' cell is returned; implicitly returns None
    when the notebook has no such cell.
    """
    from metatab.generate import CsvDataRowGenerator
    from metatab.rowgenerators import TextRowGenerator
    from metatab import MetatabDoc
    with open(nb_path) as f:
        nb = nbformat.reads(f.read(), as_version=4)
    for cell in nb.cells:
        # cell['source'] may be a list of lines; normalize to one string
        source = ''.join(cell['source']).strip()
        if source.startswith('%%metatab'):
            return MetatabDoc(TextRowGenerator(source))
Read a notebook and extract the metatab document. Only returns the first document
def __set_value(self, value):
    """Set the cell value to a string or number based on *value*."""
    cleaned = self._clean_value(value)
    return self._get_target().setDataArray(((cleaned,),))
Sets cell value to a string or number based on the given value.
def print_all_signals(self):
    """Print every Signal available on this widget and its child objects,
    formatted as handler-method stubs ('def _on_<child>__<signal>(self):')."""
    for o in dir(self):
        obj= getattr(self, o)
        div = False
        for c in dir(obj):
            cobj = getattr(obj, c)
            if isinstance(cobj, Signal):
                print('def _on_{}__{}(self):'.format(o, c))
                div = True
        if div:
            # separator after each child that exposed at least one signal
            print('-'*30)
Prints out every signal available for this widget and childs.
def slices(self):
    """Yield tuples of slice objects covering ``self.shape`` chunk by chunk.

    Order is not guaranteed.  When ``self.chunks`` is None a single tuple
    of full-extent slices is yielded.
    """
    if self.chunks is None:
        yield tuple(slice(None, extent) for extent in self.shape)
        return
    # number of chunks along each axis, rounded up
    n_chunks = tuple(-(-extent // size)
                     for extent, size in zip(self.shape, self.chunks))
    for index in np.ndindex(n_chunks):
        parts = []
        for pos, size, extent in zip(index, self.chunks, self.shape):
            begin = pos * size
            # NOTE(review): the stop is clamped to extent + 1, so the final
            # slice may run one past the axis length; harmless for slicing
            # but looks unintended -- confirm
            parts.append(slice(begin, min(begin + size, extent + 1), 1))
        yield tuple(parts)
Returns a generator yielding tuple of slice objects. Order is not guaranteed.
def dimensions(self):
    """Dimensions of the image.

    :rtype: :py:class:`ImageDimensions`
    """
    file_info = self._info.file
    return ImageDimensions(file_info.width, file_info.height)
Dimensions of the image. :rtype: :py:class:`ImageDimensions`
def get_recipients(self, **options):
    """Figure out the digest email recipients.

    With 'recipients_from_setting' the static setting list is returned;
    otherwise users are filtered by the 'staff'/'all' options (default:
    staff superusers only) and their email-field values are returned.
    """
    if options['recipients_from_setting']:
        return settings.TIMELINE_DIGEST_EMAIL_RECIPIENTS
    users = get_user_model()._default_manager.all()
    if options['staff']:
        users = users.filter(is_staff=True)
    elif not options['all']:
        # neither --staff nor --all: restrict to staff superusers
        users = users.filter(is_staff=True, is_superuser=True)
    return users.values_list(settings.TIMELINE_USER_EMAIL_FIELD, flat=True)
Figures out the recipients
def num_orifices(FlowPlant, RatioVCOrifice, HeadLossOrifice, DiamOrifice):
    """Return the number of orifices needed for the plant flow."""
    required_area = area_orifice(HeadLossOrifice, RatioVCOrifice,
                                 FlowPlant).magnitude
    single_orifice_area = area_circle(DiamOrifice).magnitude
    return np.ceil(required_area / single_orifice_area)
Return the number of orifices.
def getPattern(self, word):
    """Return the pattern stored under key *word*.

    Example: net.getPattern("tom") => [0, 0, 0, 1]

    Raises ValueError for an unknown key.
    """
    try:
        return self.patterns[word]
    except KeyError:
        raise ValueError('Unknown pattern in getPattern().', word)
Returns the pattern with key word. Example: net.getPattern("tom") => [0, 0, 0, 1]
def begin_transaction(
    self,
    database,
    options_=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Start a new Firestore transaction.

    Args:
        database (str): The database name, in the format
            ``projects/{project_id}/databases/{database_id}``.
        options_ (Union[dict, TransactionOptions]): Options for the
            transaction; defaults to a read-write transaction.
        retry (Optional[google.api_core.retry.Retry]): Retry policy;
            ``None`` disables retries.
        timeout (Optional[float]): Per-attempt timeout, in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional
            metadata provided to the method.

    Returns:
        A ``BeginTransactionResponse`` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: request failed.
        google.api_core.exceptions.RetryError: retries exhausted.
        ValueError: invalid parameters.
    """
    # lazily wrap the transport method with the configured retry/timeout
    if "begin_transaction" not in self._inner_api_calls:
        self._inner_api_calls[
            "begin_transaction"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.begin_transaction,
            default_retry=self._method_configs["BeginTransaction"].retry,
            default_timeout=self._method_configs["BeginTransaction"].timeout,
            client_info=self._client_info,
        )
    request = firestore_pb2.BeginTransactionRequest(
        database=database, options=options_
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # attach a routing header so the backend can route by database
    try:
        routing_header = [("database", database)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)
    return self._inner_api_calls["begin_transaction"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Starts a new transaction. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> response = client.begin_transaction(database) Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction. Defaults to a read-write transaction. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def spike_latency(signal, threshold, fs):
    """Find the latency (in seconds) of the first spike over *threshold*.

    :param signal: spike trace recording (1-D numpy array)
    :param threshold: value above which a sample counts as spiking
    :param fs: sampling frequency (Hz)
    :returns: float -- time of the peak of the first spike, or NaN when no
        samples exceed the threshold
    """
    over, = np.where(signal>threshold)
    # gaps (> 1 index) between over-threshold samples separate spikes
    segments, = np.where(np.diff(over) > 1)
    if len(over) > 1:
        if len(segments) == 0:
            # single contiguous run: peak within it
            # NOTE(review): slice stops at over[-1], excluding the last
            # over-threshold sample from the argmax window -- confirm
            idx = over[0] + np.argmax(signal[over[0]:over[-1]])
            latency = float(idx)/fs
        elif segments[0] == 0:
            # the first spike is a single isolated sample
            latency = float(over[0])/fs
        else:
            # peak within the first contiguous segment only
            idx = over[0] + np.argmax(signal[over[0]:over[segments[0]]])
            latency = float(idx)/fs
    elif len(over) > 0:
        latency = float(over[0])/fs
    else:
        latency = np.nan
    return latency
Find the latency of the first spike over threshold :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :returns: float -- Time of peak of first spike, or NaN if no values over threshold This is the same as the first value returned from calc_spike_times
def wrap_url(s, l):
    """Wrap a URL string at '/' boundaries, joining the wrapped lines with
    a '/' followed by a newline so no line greatly exceeds *l* characters."""
    parts = s.split('/')
    if len(parts) == 1:
        return parts[0]
    else:
        i = 0
        lines = []
        for j in range(i, len(parts) + 1):
            tv = '/'.join(parts[i:j])       # current candidate line
            nv = '/'.join(parts[i:j + 1])   # candidate with one more part
            # flush when adding the next part would exceed l, or at the end
            # (nv == tv once j passes the last index)
            if len(nv) > l or nv == tv:
                i = j
                lines.append(tv)
        return '/\n'.join(lines)
Wrap a URL string
def limiter(arr):
    """Restrict the maximum and minimum values of *arr*.

    Samples beyond +/- lim_thresh are softly compressed via arctan into
    the remaining headroom.  A new array is returned; the input is not
    modified.
    """
    dyn_range = 32767.0 / 32767.0
    lim_thresh = 30000.0 / 32767.0
    lim_range = dyn_range - lim_thresh
    out = arr.copy()

    high = N.where(arr > lim_thresh)[0]
    out[high] = (out[high] - lim_thresh) / lim_range
    out[high] = (N.arctan(out[high]) * 2.0 / N.pi) * lim_range + lim_thresh

    low = N.where(arr < -lim_thresh)[0]
    out[low] = -(out[low] + lim_thresh) / lim_range
    out[low] = -(N.arctan(out[low]) * 2.0 / N.pi * lim_range + lim_thresh)
    return out
Restrict the maximum and minimum values of arr
def get_parent_until(path):
    """Given a file path, determine the full dotted module path.

    e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc'
    yields 'numpy.core'.  Implicitly returns None when the module cannot
    be found.

    NOTE(review): relies on the deprecated ``imp`` module.
    """
    dirname = osp.dirname(path)
    try:
        mod = osp.basename(path)
        mod = osp.splitext(mod)[0]
        imp.find_module(mod, [dirname])
    except ImportError:
        return
    items = [mod]
    while 1:
        # collect package names while parent directories are packages
        items.append(osp.basename(dirname))
        try:
            dirname = osp.dirname(dirname)
            imp.find_module('__init__', [dirname + os.sep])
        except ImportError:
            break
    return '.'.join(reversed(items))
Given a file path, determine the full module path. e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields 'numpy.core'
def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Invite a user to a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    self._validate_xoxp_token()
    payload = dict(kwargs, channel=channel, user=user)
    return self.api_call("groups.invite", json=payload)
Invites a user to a private channel. Args: channel (str): The group id. e.g. 'G1234567890' user (str): The user id. e.g. 'U1234567890'
def add_entry(self, entry):
    """Add a path item to ``.entries``, finding any distributions on it.

    ``find_distributions(entry, True)`` is used to find distributions
    corresponding to the path entry, and they are added.  ``entry`` is
    always appended to ``.entries`` even if already present, because
    ``sys.path`` can contain the same value more than once and
    ``.entries`` should mirror it.
    """
    self.entry_keys.setdefault(entry, [])
    self.entries.append(entry)
    for dist in find_distributions(entry, True):
        self.add(dist, entry, False)
Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.)
def get_snapshot_by(self, volume_id_or_uri, field, value):
    """Get all snapshots that match the filter (search is case-insensitive).

    Args:
        volume_id_or_uri: Either the volume id or the volume uri.
        field: Field name to filter.
        value: Value to filter.

    Returns:
        list: Snapshots
    """
    snapshots_uri = self.__build_volume_snapshot_uri(volume_id_or_uri)
    return self._client.get_by(field, value, uri=snapshots_uri)
Gets all snapshots that match the filter. The search is case-insensitive. Args: volume_id_or_uri: Can be either the volume id or the volume uri. field: Field name to filter. value: Value to filter. Returns: list: Snapshots
def shuffled_batches(self, batch_size):
    """Generate randomized batches of data, sampling whole trajectories only.

    When *batch_size* covers the whole dataset, yields self unchanged.
    """
    if batch_size >= self.num_envs * self.num_steps:
        yield self
    else:
        # how many whole rollouts fit into one batch
        rollouts_in_batch = batch_size // self.num_steps
        batch_splits = math_util.divide_ceiling(self.num_envs, rollouts_in_batch)
        indices = list(range(self.num_envs))
        np.random.shuffle(indices)
        for sub_indices in np.array_split(indices, batch_splits):
            yield Trajectories(
                num_steps=self.num_steps,
                num_envs=len(sub_indices),
                environment_information=None,
                # env axis is second for transition tensors, first for
                # rollout tensors (per the indexing below)
                transition_tensors={k: x[:, sub_indices] for k, x in self.transition_tensors.items()},
                rollout_tensors={k: x[sub_indices] for k, x in self.rollout_tensors.items()},
            )
Generate randomized batches of data - only sample whole trajectories
def get_encoder_settings(self):
    """Return the guessed encoder settings, or an empty string when no
    LAME header was found."""
    header = self.lame_header
    if header is None:
        return u""
    return header.guess_settings(*self.lame_version)
Returns the guessed encoder settings
def saveShp(self, target):
    """Save an shp file to *target* (a path or a file-like object).

    A string target has its extension replaced with '.shp'; the shape
    type defaults to that of the first stored shape when unset.
    """
    if not hasattr(target, "write"):
        target = os.path.splitext(target)[0] + '.shp'
    if not self.shapeType:
        self.shapeType = self._shapes[0].shapeType
    self.shp = self.__getFileObj(target)
    self.__shapefileHeader(self.shp, headerType='shp')
    self.__shpRecords()
Save an shp file.
def terminate(self):
    """Terminate the managed processes right now with a SIGTERM, then stop
    the watcher."""
    # iterate over a snapshot so the underlying list may change safely
    for entry in list(self.processes):
        entry["subprocess"].send_signal(signal.SIGTERM)
    self.stop_watch()
Terminates the processes right now with a SIGTERM
def get_maintainer(self):
    """Get the dataset's maintainer.

    Returns:
        User: Dataset's maintainer
    """
    return hdx.data.user.User.read_from_hdx(self.data['maintainer'],
                                            configuration=self.configuration)
Get the dataset's maintainer. Returns: User: Dataset's maintainer
def copy_entities(from_namespace, from_workspace, to_namespace, to_workspace,
                  etype, enames, link_existing_entities=False):
    """Copy entities between FireCloud workspaces.

    Args:
        from_namespace (str): project (namespace) of the source workspace
        from_workspace (str): source workspace name
        to_namespace (str): project (namespace) of the target workspace
        to_workspace (str): target workspace name
        etype (str): entity type
        enames (list(str)): list of entity names to copy
        link_existing_entities (bool): link all soft conflicts to the
            entities that already exist

    Swagger:
        https://api.firecloud.org/#!/Entities/copyEntities
    """
    uri = "workspaces/{0}/{1}/entities/copy".format(to_namespace, to_workspace)
    body = {
        "sourceWorkspace": {
            "namespace": from_namespace,
            "name": from_workspace
        },
        "entityType": etype,
        "entityNames": enames
    }
    # the API expects the flag as a lowercase-string query parameter
    return __post(uri, json=body,
                  params={'linkExistingEntities': str(link_existing_entities).lower()})
Copy entities between workspaces Args: from_namespace (str): project (namespace) to which source workspace belongs from_workspace (str): Source workspace name to_namespace (str): project (namespace) to which target workspace belongs to_workspace (str): Target workspace name etype (str): Entity type enames (list(str)): List of entity names to copy link_existing_entities (boolean): Link all soft conflicts to the entities that already exist. Swagger: https://api.firecloud.org/#!/Entities/copyEntities
def _read_incoming(self):
    """Run in a thread: move output from the subprocess pipe to the queue.

    A ``None`` sentinel is enqueued on EOF or read error, then the thread
    returns.
    """
    fileno = self.proc.stdout.fileno()
    while True:
        data = b''
        try:
            data = os.read(fileno, 1024)
        except OSError as exc:
            self._log(exc, 'read')
        if not data:
            self._read_queue.put(None)
            return
        self._read_queue.put(data)
Run in a thread to move output from a pipe to a queue.
def guard_rollback_to_open(worksheet):
    """Return whether the rollback-to-open transition can be performed,
    i.e. whether any analysis on the worksheet is still 'assigned'."""
    return any(
        api.get_review_status(analysis) == "assigned"
        for analysis in worksheet.getAnalyses()
    )
Return whether the 'rollback_to_open' transition can be performed or not
def import_qt(glbls):
    """Delayed Qt loader: populate *glbls* with the projexui Qt names.

    No-op when already loaded (detected via the 'QtCore' key).
    """
    if 'QtCore' in glbls:
        return
    from projexui.qt import QtCore, QtGui, wrapVariant, uic
    from projexui.widgets.xloggersplashscreen import XLoggerSplashScreen
    glbls['QtCore'] = QtCore
    glbls['QtGui'] = QtGui
    glbls['wrapVariant'] = wrapVariant
    glbls['uic'] = uic
    glbls['XLoggerSplashScreen'] = XLoggerSplashScreen
Delayed qt loader.
def cira_stretch(img, **kwargs):
    """Logarithmic (CIRA) stretch adapted to human vision.

    Applicable only for visible channels.
    """
    LOG.debug("Applying the cira-stretch")
    def func(band_data):
        log_root = np.log10(0.0223)
        denom = (1.0 - log_root) * 0.75
        band_data *= 0.01
        # clip to the smallest positive float so log10 never sees zero
        band_data = band_data.clip(np.finfo(float).eps)
        band_data = xu.log10(band_data)
        band_data -= log_root
        band_data /= denom
        return band_data
    return apply_enhancement(img.data, func)
Logarithmic stretch adapted to human vision. Applicable only for visible channels.
def _to_hours_mins_secs(time_taken):
    """Convert a duration in seconds to an (hours, mins, secs) tuple."""
    hours, remainder = divmod(time_taken, 3600)
    mins, secs = divmod(remainder, 60)
    return hours, mins, secs
Convert seconds to hours, mins, and seconds.
def rtt_read(self, buffer_index, num_bytes):
    """Read up to *num_bytes* bytes from the given RTT buffer.

    Read data is removed from the RTT buffer; when fewer than
    *num_bytes* bytes are waiting, the entire buffer contents are read.

    Args:
        buffer_index (int): index of the RTT buffer to read from
        num_bytes (int): maximum number of bytes to read

    Returns:
        A list of bytes read from RTT.

    Raises:
        JLinkRTTException: if the underlying JLINK_RTTERMINAL_Read call
            fails (negative return value).
    """
    buf = (ctypes.c_ubyte * num_bytes)()
    bytes_read = self._dll.JLINK_RTTERMINAL_Read(buffer_index, buf, num_bytes)
    if bytes_read < 0:
        raise errors.JLinkRTTException(bytes_read)
    # only the first bytes_read entries of the ctypes buffer are valid
    return list(buf)[:bytes_read]
Reads data from the RTT buffer. This method will read at most num_bytes bytes from the specified RTT buffer. The data is automatically removed from the RTT buffer. If there are not num_bytes bytes waiting in the RTT buffer, the entire contents of the RTT buffer will be read. Args: self (JLink): the ``JLink`` instance buffer_index (int): the index of the RTT buffer to read from num_bytes (int): the maximum number of bytes to read Returns: A list of bytes read from RTT. Raises: JLinkRTTException if the underlying JLINK_RTTERMINAL_Read call fails.
def review_metadata_csv_single_user(filedir, metadata, csv_in, n_headers):
    """Check validity of metadata for a single user.

    :param filedir: filepath of the directory whose csv has to be made.
    :param metadata: metadata generated by the load_metadata_csv function.
    :param csv_in: reader object iterating over the csv (currently unused).
    :param n_headers: number of headers in the csv (currently unused).
    :returns: True when all metadata validates, False otherwise (the
        offending error is printed via print_error).
    """
    try:
        if not validate_metadata(filedir, metadata):
            return False
        for filename, file_metadata in metadata.items():
            # raises ValueError on invalid per-file metadata
            is_single_file_metadata_valid(file_metadata, None, filename)
    except ValueError as e:
        print_error(e)
        return False
    return True
Check validity of metadata for single user. :param filedir: This field is the filepath of the directory whose csv has to be made. :param metadata: This field is the metadata generated from the load_metadata_csv function. :param csv_in: This field returns a reader object which iterates over the csv. :param n_headers: This field is the number of headers in the csv.
def list_(runas=None):
    """List all rvm-installed rubies.

    runas
        The user under which to run rvm. If not specified, then rvm will
        be run as the user under which Salt is running.

    CLI Example:

    .. code-block:: bash

        salt '*' rvm.list
    """
    rubies = []
    output = _rvm(['list'], runas=runas)
    if output:
        # parses lines like '=* ruby-2.4.1 [ x86_64 ]'; group(1) captures
        # the marker column and '*' flags a ruby in the third list element
        regex = re.compile(r'^[= ]([*> ]) ([^- ]+)-([^ ]+) \[ (.*) \]')
        for line in output.splitlines():
            match = regex.match(line)
            if match:
                rubies.append([
                    match.group(2), match.group(3), match.group(1) == '*'
                ])
    return rubies
List all rvm-installed rubies runas The user under which to run rvm. If not specified, then rvm will be run as the user under which Salt is running. CLI Example: .. code-block:: bash salt '*' rvm.list
def pore_coords(target):
    r"""Return, for each throat of *target*, the average of the coordinates
    of its two connected pores (the throat midpoint)."""
    network = target.project.network
    Ts = network.throats(target.name)
    conns = network['throat.conns']
    coords = network['pore.coords']
    # mean over the two pore endpoints of every throat, then select Ts
    return _sp.mean(coords[conns], axis=1)[Ts]
r""" The average of the pore coords
def parse(filename):
    """Parse ASDL from the given file and return a Module node describing it."""
    with open(filename) as source:
        text = source.read()
    return ASDLParser().parse(text)
Parse ASDL from the given file and return a Module node describing it.
def get_layer(self):
    """Retrieve the layer from the DB into ``self.layer``.

    No-op when the layer was already fetched; raises Http404 when the
    slug does not exist.
    """
    if not self.layer:
        try:
            self.layer = Layer.objects.get(slug=self.kwargs['slug'])
        except Layer.DoesNotExist:
            raise Http404(_('Layer not found'))
retrieve layer from DB
def nextClass(self, classuri):
    """Return the class following *classuri* in the list of classes.

    Wraps around: the last class's successor is the first one.  Returns
    None when *classuri* is not found.
    """
    if classuri == self.classes[-1].uri:
        return self.classes[0]
    for current, successor in zip(self.classes, self.classes[1:]):
        if current.uri == classuri:
            return successor
    return None
Returns the next class in the list of classes. If it's the last one, returns the first one.
def _compute_hamming_matrix(N):
    """Compute a Hamming-distance matrix between all binary states of |N|
    nodes.

    Only called for large N with no precomputed matrix available; results
    are intended to be cached by the caller (use |_hamming_matrix| rather
    than calling this directly).
    """
    possible_states = np.array(list(utils.all_states((N))))
    # cdist's 'hamming' returns the fraction of differing positions,
    # so multiply by N to obtain the absolute Hamming distance
    return cdist(possible_states, possible_states, 'hamming') * N
Compute and store a Hamming matrix for |N| nodes. Hamming matrices have the following sizes:: N MBs == === 9 2 10 8 11 32 12 128 13 512 Given these sizes and the fact that large matrices are needed infrequently, we store computed matrices using the Joblib filesystem cache instead of adding computed matrices to the ``_hamming_matrices`` global and clogging up memory. This function is only called when |N| > ``_NUM_PRECOMPUTED_HAMMING_MATRICES``. Don't call this function directly; use |_hamming_matrix| instead.
def wsp(word):
    """Return the number of unstressed heavy syllables (WSP violations).

    *word* is expected to contain '.' delimiters bounding unstressed spans.
    """
    # HEAVY matches a vowel, optional '.', u/y, then coda material up to
    # the next '.' or the end of the word
    HEAVY = r'[ieaAoO]{1}[\.]*(u|y)[^ieaAoO]+(\.|$)'
    delimiters = [i for i, char in enumerate(word) if char == '.']
    # an odd delimiter count means the final span runs to the word's end
    if len(delimiters) % 2 != 0:
        delimiters.append(len(word))
    unstressed = []
    for i, d in enumerate(delimiters):
        if i % 2 == 0:
            # indices between each even/odd delimiter pair are unstressed
            unstressed.extend(range(d + 1, delimiters[i + 1]))
    heavies = re.finditer(HEAVY, word)
    violations = sum(1 for m in heavies if m.start(0) in unstressed)
    return violations
Return the number of unstressed heavy syllables.
def add_and_matches(self, matcher, lhs, params, numq=1, flatten=None):
    """Add AND conditions to match *params*.

    :type matcher: str or callable
    :arg matcher: if `str`, `matcher.format` is used (via adapt_matcher).
    :type lhs: str
    :arg lhs: the first argument to `matcher`.
    :type params: list
    :arg params: each element should be able to feed into sqlite '?'.
    :type numq: int
    :arg numq: number of '?' placeholders for each condition.
    :type flatten: None or callable
    :arg flatten: should return a list of length ``numq * len(params)``;
        defaults via ``self._default_flatten(numq)``.
    """
    params = self._adapt_params(params)
    qs = ['?'] * numq
    flatten = flatten or self._default_flatten(numq)
    # one copy of the same condition per parameter
    expr = repeat(adapt_matcher(matcher)(lhs, *qs), len(params))
    self.conditions.extend(expr)
    self.params.extend(flatten(params))
Add AND conditions to match to `params`. :type matcher: str or callable :arg matcher: if `str`, `matcher.format` is used. :type lhs: str :arg lhs: the first argument to `matcher`. :type params: list :arg params: each element should be able to feed into sqlite '?'. :type numq: int :arg numq: number of parameters for each condition. :type flatten: None or callable :arg flatten: when `numq > 1`, it should return a list of length `numq * len(params)`.
def readall(self):
    """Read and return all bytes remaining in the file.

    Returns an empty bytestring when the file position is at or past EOF.

    Raises:
        pycdlibexception.PyCdlibInvalidInput: on a closed file.
    """
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')
    remaining = self._length - self._offset
    if remaining <= 0:
        return b''
    data = self._fp.read(remaining)
    self._offset += remaining
    return data
A method to read and return the remaining bytes in the file. Parameters: None. Returns: The rest of the data left in the file. If the file is at or past EOF, returns an empty bytestring.
def load_module(self, name):
    """Load the ``pygal.maps.<name>`` module from the previously loaded
    plugin, caching it in ``sys.modules``."""
    if name not in sys.modules:
        # map 'pygal.maps.<x>' to the attribute '<x>' on the maps package
        sys.modules[name] = getattr(maps, name.split('.')[2])
    return sys.modules[name]
Load the ``pygal.maps.name`` module from the previously loaded plugin
def _reserve(self, key):
    """Reserve a component's binding temporarily (context manager body).

    Protects against cycles; the RESERVED placeholder is always removed
    from the cache on exit.
    """
    self.assign(key, RESERVED)
    try:
        yield
    finally:
        del self._cache[key]
Reserve a component's binding temporarily. Protects against cycles.
def update_constants():
    """Recreate channel name constants with changed settings.

    Django settings are patched for testing after import time, so code
    that captured these module-level constants needs this updater to
    refresh them from the current settings.
    """
    global MANAGER_CONTROL_CHANNEL, MANAGER_EXECUTOR_CHANNELS
    global MANAGER_LISTENER_STATS, MANAGER_STATE_PREFIX
    redis_prefix = getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_PREFIX', '')
    MANAGER_CONTROL_CHANNEL = '{}.control'.format(redis_prefix)
    MANAGER_EXECUTOR_CHANNELS = ManagerChannelPair(
        '{}.result_queue'.format(redis_prefix),
        '{}.result_queue_response'.format(redis_prefix),
    )
    MANAGER_STATE_PREFIX = '{}.state'.format(redis_prefix)
    MANAGER_LISTENER_STATS = '{}.listener_stats'.format(redis_prefix)
Recreate channel name constants with changed settings. This kludge is mostly needed due to the way Django settings are patched for testing and how modules need to be imported throughout the project. On import time, settings are not patched yet, but some of the code needs static values immediately. Updating functions such as this one are then needed to fix dummy values.
def _exprcomp(node):
    """Return a unique (memoized) Expression complement for *node*."""
    try:
        comp = _LITS[node.data()]
    except KeyError:
        # first request for this node: build the complement and cache it
        comp = _LITS[node.data()] = Complement(node)
    return comp
Return a unique Expression complement.
def zero_weight_obs_names(self):
    """Get the zero-weighted observation names.

    Returns
    -------
    zero_weight_obs_names : list
        a list of zero-weighted observation names (empty when none)
    """
    self.observation_data.index = self.observation_data.obsnme
    # group rows by whether their weight is exactly zero
    groups = self.observation_data.groupby(
        self.observation_data.weight.apply(lambda x: x==0.0)).groups
    if True in groups:
        return list(self.observation_data.loc[groups[True],"obsnme"])
    else:
        return []
get the zero-weighted observation names Returns ------- zero_weight_obs_names : list a list of zero-weighted observation names
def focusOutEvent(self, ev):
    """Redefine focusOut events to stop editing -- unless focus merely
    moved to a descendant of this widget."""
    Kittens.widgets.ClickableTreeWidget.focusOutEvent(self, ev)
    wid = QApplication.focusWidget()
    # walk up the new focus widget's parent chain; if it reaches self,
    # focus stayed inside this widget and editing continues
    while wid:
        if wid is self:
            return
        wid = wid.parent()
    self._startOrStopEditing()
Redefine focusOut events to stop editing
def _server_rollback():
    """Remove the script database and archive files to roll back the CI
    server installation (skipped when the 'nolive' arg is set)."""
    from os import path, remove
    archpath = path.abspath(path.expanduser(settings.archfile))
    if path.isfile(archpath) and not args["nolive"]:
        vms("Removing archive JSON file at {}.".format(archpath))
        remove(archpath)
    datapath = path.abspath(path.expanduser(settings.datafile))
    if path.isfile(datapath) and not args["nolive"]:
        vms("Removing script database JSON file at {}".format(datapath))
        remove(datapath)
Removes script database and archive files to rollback the CI server installation.
def get_cat_model(model):
    """Return a model class from a dotted string or a CategoryBase subclass.

    Raises TemplateSyntaxError for anything unrecognized.
    """
    try:
        if isinstance(model, string_types):
            model_class = apps.get_model(*model.split("."))
        elif issubclass(model, CategoryBase):
            model_class = model
        # apps.get_model may yield None for an unknown model; treat it as
        # a TypeError so the same error path is taken
        if model_class is None:
            raise TypeError
    except TypeError:
        raise TemplateSyntaxError("Unknown model submitted: %s" % model)
    return model_class
Return a class from a string or class
def _record_last_active(self, host):
    """Move *host* to the front of our host list so it is tried first next
    time, and record the time.  get_active_namenode relies on this
    reordering."""
    if host in self.hosts:
        remaining = [h for h in self.hosts if h != host]
        self.hosts = [host] + remaining
    self._last_time_recorded_active = time.time()
Put host first in our host list, so we try it first next time The implementation of get_active_namenode relies on this reordering.
def autoinc(self):
    """Bump the stack version stored in this config's file, if enabled.

    No-op unless the ``autoinc_version`` setting is truthy.  Rewrites the
    config file by writing a temp file in the same directory and renaming
    it over the original, so the replace is atomic on one filesystem.
    """
    # Respect the opt-in flag; nothing to do when auto-increment is disabled.
    if not self.get('autoinc_version'):
        return
    oldver = self['version']
    # bump_version_tail handles both MAJOR.minor and MAJOR.minor-rc.N forms.
    newver = bump_version_tail(oldver)
    config_path = self.filepath
    # Create the temp file alongside the target so os.rename below does not
    # cross filesystems.
    temp_fd, temp_name = tempfile.mkstemp(
        dir=os.path.dirname(config_path),
    )
    with open(config_path) as old:
        with os.fdopen(temp_fd, 'w') as new:
            for oldline in old:
                # Only the 'version:' line is rewritten; everything else is
                # copied through verbatim.
                if oldline.startswith('version:'):
                    new.write("version: '%s'\n" % newver)
                    continue
                new.write(oldline)
    log.info('Incrementing stack version %s -> %s' % (oldver, newver))
    os.rename(temp_name, config_path)
Conditionally updates the stack version in the file associated with this config. This handles both official releases (i.e. QA configs), and release candidates. Assumptions about version: - Official release versions are MAJOR.minor, where MAJOR and minor are both non-negative integers. E.g. 2.9 2.10 2.11 3.0 3.1 3.2 etc... - Release candidate versions are MAJOR.minor-rc.N, where MAJOR, minor, and N are all non-negative integers. 3.5-rc.1 3.5-rc.2
def refresh_address_presence(self, address):
    """Recompute the reachability of *address* from cached user presences.

    Triggers ``_address_reachability_changed_callback`` only when the
    derived reachability actually changed.  This covers an edge case in
    our Matrix protocol usage and is not intended for general use.
    """
    # One presence value per user id currently known for this address.
    composite_presence = {
        self._fetch_user_presence(uid)
        for uid in self._address_to_userids[address]
    }

    # Iterate UserPresence members in declaration order and take the first
    # one present -- the enum's member order encodes the priority.
    new_presence = UserPresence.UNKNOWN
    for presence in UserPresence.__members__.values():
        if presence in composite_presence:
            new_presence = presence
            break

    new_address_reachability = USER_PRESENCE_TO_ADDRESS_REACHABILITY[new_presence]

    # Unchanged reachability: skip both the log line and the callback.
    if new_address_reachability == self._address_to_reachability.get(address):
        return

    log.debug(
        'Changing address presence state',
        current_user=self._user_id,
        address=to_normalized_address(address),
        prev_state=self._address_to_reachability.get(address),
        state=new_address_reachability,
    )

    self._address_to_reachability[address] = new_address_reachability
    self._address_reachability_changed_callback(address, new_address_reachability)
Update synthesized address presence state from cached user presence states. Triggers callback (if any) in case the state has changed. This method is only provided to cover an edge case in our use of the Matrix protocol and should **not** generally be used.
def db_value(self, value):
    """Coerce *value* (ISO string or Arrow instance) to a plain datetime,
    then defer to the parent field's db_value."""
    coerced = value
    if isinstance(coerced, string_types):
        coerced = arrow.get(coerced)
    if isinstance(coerced, arrow.Arrow):
        coerced = coerced.datetime
    return super(ArrowDateTimeField, self).db_value(coerced)
Convert the Arrow instance to a datetime for saving in the db.
def _count(self, method, limit, keywords):
    """Run a paginated scan/query *method*, aggregating pages into a Count.

    Follows DynamoDB's ``LastEvaluatedKey`` pagination until the table is
    exhausted or *limit* reports completion.
    """
    # Copy so the caller's limit object is not mutated by this call.
    limit = limit.copy()
    has_more = True
    # NOTE(review): starts as None and is accumulated via ``+=``; this only
    # works if Count defines addition against None (e.g. via __radd__) --
    # confirm, otherwise the first iteration raises TypeError.
    count = None
    while has_more:
        limit.set_request_args(keywords)
        response = self.call(method, **keywords)
        limit.post_fetch(response)
        count += Count.from_response(response)
        last_evaluated_key = response.get('LastEvaluatedKey')
        # Another page exists only if DynamoDB returned a continuation key
        # and the limit is not yet satisfied.
        has_more = last_evaluated_key is not None and not limit.complete
        if has_more:
            keywords['ExclusiveStartKey'] = last_evaluated_key
    return count
Do a scan or query and aggregate the results into a Count
def getDatastreamHistory(self, pid, dsid, format=None):
    """Get history information for a datastream.

    :param pid: object pid
    :param dsid: datastream id
    :param format: optional response format
    :rtype: :class:`requests.models.Response`
    """
    query_args = {}
    if format is not None:
        query_args['format'] = format
    uri = 'objects/%(pid)s/datastreams/%(dsid)s/history' % {
        'pid': pid,
        'dsid': dsid,
    }
    return self.get(uri, params=query_args)
Get history information for a datastream. :param pid: object pid :param dsid: datastream id :param format: format :rtype: :class:`requests.models.Response`
def _on_progress(adapter, operation, conn_id, done, total): conn_string = adapter._get_property(conn_id, 'connection_string') if conn_string is None: return adapter.notify_progress(conn_string, operation, done, total)
Callback when progress is reported.
def get_lower_triangle_correlation_matrix(self, sites, imt):
    """Return the Cholesky factor (lower-triangular, zeros above the main
    diagonal) of the correlation matrix for *sites* and *imt*."""
    corr = self._get_correlation_matrix(sites, imt)
    return numpy.linalg.cholesky(corr)
Get lower-triangle matrix as a result of Cholesky-decomposition of correlation matrix. The resulting matrix should have zeros on values above the main diagonal. The actual implementations of :class:`BaseCorrelationModel` interface might calculate the matrix considering site collection and IMT (like :class:`JB2009CorrelationModel` does) or might have it pre-constructed for a specific site collection and IMT, in which case they will need to make sure that parameters to this function match parameters that were used to pre-calculate decomposed correlation matrix. :param sites: :class:`~openquake.hazardlib.site.SiteCollection` to create correlation matrix for. :param imt: Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
def get_hyperedge_degree_matrix(M):
    """Build the diagonal sparse matrix D_e of hyperedge degrees.

    A hyperedge's degree is its cardinality, i.e. the column sum of the
    incidence matrix.

    :param M: incidence matrix of the hypergraph.
    :returns: sparse diagonal matrix of hyperedge degrees.
    """
    column_sums = M.sum(0).transpose()
    degree_values = [int(row[0, 0]) for row in column_sums]
    return sparse.diags([degree_values], [0])
Creates the diagonal matrix of hyperedge degrees D_e as a sparse matrix, where a hyperedge degree is the cardinality of the hyperedge. :param M: the incidence matrix of the hypergraph to find the D_e matrix on. :returns: sparse.csc_matrix -- the diagonal hyperedge degree matrix as a sparse matrix.
def _get_weight_size(self, data, n_local_subj): weight_size = np.zeros(1).astype(int) local_weight_offset = np.zeros(n_local_subj).astype(int) for idx, subj_data in enumerate(data): if idx > 0: local_weight_offset[idx] = weight_size[0] weight_size[0] += self.K * subj_data.shape[1] return weight_size, local_weight_offset
Calculate the size of weight for this process Parameters ---------- data : a list of 2D array, each in shape [n_voxel, n_tr] The fMRI data from multi-subject. n_local_subj : int Number of subjects allocated to this process. Returns ------- weight_size : 1D array The size of total subject weight on this process. local_weight_offset : 1D array Number of elements away from the first element in the combined weight array at which to begin the new, segmented array for a subject
def make_big_empty_files(self):
    """Pre-create placeholder files so workers can seek to their region and
    write their data in place."""
    for file_url in self.file_urls:
        local_path = file_url.get_local_path(self.dest_directory)
        with open(local_path, "wb") as outfile:
            if file_url.size <= 0:
                # Zero-size entries still get an (empty) file created.
                continue
            # Seek to the final byte and write a single NUL so the file has
            # its full length without writing all of its contents.
            outfile.seek(int(file_url.size) - 1)
            outfile.write(b'\0')
Write out an empty file so the workers can seek to where they should write and write their data.
def set_metrics_params(self, enable=None, store_dir=None, restore=None, no_cores=None):
    """Set basic Metrics subsystem params.

    * http://uwsgi.readthedocs.io/en/latest/Metrics.html

    :param bool enable: Enables the subsystem.

    :param str|unicode store_dir: Directory to store metrics.
        Placeholders can be used to build paths, e.g.
        ``{project_runtime_dir}/metrics/``.

    :param bool restore: Restore previous metrics from ``store_dir``
        on restart instead of resetting them.

    :param bool no_cores: Disable generation of cores-related metrics.
    """
    resolved_dir = self._section.replace_placeholders(store_dir)
    self._set('enable-metrics', enable, cast=bool)
    self._set('metrics-dir', resolved_dir)
    self._set('metrics-dir-restore', restore, cast=bool)
    self._set('metrics-no-cores', no_cores, cast=bool)
    return self._section
Sets basic Metrics subsystem params. uWSGI metrics subsystem allows you to manage "numbers" from your apps. When enabled, the subsystem configures a vast amount of metrics (like requests per-core, memory usage, etc) but, in addition to this, you can configure your own metrics, such as the number of active users or, say, hits of a particular URL, as well as the memory consumption of your app or the whole server. * http://uwsgi.readthedocs.io/en/latest/Metrics.html * SNMP Integration - http://uwsgi.readthedocs.io/en/latest/Metrics.html#snmp-integration :param bool enable: Enables the subsystem. :param str|unicode store_dir: Directory to store metrics. The metrics subsystem can expose all of its metrics in the form of text files in a directory. The content of each file is the value of the metric (updated in real time). .. note:: Placeholders can be used to build paths, e.g.: {project_runtime_dir}/metrics/ See ``Section.project_name`` and ``Section.runtime_dir``. :param bool restore: Restore previous metrics from ``store_dir``. When you restart a uWSGI instance, all of its metrics are reset. Use the option to force the metric subsystem to read-back the values from the metric directory before starting to collect values. :param bool no_cores: Disable generation of cores-related metrics.
def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
                          sources, file_byte_budget=1e6,
                          max_subtoken_length=None):
    """Generate (or load a previously generated) vocabulary from *sources*."""
    return get_or_generate_vocab_inner(
        data_dir, vocab_filename, vocab_size,
        generate_lines_for_vocab(tmp_dir, sources, file_byte_budget),
        max_subtoken_length)
Generate a vocabulary from the datasets in sources.
def reverse(self, name, **kwargs):
    """Reverse routing: build a path for the endpoint registered as *name*,
    filling path placeholders from keyword arguments.

    Returns None when no endpoint has that name.
    """
    for path_template, endpoint_name, _ in self.endpoints:
        if endpoint_name == name:
            return path_template.format(**kwargs)
Reverse routing. >>> from kobin import Response >>> r = Router() >>> def view(user_id: int) -> Response: ... return Response(f'You are {user_id}') ... >>> r.add('/users/{user_id}', 'GET', 'user-detail', view) >>> r.reverse('user-detail', user_id=1) '/users/1'
def success(self):
    """Return ``True`` when PayPal reported a success (or success-with-warning)
    acknowledgement, ``False`` otherwise.

    :rtype: bool
    """
    ack = self.ack.upper()
    return ack in (self.config.ACK_SUCCESS, self.config.ACK_SUCCESS_WITH_WARNING)
Checks for the presence of errors in the response. Returns ``True`` if all is well, ``False`` otherwise. :rtype: bool :returns ``True`` if PayPal says our query was successful.
def convert_result(converter):
    """Decorator factory: pass the wrapped function's return value through
    *converter* before returning it."""
    def decorate(fn):
        @inspection.wraps(fn)
        def new_fn(*args, **kwargs):
            result = fn(*args, **kwargs)
            return converter(result)
        return new_fn
    return decorate
Decorator that can convert the result of a function call.
def is_smart(self, value):
    """Set the group's is_smart property; smart groups get a ``criteria``
    subelement created on demand.

    Args:
        value: Boolean.
    """
    self.set_bool("is_smart", value)
    # Only a literal True triggers criteria creation, and only if absent.
    if value is True and self.find("criteria") is None:
        self.criteria = ElementTree.SubElement(self, "criteria")
Set group is_smart property to value. Args: value: Boolean.
def half_cauchy_like(x, alpha, beta):
    """Half-Cauchy log-likelihood: the absolute value of a Cauchy with
    location ``alpha`` and scale ``beta`` (beta > 0); x must be non-negative.
    """
    # NOTE(review): the bare ``R`` below looks like residue of a stripped
    # r""" docstring prefix; as written it raises NameError at call time
    # unless ``R`` is defined at module level -- confirm against the
    # original source.
    R
    x = np.atleast_1d(x)
    # Support is x >= 0: any negative sample makes the likelihood zero.
    if sum(x.ravel() < 0):
        return -inf
    # Folding the Cauchy onto the positive half-line doubles the density,
    # hence the + len(x) * log(2) correction to the log-likelihood.
    return flib.cauchy(x, alpha, beta) + len(x) * np.log(2)
R""" Half-Cauchy log-likelihood. Simply the absolute value of Cauchy. .. math:: f(x \mid \alpha, \beta) = \frac{2}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]} :Parameters: - `alpha` : Location parameter. - `beta` : Scale parameter (beta > 0). .. note:: - x must be non-negative.
def _get_subtype_tags(self):
    """Return the tuple of subtype tags that name this type, ordered from
    the base of the struct hierarchy down to this type."""
    assert self.is_member_of_enumerated_subtypes_tree(), \
        'Not a part of a subtypes tree.'
    cur = self.parent_type
    cur_dt = self
    tags = []
    # Walk upward through the ancestors; at each level record the field tag
    # under which the previous (child) type is enumerated.
    while cur:
        assert cur.has_enumerated_subtypes()
        for subtype_field in cur.get_enumerated_subtypes():
            if subtype_field.data_type is cur_dt:
                tags.append(subtype_field.name)
                break
        else:
            # Invariant: every enumerated-subtypes parent must list its child.
            assert False, 'Could not find?!'
        cur_dt = cur
        cur = cur.parent_type
    # Tags were collected child-to-root; callers expect root-to-child order.
    tags.reverse()
    return tuple(tags)
Returns a list of type tags that refer to this type starting from the base of the struct hierarchy.
def get_by_details(self, name, type, clazz):
    """Look up a cache entry matching *name*/*type*/*clazz*.

    Returns None when there is no matching entry.
    """
    return self.get(DNSEntry(name, type, clazz))
Gets an entry by details. Will return None if there is no matching entry.
def _hline_bokeh_(self, col):
    """Return a horizontal line placed at the mean of column *col*."""
    mean_value = self.df[col].mean()
    return hv.HLine(mean_value)
Returns a horizontal line drawn at a column's mean value
def rate_slack(self, max_rate):
    """The maximum time interval that can be stolen from this fragment while
    keeping it within *max_rate* (characters/second).

    ``REGULAR``: the opposite of ``rate_lack``.
    ``NONSPEECH``: the whole fragment length.
    ``HEAD``/``TAIL``: ``0.000`` -- they cannot be stolen.
    """
    if self.fragment_type == self.REGULAR:
        return -self.rate_lack(max_rate)
    if self.fragment_type == self.NONSPEECH:
        return self.length
    return TimeValue("0.000")
The maximum time interval that can be stolen to this fragment while keeping it respecting the given max rate. For ``REGULAR`` fragments this value is the opposite of the ``rate_lack``. For ``NONSPEECH`` fragments this value is equal to the length of the fragment. For ``HEAD`` and ``TAIL`` fragments this value is ``0.000``, meaning that they cannot be stolen. :param max_rate: the maximum rate (characters/second) :type max_rate: :class:`~aeneas.exacttiming.Decimal` :rtype: :class:`~aeneas.exacttiming.TimeValue` .. versionadded:: 1.7.0
def timedelta_average(*values: datetime.timedelta) -> datetime.timedelta:
    r"""Compute the arithmetic mean of the given timedeltas.

    Accepts either separate timedelta arguments or a single list/tuple
    of timedeltas.

    :param \*values: Timedelta instances to process.
    """
    if isinstance(values[0], (list, tuple)):
        values = values[0]
    total = sum(values, datetime.timedelta())
    return total // len(values)
r"""Compute the arithmetic mean for timedeltas list. :param \*values: Timedelta instances to process.
def _guess_vc_legacy(self): default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver return os.path.join(self.ProgramFilesx86, default)
Locate Visual C for versions prior to 2017
def map(self, mapper: Callable[[Any], Any]) -> 'Observable':
    r"""Map *mapper* over this observable (functor fmap).

    Haskell: fmap f m = Cont $ \c -> runCont m (c . f)
    """
    source = self

    def subscribe(on_next):
        return source.subscribe(compose(on_next, mapper))

    return Observable(subscribe)
r"""Map a function over an observable. Haskell: fmap f m = Cont $ \c -> runCont m (c . f)
def extract_archive(archive, verbosity=0, outdir=None, program=None, interactive=True):
    """Extract the given archive file after validating that it exists."""
    util.check_existing_filename(archive)
    if verbosity >= 0:
        util.log_info("Extracting %s ..." % archive)
    return _extract_archive(archive,
                            verbosity=verbosity,
                            interactive=interactive,
                            outdir=outdir,
                            program=program)
Extract given archive.
def _arguments_repr(self):
    """Representation of the arguments used to create this object."""
    if self.document_class is dict:
        document_class_repr = 'dict'
    else:
        document_class_repr = repr(self.document_class)
    uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
                                                  self.uuid_representation)
    return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
            'unicode_decode_error_handler=%r, tzinfo=%r, '
            'type_registry=%r' % (document_class_repr, self.tz_aware,
                                  uuid_rep_repr,
                                  self.unicode_decode_error_handler,
                                  self.tzinfo, self.type_registry))
Representation of the arguments used to create this object.
def mapper_metro(self, _, data):
    """Map an OSM POI or a geotweet to ``(metro, (type_tag, lonlat, payload))``
    via a spatial lookup of metro areas.

    Records are routed by shape: OSM entries carry 'tags'/'coordinates',
    tweets carry 'user_id'/'lonlat'.
    """
    if 'tags' in data:
        # OSM POI record.
        type_tag = 1
        lonlat = data['coordinates']
        payload = data['tags']
    elif 'user_id' in data:
        # Geotweet record.
        type_tag = 2
        accept = [
            "twitter\.com",
            "foursquare\.com",
            "instagram\.com",
            "untappd\.com"
        ]
        expr = "|".join(accept)
        # Drop tweets whose source app is not in the whitelist above.
        if not re.findall(expr, data['source']):
            return
        lonlat = data['lonlat']
        payload = None
    # NOTE(review): a record with neither 'tags' nor 'user_id' would raise
    # NameError below -- presumably upstream guarantees one of the two.
    # NOTE(review): self.lookup.get appears to be a spatial lookup taking a
    # search distance (METRO_DISTANCE) as second argument, not dict.get --
    # confirm against the lookup implementation.
    metro = self.lookup.get(lonlat, METRO_DISTANCE)
    if not metro:
        return
    yield metro, (type_tag, lonlat, payload)
map each osm POI and geotweets based on spatial lookup of metro area
def check_valence(self):
    """Check valences of all atoms.

    :return: list of indices of atoms whose valence is invalid for their
        environment.
    """
    invalid = []
    for idx, atom in self.atoms():
        if not atom.check_valence(self.environment(idx)):
            invalid.append(idx)
    return invalid
check valences of all atoms :return: list of invalid atoms
def to_kwargs(triangles):
    """Convert (n, 3, 3) triangles into keyword arguments for the Trimesh
    constructor.

    Parameters
    ----------
    triangles : (n, 3, 3) float
        Triangles in space.

    Returns
    -------
    kwargs : dict
        With keys 'vertices' ((n*3, 3) float) and 'faces' ((n, 3) int).
    """
    triangles = np.asanyarray(triangles, dtype=np.float64)
    if not util.is_shape(triangles, (-1, 3, 3)):
        raise ValueError('Triangles must be (n,3,3)!')
    vertices = triangles.reshape((-1, 3))
    faces = np.arange(len(vertices)).reshape((-1, 3))
    return {'vertices': vertices, 'faces': faces}
Convert a list of triangles to the kwargs for the Trimesh constructor. Parameters --------- triangles : (n, 3, 3) float Triangles in space Returns --------- kwargs : dict Keyword arguments for the trimesh.Trimesh constructor Includes keys 'vertices' and 'faces' Examples --------- >>> mesh = trimesh.Trimesh(**trimesh.triangles.to_kwargs(triangles))