code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def iter_block_items(self):
    """Yield each block-level content element (paragraph, table, or
    structured document tag) of this cell, in document order.
    """
    block_tags = {qn('w:p'), qn('w:tbl'), qn('w:sdt')}
    for element in self:
        if element.tag in block_tags:
            yield element
Generate a reference to each of the block-level content elements in this cell, in the order they appear.
def lock(self, resource, label='', expire=60, patience=60):
    """Lock a resource by drawing a queue number and waiting for our turn.

    :param resource: string corresponding to the resource type
    :param label: string label to attach
    :param expire: seconds before the queue entry expires
    :param patience: seconds to wait for our turn
    """
    resource_queue = Queue(client=self.client, resource=resource)
    with resource_queue.draw(label=label, expire=expire) as ticket:
        resource_queue.wait(number=ticket, patience=patience)
        # NOTE(review): yields the return value of close() — presumably a
        # generator-based context manager; confirm against Queue's API.
        yield resource_queue.close()
Lock a resource. :param resource: String corresponding to resource type :param label: String label to attach :param expire: int seconds :param patience: int seconds
def _callUpgradeAgent(self, ev_data, failTimeout) -> None:
    """Record the start of a scheduled upgrade and ask the agent (node
    control service) to perform it.
    """
    logger.info("{}'s upgrader calling agent for upgrade".format(self))
    self._actionLog.append_started(ev_data)
    self._action_start_callback()
    self.scheduledAction = None
    # Fire-and-forget: the request runs on the event loop.
    upgrade_task = self._sendUpgradeRequest(ev_data, failTimeout)
    asyncio.ensure_future(upgrade_task)
Callback which is called when the upgrade time comes. Writes the upgrade record to the upgrade log and asks the node control service to perform the upgrade. :param ev_data: data describing the scheduled upgrade event :param failTimeout: timeout for the upgrade request
def error_wrapper(error, errorClass):
    """Normalize a failure from a cloud-service request.

    4xx/5xx responses are re-raised as ``errorClass`` (falling back to a
    TwistedWebError when the payload cannot be parsed); 2xx responses
    return the error string; anything else re-raises the original failure.

    :param error: a Twisted failure wrapping the response.
    :param errorClass: exception class built from the XML error payload.
    """
    http_status = 0
    if error.check(TwistedWebError):
        xml_payload = error.value.response
        if error.value.status:
            http_status = int(error.value.status)
    else:
        # Not a web error at all — propagate unchanged.
        error.raiseException()
    if http_status >= 400:
        if not xml_payload:
            # No body to parse; propagate the original failure.
            error.raiseException()
        try:
            fallback_error = errorClass(
                xml_payload, error.value.status, str(error.value),
                error.value.response)
        except (ParseError, AWSResponseParseError):
            # Payload was not a service error document; synthesize a
            # generic web error from the status code.
            error_message = http.RESPONSES.get(http_status)
            fallback_error = TwistedWebError(
                http_status, error_message, error.value.response)
        raise fallback_error
    elif 200 <= http_status < 300:
        return str(error.value)
    else:
        error.raiseException()
We want to see all error messages from cloud services. Amazon's EC2 says that their errors are accompanied either by a 400-series or 500-series HTTP response code. As such, the first thing we want to do is check to see if the error is in that range. If it is, we then need to see if the error message is an EC2 one. In the event that an error is not a Twisted web error nor an EC2 one, the original exception is raised.
def check_is_dataarray(comp):
    """Decorator asserting that the wrapped computation receives an
    ``xarray.DataArray`` as its first positional argument.
    """
    @wraps(comp)
    def wrapper(data_array, *args, **kwds):
        assert isinstance(data_array, xr.DataArray)
        return comp(data_array, *args, **kwds)
    return wrapper
Decorator to check that a computation has an instance of xarray.DataArray as first argument.
def close(self):
    """Close this handle, cancelling any in-flight transfer first.

    Cancellation is not immediate, so this method pumps libusb events
    until the transfers are actually cancelled. In multi-threaded
    programs this can stall; see class docs.
    """
    handle = self.__handle
    if handle is None:
        # Already closed (or never opened).
        return
    # Move transfers out of the weak set into a strong set so they
    # cannot be garbage-collected while we cancel and wait on them.
    weak_transfer_set = self.__transfer_set
    transfer_set = self.__set()
    while True:
        try:
            transfer = weak_transfer_set.pop()
        except self.__KeyError:
            break
        transfer_set.add(transfer)
        # Mark the transfer so it cannot be resubmitted.
        transfer.doom()
    inflight = self.__inflight
    for transfer in inflight:
        try:
            transfer.cancel()
        except (self.__USBErrorNotFound, self.__USBErrorNoDevice):
            # Transfer already completed, or device is gone — nothing to cancel.
            pass
    # Let libusb process events until every cancellation has landed.
    while inflight:
        try:
            self.__context.handleEvents()
        except self.__USBErrorInterrupted:
            pass
    for transfer in transfer_set:
        transfer.close()
    self.__libusb_close(handle)
    self.__handle = None
Close this handle. If not called explicitely, will be called by destructor. This method cancels any in-flight transfer when it is called. As cancellation is not immediate, this method needs to let libusb handle events until transfers are actually cancelled. In multi-threaded programs, this can lead to stalls. To avoid this, do not close nor let GC collect a USBDeviceHandle which has in-flight transfers.
def log_mel_spectrogram(data, audio_sample_rate=8000, log_offset=0.0,
                        window_length_secs=0.025, hop_length_secs=0.010,
                        **kwargs):
    """Convert a waveform into a log-magnitude mel-frequency spectrogram.

    Args:
      data: 1D np.array of waveform samples.
      audio_sample_rate: sampling rate of ``data``.
      log_offset: added before taking the log, to avoid -inf.
      window_length_secs: analysis window duration.
      hop_length_secs: advance between successive windows.
      **kwargs: forwarded to spectrogram_to_mel_matrix.

    Returns:
      2D np.array (num_frames, num_mel_bins) of log mel magnitudes.
    """
    window_length_samples = int(round(audio_sample_rate * window_length_secs))
    hop_length_samples = int(round(audio_sample_rate * hop_length_secs))
    # FFT size: next power of two at or above the window length.
    fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
    spectrogram = stft_magnitude(data,
                                 fft_length=fft_length,
                                 hop_length=hop_length_samples,
                                 window_length=window_length_samples)
    mel_matrix = spectrogram_to_mel_matrix(
        num_spectrogram_bins=spectrogram.shape[1],
        audio_sample_rate=audio_sample_rate, **kwargs)
    mel_spectrogram = np.dot(spectrogram, mel_matrix)
    return np.log(mel_spectrogram + log_offset)
Convert waveform to a log magnitude mel-frequency spectrogram. Args: data: 1D np.array of waveform data. audio_sample_rate: The sampling rate of data. log_offset: Add this to values when taking log to avoid -Infs. window_length_secs: Duration of each window to analyze. hop_length_secs: Advance between successive analysis windows. **kwargs: Additional arguments to pass to spectrogram_to_mel_matrix. Returns: 2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank magnitudes for successive frames.
def _write(self, items):
    """Perform a single batch write call and handle its response.

    Accumulates consumed capacity, stores any unprocessed items for a
    later retry, and backs off exponentially while items remain.
    """
    response = self._batch_write_item(items)
    if 'consumed_capacity' in response:
        # Fold the new capacity entries into the running total.
        self.consumed_capacity = sum(response['consumed_capacity'],
                                     self.consumed_capacity)
    leftover = response.get('UnprocessedItems')
    if leftover:
        pending = leftover.get(self.tablename, [])
        LOG.info("%d items were unprocessed. Storing for later.",
                 len(pending))
        self._unprocessed.extend(pending)
        self._attempt += 1
        self.connection.exponential_sleep(self._attempt)
    else:
        self._attempt = 0
    return response
Perform a batch write and handle the response
def get_service(self, service_name):
    """Return the service definition registered under *service_name*.

    :raises KeyError: if no (truthy) service is registered by that name.
    """
    found = self.services.get(service_name)
    if not found:
        raise KeyError('Service not registered: %s' % service_name)
    return found
Given the name of a registered service, return its service definition.
def keyring_refresh(**kwargs):
    """Refresh the keyring in the cocaine-runtime."""
    ctx = Context(**kwargs)
    tvm_service = ctx.repo.create_secure_service('tvm')
    ctx.execute_action('keyring:refresh', tvm=tvm_service)
Refresh the keyring in the cocaine-runtime.
def embed_seqdiag_sequence(self):
    """Compile the recorded message sequence into a seqdiag file in the
    output folder (seqdiag must be installed; see
    http://blockdiag.com/en/seqdiag/).
    """
    test_name = BuiltIn().replace_variables('${TEST NAME}')
    output_dir = BuiltIn().replace_variables('${OUTPUTDIR}')
    diagram_path = os.path.join(output_dir, test_name + '.seqdiag')
    SeqdiagGenerator().compile(diagram_path, self._message_sequence)
Create a message sequence diagram png file to output folder and embed the image to log file. You need to have seqdiag installed to create the sequence diagram. See http://blockdiag.com/en/seqdiag/
def get_annotation_entries_by_names(self, url: str, names: Iterable[str]) -> List[NamespaceEntry]:
    """Get annotation entries by URL and names.

    :param url: the url of the annotation source
    :param names: the entry names to fetch from that source
    """
    query = (
        self.session
        .query(NamespaceEntry)
        .join(Namespace)
        .filter(and_(Namespace.url == url, NamespaceEntry.name.in_(names)))
    )
    return query.all()
Get annotation entries by URL and names. :param url: The url of the annotation source :param names: The names of the annotation entries from the given url's document
def restore_model(cls, data):
    """Return an instance of *cls* with attributes loaded from *data*.

    Each entry of ``data`` maps a field name to a payload whose
    ``Field.VALUE`` entry holds the attribute value.
    """
    obj = cls()
    for field_name, payload in data.items():
        setattr(obj, field_name, payload[Field.VALUE])
    return obj
Returns an instance of ``cls`` with attributes loaded from the ``data`` dict.
def _get_log_model_class(self): if self.log_model_class is not None: return self.log_model_class app_label, model_label = self.log_model.rsplit('.', 1) self.log_model_class = apps.get_model(app_label, model_label) return self.log_model_class
Cache for fetching the actual log model object once django is loaded. Otherwise, import conflict occur: WorkflowEnabled imports <log_model> which tries to import all models to retrieve the proper model class.
def pandas(self):
    """Return a DataFrame of prior and posterior standard deviations for
    all parameters and forecasts (predictions), indexed by name.
    """
    names, prior, posterior = [], [], []
    for idx, pname in enumerate(self.posterior_parameter.row_names):
        names.append(pname)
        posterior.append(np.sqrt(float(self.posterior_parameter[idx, idx].x)))
        prior_idx = self.parcov.row_names.index(pname)
        prior.append(np.sqrt(float(self.parcov[prior_idx, prior_idx].x)))
    for pred_name, pred_var in self.posterior_prediction.items():
        names.append(pred_name)
        posterior.append(np.sqrt(pred_var))
        prior.append(self.prior_prediction[pred_name])
    return pd.DataFrame({"posterior": posterior, "prior": prior}, index=names)
get a pandas dataframe of prior and posterior for all predictions Returns: pandas.DataFrame : pandas.DataFrame a dataframe with prior and posterior uncertainty estimates for all forecasts (predictions)
def get_layer(self, class_: Type[L], became: bool=True) -> L:
    """Return the first layer of the given class.

    :param class_: class of the expected layer
    :param became: also search transformed layers
    :raises KeyError: if no matching layer is present
    """
    try:
        return self._index[class_][0]
    except KeyError:
        if not became:
            raise
        return self._transformed[class_][0]
Return the first layer of a given class. If that layer is not present, then raise a KeyError. :param class_: class of the expected layer :param became: Allow transformed layers in results
def open(path, mode='r', host=None, user=None, port=DEFAULT_PORT):
    """Open a file on a remote machine over SSH.

    Authentication must already be set up via existing keys.

    :param path: file path on the remote machine
    :param mode: file open mode
    :param host: remote hostname; may not be None
    :param user: login username; defaults to the current user
    :param port: port to connect to
    :return: a file-like object
    """
    if not host:
        raise ValueError('you must specify the host to connect to')
    login_user = user if user else getpass.getuser()
    connection = _connect(host, login_user, port)
    sftp = connection.get_transport().open_sftp_client()
    return sftp.open(path, mode)
Open a file on a remote machine over SSH. Expects authentication to be already set up via existing keys on the local machine. Parameters ---------- path: str The path to the file to open on the remote machine. mode: str, optional The mode to use for opening the file. host: str, optional The hostname of the remote machine. May not be None. user: str, optional The username to use to login to the remote machine. If None, defaults to the name of the current user. port: int, optional The port to connect to. Returns ------- A file-like object. Important --------- If you specify a previously unseen host, then its host key will be added to the local ~/.ssh/known_hosts *automatically*.
def _condense(self, data): if data: data = filter(None,data.values()) if data: return data[-1] return None
Condense by returning the last real value of the gauge.
def create_project(args):
    """Create a new django project using the longclaw template."""
    # Refuse names that shadow an importable Python module.
    try:
        __import__(args.project_name)
    except ImportError:
        pass
    else:
        sys.exit("'{}' conflicts with the name of an existing "
                 "Python module and cannot be used as a project "
                 "name. Please try another name.".format(args.project_name))
    template_path = path.join(path.dirname(longclaw.__file__),
                              'project_template')
    command = (
        'django-admin.py',
        'startproject',
        '--template={}'.format(template_path),
        '--extension=html,css,js,py,txt',
        args.project_name,
    )
    ManagementUtility(command).execute()
    print("{} has been created.".format(args.project_name))
Create a new django project using the longclaw template
def onSave(self, event, alert=False, destroy=True):
    """Save grid data, optionally notify the user and close the window."""
    if self.drop_down_menu:
        self.drop_down_menu.clean_up()
    self.grid_builder.save_grid_data()
    # Silent save: no event and no explicit alert requested.
    if event or alert:
        wx.MessageBox('Saved!', 'Info', style=wx.OK | wx.ICON_INFORMATION)
        if destroy:
            self.Destroy()
Save grid data
def comments(self, update=True):
    """Fetch and return the comments for a single MoreComments object.

    :param update: when True, associate each fetched comment with the
        parent submission.
    """
    if not self._comments:
        if self.count == 0:
            # "Continue this thread" style stub — delegate.
            return self._continue_comments(update)
        # Only request children not already known to the submission.
        children = [x for x in self.children if 't1_{0}'.format(x)
                    not in self.submission._comments_by_id]
        if not children:
            return None
        data = {'children': ','.join(children),
                'link_id': self.submission.fullname,
                'r': str(self.submission.subreddit)}
        if self.submission._comment_sort:
            data['where'] = self.submission._comment_sort
        url = self.reddit_session.config['morechildren']
        response = self.reddit_session.request_json(url, data=data)
        self._comments = response['data']['things']
        if update:
            for comment in self._comments:
                comment._update_submission(self.submission)
    return self._comments
Fetch and return the comments for a single MoreComments object.
def get_process_definition_start(fname, slug):
    """Find the first line of a process definition (the line with its slug).

    :param str fname: path to the file with processes
    :param str slug: process slug
    :return: 1-based line number where the definition starts, or 1 if
        the slug is not found
    :rtype: int
    """
    slug_pattern = re.compile(r'slug:\s*{}'.format(slug))
    with open(fname) as handle:
        for lineno, line in enumerate(handle, start=1):
            if slug_pattern.search(line):
                return lineno
    return 1
Find the first line of process definition. The first line of process definition is the line with a slug. :param str fname: Path to filename with processes :param string slug: process slug :return: line where the process definiton starts :rtype: int
def tag_builder(parser, token, cls, flow_type):
    """Helper shared by the sitegate flow form template tags.

    Accepts either no arguments or ``for <FlowName>``.
    """
    tokens = token.split_contents()
    count = len(tokens)
    valid = count == 1 or (count == 3 and tokens[1] == 'for')
    if not valid:
        raise template.TemplateSyntaxError(
            '"sitegate_%(type)s_form" tag requires zero or two arguments. '
            'E.g. {%% sitegate_%(type)s_form %%} or '
            '{%% sitegate_%(type)s_form for ClassicSignup %%}.'
            % {'type': flow_type})
    flow_name = tokens[2] if count == 3 else None
    return cls(flow_name)
Helper function handling flow form tags.
def get_hash(self, handle):
    """Return the hash of the file that *handle* refers to."""
    return DiskStorageBroker.hasher(self._fpath_from_handle(handle))
Return the hash.
def aborted(self, exc_info):
    """Record an exception, mark this log as ended, and write the
    formatted traceback.
    """
    self.exc_info = exc_info
    self.did_end = True
    formatted = format_exception(*self.exc_info)
    self.write(formatted)
Called by a logger to log an exception.
def get_issns_for_journal(nlm_id):
    """Get the list of ISSN numbers for a journal given its NLM ID.

    Returns None if the catalog lookup fails or yields no ISSNs.
    NLM XML DTD info: https://www.nlm.nih.gov/databases/dtd/
    """
    params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id}
    tree = send_request(pubmed_fetch, params)
    if tree is None:
        return None
    issns = tree.findall('.//ISSN') + tree.findall('.//ISSNLinking')
    if not issns:
        return None
    return [issn.text for issn in issns]
Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/
def scope(self, scope):
    """Set the audience scope of this Message.

    :param scope: one of CLUSTER, CUSTOMER, USER
    :raises ValueError: if scope is None or not an allowed value
    """
    allowed_values = ["CLUSTER", "CUSTOMER", "USER"]
    if scope is None:
        raise ValueError("Invalid value for `scope`, must not be `None`")
    if scope not in allowed_values:
        raise ValueError(
            "Invalid value for `scope` ({0}), must be one of {1}"
            .format(scope, allowed_values)
        )
    self._scope = scope
Sets the scope of this Message. The audience scope that this message should reach # noqa: E501 :param scope: The scope of this Message. # noqa: E501 :type: str
def match(tgt, opts=None):
    """Return True if the passed glob matches this minion's id."""
    if not opts:
        opts = __opts__
    # Non-string targets can never glob-match.
    if not isinstance(tgt, six.string_types):
        return False
    return fnmatch.fnmatch(opts['id'], tgt)
Returns true if the passed glob matches the id
def create(cls, path_name=None, name=None, crawlable=True):
    """Initialize a project instance, persist it, and collect its results."""
    project = cls(path_name, name, crawlable)
    db.session.add(project)
    db.session.commit()
    return collect_results(project, force=True)
Initialize an instance and save it to the db.
def tot_edges(self):
    """Return the number of distinct edges in the convex hull.

    Useful for identifying catalytically active sites.
    """
    seen = []
    for facet in self.facets:
        facet_edges = []
        pts = self.get_line_in_facet(facet)
        lines = []
        # Points come in pairs: (pt[0], pt[1]), (pt[2], pt[3]), ...
        for i, _ in enumerate(pts):
            if i == len(pts) / 2:
                break
            endpoints = [tuple(pts[i * 2]), tuple(pts[i * 2 + 1])]
            lines.append(tuple(sorted(tuple(endpoints))))
        for i, line in enumerate(lines):
            if line not in seen:
                facet_edges.append(line)
        seen.extend(facet_edges)
    return len(seen)
Returns the number of edges in the convex hull. Useful for identifying catalytically active sites.
def add_validate(subparsers):
    """Validate Spinnaker setup."""
    formatter = argparse.ArgumentDefaultsHelpFormatter
    validate_parser = subparsers.add_parser(
        'validate', help=add_validate.__doc__, formatter_class=formatter)
    validate_parser.set_defaults(func=validate_parser.print_help)
    validate_subparsers = validate_parser.add_subparsers(title='Testers')
    all_parser = validate_subparsers.add_parser(
        'all', help=validate.validate_all.__doc__, formatter_class=formatter)
    all_parser.set_defaults(func=validate.validate_all)
    gate_parser = validate_subparsers.add_parser(
        'gate', help=validate.validate_gate.__doc__, formatter_class=formatter)
    gate_parser.set_defaults(func=validate.validate_gate)
Validate Spinnaker setup.
def register_callbacks(self, on_create, on_modify, on_delete):
    """Register callbacks for file creation, modification, and deletion."""
    self.on_create, self.on_modify, self.on_delete = \
        on_create, on_modify, on_delete
Register callbacks for file creation, modification, and deletion
def update_tenant(
    self,
    tenant,
    update_mask=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Update the specified tenant.

    Args:
        tenant: the tenant resource to replace the current one.
        update_mask: optional field mask restricting which top-level
            ``Tenant`` fields are updated; all fields when omitted.
        retry: retry policy; requests are not retried when ``None``.
        timeout: per-attempt timeout in seconds.
        metadata: additional metadata for the method call.

    Returns:
        The updated ``Tenant``.
    """
    # Wrap the transport method once, attaching the configured
    # default retry/timeout; cache it for subsequent calls.
    if "update_tenant" not in self._inner_api_calls:
        self._inner_api_calls[
            "update_tenant"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.update_tenant,
            default_retry=self._method_configs["UpdateTenant"].retry,
            default_timeout=self._method_configs["UpdateTenant"].timeout,
            client_info=self._client_info,
        )
    request = tenant_service_pb2.UpdateTenantRequest(
        tenant=tenant, update_mask=update_mask
    )
    return self._inner_api_calls["update_tenant"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Updates specified tenant. Example: >>> from google.cloud import talent_v4beta1 >>> >>> client = talent_v4beta1.TenantServiceClient() >>> >>> # TODO: Initialize `tenant`: >>> tenant = {} >>> >>> response = client.update_tenant(tenant) Args: tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]): Required. The tenant resource to replace the current resource in the system. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.talent_v4beta1.types.Tenant` update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience. If ``update_mask`` is provided, only the specified fields in ``tenant`` are updated. Otherwise all the fields are updated. A field mask to specify the tenant fields to be updated. Only top level fields of ``Tenant`` are supported. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.talent_v4beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def view_context(self):
    """View the context surrounding the selected comment, if any."""
    context_url = self.get_selected_item().get('context')
    if context_url:
        self.selected_page = self.open_submission_page(context_url)
View the context surrounding the selected comment.
def compat_get_paginated_response(view, page):
    """Compatibility shim: get_paginated_response is unknown to DRF 3.0."""
    if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
        from rest_messaging.serializers import ComplexMessageSerializer
        serialized = ComplexMessageSerializer(page, many=True)
        return view.get_paginated_response(serialized.data)
    # DRF 3.0 path.
    serialized = view.get_pagination_serializer(page)
    return Response(serialized.data)
get_paginated_response is unknown to DRF 3.0
def _build_tag_param_list(params, tags): keys = sorted(tags.keys()) i = 1 for key in keys: value = tags[key] params['Tags.member.{0}.Key'.format(i)] = key if value is not None: params['Tags.member.{0}.Value'.format(i)] = value i += 1
helper function to build a tag parameter list to send
def _emp_extra_options(options): metadata_path = os.path.normpath(os.path.join(options['param_dir'], options['metadata'])) if not os.path.isfile(metadata_path): raise IOError, ("Path to metadata file %s is invalid." % metadata_path) options['metadata_path'] = metadata_path subset = options.get('subset', '') options['patch'] = emp.Patch(metadata_path, subset) if 'cols' not in options.keys(): options['cols'] = '' if 'splits' not in options.keys(): options['splits'] = '' return options
Get special options patch, cols, and splits if analysis in emp module
def create_lockfile(self):
    """Write the recursive dependency list to the outfile with hard-pinned
    versions, then fix it.
    """
    process = subprocess.Popen(self.pin_command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        logger.critical("ERROR executing %s", ' '.join(self.pin_command))
        logger.critical("Exit code: %s", process.returncode)
        logger.critical(stdout.decode('utf-8'))
        logger.critical(stderr.decode('utf-8'))
        raise RuntimeError("Failed to pip-compile {0}".format(self.infile))
    self.fix_lockfile()
Write recursive dependencies list to outfile with hard-pinned versions. Then fix it.
def put(self, item, *args, **kwargs):
    """Cache *item* under the key derived from args/kwargs.

    A ``timeout`` keyword argument controls how long (seconds) the item
    stays retrievable; when omitted or None, the cache's
    ``default_timeout`` applies. A timeout of 0 effectively disables
    caching for this item. No-op when the cache is disabled.
    """
    if not self.enabled:
        return
    timeout = kwargs.pop('timeout', None)
    if timeout is None:
        timeout = self.default_timeout
    expiry = time() + timeout
    key = self.make_key(args, kwargs)
    with self._cache_lock:
        self._cache[key] = (expiry, item)
Put an item into the cache, for this combination of args and kwargs. Args: *args: any arguments. **kwargs: any keyword arguments. If ``timeout`` is specified as one of the keyword arguments, the item will remain available for retrieval for ``timeout`` seconds. If ``timeout`` is `None` or not specified, the ``default_timeout`` for this cache will be used. Specify a ``timeout`` of 0 (or ensure that the ``default_timeout`` for this cache is 0) if this item is not to be cached.
def acquire(self, **kwargs):
    """Download the file and return its path in the temporary directory,
    or None if the download failed.
    """
    return config.download_data(self.temp_filename, self.url, self.sha256)
Download the file and return its path Returns ------- str or None The path of the file in BatchUp's temporary directory or None if the download failed.
def clear(self):
    """Clean up the handler; it must not be used after this call."""
    self._logger.debug("Component handlers are cleared")
    for attr in ('_field', '_name', '_logger'):
        setattr(self, attr, None)
Cleans up the handler. The handler can't be used after this method has been called
def can_view(self, user):
    """Return whether or not `user` can view information about this user.

    Truthy when the viewer is an admin, is this user, or administers at
    least one of this user's classes.
    """
    return (user.is_admin
            or self == user
            or set(self.classes).intersection(user.admin_for))
Return whether or not `user` can view information about the user.
def ref_context_from_geoloc(geoloc):
    """Return a RefContext object built from a geoloc entry."""
    return RefContext(name=geoloc.get('text'),
                      db_refs={'GEOID': geoloc.get('geoID')})
Return a RefContext object given a geoloc entry.
async def on_isupport_casemapping(self, value):
    """Apply the server-advertised IRC case mapping used for nickname and
    channel name comparisons; unknown mappings are ignored.
    """
    if value not in rfc1459.protocol.CASE_MAPPINGS:
        return
    self._case_mapping = value
    self.channels = rfc1459.parsing.NormalizingDict(
        self.channels, case_mapping=value)
    self.users = rfc1459.parsing.NormalizingDict(
        self.users, case_mapping=value)
IRC case mapping for nickname and channel name comparisons.
async def _execute(self, appt):
    """Fire off the task that runs the appointment's storm query."""
    owner = self.core.auth.user(appt.useriden)
    if owner is None:
        # Appointment references a user that no longer exists.
        logger.warning('Unknown user %s in stored appointment', appt.useriden)
        await self._markfailed(appt)
        return
    await self.core.boss.execute(self._runJob(owner, appt),
                                 f'Agenda {appt.iden}', owner)
Fire off the task to make the storm query
def competition_leaderboard_view(self, competition):
    """View a leaderboard based on a competition name.

    :param competition: the competition name to view the leaderboard for
    """
    raw = self.competition_view_leaderboard_with_http_info(competition)
    result = self.process_response(raw)
    return [LeaderboardEntry(entry) for entry in result['submissions']]
View a leaderboard based on a competition name. Parameters ========== competition: the competition name to view the leaderboard for
def short_key():
    """Generate a 5-character short key.

    First and last characters are alphanumeric; the middle three may
    additionally be '-' or '_'.

    >>> len(short_key())
    5
    """
    edge_chars = list(ascii_letters + digits)
    middle_chars = edge_chars + list('-_')
    picks = [choice(edge_chars)]
    picks.extend(choice(middle_chars) for _ in range(3))
    picks.append(choice(edge_chars))
    return ''.join(picks)
Generate a short key. >>> key = short_key() >>> len(key) 5
def _refresh_nvr(self):
    """Refresh our name-version-release attributes from the rpm on disk."""
    rpm_info = juicer.utils.rpm_info(self.path)
    for attr in ('name', 'version', 'release'):
        setattr(self, attr, rpm_info[attr])
Refresh our name-version-release attributes.
def make_valid_polygon(shape):
    """Make a polygon valid while retaining as much of its area as possible.

    Uses a robust pyclipper union to clean the polygon; the result is
    asserted valid before being returned.
    """
    assert shape.geom_type == 'Polygon'
    cleaned = make_valid_pyclipper(shape)
    assert cleaned.is_valid
    return cleaned
Make a polygon valid. Polygons can be invalid in many ways, such as self-intersection, self-touching and degeneracy. This process attempts to make a polygon valid while retaining as much of its extent or area as possible. First, we call pyclipper to robustly union the polygon. Using this on its own appears to be good for "cleaning" the polygon. This might result in polygons which still have degeneracies according to the OCG standard of validity - as pyclipper does not consider these to be invalid. Therefore we follow by using the `buffer(0)` technique to attempt to remove any remaining degeneracies.
def is_multidex(self):
    """Test if the APK has multiple DEX files.

    :return: True if more than one classes*.dex entry is found,
        otherwise False
    """
    # Raw string with an escaped dot: the original pattern
    # "^classes(\\d+)?.dex$" let '.' match any character, so names like
    # 'classes1Xdex' would also have been counted.
    dexre = re.compile(r"^classes(\d+)?\.dex$")
    matches = [name for name in self.get_files() if dexre.search(name)]
    return len(matches) > 1
Test if the APK has multiple DEX files :return: True if multiple dex found, otherwise False
def set_ctype(self, ctype, orig_ctype=None):
    """Set the selected content type, unless one was already chosen.

    :param ctype: the content type string to set
    :param orig_ctype: the original content type from the configuration
    """
    if self.ctype is not None:
        return
    self.ctype = ctype
    self.orig_ctype = orig_ctype
Set the selected content type. Will not override the value of the content type if that has already been determined. :param ctype: The content type string to set. :param orig_ctype: The original content type, as found in the configuration.
def printDatawraps():
    """Print all available datawraps for bootstraping."""
    datawraps = listDatawraps()
    printf("Available datawraps for boostraping\n")
    # dict.iteritems() was removed in Python 3; use items() instead.
    for name, entries in datawraps.items():
        printf(name)
        printf("~" * len(name) + "|")
        for entry in entries:
            printf(" " * len(name) + "|" + "~~~:> " + entry)
        printf('\n')
print all available datawraps for bootstraping
def include(context, bundle_name, version):
    """Include a bundle of files into the internal space.

    Use the bundle name alone to include its latest version.
    """
    store = Store(context.obj['database'], context.obj['root'])
    if version:
        version_obj = store.Version.get(version)
        if version_obj is None:
            # NOTE(review): only reports the error; execution continues
            # with version_obj = None — confirm this is intended.
            click.echo(click.style('version not found', fg='red'))
    else:
        bundle_obj = store.bundle(bundle_name)
        if bundle_obj is None:
            # NOTE(review): as above, no abort — the next line will fail
            # on a missing bundle; confirm intended behavior.
            click.echo(click.style('bundle not found', fg='red'))
        # Latest version is first in the bundle's version list.
        version_obj = bundle_obj.versions[0]
    try:
        include_version(context.obj['root'], version_obj)
    except VersionIncludedError as error:
        click.echo(click.style(error.message, fg='red'))
        context.abort()
    version_obj.included_at = dt.datetime.now()
    store.commit()
    click.echo(click.style('included all files!', fg='green'))
Include a bundle of files into the internal space. Use the bundle name alone if you simply want to include the latest version.
def OnTool(self, event):
    """Toolbar event handler: relay the tool's message type as a
    command event.
    """
    post_command_event(self, self.ids_msgs[event.GetId()])
Toolbar event handler
def bus_get(celf, type, private, error = None) :
    "returns a Connection to one of the predefined D-Bus buses; type is a BUS_xxx value."
    # Split into a caller-supplied error slot and one we must check ourselves.
    error, my_error = _get_error(error)
    # Select shared vs. private bus-get entry point by indexing with the flag.
    result = (dbus.dbus_bus_get, dbus.dbus_bus_get_private)[private](type, error._dbobj)
    my_error.raise_if_set()
    if result != None :
        # Wrap the raw libdbus pointer in this class.
        result = celf(result)
    return \
        result
returns a Connection to one of the predefined D-Bus buses; type is a BUS_xxx value.
def set_plot_type(self, plot_type):
    """Select the panel index corresponding to *plot_type*."""
    available = [entry["type"] for entry in self.plot_types]
    self.plot_panel = available.index(plot_type)
Sets plot type
def verbose(self_,msg,*args,**kw):
    """Print msg merged with args as a verbose message.

    See Python's logging module for details of message formatting.
    """
    # Delegates to the (name-mangled) __db_print helper at VERBOSE level.
    self_.__db_print(VERBOSE,msg,*args,**kw)
Print msg merged with args as a verbose message. See Python's logging module for details of message formatting.
def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None,
                             kappa=None, min_neighbors=3, kind='cressman'):
    r"""Generate an inverse distance interpolation of the given points to a
    regular grid, using Cressman or Barnes weighting.

    Parameters
    ----------
    xp, yp : (N, ) ndarray
        Coordinates of observations.
    variable : (N, ) ndarray
        Observation values at (xp, yp).
    grid_x, grid_y : ndarray
        Meshgrids for the x and y dimensions.
    r : float
        Search radius around each grid center.
    gamma, kappa : float
        Barnes smoothing / response parameters.
    min_neighbors : int
        Minimum neighbors required to interpolate a point.
    kind : str
        'cressman' or 'barnes'.

    Returns
    -------
    ndarray
        Interpolated values with the shape of ``grid_x``.
    """
    observation_points = list(zip(xp, yp))
    grid_points = generate_grid_coords(grid_x, grid_y)
    interpolated = inverse_distance_to_points(
        observation_points, variable, grid_points, r, gamma=gamma,
        kappa=kappa, min_neighbors=min_neighbors, kind=kind)
    return interpolated.reshape(grid_x.shape)
r"""Generate an inverse distance interpolation of the given points to a regular grid. Values are assigned to the given grid using inverse distance weighting based on either [Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here based on [Koch1983]_. Parameters ---------- xp: (N, ) ndarray x-coordinates of observations. yp: (N, ) ndarray y-coordinates of observations. variable: (N, ) ndarray observation values associated with (xp, yp) pairs. IE, variable[i] is a unique observation at (xp[i], yp[i]). grid_x: (M, 2) ndarray Meshgrid associated with x dimension. grid_y: (M, 2) ndarray Meshgrid associated with y dimension. r: float Radius from grid center, within which observations are considered and weighted. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default None. kappa: float Response parameter for barnes interpolation. Default None. min_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3. kind: str Specify what inverse distance weighting interpolation to use. Options: 'cressman' or 'barnes'. Default 'cressman' Returns ------- img: (M, N) ndarray Interpolated values on a 2-dimensional grid See Also -------- inverse_distance_to_points
def _get_connection(self):
    """Make an SSH (ncclient) connection to the IOS XE device.

    Reuses an existing connection when still connected. On first
    connect, enables all non-management interfaces (tracked via the
    ``_itfcs_enabled`` flag), since they are typically disabled after
    boot.

    :raises cfg_exc.ConnectionException: if connecting fails.
    """
    try:
        if self._ncc_connection and self._ncc_connection.connected:
            return self._ncc_connection
        self._ncc_connection = manager.connect(
            host=self._host_ip,
            port=self._host_ssh_port,
            username=self._username,
            password=self._password,
            device_params={'name': "csr"},
            timeout=self._timeout)
        if not self._itfcs_enabled:
            self._itfcs_enabled = self._enable_itfcs(self._ncc_connection)
        return self._ncc_connection
    except Exception as e:
        # BaseException.message was removed in Python 3; str(e) is the
        # portable way to get the reason text.
        conn_params = {'host': self._host_ip,
                       'port': self._host_ssh_port,
                       'user': self._username,
                       'timeout': self._timeout,
                       'reason': str(e)}
        raise cfg_exc.ConnectionException(**conn_params)
Make SSH connection to the IOS XE device. The external ncclient library is used for creating this connection. This method keeps state of any existing connections and reuses them if already connected. Also interfaces (except management) are typically disabled by default when it is booted. So if connecting for the first time, driver will enable all other interfaces and keep that status in the `_itfcs_enabled` flag.
def unset_env():
    """Remove coverage bookkeeping variables from the environment."""
    for var in ('COV_CORE_SOURCE', 'COV_CORE_DATA_FILE', 'COV_CORE_CONFIG'):
        os.environ.pop(var, None)
Remove coverage info from env.
def _GenerateClientInfo(self, client_id, client_fd):
    """Yield archive chunks holding the client's summary as YAML."""
    summary_dict = client_fd.ToPrimitiveDict(stringify_leaf_fields=True)
    summary = yaml.Dump(summary_dict).encode("utf-8")
    info_path = os.path.join(self.prefix, client_id, "client_info.yaml")
    # Synthetic stat: world-readable file sized to the summary.
    stat_entry = os.stat_result((0o644, 0, 0, 0, 0, 0, len(summary), 0, 0, 0))
    yield self.archive_generator.WriteFileHeader(info_path, st=stat_entry)
    yield self.archive_generator.WriteFileChunk(summary)
    yield self.archive_generator.WriteFileFooter()
Yields chucks of archive information for given client.
def create_feature(self, **kwargs):
    """Create an enumerated sequence feature.

    Synchronous by default; pass a ``callback`` function to make the
    request asynchronous, in which case the request thread is returned
    instead of the Feature.

    :param callback function: callback for asynchronous request (optional)
    :param FeatureRequest body:
    :return: Feature, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    # Both the callback and no-callback branches made the identical
    # call; the wrapped method itself returns the thread when a
    # callback is present, so a single call suffices.
    return self.create_feature_with_http_info(**kwargs)
Create an enumerated sequence feature This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_feature(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param FeatureRequest body: :return: Feature If the method is called asynchronously, returns the request thread.
def get(self, artifact):
    """Return the coordinate with the pinned version for *artifact*.

    :param M2Coordinate artifact: the coordinate to look up.
    :return: the pinned coordinate, or the input unchanged when this set
        does not pin a version for it.
    :rtype: M2Coordinate
    """
    coord = self._key(artifact)
    return self._artifacts_to_versions.get(coord, artifact)
Gets the coordinate with the correct version for the given artifact coordinate. :param M2Coordinate artifact: the coordinate to lookup. :return: a coordinate which is the same as the input, but with the correct pinned version. If this artifact set does not pin a version for the input artifact, this just returns the original coordinate. :rtype: M2Coordinate
def show_analysis_dialog(self):
    """Refresh the analysis dialog's contents and display it."""
    dialog = self.analysis_dialog
    dialog.update_evt_types()
    dialog.update_groups()
    dialog.update_cycles()
    dialog.show()
Create the analysis dialog.
def add_manager(self, manager):
    """Add a single manager username to the scope's list of managers.

    :param manager: username to add
    :type manager: basestring
    :raises APIError: when unable to update the scope manager
    """
    self._update_scope_project_team(select_action='add_manager',
                                    user=manager,
                                    user_type='manager')
Add a single manager to the scope. :param manager: single username to be added to the scope list of managers :type manager: basestring :raises APIError: when unable to update the scope manager
async def resetTriggerToken(self, *args, **kwargs):
    """Reset the trigger token for a hook, invalidating any token
    previously issued via getTriggerToken.

    Output: ``v1/trigger-token-response.json#``. Stability: stable.
    """
    funcinfo = self.funcinfo["resetTriggerToken"]
    return await self._makeApiCall(funcinfo, *args, **kwargs)
Reset a trigger token Reset the token for triggering a given hook. This invalidates token that may have been issued via getTriggerToken with a new token. This method gives output: ``v1/trigger-token-response.json#`` This method is ``stable``
def first_timestamp(self, sid, epoch=False):
    """Get the first available timestamp for a sensor.

    Parameters
    ----------
    sid : str
        SensorID
    epoch : bool, default False
        Return the raw epoch int instead of a UTC pd.Timestamp.

    Returns
    -------
    pd.Timestamp | int | None
    """
    row = self.dbcur.execute(SQL_TMPO_FIRST, (sid,)).fetchone()
    if row is None:
        return None
    timestamp = row[2]
    if epoch:
        return timestamp
    return pd.Timestamp.utcfromtimestamp(timestamp).tz_localize('UTC')
Get the first available timestamp for a sensor Parameters ---------- sid : str SensorID epoch : bool default False If True return as epoch If False return as pd.Timestamp Returns ------- pd.Timestamp | int
def dispatch_write(self, buf):
    """Append ``buf`` to the pending write buffer.

    :return: True when the data was buffered.
    :raises asyncore.ExitNow: when the buffer would exceed MAX_BUFFER_SIZE.
    """
    self.write_buffer += buf
    buffered = len(self.write_buffer)
    if buffered > self.MAX_BUFFER_SIZE:
        console_output('Buffer too big ({:d}) for {}\n'.format(
            buffered, str(self)).encode())
        raise asyncore.ExitNow(1)
    return True
Augment the buffer with stuff to write when possible
def worth(what, level_name):
    """Return True if the watcher named ``what`` would log at ``level_name``.

    Looks the watcher up in this module's globals and compares its level
    against the named stdlib logging level.
    """
    watcher_level = globals()[what].level
    threshold = getattr(logging, level_name)
    return logging.NOTSET < watcher_level <= threshold
Returns `True` if the watcher `what` would log under `level_name`.
def reload_localzone():
    """Reload and return the cached local timezone.

    Call this if the system timezone has changed since the cache was
    populated.
    """
    global _cache_tz
    _cache_tz = pytz.timezone(get_localzone_name())
    # sanity-check that the looked-up zone matches the system's UTC offset
    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
Reload the cached localzone. You need to call this if the timezone has changed.
def get_replay(name, query, config, context=None):
    """Query the named replay endpoint for missed messages.

    :param name: replay endpoint name (key into config['replay_endpoints']).
    :param query: dict query (seq_ids / msg_ids / time range, etc.) sent to
        the endpoint.
    :param config: config dict with 'replay_endpoints' and 'io_threads'.
    :param context: optional zmq.Context; one is created when not provided.
    :return: generator yielding decoded message dicts.
    :raises IOError: when no endpoint is configured or connecting fails.
    :raises ValueError: when a returned message cannot be decoded.
    """
    endpoint = config.get('replay_endpoints', {}).get(name, None)
    if not endpoint:
        raise IOError("No appropriate replay endpoint "
                      "found for {0}".format(name))
    if not context:
        context = zmq.Context(config['io_threads'])
    # REQ socket: one request (the query) then one multipart reply.
    socket = context.socket(zmq.REQ)
    try:
        socket.connect(endpoint)
    except zmq.ZMQError as e:
        raise IOError("Error when connecting to the "
                      "replay endpoint: '{0}'".format(str(e)))
    socket.send(fedmsg.encoding.dumps(query).encode('utf-8'))
    msgs = socket.recv_multipart()
    socket.close()
    for m in msgs:
        try:
            yield fedmsg.encoding.loads(m.decode('utf-8'))
        except ValueError:
            # surface the undecodable raw payload to the caller
            raise ValueError(m)
Query the replay endpoint for missed messages. Args: name (str): The replay endpoint name. query (dict): A dictionary used to query the replay endpoint for messages. Queries are dictionaries with the following any of the following keys: * 'seq_ids': A ``list`` of ``int``, matching the seq_id attributes of the messages. It should return at most as many messages as the length of the list, assuming no duplicate. * 'seq_id': A single ``int`` matching the seq_id attribute of the message. Should return a single message. It is intended as a shorthand for singleton ``seq_ids`` queries. * 'seq_id_range': A two-tuple of ``int`` defining a range of seq_id to check. * 'msg_ids': A ``list`` of UUIDs matching the msg_id attribute of the messages. * 'msg_id': A single UUID for the msg_id attribute. * 'time': A tuple of two timestamps. It will return all messages emitted in between. config (dict): A configuration dictionary. This dictionary should contain, at a minimum, two keys. The first key, 'replay_endpoints', should be a dictionary that maps ``name`` to a ZeroMQ socket. The second key, 'io_threads', is an integer used to initialize the ZeroMQ context. context (zmq.Context): The ZeroMQ context to use. If a context is not provided, one will be created. Returns: generator: A generator that yields message dictionaries.
def add(self, func: Callable, name: Optional[str]=None,
        queue: Optional[str]=None, max_retries: Optional[Number]=None,
        periodicity: Optional[timedelta]=None):
    """Register a task function.

    :arg func: a callable to be executed
    :arg name: name of the task, used later to schedule jobs
    :arg queue: queue of the task, the default is used if not provided
    :arg max_retries: maximum number of retries, the default is used if
         not provided
    :arg periodicity: for periodic tasks, delay between executions as a
         timedelta
    """
    if not name:
        raise ValueError('Each Spinach task needs a name')
    if name in self._tasks:
        raise ValueError('A task named {} already exists'.format(name))

    # Fall back to the instance-level defaults, then the package constants.
    if queue is None:
        queue = self.queue if self.queue else const.DEFAULT_QUEUE
    if max_retries is None:
        max_retries = (self.max_retries if self.max_retries
                       else const.DEFAULT_MAX_RETRIES)
    if periodicity is None:
        periodicity = self.periodicity

    if queue and queue.startswith('_'):
        raise ValueError('Queues starting with "_" are reserved by '
                         'Spinach for internal use')

    self._tasks[name] = Task(func, name, queue, max_retries, periodicity)
Register a task function. :arg func: a callable to be executed :arg name: name of the task, used later to schedule jobs :arg queue: queue of the task, the default is used if not provided :arg max_retries: maximum number of retries, the default is used if not provided :arg periodicity: for periodic tasks, delay between executions as a timedelta >>> tasks = Tasks() >>> tasks.add(lambda x: x, name='do_nothing')
def escape(self, value):
    """Escape the given value with the engine's escape function, if any."""
    text = soft_unicode(value)
    escape_func = self._engine._escape
    # no engine-level escape configured: return the coerced text as-is
    return text if escape_func is None else escape_func(text)
Escape given value.
def check_is_notification(self, participant_id, messages):
    """Flag each message with ``is_notification`` for the given participant.

    A message counts as a notification when it was sent after the
    participant's last notification check, by someone else. When no check
    record exists yet, every message counts as a notification.
    """
    try:
        last_check = (NotificationCheck.objects
                      .filter(participant__id=participant_id)
                      .latest('id')
                      .date_check)
    except Exception:
        # no check recorded yet (or lookup failed): flag everything
        for message in messages:
            message.is_notification = True
        return messages

    for message in messages:
        message.is_notification = (message.sent_at > last_check
                                   and message.sender.id != participant_id)
    return messages
Check if each message requires a notification for the specified participant.
def request_blocking(self, method, params):
    """Make a synchronous request using the provider.

    :raises ValueError: when the provider response carries an error.
    """
    response = self._make_request(method, params)
    try:
        error = response["error"]
    except KeyError:
        return response["result"]
    raise ValueError(error)
Make a synchronous request using the provider
def _get_error_generator(type, obj, schema_dir=None, version=DEFAULT_VER, default='core'):
    """Get a generator for validating against the schema for the given object type.

    Args:
        type (str): The object type to find the schema for.
        obj: The object to be validated.
        schema_dir (str): The path in which to search for schemas.
        version (str): The version of the STIX specification to validate
            against. Only used to find base schemas when schema_dir is None.
        default (str): If the schema for the given type cannot be found, use
            the one with this name instead.

    Returns:
        A generator for errors found when validating the object against the
        appropriate schema, or None if a custom schema_dir was given and the
        schema cannot be found.
    """
    # BUG FIX: remember whether the caller supplied a custom schema directory
    # *before* defaulting it. The original code re-tested `schema_dir` after
    # reassignment, so the "return None for custom dirs" branch and the
    # observed-data schema patch below could never trigger as documented.
    custom_schema_dir = schema_dir is not None
    if not custom_schema_dir:
        schema_dir = os.path.abspath(os.path.dirname(__file__) + '/schemas-'
                                     + version + '/')
    try:
        schema_path = find_schema(schema_dir, type)
        schema = load_schema(schema_path)
    except (KeyError, TypeError):
        # Assume a custom object with no schema; fall back to the base schema.
        try:
            schema_path = find_schema(schema_dir, default)
            schema = load_schema(schema_path)
        except (KeyError, TypeError):
            # With a custom schema dir a missing schema is not an error;
            # with the bundled schemas it is.
            if custom_schema_dir:
                return None
            raise SchemaInvalidError("Cannot locate a schema for the object's "
                                     "type, nor the base schema ({}.json).".format(default))

    if type == 'observed-data' and not custom_schema_dir:
        # Validate against a tightened bundled schema requiring at least one
        # object in the 'objects' property.
        schema['allOf'][1]['properties']['objects'] = {
            "objects": {
                "type": "object",
                "minProperties": 1
            }
        }

    validator = load_validator(schema_path, schema)
    try:
        error_gen = validator.iter_errors(obj)
    except schema_exceptions.RefResolutionError:
        raise SchemaInvalidError('Invalid JSON schema: a JSON '
                                 'reference failed to resolve')
    return error_gen
Get a generator for validating against the schema for the given object type. Args: type (str): The object type to find the schema for. obj: The object to be validated. schema_dir (str): The path in which to search for schemas. version (str): The version of the STIX specification to validate against. Only used to find base schemas when schema_dir is None. default (str): If the schema for the given type cannot be found, use the one with this name instead. Returns: A generator for errors found when validating the object against the appropriate schema, or None if schema_dir is None and the schema cannot be found.
def expr_contains(e, o):
    """Return True if ``o`` occurs in expression tree ``e``."""
    if o == e:
        return True
    if e.has_args():
        # recurse into the subexpressions
        return any(expr_contains(arg, o) for arg in e.args())
    return False
Returns true if o is in e
def get_db_filename(impl, working_dir):
    """Return the path to the implementation's virtual-chain database file.

    :param impl: implementation exposing get_virtual_chain_name()
    :param working_dir: directory containing the database file
    """
    basename = impl.get_virtual_chain_name() + ".db"
    return os.path.join(working_dir, basename)
Get the absolute path to the virtual chain's database file (``<chain-name>.db`` inside ``working_dir``).
def process_string_tensor_event(event):
    """Convert a string TensorEvent into a JSON-compatible response dict."""
    strings = tensor_util.make_ndarray(event.tensor_proto)
    rendered = text_array_to_html(strings)
    return {
        'wall_time': event.wall_time,
        'step': event.step,
        'text': rendered,
    }
Convert a TensorEvent into a JSON-compatible response.
def load(self, host, exact_host_match=False):
    """Load a config for a hostname or url and append it to this instance.

    Calls :func:`ftr_get_config` and :meth:`append` internally; refer to
    their docs for details on parameters.
    """
    config_string, host_string = ftr_get_config(host, exact_host_match)
    if config_string is None:
        # nothing to append: log and bail out
        LOGGER.error(u'Error while loading configuration.',
                     extra={'siteconfig': host_string})
        return
    self.append(ftr_string_to_instance(config_string))
Load a config for a hostname or url. This method calls :func:`ftr_get_config` and :meth`append` internally. Refer to their docs for details on parameters.
def replace_project(self, project_key, **kwargs):
    """Replace an existing project.

    Creates a project with the given id or completely rewrites it,
    including any previously added files or linked datasets, if one
    already exists.

    :param project_key: owner/id of the project to replace.
    :param kwargs: title, visibility, objective, summary, tags, license,
        files, linked_datasets (see create_project for details).
    :returns: project object
    :raises RestApiError: if a server error occurs
    """
    def _make_project_request():
        return _swagger.ProjectCreateRequest(
            title=kwargs.get('title'),
            visibility=kwargs.get('visibility'))

    def _make_file_request(name, url, description, labels):
        return _swagger.FileCreateRequest(
            name=name,
            source=_swagger.FileSourceCreateRequest(url=url),
            description=description,
            labels=labels)

    request = self.__build_project_obj(_make_project_request,
                                       _make_file_request,
                                       kwargs)

    try:
        project_owner_id, project_id = parse_dataset_key(project_key)
        self._projects_api.replace_project(project_owner_id, project_id,
                                           body=request)
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
Replace an existing Project *Create a project with a given id or completely rewrite the project, including any previously added files or linked datasets, if one already exists with the given id.* :param project_key: Username and unique identifier of the creator of a project in the form of owner/id. :type project_key: str :param title: Project title :type title: str :param objective: Short project objective. :type objective: str, optional :param summary: Long-form project summary. :type summary: str, optional :param tags: Project tags. Letters numbers and spaces :type tags: list, optional :param license: Project license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Project visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :param linked_datasets: Initial set of linked datasets. :type linked_datasets: list of object, optional :returns: project object :rtype: object :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.replace_project( ... 'username/test-project', ... visibility='PRIVATE', ... objective='A better objective', ... title='Replace project') # doctest: +SKIP
def get_monitors(self, condition=None, page_size=1000):
    """Yield :class:`.DeviceCloudMonitor` objects matching ``condition``.

    :param condition: an :class:`.Expression` restricting which monitors
        are returned; when None, all monitors for the account are yielded.
    :param int page_size: number of results fetched per page.
    :return: generator of :class:`.DeviceCloudMonitor` instances.
    """
    request_kwargs = {}
    if condition:
        request_kwargs['condition'] = condition.compile()
    # NOTE(review): page_size is not forwarded to iter_json_pages here —
    # presumably handled elsewhere; confirm against the connection API.
    pages = self._conn.iter_json_pages("/ws/Monitor", **request_kwargs)
    for monitor_json in pages:
        yield DeviceCloudMonitor.from_json(self._conn, monitor_json,
                                           self._tcp_client_manager)
Return an iterator over all monitors matching the provided condition Get all inactive monitors and print id:: for mon in dc.monitor.get_monitors(MON_STATUS_ATTR == "DISABLED"): print(mon.get_id()) Get all the HTTP monitors and print id:: for mon in dc.monitor.get_monitors(MON_TRANSPORT_TYPE_ATTR == "http"): print(mon.get_id()) Many other possibilities exist. See the :mod:`devicecloud.condition` documention for additional details on building compound expressions. :param condition: An :class:`.Expression` which defines the condition which must be matched on the monitor that will be retrieved from Device Cloud. If a condition is unspecified, an iterator over all monitors for this account will be returned. :type condition: :class:`.Expression` or None :param int page_size: The number of results to fetch in a single page. :return: Generator yielding :class:`.DeviceCloudMonitor` instances matching the provided conditions.
def manage_brok(self, brok):
    """Dispatch ``brok`` to this module's ``manage_<type>_brok`` handler.

    :param brok: the brok to manage
    :return: False when no handler exists for the brok's type, otherwise
        the handler's return value.
    """
    handler = getattr(self, 'manage_' + brok.type + '_brok', None)
    if not handler:
        return False
    # decode/unserialize the brok payload before handing it over
    brok.prepare()
    return handler(brok)
Request the module to manage the given brok. There are a lot of different possible broks to manage. The list is defined in the Brok class. An internal module may redefine this function or, easier, define only the function for the brok it is interested with. Hence a module interested in the `service_check_result` broks will only need to define a function named as `manage_service_check_result_brok` :param brok: :type brok: :return: :rtype:
def email_verifier(self, email, raw=False):
    """Verify the deliverability of a given email address.

    :param email: The email address to check.
    :param raw: Gives back the entire response instead of just the 'data'.
    :return: Full payload of the query as a dict.
    """
    endpoint = self.base_endpoint.format('email-verifier')
    payload = {'email': email, 'api_key': self.api_key}
    return self._query_hunter(endpoint, payload, raw=raw)
Verify the deliverability of a given email address. :param email: The email address to check. :param raw: Gives back the entire response instead of just the 'data'. :return: Full payload of the query as a dict.
def remove_domain_user_role(request, user, role, domain=None):
    """Revoke a single role from a user on the given domain."""
    roles_manager = keystoneclient(request, admin=True).roles
    return roles_manager.revoke(role, user=user, domain=domain)
Removes a given single role for a user from a domain.
def load(klass, filename, inject_env=True):
    """Load a Pipfile from the given filename and return the instance."""
    parser = PipfileParser(filename=filename)
    pipfile = klass(filename=filename)
    pipfile.data = parser.parse(inject_env=inject_env)
    return pipfile
Load a Pipfile from a given filename.
def insert_pattern(self, pattern, index):
    """
    Inserts given pattern into the Model.

    :param pattern: Pattern.
    :type pattern: unicode
    :param index: Insertion index.
    :type index: int
    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Inserting '{0}' at '{1}' index.".format(pattern, index))
    # Drop any existing occurrence first so the pattern appears only once.
    self.remove_pattern(pattern)
    # Qt model protocol: row insertion must be bracketed by
    # beginInsertRows/endInsertRows so attached views stay consistent.
    self.beginInsertRows(self.get_node_index(self.root_node), index, index)
    pattern_node = PatternNode(name=pattern)
    self.root_node.insert_child(pattern_node, index)
    self.endInsertRows()
    self.pattern_inserted.emit(pattern_node)
    return True
Inserts given pattern into the Model. :param pattern: Pattern. :type pattern: unicode :param index: Insertion index. :type index: int :return: Method success. :rtype: bool
def resetStaffCompensationInfo(self, request, queryset):
    """Admin action: bulk-reset selected staff members' compensation
    information to their category defaults via the resetCompensationRules
    view.
    """
    selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
    ct = ContentType.objects.get_for_model(queryset.model)
    target = reverse('resetCompensationRules')
    query = "?ct=%s&ids=%s" % (ct.pk, ",".join(selected))
    return HttpResponseRedirect(target + query)
This action is added to the list for staff member to permit bulk reseting to category defaults of compensation information for staff members.
def _upload_none(self, upload_info, check_result):
    """Build an UploadResult for a duplicate file without uploading.

    Used when the upload check found an identical file already stored:
    no action is performed, the existing quickkey is returned.
    """
    return UploadResult(
        action=None,
        quickkey=check_result['duplicate_quickkey'],
        hash_=upload_info.hash_info.file,
        filename=upload_info.name,
        size=upload_info.size,
        created=None,
        revision=None
    )
Dummy upload function for when we don't actually upload
def wait_for_window_focus(self, window, want_focus):
    """
    Block until a window gains or loses input focus.

    :param window: The window to wait on
    :param want_focus: If 1, wait for focus. If 0, wait for loss of focus.
    """
    _libxdo.xdo_wait_for_window_focus(self._xdo, window, want_focus)
Wait for a window to have or lose focus. :param window: The window to wait on :param want_focus: If 1, wait for focus. If 0, wait for loss of focus.
def perform_exe_expansion(self):
    """Expand macros in the [executables] section to full paths, in place.

    Values like ${which:lalapps_tmpltbank} are replaced with the
    equivalent of which(lalapps_tmpltbank); other values are unchanged.
    """
    if not self.has_section('executables'):
        return
    for option, value in self.items('executables'):
        expanded = self.interpolate_exe(value)
        # only write back when the macro actually resolved to something new
        if expanded != value:
            self.set('executables', option, expanded)
This function will look through the executables section of the ConfigParser object and replace any values using macros with full paths. For any values that look like ${which:lalapps_tmpltbank} will be replaced with the equivalent of which(lalapps_tmpltbank) Otherwise values will be unchanged.
def get_user(
    self, identified_with, identifier, req, resp, resource, uri_kwargs
):
    """Get user object for given identifier.

    Args:
        identified_with (object): authentication middleware used to
            identify the user.
        identifier: middleware-specific user identifier.

    Returns:
        dict: user object stored in the key-value store if it exists,
        otherwise ``None``.
    """
    storage_key = self._get_storage_key(identified_with, identifier)
    stored_value = self.kv_store.get(storage_key)
    if stored_value is None:
        return None
    return self.serialization.loads(stored_value.decode())
Get user object for given identifier. Args: identified_with (object): authentication middleware used to identify the user. identifier: middleware specifix user identifier (string or tuple in case of all built in authentication middleware classes). Returns: dict: user object stored in Redis if it exists, otherwise ``None``
def _delete_msg(self, conn, queue_url, receipt_handle):
    """Delete one message from an SQS queue; log failure instead of raising.

    :param conn: SQS API connection (:py:class:`botocore:SQS.Client`)
    :param queue_url: queue URL to delete the message from
    :type queue_url: str
    :param receipt_handle: message receipt handle
    :type receipt_handle: str
    """
    resp = conn.delete_message(QueueUrl=queue_url,
                               ReceiptHandle=receipt_handle)
    status = resp['ResponseMetadata']['HTTPStatusCode']
    if status != 200:
        logger.error('Error: message with receipt handle %s in queue %s '
                     'was not successfully deleted (HTTP %s)',
                     receipt_handle, queue_url, status)
        return
    logger.info('Message with receipt handle %s deleted from queue %s',
                receipt_handle, queue_url)
Delete the message specified by ``receipt_handle`` in the queue specified by ``queue_url``. :param conn: SQS API connection :type conn: :py:class:`botocore:SQS.Client` :param queue_url: queue URL to delete the message from :type queue_url: str :param receipt_handle: message receipt handle :type receipt_handle: str
def set_subject(self, value: Union[Literal, Identifier, str], lang: Union[str, None] = None):
    """Set the DC Subject literal value.

    :param value: Value of the subject node
    :param lang: Language in which the value is
    :return: whatever ``metadata.add`` returns for the new node
    """
    return self.metadata.add(key=DC.subject, value=value, lang=lang)
Set the DC Subject literal value :param value: Value of the subject node :param lang: Language in which the value is
def main():
    """Parse command line options and either launch a configuration dialog
    or start an instance of _MainLoop, optionally as a daemon.
    """
    (options, _) = _parse_args()

    if options.change_password:
        c.keyring_set_password(c["username"])
        sys.exit(0)
    if options.select:
        courses = client.get_courses()
        c.selection_dialog(courses)
        c.save()
        sys.exit(0)
    if options.stop:
        # signal the running daemon (SIGINT) via its pid file
        os.system("kill -2 `cat ~/.studdp/studdp.pid`")
        sys.exit(0)

    task = _MainLoop(options.daemonize, options.update_courses)

    if options.daemonize:
        log.info("daemonizing...")
        with daemon.DaemonContext(working_directory=".",
                                  pidfile=PIDLockFile(PID_FILE)):
            # the daemon detaches from the tty, so log to a file instead
            handler = logging.FileHandler(LOG_PATH)
            # BUG FIX: setFormatter() requires a logging.Formatter instance;
            # the original passed the raw format string, which breaks when
            # the handler first tries to format a record.
            handler.setFormatter(logging.Formatter(
                '%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
            log.addHandler(handler)
            task()
    else:
        task()
parse command line options and either launch some configuration dialog or start an instance of _MainLoop as a daemon
def get_scenario_data(scenario_id, **kwargs):
    """Return every dataset referenced by the given scenario.

    Hidden datasets the requesting user may not read have their value and
    metadata blanked out rather than being omitted.

    :param scenario_id: id of the scenario whose datasets to fetch
    :param user_id: (kwarg) id of the requesting user, used for the
        read-permission check on hidden datasets
    :returns: a list of Dataset objects (detached from the session)
    """
    user_id = kwargs.get('user_id')

    scenario_data = db.DBSession.query(Dataset).filter(
        Dataset.id == ResourceScenario.dataset_id,
        ResourceScenario.scenario_id == scenario_id).options(
        joinedload_all('metadata')).distinct().all()

    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            except Exception:
                # BUG FIX: narrowed from a bare `except:` which also
                # swallowed SystemExit/KeyboardInterrupt. Behavior is still
                # best-effort: unreadable hidden data is blanked, not fatal.
                sd.value = None
                sd.metadata = []

    # detach the objects so callers get plain data, not live ORM rows
    db.DBSession.expunge_all()

    log.info("Retrieved %s datasets", len(scenario_data))
    return scenario_data
Get all the datasets referenced by the scenario with the specified id @returns a list of datasets
def get_ymal_data(data):
    """Parse YAML metadata, validate it, and return it; exit(1) on error.

    (The 'ymal' typo is kept in the public name for caller compatibility.)

    :param data: metadata in YAML format
    :return: the parsed and validated metadata
    """
    try:
        # BUG FIX: `except yaml.YAMLError, e:` and `unicode()` were
        # Python 2-only syntax and fail outright under Python 3.
        # safe_load is used instead of yaml.load: it refuses arbitrary
        # object construction on untrusted input, and plain yaml.load
        # requires an explicit Loader argument on PyYAML >= 6 anyway.
        format_data = yaml.safe_load(data)
    except yaml.YAMLError as e:
        msg = "Yaml format error: {}".format(e)
        logging.error(msg)
        sys.exit(1)

    if not check_config(format_data):
        sys.exit(1)

    return format_data
Get metadata and validate it :param data: metadata in YAML format
def _eval(self):
    """Evaluate an individual recursively, using ``self._pos`` as the
    instruction pointer into ``self._ind`` (prefix order).

    Returns the node at the starting position after evaluating it.
    """
    pos = self._pos
    # advance the pointer before recursing so children consume the
    # following positions in prefix order
    self._pos += 1
    node = self._ind[pos]
    if isinstance(node, Function):
        # evaluate each argument subtree first; recursion moves self._pos
        args = [self._eval() for x in range(node.nargs)]
        node.eval(args)
        # release the per-argument cached outputs once consumed
        for x in args:
            x.hy = None
            x.hy_test = None
    else:
        # terminal node: evaluate directly on the input data
        node.eval(self._X)
    return node
Evaluate an individual using recursion, with ``self._pos`` as the instruction pointer
def numbaGaussian2d(psf, sy, sx):
    """Fill ``psf`` in place with a normalized 2-D Gaussian (numba-friendly).

    :param psf: 2-D output array, overwritten with the kernel
    :param sy: sigma along axis 0
    :param sx: sigma along axis 1
    """
    rows, cols = psf.shape
    center_y, center_x = rows // 2, cols // 2
    two_sx2 = 2 * sx ** 2
    two_sy2 = 2 * sy ** 2
    # plain nested loops on purpose: this function is compiled by numba
    for row in range(rows):
        for col in range(cols):
            dy2 = (row - center_y) ** 2
            dx2 = (col - center_x) ** 2
            psf[row, col] = exp(-(dy2 / two_sy2 + dx2 / two_sx2))
    # normalize so the kernel sums to one
    psf /= psf.sum()
2d Gaussian to be used in numba code
def get_comments_for_reference_on_date(self, reference_id, from_, to):
    """Return the comments for ``reference_id`` whose effective dates
    overlap the inclusive [from_, to] range.

    arg: reference_id (osid.id.Id): a reference Id
    arg: from_ (osid.calendaring.DateTime): range start
    arg: to (osid.calendaring.DateTime): range end
    return: (osid.commenting.CommentList) matching comments
    """
    matching = [
        comment
        for comment in self.get_comments_for_reference(reference_id)
        if overlap(from_, to, comment.start_date, comment.end_date)
    ]
    return objects.CommentList(matching, runtime=self._runtime)
Gets a list of all comments corresponding to a reference ``Id`` and effective during the entire given date range inclusive but not confined to the date range. arg: reference_id (osid.id.Id): a reference ``Id`` arg: from (osid.calendaring.DateTime): from date arg: to (osid.calendaring.DateTime): to date return: (osid.commenting.CommentList) - the returned ``CommentList`` raise: InvalidArgument - ``to`` is less than ``from`` raise: NullArgument - ``reference_id, from,`` or ``to`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*