code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def blob(
    self,
    blob_name,
    chunk_size=None,
    encryption_key=None,
    kms_key_name=None,
    generation=None,
):
    """Factory constructor for a blob object.

    No HTTP request is made; this only instantiates a Blob owned by
    this bucket.

    :param blob_name: name of the blob to instantiate.
    :param chunk_size: chunk size in bytes when iterating (must be a
        multiple of 256 KB per the API specification).
    :param encryption_key: optional 32-byte customer-supplied key.
    :param kms_key_name: optional KMS key resource name.
    :param generation: optional specific revision of the object.
    :returns: the new blob object.
    """
    return Blob(
        name=blob_name,
        bucket=self,
        chunk_size=chunk_size,
        encryption_key=encryption_key,
        kms_key_name=kms_key_name,
        generation=generation,
    )
Factory constructor for blob object. .. note:: This will not make an HTTP request; it simply instantiates a blob object owned by this bucket. :type blob_name: str :param blob_name: The name of the blob to be instantiated. :type chunk_size: int :param chunk_size: The size of a chunk of data whenever iterating (in bytes). This must be a multiple of 256 KB per the API specification. :type encryption_key: bytes :param encryption_key: Optional 32 byte encryption key for customer-supplied encryption. :type kms_key_name: str :param kms_key_name: Optional resource name of KMS key used to encrypt blob's content. :type generation: long :param generation: Optional. If present, selects a specific revision of this object. :rtype: :class:`google.cloud.storage.blob.Blob` :returns: The blob object created.
def load(self):
    """Load the library by cloning its git repository into ``self.path``.

    Raises EnvironmentError when the git module is unavailable.  An
    existing checkout is reused unless caching is disabled, in which
    case it is removed and cloned afresh.
    """
    if not git:
        raise EnvironmentError(MISSING_GIT_ERROR)
    if os.path.exists(self.path):
        if not config.CACHE_DISABLE:
            # Cache hit: reuse the existing checkout.
            return
        shutil.rmtree(self.path, ignore_errors=True)
    # Clean up a partial clone if anything below fails.
    with files.remove_on_exception(self.path):
        url = self.GIT_URL.format(**vars(self))
        repo = git.Repo.clone_from(url=url, to_path=self.path, b=self.branch)
        if self.commit:
            # Pin the working tree to the requested commit.
            repo.head.reset(self.commit, index=True, working_tree=True)
Load the library.
def _parse_message(self, data):
    """Parse the raw expander message from the device.

    :param data: message data string (header:address,channel,value)
    :raises InvalidMessageError: on malformed data or unknown header.
    """
    try:
        header, values = data.split(':')
        address, channel, value = values.split(',')
        self.address = int(address)
        self.channel = int(channel)
        self.value = int(value)
    except ValueError:
        raise InvalidMessageError('Received invalid message: {0}'.format(data))

    if header == '!EXP':
        self.type = ExpanderMessage.ZONE
    elif header == '!REL':
        self.type = ExpanderMessage.RELAY
    else:
        raise InvalidMessageError('Unknown expander message header: {0}'.format(data))
Parse the raw message from the device. :param data: message data :type data: string :raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
def n_pitche_classes_used(pianoroll):
    """Return the number of unique pitch classes used in a pianoroll."""
    _validate_pianoroll(pianoroll)
    chroma = _to_chroma(pianoroll)
    # A pitch class counts as used if it is active at any time step.
    return np.count_nonzero(np.any(chroma, 0))
Return the number of unique pitch classes used in a pianoroll.
def sample_random(self):
    """Sample a point from a random leaf.

    In volume sampling mode, a leaf is reached by descending the split
    tree with probability proportional to each side's extent; otherwise
    a leaf is drawn uniformly at random.
    """
    if self.sampling_mode['volume']:
        if self.leafnode:
            return self.sample_bounds()
        else:
            # Fraction of this node's extent on the 'lower' side of the split.
            split_ratio = ((self.split_value - self.bounds_x[0,self.split_dim])
                           / (self.bounds_x[1,self.split_dim] - self.bounds_x[0,self.split_dim]))
            if split_ratio > np.random.random():
                return self.lower.sample(sampling_mode=['random'])
            else:
                return self.greater.sample(sampling_mode=['random'])
    else:
        return np.random.choice(self.get_leaves()).sample_bounds()
Sample a point in a random leaf.
def from_pandas(cls, index):
    """Create a baloo Index from a pandas Index.

    :param index: pandas.Index instance.
    :returns: equivalent baloo Index carrying values, dtype and name.
    """
    from pandas import Index as PandasIndex
    check_type(index, PandasIndex)
    return Index(index.values, index.dtype, index.name)
Create baloo Index from pandas Index. Parameters ---------- index : pandas.base.Index Returns ------- Index
def json_format(out, graph):
    """Write the graph to ``out`` in a machine-readable JSON format.

    The output maps each step name to its list of dependency names.
    """
    steps = {}
    for step, deps in each_step(graph):
        steps[step.name] = {}
        steps[step.name]["deps"] = [dep.name for dep in deps]

    json.dump({"steps": steps}, out, indent=4)
    out.write("\n")
Outputs the graph in a machine readable JSON format.
def _request(self, proxy, timeout):
    """Return a WPToolsRequest configured from this object's flags."""
    return request.WPToolsRequest(self.flags['silent'],
                                  self.flags['verbose'],
                                  proxy, timeout)
Returns WPToolsRequest object
def showMessageOverlay(self, pchText, pchCaption, pchButton0Text, pchButton1Text, pchButton2Text, pchButton3Text):
    """Show the message overlay; blocks until the user responds.

    :returns: the dialog result from the underlying function table.
    """
    overlay_fn = self.function_table.showMessageOverlay
    return overlay_fn(pchText, pchCaption, pchButton0Text,
                      pchButton1Text, pchButton2Text, pchButton3Text)
Show the message overlay. This will block and return you a result.
def overlay(im1, im2, c):
    r"""Overlay ``im2`` onto ``im1``, centered at voxel coords ``c``.

    Parameters
    ----------
    im1 : ND-array
        Original voxelated image (modified in place).
    im2 : ND-array
        Template voxelated image; every side must be odd so it has a
        well-defined center voxel.
    c : array_like
        [x, y, z] coordinates in ``im1`` where ``im2`` is centered.

    Returns
    -------
    image : ND-array
        ``im1`` with ``im2`` added at the specified location.
    """
    if any(side % 2 == 0 for side in im2.shape):
        raise Exception("Structuring element must be odd-voxeled...")
    hx, hy, hz = ((side - 1) // 2 for side in im2.shape)
    cx, cy, cz = c
    im1[cx - hx:cx + hx + 1, cy - hy:cy + hy + 1, cz - hz:cz + hz + 1] += im2
    return im1
r""" Overlays ``im2`` onto ``im1``, given voxel coords of center of ``im2`` in ``im1``. Parameters ---------- im1 : ND-array Original voxelated image im2 : ND-array Template voxelated image c : array_like [x, y, z] coordinates in ``im1`` where ``im2`` will be centered Returns ------- image : ND-array A modified version of ``im1``, with ``im2`` overlaid at the specified location
def _get_stack_info_for_trace(
    self,
    frames,
    library_frame_context_lines=None,
    in_app_frame_context_lines=None,
    with_locals=True,
    locals_processor_func=None,
):
    """Collect stack info for a trace, resolving template sources.

    Frames are filtered through the client's include/exclude path
    regexes and returned as a list.
    """
    return list(
        iterate_with_template_sources(
            frames,
            with_locals=with_locals,
            library_frame_context_lines=library_frame_context_lines,
            in_app_frame_context_lines=in_app_frame_context_lines,
            include_paths_re=self.include_paths_re,
            exclude_paths_re=self.exclude_paths_re,
            locals_processor_func=locals_processor_func,
        )
    )
If the stacktrace originates within the elasticapm module, it will skip frames until some other module comes up.
def act_on_droplets(self, **data):
    r"""Perform an arbitrary action on all droplets with this tag.

    ``data`` is serialized as JSON and POSTed to the droplet-actions
    endpoint; the API expects at minimum a ``"type"`` field.

    :return: a generator of `Action`\ s for the in-progress operations
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    return map(api._action, api.request('/v2/droplets/actions',
                                        method='POST',
                                        params={"tag_name": self.name},
                                        data=data)["actions"])
r""" Perform an arbitrary action on all of the droplets to which the tag is applied. ``data`` will be serialized as JSON and POSTed to the proper API endpoint. All currently-documented actions require the POST body to be a JSON object containing, at a minimum, a ``"type"`` field. :return: a generator of `Action`\ s representing the in-progress operations on the droplets :rtype: generator of `Action`\ s :raises DOAPIError: if the API endpoint replies with an error
def get_zone(self, id=None, name=None):
    """Get a zone object by id or, when id is falsy, by name."""
    log.info("Picking zone: %s (%s)" % (name, id))
    # NOTE(review): ``id or name`` falls back to name for any falsy id
    # (including 0) -- confirm ids are never falsy-but-valid.
    return self.zones[id or name]
Get zone object by name or id.
def parse(self, node):
    """Yield Field objects parsed from ``node``.

    Non-Field text parts are stripped, joined and emitted as a single
    Field; at least one Field is always yielded (an empty one when the
    node produced nothing).
    """
    self._attrs = {}
    vals = []
    yielded = False
    for x in self._read_parts(node):
        if isinstance(x, Field):
            yielded = True
            x.attrs = self._attrs
            yield x
        else:
            vals.append(ustr(x).strip(' \n\t'))
    joined = ' '.join([ x for x in vals if x ])
    if joined:
        yielded = True
        yield Field(node, guess_type(joined), self._attrs)
    if not yielded:
        # Guarantee at least one (empty) field per node.
        yield Field(node, "", self._attrs)
Return generator yielding Field objects for a given node
def generic_find_fk_constraint_names(table, columns, referenced, insp):
    """Find foreign-key constraint names in alembic migrations.

    :param table: table whose foreign keys are inspected.
    :param columns: set of referred column names to match exactly.
    :param referenced: name of the referred table to match.
    :param insp: SQLAlchemy inspector.
    :returns: set of matching constraint names.
    """
    matches = set()
    for foreign_key in insp.get_foreign_keys(table):
        refers_target = foreign_key['referred_table'] == referenced
        same_columns = set(foreign_key['referred_columns']) == columns
        if refers_target and same_columns:
            matches.add(foreign_key['name'])
    return matches
Utility to find foreign-key constraint names in alembic migrations
def get_all(self, start=0, count=-1, sort=''):
    """Get a list of logical interconnects.

    Args:
        start: zero-based index of the first item to return (default 0).
        count: number of resources to return; -1 requests all items.
        sort: sort order of the returned data set.

    Returns:
        list: a list of logical interconnects.
    """
    return self._helper.get_all(start, count, sort=sort)
Gets a list of logical interconnects based on optional sorting and filtering and is constrained by start and count parameters. Args: start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response might differ from the requested count if the sum of start and count exceeds the total number of items. sort: The sort order of the returned data set. By default, the sort order is based on create time with the oldest entry first. Returns: list: A list of logical interconnects.
def validate(self, guide):
    """Validate a guide, resolving string names through the Registry.

    :raises PlotnineError: when the resolved guide is not a known type.
    """
    if is_string(guide):
        # e.g. 'legend' -> Registry['guide_legend']()
        guide = Registry['guide_{}'.format(guide)]()

    if not isinstance(guide, guide_class):
        raise PlotnineError(
            "Unknown guide: {}".format(guide))
    return guide
Validate guide object
def after_object(eng, objects, obj):
    """Mark ``obj`` completed and commit once its processing finishes."""
    super(InvenioProcessingFactory, InvenioProcessingFactory)\
        .after_object(eng, objects, obj)
    obj.save(
        status=obj.known_statuses.COMPLETED,
        id_workflow=eng.model.uuid
    )
    db.session.commit()
Take action once the processing of an object completes.
def _string_parser(strip_whitespace): def _parse_string_value(element_text, _state): if element_text is None: value = '' elif strip_whitespace: value = element_text.strip() else: value = element_text return value return _parse_string_value
Return a parser function for parsing string values.
def build(
        documentPath,
        outputUFOFormatVersion=3,
        roundGeometry=True,
        verbose=True,
        logPath=None,
        progressFunc=None,
        processRules=True,
        logger=None,
        useVarlib=False,
        ):
    """Simple builder for UFO designspaces.

    :param documentPath: a .designspace file, or a directory to scan
        for ``*.designspace`` files.
    :returns: list of generateUFO results, one per processed document.
    """
    import os
    import glob
    if os.path.isdir(documentPath):
        todo = glob.glob(os.path.join(documentPath, "*.designspace"))
    else:
        todo = [documentPath]
    results = []
    for path in todo:
        document = DesignSpaceProcessor(ufoVersion=outputUFOFormatVersion)
        document.useVarlib = useVarlib
        document.roundGeometry = roundGeometry
        document.read(path)
        try:
            results.append(document.generateUFO(processRules=processRules))
        except Exception:
            # Fix: the original bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            # (Dead ``reader = None`` assignment removed.)
            if logger:
                logger.exception("ufoProcessor error")
    return results
Simple builder for UFO designspaces.
def setFDs(self):
    """Collect server socket file descriptors.

    Populates ``childFDs`` (stdio mapped through, plus each server fd
    mapped to itself for inheritance by workers) and ``fds`` mapping
    server name -> fd.  Separated from launchworkers for clarity.
    """
    self.childFDs = {0: 0, 1: 1, 2: 2}
    self.fds = {}
    for name in self.servers:
        self.port = self.hendrix.get_port(name)
        fd = self.port.fileno()
        self.childFDs[fd] = fd
        self.fds[name] = fd
Collects server file descriptors. Separated from launchworkers for clarity and readability.
def get_available_languages(self, obj):
    """Return the languages available for ``obj``.

    Falls back to an empty queryset when there is no current object.
    """
    if obj is None:
        return self.model.objects.none()
    return obj.available_languages
Returns available languages for current object.
def delete(self):
    """Remove this document from the database.

    No-op when the document has no id; fires ``on_delete`` afterwards.
    """
    if not self.id:
        return
    # NOTE(review): the guard reads ``self.id`` but the query uses
    # ``self._id`` -- presumably aliases of the same value; confirm.
    self.collection.remove({'_id': self._id})
    self.on_delete(self)
Remove from database.
def _getInterfaces(self):
    """Load application communication interfaces.

    Scans ``application/interface/<name>/<name>.py`` modules and
    instantiates their ``Service`` class (passing ``self``) when present.

    :return: dict mapping interface name to Service instance.
    """
    interfaces = {}
    interfacesPath = os.path.join("application", "interface")
    interfaceList = os.listdir(interfacesPath)
    for file in interfaceList:
        interfaceDirectoryPath = os.path.join(interfacesPath, file)
        # Skip plain files and dunder/hidden entries.
        if not os.path.isdir(interfaceDirectoryPath) or file.startswith("__") or file.startswith("."):
            continue
        interfaceName = ntpath.basename(interfaceDirectoryPath)
        interfacePath = os.path.join(interfaceDirectoryPath, interfaceName) + ".py"
        if not os.path.isfile(interfacePath):
            continue
        # Import the module directly from its file path.
        interfaceSpec = importlib.util.spec_from_file_location(
            interfaceName, interfacePath
        )
        interface = importlib.util.module_from_spec(interfaceSpec)
        interfaceSpec.loader.exec_module(interface)
        if hasattr(interface, "Service"):
            interfaceInstance = interface.Service(self)
            interfaces[interfaceName] = interfaceInstance
    return interfaces
Load application communication interfaces. :return: <dict>
def trust(self, scope, vk):
    """Start trusting verifier key ``vk`` for the given ``scope``.

    :returns: ``self``, to allow call chaining.
    """
    entry = {'scope': scope, 'vk': vk}
    self.data['verifiers'].append(entry)
    return self
Start trusting a particular key for given scope.
def check_static_vars(self, node):
    """Ensure the page's static vars carry template and node context.

    When no static vars exist, a fresh ``upy_context`` with the
    template name is created; otherwise the stored literal string is
    parsed and updated.  The current node and page are then attached.
    """
    if self.static_vars == "" and hasattr(self, "template"):
        self.static_vars = {
            'upy_context': {
                'template_name': u"{}/{}".format(self.template.app_name, self.template.file_name)
            }
        }
    elif hasattr(self, "template"):
        # static_vars is stored as a string literal; parse it safely.
        self.static_vars = literal_eval(self.static_vars)
        self.static_vars['upy_context']['template_name'] = u"{}/{}".format(
            self.template.app_name, self.template.file_name
        )
    self.static_vars['upy_context']['NODE'] = node
    self.static_vars['upy_context']['PAGE'] = self
Check whether a Page has static vars and populate them with template and node context.
def _waiting_expect(self): if self._expect_sent is None: if self.environ.get('HTTP_EXPECT', '').lower() == '100-continue': return True self._expect_sent = '' return False
``True`` when the client is waiting for 100 Continue.
def rename_categories(self, new_categories, inplace=False):
    """Rename categories.

    ``new_categories`` may be list-like (replaces all, same length),
    dict-like (maps old -> new, missing keys pass through) or a
    callable applied to every category.  Series are currently treated
    as list-like (deprecated).

    :returns: the renamed Categorical, or None when ``inplace``.
    :raises ValueError: when list-like categories do not validate.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    cat = self if inplace else self.copy()

    if isinstance(new_categories, ABCSeries):
        # Deprecation path: warn that Series will become dict-like.
        msg = ("Treating Series 'new_categories' as a list-like and using "
               "the values. In a future version, 'rename_categories' will "
               "treat Series like a dictionary.\n"
               "For dict-like, use 'new_categories.to_dict()'\n"
               "For list-like, use 'new_categories.values'.")
        warn(msg, FutureWarning, stacklevel=2)
        new_categories = list(new_categories)

    if is_dict_like(new_categories):
        cat.categories = [new_categories.get(item, item)
                          for item in cat.categories]
    elif callable(new_categories):
        cat.categories = [new_categories(item) for item in cat.categories]
    else:
        cat.categories = new_categories
    if not inplace:
        return cat
Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. .. versionadded:: 0.21.0 * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. .. versionadded:: 0.23.0 .. warning:: Currently, Series are considered list like. In a future version of pandas they'll be considered dict-like. inplace : bool, default False Whether or not to rename the categories inplace or return a copy of this categorical with renamed categories. Returns ------- cat : Categorical or None With ``inplace=False``, the new categorical is returned. With ``inplace=True``, there is no return value. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories add_categories remove_categories remove_unused_categories set_categories Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) [A, A, b] Categories (2, object): [A, b] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) [A, A, B] Categories (2, object): [A, B]
def create_container(container_name, profile, **libcloud_kwargs):
    """Create a container in the cloud.

    :param container_name: Container name
    :param profile: The profile key
    :param libcloud_kwargs: Extra arguments for the driver's
        create_container method
    :return: dict with the container's ``name`` and ``extra`` metadata.
    """
    conn = _get_driver(profile=profile)
    libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    container = conn.create_container(container_name, **libcloud_kwargs)
    return {
        'name': container.name,
        'extra': container.extra
    }
Create a container in the cloud :param container_name: Container name :type container_name: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's create_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.create_container MyFolder profile1
def _update_activities(self):
    """Update stored activities and refresh dependent caches.

    The raw response is normalized to a list (possibly empty) before
    events are updated.
    """
    self._activities = self._activities_request()
    _LOGGER.debug("Device Activities Response: %s", self._activities)
    if not self._activities:
        self._activities = []
    elif not isinstance(self._activities, (list, tuple)):
        # A single activity was returned: wrap it in a list.
        self._activities = [self._activities]
    self._update_events()
Update stored activities and update caches as required.
def XYZ_to_xyY(cobj, *args, **kwargs):
    """Convert from XYZ to xyY.

    Guards against division by zero when X+Y+Z == 0.
    """
    xyz_sum = cobj.xyz_x + cobj.xyz_y + cobj.xyz_z
    if xyz_sum == 0.0:
        # Chromaticity is undefined at zero; pin to the origin.
        xyy_x = 0.0
        xyy_y = 0.0
    else:
        xyy_x = cobj.xyz_x / xyz_sum
        xyy_y = cobj.xyz_y / xyz_sum
    xyy_Y = cobj.xyz_y
    return xyYColor(
        xyy_x, xyy_y, xyy_Y, observer=cobj.observer, illuminant=cobj.illuminant)
Convert from XYZ to xyY.
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
                kind='quicksort', na_position='last'):
    """Sort by the values along either axis.

    Not supported on Panel/Panel4D; always raises NotImplementedError.
    The signature mirrors DataFrame.sort_values for API symmetry.
    """
    raise NotImplementedError("sort_values has not been implemented "
                              "on Panel or Panel4D objects.")
Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Returns ------- sorted_obj : DataFrame or None DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 3 NaN 8 4 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1
def parse_function(
        name: str,
        target: typing.Callable
) -> typing.Union[None, dict]:
    """Parse the documentation for a function.

    :param name: Name of the function to parse.
    :param target: The function to parse into documentation.
    :return: dict with name/doc/params/returns, or None when ``target``
        is not a function (has no ``__code__``).
    """
    if not hasattr(target, '__code__'):
        return None

    lines = get_doc_entries(target)
    # Lines not starting with ':' form the free-text description.
    docs = ' '.join(filter(lambda line: not line.startswith(':'), lines))
    params = parse_params(target, lines)
    returns = parse_returns(target, lines)

    return dict(
        name=getattr(target, '__name__'),
        doc=docs,
        params=params,
        returns=returns
    )
Parses the documentation for a function, which is specified by the name of the function and the function itself. :param name: Name of the function to parse :param target: The function to parse into documentation :return: A dictionary containing documentation for the specified function, or None if the target was not a function.
def on_created(self, event):
    """Dispatch a creation event to every delegate implementing it."""
    interested = (d for d in self.delegates if hasattr(d, "on_created"))
    for delegate in interested:
        delegate.on_created(event)
Dispatch the on-created event to all delegates that implement it.
def cmd_all(args):
    """List every pen list and, indented beneath it, its files."""
    for penlist in penStore.data:
        puts(penlist)
        with indent(4, ' -'):
            for penfile in penStore.data[penlist]:
                puts(penfile)
List everything recursively
def execPluginsDialog(self):
    """Show the modal plugins dialog with the registered plugins."""
    pluginsDialog = PluginsDialog(parent=self,
                                  inspectorRegistry=self.argosApplication.inspectorRegistry,
                                  rtiRegistry=self.argosApplication.rtiRegistry)
    pluginsDialog.exec_()
Shows the plugins dialog with the registered plugins
def make_temp(suffix="", prefix="tmp", dir=None):
    """Create a temporary file with a closed stream; delete when done.

    NOTE(review): this generator is meant to be used as a context
    manager -- presumably decorated with ``@contextmanager`` at the
    definition site; confirm.

    :return: a contextmanager yielding the file path.
    """
    temporary = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
    # Close the OS-level handle immediately; callers only need the path.
    os.close(temporary[0])
    try:
        yield temporary[1]
    finally:
        os.remove(temporary[1])
Creates a temporary file with a closed stream and deletes it when done. :return: A contextmanager retrieving the file path.
def split(text: str) -> List[str]:
    """Split a text into tokens on SEPARATOR, dropping blank pieces.

    :param text: the text to split
    :return: tokens
    """
    return [word for word in SEPARATOR.split(text) if word.strip(' \t')]
Split a text into a list of tokens. :param text: the text to split :return: tokens
def asyncPipeStrreplace(context=None, _INPUT=None, conf=None, **kwargs):
    """Asynchronously replace text in each input item (loopable).

    :param _INPUT: twisted Deferred iterable of items or strings
    :param conf: {'RULE': [{'param', 'find', 'replace'}, ...]}
    :return: Deferred generator of replaced strings
    """
    splits = yield asyncGetSplits(_INPUT, conf['RULE'], **kwargs)
    parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
    _OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
    returnValue(iter(_OUTPUT))
A string module that asynchronously replaces text. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : twisted Deferred iterable of items or strings conf : { 'RULE': [ { 'param': {'value': <match type: 1=first, 2=last, 3=every>}, 'find': {'value': <text to find>}, 'replace': {'value': <replacement>} } ] } Returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of replaced strings
def augment_pipeline(pl, head_pipe=None, tail_pipe=None):
    """Augment the pipeline by adding pipes to each non-empty stage.

    Useful for debugging: ``head_pipe`` is prepended to every stage
    except 'source'; ``tail_pipe`` is appended to every stage.

    :param pl: mapping of stage name -> list of pipes (mutated in place)
    :param head_pipe: optional pipe to prepend
    :param tail_pipe: optional pipe to append
    """
    # Fix: ``pl.items()`` replaces the six ``iteritems`` helper (works on
    # both Python 2 and 3) and the redundant ``v and len(v) > 0`` check
    # collapses to a simple truthiness test.
    for stage, pipes in pl.items():
        if not pipes:
            continue
        if head_pipe and stage != 'source':
            pipes.insert(0, head_pipe)
        if tail_pipe:
            pipes.append(tail_pipe)
Augment the pipeline by adding a new pipe section to each stage that has one or more pipes. Can be used for debugging :param pl: :param DebugPipe: :return:
def stringify(data):
    """Recursively turn all dictionary/list leaf values into strings.

    Dicts are converted in place and returned; lists yield new lists;
    any other value is passed through ``smart_text``.
    """
    if isinstance(data, dict):
        for key, value in data.items():
            data[key] = stringify(value)
    elif isinstance(data, list):
        return [stringify(item) for item in data]
    else:
        return smart_text(data)
    return data
Turns all dictionary values into strings
def build_header(dtype):
    """Convert a numpy nested dtype into CSV header column strings.

    Nested field names are joined with '~'; the dtype name is appended
    after ':' unless it is the implicit default (float32 or a byte
    string), and any shape is appended as ':'-joined dimensions.
    """
    header = _build_header(dtype, ())
    h = []
    for col in header:
        name = '~'.join(col[:-2])
        numpytype = col[-2]
        shape = col[-1]
        coldescr = name
        # float32 and |S* are the implicit defaults; omit them.
        if numpytype != 'float32' and not numpytype.startswith('|S'):
            coldescr += ':' + numpytype
        if shape:
            coldescr += ':' + ':'.join(map(str, shape))
        h.append(coldescr)
    return h
Convert a numpy nested dtype into a list of strings suitable as header of csv file. >>> imt_dt = numpy.dtype([('PGA', numpy.float32, 3), ... ('PGV', numpy.float32, 4)]) >>> build_header(imt_dt) ['PGA:3', 'PGV:4'] >>> gmf_dt = numpy.dtype([('A', imt_dt), ('B', imt_dt), ... ('idx', numpy.uint32)]) >>> build_header(gmf_dt) ['A~PGA:3', 'A~PGV:4', 'B~PGA:3', 'B~PGV:4', 'idx:uint32']
def run(self):
    """Find and load step definitions, then run feature files in a
    process pool, aggregating the per-file totals.

    NOTE: Python 2 code (print statement, comma-form except clause).
    """
    try:
        self.loader.find_and_load_step_definitions()
    except StepLoadingError, e:
        print "Error loading step definitions:\n", e
        return

    # NOTE(review): ``results`` is assigned but never used.
    results = []
    if self.explicit_features:
        features_files = self.explicit_features
    else:
        features_files = self.loader.find_feature_files()
    if self.random:
        random.shuffle(features_files)

    if not features_files:
        self.output.print_no_features_found(self.loader.base_dir)
        return

    # Each feature file is run by a worker process in the pool.
    processes = Pool(processes=self.parallelization)
    test_results_it = processes.imap_unordered(
        worker_process,
        [(self, filename) for filename in features_files]
    )

    all_total = ParallelTotalResult()
    for result in test_results_it:
        all_total += result['total']
        # Replay the worker's captured output on our own streams.
        sys.stdout.write(result['stdout'])
        sys.stderr.write(result['stderr'])
    return all_total
Find and load step definitions, and then find and load features under `base_path` specified on constructor
def merge(self, dct=None, **kwargs):
    """Recursively merge a dictionary or kwargs into the current dict.

    Nested dicts merge into existing Configuration values (unless the
    value opts out via ``__merge__``); lists extend in place; anything
    else overwrites.
    """
    if dct is None:
        dct = {}
    if kwargs:
        # NOTE: mutates the caller's mapping when kwargs are given.
        dct.update(**kwargs)
    for key, value in dct.items():
        current = self.get(key)
        can_merge = (
            isinstance(value, dict)
            and isinstance(current, Configuration)
            and getattr(current, "__merge__", True)
        )
        if can_merge:
            self[key].merge(value)
        elif isinstance(value, list) and isinstance(current, list):
            self[key] += value
        else:
            self[key] = value
Recursively merge a dictionary or kwargs into the current dict.
def to_header(self, timestamp=None):
    """Return the auth header as a string.

    Optional fields (timestamp, client, secret key) are appended only
    when present.
    """
    fields = [("sentry_key", self.public_key), ("sentry_version", self.version)]
    if timestamp is not None:
        fields.append(("sentry_timestamp", str(to_timestamp(timestamp))))
    if self.client is not None:
        fields.append(("sentry_client", self.client))
    if self.secret_key is not None:
        fields.append(("sentry_secret", self.secret_key))
    pairs = ("%s=%s" % (name, value) for name, value in fields)
    return u"Sentry " + u", ".join(pairs)
Returns the auth header as a string.
def save_form(self, request, form, change):
    """Set the object's owner to the logged-in user when unset."""
    obj = form.save(commit=False)
    if obj.user_id is None:
        obj.user = request.user
    return super(OwnableAdmin, self).save_form(request, form, change)
Set the object's owner as the logged in user.
def unlock(arguments):
    """Unlock the database by clearing the site lock flag in Redis."""
    import redis
    u = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
    db = redis.StrictRedis.from_url(u)
    db.set('site:lock', 0)
    print("Database unlocked.")
    return 0
Unlock the database.
async def open(self) -> 'Tails':
    """Open the tails blob-storage reader handle.

    :return: current object, for chaining.
    """
    LOGGER.debug('Tails.open >>>')
    self._reader_handle = await blob_storage.open_reader('default', self._tails_config_json)
    LOGGER.debug('Tails.open <<<')
    return self
Open reader handle and return current object. :return: current object
def get_observatory_status(self, observatory_id, status_time=None):
    """Get details of the specified observatory's status.

    :param observatory_id: observatory ID, as returned by
        list_observatories().
    :param status_time: optional point in time for the status query;
        None means 'now'.
    :return: a status dictionary, or None when nothing was found.
    """
    if status_time is None:
        response = requests.get(
            self.base_url + '/obstory/{0}/statusdict'.format(observatory_id))
    else:
        response = requests.get(
            self.base_url + '/obstory/{0}/statusdict/{1}'.format(observatory_id, str(status_time)))
    if response.status_code == 200:
        d = safe_load(response.text)
        if 'status' in d:
            return d['status']
    return None
Get details of the specified camera's status :param string observatory_id: a observatory ID, as returned by list_observatories() :param float status_time: optional, if specified attempts to get the status for the given camera at a particular point in time specified as a datetime instance. This is useful if you want to retrieve the status of the camera at the time a given event or file was produced. If this is None or not specified the time is 'now'. :return: a dictionary, or None if there was either no observatory found.
def _determine_current_dimension_size(self, dim_name, max_size):
    """Determine the current size of a dimension.

    Fixed-size dimensions return ``max_size`` unchanged; for unlimited
    dimensions the HDF5 REFERENCE_LIST attribute is walked to find the
    largest extent among attached variables.
    """
    if self.dimensions[dim_name] is not None:
        # Fixed-size dimension: nothing to compute.
        return max_size

    def _find_dim(h5group, dim):
        # Walk up parent groups until the dimension variable is found.
        if dim not in h5group:
            return _find_dim(h5group.parent, dim)
        return h5group[dim]

    dim_variable = _find_dim(self._h5group, dim_name)
    if "REFERENCE_LIST" not in dim_variable.attrs:
        return max_size

    root = self._h5group["/"]
    for ref, _ in dim_variable.attrs["REFERENCE_LIST"]:
        var = root[ref]
        for i, var_d in enumerate(var.dims):
            name = _name_from_dimension(var_d)
            if name == dim_name:
                max_size = max(var.shape[i], max_size)
    return max_size
Helper method to determine the current size of a dimension.
def token(self):
    """Get (and cache) a Dyn API session token, authenticating on first use.

    :raises CredentialsError: on invalid credentials.
    :raises Failure: on any other API failure.
    :raises AuthenticationError: when the reply carries no token.
    """
    if hasattr(self, '_token'):
        # Cached from a previous successful login.
        return getattr(self, '_token')
    data = json.dumps({'customer_name': self.customer,
                       'user_name': self.username,
                       'password': self.password})
    response = requests.post(
        'https://api2.dynect.net/REST/Session/',
        data=data,
        headers={'Content-Type': 'application/json'})
    content = json.loads(response.content)
    if response.status_code != 200:
        if self.check_error(content, 'failure', 'INVALID_DATA'):
            raise self.CredentialsError(
                self.response_message(content, 'ERROR'))
        raise self.Failure(self.response_message(content, 'ERROR'),
                           'Unhandled failure')
    if 'data' in content and 'token' in content['data']:
        token = content['data']['token']
    else:
        raise self.AuthenticationError(response)
    setattr(self, '_token', token)
    return token
Get token when needed.
def ProduceEventTag(self, event_tag):
    """Produces an event tag.

    Args:
      event_tag (EventTag): event tag.
    """
    writer = self._storage_writer
    writer.AddEventTag(event_tag)
    self.number_of_produced_event_tags = self.number_of_produced_event_tags + 1
    self.last_activity_timestamp = time.time()
Produces an event tag. Args: event_tag (EventTag): event tag.
def t_escaped_BACKSPACE_CHAR(self, t):
    r'\x62'  # PLY lex rule: the docstring IS the regex (matches 'b', 0x62) -- do not edit
    t.lexer.pop_state()
    # Translate the escaped 'b' into an actual backspace character.
    t.value = unichr(0x0008)
    return t
r'\x62
def all(self, array, role = None):
    """Return True when ``array`` is True for all (matching) members.

    ``array`` must have the dimension of the number of persons in the
    simulation; when ``role`` is given, only entity members with that
    role are taken into account.
    """
    return self.reduce(array, reducer = np.logical_and, neutral_element = True, role = role)
Return ``True`` if ``array`` is ``True`` for all members of the entity. ``array`` must have the dimension of the number of persons in the simulation If ``role`` is provided, only the entity member with the given role are taken into account. Example: >>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0] >>> household.all(salaries >= 1800) >>> array([False])
def create_source(self, datapusher=True):
    """Populate the ckan directory from the preloaded image and copy
    who.ini and schema.xml into the conf directory."""
    task.create_source(self.target, self._preload_image(), datapusher)
Populate ckan directory from preloaded image and copy who.ini and schema.xml info conf directory
def create_stream_subscription(self, stream, on_data, timeout=60):
    """Create a new stream subscription over a websocket.

    :param str stream: the name of the stream.
    :param on_data: callback that receives StreamData updates.
    :param float timeout: seconds to wait for the initial reply.
    :return: WebSocketSubscriptionFuture managing the subscription.
    """
    options = rest_pb2.StreamSubscribeRequest()
    options.stream = stream
    manager = WebSocketSubscriptionManager(
        self._client, resource='stream', options=options)
    subscription = WebSocketSubscriptionFuture(manager)
    wrapped_callback = functools.partial(
        _wrap_callback_parse_stream_data, subscription, on_data)
    manager.open(wrapped_callback, instance=self._instance)
    # Block until the server confirms (or the timeout elapses).
    subscription.reply(timeout=timeout)
    return subscription
Create a new stream subscription. :param str stream: The name of the stream. :param on_data: Function that gets called with :class:`.StreamData` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .WebSocketSubscriptionFuture
def subcommand(self, command_name=None):
    """Decorator registering a function as a subcommand.

    The wrapped function's signature supplies the command-line
    arguments and its docstring becomes the subparser description.
    """
    def wrapper(decorated):
        cmd_name = command_name or decorated.__name__
        subparser = self.subparsers.add_parser(cmd_name,
                                               description=decorated.__doc__)
        for args, kwargs in describe_arguments(decorated):
            subparser.add_argument(*args, **kwargs)
        subparser.set_defaults(func=decorated)
        return decorated
    return wrapper
Decorate a function as a subcommand. Use its arguments as the command-line arguments
async def handle_client_get_queue(self, client_addr, _: ClientGetQueue):
    """Handle a ClientGetQueue message: reply with job-queue info.

    Builds summaries of running and waiting jobs (flagging those owned
    by the requesting client) and sends them back as a BackendGetQueue.
    """
    jobs_running = list()
    for backend_job_id, content in self._job_running.items():
        # (job_id, is_mine, agent, course/task, launcher, start, deadline)
        jobs_running.append((content[1].job_id, backend_job_id[0] == client_addr, self._registered_agents[content[0]],
                             content[1].course_id+"/"+content[1].task_id, content[1].launcher,
                             int(content[2]), int(content[2])+content[1].time_limit))
    jobs_waiting = list()
    for job_client_addr, msg in self._waiting_jobs.items():
        if isinstance(msg, ClientNewJob):
            jobs_waiting.append((msg.job_id, job_client_addr[0] == client_addr, msg.course_id+"/"+msg.task_id,
                                 msg.launcher, msg.time_limit))
    await ZMQUtils.send_with_addr(self._client_socket, client_addr, BackendGetQueue(jobs_running, jobs_waiting))
Handles a ClientGetQueue message. Send back info about the job queue
def is_serializable(obj):
    """Return True if ``obj`` conforms to the Serializable protocol.

    Classes are checked as types; instances must be Serializable or
    namedtuple-like (expose ``_asdict``).

    :rtype: bool
    """
    if inspect.isclass(obj):
        return Serializable.is_serializable_type(obj)
    return isinstance(obj, Serializable) or hasattr(obj, '_asdict')
Return `True` if the given object conforms to the Serializable protocol. :rtype: bool
def create_cells(headers, schema_fields, values=None, row_number=None):
    """Create a list of cells from headers, schema fields and values.

    When ``values`` is None the cells take their header as value (used
    for header rows).  Length mismatches produce None fields/values;
    None values in the data become the empty string.
    """
    fillvalue = '_fillvalue'
    is_header_row = (values is None)
    cells = []
    iterator = zip_longest(headers, schema_fields, values or [], fillvalue=fillvalue)
    for column_number, (header, field, value) in enumerate(iterator, start=1):
        if header == fillvalue:
            header = None
        elif is_header_row:
            value = header
        if field == fillvalue:
            field = None
        if value == fillvalue:
            # Short row: mark the cell value as missing.
            value = None
        elif value is None:
            # Empty cell: normalize to empty string.
            value = ''
        cell = create_cell(header, value, field, column_number, row_number)
        cells.append(cell)
    return cells
Create list of cells from headers, fields and values. Args: headers (List[str]): The headers values. schema_fields (List[tableschema.field.Field]): The tableschema fields. values (List[Any], optional): The cells values. If not specified, the created cells will have the same values as their corresponding headers. This is useful for specifying headers cells. If the list has any `None` values, as is the case on empty cells, the resulting Cell will have an empty string value. If the `values` list has a different length than the `headers`, the resulting Cell will have value `None`. row_number (int, optional): The row number. Returns: List[dict]: List of cells.
def parameters(self):
    """Get the tool parameters.

    :return: list of dicts with key/value plus flags indicating whether
        the original value was a function (pickled) or a set (listified).
    """
    parameters = []
    for k, v in self.__dict__.items():
        if k.startswith("_"):
            # Private attributes are not parameters.
            continue
        is_function = False
        is_set = False
        if callable(v):
            value = pickle.dumps(func_dump(v))
            is_function = True
        elif isinstance(v, set):
            value = list(v)
            is_set = True
        else:
            value = v
        parameters.append(dict(
            key=k,
            value=value,
            is_function=is_function,
            is_set=is_set
        ))
    return parameters
Get the tool parameters :return: The tool parameters along with additional information (whether they are functions or sets)
def downloadArchiveAction(self, request, queryset):
    """Download the selected submissions as a zip archive."""
    output = io.BytesIO()
    z = zipfile.ZipFile(output, 'w')
    for sub in queryset:
        sub.add_to_zipfile(z)
    z.close()
    # Rewind so HttpResponse streams from the start of the buffer.
    output.seek(0)
    response = HttpResponse(
        output, content_type="application/x-zip-compressed")
    response['Content-Disposition'] = 'attachment; filename=submissions.zip'
    return response
Download selected submissions as archive, for targeted correction.
def _add_missing_jwt_permission_classes(self, view_class):
    """Ensure a JWT-authenticating view declares the required permission
    classes, appending (and warning about) any that are missing.

    Composite permissions exposing ``perms_or_conds`` are flattened into
    their leaf permission classes before the membership check.
    """
    pending = list(getattr(view_class, 'permission_classes', []))
    flattened = []
    while pending:
        candidate = pending.pop()
        if hasattr(candidate, 'perms_or_conds'):
            # Composite condition: queue its children instead.
            for child in getattr(candidate, 'perms_or_conds', []):
                pending.append(child)
        else:
            flattened.append(candidate)
    missing = []
    for required in self._required_permission_classes:
        if not self._includes_base_class(flattened, required):
            log.warning(
                u"The view %s allows Jwt Authentication but needs to include the %s permission class (adding it for you)",
                view_class.__name__,
                required.__name__,
            )
            missing.append(required)
    if missing:
        view_class.permission_classes += tuple(missing)
Adds permissions classes that should exist for Jwt based authentication, if needed.
def show_all_categories(call=None):
    """Return a dict of all available category codes on the SoftLayer
    provider.  Must be called with ``-f`` or ``--function``.

    .. versionadded:: 2016.3.0
    """
    if call == 'action':
        raise SaltCloudSystemExit(
            'The show_all_categories function must be called with -f or --function.'
        )
    conn = get_conn(service='SoftLayer_Product_Package')
    codes = [category['categoryCode'] for category in conn.getCategories(id=50)]
    return {'category_codes': codes}
Return a dict of all available categories on the cloud provider. .. versionadded:: 2016.3.0
def batch_update(self, values, w=1):
    """Feed an iterable of equally-weighted points into the t-digest,
    compressing once at the end."""
    for value in values:
        self.update(value, w)
    self.compress()
Update the t-digest with an iterable of values. This assumes all points have the same weight.
def _extract_header(time_series):
    """Return a copy of ``time_series`` carrying only the header fields
    (metric, resource, kinds) and no points."""
    header_fields = dict(
        metric=time_series.metric,
        resource=time_series.resource,
        metric_kind=time_series.metric_kind,
        value_type=time_series.value_type,
    )
    return TimeSeries(**header_fields)
Return a copy of time_series with the points removed.
def validate_wrap(self, value):
    """Check that ``value`` is an instance of this field's document type.

    Field-level validation has already run by the time a document is
    wrapped, so only the type check is needed here.
    """
    if isinstance(value, self.type):
        return
    self._fail_validation_type(value, self.type)
Checks that ``value`` is an instance of ``DocumentField.type``. if it is, then validation on its fields has already been done and no further validation is needed.
def _apply_memory_config(config_spec, memory):
    """Apply memory settings from ``memory`` onto ``config_spec``.

    config_spec
        vim.vm.ConfigSpec object to mutate.
    memory
        Dict that may contain ``size``/``unit`` (kb, mb or gb), a
        ``reservation_max`` flag and a ``hotadd`` flag.
    """
    log.trace('Configuring virtual machine memory '
              'settings memory=%s', memory)
    if 'size' in memory and 'unit' in memory:
        unit = memory['unit'].lower()
        try:
            if unit == 'kb':
                memory_mb = memory['size'] / 1024
            elif unit == 'mb':
                memory_mb = memory['size']
            elif unit == 'gb':
                memory_mb = int(float(memory['size']) * 1024)
            else:
                # Fix: an unrecognised unit previously left memory_mb
                # unbound, raising NameError below.  Fall back to coercing
                # the raw size, matching the exception fallback.
                memory_mb = int(memory['size'])
        except (TypeError, ValueError):
            # Non-numeric size (e.g. a numeric string): coerce to int MB.
            memory_mb = int(memory['size'])
        config_spec.memoryMB = memory_mb
    if 'reservation_max' in memory:
        config_spec.memoryReservationLockedToMax = memory['reservation_max']
    if 'hotadd' in memory:
        config_spec.memoryHotAddEnabled = memory['hotadd']
Sets memory size to the given value config_spec vm.ConfigSpec object memory Memory size and unit
def make_sequence(content, error=None, version=None, mode=None, mask=None,
                  encoding=None, boost_error=True, symbol_count=None):
    """Create a (Structured Append) sequence of QR Codes for ``content``.

    If the content fits into one QR Code and neither ``version`` nor
    ``symbol_count`` is given, the sequence may hold a single non-Structured
    -Append QR Code.  See :py:func:`make` for the other parameters.

    :param int symbol_count: Number of symbols (max. 16).
    :rtype: QRCodeSequence
    """
    segments = encoder.encode_sequence(
        content, error=error, version=version, mode=mode, mask=mask,
        encoding=encoding, boost_error=boost_error,
        symbol_count=symbol_count)
    return QRCodeSequence(map(QRCode, segments))
\ Creates a sequence of QR Codes. If the content fits into one QR Code and neither ``version`` nor ``symbol_count`` is provided, this function may return a sequence with one QR Code which does not use the Structured Append mode. Otherwise a sequence of 2 .. n (max. n = 16) QR Codes is returned which use the Structured Append mode. The Structured Append mode allows splitting the content over a number (max. 16) of QR Codes. The Structured Append mode isn't available for Micro QR Codes, therefore the returned sequence contains QR Codes only. Since this function returns an iterable object, it may be used as follows: .. code-block:: python for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)): qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue') The returned number of QR Codes is determined by the `version` or `symbol_count` parameter. See :py:func:`make` for a description of the other parameters. :param int symbol_count: Number of symbols. :rtype: QRCodeSequence
def to_networkx(cyjs, directed=True):
    """Convert a Cytoscape.js-style JSON object into a NetworkX graph.

    :param cyjs: Cytoscape.js-style dict with network data and elements.
    :param directed: When True (default) build a MultiDiGraph, otherwise
        a MultiGraph.
    :return: The populated NetworkX graph.
    """
    if directed:
        g = nx.MultiDiGraph()
    else:
        g = nx.MultiGraph()
    # Copy network-level attributes onto the graph object itself.
    network_data = cyjs[DATA]
    if network_data is not None:
        for key in network_data.keys():
            g.graph[key] = network_data[key]
    nodes = cyjs[ELEMENTS][NODES]
    edges = cyjs[ELEMENTS][EDGES]
    for node in nodes:
        data = node[DATA]
        # NOTE(review): `attr_dict` was removed from add_node/add_edge in
        # NetworkX 2.0 -- confirm the pinned networkx version is < 2.0.
        g.add_node(data[ID], attr_dict=data)
    for edge in edges:
        data = edge[DATA]
        source = data[SOURCE]
        target = data[TARGET]
        g.add_edge(source, target, attr_dict=data)
    return g
Convert Cytoscape.js-style JSON object into NetworkX object. By default, data will be handles as a directed graph.
def cli(obj, environment, service, resource, event, group, tags, customer, start, duration, text, delete):
    """Suppress alerts for a specified duration based on alert attributes,
    or delete an existing blackout when ``delete`` is given.
    """
    client = obj['client']
    if delete:
        client.delete_blackout(delete)
    else:
        # Environment is mandatory when creating a blackout.
        if not environment:
            raise click.UsageError('Missing option "--environment" / "-E".')
        try:
            blackout = client.create_blackout(
                environment=environment,
                service=service,
                resource=resource,
                event=event,
                group=group,
                tags=tags,
                customer=customer,
                start=start,
                duration=duration,
                text=text
            )
        except Exception as e:
            click.echo('ERROR: {}'.format(e))
            sys.exit(1)
        # Echo the new blackout id so the caller can reference/delete it.
        click.echo(blackout.id)
Suppress alerts for specified duration based on alert attributes.
def head(self, uuid):
    """Issue a HEAD request for a single thread identified by ``uuid``."""
    endpoint = "%(base)s/%(uuid)s" % {
        'base': self.local_base_url,
        'uuid': uuid
    }
    return self.core.head(endpoint)
Get one thread.
def ReplaceTrigger(self, trigger_link, trigger, options=None):
    """Replaces a trigger and returns it.

    :param str trigger_link: The link to the trigger.
    :param dict trigger: The new trigger definition.
    :param dict options: The request options for the request.
    :return: The replaced Trigger.
    :rtype: dict
    """
    if options is None:
        options = {}
    CosmosClient.__ValidateResource(trigger)
    # Work on a copy so the caller's dict is not mutated.
    trigger = trigger.copy()
    # Serialize the executable body; 'serverScript' takes precedence over
    # an already-present 'body'.
    if trigger.get('serverScript'):
        trigger['body'] = str(trigger['serverScript'])
    elif trigger.get('body'):
        trigger['body'] = str(trigger['body'])
    path = base.GetPathFromLink(trigger_link)
    trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
    return self.Replace(trigger, path, 'triggers', trigger_id, None, options)
Replaces a trigger and returns it. :param str trigger_link: The link to the trigger. :param dict trigger: :param dict options: The request options for the request. :return: The replaced Trigger. :rtype: dict
def remove_udp_port(self, port):
    """Release a UDP port number previously associated with this project.

    :param port: UDP port number; silently ignored when not tracked.
    """
    if port not in self._used_udp_ports:
        return
    self._used_udp_ports.remove(port)
Removes an associated UDP port number from this project. :param port: UDP port number
def deleteEdge(self, edge, waitForSync = False) :
    """Remove ``edge`` from the graph; returns True on success, raises
    DeletionError otherwise."""
    url = "%s/edge/%s" % (self.URL, edge._id)
    response = self.connection.session.delete(url, params = {'waitForSync' : waitForSync})
    # The server answers 200 (synced) or 202 (accepted) on success.
    if response.status_code in (200, 202):
        return True
    raise DeletionError("Unable to delete edge, %s" % edge._id, response.json())
removes an edge from the graph
def to_file_object(self, name, out_dir):
    """Dump this list to a pickle file and return a File reference to it.

    Parameters
    ----------
    name : str
        Unique identifier of this file.
    out_dir : path
        Directory to place the file in.

    Returns
    -------
    file : File
        Reference to the written pickle file.
    """
    make_analysis_dir(out_dir)
    covered_times = self.get_times_covered_by_files()
    file_ref = File('ALL', name, covered_times,
                    extension='.pkl', directory=out_dir)
    self.dump(file_ref.storage_path)
    return file_ref
Dump to a pickle file and return an File object reference of this list Parameters ---------- name : str An identifier of this file. Needs to be unique. out_dir : path path to place this file Returns ------- file : AhopeFile
def get_closest_points(self, max_distance=None, origin_index=0, origin_raw=None):
    """Return the destinations reachable from a given origin.

    The origin is selected by raw key (``origin_raw``) or by index into
    ``self.origins``.  With ``max_distance`` set, destinations farther
    away, or with no route ('ZERO_RESULTS'), are dropped.

    :return: dict mapping destination -> distance value.
    """
    if not self.dict_response['distance']['value']:
        self.get_distance_values()
    if origin_raw:
        key = origin_raw
    else:
        key = self.origins[origin_index]
    origin = copy.deepcopy(self.dict_response['distance']['value'][key])
    if max_distance is not None:
        # Fixes: .items() replaces the Python-2-only .iteritems(), and the
        # 'ZERO_RESULTS' sentinel is tested first so the numeric comparison
        # never runs on a string (TypeError on Python 3).  Snapshot the
        # items so deletion during iteration is safe.
        for destination, distance in list(origin.items()):
            if distance == 'ZERO_RESULTS' or distance > max_distance:
                del origin[destination]
    return origin
Get closest points to a given origin. Returns a dict mapping each destination to its distance value; destinations farther than ``max_distance`` or without a route are removed.
def set_power_state(self, is_on, bulb=ALL_BULBS, timeout=None):
    """Set the power state of one or more bulbs.

    :param is_on: True to switch on, False to switch off.
    :param bulb: Target bulb(s); defaults to all bulbs.
    :param timeout: Optional wait timeout passed to the blocking helper.
    :return: The cached power state after the update.
    """
    # NOTE(review): _blocking presumably waits on light_state_event so
    # power_state is refreshed before we return -- confirm its contract.
    with _blocking(self.lock, self.power_state, self.light_state_event, timeout):
        # Payload is a 2-byte on/off flag: '\x00\x01' = on, '\x00\x00' = off.
        self.send(REQ_SET_POWER_STATE, bulb, '2s', '\x00\x01' if is_on else '\x00\x00')
        # Ask for fresh light state so the cached value gets updated.
        self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '')
    return self.power_state
Sets the power state of one or more bulbs.
def delete(self, ids):
    """Delete object group permissions general by their ids.

    :param ids: Identifiers of object group permissions general.
    :return: None
    """
    url = build_uri_with_ids('api/v3/object-group-perm-general/%s/', ids)
    return super(ApiObjectGroupPermissionGeneral, self).delete(url)
Method to delete object group permissions general by their ids :param ids: Identifiers of object group permissions general :return: None
def get_manager_cmd(self):
    """Return the absolute path of our notebook daemon script."""
    here = os.path.dirname(__file__)
    cmd = os.path.abspath(os.path.join(here, "server", "notebook_daemon.py"))
    # The daemon script ships with the package; its absence is a bug.
    assert os.path.exists(cmd)
    return cmd
Get our daemon script path.
def _flatten_mesh(self, Xs, term): n = Xs[0].size if self.terms[term].istensor: terms = self.terms[term] else: terms = [self.terms[term]] X = np.zeros((n, self.statistics_['m_features'])) for term_, x in zip(terms, Xs): X[:, term_.feature] = x.ravel() return X
flatten the mesh and distribute into a feature matrix
def create_room(self, alias=None, is_public=False, invitees=None):
    """Create a new room on the homeserver and return its Room wrapper.

    Args:
        alias (str): The canonical_alias of the room.
        is_public (bool): The public/private visibility of the room.
        invitees (str[]): A set of user ids to invite into the room.

    Returns:
        Room

    Raises:
        MatrixRequestError
    """
    response = self.api.create_room(
        alias=alias, is_public=is_public, invitees=invitees)
    room_id = response["room_id"]
    return self._mkroom(room_id)
Create a new room on the homeserver. Args: alias (str): The canonical_alias of the room. is_public (bool): The public/private visibility of the room. invitees (str[]): A set of user ids to invite into the room. Returns: Room Raises: MatrixRequestError
def get_datarect(self):
    """Get the approximate bounding box of the displayed image.

    Returns
    -------
    rect : tuple
        ``(x1, y1, x2, y2)`` in data coordinates.
    """
    return (self._org_x1, self._org_y1, self._org_x2, self._org_y2)
Get the approximate bounding box of the displayed image. Returns ------- rect : tuple Bounding box in data coordinates in the form of ``(x1, y1, x2, y2)``.
def findLinksRel(link_attrs_list, target_rel):
    """Return only the link attribute dicts whose relationship matches
    ``target_rel``."""
    return [attrs for attrs in link_attrs_list
            if linkHasRel(attrs, target_rel)]
Filter the list of link attributes on whether it has target_rel as a relationship.
def _parse_node_data(self, data): data = data or '' if self.numbermode == 'basic': return self._try_parse_basic_number(data) elif self.numbermode == 'decimal': return self._try_parse_decimal(data) else: return data
Parse the value of a node. Override to provide your own parsing.
def get_avatar_upload_to(self, filename):
    """Return the path to upload the associated avatar to.

    The original filename is replaced by a random UUID (dashes stripped)
    while keeping its extension.
    """
    _, ext = os.path.splitext(filename)
    random_name = str(uuid.uuid4()).replace('-', '')
    return os.path.join(
        machina_settings.PROFILE_AVATAR_UPLOAD_TO,
        '{id}{ext}'.format(id=random_name, ext=ext),
    )
Returns the path to upload the associated avatar to.
def add(self, spec):
    """Merge the limitations of ``spec`` into this spec, skipping any
    already present.

    Args:
        spec (PackageSpec): another spec.
    """
    for limitation in spec.limit_to:
        if limitation not in self.limit_to:
            self.limit_to.append(limitation)
Add limitations of given spec to self's. Args: spec (PackageSpec): another spec.
def _do_cross_validation(self, clf, data, task):
    """Run voxelwise cross validation based on correlation vectors.

    clf: the classifier used in cross validation.
    data: 3D numpy array of per-voxel data (precomputed-kernel shape or
        correlation shape depending on the classifier).
    task: tuple (start_voxel_id, num_processed_voxels) naming the voxels
        assigned to this worker.

    Returns
    -------
    results: list of (voxel_id, accuracy) tuples for the assigned voxels.
    """
    time1 = time.time()
    # Only the precomputed-kernel SVC path is parallelised; other
    # classifiers run sequentially in-process.
    if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed'\
            and self.use_multiprocessing:
        inlist = [(clf, i + task[0], self.num_folds, data[i, :, :],
                   self.labels) for i in range(task[1])]
        with multiprocessing.Pool(self.process_num) as pool:
            results = list(pool.starmap(_cross_validation_for_one_voxel,
                                        inlist))
    else:
        results = []
        for i in range(task[1]):
            result = _cross_validation_for_one_voxel(clf, i + task[0],
                                                     self.num_folds,
                                                     data[i, :, :],
                                                     self.labels)
            results.append(result)
    time2 = time.time()
    logger.debug(
        'cross validation for %d voxels, takes %.2f s' %
        (task[1], (time2 - time1))
    )
    return results
Run voxelwise cross validation based on correlation vectors. clf: classification function the classifier to be used in cross validation data: 3D numpy array If using sklearn.svm.SVC with precomputed kernel, it is in shape [num_processed_voxels, num_epochs, num_epochs]; otherwise it is the input argument corr, in shape [num_processed_voxels, num_epochs, num_voxels] task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- results: list of tuple (voxel_id, accuracy) the accuracy numbers of all voxels, in accuracy descending order the length of array equals the number of assigned voxels
def read_vocab_file(file_path):
    """Read a vocab file into memory.

    Args:
        file_path: Each line of the vocab is "token,example_count".

    Returns:
        Two lists: the vocab tokens and their example counts.
    """
    with file_io.FileIO(file_path, 'r') as f:
        frame = pd.read_csv(
            f, header=None, names=['vocab', 'count'],
            dtype=str, na_filter=False)
    vocab = frame['vocab'].tolist()
    example_counts = frame['count'].astype(int).tolist()
    return vocab, example_counts
Reads a vocab file to memory. Args: file_path: Each line of the vocab is in the form "token,example_count" Returns: Two lists, one for the vocab, and one for just the example counts.
def load_minters_entry_point_group(self, entry_point_group):
    """Register every minter advertised under an entry point group.

    :param entry_point_group: The entrypoint group name.
    """
    for entry_point in pkg_resources.iter_entry_points(group=entry_point_group):
        self.register_minter(entry_point.name, entry_point.load())
Load minters from an entry point group. :param entry_point_group: The entrypoint group.
def addTextErr(self, text):
    """Append ``text`` rendered in the error (red) colour."""
    self._currentColor = self._red
    self.addText(text)
add red text
def _at(self, t):
    """Compute this satellite's GCRS position and velocity at time ``t``."""
    rITRF, vITRF, error = self.ITRF_position_velocity_error(t)
    rGCRS, vGCRS = ITRF_to_GCRS2(t, rITRF, vITRF)
    # NOTE(review): rGCRS is returned twice -- apparently filling both the
    # position and "center" slots of the caller's expected 4-tuple; confirm
    # against the base class contract.
    return rGCRS, vGCRS, rGCRS, error
Compute this satellite's GCRS position and velocity at time `t`.
def genTopLevelDirCMakeListsFile(self, working_path, subdirs, files, cfg):
    """Generate the top-level CMakeLists.txt for ``working_path``.

    :param working_path: current working directory
    :param subdirs: subdirectories of the working directory
    :param files: files in the working directory
    :param cfg: template configuration
    :return: the full path name of the generated CMakeLists.txt
    """
    out_path = os.path.join(working_path, 'CMakeLists.txt')
    template = self.envJinja.get_template(self.TOP_LEVEL_CMAKELISTS_JINJA2_TEMPLATE)
    context = {
        'project_name': os.path.basename(os.path.abspath(working_path)),
        'subdirs': subdirs,
        'files': files,
        'cfg': cfg,
    }
    rendered = template.render(context)
    with open(out_path, 'w') as f:
        f.write(rendered)
    return out_path
Generate top level CMakeLists.txt. :param working_path: current working directory :param subdirs: a list of subdirectories of current working directory. :param files: a list of files in current working directory. :return: the full path name of generated CMakeLists.txt.
def _dirint_coeffs(times, kt_prime, solar_zenith, w, delta_kt_prime):
    """Determine the DISC to DIRINT multiplier `dirint_coeffs`.

    dni = disc_out['dni'] * dirint_coeffs

    A bin index of 0 marks an input outside the parameterization, and the
    coefficient there is NaN.
    """
    kt_prime_bin, zenith_bin, w_bin, delta_kt_prime_bin = _dirint_bins(
        times, kt_prime, solar_zenith, w, delta_kt_prime)
    lookup = _get_dirint_coeffs()
    coeffs = lookup[kt_prime_bin - 1, zenith_bin - 1,
                    delta_kt_prime_bin - 1, w_bin - 1]
    invalid = ((kt_prime_bin == 0) | (zenith_bin == 0) |
               (w_bin == 0) | (delta_kt_prime_bin == 0))
    return np.where(invalid, np.nan, coeffs)
Determine the DISC to DIRINT multiplier `dirint_coeffs`. dni = disc_out['dni'] * dirint_coeffs Parameters ---------- times : pd.DatetimeIndex kt_prime : Zenith-independent clearness index solar_zenith : Solar zenith angle w : precipitable water estimated from surface dew-point temperature delta_kt_prime : stability index Returns ------- dirint_coeffs : array-like
def success(self, cmd, desc=''):
    """Render ``cmd``/``desc`` using the success colour style."""
    color = self.success_color
    return self._label_desc(cmd, desc, color)
Style for a success message.
def get_instance_assignment(self, ctx):
    """Build the integer expression used by any of the four instance
    assignment operators ('=', '@', '+=', '%=').

    Returns None when ``ctx`` is None.
    """
    if ctx is None:
        return None
    visitor = ExprVisitor(self.compiler)
    expr = visitor.visit(ctx.expr())
    # Wrap in a cast so the assignment always receives an int; the
    # SourceRef ties any cast error back to the operator token.
    expr = expressions.AssignmentCast(self.compiler.env, SourceRef.from_antlr(ctx.op), expr, int)
    expr.predict_type()
    return expr
Gets the integer expression in any of the four instance assignment operators ('=' '@' '+=' '%=')
def download_as_string(self, client=None, start=None, end=None):
    """Download the contents of this blob and return them as bytes.

    :type client: Client or NoneType
    :param client: Optional client; falls back to the blob bucket's client.
    :type start: int
    :param start: Optional first byte of a range to download.
    :type end: int
    :param end: Optional last byte of a range to download.
    :rtype: bytes
    :raises: google.cloud.exceptions.NotFound
    """
    buffer = BytesIO()
    self.download_to_file(buffer, client=client, start=start, end=end)
    return buffer.getvalue()
Download the contents of this blob as a string. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. :type start: int :param start: Optional, the first byte in a range to be downloaded. :type end: int :param end: Optional, The last byte in a range to be downloaded. :rtype: bytes :returns: The data stored in this blob. :raises: :class:`google.cloud.exceptions.NotFound`
def get_state_machine_selection(self):
    """Getter for the state machine selection.

    :return: (selection object or None, set of selected states)
    """
    model = self._selected_sm_model
    if not model:
        return None, set()
    return model.selection, model.selection.states
Getter state machine selection :return: selection object, filtered set of selected states :rtype: rafcon.gui.selection.Selection, set
def get_title(self):
    """Return the title string literal used in the template.

    Falls back to a prettified slot name when no explicit title was given;
    the title is used in the admin screens.
    """
    try:
        return extract_literal(self.meta_kwargs['title'])
    except KeyError:
        slot = self.get_slot()
    if slot is None:
        return None
    return slot.replace('_', ' ').title()
Return the string literal that is used in the template. The title is used in the admin screens.
def open(
    bucket_id,
    key_id,
    mode,
    buffer_size=DEFAULT_BUFFER_SIZE,
    min_part_size=DEFAULT_MIN_PART_SIZE,
    session=None,
    resource_kwargs=None,
    multipart_upload_kwargs=None,
):
    """Open an S3 object for reading ("rb") or writing ("wb").

    Parameters
    ----------
    bucket_id: str
        Name of the bucket the object resides in.
    key_id: str
        Name of the key within the bucket.
    mode: str
        Either "rb" or "wb".
    buffer_size: int, optional
        Buffer size for I/O.
    min_part_size: int, optional
        Minimum multipart-upload part size (writing only).
    session: object, optional
        The S3 session to use when working with boto3.
    resource_kwargs: dict, optional
        Keyword arguments for the S3 resource.
    multipart_upload_kwargs: dict, optional
        Extra parameters for initiate_multipart_upload (writing only).
    """
    logger.debug('%r', locals())
    if mode not in MODES:
        raise NotImplementedError('bad mode: %r expected one of %r' % (mode, MODES))
    resource_kwargs = {} if resource_kwargs is None else resource_kwargs
    multipart_upload_kwargs = ({} if multipart_upload_kwargs is None
                               else multipart_upload_kwargs)
    if mode == READ_BINARY:
        return SeekableBufferedInputBase(
            bucket_id,
            key_id,
            buffer_size=buffer_size,
            session=session,
            resource_kwargs=resource_kwargs,
        )
    if mode == WRITE_BINARY:
        return BufferedOutputBase(
            bucket_id,
            key_id,
            min_part_size=min_part_size,
            session=session,
            multipart_upload_kwargs=multipart_upload_kwargs,
            resource_kwargs=resource_kwargs,
        )
    # Unreachable: mode was validated against MODES above.
    assert False, 'unexpected mode: %r' % mode
Open an S3 object for reading or writing. Parameters ---------- bucket_id: str The name of the bucket this object resides in. key_id: str The name of the key within the bucket. mode: str The mode for opening the object. Must be either "rb" or "wb". buffer_size: int, optional The buffer size to use when performing I/O. min_part_size: int, optional The minimum part size for multipart uploads. For writing only. session: object, optional The S3 session to use when working with boto3. resource_kwargs: dict, optional Keyword arguments to use when accessing the S3 resource for reading or writing. multipart_upload_kwargs: dict, optional Additional parameters to pass to boto3's initiate_multipart_upload function. For writing only.