code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def track_model(model):
    """Perform designated transformations on ``model`` when it saves.

    Connects a ``post_save`` receiver that calls
    ``simpleimages.utils.perform_transformation`` with the saved instance
    and its ``update_fields``.  Returns a callable that disconnects the
    receiver again.
    """
    @receiver(post_save, sender=model, weak=False, dispatch_uid='simpleimages')
    def transform_signal(sender, **kwargs):
        simpleimages.utils.perform_transformation(
            kwargs['instance'], kwargs['update_fields']
        )

    def disconnect():
        post_save.disconnect(sender=model, dispatch_uid='simpleimages')

    return disconnect
Perform designated transformations on a model when it saves. Calls :py:func:`~simpleimages.utils.perform_transformation` every time the model saves, using :py:data:`django.db.models.signals.post_save`. It uses the ``update_fields`` kwarg to tell which fields it should transform.
def mock_method(self, interface, dbus_method, in_signature, *args, **kwargs):
    """Master mock method; this gets "instantiated" in AddMethod().

    Normalizes *args* by round-tripping them through a D-Bus message (so
    they are coerced as on the wire), logs the call, emits
    ``MethodCalled``, then executes the registered code snippet and
    returns its ``ret`` value if one was set.
    """
    if in_signature == '' and len(args) > 0:
        raise TypeError('Fewer items found in D-Bus signature than in Python arguments')

    # Marshal the arguments through a real message to validate them
    # against the declared signature.
    message = dbus.connection.MethodCallMessage('a.b', '/a', 'a.b', 'a')
    message.append(signature=in_signature, *args)
    args = message.get_args_list()

    self.log(dbus_method + self.format_args(args))
    self.call_log.append((int(time.time()), str(dbus_method), args))
    self.MethodCalled(dbus_method, args)

    code = self.methods[interface][dbus_method][2]
    if code and isinstance(code, types.FunctionType):
        return code(self, *args)
    elif code:
        # String snippet: execute it and pick up the 'ret' local, if any.
        loc = locals().copy()
        exec(code, globals(), loc)
        if 'ret' in loc:
            return loc['ret']
Master mock method. This gets "instantiated" in AddMethod(). Execute the code snippet of the method and return the "ret" variable if it was set.
def mean(name, add, match):
    """Accept a numeric value from the matched events and store a running
    average of the values in the given register.

    If the specified value is not numeric it will be skipped.

    USAGE:

    .. code-block:: yaml

        foo:
          reg.mean:
            - add: data_field
            - match: my/custom/event
    """
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}
    if name not in __reg__:
        __reg__[name] = {}
        __reg__[name]['val'] = 0
        __reg__[name]['total'] = 0
        __reg__[name]['count'] = 0
    for event in __events__:
        try:
            event_data = event['data']['data']
        except KeyError:
            event_data = event['data']
        if salt.utils.stringutils.expr_match(event['tag'], match):
            if add in event_data:
                try:
                    # BUG FIX: convert the tracked field's value, not the
                    # whole event payload dict.  int(event_data) raised an
                    # uncaught TypeError, so no event was ever counted.
                    comp = int(event_data[add])
                except (TypeError, ValueError):
                    # Non-numeric values are skipped, per the contract.
                    continue
                __reg__[name]['total'] += comp
                __reg__[name]['count'] += 1
                __reg__[name]['val'] = \
                    __reg__[name]['total'] / __reg__[name]['count']
    return ret
Accept a numeric value from the matched events and store a running average of the values in the given register. If the specified value is not numeric it will be skipped USAGE: .. code-block:: yaml foo: reg.mean: - add: data_field - match: my/custom/event
def slides(self):
    """|Slides| object containing the slides in this presentation."""
    sld_id_lst = self._element.get_or_add_sldIdLst()
    # Keep slide part names in sync with their sequence in the id list.
    self.part.rename_slide_parts([sld_id.rId for sld_id in sld_id_lst])
    return Slides(sld_id_lst, self)
|Slides| object containing the slides in this presentation.
def index(self, text, terms=None, **kwargs):
    """Index all term-pair Bray-Curtis distances.

    Args:
        text (Text): The source text.
        terms (list): Terms to index; defaults to all terms in ``text``.
    """
    self.clear()
    selected = terms or text.terms.keys()
    pairs = combinations(selected, 2)
    total = comb(len(selected), 2)
    for t1, t2 in bar(pairs, expected_size=total, every=1000):
        self.set_pair(t1, t2, text.score_braycurtis(t1, t2, **kwargs))
Index all term pair distances. Args: text (Text): The source text. terms (list): Terms to index.
def _get_record_by_label(xapi, rectype, label):
    """Internal: return the full record for the object with *label*,
    or ``False`` when no matching UUID exists."""
    uuid = _get_label_uuid(xapi, rectype, label)
    if uuid is False:
        return False
    record_api = getattr(xapi, rectype)
    return record_api.get_record(uuid)
Internal, returns a full record for uuid
def first(self):
    """Return the first ICachableItem in the ICachableSource, or ``None``
    if the source yields no items."""
    csvsource = CSVSource(self.source, self.factory, self.key())
    try:
        # Use the builtin next() instead of the Py2-only .next() method,
        # which does not exist on Python 3 iterators.
        return next(iter(csvsource.items()))
    except StopIteration:
        return None
Returns the first ICachableItem in the ICachableSource
def _convert_timedelta_to_seconds(timedelta): days_in_seconds = timedelta.days * 24 * 3600 return int((timedelta.microseconds + (timedelta.seconds + days_in_seconds) * 10 ** 6) / 10 ** 6)
Returns the total seconds calculated from the supplied timedelta. (Function provided to enable running on Python 2.6 which lacks timedelta.total_seconds()).
def _is_installation_local(name):
    """Check whether the distribution *name* lives inside the current
    Python installation (``sys.prefix``).

    Used to distinguish packages seen by a virtual environment: a venv may
    see global packages, but we don't want to mess with them.
    """
    location = os.path.normcase(pkg_resources.working_set.by_key[name].location)
    prefix = os.path.normcase(sys.prefix)
    return os.path.commonprefix([location, prefix]) == prefix
Check whether the distribution is in the current Python installation. This is used to distinguish packages seen by a virtual environment. A venv may be able to see global packages, but we don't want to mess with them.
def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
    """Issue a bonus payment from your account to a Worker.

    The Worker must have submitted results for one of your HITs and had
    them approved or rejected.  This payment is separate from the HIT
    reward.  *bonus_price* must be a Price instance.
    """
    params = bonus_price.get_as_params('BonusAmount', 1)
    params.update({
        'WorkerId': worker_id,
        'AssignmentId': assignment_id,
        'Reason': reason,
    })
    return self._process_request('GrantBonus', params)
Issues a payment of money from your account to a Worker. To be eligible for a bonus, the Worker must have submitted results for one of your HITs, and have had those results approved or rejected. This payment happens separately from the reward you pay to the Worker when you approve the Worker's assignment. The Bonus must be passed in as an instance of the Price object.
def get_docs(self, vocab):
    """Recover Doc objects from the stored annotations, using *vocab*."""
    # Intern every stored string so the vocab can resolve the hashes.
    for string in self.strings:
        vocab[string]
    orth_col = self.attrs.index(ORTH)
    for tokens, spaces in zip(self.tokens, self.spaces):
        words = [vocab.strings[orth] for orth in tokens[:, orth_col]]
        doc = Doc(vocab, words=words, spaces=spaces)
        yield doc.from_array(self.attrs, tokens)
Recover Doc objects from the annotations, using the given vocab.
def _find_parent(self, path_elements):
    """Recurse up the tree of FileSetStates until one is found whose
    ``path_elements`` is a prefix of *path_elements*."""
    if not self.path_elements:
        # The root state matches everything.
        return self
    if self.path_elements == path_elements[:len(self.path_elements)]:
        return self
    return self.parent._find_parent(path_elements)
Recurse up the tree of FileSetStates until we find a parent, i.e. one whose path_elements member is the start of the path_element argument
def decode_mysql_string_literal(text):
    """Strip the surrounding quotes from a MySQL string literal and decode
    its escape sequences.

    :param text: MySQL string literal, quotes included.
    :type text: str
    :return: the literal with quotes removed and escapes decoded.
    :rtype: str
    """
    assert text.startswith("'")
    assert text.endswith("'")
    return MYSQL_STRING_ESCAPE_SEQUENCE_PATTERN.sub(
        unescape_single_character,
        text[1:-1],
    )
Removes quotes and decodes escape sequences from given MySQL string literal returning the result. :param text: MySQL string literal, with the quotes still included. :type text: str :return: Given string literal with quotes removed and escape sequences decoded. :rtype: str
def _login(login_func, *args):
    """Helper for logging in; avoids duplicated code across the login
    functions.  Returns a ``User`` built from the response JSON."""
    response = login_func(*args)
    _fail_if_contains_errors(response)
    return User(response.json())
A helper function for logging in. Its purpose is to avoid duplicate code in the login functions.
def image_create(auth=None, **kwargs):
    """Create an image.

    CLI Example:

    .. code-block:: bash

        salt '*' glanceng.image_create name=cirros file=cirros.raw disk_format=raw
    """
    cloud = get_operator_cloud(auth)
    cleaned = _clean_kwargs(keep_name=True, **kwargs)
    return cloud.create_image(**cleaned)
Create an image CLI Example: .. code-block:: bash salt '*' glanceng.image_create name=cirros file=cirros.raw disk_format=raw salt '*' glanceng.image_create name=cirros file=cirros.raw disk_format=raw hw_scsi_model=virtio-scsi hw_disk_bus=scsi
def receive_message(self, operation, request_id):
    """Receive a raw BSON message or raise ConnectionFailure.

    If any exception is raised, the socket is closed (by
    ``_raise_connection_failure``).
    """
    try:
        return receive_message(
            self.sock, operation, request_id, self.max_message_size,
        )
    except BaseException as error:
        self._raise_connection_failure(error)
Receive a raw BSON message or raise ConnectionFailure. If any exception is raised, the socket is closed.
def correct(self, z):
    """Correct the approximate solution ``z`` with respect to the linear
    system ``linear_system`` and the deflation space defined by ``U``."""
    ls = self.linear_system
    # Preconditioned residual of the current approximation.
    c = ls.Ml * (ls.b - ls.A * z)
    c = utils.inner(self.W, c, ip_B=self.ip_B)
    if self.Q is not None and self.R is not None:
        c = scipy.linalg.solve_triangular(self.R, self.Q.T.conj().dot(c))
    if self.WR is not self.VR:
        c = self.WR.dot(scipy.linalg.solve_triangular(self.VR, c))
    return z + self.W.dot(c)
Correct the given approximate solution ``z`` with respect to the linear system ``linear_system`` and the deflation space defined by ``U``.
def to_capabilities(self):
    """Marshal the IE options into the capabilities object."""
    caps = self._caps
    opts = self._options.copy()
    if self._arguments:
        opts[self.SWITCHES] = ' '.join(self._arguments)
    if self._additional:
        opts.update(self._additional)
    if opts:
        caps[Options.KEY] = opts
    return caps
Marshals the IE options to the correct object.
def default_dtype(field=None):
    """Return the default data type of this class for a given field.

    Parameters
    ----------
    field : `Field`, optional
        Set of numbers to be represented by a data type.  Currently
        supported: `RealNumbers`, `ComplexNumbers`.  ``None`` (default)
        means `RealNumbers`.

    Returns
    -------
    dtype : `numpy.dtype`
        ``float64`` for ``RealNumbers()``, ``complex128`` for
        ``ComplexNumbers()``.

    Raises
    ------
    ValueError
        If no default is defined for *field*.
    """
    if field is None or field == RealNumbers():
        return np.dtype('float64')
    if field == ComplexNumbers():
        return np.dtype('complex128')
    raise ValueError('no default data type defined for field {}'
                     ''.format(field))
Return the default data type of this class for a given field. Parameters ---------- field : `Field`, optional Set of numbers to be represented by a data type. Currently supported : `RealNumbers`, `ComplexNumbers` The default ``None`` means `RealNumbers` Returns ------- dtype : `numpy.dtype` Numpy data type specifier. The returned defaults are: ``RealNumbers()`` : ``np.dtype('float64')`` ``ComplexNumbers()`` : ``np.dtype('complex128')``
def get_guild_members(self, guild_id: int) -> List[Dict[str, Any]]:
    """Get the list of members in a guild.

    Args:
        guild_id: snowflake id of the guild.

    Returns:
        List of dictionary objects describing the members of the guild.
    """
    endpoint = f'guilds/{guild_id}/members'
    return self._query(endpoint, 'GET')
Get a list of members in the guild Args: guild_id: snowflake id of the guild Returns: List of dictionary objects of users in the guild. Example: [ { "id": "41771983423143937", "name": "Discord Developers", "icon": "SEkgTU9NIElUUyBBTkRSRUkhISEhISEh", "splash": null, "owner_id": "80351110224678912", "region": "us-east", "afk_channel_id": "42072017402331136", "afk_timeout": 300, "embed_enabled": true, "embed_channel_id": "41771983444115456", "verification_level": 1, "roles": [], "emojis": [], "features": ["INVITE_SPLASH"], "unavailable": false }, { "id": "41771983423143937", "name": "Discord Developers", "icon": "SEkgTU9NIElUUyBBTkRSRUkhISEhISEh", "splash": null, "owner_id": "80351110224678912", "region": "us-east", "afk_channel_id": "42072017402331136", "afk_timeout": 300, "embed_enabled": true, "embed_channel_id": "41771983444115456", "verification_level": 1, "roles": [], "emojis": [], "features": ["INVITE_SPLASH"], "unavailable": false } ]
def move_down(self):
    """Select the button under the currently selected one, wrapping to
    the top of the menu when there is none below."""
    previous_index = self.current_index
    self.current_index += 1
    self.__wrap_index()
    self.__handle_selections(previous_index, self.current_index)
Try to select the button under the currently selected one. If a button is not there, wrap down to the top of the menu and select the first button.
def addRelationship(self, originItemId, destinationItemId, relationshipType):
    """Add a relationship of a certain type between two items.

    Inputs:
       originItemId - item ID of the origin item of the relationship.
       destinationItemId - item ID of the destination item.
       relationshipType - type of relationship between the two items;
           must be one of the defined relationship types.
    """
    url = "%s/addRelationship" % self.root
    params = {
        "originItemId": originItemId,
        "destinationItemId": destinationItemId,
        "relationshipType": relationshipType,
        "f": "json",
    }
    return self._post(url=url,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
Adds a relationship of a certain type between two items. Inputs: originItemId - The item ID of the origin item of the relationship destinationItemId - The item ID of the destination item of the relationship. relationshipType - The type of relationship between the two items. Must be defined in Relationship types.
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent."""
    impl = platform.python_implementation()

    if impl == 'CPython':
        impl_version = platform.python_version()
    elif impl == 'PyPy':
        vi = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (vi.major, vi.minor, vi.micro)
        if vi.releaselevel != 'final':
            impl_version = ''.join([impl_version, vi.releaselevel])
    elif impl in ('Jython', 'IronPython'):
        impl_version = platform.python_version()
    else:
        impl_version = 'Unknown'

    # platform.system()/release() can raise IOError on some systems.
    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return " ".join([
        '%s/%s' % (name, __version__),
        '%s/%s' % (impl, impl_version),
        '%s/%s' % (p_system, p_release),
    ])
Return a string representing the default user agent.
def CreateConstMuskingumXFile(x_value, in_connectivity_file, out_x_file):
    """Create a Muskingum X file with a constant value for every river
    segment.

    Parameters
    ----------
    x_value: float
        Value for the Muskingum X parameter [0-0.5].
    in_connectivity_file: str
        Path to the RAPID connectivity file.
    out_x_file: str
        Path to the output X file.
    """
    # One output row per river segment in the connectivity file.
    num_rivers = 0
    with open_csv(in_connectivity_file, "r") as csvfile:
        for _ in csv_reader(csvfile):
            num_rivers += 1

    with open_csv(out_x_file, 'w') as kfile:
        x_writer = csv_writer(kfile)
        for _ in xrange(num_rivers):
            x_writer.writerow([x_value])
Create muskingum X file from value that is constant all the way through for each river segment. Parameters ---------- x_value: float Value for the muskingum X parameter [0-0.5]. in_connectivity_file: str The path to the RAPID connectivity file. out_x_file: str The path to the output x file. Example:: from RAPIDpy.gis.muskingum import CreateConstMuskingumXFile CreateConstMuskingumXFile( x_value=0.3, in_connectivity_file='/path/to/rapid_connect.csv', out_x_file='/path/to/x.csv')
def lineReceived(self, line):
    """Handle a received line: either a byte count (switch to raw mode to
    read that many bytes) or an empty/non-numeric line (keep-alive)."""
    if line and line.isdigit():
        self._expectedLength = int(line)
        self._rawBuffer = []
        self._rawBufferLength = 0
        self.setRawMode()
    else:
        self.keepAliveReceived()
Called when a line is received. We expect a length in bytes or an empty line for keep-alive. If we got a length, switch to raw mode to receive that amount of bytes.
def perform_srl(responses, prompt):
    """Perform semantic role labeling on a list of responses, each
    prefixed with *prompt*."""
    predictor = Predictor.from_path(
        "https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
    sentences = [{"sentence": prompt + " " + response} for response in responses]
    output = predictor.predict_batch_json(sentences)
    return [
        {"sentence": prompt + response, "response": response, "srl": srl}
        for response, srl in zip(responses, output)
    ]
Perform semantic role labeling on a list of responses, given a prompt.
def from_pure(cls, z):
    """Create a pure (single-element) composition.

    Args:
        z (int): atomic number.
    """
    fractions = {z: 1.0}
    return cls(cls._key, fractions, dict(fractions), pyxray.element_symbol(z))
Creates a pure composition. Args: z (int): atomic number
def build_header(filename, disposition='attachment', filename_compat=None):
    """Generate a Content-Disposition header for a given filename.

    For legacy clients that don't understand the ``filename*`` parameter,
    a *filename_compat* value may be given; it should be ascii-only
    (recommended) or iso-8859-1 only.

    NOTE(review): the token/plain-quoted branches return a str while the
    final branch returns iso-8859-1 bytes, as in the original — confirm
    callers handle both.
    """
    if disposition != 'attachment':
        assert is_token(disposition)

    rv = disposition

    if is_token(filename):
        # Bare token: no quoting needed at all.
        rv += '; filename=%s' % (filename, )
        return rv
    elif is_ascii(filename) and is_lws_safe(filename):
        qd_filename = qd_quote(filename)
        rv += '; filename="%s"' % (qd_filename, )
        if qd_filename == filename:
            # No escaping was required; the quoted form suffices.
            return rv
    elif filename_compat:
        if is_token(filename_compat):
            rv += '; filename=%s' % (filename_compat, )
        else:
            assert is_lws_safe(filename_compat)
            rv += '; filename="%s"' % (qd_quote(filename_compat), )

    # RFC 5987/6266 extended parameter for the full (possibly non-ascii)
    # filename.
    rv += "; filename*=utf-8''%s" % (percent_encode(
        filename, safe=attr_chars_nonalnum, encoding='utf-8'), )
    return rv.encode('iso-8859-1')
Generate a Content-Disposition header for a given filename. For legacy clients that don't understand the filename* parameter, a filename_compat value may be given. It should either be ascii-only (recommended) or iso-8859-1 only. In the latter case it should be a character string (unicode in Python 2). Options for generating filename_compat (only useful for legacy clients): - ignore (will only send filename*); - strip accents using unicode's decomposing normalisations, which can be done from unicode data (stdlib), and keep only ascii; - use the ascii transliteration tables from Unidecode (PyPI); - use iso-8859-1 Ignore is the safest, and can be used to trigger a fallback to the document location (which can be percent-encoded utf-8 if you control the URLs). See https://tools.ietf.org/html/rfc6266#appendix-D
def import_instance(
        self,
        name,
        input_config,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
):
    """Import a Redis RDB snapshot file from GCS into a Redis instance.

    Redis may stop serving during this operation; instance state will be
    IMPORTING for the entire operation.  When complete, the instance will
    contain only data from the imported file.

    Args:
        name (str): Required. Redis instance resource name using the form
            ``projects/{project_id}/locations/{location_id}/instances/{instance_id}``.
        input_config (Union[dict, ~google.cloud.redis_v1.types.InputConfig]):
            Required. Data to be imported.
        retry (Optional[google.api_core.retry.Retry]): retry policy;
            ``None`` disables retries.
        timeout (Optional[float]): per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): extra metadata.

    Returns:
        A :class:`~google.cloud.redis_v1.types._OperationFuture` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: request failed.
        google.api_core.exceptions.RetryError: retries exhausted.
        ValueError: invalid parameters.
    """
    # Wrap the transport method lazily on first use.
    if "import_instance" not in self._inner_api_calls:
        self._inner_api_calls["import_instance"] = \
            google.api_core.gapic_v1.method.wrap_method(
                self.transport.import_instance,
                default_retry=self._method_configs["ImportInstance"].retry,
                default_timeout=self._method_configs["ImportInstance"].timeout,
                client_info=self._client_info,
            )

    request = cloud_redis_pb2.ImportInstanceRequest(
        name=name, input_config=input_config
    )
    operation = self._inner_api_calls["import_instance"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
    # Wrap the raw long-running operation in a future resolving to an
    # Instance message.
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        cloud_redis_pb2.Instance,
        metadata_type=cloud_redis_pb2.OperationMetadata,
    )
Import a Redis RDB snapshot file from GCS into a Redis instance. Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation. When complete, the instance will contain only data from the imported file. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation. Example: >>> from google.cloud import redis_v1 >>> >>> client = redis_v1.CloudRedisClient() >>> >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]') >>> >>> # TODO: Initialize `input_config`: >>> input_config = {} >>> >>> response = client.import_instance(name, input_config) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. Redis instance resource name using the form: ``projects/{project_id}/locations/{location_id}/instances/{instance_id}`` where ``location_id`` refers to a GCP region input_config (Union[dict, ~google.cloud.redis_v1.types.InputConfig]): Required. Specify data to be imported. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.redis_v1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.redis_v1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. 
ValueError: If the parameters are invalid.
def submit(self, func, *args, **kwargs):
    """Submit a function for serialized execution on SQS; returns a
    future keyed by the message's sequence id."""
    self.op_sequence += 1
    self.sqs.send_message(
        QueueUrl=self.map_queue,
        MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}),
        MessageAttributes={
            'sequence_id': {
                'StringValue': str(self.op_sequence),
                'DataType': 'Number'},
            'op': {
                'StringValue': named(func),
                'DataType': 'String',
            },
            'ser': {
                'StringValue': 'json',
                'DataType': 'String'}},
    )
    future = SQSFuture(self.op_sequence)
    self.futures[self.op_sequence] = future
    return future
Submit a function for serialized execution on sqs
def _extcap_call(prog, args, keyword, values):
    """Call *prog* using the extcap format, then parse the results.

    Returns a list of tuples, one per line starting with *keyword*, each
    holding the ``{val=...}`` fields named in *values*.
    """
    proc = subprocess.Popen(
        [prog] + args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    data, err = proc.communicate()
    if proc.returncode != 0:
        raise OSError("%s returned with error code %s: %s" % (prog,
                                                              proc.returncode,
                                                              err))
    results = []
    for line in plain_str(data).split("\n"):
        line = line.strip()
        if not line.startswith(keyword):
            continue
        results.append(tuple(
            re.search(r"{%s=([^}]*)}" % val, line).group(1)
            for val in values
        ))
    return results
Function used to call a program using the extcap format, then parse the results
def get_polygon_filter_names():
    """Get the names of all polygon filters in the order of creation."""
    return [flt.name for flt in PolygonFilter.instances]
Get the names of all polygon filters in the order of creation
def named_entity_texts(self):
    """The texts representing named entities (tagging on demand)."""
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return self.texts(NAMED_ENTITIES)
The texts representing named entities.
def event_later(self, delay, data_tuple):
    """Schedule an event to be emitted after a delay.

    :param delay: number of seconds
    :param data_tuple: a 2-tuple ``(flavor, data)``
    :return: an event object, useful for cancelling.
    """
    event_data = self.make_event_data(*data_tuple)
    return self._base.event_later(delay, event_data)
Schedule an event to be emitted after a delay. :param delay: number of seconds :param data_tuple: a 2-tuple (flavor, data) :return: an event object, useful for cancelling.
def values(self) -> List["Package"]:
    """Return a list of the available `Package` instances."""
    # Iterating the mapping and calling .get() per key is dict.values()
    # spelled slowly; use it directly.
    return list(self.build_dependencies.values())
Return an iterable of the available `Package` instances.
def get_context_file_name(pid_file):
    """Return the path of the ``context.json`` file that sits next to
    *pid_file* (written at daemon start to record the port in use)."""
    root = os.path.dirname(pid_file)
    return os.path.join(root, "context.json")
When the daemon is started write out the information which port it was using.
def collect_transitive_dependencies(
    collected: Set[str], dep_graph: DepGraph, from_name: str
) -> None:
    """Collect transitive dependencies.

    Recurses through *dep_graph* starting at *from_name*, adding every
    reachable dependency to *collected* in place.
    """
    for neighbour in dep_graph[from_name]:
        if neighbour in collected:
            continue
        collected.add(neighbour)
        collect_transitive_dependencies(collected, dep_graph, neighbour)
Collect transitive dependencies. From a dependency graph, collects a list of transitive dependencies by recursing through a dependency graph.
def allreduce_grads(all_grads, average):
    """All-reduce (and optionally average) gradients among K devices;
    results are broadcast to all devices.

    Args:
        all_grads (K x N): list (per device) of lists of gradients.
        average (bool): divide each summed grad by K.

    Returns:
        K x N: same structure as input, each grad replaced by the reduced
        value over the K devices.
    """
    if get_tf_version_tuple() <= (1, 12):
        from tensorflow.contrib import nccl
    else:
        from tensorflow.python.ops import nccl_ops as nccl

    nr_tower = len(all_grads)
    if nr_tower == 1:
        return all_grads

    new_all_grads = []
    for grads in zip(*all_grads):
        summed = nccl.all_sum(grads)
        grads_for_devices = []
        for g in summed:
            # Scale on the device that holds the gradient.
            with tf.device(g.device):
                if average:
                    g = tf.multiply(g, 1.0 / nr_tower)
                grads_for_devices.append(g)
        new_all_grads.append(grads_for_devices)

    return list(zip(*new_all_grads))
All-reduce average the gradients among K devices. Results are broadcasted to all devices. Args: all_grads (K x N): List of list of gradients. N is the number of variables. average (bool): average gradients or not. Returns: K x N: same as input, but each grad is replaced by the average over K devices.
def prefer_type(self, prefer, over):
    """Prefer one type over another type, all else being equivalent.

    Used to disambiguate when two implementations are provided for
    unrelated abstract types.

    Arguments:
        prefer: Preferred type (class).
        over: The type we don't like (class).

    Raises:
        ValueError: if ``over`` is already preferred over ``prefer``.
    """
    self._write_lock.acquire()
    try:
        if self._preferred(preferred=over, over=prefer):
            raise ValueError(
                "Type %r is already preferred over %r." % (over, prefer))
        self._prefer_table.setdefault(prefer, set()).add(over)
    finally:
        self._write_lock.release()
Prefer one type over another type, all else being equivalent. With abstract base classes (Python's abc module) it is possible for a type to appear to be a subclass of another type without the supertype appearing in the subtype's MRO. As such, the supertype has no order with respect to other supertypes, and this may lead to ambiguity if two implementations are provided for unrelated abstract types. In such cases, it is possible to disambiguate by explicitly telling the function to prefer one type over the other. Arguments: prefer: Preferred type (class). over: The type we don't like (class). Raises: ValueError: In case of logical conflicts.
def from_record(self, record):
    """Build a bundle from a CertStore record; returns self for
    chaining."""
    self.record = record
    self._setup_tls_files(record['files'])
    return self
Build a bundle from a CertStore record
def unquote(s, matching=False):
    """Remove leading and trailing single or double quotes (at most one
    from each end).

    If *matching* is True the quotes must match or nothing is stripped.

    @return: s unchanged if it is falsy or shorter than 2 chars, else the
        string with quotes stripped
    @rtype: string
    """
    if not s or len(s) < 2:
        return s
    quote_chars = "\"'"
    if matching:
        if s[0] in quote_chars and s[0] == s[-1]:
            return s[1:-1]
        return s
    if s[0] in quote_chars:
        s = s[1:]
    if s[-1] in quote_chars:
        s = s[:-1]
    return s
Remove leading and ending single and double quotes. The quotes need to match if matching is True. Only one quote from each end will be stripped. @return: if s evaluates to False, return s as is, else return string with stripped quotes @rtype: unquoted string, or s unchanged if it is evaluating to False
def by_population_density(self,
                          lower=-1,
                          upper=2 ** 31,
                          zipcode_type=ZipcodeType.Standard,
                          sort_by=SimpleZipcode.population_density.name,
                          ascending=False,
                          returns=DEFAULT_LIMIT):
    """Search zipcode information by population density range.

    ``population density`` is population per square mile of land.
    """
    return self.query(
        population_density_lower=lower,
        population_density_upper=upper,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
Search zipcode information by population density range. `population density` is `population per square miles on land`
def map_to_precursor_biopython(seqs, names, loci, args):
    """Map the sequences to the precursor using the Biopython package."""
    precursor = precursor_sequence(loci, args.ref).upper()
    mapped = dict()
    for seq, name in itertools.izip(seqs, names):
        alignment = _align(str(seq), precursor)
        if alignment:
            mapped[name] = alignment
    logger.debug("mapped in %s: %s out of %s" % (loci, len(mapped), len(seqs)))
    return mapped
map the sequences using biopython package
def get_all_permissions(self):
    """Return a set of (permission name, view menu name) tuples for all
    of the current user's roles."""
    return {
        (perm_view.permission.name, perm_view.view_menu.name)
        for role in self.get_user_roles()
        for perm_view in role.permissions
    }
Returns a set of tuples with the perm name and view menu name
def set_hw_virt_ex_property(self, property_p, value):
    """Set a new value for the specified hardware virtualization boolean
    property.

    in property_p of type :class:`HWVirtExPropertyType`
        Property type to set.
    in value of type bool
        New property value.

    raises :class:`OleErrorInvalidarg`
        Invalid property.
    """
    if not isinstance(property_p, HWVirtExPropertyType):
        raise TypeError("property_p can only be an instance of type HWVirtExPropertyType")
    if not isinstance(value, bool):
        raise TypeError("value can only be an instance of type bool")
    self._call("setHWVirtExProperty",
               in_p=[property_p, value])
Sets a new value for the specified hardware virtualization boolean property. in property_p of type :class:`HWVirtExPropertyType` Property type to set. in value of type bool New property value. raises :class:`OleErrorInvalidarg` Invalid property.
async def create(self, model_, **data):
    """Create a new *model_* object saved to the database and return it
    with its primary key populated."""
    inst = model_(**data)
    insert_query = model_.insert(**dict(inst.__data__))
    pk = await self.execute(insert_query)
    if inst._pk is None:
        inst._pk = pk
    return inst
Create a new object saved to database.
def get_long_description():
    """Return this project's long description: the README followed by the
    CHANGELOG with its 'Unreleased' header removed."""
    with open("README.rst", "r") as f:
        readme = f.read()
    with open("CHANGELOG.rst", "r") as f:
        changelog = f.read()
    changelog = changelog.replace("\nUnreleased\n------------------", "")
    return "\n".join([readme, changelog])
Return this project's description.
def delete(self, *args, **kwargs):
    """Delete the votes, then refresh the related `votes`/`score` rating
    fields on the affected model instances."""
    from fields import RatingField

    qs = self.distinct().values_list(
        'content_type', 'object_id').order_by('content_type')

    # Collect the rated objects before the vote rows disappear.
    to_update = []
    for content_type, objects in itertools.groupby(qs, key=lambda x: x[0]):
        model_class = ContentType.objects.get(pk=content_type).model_class()
        if model_class:
            # NOTE(review): pk__in=list(objects)[0] uses only the first
            # (content_type, object_id) pair of each group — confirm this
            # is the intended behavior before changing.
            to_update.extend(
                list(model_class.objects.filter(pk__in=list(objects)[0])))

    retval = super(VoteQuerySet, self).delete(*args, **kwargs)

    for obj in to_update:
        for field in getattr(obj, '_djangoratings', []):
            getattr(obj, field.name)._update(commit=False)
        obj.save()

    return retval
Handles updating the related `votes` and `score` fields attached to the model.
def a10_allocate_ip_from_dhcp_range(self, subnet, interface_id, mac, port_id):
    """Search for an available IP address in the subnet's unallocated
    IPAllocationPool range and record the allocation.

    The search is a difference of the pool range and the current IP
    allocations.  Raises if no address is available.

    :returns: tuple of (ip_address string, subnet cidr, port_id)
    """
    subnet_id = subnet["id"]
    network_id = subnet["network_id"]

    pool = self.get_ipallocationpool_by_subnet_id(subnet_id)
    ip_in_use_list = [
        x.ip_address for x in self.get_ipallocations_by_subnet_id(subnet_id)
    ]

    ip_address = IPHelpers.find_unused_ip(
        pool.first_ip, pool.last_ip, ip_in_use_list)
    if not ip_address:
        msg = "Cannot allocate from subnet {0}".format(subnet)
        LOG.error(msg)
        # BUG FIX: the original raised a bare `Exception` and discarded
        # the message it had just built; attach it so callers see why.
        raise Exception(msg)

    mark_in_use = {
        "ip_address": ip_address,
        "network_id": network_id,
        "port_id": port_id,
        "subnet_id": subnet["id"],
    }
    self.create_ipallocation(mark_in_use)
    return ip_address, subnet["cidr"], mark_in_use["port_id"]
Search for an available IP.addr from unallocated nmodels.IPAllocationPool range. If no addresses are available then an error is raised. Returns the address as a string. This search is conducted by a difference of the nmodels.IPAllocationPool set_a and the current IP allocations.
def clear_output(output=None):
    """Remove file targets in ``_output`` (or the given *output*) when a
    step fails to complete."""
    targets = env.sos_dict['_output'] if output is None else output
    for target in targets:
        if isinstance(target, file_target) and target.exists():
            try:
                target.unlink()
            except Exception as e:
                env.logger.warning(f'Failed to remove {target}: {e}')
Remove file targets in `_output` when a step fails to complete
def _prepend_schema_name(self, message): if self._name: message = "{0!r} {1!s}".format(self._name, message) return message
If a custom schema name has been defined, prepends it to the error message that gets raised when a schema error occurs.
def load(self):
    """Extract tabular data as |TableData| instances from a CSV text
    object.

    :return: Loaded table data.
    :rtype: |TableData| iterator
    :raises pytablereader.DataError: if the CSV data is invalid.

    .. seealso:: :py:func:`csv.reader`
    """
    self._validate()
    self._logger.logging_load()

    self._csv_reader = csv.reader(
        six.StringIO(self.source.strip()),
        delimiter=self.delimiter,
        quotechar=self.quotechar,
        strict=True,
        skipinitialspace=True,
    )
    formatter = CsvTableFormatter(self._to_data_matrix())
    formatter.accept(self)
    return formatter.to_table_data()
Extract tabular data as |TableData| instances from a CSV text object. |load_source_desc_text| :return: Loaded table data. |load_table_name_desc| =================== ======================================== Format specifier Value after the replacement =================== ======================================== ``%(filename)s`` ``""`` ``%(format_name)s`` ``"csv"`` ``%(format_id)s`` |format_id_desc| ``%(global_id)s`` |global_id| =================== ======================================== :rtype: |TableData| iterator :raises pytablereader.DataError: If the CSV data is invalid. .. seealso:: :py:func:`csv.reader`
def file_name(self, value):
    """The filename of the attachment.

    :param value: the filename; coerced to ``FileName`` when a plain
        string is given.
    :type value: FileName or string
    """
    self._file_name = value if isinstance(value, FileName) else FileName(value)
The filename of the attachment :param file_name: The filename of the attachment :type file_name: FileName, string
def replaceNode(self, cur):
    """Unlink this node from its current context and put *cur* in its
    place.  If *cur* was already inserted in a document it is first
    unlinked from its existing context."""
    cur__o = None if cur is None else cur._o
    ret = libxml2mod.xmlReplaceNode(self._o, cur__o)
    if ret is None:
        raise treeError('xmlReplaceNode() failed')
    return xmlNode(_obj=ret)
Unlink the old node from its current context, prune the new one at the same place. If @cur was already inserted in a document it is first unlinked from its existing context.
def direction(self, direction):
    """Set the direction flag.

    :param direction: one of ``'i' 'x' 'y' 'z' 's' 'c'``
    :raises TypeError: if *direction* is not a ``str``
    :raises ValueError: if it is not one of the accepted values
    """
    if not isinstance(direction, str):
        raise TypeError("direction must be of type str")
    allowed = ['i', 'x', 'y', 'z', 's', 'c']
    if direction not in allowed:
        raise ValueError("must be one of: {}".format(allowed))
    self._direction = direction
set the direction
def get_auth():
    """Prompt for credentials on the terminal and return an authenticated
    Github client."""
    import getpass
    user = input("User Name: ")
    pswd = getpass.getpass('Password: ')
    return Github(user, pswd)
Get authentication.
def run_basic_group():
    """Run the basic phase group example.

    There are no terminal phases in this example; all phases are run.
    """
    group = htf.PhaseGroup(
        setup=[setup_phase],
        main=[main_phase],
        teardown=[teardown_phase],
    )
    htf.Test(group).execute()
Run the basic phase group example. In this example, there are no terminal phases; all phases are run.
def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
    """Return the list of files and reading masks of the files to read."""
    instruction_dicts = []
    for sliced_split_info in list_sliced_split_info:
        mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)

        filepaths = list(sorted(self._build_split_filenames(
            split_info_list=[sliced_split_info.split_info],
        )))

        if sliced_split_info.split_info.num_examples:
            # With known example counts, compute per-shard offsets so the
            # percent mask lines up with example boundaries across shards.
            shard_id2num_examples = splits_lib.get_shard_id2num_examples(
                sliced_split_info.split_info.num_shards,
                sliced_split_info.split_info.num_examples,
            )
            mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
        else:
            logging.warning(
                "Statistics not present in the dataset. TFDS is not able to load "
                "the total number of examples, so using the subsplit API may not "
                "provide precise subsplits."
            )
            mask_offsets = [0] * len(filepaths)

        for filepath, mask_offset in zip(filepaths, mask_offsets):
            instruction_dicts.append({
                "filepath": filepath,
                "mask": mask,
                "mask_offset": mask_offset,
            })
    return instruction_dicts
Return the list of files and reading mask of the files to read.
def lock(self):
    """Acquire the lock for the dvc repo, retrying once after TIMEOUT
    seconds if the first attempt fails."""
    try:
        self._do_lock()
    except LockError:
        time.sleep(self.TIMEOUT)
        self._do_lock()
Acquire lock for dvc repo.
def initialize_training(self, training_info: TrainingInfo, model_state=None,
                        hidden_state=None):
    """Prepare models for training: load the given model state (or reset
    weights when none is given), then initialize the algorithm."""
    if model_state is None:
        self.model.reset_weights()
    else:
        self.model.load_state_dict(model_state)

    self.algo.initialize(
        training_info=training_info,
        model=self.model,
        environment=self.env_roller.environment,
        device=self.device,
    )
Prepare models for training
def get_extra_claims(self, claims_set):
    """Get claims holding extra identity info from the claims set.

    :param claims_set: set of claims included in the received token.
    :return: dict of extra (non-reserved) claims, or ``None`` if there
        are none.
    """
    reserved = (
        self.userid_claim, "iss", "aud", "exp", "nbf", "iat", "jti",
        "refresh_until", "nonce",
    )
    extra = {c: claims_set[c] for c in claims_set if c not in reserved}
    return extra or None
Get claims holding extra identity info from the claims set. Returns a dictionary of extra claims or None if there are none. :param claims_set: set of claims, which was included in the received token.
def info():
    """Test endpoint to verify that you have been authenticated; only
    available when the app is in testing or debug mode."""
    if not (current_app.testing or current_app.debug):
        abort(404)
    return jsonify(dict(
        user=request.oauth.user.id,
        client=request.oauth.client.client_id,
        scopes=list(request.oauth.scopes),
    ))
Test to verify that you have been authenticated.
def reset(self):
    """Reset the simulation and return the initial observation."""
    self._destroy_viewer()
    self._reset_internal()
    self.sim.forward()
    return self._get_observation()
Resets simulation.
def cancel_firewall(self, firewall_id, dedicated=False):
    """Cancels the specified firewall.

    :param int firewall_id: Firewall ID to be cancelled.
    :param bool dedicated: If true, the firewall instance is dedicated,
        otherwise, the firewall instance is shared.
    """
    billing_item = self._get_fwl_billing_item(firewall_id, dedicated)
    service = self.client['Billing_Item']
    return service.cancelService(id=billing_item['id'])
Cancels the specified firewall. :param int firewall_id: Firewall ID to be cancelled. :param bool dedicated: If true, the firewall instance is dedicated, otherwise, the firewall instance is shared.
def get_allowed_permissions_for(brain_or_object, user=None):
    """Get the allowed permissions for the given object.

    Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`

    :param brain_or_object: Catalog brain or object
    :param user: A user ID, user object or None (for the current user)
    :returns: List of allowed permissions
    """
    user = get_user(user)
    obj = api.get_object(brain_or_object)
    return [
        permission
        for permission in get_mapped_permissions_for(brain_or_object)
        if user.has_permission(permission, obj)
    ]
Get the allowed permissions for the given object Code extracted from `IRoleManager.manage_getUserRolesAndPermissions` :param brain_or_object: Catalog brain or object :param user: A user ID, user object or None (for the current user) :returns: List of allowed permissions
def check(self, options=None):
    """check for ambiguous keys and move attributes into dict

    Returns self so calls can be chained.
    """
    self.check_values(options)
    self.check_attributes(options)
    # NOTE(review): check_values runs both before and after
    # check_attributes — presumably to re-validate values after the
    # attribute move, but it may be an accidental duplicate. Confirm
    # before removing either call.
    self.check_values(options)
    return self
check for ambiguous keys and move attributes into dict
def as_bin(self, as_spendable=False):
    """Return the txo as binary."""
    buffer = io.BytesIO()
    self.stream(buffer, as_spendable=as_spendable)
    return buffer.getvalue()
Return the txo as binary.
def serialize_elements(document, elements, options=None):
    """Serialize list of elements into HTML string.

    :Args:
      - document (:class:`ooxml.doc.Document`): Document object
      - elements (list): List of elements
      - options (dict): Optional dictionary with :class:`Context` options

    :Returns:
      Returns HTML representation of the document.
    """
    ctx = Context(document, options)
    # All output is attached under a single root <div>.
    tree_root = root = etree.Element('div')
    for elem in elements:
        # Elements without a registered serializer are skipped.
        _ser = ctx.get_serializer(elem)
        if _ser:
            # Serializers return the node to which subsequent output
            # should be attached.
            root = _ser(ctx, document, elem, root)
    return etree.tostring(tree_root,
                          pretty_print=ctx.options.get('pretty_print', True),
                          encoding="utf-8",
                          xml_declaration=False)
Serialize list of elements into HTML string. :Args: - document (:class:`ooxml.doc.Document`): Document object - elements (list): List of elements - options (dict): Optional dictionary with :class:`Context` options :Returns: Returns HTML representation of the document.
def strip_text_after_string(txt, junk):
    """Return txt truncated just before the first occurrence of junk.

    If junk does not occur in txt, txt is returned unchanged.
    """
    cut = txt.find(junk)
    return txt if cut < 0 else txt[:cut]
Strip any trailing text after the first occurrence of the given marker; used to drop poorly formatted comments at the end of function definitions.
def cmyk(c, m, y, k):
    """Create a spectra.Color object in the CMYK color space.

    :param float c: c coordinate.
    :param float m: m coordinate.
    :param float y: y coordinate.
    :param float k: k coordinate.
    :rtype: Color
    :returns: A spectra.Color object in the CMYK color space.
    """
    coordinates = (c, m, y, k)
    return Color("cmyk", *coordinates)
Create a spectra.Color object in the CMYK color space. :param float c: c coordinate. :param float m: m coordinate. :param float y: y coordinate. :param float k: k coordinate. :rtype: Color :returns: A spectra.Color object in the CMYK color space.
def _checkDragDropEvent(self, ev):
    """Checks if event contains a file URL, accepts if it does, ignores
    if it doesn't.

    Returns the list of local file paths on acceptance, else None.
    """
    mimedata = ev.mimeData()
    if mimedata.hasUrls():
        # Keep only URLs that resolve to a non-empty local file path.
        urls = [str(url.toLocalFile()) for url in mimedata.urls() if url.toLocalFile()]
    else:
        urls = []
    if urls:
        ev.acceptProposedAction()
        return urls
    else:
        ev.ignore()
        return None
Checks if event contains a file URL, accepts if it does, ignores if it doesn't
def terminate(self, reason=None):
    """Terminate the service with a reason.

    Stops the pyev event loop, which ends service processing.

    :param reason: optional human-readable reason for the shutdown,
        included in the log line when provided.
    """
    # Fix: `reason` was previously accepted but never used; record it so
    # the log explains why the service stopped.
    if reason:
        self.logger.info('terminating: %s', reason)
    else:
        self.logger.info('terminating')
    self.loop.unloop(pyev.EVUNLOOP_ALL)
Terminate the service with a reason.
def get_doc(self, doc_id):
    """Get replication document state for a given replication document ID."""
    url = '/'.join([self._scheduler, 'docs', '_replicator', doc_id])
    resp = self._r_session.get(url)
    resp.raise_for_status()
    return response_to_json_dict(resp)
Get replication document state for a given replication document ID.
def addReadGroup(self, readGroup):
    """Adds the specified ReadGroup to this ReadGroupSet.

    The group is registered in both the id->group map and the ordered
    id list.
    """
    readGroupId = readGroup.getId()
    self._readGroupIdMap[readGroupId] = readGroup
    self._readGroupIds.append(readGroupId)
Adds the specified ReadGroup to this ReadGroupSet.
def create(self):
    """Creates the node, logging an informational record of the event."""
    message = "{module}: {name} [{id}] created".format(
        module=self.manager.module_name,
        name=self.name,
        id=self.id,
    )
    log.info(message)
Creates the node.
def trend_msg(self, trend, significant=1):
    """Return the trend message.

    Trends whose magnitude does not exceed ``significant`` are reported
    as flat ('-'); an unknown trend (None) is reported as ' '.
    """
    if trend is None:
        return ' '
    if trend > significant:
        return '/'
    if trend < -significant:
        return '\\'
    return '-'
Return the trend message ('/', '\\', '-', or ' '). Trends whose magnitude does not exceed ``significant`` are treated as flat and reported as '-'.
def reset(self):
    """Reset the current animation generator.

    Rebuilds the frame generator from the stored frame function and its
    arguments, then cycles it chained with the back-up generator so the
    animation never runs out of frames.
    """
    animation_gen = self._frame_function(*self._animation_args,
                                         **self._animation_kwargs)
    self._current_generator = itertools.cycle(
        util.concatechain(animation_gen, self._back_up_generator))
Reset the current animation generator.
def exposure_notes(self):
    """Get the exposure specific notes defined in definitions.

    This method will do a lookup in definitions and return the exposure
    definition specific notes dictionary. This is a helper function to
    make it easy to get exposure specific notes from the definitions
    metadata.

    .. versionadded:: 3.5

    :returns: A list like e.g. safe.definitions.exposure_land_cover[
        'notes']
    :rtype: list, None
    """
    notes = []
    exposure = definition(self.exposure.keywords.get('exposure'))
    # Generic notes for the exposure type, when present.
    if 'notes' in exposure:
        notes += exposure['notes']
    # Extra notes keyed on the layer mode of the exposure layer.
    if self.exposure.keywords['layer_mode'] == 'classified':
        if 'classified_notes' in exposure:
            notes += exposure['classified_notes']
    if self.exposure.keywords['layer_mode'] == 'continuous':
        if 'continuous_notes' in exposure:
            notes += exposure['continuous_notes']
    return notes
Get the exposure specific notes defined in definitions. This method will do a lookup in definitions and return the exposure definition specific notes dictionary. This is a helper function to make it easy to get exposure specific notes from the definitions metadata. .. versionadded:: 3.5 :returns: A list like e.g. safe.definitions.exposure_land_cover[ 'notes'] :rtype: list, None
def msvd(m):
    """Modified singular value decomposition.

    Returns U, S, V where Udagger M V = diag(S) and the singular values
    are sorted in ascending order (small to large).
    """
    u, s, vh = np.linalg.svd(m)
    # np.linalg.svd yields descending singular values; reorder ascending
    # and permute the singular vectors to match.
    order = np.argsort(s)
    return u[:, order], s[order], vh[order].conj().T
Modified singular value decomposition. Returns U, S, V where Udagger M V = diag(S) and the singular values are sorted in ascending order (small to large).
def integrate_box(self, low, high, forcequad=False, **kwargs):
    """Integrates over a box. Optionally force quad integration, even
    for non-adaptive.

    If adaptive mode is not being used, this will just call the
    `scipy.stats.gaussian_kde` method `integrate_box_1d`. Else, by
    default, it will call `scipy.integrate.quad`. If the `forcequad`
    flag is turned on, that integration is used even if adaptive mode
    is off.

    Parameters
    ----------
    low : float
        Lower limit of integration
    high : float
        Upper limit of integration
    forcequad : bool
        If `True`, use quad integration even if adaptive mode is off.
    kwargs
        Keyword arguments passed to `scipy.integrate.quad`.
    """
    if not self.adaptive and not forcequad:
        # Fast path: delegate to gaussian_kde, rescaling by the stored norm.
        return self.gauss_kde.integrate_box_1d(low, high)*self.norm
    # quad returns (value, abserr); only the value is wanted.
    return quad(self.evaluate, low, high, **kwargs)[0]
Integrates over a box. Optionally force quad integration, even for non-adaptive. If adaptive mode is not being used, this will just call the `scipy.stats.gaussian_kde` method `integrate_box_1d`. Else, by default, it will call `scipy.integrate.quad`. If the `forcequad` flag is turned on, then that integration will be used even if adaptive mode is off. Parameters ---------- low : float Lower limit of integration high : float Upper limit of integration forcequad : bool If `True`, then use the quad integration even if adaptive mode is off. kwargs Keyword arguments passed to `scipy.integrate.quad`.
def clear_alert_destination(self, destination=0, channel=None):
    """Clear an alert destination.

    Remove the specified alert destination configuration.

    :param destination: The destination to clear (defaults to 0)
    :param channel: Channel to act on; defaults to the network channel.
    """
    target_channel = self.get_network_channel() if channel is None else channel
    self.set_alert_destination(
        '0.0.0.0', False, 0, 0, destination, target_channel)
Clear an alert destination Remove the specified alert destination configuration. :param destination: The destination to clear (defaults to 0)
def block_quote(node):
    """A block quote."""
    quote = nodes.block_quote()
    quote.line = node.sourcepos[0][0]
    for child in MarkDown(node):
        quote += child
    return quote
A block quote
def iter_instances(self):
    """Iterate over the stored objects.

    Yields:
        wrkey: The two-tuple key used to store the object
        obj: The instance or function object
    """
    # Snapshot the keys so entries may disappear while iterating.
    for wrkey in set(self.keys()):
        instance = self.get(wrkey)
        if instance is not None:
            yield wrkey, instance
Iterate over the stored objects Yields: wrkey: The two-tuple key used to store the object obj: The instance or function object
def eqdate(y):
    """Like eq but compares a date with a (y, m, d) tuple.

    Also accepts the magic string 'TODAY' for the current date.
    """
    target = datetime.date.today() if y == 'TODAY' else datetime.date(*y)
    return lambda x: x == target
Like eq but builds the comparison date from a (year, month, day) tuple. Also accepts the magic string 'TODAY' for the current date.
def write_nb(root, nb_name, cells):
    """Write a jupyter notebook to disk.

    Takes a given root directory, a notebook name, and a list of cells.
    """
    notebook = new_notebook(cells=cells, metadata={
        'language': 'python',
    })
    nb_path = os.path.join(root, nb_name + '.ipynb')
    with codecs.open(nb_path, encoding='utf-8', mode='w') as nb_file:
        nbformat.write(notebook, nb_file, NB_VERSION)
    print("Created Jupyter notebook at:\n%s" % nb_path)
Write a jupyter notebook to disk. Takes a given a root directory, a notebook name, and a list of cells.
def copy(self, *args, **kwargs):
    """Copy this model element and contained elements if they exist.

    Each slot value is forwarded as a keyword argument (minus a single
    leading underscore) unless the caller already supplied it.
    """
    for slot in self.__slots__:
        value = getattr(self, slot)
        name = slot[1:] if slot.startswith('_') else slot
        kwargs.setdefault(name, value)
    return type(self)(*args, **kwargs)
Copy this model element and contained elements if they exist.
def convert_to_dict(item):
    """Examine an item of any type and return a true dictionary.

    If the item is already a dictionary, it is returned as-is. Otherwise
    this routine interprets: a class/function (via ``__dict__``), a legacy
    mongoEngine document, a list (index positions become keys), a generic
    iterable mapping, or a generic object with members.

    .. versionadded:: 0.0.4

    :param item: Any object such as a variable, instance, or function.
    :returns: A true dictionary, or ``{}`` if 'item' cannot be converted.
    """
    actual_type = detect_type(item)
    if actual_type == "dict":
        # Already a dictionary; return unchanged.
        return item
    elif actual_type == "list":
        # Use list index positions as keys.
        temp = {}
        ctr = 0
        for entry in item:
            temp[ctr] = entry
            ctr += 1
        return temp
    elif actual_type == "mongoengine":
        # Legacy mongoEngine documents keep their fields under '_data'.
        return item.__dict__['_data']
    elif actual_type == "class":
        return item.__dict__
    elif actual_type == "iterable_dict":
        # Mapping-like iterable: copy the key/value pairs.
        d = {}
        for key in item:
            d[key] = item[key]
        return d
    elif actual_type == "object":
        # Generic object: use its members as key/value pairs.
        tuples = getmembers(item)
        d = {}
        for (key, value) in tuples:
            d[key] = value
        return d
    # Unrecognised type: fall back to an empty dict.
    return {}
Examine an item of any type and return a true dictionary. If the item is already a dictionary, then the item is returned as-is. Easy. Otherwise, it attempts to interpret it. So far, this routine can handle: * a class, function, or anything with a .__dict__ entry * a legacy mongoEngine document (a class for MongoDb handling) * a list (index positions are used as keys) * a generic object that is iterable * a generic object with members .. versionadded:: 0.0.4 :param item: Any object such as a variable, instance, or function. :returns: A true dictionary. If unable to get convert 'item', then an empty dictionary '{}' is returned.
def do(self, x_orig):
    """Transform the unknowns to preconditioned coordinates.

    Returns x_orig unchanged when no scales are configured.
    """
    if self.scales is None:
        return x_orig
    return np.dot(self.rotation.transpose(), x_orig) * self.scales
Transform the unknowns to preconditioned coordinates This method also transforms the gradient to original coordinates
def list_taxa(pdb_list, sleep_time=.1):
    """Given a list of PDB IDs, look up their associated species.

    Digs through the search results returned by get_all_info() and
    returns any taxonomy information included within each description.

    :param pdb_list: list of PDB ID strings.
    :param sleep_time: seconds to wait between API requests; too small a
        value may cause the API to stop responding.
    :returns: list of species names/classifications, with 'Unknown' for
        entries lacking taxonomy data.
    """
    # Warn up front when rate limiting will make this call noticeably slow.
    if len(pdb_list)*sleep_time > 30:
        warnings.warn("Because of API limitations, this function\
will take at least " + str(len(pdb_list)*sleep_time) + " seconds to return results.\
If you need greater speed, try modifying the optional argument sleep_time=.1, (although \
this may cause the search to time out)"
                      )
    taxa = []
    for pdb_id in pdb_list:
        all_info = get_all_info(pdb_id)
        # Walk the nested description metadata looking for taxonomy entries.
        species_results = walk_nested_dict(all_info, 'Taxonomy', maxdepth=25, outputs=[])
        first_result = walk_nested_dict(species_results, '@name', outputs=[])
        if first_result:
            # Use the last '@name' hit found.
            taxa.append(first_result[-1])
        else:
            taxa.append('Unknown')
        # Throttle requests to stay inside API limits.
        time.sleep(sleep_time)
    return taxa
Given a list of PDB IDs, look up their associated species This function digs through the search results returned by the get_all_info() function and returns any information on taxonomy included within the description. The PDB website description of each entry includes the name of the species (and sometimes details of organ or body part) for each protein structure sample. Parameters ---------- pdb_list : list of str List of PDB IDs sleep_time : float Time (in seconds) to wait between requests. If this number is too small the API will stop working, but it appears to vary among different systems Returns ------- taxa : list of str A list of the names or classifictions of species associated with entries Examples -------- >>> crispr_query = make_query('crispr') >>> crispr_results = do_search(crispr_query) >>> print(list_taxa(crispr_results[:10])) ['Thermus thermophilus', 'Sulfolobus solfataricus P2', 'Hyperthermus butylicus DSM 5456', 'unidentified phage', 'Sulfolobus solfataricus P2', 'Pseudomonas aeruginosa UCBPP-PA14', 'Pseudomonas aeruginosa UCBPP-PA14', 'Pseudomonas aeruginosa UCBPP-PA14', 'Sulfolobus solfataricus', 'Thermus thermophilus HB8']
def usage(self, auth, resource, metric, starttime, endtime, defer=False):
    """Returns metric usage for client and its subhierarchy.

    Args:
        auth: <cik> for authentication
        resource: ResourceID
        metric: Metric to measure (as string); may be an entity or
            consumable.
        starttime: Start of the window over which usage is measured.
        endtime: End of the window over which usage is measured.
    """
    arguments = [resource, metric, starttime, endtime]
    return self._call('usage', auth, arguments, defer)
Returns metric usage for client and its subhierarchy.

Args:
    auth: <cik> for authentication
    resource: ResourceID
    metric: Metric to measure (as string), it may be an entity or consumable.
    starttime: Start time of the window over which usage is measured.
    endtime: End time of the window over which usage is measured.
def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
    """Collate local file info as preparation for Open Humans upload.

    Note: Files with filesize > max_bytes are not included in returned
    info.

    :param filedir: target directory to get files from.
    :param max_bytes: maximum file size to consider (default 128 MB).
    :returns: dict mapping filename to tags, description, md5 and
        creation date.
    """
    file_data = {}
    logging.info('Characterizing files in {}'.format(filedir))
    for filename in os.listdir(filedir):
        filepath = os.path.join(filedir, filename)
        file_stats = os.stat(filepath)
        creation_date = arrow.get(file_stats.st_ctime).isoformat()
        file_size = file_stats.st_size
        if file_size <= max_bytes:
            # Hash in 4 KiB chunks so large files are never fully loaded.
            file_md5 = hashlib.md5()
            with open(filepath, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    file_md5.update(chunk)
            md5 = file_md5.hexdigest()
            file_data[filename] = {
                'tags': guess_tags(filename),
                'description': '',
                'md5': md5,
                'creation_date': creation_date,
            }
    return file_data
Collate local file info as preparation for Open Humans upload.

Note: Files with filesize > max_bytes are not included in returned info.

:param filedir: This field is the target directory to get files from.
:param max_bytes: This field is the maximum file size to consider. Its
    default value is 128 MB.
def _read_config(cfg_file):
    """Return a ConfigParser object populated from the settings.cfg file.

    When the file does not exist yet, the parser is seeded with the two
    expected (empty) sections instead.

    :return: A Config Parser object.
    """
    config = ConfigParser()
    # Identity transform keeps option names case-sensitive.
    config.optionxform = lambda option: option
    if os.path.exists(cfg_file):
        config.read(cfg_file)
    else:
        config.add_section(_MAIN_SECTION_NAME)
        config.add_section(_ENVIRONMENT_SECTION_NAME)
    return config
Return a ConfigParser object populated from the settings.cfg file. :return: A Config Parser object.
def _postback(self):
    """Perform PayPal Postback validation.

    Re-posts the received query to PayPal's endpoint, prefixed with the
    ``cmd=_notify-validate`` command, and returns the raw response body.
    """
    return requests.post(self.get_endpoint(), data=b"cmd=_notify-validate&" + self.query.encode("ascii")).content
Perform PayPal Postback validation.
def pdf(self, mu):
    """PDF for Normal prior.

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - p(mu)

    NOTE(review): the usual 1/sqrt(2*pi) normalisation constant is
    omitted, so this is only proportional to the exact Normal density —
    confirm whether downstream code relies on the unnormalised form.
    """
    # Optionally map mu through the stored transform first.
    if self.transform is not None:
        mu = self.transform(mu)
    return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
def poly2o_residual(params, data, mask):
    """lmfit 2nd order polynomial residuals (masked, flattened)."""
    background = poly2o_model(params, shape=data.shape)
    residuals = (data - background)[mask]
    return residuals.flatten()
lmfit 2nd order polynomial residuals
def _sort_lambda(sortedby='cpu_percent', sortedby_secondary='memory_percent'):
    """Return a sort lambda function for the sortedbykey.

    Returns None for keys with no special sort function;
    ``sortedby_secondary`` is accepted for interface compatibility but
    not used here.
    """
    special_sorts = {
        'io_counters': _sort_io_counters,
        'cpu_times': _sort_cpu_times,
    }
    return special_sorts.get(sortedby)
Return a sort lambda function for the sortedbykey
def fetch(self):
    """Fetch a StepInstance.

    :returns: Fetched StepInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.step.StepInstance
    """
    # No query parameters are needed for this fetch.
    params = values.of({})
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )
    return StepInstance(
        self._version,
        payload,
        flow_sid=self._solution['flow_sid'],
        engagement_sid=self._solution['engagement_sid'],
        sid=self._solution['sid'],
    )
Fetch a StepInstance :returns: Fetched StepInstance :rtype: twilio.rest.studio.v1.flow.engagement.step.StepInstance
def set_preferences(request, dashboard_id):
    """This view serves and validates a preferences form.

    GET renders the form (pre-filled from any saved preferences); POST
    validates and saves. AJAX requests get a bare 'true'/'false' body
    instead of the rendered page.
    """
    try:
        preferences = DashboardPreferences.objects.get(
            user=request.user,
            dashboard_id=dashboard_id
        )
    except DashboardPreferences.DoesNotExist:
        # First visit: no stored preferences yet.
        preferences = None
    if request.method == "POST":
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=dashboard_id,
            data=request.POST,
            instance=preferences
        )
        if form.is_valid():
            preferences = form.save()
            if request.is_ajax():
                return HttpResponse('true')
            messages.success(request, 'Preferences saved')
        elif request.is_ajax():
            return HttpResponse('false')
    else:
        form = DashboardPreferencesForm(
            user=request.user,
            dashboard_id=dashboard_id,
            instance=preferences
        )
    return render_to_response(
        'admin_tools/dashboard/preferences_form.html',
        {'form': form}
    )
This view serves and validates a preferences form.
def _strip_version_from_dependency(dep): usedmark = '' for mark in '< > ='.split(): split = dep.split(mark) if len(split) > 1: usedmark = mark break if usedmark: return split[0].strip() else: return dep.strip()
For given dependency string, return only the package name
def get_invalid_mailbox(value, endchars):
    """Read everything up to one of the chars in endchars.

    This is outside the formal grammar. The InvalidMailbox TokenList
    that is returned acts like a Mailbox, but the data attributes are
    None.
    """
    invalid_mailbox = InvalidMailbox()
    while value and value[0] not in endchars:
        if value[0] in PHRASE_ENDS:
            # A phrase cannot start with a special here; record it as a
            # misplaced-special terminal and consume one character.
            invalid_mailbox.append(ValueTerminal(value[0], 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_mailbox.append(token)
    return invalid_mailbox, value
Read everything up to one of the chars in endchars. This is outside the formal grammar. The InvalidMailbox TokenList that is returned acts like a Mailbox, but the data attributes are None.