code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def free(self):
    """Free the underlying native map and update allocation accounting.

    Safe to call more than once: a no-op after the pointer is cleared.
    """
    # Already freed (or never allocated) -- nothing to do.
    if self._ptr is None: return
    # NOTE(review): guards on the private `self._ptr` but frees via
    # `self.ptr` -- presumably `ptr` is a property over `_ptr`; confirm,
    # otherwise this is an inconsistency.
    Gauged.map_free(self.ptr)
    SparseMap.ALLOCATIONS -= 1
    self._ptr = None
Free the map
def filter_event(tag, data, defaults):
    """Filter ``data`` down to the keys configured for ``tag``.

    Looks ``tag`` up in the ``filter_events`` cloud configuration; the
    configured keys are merged with ``defaults`` (unless ``use_defaults``
    is disabled for that tag), and only keys present in ``data`` are kept.
    """
    keys = []
    use_defaults = True
    # Find a matching tag among the configured event filters.
    for ktag, conf in __opts__.get('filter_events', {}).items():
        if ktag != tag:
            continue
        keys = conf['keys']
        use_defaults = conf.get('use_defaults', True)
    if use_defaults is False:
        defaults = []
    if not isinstance(defaults, list):
        defaults = list(defaults)
    # Union of defaults and configured keys, projected onto `data`.
    wanted = set(defaults) | set(keys)
    return {key: data[key] for key in wanted if key in data}
Accept a tag, a dict and a list of default keys to return from the dict, and check them against the cloud configuration for that tag
def get_events_for_object(self, content_object, distinction='', inherit=True):
    """Return a queryset of events related to ``content_object``.

    Events are matched through direct ``EventRelation`` rows and, when
    ``inherit`` is True, additionally through inheritable
    ``CalendarRelation`` rows of any calendar the events belong to.
    When ``distinction`` is given, only relations carrying that
    distinction are considered.
    """
    ct = ContentType.objects.get_for_model(type(content_object))
    dist_q = Q(eventrelation__distinction=distinction) if distinction else Q()
    cal_dist_q = (Q(calendar__calendarrelation__distinction=distinction)
                  if distinction else Q())
    inherit_q = Q()
    if inherit:
        # Inherited relations must be flagged inheritable on the calendar.
        inherit_q = Q(
            cal_dist_q,
            calendar__calendarrelation__content_type=ct,
            calendar__calendarrelation__object_id=content_object.id,
            calendar__calendarrelation__inheritable=True,
        )
    event_q = Q(
        dist_q,
        eventrelation__content_type=ct,
        eventrelation__object_id=content_object.id,
    )
    return Event.objects.filter(inherit_q | event_q)
returns a queryset full of events, that relate to the object through, the distinction If inherit is false it will not consider the calendars that the events belong to. If inherit is true it will inherit all of the relations and distinctions that any calendar that it belongs to has, as long as the relation has inheritable set to True. (See Calendar) >>> event = Event.objects.get(title='Test1') >>> user = User.objects.get(username = 'alice') >>> EventRelation.objects.get_events_for_object(user, 'owner', inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] If a distinction is not declared it will not vet the relations based on distinction. >>> EventRelation.objects.get_events_for_object(user, inherit=False) [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>] Now if there is a Calendar >>> calendar = Calendar(name = 'MyProject') >>> calendar.save() And an event that belongs to that calendar >>> event = Event.objects.get(title='Test2') >>> calendar.events.add(event) If we relate this calendar to some object with inheritable set to true, that relation will be inherited >>> user = User.objects.get(username='bob') >>> cr = calendar.create_relation(user, 'viewer', True) >>> EventRelation.objects.get_events_for_object(user, 'viewer') [<Event: Test1: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>, <Event: Test2: Tuesday, Jan. 1, 2008-Friday, Jan. 11, 2008>]
def _altair_hline_(self, xfield, yfield, opts, style, encode):
    """Draw a horizontal line at the mean of ``yfield`` as an Altair chart.

    ``yfield`` may carry an Altair type suffix (e.g. ``"col:Q"``); only
    the raw column name is used to compute the mean.  A temporary
    ``"Mean"`` column is added to the dataframe for plotting and dropped
    afterwards.
    """
    try:
        # Strip an optional Altair shorthand suffix ("col:Q" -> "col").
        rawy = yfield.split(":")[0] if ":" in yfield else yfield
        mean = self.df[rawy].mean()
        # Constant column of the mean -- replaces the original
        # while-loop-with-append construction.
        self.df["Mean"] = [mean] * len(self.df[rawy])
        chart = Chart(self.df).mark_line(**style).encode(
            x=xfield, y="Mean", **encode).properties(**opts)
        self.drop("Mean")
        return chart
    except Exception as e:
        self.err(e, "Can not draw mean line chart")
Get a mean line chart
def get_value(self, key):
    """Extract the last measured value for ``key``.

    Tries every known title alias for the key (plus the key itself)
    against the sensor entries; returns the first match or None.
    """
    for title in _TITLES.get(key, ()) + (key,):
        matches = [entry['lastMeasurement']['value']
                   for entry in self.data['sensors']
                   if entry['title'] == title]
        if matches:
            return matches[0]
    return None
Extract a value for a given key.
def wave(self, wavelength):
    """Set the wavelength array, recording its unit.

    Parameters
    ----------
    wavelength: astropy.units.quantity.Quantity
        The wavelength array with units.

    Raises
    ------
    ValueError
        When ``wavelength`` is not an astropy Quantity.
    """
    if isinstance(wavelength, q.quantity.Quantity):
        self._wave = wavelength
        self.wave_units = wavelength.unit
    else:
        raise ValueError("Wavelength must be in length units.")
A setter for the wavelength Parameters ---------- wavelength: astropy.units.quantity.Quantity The array with units
def list_resource_groups(access_token, subscription_id):
    """List the resource groups in an Azure subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response from the Resource Manager endpoint.
    """
    endpoint = '{}/subscriptions/{}/resourceGroups/?api-version={}'.format(
        get_rm_endpoint(), subscription_id, RESOURCE_API)
    return do_get(endpoint, access_token)
List the resource groups in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response.
def _get_contigs_to_keep(self, filename): if filename is None: return set() with open(filename) as f: return {line.rstrip() for line in f}
Returns a set of names from file called filename. If filename is None, returns an empty set
async def read(cls, node, block_device):
    """Get the list of `Partition`s for ``node`` and ``block_device``.

    Args:
        node: a ``Node`` instance or a system-id string.
        block_device: a ``BlockDevice`` instance or a device id (int).

    Raises:
        TypeError: when either argument has the wrong type.
    """
    if isinstance(node, str):
        system_id = node
    elif isinstance(node, Node):
        system_id = node.system_id
    else:
        raise TypeError(
            "node must be a Node or str, not %s" % type(node).__name__)
    if isinstance(block_device, int):
        device_id = block_device
    elif isinstance(block_device, BlockDevice):
        device_id = block_device.id
    else:
        # Bug fix: the original message wrongly referred to `node`.
        raise TypeError(
            "block_device must be a BlockDevice or int, not %s"
            % type(block_device).__name__)
    data = await cls._handler.read(
        system_id=system_id, device_id=device_id)
    return cls(
        cls._object(item) for item in data)
Get a list of `Partition`s for `node` and `block_device`.
def _auto_unlock_key_position(self):
    """Find the position of the "opensesame" password item in the default
    keyring, or None when it is not present."""
    for pos in gkr.list_item_ids_sync(self.default_keyring):
        item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos)
        # `dict.has_key` was removed in Python 3; `.get` is equivalent here.
        if item_attrs.get('application') == "opensesame":
            return pos
    return None
Find the open sesame password in the default keyring
def set_cumulative(self, cumulative):
    """Applies this rule to all previous assessment parts.

    arg:    cumulative (boolean): ``true`` to apply to all previous
            assessment parts, ``false`` to apply to the immediate
            previous assessment part
    raise:  InvalidArgument - ``cumulative`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_cumulative_metadata()
    if metadata.is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_boolean(cumulative):
        raise errors.InvalidArgument()
    self._my_map['cumulative'] = cumulative
Applies this rule to all previous assessment parts. arg: cumulative (boolean): ``true`` to apply to all previous assessment parts. ``false`` to apply to the immediate previous assessment part raise: InvalidArgument - ``cumulative`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def connect_s3_bucket_to_lambda(self, bucket, function_arn, events,
                                prefix=None, suffix=None):
    """Configure an S3 bucket to invoke a lambda function.

    The bucket must already grant the lambda permission to be invoked
    (see ``add_permission_for_s3_event``), otherwise the service call
    fails.  ``events`` uses the event strings supported by the service.
    Only a single prefix/suffix filter is supported, matching what the
    Lambda console offers.
    """
    s3 = self._client('s3')
    # Read-modify-write: fetch the existing notification configuration so
    # non-lambda notifications on the bucket are preserved.
    existing_config = s3.get_bucket_notification_configuration(
        Bucket=bucket)
    existing_config.pop('ResponseMetadata', None)
    existing_lambda_config = existing_config.get(
        'LambdaFunctionConfigurations', [])
    single_config = {
        'LambdaFunctionArn': function_arn, 'Events': events
    }
    filter_rules = []
    if prefix is not None:
        filter_rules.append({'Name': 'Prefix', 'Value': prefix})
    if suffix is not None:
        filter_rules.append({'Name': 'Suffix', 'Value': suffix})
    if filter_rules:
        single_config['Filter'] = {'Key': {'FilterRules': filter_rules}}
    # Merge with the existing lambda configurations (helper handles dedup).
    new_config = self._merge_s3_notification_config(existing_lambda_config,
                                                    single_config)
    existing_config['LambdaFunctionConfigurations'] = new_config
    s3.put_bucket_notification_configuration(
        Bucket=bucket,
        NotificationConfiguration=existing_config,
    )
Configure S3 bucket to invoke a lambda function. The S3 bucket must already have permission to invoke the lambda function before you call this function, otherwise the service will return an error. You can add permissions by using the ``add_permission_for_s3_event`` below. The ``events`` param matches the event strings supported by the service. This method also only supports a single prefix/suffix for now, which is what's offered in the Lambda console.
def _buffer_decode(self, input, errors, final): decoded_segments = [] position = 0 while True: decoded, consumed = self._buffer_decode_step( input[position:], errors, final ) if consumed == 0: break decoded_segments.append(decoded) position += consumed if final: assert position == len(input) return ''.join(decoded_segments), position
Decode bytes that may be arriving in a stream, following the Codecs API. `input` is the incoming sequence of bytes. `errors` tells us how to handle errors, though we delegate all error-handling cases to the real UTF-8 decoder to ensure correct behavior. `final` indicates whether this is the end of the sequence, in which case we should raise an error given incomplete input. Returns as much decoded text as possible, and the number of bytes consumed.
def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList':
    "Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name."
    pat = re.compile(pat)
    def _inner(o):
        # NOTE(review): when `full_path` is True, `os.path.join` returns a
        # plain str, which has no `.as_posix()` -- presumably `o` is a Path
        # and a Path join was intended; confirm upstream item types.
        s = str((os.path.join(self.path,o) if full_path else o).as_posix())
        res = pat.search(s)
        # Label extraction requires the pattern's first capture group.
        assert res,f'Failed to find "{pat}" in "{s}"'
        return res.group(1)
    return self.label_from_func(_inner, label_cls=label_cls, **kwargs)
Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name.
def group_device_names(devices, group_size):
    """Group device names into lists of length ``group_size``.

    Devices are assigned round-robin, so each device appears at least
    once; when ``len(devices)`` is a multiple of ``group_size`` each
    device appears exactly once.

    Args:
        devices: list of strings naming devices.
        group_size: int >= 1

    Returns:
        list of lists of devices, each of length ``group_size``.

    Raises:
        ValueError: if ``group_size`` exceeds the number of devices.
    """
    num_devices = len(devices)
    if group_size > num_devices:
        raise ValueError(
            "only %d devices, but group_size=%d" % (num_devices, group_size))
    num_groups = -(-num_devices // group_size)  # ceiling division
    groups = [[] for _ in range(num_groups)]
    for i in range(num_groups * group_size):
        groups[i % num_groups].append(devices[i % num_devices])
    return groups
Group device names into groups of group_size. Args: devices: list of strings naming devices. group_size: int >= 1 Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size = 0 then each device will appear exactly once. Raises: ValueError: group_size > len(devices)
def get_email(self, token):
    """Fetch the user's email address from the emails API endpoint.

    Prefers the address flagged ``is_primary``; falls back to the first
    listed address; returns '' when nothing usable is returned.
    """
    resp = requests.get(self.emails_url, params={'access_token': token.token})
    emails = resp.json().get('values', [])
    try:
        # Fallback: first listed address (original used a return-in-finally
        # that silently overrode the except branch; made explicit here).
        email = emails[0].get('email')
        primary = [e for e in emails if e.get('is_primary', False)]
        if primary:
            email = primary[0].get('email')
        return email
    except (IndexError, TypeError, KeyError):
        return ''
Fetches email address from email API endpoint
def _random_ipv4_address_from_subnet(self, subnet, network=False): address = str( subnet[self.generator.random.randint( 0, subnet.num_addresses - 1, )], ) if network: address += '/' + str(self.generator.random.randint( subnet.prefixlen, subnet.max_prefixlen, )) address = str(ip_network(address, strict=False)) return address
Produces a random IPv4 address or network with a valid CIDR from within a given subnet. :param subnet: IPv4Network to choose from within :param network: Return a network address, and not an IP address
def color_normalize(src, mean, std=None):
    """Normalize ``src`` with ``mean`` and ``std``.

    Parameters
    ----------
    src : NDArray
        Input image.
    mean : NDArray or None
        RGB mean to be subtracted; skipped when None.
    std : NDArray or None
        RGB standard deviation to be divided; skipped when None.

    Returns
    -------
    NDArray
        The normalized image -- the same (mutated) array as ``src``.
    """
    # Augmented assignment keeps the operation in place on the input array.
    if mean is not None:
        src -= mean
    if std is not None:
        src /= std
    return src
Normalize src with mean and std. Parameters ---------- src : NDArray Input image mean : NDArray RGB mean to be subtracted std : NDArray RGB standard deviation to be divided Returns ------- NDArray An `NDArray` containing the normalized image.
def get_site_decorator(site_param='site', obj_param='obj', context_param='context'):
    """Return a decorator factory for PluggableSite views.

    The produced decorator pops the site from the view kwargs, resolves
    the object the site is attached to, builds the common template
    context, and passes all three (plus any factory keyword arguments)
    into the wrapped view under the configured parameter names.
    """
    def site_method(**extra_params):
        def decorator(fn):
            @wraps(fn)
            def wrapper(request, **kwargs):
                try:
                    site = kwargs.pop(site_param)
                except KeyError:
                    raise ValueError("'%s' parameter must be passed to "
                                     "decorated view (%s)" % (site_param, fn))
                # Factory kwargs act as defaults, overridable per-request.
                params={}
                for key in extra_params:
                    value = kwargs.pop(key, extra_params[key])
                    params.update({key:value})
                # Remaining kwargs identify the object the site wraps.
                try:
                    obj = site.object_getter(**kwargs)
                except models.ObjectDoesNotExist:
                    raise Http404("Base object does not exist.")
                context = site.get_common_context(obj)
                context_instance = RequestContext(request, context,
                                                  processors=site.context_processors)
                params.update({
                    site_param:site,
                    obj_param: obj,
                    context_param: context_instance
                })
                return fn(request, **params)
            return wrapper
        return decorator
    return site_method
It is a function that returns decorator factory useful for PluggableSite views. This decorator factory returns decorator that do some boilerplate work and make writing PluggableSite views easier. It passes PluggableSite instance to decorated view, retreives and passes object that site is attached to and passes common context. It also passes and all the decorator factory's keyword arguments. For example usage please check photo_albums.views. Btw, this decorator seems frightening for me. It feels that "views as PluggableSite methods" approach can easily make this decorator obsolete. But for now it just works.
def onEnable(self):
    """The configuration containing this function has been enabled by the
    host.  Endpoints become working files, so submit read operations.
    """
    trace('onEnable')
    # Tear down any previous state before re-arming.
    self._disable()
    # Queue the block-list read on the AIO context.
    self._aio_context.submit(self._aio_recv_block_list)
    self._real_onCanSend()
    self._enabled = True
The configuration containing this function has been enabled by host. Endpoints become working files, so submit some read operations.
def set_margins(self, left, top, right=-1):
    """Set the left, top and right margins.

    When ``right`` is -1 (the default) it mirrors ``left``.
    """
    self.l_margin = left
    self.t_margin = top
    self.r_margin = left if right == -1 else right
Set left, top and right margins
def write_header(self):
    """Write every header field to ``self.file`` at its recorded offset.

    Each header entry supplies a ``value`` (an array written via
    ``tofile``) and a byte ``offset`` into the file.

    See Also
    --------
    write_data
    """
    for field in self.header.values():
        self.file.seek(int(field['offset']))
        field['value'].tofile(self.file)
Write `header` to `file`. See Also -------- write_data
def performance_view(dstore):
    """Return the performance view as a numpy array (dtype ``perf_dt``).

    Rows are aggregated per operation: total time, peak memory and total
    counts, sorted by descending time.
    """
    records = sorted(dstore['performance_data'], key=operator.itemgetter(0))
    rows = []
    for operation, group in itertools.groupby(records,
                                              operator.itemgetter(0)):
        total_counts = 0
        total_time = 0
        peak_mem = 0
        for _op, time_sec, memory_mb, counts in group:
            total_counts += counts
            total_time += time_sec
            peak_mem = max(peak_mem, memory_mb)
        rows.append((operation, total_time, peak_mem, total_counts))
    rows.sort(key=operator.itemgetter(1), reverse=True)
    return numpy.array(rows, perf_dt)
Returns the performance view as a numpy array.
def receive_message(self):
    """Receive one message from the CGI transport.

    Only POST is accepted; any other method prints a 405 status line and
    exits the process.  Reads ``CONTENT_LENGTH`` bytes from stdin and
    URL-unquotes them.

    :return: a ``(context, message)`` tuple; the context is always None
        for this transport.
    """
    if not ('REQUEST_METHOD' in os.environ
            and os.environ['REQUEST_METHOD'] == 'POST'):
        print("Status: 405 Method not Allowed; only POST is accepted")
        exit(0)
    content_length = int(os.environ['CONTENT_LENGTH'])
    request_json = sys.stdin.read(content_length)
    # NOTE(review): `urlparse.unquote` is the Python 2 location; on
    # Python 3 this would be `urllib.parse.unquote` -- confirm target.
    request_json = urlparse.unquote(request_json)
    return None, request_json
Receive a message from the transport. Blocks until a message has been received. May return a context opaque to clients that should be passed to :py:func:`send_reply` to identify the client later on. :return: A tuple consisting of ``(context, message)``.
def cartesian_square_centred_on_point(self, point, distance, **kwargs):
    """Select earthquakes within a square centred on ``point``.

    :param point: centre as an ``nhlib.geo.point.Point`` instance
    :param distance: half-width of the square (km)
    :returns: catalogue containing only the selected events
    """
    point_surface = Point(point.longitude, point.latitude, 0.)
    north_point = point_surface.point_at(distance, 0., 0.)
    east_point = point_surface.point_at(distance, 0., 90.)
    south_point = point_surface.point_at(distance, 0., 180.)
    west_point = point_surface.point_at(distance, 0., 270.)
    longitude = self.catalogue.data['longitude']
    latitude = self.catalogue.data['latitude']
    depth = self.catalogue.data['depth']
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    # Bug fix: the original passed three positional arguments to
    # np.logical_and, so the third argument was treated as the `out`
    # array and the northern / lower-depth bounds were never applied.
    is_valid = np.logical_and.reduce([
        longitude >= west_point.longitude,
        longitude < east_point.longitude,
        latitude >= south_point.latitude,
        latitude < north_point.latitude,
        depth >= upper_depth,
        depth < lower_depth,
    ])
    return self.select_catalogue(is_valid)
Select earthquakes from within a square centered on a point :param point: Centre point as instance of nhlib.geo.point.Point class :param distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` class containing only selected events
def get_token(url: str, scopes: str, credentials_dir: str) -> dict:
    """Get access token info for the 'lizzy' token.

    Configures the tokens library with the OAuth endpoint and credentials
    directory, registers the scope, starts the refresher and returns the
    managed token.
    """
    tokens.configure(url=url, dir=credentials_dir)
    tokens.manage('lizzy', [scopes])
    tokens.start()
    return tokens.get('lizzy')
Get access token info.
def link_label(link):
    """Return a label for ``link``: its ``label`` attribute if present,
    otherwise its 1-based link number as a string."""
    if hasattr(link, 'label'):
        return link.label
    return str(link.linknum + 1)
return a link label as a string
def generate_data_for_env_problem(problem_name):
    """Generate data for an ``EnvProblem`` by playing it randomly.

    Requires the --env_problem_max_env_steps and --env_problem_batch_size
    flags to be positive.
    """
    assert FLAGS.env_problem_max_env_steps > 0, (
        "--env_problem_max_env_steps should be greater than zero")
    # Typo fix in the message: "greather" -> "greater".
    assert FLAGS.env_problem_batch_size > 0, (
        "--env_problem_batch_size should be greater than zero")
    problem = registry.env_problem(problem_name)
    task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
    data_dir = os.path.expanduser(FLAGS.data_dir)
    tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
    problem.initialize(batch_size=FLAGS.env_problem_batch_size)
    env_problem_utils.play_env_problem_randomly(
        problem, num_steps=FLAGS.env_problem_max_env_steps)
    problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
Generate data for `EnvProblem`s.
def _merge_assets_key_collection(saved_model_proto, path):
    """Merge the ASSETS_KEY collection into the GraphDefs in
    ``saved_model_proto``.

    Removes the ASSETS_KEY collection from each MetaGraph and rewrites
    the nodes that held asset filenames to point at the assets under
    ``path``, so the GraphDefs can be used without feeding asset tensors.

    Args:
        saved_model_proto: SavedModel proto to be modified.
        path: path where the SavedModel is being loaded from.
    """
    for meta_graph in saved_model_proto.meta_graphs:
        node_asset_map = {}
        if tf_v1.saved_model.constants.ASSETS_KEY in meta_graph.collection_def:
            # Unpack each Any proto into an AssetFileDef and record which
            # graph node should receive the resolved asset path.
            assets_any_proto = meta_graph.collection_def[
                tf_v1.saved_model.constants.ASSETS_KEY].any_list.value
            for asset_any_proto in assets_any_proto:
                asset_proto = meta_graph_pb2.AssetFileDef()
                asset_any_proto.Unpack(asset_proto)
                asset_filename = _get_asset_filename(path, asset_proto.filename)
                node_asset_map[_get_node_name_from_tensor(
                    asset_proto.tensor_info.name)] = asset_filename
            del meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY]
        # Rewrite the const nodes that carried asset filenames in place.
        for node in meta_graph.graph_def.node:
            asset_filepath = node_asset_map.get(node.name)
            if asset_filepath:
                _check_asset_node_def(node)
                node.attr["value"].tensor.string_val[0] = asset_filepath
Merges the ASSETS_KEY collection into the GraphDefs in saved_model_proto. Removes the ASSETS_KEY collection from the GraphDefs in the SavedModel and modifies nodes with the assets filenames to point to the assets in `path`. After this transformation, the SavedModel GraphDefs can be used without feeding asset tensors. Args: saved_model_proto: SavedModel proto to be modified. path: path where the SavedModel is being loaded from.
def remove(package_name):
    """Remove an installed holodeck package.

    Args:
        package_name (str): the name of the package to remove

    Raises:
        HolodeckException: if the name is not a known package.
    """
    if package_name not in packages:
        raise HolodeckException("Unknown package name " + package_name)
    matching = (path for config, path in _iter_packages()
                if config["name"] == package_name)
    for path in matching:
        shutil.rmtree(path)
Removes a holodeck package. Args: package_name (str): the name of the package to remove
def date_parser(items):
    """Parse a datetime string from an smp file.

    Tries the day-first then month-first "%d/%m/%Y %H:%M:%S" formats.

    Parameters
    ----------
    items : str
        datetime string to parse

    Returns
    -------
    dt : datetime
        the parsed datetime
    """
    failures = []
    for fmt in ("%d/%m/%Y %H:%M:%S", "%m/%d/%Y %H:%M:%S"):
        try:
            return datetime.strptime(items, fmt)
        except Exception as exc:
            failures.append(exc)
    raise Exception("error parsing datetime string" +
                    " {0}: \n{1}\n{2}".format(str(items), str(failures[0]),
                                              str(failures[1])))
datetime parser to help load smp files Parameters ---------- items : str a datetime string to parse Returns ------- dt : datetime the parsed datetime
def processPhoneList(platformNames=None, numbers=None, excludePlatformNames=None):
    """Perform phonefy searches on a series of numbers.

    Args:
        platformNames: list of platform names to search (default: none).
        numbers: list of numbers to be queried (default: none).
        excludePlatformNames: list of platforms not to be searched.

    Returns:
        A list of verified results.
    """
    # Avoid mutable default arguments (shared between calls).
    platformNames = [] if platformNames is None else platformNames
    numbers = [] if numbers is None else numbers
    excludePlatformNames = ([] if excludePlatformNames is None
                            else excludePlatformNames)
    platforms = platform_selection.getPlatformsByName(
        platformNames, mode="phonefy",
        excludePlatformNames=excludePlatformNames)
    results = []
    for num in numbers:
        for pla in platforms:
            entities = pla.getInfo(query=num, process=True, mode="phonefy")
            if entities != {}:
                results += json.loads(entities)
    return results
Method to perform searches on a series of numbers. Args: ----- platformNames: List of names of the platforms. numbers: List of numbers to be queried. excludePlatformNames: A list of platforms not to be searched. Return: ------- A list of verified results for the queried numbers.
def is_not_from_subdomain(self, response, site_dict):
    """Return True when the response URL's allowed domain equals the
    site's root URL, i.e. the response is not from a subdomain.

    :param obj response: the scrapy response
    :param dict site_dict: the site object from the JSON file
    """
    root_url = re.sub(re_url_root, '', site_dict["url"])
    allowed = UrlExtractor.get_allowed_domain(response.url)
    return allowed == root_url
Ensures the response's url isn't from a subdomain. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines if the response's url is from a subdomain
def _on_write(self, sender, *args, **kwargs): self.on_write(data=kwargs.get('data', None))
Internal handler for writing to the device.
def unicode2str(content):
    """Recursively encode every text element of ``content`` to UTF-8.

    Dicts and lists are walked recursively; ints and floats pass through
    unchanged; everything else is ``.encode("utf-8")``-ed.
    """
    if isinstance(content, dict):
        return {unicode2str(key): unicode2str(value)
                for key, value in content.items()}
    if isinstance(content, list):
        return [unicode2str(item) for item in content]
    if isinstance(content, int) or isinstance(content, float):
        return content
    return content.encode("utf-8")
Convert the unicode element of the content to str recursively.
def exact(self, *args, **kwargs):
    """Compare attributes of pairs exactly.

    Shortcut for adding a :class:`recordlinkage.compare.Exact`
    comparison; returns ``self`` for chaining.
    """
    self.add(Exact(*args, **kwargs))
    return self
Compare attributes of pairs exactly. Shortcut of :class:`recordlinkage.compare.Exact`:: from recordlinkage.compare import Exact indexer = recordlinkage.Compare() indexer.add(Exact())
def _get(self, key, section=None, default=_onion_dict_guard):
    """Look ``key`` up in the named section first (when given), then in
    each layered dict in turn; raise KeyError (or return ``default``
    when supplied) if absent everywhere."""
    if section is not None:
        section_dict = self.__sections.get(section, {})
        if key in section_dict:
            return section_dict[key]
    for layer in self.__dictionaries:
        if key in layer:
            return layer[key]
    if default is _onion_dict_guard:
        raise KeyError(key)
    return default
Try to get the key from each dict in turn. If you specify the optional section it looks there first.
def _bool_encode(self, d): for k, v in d.items(): if isinstance(v, bool): d[k] = str(v).lower() return d
Converts bool values to lowercase strings
def getlist(self, section, option, raw=False, vars=None, fallback=[],
            delimiters=','):
    """Coerce the option in ``section`` to a list of strings.

    The raw value is fetched with :meth:`get` and split on
    ``delimiters``.
    """
    # NOTE(review): the mutable default [] is shared across calls; it is
    # never mutated here, so it is harmless.
    raw_value = self.get(section, option, raw=raw, vars=vars,
                         fallback=fallback)
    return self._convert_to_list(raw_value, delimiters=delimiters)
A convenience method which coerces the option in the specified section to a list of strings.
def disposal_date(self):
    """Return the date the retention period ends for this sample, based
    on the retention period from the Sample Type.  Returns None when the
    sample has not been collected yet.
    """
    date_sampled = self.getDateSampled()
    if not date_sampled:
        return None
    # Retention period is a mapping with "days"/"hours"/"minutes" keys.
    retention_period = self.getSampleType().getRetentionPeriod() or {}
    retention_period_delta = timedelta(
        days=int(retention_period.get("days", 0)),
        hours=int(retention_period.get("hours", 0)),
        minutes=int(retention_period.get("minutes", 0))
    )
    # DT2dt/dt2DT convert between Zope DateTime and python datetime.
    return dt2DT(DT2dt(date_sampled) + retention_period_delta)
Returns the date the retention period ends for this sample based on the retention period from the Sample Type. If the sample hasn't been collected yet, returns None
def figures(df, specs, asList=False):
    """Generate multiple Plotly figures for a given DataFrame.

    Parameters
    ----------
    df : DataFrame
        Pandas DataFrame
    specs : list(dict)
        properties for each figure (see help(cufflinks.pd.DataFrame.iplot))
    asList : boolean
        when True return the list of figures, otherwise a single merged
        figure (default: False)
    """
    figs = [df.figure(**spec) for spec in specs]
    return figs if asList else merge_figures(figs)
Generates multiple Plotly figures for a given DataFrame Parameters: ----------- df : DataFrame Pandas DataFrame specs : list(dict) List of dictionaries with the properties of each figure. All properties avaialbe can be seen with help(cufflinks.pd.DataFrame.iplot) asList : boolean If True, then a list of figures is returned. Otherwise a single (merged) figure is returned. Default : False
def GetItemContainerInfo(self_link, alt_content_path, id_from_response):
    """Extract the collection rid and collection name from a response.

    Every response header has an alt-content-path that is the owner's
    path in ascii.  For document create/update requests this yields the
    collection name directly, but for collection-create responses the
    name must be rebuilt using ``id_from_response``.

    :param str self_link: Self link of the resource (response result).
    :param str alt_content_path: Owner path (response header).
    :param str id_from_response: 'id' from the response result; only used
        when the request is deduced to be a collection create.
    :return: tuple of (collection rid, collection name)
    :rtype: tuple
    """
    self_link = TrimBeginningAndEndingSlashes(self_link) + '/'
    index = IndexOfNth(self_link, '/', 4)
    if index != -1:
        # The first four path segments identify the collection rid.
        collection_id = self_link[0:index]
        if 'colls' in self_link:
            # Fewer than two slashes in alt_content_path means the
            # response was a collection create: append the new id.
            index_second_slash = IndexOfNth(alt_content_path, '/', 2)
            if index_second_slash == -1:
                collection_name = alt_content_path + '/colls/' + urllib_quote(id_from_response)
                return collection_id, collection_name
            else:
                collection_name = alt_content_path
                return collection_id, collection_name
        else:
            raise ValueError('Response Not from Server Partition, self_link: {0}, alt_content_path: {1}, id: {2}'
                             .format(self_link, alt_content_path, id_from_response))
    else:
        raise ValueError('Unable to parse document collection link from ' + self_link)
Given the self link and alt_content_path from the reponse header and result extract the collection name and collection id Ever response header has alt-content-path that is the owner's path in ascii. For document create / update requests, this can be used to get the collection name, but for collection create response, we can't use it. So we also rely on :param str self_link: Self link of the resource, as obtained from response result. :param str alt_content_path: Owner path of the resource, as obtained from response header. :param str resource_id: 'id' as returned from the response result. This is only used if it is deduced that the request was to create a collection. :return: tuple of (collection rid, collection name) :rtype: tuple
def set_process(self, process = None):
    """Manually set the parent Process object.  Use with care!

    @type  process: L{Process}
    @param process: (Optional) Process object. Use C{None} for no process.
    """
    if process is None:
        self.dwProcessId = None
        self.__process = None
        return
    self.__load_Process_class()
    if not isinstance(process, Process):
        raise TypeError("Parent process must be a Process instance, "
                        "got %s instead" % type(process))
    self.dwProcessId = process.get_pid()
    self.__process = process
Manually set the parent Process object. Use with care! @type process: L{Process} @param process: (Optional) Process object. Use C{None} for no process.
def add_grant(self, grant):
    """Add a Grant that the provider should support.

    Propagates the grant's ``expires_in`` / ``refresh_expires_in``
    settings (when present) to the token generator.

    :param grant: An instance of a class that extends
        :class:`oauth2.grant.GrantHandlerFactory`
    :type grant: oauth2.grant.GrantHandlerFactory
    """
    token_gen = self.token_generator
    if hasattr(grant, "expires_in"):
        token_gen.expires_in[grant.grant_type] = grant.expires_in
    if hasattr(grant, "refresh_expires_in"):
        token_gen.refresh_expires_in = grant.refresh_expires_in
    self.grant_types.append(grant)
Adds a Grant that the provider should support. :param grant: An instance of a class that extends :class:`oauth2.grant.GrantHandlerFactory` :type grant: oauth2.grant.GrantHandlerFactory
def estimate(self, maxiter=250, convergence=1e-7):
    """Run the EM algorithm until convergence or ``maxiter`` iterations.

    Tracks the log-likelihood per iteration in ``self.loglik``; on
    convergence the array is truncated to the iterations actually run.
    """
    self.loglik = np.zeros(maxiter)
    it = 0  # renamed from `iter` to stop shadowing the builtin
    while it < maxiter:
        self.loglik[it] = self.E_step()
        if np.isnan(self.loglik[it]):
            print("undefined log-likelihood")
            break
        self.M_step()
        # Guard first so loglik[it-1] is never touched at iteration 0
        # (the original evaluated loglik[-1] before checking iter > 0).
        if it > 0:
            delta = self.loglik[it] - self.loglik[it - 1]
            if delta < 0:
                print("log-likelihood decreased by %f at iteration %d"
                      % (delta, it))
            elif delta < convergence:
                print("convergence at iteration %d, loglik = %f"
                      % (it, self.loglik[it]))
                self.loglik = self.loglik[self.loglik < 0]
                break
        it += 1
run EM algorithm until convergence, or until maxiter reached
def get_periodic_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5,
                             crossOver=True, all_atomtypes=None, eta=1.0):
    """RBF-basis SOAP output for the given positions in a periodic system.

    Builds a supercell large enough for the radial cutoff and delegates
    to :func:`get_soap_locals`.

    Args:
        obj(ase.Atoms): atomic structure to compute the SOAP output for.
        Hpos: positions of interest.
        alp: Alphas.  bet: Betas.
        rCut: radial cutoff.  nMax: max radial basis functions.
        Lmax: max spherical harmonics degree.
        crossOver: cross-over terms flag.
        all_atomtypes: restrict output to these atomic elements.
        eta: gaussian smearing width.

    Returns:
        np.ndarray: SOAP output for the given positions.
    """
    supercell = _get_supercell(obj, rCut)
    return get_soap_locals(supercell, Hpos, alp, bet, rCut, nMax=nMax,
                           Lmax=Lmax, crossOver=crossOver,
                           all_atomtypes=all_atomtypes, eta=eta)
Get the RBF basis SOAP output for the given position in a periodic system. Args: obj(ase.Atoms): Atomic structure for which the SOAP output is calculated. alp: Alphas bet: Betas rCut: Radial cutoff. nMax: Maximum nmber of radial basis functions Lmax: Maximum spherical harmonics degree crossOver: all_atomtypes: Can be used to specify the atomic elements for which to calculate the output. If given the output is calculated only for the given species. eta: The gaussian smearing width. Returns: np.ndarray: SOAP output for the given position.
def get_network_versions(self, name: str) -> Set[str]:
    """Return all of the versions of networks with the given name."""
    rows = self.session.query(Network.version).filter(
        Network.name == name).all()
    return {version for (version,) in rows}
Return all of the versions of a network with the given name.
def rescale_around1(self, times):
    """Suggest a rescaling factor and new time unit so the given time
    multiples balance around 1.

    Parameters
    ----------
    times : float array
        array of times in multiples of the present elementary unit
    """
    # Step-counted timings have no physical unit to rescale.
    if self._unit == self._UNIT_STEP:
        return times, 'step'
    m = np.mean(times)
    mult = 1.0
    cur_unit = self._unit  # integer index into self._unit_names
    if (m < 0.001):
        # Too small: move to a finer unit (x1000 per step down).
        while mult*m < 0.001 and cur_unit >= 0:
            mult *= 1000
            cur_unit -= 1
        return mult*times, self._unit_names[cur_unit]
    if (m > 1000):
        # Too large: move to a coarser unit (/1000 per step up).
        while mult*m > 1000 and cur_unit <= 5:
            mult /= 1000
            cur_unit += 1
        return mult*times, self._unit_names[cur_unit]
    # NOTE(review): this branch returns the unit *index* (self._unit)
    # while the branches above return a unit *name* -- confirm whether
    # self._unit_names[self._unit] was intended here.
    return times, self._unit
Suggests a rescaling factor and new physical time unit to balance the given time multiples around 1. Parameters ---------- times : float array array of times in multiple of the present elementary unit
def gt(name, value):
    """Only succeed if the value in the given register location is
    greater than the given value.

    USAGE (thorium state):

    .. code-block:: yaml

        foo:
          check.gt:
            - value: 42
    """
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    if name not in __reg__:
        ret['comment'] = 'Value {0} not in register'.format(name)
        return ret
    ret['result'] = __reg__[name]['val'] > value
    return ret
Only succeed if the value in the given register location is greater than the given value USAGE: .. code-block:: yaml foo: check.gt: - value: 42 run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: foo
def handle_collect(self, msg):
    """Handle an incoming collect message and send back the ack."""
    success, sequence_number, comment = self._handle_collect(msg)
    reply = (msgs.MessageWriter()
             .bool(success)
             .uint64(sequence_number)
             .string(comment)
             .get())
    self.collector.send_multipart(reply)
handle an incoming message
def eliminate(self, node, data):
    """Resolve source ``node`` with ``data``, propagating to its checks.

    XORs the data into every check that referenced the node; any check
    left with a single source node is itself resolved, yielding
    ``(node, data)`` pairs for further elimination.
    """
    self.eliminated[node] = data
    others = self.checks[node]
    del self.checks[node]
    for check in others:
        # Remove this node's contribution from the check equation.
        check.check ^= data
        check.src_nodes.remove(node)
        # One unknown left: that node's value is now fully determined.
        if len(check.src_nodes) == 1:
            yield (next(iter(check.src_nodes)), check.check)
Resolves a source node, passing the message to all associated checks
def result(self):
    """Return the epoch result: the frozen metrics plus the epoch index."""
    summary = {'epoch_idx': self.global_epoch_idx}
    summary.update(self.frozen_results)
    return summary
Return the epoch result
def load_json(cls, data, default_rule=None, raise_error=False):
    """Allow loading of JSON rule data.

    Each value in the JSON object is parsed into a rule via the policy
    parser; ``raise_error`` controls whether parse failures raise.
    """
    parsed = {
        name: _parser.parse_rule(rule, raise_error)
        for name, rule in json.loads(data).items()
    }
    return cls(parsed, default_rule)
Allow loading of JSON rule data.
def from_section(cls, stream, section_name='.pic'):
    """Construct a Converter object from the specified section of the
    specified binary stream."""
    binary = Executable(stream)
    return cls(binary.get_section_data(section_name), binary.system)
Construct a Converter object from the specified section of the specified binary stream.
def inMicrolensRegion(ra_deg, dec_deg, padding=0):
    """Return True if the given coordinate falls on the K2C9 superstamp.

    Parameters
    ----------
    ra_deg, dec_deg : float
        J2000 coordinates in decimal degrees.
    padding : float
        Minimum distance (pixels) from the edge of the superstamp.
    """
    fov = getKeplerFov(9)
    try:
        channel, col, row = fov.getChannelColRow(
            ra_deg, dec_deg, allowIllegalReturnValues=False)
        return maskInMicrolensRegion(channel, col, row, padding=padding)
    except ValueError:
        # Coordinate does not fall on silicon at all.
        return False
Returns `True` if the given sky coordinate falls on the K2C9 superstamp. Parameters ---------- ra_deg : float Right Ascension (J2000) in decimal degrees. dec_deg : float Declination (J2000) in decimal degrees. padding : float Target must be at least `padding` pixels away from the edge of the superstamp. (Note that CCD boundaries are not considered as edges in this case.) Returns ------- onMicrolensRegion : bool `True` if the given coordinate is within the K2C9 microlens superstamp.
def import_class(klass):
    """Import and return the class named by the dotted path ``klass``."""
    module_path, _, class_name = klass.rpartition('.')
    mod = __import__(module_path)
    # __import__ returns the top package; walk down to the leaf module.
    for segment in klass.split('.')[1:-1]:
        mod = getattr(mod, segment)
    return getattr(mod, class_name)
Import the named class and return that class
def get_organization(self, organization_id):
    """Get an organization for a given organization ID.

    :param organization_id: str
    :return: Organization
    """
    return self.get(
        'rest/servicedeskapi/organization/{}'.format(organization_id),
        headers=self.experimental_headers)
Get an organization for a given organization ID :param organization_id: str :return: Organization
def set_scope(self, http_method, scope):
    """Set a scope condition on this resource for ``http_method``.

    Parameters:
    * **http_method (str):** HTTP method like GET, POST, PUT, DELETE
    * **scope (str, list):** a list replaces the condition's scopes,
      a single string is appended
    """
    # Extend an existing condition covering this method, if any.
    for con in self.conditions:
        if http_method in con['httpMethods']:
            if isinstance(scope, list):
                con['scopes'] = scope
            elif isinstance(scope, str) or isinstance(scope, unicode):
                # NOTE(review): `unicode` only exists on Python 2; this
                # branch raises NameError on Python 3 -- confirm target.
                con['scopes'].append(scope)
            return
    # No existing condition for this method: create a new one.
    if isinstance(scope, list):
        self.conditions.append({'httpMethods': [http_method], 'scopes': scope})
    elif isinstance(scope, str) or isinstance(scope, unicode):
        self.conditions.append({'httpMethods': [http_method], 'scopes': [scope]})
Set a scope condition for the resource for a http_method Parameters: * **http_method (str):** HTTP method like GET, POST, PUT, DELETE * **scope (str, list):** the scope of access control as str if single, or as a list of strings if multiple scopes are to be set
def where(cls, session, include=None, metadata=None, filter=None):
    """Get filtered resources of this resource class (sub-classes only).

    Args:
        session (Session): session to look the resources up in.

    Keyword Args:
        include (list): resource classes to side-load in the request.
        metadata (dict or list): metadata filter applied server-side.
        filter (callable): client-side predicate applied to the results.

    Returns:
        iterable of matching resources.
    """
    url = session._build_url(cls._resource_path())
    params = build_request_include(include, None)
    if metadata is not None:
        params['filter[metadata]'] = to_json(metadata)
    # The processor deserializes the JSON body into resource objects and
    # applies the client-side filter.
    process = cls._mk_many(session, include=include, filter=filter)
    return session.get(url, CB.json(200, process), params=params)
Get filtered resources of the given resource class. This should be called on sub-classes only. The include argument allows relationship fetches to be optimized by including the target resources in the request of the containing resource. For example:: .. code-block:: python org = Organization.singleton(session, include=[Sensor]) org.sensors(use_included=True) Will fetch the sensors for the authorized organization as part of retrieving the organization. The ``use_included`` forces the use of included resources and avoids making a separate request to get the sensors for the organization. The metadata argument enables filtering on resources that support metadata filters. For example:: .. code-block:: python sensors = Sensor.where(session, metadata={ 'asset_id': '23456' }) Will fetch all sensors that match the given metadata attribute. The filter argument enables filtering the resulting resources based on a passed in function. For example:: .. code-block::python sensors = Sensor.where(session, filter=lambda s: s.name.startswith("a")) Will fetch all sensors and apply the given filter to only return sensors who's name start with the given string. Args: session(Session): The session to look up the resources in Keyword Args: incldue(list): The resource classes to include in the request. metadata(dict or list): The metadata filter to apply Returns: iterable(Resource): An iterator over all found resources of this type
def prepareToSolve(self):
    """Perform preparatory work before calculating the unconstrained
    consumption function.

    Updates the solver's internal values from next period's solution and
    the income/preference inputs, then applies the artificial borrowing
    constraint.

    Parameters
    ----------
    none

    Returns
    -------
    none
    """
    # Order matters: values must be set before the borrowing constraint
    # is defined from them.
    self.setAndUpdateValues(self.solution_next,self.IncomeDstn,self.LivPrb,self.DiscFac)
    self.defBoroCnst(self.BoroCnstArt)
Perform preparatory work before calculating the unconstrained consumption function. Parameters ---------- none Returns ------- none
def check_aggregations_privacy(self, aggregations_params):
    """Check per-field privacy rules for an aggregations request.

    Privacy is enforced by verifying the requesting user may access every
    field referenced in the aggregation; a failed validation is converted
    into an HTTP 403 response.

    :param aggregations_params: Aggregation parameters to extract fields from.
    :raises JHTTPForbidden: If the user lacks access to any referenced field.
    """
    agg_fields = self.get_aggregations_fields(aggregations_params)
    field_data = dictset.fromkeys(agg_fields)
    # validate_data_privacy dispatches on the document type
    field_data['_type'] = self.view.Model.__name__
    try:
        validate_data_privacy(self.view.request, field_data)
    except wrappers.ValidationError as ex:
        raise JHTTPForbidden(
            'Not enough permissions to aggregate on '
            'fields: {}'.format(ex))
Check per-field privacy rules in aggregations. Privacy is checked by making sure user has access to the fields used in aggregations.
def get_handler(self, handler_input, exception):
    """Return the first registered exception handler that can handle
    the given input and exception, or ``None`` if none matches.

    :param handler_input: Generic input passed to the dispatcher.
    :param exception: Exception raised during dispatch.
    :return: Matching exception handler or ``None``.
    """
    return next(
        (candidate for candidate in self.exception_handlers
         if candidate.can_handle(
             handler_input=handler_input, exception=exception)),
        None)
Get the exception handler that can handle the input and exception. :param handler_input: Generic input passed to the dispatcher. :type handler_input: Input :param exception: Exception thrown by :py:class:`ask_sdk_runtime.dispatch.GenericRequestDispatcher` dispatch method. :type exception: Exception :return: Exception Handler that can handle the input or None. :rtype: Union[None, ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler]
def __select_text_under_cursor_blocks(self, cursor):
    """
    Selects the document text under cursor blocks.

    Extends the given cursor's selection so it covers whole lines: from
    the start of the first selected block's line to the end of the last
    selected block's line.

    :param cursor: Cursor.
    :type cursor: QTextCursor
    """
    # Resolve the first line numbers of the blocks at both selection ends.
    start_block = self.document().findBlock(cursor.selectionStart()).firstLineNumber()
    end_block = self.document().findBlock(cursor.selectionEnd()).firstLineNumber()
    # Jump to the first block, then grow the selection line by line.
    cursor.setPosition(self.document().findBlockByLineNumber(start_block).position())
    cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
    cursor.movePosition(QTextCursor.Down, QTextCursor.KeepAnchor, end_block - start_block)
    cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
Selects the document text under cursor blocks. :param cursor: Cursor. :type cursor: QTextCursor
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
    """Parse data/label shape specs into DataDesc form and check names match.

    :param data_names: Expected data names.
    :param label_names: Expected label names.
    :param data_shapes: Data shapes (tuples or DataDesc instances).
    :param label_shapes: Label shapes, or None when labels are absent.
    :return: Tuple of (data_shapes, label_shapes) as DataDesc lists.
    """
    def _as_descs(shapes):
        # Normalize plain tuples into DataDesc instances.
        return [s if isinstance(s, DataDesc) else DataDesc(*s) for s in shapes]

    data_shapes = _as_descs(data_shapes)
    _check_names_match(data_names, data_shapes, 'data', True)
    if label_shapes is None:
        # No labels supplied; still validate against expected label names.
        _check_names_match(label_names, [], 'label', False)
    else:
        label_shapes = _as_descs(label_shapes)
        _check_names_match(label_names, label_shapes, 'label', False)
    return data_shapes, label_shapes
parse data_attrs into DataDesc format and check that names match
def maps(map_id=None, lang="en"):
    """Return details about maps in the game.

    Includes floor details and translation data for converting between
    world coordinates and map coordinates.

    :param map_id: Only list this map; if given, only that map's values
        are returned instead of the full id-keyed dictionary.
    :param lang: Show localized texts in the specified language.
    """
    params = {"lang": lang}
    cache_name = "maps.%s.json" % lang
    if map_id:
        params["map_id"] = map_id
        # per-map cache entry so different maps don't collide
        cache_name = "maps.%s.%s.json" % (map_id, lang)
    data = get_cached("maps.json", cache_name, params=params).get("maps")
    # API keys map ids as strings
    return data.get(str(map_id)) if map_id else data
This resource returns details about maps in the game, including details about floor and translation data on how to translate between world coordinates and map coordinates. :param map_id: Only list this map. :param lang: Show localized texts in the specified language. The response is a dictionary where the key is the map id and the value is a dictionary containing the following properties: map_name (string) The map name. min_level (number) The minimal level of this map. max_level (number) The maximum level of this map. default_floor (number) The default floor of this map. floors (list) A list of available floors for this map. region_id (number) The id of the region this map belongs to. region_name (string) The name of the region this map belongs to. continent_id (number) The id of the continent this map belongs to. continent_name (string) The name of the continent this map belongs to. map_rect (rect) The dimensions of the map. continent_rect (rect) The dimensions of the map within the continent coordinate system. If a map_id is given, only the values for that map are returned.
def sample(self, size=(), rule="R", antithetic=None):
    """Create pseudo-random generated samples.

    Samples are generated on the unit hypercube and mapped into the
    distribution's domain by the sampler; the ``rule`` flag selects the
    sampling scheme (e.g. ``"R"`` for classical pseudo-random samples).

    Args:
        size (numpy.ndarray): The size of the samples to generate.
        rule (str): Indicator defining the sampling scheme.
        antithetic (bool, numpy.ndarray): If provided, used to set up
            antithetic variables. If array, defines the axes to mirror.

    Returns:
        (numpy.ndarray): Random samples with shape ``(len(self),)+size``.
    """
    size_ = numpy.prod(size, dtype=int)
    dim = len(self)
    if dim > 1:
        # Multivariate: prepend the dimension axis to the requested size.
        if isinstance(size, (tuple, list, numpy.ndarray)):
            shape = (dim,) + tuple(size)
        else:
            shape = (dim, size)
    else:
        shape = size

    from . import sampler
    out = sampler.generator.generate_samples(
        order=size_, domain=self, rule=rule, antithetic=antithetic)
    try:
        out = out.reshape(shape)
    except ValueError:
        # Reshape fails when the sampler adjusts the number of samples
        # (e.g. antithetic variates); fall back to a best-effort shape
        # instead of silently swallowing every error type.
        if len(self) == 1:
            out = out.flatten()
        else:
            out = out.reshape(dim, out.size // dim)
    return out
Create pseudo-random generated samples. By default, the samples are created using standard (pseudo-)random samples. However, if needed, the samples can also be created by either low-discrepancy sequences, and/or variance reduction techniques. Changing the sampling scheme, use the following ``rule`` flag: +-------+-------------------------------------------------+ | key | Description | +=======+=================================================+ | ``C`` | Roots of the first order Chebyshev polynomials. | +-------+-------------------------------------------------+ | ``NC``| Chebyshev nodes adjusted to ensure nested. | +-------+-------------------------------------------------+ | ``K`` | Korobov lattice. | +-------+-------------------------------------------------+ | ``R`` | Classical (Pseudo-)Random samples. | +-------+-------------------------------------------------+ | ``RG``| Regular spaced grid. | +-------+-------------------------------------------------+ | ``NG``| Nested regular spaced grid. | +-------+-------------------------------------------------+ | ``L`` | Latin hypercube samples. | +-------+-------------------------------------------------+ | ``S`` | Sobol low-discrepancy sequence. | +-------+-------------------------------------------------+ | ``H`` | Halton low-discrepancy sequence. | +-------+-------------------------------------------------+ | ``M`` | Hammersley low-discrepancy sequence. | +-------+-------------------------------------------------+ All samples are created on the ``[0, 1]``-hypercube, which then is mapped into the domain of the distribution using the inverse Rosenblatt transformation. Args: size (numpy.ndarray): The size of the samples to generate. rule (str): Indicator defining the sampling scheme. antithetic (bool, numpy.ndarray): If provided, will be used to setup antithetic variables. If array, defines the axes to mirror. Returns: (numpy.ndarray): Random samples with shape ``(len(self),)+self.shape``.
def done(p_queue, host=None):
    """Construct a path to the done dir for a queue.

    When ``host`` is given, the path is rooted in that host's tree for
    the queue; otherwise it is relative to the queue itself.
    """
    if host is None:
        return _path(p_queue, _c.FSQ_DONE)
    return _path(_c.FSQ_DONE, root=_path(host, root=hosts(p_queue)))
Construct a path to the done dir for a queue
def populate(self, fields=None, **fields_kwargs):
    """Completely set all the fields of this orm.

    Combines the passed-in fields with any missing schema fields (which
    default to ``None``) and hydrates the instance via ``_populate``. To
    set only certain fields use ``_populate`` directly.

    :param fields: dict, the fields in a dict
    :param **fields_kwargs: dict, fields passed as key=val, merged with
        ``fields``
    """
    merged = self.make_dict(fields, fields_kwargs)
    # Every schema field is present in the result; absent ones are None.
    full_fields = {name: merged.get(name, None)
                   for name in self.schema.fields.keys()}
    self._populate(full_fields)
take the passed in fields, combine them with missing fields that should be there and then run all those through appropriate methods to hydrate this orm. The method replaces cls.hydrate() since it was becoming hard to understand what was going on with all these methods that did things just a little bit different. This is used to completely set all the fields of self. If you just want to set certain fields, you can use the submethod _populate :param fields: dict, the fields in a dict :param **fields_kwargs: dict, if you would like to pass the fields as key=val this picks those up and combines them with fields
def iterkeys(self):
    """Returns an iterator that crawls the entire Windows Registry."""
    # extendleft pushes each hive onto the left end, yielding the
    # reversed order the crawler expects.
    hive_stack = collections.deque()
    hive_stack.extendleft(self._hives)
    return self.__iterate(hive_stack)
Returns an iterator that crawls the entire Windows Registry.
def get_subdomain_iterator(self, domain, limit=None, offset=None):
    """Return an iterator over each available subdomain of ``domain``.

    The iterator keeps fetching pages from the API until all subdomains
    have been returned.

    NOTE(review): ``limit`` and ``offset`` are accepted but not forwarded
    to the iterator — presumably paging is handled internally; confirm
    whether these parameters should be honored.
    """
    return SubdomainResultsIterator(self._manager, domain=domain)
Returns an iterator that will return each available subdomain for the specified domain. If there are more than the limit of 100 subdomains, the iterator will continue to fetch subdomains from the API until all subdomains have been returned.
def evolve(self, rho: Density) -> Density:
    """Apply the action of this channel upon a density.

    :param rho: Density to act on.
    :return: New Density resulting from the channel action.
    """
    total_qubits = rho.qubit_nb
    qubits = rho.qubits
    # Channel tensors act on both the ket and bra indices of the density,
    # hence the index list is duplicated with an offset of total_qubits.
    ket_indices = [qubits.index(q) for q in self.qubits]
    bra_indices = [idx + total_qubits for idx in ket_indices]
    tensor = bk.tensormul(self.tensor, rho.tensor, ket_indices + bra_indices)
    return Density(tensor, qubits, rho.memory)
Apply the action of this channel upon a density
def today(self):
    """Return the Day for the current date, or None when absent."""
    current_date = timezone.now().date()
    try:
        day = Day.objects.get(date=current_date)
    except Day.DoesNotExist:
        day = None
    return day
Return the Day for the current day
def stop(self):
    """
    Stops the current task and cleans up, including removing active task
    config file.

    * Raises ``NoActiveTask`` exception if no active task found.
    """
    # Clear any stale state from previous runs before checking.
    self._clean_prior()
    if not self._loaded:
        raise errors.NoActiveTask
    self._clean()
Stops the current task and cleans up, including removing active task config file. * Raises ``NoActiveTask`` exception if no active task found.
def load_file(self, filepath, **kwargs):
    """Load a file into the definition triplestore.

    Skips files already loaded unless the file on disk is newer than the
    recorded load time, in which case the stale copy is dropped first.

    args:
        filepath: the path to the file

    kwargs:
        log_level: overrides the instance log level for this call
    """
    log.setLevel(kwargs.get("log_level", self.log_level))
    filename = os.path.split(filepath)[-1]
    if filename in self.loaded:
        # Reload only when the on-disk file is newer than the recorded
        # load time (epoch default forces a reload when no time exists).
        if self.loaded_times.get(filename, datetime.datetime(2001,1,1)).timestamp() \
                < os.path.getmtime(filepath):
            self.drop_file(filename, **kwargs)
        else:
            return
    conn = self.__get_conn__(**kwargs)
    conn.load_data(graph=getattr(__NSM__.kdr, filename).clean_uri,
                   data=filepath,
                   is_file=True)
    # Record the load time so future calls can detect staleness.
    self.__update_time__(filename, **kwargs)
    log.warning("\n\tfile: '%s' loaded\n\tconn: '%s'\n\tpath: %s",
                filename, conn, filepath)
    self.loaded.append(filename)
loads a file into the defintion triplestore args: filepath: the path to the file
def ratio_split(amount, ratios):
    """Split ``amount`` according to the given ratios.

    Guarantees that the returned values always sum to ``amount``, i.e.
    no losses or gains due to rounding: any rounding drift is folded
    into the last share.

    Args:
        amount (Decimal): The amount to be split
        ratios (list[Decimal]): The ratios that will determine the split

    Returns:
        list(Decimal): One quantized share per ratio, summing to amount.
    """
    share_per_unit = amount / sum(ratios)
    exact_shares = [share_per_unit * ratio for ratio in ratios]
    cent = Decimal("0.01")
    quantized = [share.quantize(cent) for share in exact_shares]
    # Collect the drift introduced by quantizing and push it onto the
    # final share so the total is preserved.
    drift = sum(exact - q for exact, q in zip(exact_shares, quantized))
    quantized[-1] = (quantized[-1] + drift).quantize(cent)
    assert sum(quantized) == amount
    return quantized
Split ``amount`` according to the ratios specified in `ratios` This is special in that it ensures the returned values always sum to ``amount`` (i.e. we avoid losses or gains due to rounding errors). As a result, this method returns a list of `Decimal` values with length equal to that of `ratios`. Examples: .. code-block:: python >>> from hordak.utilities.money import ratio_split >>> from decimal import Decimal >>> ratio_split(Decimal('10'), [Decimal('1'), Decimal('2')]) [Decimal('3.33'), Decimal('6.67')] Note the returned values sum to the original input of ``10``. If we were to do this calculation in a naive fashion then the returned values would likely be ``3.33`` and ``6.66``, which would sum to ``9.99``, thereby losing ``0.01``. Args: amount (Decimal): The amount to be split ratios (list[Decimal]): The ratios that will determine the split Returns: list(Decimal)
def _raw_recv(self):
    """Return the next available IRC message in the buffer.

    Refills the buffer from the connection when exhausted, resets it
    when the read index grows large, and skips over server PING lines.
    """
    with self.lock:
        # Refill when the read index has run past the buffered messages.
        if self._index >= len(self._buffer):
            self._mcon()
        # Periodically reset the buffer to keep it from growing unbounded.
        if self._index >= 199:
            self._resetbuffer()
            self._mcon()
        msg = self._buffer[self._index]
        # Skip PING lines; refill and step back if we run off the end.
        while self.find(msg, 'PING :'):
            self._index += 1
            try:
                msg = self._buffer[self._index]
            except IndexError:
                self._mcon()
                self.stepback(append=False)
        # Advance past the message we are about to return.
        self._index += 1
        return msg
Return the next available IRC message in the buffer.
def get(cls, tab_uuid, tab_attachment_tab_id, custom_headers=None):
    """Get a specific tab attachment.

    The header of the response contains the content-type of the
    attachment.

    :type tab_uuid: str
    :type tab_attachment_tab_id: int
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseTabAttachmentTab
    """
    headers = {} if custom_headers is None else custom_headers
    api_client = client.ApiClient(cls._get_api_context())
    url = cls._ENDPOINT_URL_READ.format(tab_uuid, tab_attachment_tab_id)
    raw_response = api_client.get(url, {}, headers)
    parsed = cls._from_json(raw_response, cls._OBJECT_TYPE_GET)
    return BunqResponseTabAttachmentTab.cast_from_bunq_response(parsed)
Get a specific attachment. The header of the response contains the content-type of the attachment. :type api_context: context.ApiContext :type tab_uuid: str :type tab_attachment_tab_id: int :type custom_headers: dict[str, str]|None :rtype: BunqResponseTabAttachmentTab
def get(self, id):
    """Retrieve a custom domain by id.

    See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/get_custom_domains_by_id
    """
    target_url = self._url('%s' % (id))
    return self.client.get(target_url)
Retrieves custom domain. See: https://auth0.com/docs/api/management/v2#!/Custom_Domains/get_custom_domains_by_id
def epilogue(app_name):
    """Return the epilogue for the help command."""
    styled_app = clr.stringc(app_name, "bright blue")
    styled_command = clr.stringc("command", "cyan")
    styled_help = clr.stringc("--help", "green")
    return "\n{0} {1} {2} for more info on a command\n".format(
        styled_app, styled_command, styled_help)
Return the epilogue for the help command.
def cache_it(self, url):
    """Fetch ``url`` into a local temporary file and return its path.

    The underlying fetch is memoized; if the cached entry's file has
    been removed from disk, the memo cache is cleared and the document
    is fetched again.

    :param url: document origin url
    :type url: str
    :return: local absolute path of the cached file
    :rtype: str
    """
    entry = self._cache_it(url)
    if not isfile(entry.name):
        # Stale memo entry: its backing file vanished, so rebuild.
        self._cache_it.cache_clear()
        entry = self._cache_it(url)
    return entry.name
Take an url which deliver a plain document and convert it to a temporary file, this document is an xslt file expecting contains all xslt definitions, then the cache process is recursive. :param url: document origin url :type url: str :return file_path: local new absolute path :rtype file_path: str
def datasets_org_count(self):
    """Return the number of datasets of user's organizations."""
    from udata.models import Dataset
    total = 0
    for org in self.organizations:
        total += Dataset.objects(organization=org).visible().count()
    return total
Return the number of datasets of user's organizations.
def exporter(directory, method, datasets):
    """Export ``datasets`` to a file in ``directory``.

    :param directory: target directory for the exported file
    :param method: export format, ``'json'`` or ``'csv'`` (case-insensitive)
    :param datasets: mapping of keys to lists of values (or None)
    """
    fmt = method.lower()
    if fmt == 'json':
        # Context manager guarantees the handle is closed even on error.
        with open('{}/exported.json'.format(directory), 'w+') as jsonfile:
            jsonfile.write(json.dumps(datasets, indent=4))
    elif fmt == 'csv':
        # newline='' lets the csv module control line endings, avoiding
        # doubled blank rows on Windows.
        with open('{}/exported.csv'.format(directory), 'w+', newline='') as csvfile:
            csv_writer = csv.writer(
                csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            for key, values in datasets.items():
                if values is None:
                    csv_writer.writerow([key])
                else:
                    csv_writer.writerow([key] + values)
Export the results.
def get_artifact_url(context, task_id, path):
    """Get a TaskCluster artifact url.

    Public artifact paths get a plain url; anything else gets a signed
    url.

    Args:
        context (scriptworker.context.Context): the scriptworker context
        task_id (str): the task id of the task that published the artifact
        path (str): the relative path of the artifact

    Returns:
        str: the artifact url
    """
    if path.startswith("public/"):
        return context.queue.buildUrl('getLatestArtifact', task_id, path)
    return context.queue.buildSignedUrl('getLatestArtifact', task_id, path)
Get a TaskCluster artifact url. Args: context (scriptworker.context.Context): the scriptworker context task_id (str): the task id of the task that published the artifact path (str): the relative path of the artifact Returns: str: the artifact url Raises: TaskClusterFailure: on failure.
def __format_occurence(self, occurence):
    """
    Formats the given occurence and returns the matching rich html text.

    The matched span is rendered bold; the surrounding text is wrapped
    in spans using the default line color.

    :param occurence: Occurence to format.
    :type occurence: Occurence
    :return: Rich text.
    :rtype: unicode
    """
    default = self.__default_line_color
    rgb = "rgb({0}, {1}, {2})".format(default.red(), default.green(), default.blue())
    span_format = "<span style=\"color: {0};\">{{0}}</span>".format(rgb)
    line = foundations.strings.to_string(occurence.text)
    match_start = occurence.column
    match_end = occurence.column + occurence.length
    head = span_format.format(line[:match_start])
    matched = "<b>{0}</b>".format(line[match_start:match_end])
    tail = span_format.format(line[match_end:])
    return head + matched + tail
Formats the given occurence and returns the matching rich html text. :param occurence: Occurence to format. :type occurence: Occurence :return: Rich text. :rtype: unicode
def get_neutral(array_list):
    """Get list of zero-valued numpy arrays for specified list of numpy arrays.

    :param array_list: list of numpy arrays
    :return: list of zero arrays of same shape/dtype as each input
    """
    # Comprehension replaces the manual append loop (same result).
    return [np.zeros_like(arr) for arr in array_list]
Get list of zero-valued numpy arrays for specified list of numpy arrays :param array_list: list of numpy arrays :return: list of zeros of same shape as input
def _style(self, retval):
    "Applies custom option tree to values return by the callback."
    custom = Store.custom_options()
    if self.id not in custom:
        return retval
    spec = StoreOptions.tree_to_dict(custom[self.id])
    return retval.opts(spec)
Applies custom option tree to values return by the callback.
def intersect(self, **kwargs):
    """Intersect Point and Bathymetry.

    Keyword Args:
        end_point: point whose location and depth are checked.

    Returns:
        bool: True when the bathymetry depth at the point is negative
        and above the point's own depth.
    """
    end_point = kwargs.pop('end_point')
    bathy_depth = self.get_depth(location=end_point)
    return bathy_depth < 0 and bathy_depth > end_point.depth
Intersect Point and Bathymetry returns bool
def cpp_spec():
    """C++ specification, provided for example, and java compatible."""
    spec = {}
    spec[INDENTATION] = '\t'
    spec[BEG_BLOCK] = '{'
    spec[END_BLOCK] = '}'
    spec[BEG_LINE] = ''
    spec[END_LINE] = '\n'
    spec[BEG_ACTION] = ''
    spec[END_ACTION] = ';'
    spec[BEG_CONDITION] = 'if('
    spec[END_CONDITION] = ')'
    spec[LOGICAL_AND] = ' && '
    spec[LOGICAL_OR] = ' || '
    return spec
C++ specification, provided for example, and java compatible.
def get_languages_from_application(app_label):
    """Get the languages configured for the given application.

    :param app_label: application label to look up
    :return: list of language codes, empty if no configuration exists
    """
    try:
        app_languages = TransApplicationLanguage.objects.filter(
            application=app_label).get()
    except TransApplicationLanguage.DoesNotExist:
        return []
    return [language.code for language in app_languages.languages.all()]
Get the languages configured for the current application :param app_label: :return:
def unsubscribe(self, subscription):
    """Allows endpoint owner to delete subscription.

    Confirmation message will be delivered.

    :type subscription: string
    :param subscription: The ARN of the subscription to be deleted.
    """
    params = {'ContentType' : 'JSON',
              'SubscriptionArn' : subscription}
    response = self.make_request('Unsubscribe', params, '/', 'GET')
    body = response.read()
    if response.status != 200:
        # Log the failure details before raising.
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
    return json.loads(body)
Allows endpoint owner to delete subscription. Confirmation message will be delivered. :type subscription: string :param subscription: The ARN of the subscription to be deleted.
def bounding_box(self):
    """Generate a bounding box based on the full complexity part.

    Uses the world-coordinate object when world coordinates are active,
    otherwise the local object.

    :return: bounding box of part
    :rtype: cadquery.BoundBox
    """
    source = self.world_obj if self.world_coords else self.local_obj
    return source.findSolid().BoundingBox()
Generate a bounding box based on the full complexity part. :return: bounding box of part :rtype: cadquery.BoundBox
def set_stream_class_lists_url(self, session_id):
    """Return the url used to set the stream class list for a session."""
    parts = (self.api_url, '/v2/project/', self.api_key,
             '/session/', session_id, '/stream')
    return ''.join(parts)
this method returns the url to set the stream class list
def get_percent_identity(a_aln_seq, b_aln_seq):
    """Get the percent identity between two alignment strings.

    Positions where both sequences have a gap ("-") are excluded from
    the denominator.

    :param a_aln_seq: first aligned sequence
    :param b_aln_seq: second aligned sequence
    :return: fraction of non-gap aligned positions that match
    :raises ValueError: if the sequences differ in length
    """
    if len(a_aln_seq) != len(b_aln_seq):
        raise ValueError('Sequence lengths not equal - was an alignment run?')
    matches = 0
    gaps = 0
    # zip replaces index-based iteration over both sequences in lockstep.
    for char_a, char_b in zip(a_aln_seq, b_aln_seq):
        if char_a == char_b:
            if char_a == "-":
                gaps += 1
            else:
                matches += 1
    return matches / float(len(a_aln_seq) - gaps)
Get the percent identity between two alignment strings
def authenticated_users(func):
    """Decorator abstracting common authentication checking out of
    permission checks.

    Determines which positional argument is the request based on the
    wrapped function's name: object-permission checks ("has_object" in
    the name) receive the request as the second argument, others as the
    first. Returns False when the request has no authenticated user.
    """
    takes_object = "has_object" in func.__name__

    @wraps(func)
    def wrapper(*args, **kwargs):
        request = args[1] if takes_object else args[0]
        if request.user and request.user.is_authenticated:
            return func(*args, **kwargs)
        return False

    return wrapper
This decorator is used to abstract common authentication checking functionality out of permission checks. It determines which parameter is the request based on name.
def gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat, NatOrgMat, coag):
    """Return the fraction of the coagulant that is coated with humic acid.

    :param ConcAl: Concentration of aluminum in solution
    :param ConcNatOrgMat: Concentration of natural organic matter in solution
    :param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid
    :param coag: Type of coagulant in solution, e.g. floc_model.PACl
    :return: fraction of the coagulant coated with humic acid, capped at 1
    :rtype: float
    """
    precip_conc = conc_precipitate(ConcAl, coag).magnitude
    coverage = ((ConcNatOrgMat / precip_conc)
                * (coag.Density / NatOrgMat.Density)
                * (coag.Diameter / (4 * NatOrgMat.Diameter)))
    # A coverage fraction cannot exceed unity.
    return min(coverage, 1)
Return the fraction of the coagulant that is coated with humic acid. :param ConcAl: Concentration of alumninum in solution :type ConcAl: float :param ConcNatOrgMat: Concentration of natural organic matter in solution :type ConcNatOrgMat: float :param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid :type NatOrgMat: floc_model.Material :param coag: Type of coagulant in solution, e.g. floc_model.PACl :type coag: floc_model.Material :return: fraction of the coagulant that is coated with humic acid :rtype: float
def get_wifi_state(self):
    """Return the current Wi-Fi state the device is connected to."""
    log.debug("getting wifi state...")
    command, endpoint = DEVICE_URLS["get_wifi_state"]
    return self._exec(command, endpoint)
returns the current Wi-Fi state the device is connected to
def grad(self, xs, ys):
    """Computes the gradients of the network."""
    feed = {self.x: xs, self.y_: ys}
    return self.sess.run(self.cross_entropy_grads, feed_dict=feed)
Computes the gradients of the network.
def toggleAttributesDOM(isEnabled):
    """
    toggleAttributesDOM - Toggle whether the DOM NamedNodeMap model or
    the direct dict implementation backs AdvancedTag.attributes.

    The DOM version is always accessible as AdvancedTag.attributesDOM,
    the dict version as AdvancedTag.attributesDict.

    @param isEnabled <bool> - If True, .attributes uses the DOM provider;
        otherwise the dict provider.
    """
    AdvancedTag.attributes = (
        AdvancedTag.attributesDOM if isEnabled else AdvancedTag.attributesDict)
toggleAttributesDOM - Toggle if the old DOM tag.attributes NamedNodeMap model should be used for the .attributes method, versus a more sane direct dict implementation. The DOM version is always accessable as AdvancedTag.attributesDOM The dict version is always accessable as AdvancedTag.attributesDict Default for AdvancedTag.attributes is to be attributesDict implementation. @param isEnabled <bool> - If True, .attributes will be changed to use the DOM-provider. Otherwise, it will use the dict provider.
def group(self):
    "Group inherited from main element"
    main_elem = self.main
    # Inherit only a non-default group from the main element.
    if main_elem and main_elem.group != type(main_elem).__name__:
        return main_elem.group
    return 'AdjointLayout'
Group inherited from main element
def __check_field(self, key):
    """Raises a KeyError if the field doesn't exist."""
    if self._props.get(key):
        return
    raise KeyError(
        'The field "%s" does not exist on "%s"' % (
            key,
            self.__class__.__name__,
        ),
    )
Raises a KeyError if the field doesn't exist.