code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def mode(self, mode):
    """Set the Arlo camera mode (e.g. arm, disarm, schedule)."""
    available = self.available_modes
    # Silently ignore modes the camera does not support.
    if not available or mode not in available:
        return
    # 'schedule' is set through a dedicated backend resource.
    resource = 'schedule' if mode == 'schedule' else 'modes'
    self.publish(
        action='set',
        resource=resource,
        mode=mode,
        publish_response=True)
    self.update()
Set Arlo camera mode. :param mode: one of the camera's available modes, e.g. arm, disarm or schedule
def request(self):
    """Return a (client, headers) pair for making API requests.

    Uses an API key header when one is configured, otherwise an OAuth2
    session based on the stored token.

    :raises APIError: when neither an API key nor a token is available.
    """
    headers = {'Accept': 'application/json'}
    if self.api_key:
        headers['X-API-KEY'] = self.api_key
        return requests, headers
    if self.token:
        return OAuth2Session(self.client_id, token=self.token), headers
    raise APIError("No API key and no OAuth session available")
Returns a (client, headers) pair to be used to make requests: the requests module plus an API-key header when an API key is configured, otherwise an OAuth2 session. Raises APIError when neither an API key nor an OAuth token is available.
def filterAcceptsRow(self, source_row, source_parent):
    """Hide unsupported item branches and leaf items pointing at .xml files.

    :param source_row: row number in the source model
    :param source_parent: parent index in the source model
    :returns: True when the item should be shown
    :rtype: bool
    """
    model = self.sourceModel()
    index = model.index(source_row, 0, source_parent)
    item = model.dataItem(index)
    supported = (
        'QgsPGRootItem', 'QgsPGConnectionItem', 'QgsPGSchemaItem',
        'QgsPGLayerItem', 'QgsFavoritesItem', 'QgsDirectoryItem',
        'QgsLayerItem', 'QgsGdalLayerItem', 'QgsOgrLayerItem')
    if item.metaObject().className() not in supported:
        return False
    return not item.path().endswith('.xml')
The filter method .. note:: This filter hides top-level items of unsupported branches and also leaf items containing xml files. Enabled root items: QgsDirectoryItem, QgsFavouritesItem, QgsPGRootItem. Disabled root items: QgsMssqlRootItem, QgsSLRootItem, QgsOWSRootItem, QgsWCSRootItem, QgsWFSRootItem, QgsWMSRootItem. Disabled leaf items: QgsLayerItem and QgsOgrLayerItem with path ending with '.xml' :param source_row: Parent widget of the model :type source_row: int :param source_parent: Parent item index :type source_parent: QModelIndex :returns: Item validation result :rtype: bool
def clean_form_template(self):
    """Validate that the selected form template exists on disk."""
    template_name = self.cleaned_data.get('form_template', '')
    if not template_name:
        return template_name
    try:
        get_template(template_name)
    except TemplateDoesNotExist:
        raise forms.ValidationError(_('Selected Form Template does not exist.'))
    return template_name
Check if template exists
def solar_zenith(self, dateandtime, latitude, longitude):
    """Return the solar zenith angle in degrees (90 minus the elevation).

    :param dateandtime: date and time for which to calculate the angle
    :param latitude: latitude, northern latitudes positive
    :param longitude: longitude, eastern longitudes positive
    """
    elevation = self.solar_elevation(dateandtime, latitude, longitude)
    return 90.0 - elevation
Calculates the solar zenith angle. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :return: The zenith angle in degrees from vertical. :rtype: float If `dateandtime` is a naive Python datetime then it is assumed to be in the UTC timezone.
def transform_annotation(self, ann, duration):
    """Extract the first interval value of `ann` as a fixed-size vector.

    :param ann: annotation providing ``to_interval_values()``
    :param duration: track duration (unused here)
    :raises DataError: when the vector length differs from self.dimension
    """
    _, interval_values = ann.to_interval_values()
    vec = np.asarray(interval_values[0], dtype=self.dtype)
    if len(vec) == self.dimension:
        return {'vector': vec}
    raise DataError('vector dimension({:0}) '
                    '!= self.dimension({:1})'
                    .format(len(vec), self.dimension))
Apply the vector transformation. Parameters ---------- ann : jams.Annotation The input annotation duration : number > 0 The duration of the track Returns ------- data : dict data['vector'] : np.ndarray, shape=(dimension,) Raises ------ DataError If the input dimension does not match
def is_imap(self, model):
    """Check whether `model` is an I-map of this joint distribution.

    :param model: a BayesianModel instance
    :returns: True when the product of the model's CPD factors equals
        this joint probability distribution
    :raises TypeError: when `model` is not a BayesianModel
    """
    from pgmpy.models import BayesianModel
    if not isinstance(model, BayesianModel):
        raise TypeError("model must be an instance of BayesianModel")
    cpd_factors = [cpd.to_factor() for cpd in model.get_cpds()]
    factor_product = six.moves.reduce(mul, cpd_factors)
    jpd = DiscreteFactor(self.variables, self.cardinality, self.values)
    if jpd == factor_product:
        return True
    return False
Checks whether the given BayesianModel is Imap of JointProbabilityDistribution Parameters ----------- model : An instance of BayesianModel Class, for which you want to check the Imap Returns -------- boolean : True if given bayesian model is Imap for Joint Probability Distribution False otherwise Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]]) >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]]) >>> grade_cpd = TabularCPD('grade', 3, ... [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... evidence=['diff', 'intel'], ... evidence_card=[2, 3]) >>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd) >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032, 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128] >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val) >>> JPD.is_imap(bm) True
def assertFileSizeLess(self, filename, size, msg=None):
    """Fail unless the size of ``filename`` is strictly less than ``size``.

    :param filename: path-like or file-like object
    :param size: upper bound to compare against
    :param msg: optional custom failure message
    """
    actual = self._get_file_size(filename)
    self.assertLess(actual, size, msg=msg)
Fail if ``filename``'s size is not less than ``size`` as determined by the '<' operator. Parameters ---------- filename : str, bytes, file-like size : int, float msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``filename`` is not a str or bytes object and is not file-like.
def remove(self, row_or_row_indices):
    """Remove one row (int) or several rows (iterable of ints) in place.

    :param row_or_row_indices: a single row index or an iterable of indices
    :return: self, or None when there is nothing to remove
    """
    # Bug fix: the previous truthiness check (`if not row_or_row_indices`)
    # silently ignored a request to remove row 0.
    if row_or_row_indices is None:
        return
    if isinstance(row_or_row_indices, int):
        rows_remove = {row_or_row_indices}
    else:
        # A set gives O(1) membership tests in the filter below.
        rows_remove = set(row_or_row_indices)
        if not rows_remove:
            return
    for col in self._columns:
        self._columns[col] = [elem for i, elem in enumerate(self[col])
                              if i not in rows_remove]
    return self
Removes a row or multiple rows of a table in place.
def zoom_out(self):
    """Zoom the grid out by the configured zoom factor, bounded below."""
    current = self.grid.grid_renderer.zoom
    target = current * (1 - config["zoom_factor"])
    # Never zoom below the configured minimum.
    if target > config["minimum_zoom"]:
        self.zoom(target)
Zooms out by zoom factor
def shellfilter(value):
    """Backslash-escape characters that are unsafe inside a shell string."""
    # Backslash must be escaped first so later escapes are not doubled.
    for unsafe, escaped in (('\\', '\\\\'), ('`', '\\`'),
                            ("'", "\\'"), ('"', '\\"')):
        value = value.replace(unsafe, escaped)
    return safestring.mark_safe(value)
Backslash-escape backslash, backtick and quote characters so the value can be embedded safely in a shell string.
def debugObject(object, cat, format, *args):
    # Log a DEBUG-level message for `object` in category `cat`.
    # NOTE(review): `args` is passed to doLog as a single tuple rather than
    # unpacked — presumably doLog applies `format % args` itself; confirm
    # against doLog's signature before changing.
    doLog(DEBUG, object, cat, format, args)
Log a debug message in the given category.
def _to_args(x): if not isinstance(x, (list, tuple, np.ndarray)): x = [x] return x
Convert to args representation
def delete_network(self, tenant_name, network):
    """Delete `network` belonging to `tenant_name` on the DCNM.

    :param tenant_name: name of the tenant owning the network
    :param network: object carrying the network's segmentation_id
    :raises dexc.DfaClientRequestFailed: when the DCNM request fails
    """
    payload = {
        'organizationName': tenant_name,
        'partitionName': self._part_name,
        'segmentId': network.segmentation_id,
    }
    LOG.debug("Deleting %s network in DCNM.", payload)
    res = self._delete_network(payload)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s network in DCNM.", payload)
    else:
        LOG.error("Failed to delete %s network in DCNM.", payload)
        raise dexc.DfaClientRequestFailed(reason=res)
Delete network on the DCNM. :param tenant_name: name of tenant the network belongs to :param network: object that contains network parameters
def coalesce(*series):
    """Return the first non-NaN value row-wise across the given series.

    Mimics the ``coalesce`` function in dplyr and SQL.

    :param series: pandas Series (or Series-convertible) objects
    :return: np.ndarray with the first non-null value of each row
        (NaN when a row is null in every series)
    """
    frames = [pd.Series(s) for s in series]
    combined = pd.concat(frames, axis=1)
    # Column position of the first non-null entry in each row
    # (argmin of the null mask; 0 when the whole row is null, which
    # correctly yields NaN below).
    first_valid = np.argmin(pd.isnull(combined).values, axis=1)
    # Bug fix: DataFrame.lookup was deprecated in pandas 1.2 and removed
    # in 2.0; index the underlying array directly instead.
    return combined.to_numpy()[np.arange(len(combined)), first_valid]
Takes the first non-NaN value in order across the specified series, returning a new series. Mimics the coalesce function in dplyr and SQL. Args: *series: Series objects, typically represented in their symbolic form (like X.series). Example: df = pd.DataFrame({ 'a':[1,np.nan,np.nan,np.nan,np.nan], 'b':[2,3,np.nan,np.nan,np.nan], 'c':[np.nan,np.nan,4,5,np.nan], 'd':[6,7,8,9,np.nan] }) df >> transmute(coal=coalesce(X.a, X.b, X.c, X.d)) coal 0 1 1 3 2 4 3 5 4 np.nan
def register_recipe(cls, recipe):
    """Register a dftimewolf recipe module under its declared name.

    Args:
      recipe: imported python module representing the recipe.
    """
    name = recipe.contents['name']
    cls._recipe_classes[name] = (recipe.contents, recipe.args, recipe.__doc__)
Registers a dftimewolf recipe. Args: recipe: imported python module representing the recipe.
def add_sample(a_float, dist):
    """Add `a_float` to `dist`, updating its statistics and bucket counts.

    Args:
      a_float (float): the new sample value
      dist: the Distribution being updated

    Raises:
      ValueError: if `dist` has an unknown bucket option type
    """
    dist_type, _ = _detect_bucket_option(dist)
    # Map each known bucket option type to its count updater.
    updaters = {
        u'exponentialBuckets': _update_exponential_bucket_count,
        u'linearBuckets': _update_linear_bucket_count,
        u'explicitBuckets': _update_explicit_bucket_count,
    }
    update_buckets = updaters.get(dist_type)
    if update_buckets is None:
        _logger.error(u'Could not determine bucket option type for %s', dist)
        raise ValueError(u'Unknown bucket option type')
    _update_general_statistics(a_float, dist)
    update_buckets(a_float, dist)
Adds `a_float` to `dist`, updating its existing buckets. Args: a_float (float): a new value dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`): the Distribution being updated Raises: ValueError: if `dist` does not have known bucket options defined ValueError: if there are not enough bucket count fields in `dist`
def _sounds_re(include_erhua=False):
    """Compile a case-insensitive regex matching syllable+tone sounds.

    :param include_erhua: also match a final 'r' (erhua) at a word boundary
    """
    erhua_suffix = '|r\\b' if include_erhua else ''
    pattern = '({}{}{})'.format(_joined_syllables_re(), '[1-5]', erhua_suffix)
    return re.compile(pattern, re.IGNORECASE)
Sounds are syllables + tones
def _folder_item_uncertainty(self, analysis_brain, item):
    """Fill the "Uncertainty" cell of `item` for the given analysis.

    :param analysis_brain: Brain that represents an analysis
    :param item: dictionary counterpart representing a table row
    """
    item["Uncertainty"] = ""
    if not self.has_permission(ViewResults, analysis_brain):
        return
    result = analysis_brain.getResult
    analysis = self.get_object(analysis_brain)
    formatted = format_uncertainty(
        analysis, result, decimalmark=self.dmk, sciformat=int(self.scinot))
    # Fall back to the raw uncertainty when no formatted value is available.
    item["Uncertainty"] = formatted or analysis.getUncertainty(result)
    if self.is_uncertainty_edition_allowed(analysis_brain):
        item["allow_edit"].append("Uncertainty")
Fills the analysis' uncertainty to the item passed in. :param analysis_brain: Brain that represents an analysis :param item: analysis' dictionary counterpart that represents a row
def _execute_config_show(self, show_command, delay_factor=.1):
    """Run a configuration show-type command over RPC and return its text."""
    rpc_command = (
        '<CLI><Configuration>{show_command}</Configuration></CLI>'
        .format(show_command=escape_xml(show_command)))
    response = self._execute_rpc(rpc_command, delay_factor=delay_factor)
    text = response.xpath('.//CLI/Configuration')[0].text
    return text.strip() if text else ''
Executes a configuration show-type command.
def example_splits(url_file, all_files):
    """Map the URLs listed in `url_file` to story files present in `all_files`.

    Each URL is hashed (sha1) to a '<hash>.story' filename; missing files
    are logged and skipped.
    """
    def sha1_hex(data):
        # Hash the raw URL bytes into a hex digest.
        digest = hashlib.sha1()
        digest.update(data)
        return digest.hexdigest()

    files_by_name = {f.split("/")[-1]: f for f in all_files}
    urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)]
    filelist = []
    for url in urls:
        story_name = sha1_hex(url) + ".story"
        if story_name not in files_by_name:
            tf.logging.info("Missing file: %s" % url)
            continue
        filelist.append(files_by_name[story_name])
    tf.logging.info("Found %d examples" % len(filelist))
    return filelist
Generate splits of the data.
def compile(source, ezo):
    """Compile contract source into a list of Contract objects.

    :param source: (string) contract source code
    :param ezo: ezo reference used when creating each Contract
    :return: (contracts, None) on success, (None, error) on failure
    """
    try:
        interfaces = compile_source(source)
        contracts = []
        for contract_name in interfaces:
            contract = Contract(contract_name, ezo)
            contract.abi = interfaces[contract_name]['abi']
            contract.bin = interfaces[contract_name]['bin']
            contracts.append(contract)
    except Exception as e:
        return None, e
    return contracts, None
compiles the source code :param source: (string) - contract source code :param ezo: - ezo reference for Contract object creation :return: (list) compiled source
def register(self, key, value):
    """Register callable `value` under `key`, invalidating any cached entry.

    :param key: string key identifying the callable
    :param value: callable object to register
    """
    self._actions[key] = value
    # Drop any stale cached result for this key.
    self._cache.pop(key, None)
Registers a callable with the specified key. `key` String key to identify a callable. `value` Callable object.
def get_energy_management_properties(self):
    """Return the energy-management subset of this CPC's properties.

    Performs the HMC operation "Get CPC Energy Management Data" and
    returns only the properties for this CPC from the operation result.

    Returns:
      dict: energy-management related CPC properties.

    Raises:
      ParseError: when the response contains inconsistent data.
    """
    result = self.manager.session.get(self.uri + '/energy-management-data')
    objects = result['objects']
    if len(objects) != 1:
        uris = [obj['object-uri'] for obj in objects]
        raise ParseError("Energy management data returned for no resource "
                         "or for more than one resource: %r" % uris)
    cpc_data = objects[0]
    if cpc_data['object-uri'] != self.uri:
        raise ParseError("Energy management data returned for an "
                         "unexpected resource: %r" % cpc_data['object-uri'])
    if cpc_data['error-occurred']:
        raise ParseError("Errors occurred when retrieving energy "
                         "management data for CPC. Operation result: %r"
                         % result)
    return cpc_data['properties']
Return the energy management properties of the CPC. The returned energy management properties are a subset of the properties of the CPC resource, and are also available as normal properties of the CPC resource. In so far, there is no new data provided by this method. However, because only a subset of the properties is returned, this method is faster than retrieving the complete set of CPC properties (e.g. via :meth:`~zhmcclient.BaseResource.pull_full_properties`). This method performs the HMC operation "Get CPC Energy Management Data", and returns only the energy management properties for this CPC from the operation result. Note that in non-ensemble mode of a CPC, the HMC operation result will only contain data for the CPC alone. It requires that the feature "Automate/advanced management suite" (FC 0020) is installed and enabled, and returns empty values for most properties, otherwise. Authorization requirements: * Object-access permission to this CPC. Returns: dict: A dictionary of properties of the CPC that are related to energy management. For details, see section "Energy management related additional properties" in the data model for the CPC resource in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of operation "Get CPC Energy Management Data" in the :term:`HMC API` book. :exc:`~zhmcclient.ParseError`: Also raised by this method when the JSON response could be parsed but contains inconsistent data. :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def upsert(self):
    """Create the Jenkins job if it is missing, otherwise update its config."""
    if self.jenkins_host.has_job(self.name):
        LOGGER.info("updating {0}...".format(self.name))
        self.jenkins_host[self.name].update_config(self.config_xml)
    else:
        LOGGER.info("creating {0}...".format(self.name))
        self.jenkins_host.create_job(self.name, self.config_xml)
create or update the jenkins job
def marker_(self, lat, long, text, pmap, color=None, icon=None):
    """Return the map with a marker added; errors are reported via self.err."""
    try:
        return self._marker(lat, long, text, pmap, color, icon)
    except Exception as e:
        # Report and return None on failure, matching the class convention.
        self.err(e, self.marker_, "Can not get marker")
Return the map with a marker added at the given coordinates; returns None and reports via self.err on failure.
def size_to_content(self, get_font_metrics_fn):
    """Size the canvas item's width to the widest possible tick label.

    :param get_font_metrics_fn: callable(font, text) returning an object
        with a ``width`` attribute.
    """
    new_sizing = self.copy_sizing()
    new_sizing.minimum_width = 0
    new_sizing.maximum_width = 0
    axes = self.__axes
    if axes and axes.is_valid:
        font = "{0:d}px".format(self.font_size)
        max_width = 0
        y_range = axes.calibrated_data_max - axes.calibrated_data_min
        # Measure labels well beyond both ends of the data range (5x the
        # span) so the width accommodates any label the ticker may produce.
        label = axes.y_ticker.value_label(axes.calibrated_data_max + y_range * 5)
        max_width = max(max_width, get_font_metrics_fn(font, label).width)
        label = axes.y_ticker.value_label(axes.calibrated_data_min - y_range * 5)
        max_width = max(max_width, get_font_metrics_fn(font, label).width)
        new_sizing.minimum_width = max_width
        new_sizing.maximum_width = max_width
    self.update_sizing(new_sizing)
Size the canvas item to the proper width, the maximum of any label.
def query_ids(self, ids):
    """Query Layer records by uuid, falling back to Service records.

    :param ids: list of uuid identifiers to match
    """
    layers = self._get_repo_filter(Layer.objects).filter(uuid__in=ids).all()
    if len(layers) == 0:
        # No layers matched; try services instead.
        return self._get_repo_filter(Service.objects).filter(uuid__in=ids).all()
    return layers
Query by list of identifiers
def save(self, filename, wildcard='*', verbose=False):
    """Save parameters matching `wildcard` to `filename`, one per line.

    :param filename: destination file path
    :param wildcard: case-insensitive fnmatch pattern selecting parameters
    :param verbose: when True, print how many parameters were written
    """
    count = 0
    # Use a context manager so the file is closed even if a write fails
    # (the previous version leaked the handle on error).
    with open(filename, mode='w') as f:
        for p in sorted(self.keys()):
            if p and fnmatch.fnmatch(str(p).upper(), wildcard.upper()):
                f.write("%-16.16s %f\n" % (p, self.__getitem__(p)))
                count += 1
    if verbose:
        print("Saved %u parameters to %s" % (count, filename))
save parameters to a file
def make_api_method(func):
    """Wrap an API method so callers may pass `extra_params` per call.

    The popped `extra_params` value is stashed on the client object
    (args[0]) for the duration of the call, then removed.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        client = args[0]
        client._extra_params = kwargs.pop("extra_params", None)
        result = func(*args, **kwargs)
        try:
            del client._extra_params
        except AttributeError:
            pass
        return result
    return wrapper
Provides a single entry point for modifying all API methods. For now this is limited to allowing the client object to be modified with an `extra_params` keyword arg to each method, that is then used as the params for each web service request. Please note that this is an unsupported feature for advanced use only. It's also currently incompatibile with multiple threads, see GH #160.
def validate_args(args):
    """Exit with the appropriate error when argument combinations conflict."""
    if not (args.environment or args.stage or args.account):
        sys.exit(NO_ACCT_OR_ENV_ERROR)
    if args.environment:
        if args.account:
            sys.exit(ENV_AND_ACCT_ERROR)
        if args.role:
            sys.exit(ENV_AND_ROLE_ERROR)
Validate command-line arguments.
def filter(self, order_by=None, limit=0, **kwargs):
    """Fetch a list of model instances matching the keyword filters.

    :param order_by: column to order by; prepend < or > to change the sort
    :param limit: maximum number of rows to fetch (0 means no limit)
    :param kwargs: column=value pairs to filter on
    :raises ValueError: when no filters are supplied
    """
    with rconnect() as conn:
        if not kwargs:
            raise ValueError
        try:
            query = self._base().filter(kwargs)
            if order_by is not None:
                query = self._order_by(query, order_by)
            if limit > 0:
                query = self._limit(query, limit)
            log.debug(query)
            rows = query.run(conn)
        except ReqlOpFailedError as e:
            log.warn(e)
            raise
        except Exception as e:
            log.warn(e)
            raise
        else:
            return [self._model(row) for row in rows]
Fetch a list of instances. :param order_by: column on which to order the results. \ To change the sort, prepend with < or >. :param limit: How many rows to fetch. :param kwargs: keyword args on which to filter, column=value
def get_all_handleable_roots(self):
    """Return devices for root nodes of the filtered (non-ignored) tree.

    A node is a root when its parent is '/' or its parent is ignored.
    """
    nodes = self.get_device_tree()
    roots = []
    for node in sorted(nodes.values(), key=DevNode._sort_key):
        if node.ignored or not node.device:
            continue
        if node.root == '/' or nodes[node.root].ignored:
            roots.append(node.device)
    return roots
Get list of all handleable devices, return only those that represent root nodes within the filtered device tree.
def _get_resource_type(self, name):
    """Classify a static file by its extension into image/video/default type."""
    extension = self._get_file_extension(name)
    if extension is not None:
        if extension in app_settings.STATIC_IMAGES_EXTENSIONS:
            return RESOURCE_TYPES['IMAGE']
        if extension in app_settings.STATIC_VIDEOS_EXTENSIONS:
            return RESOURCE_TYPES['VIDEO']
    return self.RESOURCE_TYPE
Implemented as static files can be of different resource types. Because web developers are the people who control those files, we can distinguish them simply by looking at their extensions, we don't need any content based validation.
def send_query(query_dict):
    """Query the ChEMBL REST API and return the parsed JSON payload.

    :param query_dict: dict with 'query' (endpoint name) and 'params'
        (request parameters) keys
    :return: dict parsed from the JSON response
    """
    endpoint = query_dict['query']
    url = 'https://www.ebi.ac.uk/chembl/api/data/' + endpoint + '.json'
    r = requests.get(url, params=query_dict['params'])
    r.raise_for_status()
    return r.json()
Query ChEMBL API Parameters ---------- query_dict : dict 'query' : string of the endpoint to query 'params' : dict of params for the query Returns ------- js : dict dict parsed from json that is unique to the submitted query
def _prepack(self):
    # Prepack stage; for parser internal use.
    #
    # Two top-down passes over the chain of `_sub` packets: first let each
    # parser prepack everything except the packet itself, then prepack
    # everything except the sub-packets.  The pass order is significant.
    current = self
    while current is not None:
        current._parser.prepack(current, skip_self = True)
        current = getattr(current, '_sub', None)
    current = self
    while current is not None:
        current._parser.prepack(current, skip_sub = True)
        current = getattr(current, '_sub', None)
Prepack stage. For parser internal use.
def get_sns_topic_arn(topic_name, account, region):
    """Look up the ARN for an SNS topic by name.

    Args:
        topic_name (str): topic name, or a full ARN (returned unchanged).
        account (str): environment/profile name, e.g. dev.
        region (str): AWS region name, e.g. us-east-1.

    Returns:
        str: ARN of the requested topic.

    Raises:
        SNSTopicNotFound: when no topic with that name exists.
    """
    # A fully-qualified ARN is passed through untouched.
    if topic_name.startswith('arn:aws:sns:') and topic_name.count(':') == 5:
        return topic_name
    session = boto3.Session(profile_name=account, region_name=region)
    topics = session.client('sns').list_topics()['Topics']
    for topic in topics:
        arn = topic['TopicArn']
        if arn.split(':')[-1] == topic_name:
            return arn
    LOG.critical("No topic with name %s found.", topic_name)
    raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
Get SNS topic ARN. Args: topic_name (str): Name of the topic to lookup. account (str): Environment, e.g. dev region (str): Region name, e.g. us-east-1 Returns: str: ARN for requested topic name
def marshal(self) :
    "serializes this Message into the wire protocol format and returns a bytes object."
    # Out-parameters filled in by libdbus: a C-allocated buffer and its length.
    buf = ct.POINTER(ct.c_ubyte)()
    nr_bytes = ct.c_int()
    if not dbus.dbus_message_marshal(self._dbobj, ct.byref(buf), ct.byref(nr_bytes)) :
        raise CallFailed("dbus_message_marshal")
    # Copy the C buffer into a Python bytearray, then free the C side to
    # avoid leaking the libdbus allocation.
    result = bytearray(nr_bytes.value)
    ct.memmove \
      (
        ct.addressof((ct.c_ubyte * nr_bytes.value).from_buffer(result)),
        buf,
        nr_bytes.value
      )
    dbus.dbus_free(buf)
    return \
        result
serializes this Message into the wire protocol format and returns a bytes object.
def find_group(self, star, starlist):
    """Return ids of stars in `starlist` within `crit_separation` of `star`.

    :param star: row with 'x_0' and 'y_0' centroid coordinates
    :param starlist: table with 'x_0', 'y_0' and 'id' columns
    :return: array of matching star ids
    """
    dx = star['x_0'] - starlist['x_0']
    dy = star['y_0'] - starlist['y_0']
    close_enough = np.hypot(dx, dy) < self.crit_separation
    return np.asarray(starlist[close_enough]['id'])
Find the ids of those stars in ``starlist`` which are at a distance less than ``crit_separation`` from ``star``. Parameters ---------- star : `~astropy.table.Row` Star which will be either the head of a cluster or an isolated one. starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- Array containing the ids of those stars which are at a distance less than ``crit_separation`` from ``star``.
def public_key(self):
    """Access the public_key list resource, creating it lazily on first use.

    :returns: twilio.rest.accounts.v1.credential.public_key.PublicKeyList
    :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyList
    """
    if self._public_key is None:
        self._public_key = PublicKeyList(self._version, )
    return self._public_key
Access the public_key :returns: twilio.rest.accounts.v1.credential.public_key.PublicKeyList :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyList
def get_configuration(self, uri):
    """GET the configuration of the PartStudio element described by `uri`.

    Args:
      uri (dict): points to a particular element ('did', 'wvm_type',
        'wvm', 'eid' keys)

    Returns:
      requests.Response: Onshape response data
    """
    headers = {
        'Accept': 'application/vnd.onshape.v1+json',
        'Content-Type': 'application/json',
    }
    path = ('/api/partstudios/d/' + uri["did"] + '/' + uri["wvm_type"]
            + '/' + uri["wvm"] + '/e/' + uri["eid"] + '/configuration')
    return self._api.request('get', path, headers=headers)
get the configuration of a PartStudio Args: - uri (dict): points to a particular element Returns: - requests.Response: Onshape response data
def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
    """Convert this matrix to a BlockMatrix.

    :param rowsPerBlock: rows per block; the final row of blocks may
        have fewer rows.
    :param colsPerBlock: columns per block; the final column of blocks
        may have fewer columns.
    """
    java_matrix = self._java_matrix_wrapper.call(
        "toBlockMatrix", rowsPerBlock, colsPerBlock)
    return BlockMatrix(java_matrix, rowsPerBlock, colsPerBlock)
Convert this matrix to a BlockMatrix. :param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each block. The blocks forming the final columns are not required to have the given number of columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows).toBlockMatrix() >>> # This IndexedRowMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # BlockMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> print(mat.numCols()) 3
def _find_recorder(recorder, tokens, index): if recorder is None: for recorder_factory in _RECORDERS: recorder = recorder_factory.maybe_start_recording(tokens, index) if recorder is not None: return recorder return recorder
Given a current recorder and a token index, try to find a recorder.
def server(addr):
    """Bind to `addr`, accept a single connection and wrap it in SocketTalk."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick re-binding of the address after a restart.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(addr)
    listener.listen(1)
    conn, _ = listener.accept()
    return SocketTalk(conn)
Return a SocketTalk server.
def immediate_postdominators(self, end, target_graph=None):
    """
    Get all immediate postdominators of sub graph from given node upwards.

    :param str end: id of the node to navigate backwards from.
    :param networkx.classes.digraph.DiGraph target_graph: graph to analyse,
        default is self.graph.
    :return: each node of graph as index values, with element as respective
        node's immediate postdominator.
    :rtype: dict
    """
    # Postdominators are dominators computed on the reversed graph.
    return self._immediate_dominators(end, target_graph=target_graph, reverse_graph=True)
Get all immediate postdominators of sub graph from given node upwards. :param str end: id of the node to navigate backwards from. :param networkx.classes.digraph.DiGraph target_graph: graph to analyse, default is self.graph. :return: each node of graph as index values, with element as respective node's immediate postdominator. :rtype: dict
def send_username_changed_email(self, user):
    """Send the 'username has changed' notification email to `user`."""
    manager = self.user_manager
    # Both email support and this specific notification must be enabled.
    if not manager.USER_ENABLE_EMAIL:
        return
    if not manager.USER_SEND_USERNAME_CHANGED_EMAIL:
        return
    email_obj = manager.db_manager.get_primary_user_email_object(user)
    self._render_and_send_email(
        email_obj.email,
        user,
        manager.USER_USERNAME_CHANGED_EMAIL_TEMPLATE,
    )
Send the 'username has changed' notification email.
def generate_pydenticon(identifier, size):
    """Generate a PNG identicon for `identifier` using configured settings.

    All generator parameters are taken from the internal configuration;
    `size` is the total output size including padding.
    """
    blocks = get_internal_config('size')
    generator = pydenticon.Generator(
        blocks, blocks,
        digest=hashlib.sha1,
        foreground=get_internal_config('foreground'),
        background=get_internal_config('background'))
    # Padding is configured as a percentage of the requested size.
    pad = int(round(get_internal_config('padding') * size / 100.))
    inner = size - 2 * pad
    return generator.generate(identifier, inner, inner,
                              padding=(pad, ) * 4, output_format='png')
Use pydenticon to generate an identicon image. All parameters are extracted from configuration.
def get_option(self, key, subkey, in_path_none=False):
    """Get the current value of the option.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.
    :param bool in_path_none: Allow 'path_in' values of None to be returned.
    :return: Current value of the option (type varies).
    :raises ValueError: if a 'path_in' option with a None value is
        requested without in_path_none.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    # Select the single row matching both identifiers.
    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    # Cast the stored value back to its declared type.
    if df["type"].values[0] == "bool":
        return bool(df["value"].values[0])
    elif df["type"].values[0] == "int":
        return int(df["value"].values[0])
    elif df["type"].values[0] == "path_in":
        # Input paths must be set unless the caller explicitly opts in to None.
        if df["value"].values[0] is None and not in_path_none:
            raise ValueError('Unspecified path for {0}.{1}'.format(key, subkey))
        return df["value"].values[0]
    else:
        return df["value"].values[0]
Get the current value of the option. :param str key: First identifier of the option. :param str subkey: Second identifier of the option. :param bool in_path_none: Allows for ``in_path`` values of :data:`None` to be retrieved. :return: Current value of the option (type varies). :raise: :NotRegisteredError: If ``key`` or ``subkey`` do not define any option. :ValueError: If a ``in_path`` type with :data:`None` value is requested.
def rget(self, key, replica_index=None, quiet=None):
    """Get an item from a replica node.

    :param key: the key to fetch
    :param replica_index: specific replica index, or None to accept the
        first replica that responds
    :param quiet: whether to suppress errors when the key is not found
    """
    if replica_index is None:
        return _Base._rget(self, key, quiet=quiet)
    return _Base._rgetix(self, key, replica=replica_index, quiet=quiet)
Get an item from a replica node :param string key: The key to fetch :param int replica_index: The replica index to fetch. If this is ``None`` then this method will return once any replica responds. Use :attr:`configured_replica_count` to figure out the upper bound for this parameter. The value for this parameter must be a number between 0 and the value of :attr:`configured_replica_count`-1. :param boolean quiet: Whether to suppress errors when the key is not found This method (if `replica_index` is not supplied) functions like the :meth:`get` method that has been passed the `replica` parameter:: c.get(key, replica=True) .. seealso:: :meth:`get` :meth:`rget_multi`
def read_message(self):
    """Try to read one length-prefixed message from the buffered data.

    A message is a 32-bit big-endian size followed by that many bytes.
    Both reads are performed non-destructively first; only when both
    succeed is the consumed span removed from the front of the buffers.

    Returns the message payload, or None when not enough data is buffered.
    """
    with self.__class__.__locker:
        # Peek at the 4-byte length prefix without consuming it.
        result = self.__passive_read(4)
        if result is None:
            return None
        (four_bytes, last_buffer_index, updates1) = result
        (length,) = unpack('>I', four_bytes)
        # Peek at the payload, continuing from where the prefix ended.
        result = self.__passive_read(length, last_buffer_index)
        if result is None:
            return None
        (data, last_buffer_index, updates2) = result
        # Both reads succeeded: apply the pending buffer updates to
        # actually consume the prefix and payload.
        for updates in (updates1, updates2):
            for update in updates:
                (buffer_index, buffer_, length_consumed) = update
                self.__buffers[buffer_index] = buffer_ if buffer_ else ''
                self.__length -= length_consumed
        self.__read_buffer_index = last_buffer_index
        # Periodically compact the buffer list.
        self.__hits += 1
        if self.__hits >= self.__class__.__cleanup_interval:
            self.__cleanup()
            self.__hits = 0
        return data
Try to read a message from the buffered data. A message is defined as a 32-bit integer size, followed that number of bytes. First we try to non-destructively read the integer. Then, we try to non- destructively read the remaining bytes. If both are successful, we then go back to remove the span from the front of the buffers.
def logo_url(self):
    """Return the URL path of the community logo, or None when there is none.

    :rtype: str or None
    """
    if not self.logo_ext:
        return None
    return '/api/files/{bucket}/{key}'.format(
        bucket=current_app.config['COMMUNITIES_BUCKET_UUID'],
        key='{0}/logo.{1}'.format(self.id, self.logo_ext),
    )
Get URL to collection logo. :returns: Path to community logo. :rtype: str
def GrabObject(self, identifier):
    """Grabs a cached object, incrementing its reference count.

    Args:
      identifier (str): VFS object identifier.

    Raises:
      KeyError: if the VFS object is not found in the cache.
      RuntimeError: if the cache value is missing.
    """
    # Sentinel distinguishes a missing key from a falsy cache value.
    _missing = object()
    cache_value = self._values.get(identifier, _missing)
    if cache_value is _missing:
        raise KeyError('Missing cached object for identifier: {0:s}'.format(
            identifier))
    if not cache_value:
        raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
            identifier))
    cache_value.IncrementReferenceCount()
Grabs a cached object based on the identifier. This method increments the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache. RuntimeError: if the cache value is missing.
def rename_feature(self, mapobject_type_name, name, new_name):
    """Rename feature `name` of the given mapobject type to `new_name`.

    Parameters
    ----------
    mapobject_type_name: str
        name of the segmented objects type
    name: str
        current feature name
    new_name: str
        new feature name
    """
    logger.info(
        'rename feature "%s" of experiment "%s", mapobject type "%s"',
        name, self.experiment_name, mapobject_type_name
    )
    feature_id = self._get_feature_id(mapobject_type_name, name)
    url = self._build_api_url(
        '/experiments/{experiment_id}/features/{feature_id}'.format(
            experiment_id=self._experiment_id, feature_id=feature_id
        )
    )
    res = self._session.put(url, json={'name': new_name})
    res.raise_for_status()
Renames a feature. Parameters ---------- mapobject_type_name: str name of the segmented objects type name: str name of the feature that should be renamed new_name: str name that should be given to the feature See also -------- :func:`tmserver.api.feature.update_feature` :class:`tmlib.models.feature.Feature`
def write(self, bytes):
    """Write `bytes` at the current address, truncating past the range end.

    Writes beyond the memory range are truncated and a TruncationWarning
    is produced.

    Returns
    -------
    int
        Number of bytes actually written.
    """
    overrun = self.address + len(bytes) - self._end_address
    if overrun > 0:
        n_bytes = len(bytes) - overrun
        warnings.warn("write truncated from {} to {} bytes".format(
            len(bytes), n_bytes), TruncationWarning, stacklevel=3)
        bytes = bytes[:n_bytes]
    if not bytes:
        return 0
    self._parent._perform_write(self.address, bytes)
    self._offset += len(bytes)
    return len(bytes)
Write data to the memory. .. note:: Writes beyond the specified memory range will be truncated and a :py:exc:`.TruncationWarning` is produced. These warnings can be converted into exceptions using :py:func:`warnings.simplefilter`:: >>> import warnings >>> from rig.machine_control.machine_controller \\ ... import TruncationWarning >>> warnings.simplefilter('error', TruncationWarning) Parameters ---------- bytes : :py:class:`bytes` Data to write to the memory as a bytestring. Returns ------- int Number of bytes written.
def get_go2sectiontxt(self):
    """Return a dict mapping GO IDs (header and user) to their section text."""
    go2txt = {}
    _get_secs = self.hdrobj.get_sections
    # Pre-compute the joined section text for every header GO.
    hdrgo2sectxt = {h:" ".join(_get_secs(h)) for h in self.get_hdrgos()}
    usrgo2hdrgo = self.get_usrgo2hdrgo()
    for goid, ntgo in self.go2nt.items():
        # A user GO inherits the section text of its header GO.
        hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
        go2txt[goid] = hdrgo2sectxt[hdrgo]
    return go2txt
Return a dict with actual header and user GO IDs as keys and their sections as values.
def removeblanklines(astr):
    """Return `astr` with all blank (whitespace-only) lines removed."""
    return "\n".join(line for line in astr.splitlines()
                     if line.strip() != "")
remove the blank lines in astr
def add(self, key, value, expire=0, noreply=None):
    """The memcached "add" command: store `value` only if `key` is absent.

    Args:
      key: item key.
      value: item value.
      expire: seconds until expiry, or 0 for no expiry.
      noreply: True to skip waiting for the reply; defaults to
        self.default_noreply.

    Returns:
      True if stored (always True when noreply is True), False otherwise.
    """
    reply_mode = self.default_noreply if noreply is None else noreply
    return self._store_cmd(b'add', {key: value}, expire, reply_mode)[key]
The memcached "add" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, the return value is always True. Otherwise the return value is True if the value was stored, and False if it was not (because the key already existed).
def _variants_dtype(fields, dtypes, arities, filter_ids, flatten_filter,
                    info_types):
    """Build a numpy dtype spec (list of field tuples) for a variants array.

    FILTER is expanded either into one boolean column per filter id
    (*flatten_filter*) or a single structured boolean sub-dtype.  Other
    fields resolve their type from, in order: user-supplied *dtypes*,
    standard variant fields, default INFO dtypes, then the VCF type map.
    Fields with arity > 1 get a shape suffix.
    """
    specs = []
    for field, arity, vcf_type in zip(fields, arities, info_types):
        if field == 'FILTER':
            if flatten_filter:
                # One boolean column per declared filter.
                specs.extend(('FILTER_' + flt, 'b1') for flt in filter_ids)
            else:
                specs.append((field, [(flt, 'b1') for flt in filter_ids]))
            continue
        if dtypes is not None and field in dtypes:
            t = dtypes[field]
        elif field in config.STANDARD_VARIANT_FIELDS:
            t = config.DEFAULT_VARIANT_DTYPE[field]
        elif field in config.DEFAULT_INFO_DTYPE:
            t = config.DEFAULT_INFO_DTYPE[field]
        else:
            t = config.DEFAULT_TYPE_MAP[vcf_type]
        specs.append((field, t) if arity == 1 else (field, t, (arity,)))
    return specs
Utility function to build a numpy dtype for a variants array, given user arguments and information available from VCF header.
def make_response(self, data, *args, **kwargs):
    """Render *data* using the representation matching the Accept header.

    Falls back to ``fallback_mediatype`` (or ``self.default_mediatype``)
    when no representation matches.  Raises NotAcceptable (RFC 2616
    s14.1) when no mediatype can be negotiated at all, and
    InternalServerError when the negotiated mediatype has no transformer
    and is not text/plain.

    :param data: Python object containing response data to be transformed
    """
    fallback = kwargs.pop('fallback_mediatype', None) or self.default_mediatype
    mediatype = request.accept_mimetypes.best_match(
        self.representations, default=fallback)
    if mediatype is None:
        raise NotAcceptable()
    transformer = self.representations.get(mediatype)
    if transformer is not None:
        response = transformer(data, *args, **kwargs)
        response.headers['Content-Type'] = mediatype
        return response
    if mediatype == 'text/plain':
        # No registered transformer, but plain text can be emitted directly.
        response = original_flask_make_response(str(data), *args, **kwargs)
        response.headers['Content-Type'] = 'text/plain'
        return response
    raise InternalServerError()
Looks up the representation transformer for the requested media type, invoking the transformer to create a response object. This defaults to default_mediatype if no transformer is found for the requested mediatype. If default_mediatype is None, a 406 Not Acceptable response will be sent as per RFC 2616 section 14.1 :param data: Python object containing response data to be transformed
def getdirs(self, section, option, raw=False, vars=None, fallback=[]):
    """Coerce the option in *section* to a list of existing directories.

    The option value is read as a list of glob patterns; each pattern is
    expanded and only paths that are directories are kept.

    :param fallback: value used when the option is missing.  (Fix: the
        previous implementation accepted this argument but ignored it,
        always falling back to an empty list.)
    """
    # NOTE(review): `raw` and `vars` are accepted for configparser-style
    # signature compatibility but are not forwarded to getlist; confirm
    # whether getlist should receive them.
    globs = self.getlist(section, option, fallback=fallback)
    return [path for pattern in globs
            for path in glob.glob(pattern)
            if os.path.isdir(path)]
A convenience method which coerces the option in the specified section to a list of directories.
def _compute_am_i_owner(self):
    """Compute ``am_i_owner``: True when the current user created the record.

    Odoo compute method: iterates the recordset and compares each record's
    ``create_uid`` with the current environment user.
    """
    for rec in self:
        rec.am_i_owner = (rec.create_uid == self.env.user)
Check if current user is the owner
def _compute_dynamic_properties(self, builder):
    """Update this dataset info from *builder*'s generated data.

    For every declared split, computes feature statistics and a schema via
    ``get_dataset_feature_statistics`` and copies them into the underlying
    protos.  Re-raises with an explanatory log message when a declared
    split's data was never generated.
    """
    splits = self.splits
    for split_info in utils.tqdm(
            splits.values(), desc="Computing statistics...", unit=" split"):
        try:
            split_name = split_info.name
            dataset_feature_statistics, schema = get_dataset_feature_statistics(
                builder, split_name)
            # Copy the computed statistics/schema into the proto form.
            split_info.statistics.CopyFrom(dataset_feature_statistics)
            self.as_proto.schema.CopyFrom(schema)
        except tf.errors.InvalidArgumentError:
            # Raised when the split's data is absent on disk.
            logging.error(("%s's info() property specifies split %s, but it "
                           "doesn't seem to have been generated. Please ensure "
                           "that the data was downloaded for this split and re-run "
                           "download_and_prepare."), self.name, split_name)
            raise
    self._set_splits(splits)
Update from the DatasetBuilder.
def parse(self, kv):
    """Parse a 'k1<sep>k2<sep>...=value' string into a nested dict.

    Examples::

        parse('test1.test2=value') -> {'test1': {'test2': 'value'}}
        parse('test=value')        -> {'test': 'value'}
    """
    key, value = kv.split(self.kv_sep, 1)
    # Build the nesting inside-out, deepest key first.
    result = value
    for part in reversed(key.split(self.keys_sep)):
        result = {part: result}
    return result
Parses key value string into dict Examples: >> parser.parse('test1.test2=value') {'test1': {'test2': 'value'}} >> parser.parse('test=value') {'test': 'value'}
def get_list(self, name, default=None):
    """Retrieve the environment variable *name* coerced to a list.

    Tuples (and any other iterable values) are coerced to lists.  When
    the variable is missing, *default* is returned if given; otherwise
    a not-found error is raised.
    """
    if name in self:
        return list(self[name])
    if default is not None:
        return default
    raise EnvironmentError.not_found(self._prefix, name)
Retrieves an environment variable as a list.

Note that while implicit access of environment variables containing
tuples will return tuples, using this method will coerce tuples to
lists.

Args:
    name (str): The case-insensitive, unprefixed variable name.
    default: If provided, a default value will be returned
        instead of throwing ``EnvironmentError``.

Returns:
    list: The environment variable's value as a list.

Raises:
    EnvironmentError: If the environment variable does not exist, and
        ``default`` was not provided.
    TypeError: If the environment variable's value is not iterable and
        so cannot be coerced to a list.
def get_abs_path_static(savepath, relative_to_path):
    """Return the absolute version of *savepath*.

    :param str savepath: path to absolutize
    :param str relative_to_path: base directory used when *savepath*
        is relative
    :return str: absolute version of savepath
    """
    if not os.path.isabs(savepath):
        savepath = os.path.join(relative_to_path, savepath)
    return os.path.abspath(savepath)
Figures out the savepath's absolute version. :param str savepath: the savepath to return an absolute version of :param str relative_to_path: the file path this savepath should be relative to :return str: absolute version of savepath
def _match_lhs(cp, rules):
    """Return the rules whose left-hand side matches ComplexPattern *cp*.

    A rule matches when any complex pattern of its reactant pattern
    embeds *cp*.
    """
    matches = []
    for rule in rules:
        reactants = rule.rule_expression.reactant_pattern
        if any(_cp_embeds_into(rule_cp, cp)
               for rule_cp in reactants.complex_patterns):
            matches.append(rule)
    return matches
Get rules with a left-hand side matching the given ComplexPattern.
def spsolve(A, b):
    """Solve the sparse linear system ``Ax = b`` via an UMFPACK LU factorization.

    :param A: square sparse matrix (converted to CSC/CSR internally).
    :param b: right-hand side vector or matrix.
    :returns: the solution; a (n, 1) right-hand side is flattened to a
        length-n vector, matching scipy.sparse.linalg.spsolve.
    """
    solution = UmfpackLU(A).solve(b)
    if b.ndim == 2 and b.shape[1] == 1:
        solution = solution.ravel()
    return solution
Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[0] If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
def remove_hook(self, key_name, hook_name):
    """Remove hook *hook_name* from keyframe *key_name*.

    Returns the removed hook, or None when the keyframe has no hooks
    or no hook by that name.
    """
    hooks = self.dct[key_name].get('hooks', {})
    if hook_name in hooks:
        return hooks.pop(hook_name)
Remove hook from the keyframe key_name.
def request_length(self):
    """Return the size of the next chunk to upload: the configured chunk
    size, capped by the bytes remaining before ``stop_at``."""
    return min(self.chunk_size, self.stop_at - self.offset)
Return length of next chunk upload.
def has_concluded(self, bigchain, current_votes=[]):
    """Return True when this election first reaches a 2/3 supermajority.

    Concludes only if (a) the validator set is unchanged since the
    election started and (b) committed votes alone are still below 2/3 of
    the total while committed + current votes reach it — so conclusion
    fires exactly once, on the crossing of the threshold.

    NOTE(review): mutable default ``current_votes=[]`` — not mutated here,
    but confirm ``count_votes`` does not mutate its argument.
    """
    if self.has_validator_set_changed(bigchain):
        return False

    election_pk = self.to_public_key(self.id)
    votes_committed = self.get_commited_votes(bigchain, election_pk)
    votes_current = self.count_votes(election_pk, current_votes)
    # Total voting power is the sum of all output amounts.
    total_votes = sum(output.amount for output in self.outputs)
    # True only on the transition across the 2/3 threshold.
    if (votes_committed < (2/3) * total_votes) and \
            (votes_committed + votes_current >= (2/3)*total_votes):
        return True

    return False
Check if the election can be concluded or not. * Elections can only be concluded if the validator set has not changed since the election was initiated. * Elections can be concluded only if the current votes form a supermajority. Custom elections may override this function and introduce additional checks.
def runcmd(self, cmd, args):
    """Dispatch *cmd* with *args* to the matching ``do_<cmd>`` handler.

    When an ``args_<cmd>`` method exists it configures an argparse parser
    whose parsed namespace is passed to the handler; otherwise the raw
    arguments are joined into a single string.  Unknown commands go to
    ``self.default``.  May raise :exc:`SystemExit` via argparse (e.g. for
    ``--help``).
    """
    handler = getattr(self, 'do_' + cmd, None)
    if handler is None:
        return self.default(' '.join([cmd] + args))
    configure_args = getattr(self, 'args_' + cmd, None)
    if configure_args is None:
        parsed = ' '.join(args)
    else:
        parser = argparse.ArgumentParser(
            prog=cmd, description=getattr(handler, '__doc__', None))
        configure_args(parser)
        parsed = parser.parse_args(args)
    return handler(parsed)
Run a single command from pre-parsed arguments. This is intended to be run from :meth:`main` or somewhere else "at the top level" of the program. It may raise :exc:`exceptions.SystemExit` if an argument such as ``--help`` that normally causes execution to stop is encountered.
def get_block_statistics(cls, block_id):
    """Return the recorded statistics for *block_id*, or None if absent.

    Only usable in test mode: raises Exception unless the
    BLOCKSTACK_TEST environment variable is set.
    """
    if not os.environ.get("BLOCKSTACK_TEST"):
        raise Exception("This method is only available in the test framework")

    # `global` is declarative only here; STATISTICS is read, not rebound.
    global STATISTICS
    return STATISTICS.get(block_id)
Get block statistics. Only works in test mode.
def update_safety_check(first_dict: MutableMapping[K, V],
                        second_dict: Mapping[K, V],
                        compat: Callable[[V, V], bool] = equivalent) -> None:
    """Check that updating *first_dict* with *second_dict* is safe.

    Raises ValueError when any key present in both dicts has values that
    *compat* (identity/equivalence by default) deems incompatible.
    """
    for key, new_value in second_dict.items():
        if key not in first_dict:
            continue
        if not compat(new_value, first_dict[key]):
            raise ValueError('unsafe to merge dictionaries without '
                             'overriding values; conflicting key %r' % key)
Check the safety of updating one dictionary with another. Raises ValueError if dictionaries have non-compatible values for any key, where compatibility is determined by identity (they are the same item) or the `compat` function. Parameters ---------- first_dict, second_dict : dict-like All items in the second dictionary are checked against for conflicts against items in the first dictionary. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence.
def find(self, obj, filter_to_class=Ingredient, constructor=None):
    """Find an Ingredient, optionally looking it up on the shelf.

    :param obj: a string key (a leading '-' requests descending ordering)
        or an Ingredient instance
    :param filter_to_class: the Ingredient subclass *obj* must be an
        instance of
    :param constructor: optional callable building an Ingredient from *obj*
    :return: an Ingredient of subclass *filter_to_class*
    :raises BadRecipe: when the key is missing from the shelf or the
        resolved object has the wrong class
    """
    if callable(constructor):
        obj = constructor(obj, shelf=self)
    if isinstance(obj, basestring):  # Python 2 string check
        # A leading '-' means "order this ingredient descending".
        set_descending = obj.startswith('-')
        if set_descending:
            obj = obj[1:]
        if obj not in self:
            raise BadRecipe("{} doesn't exist on the shelf".format(obj))
        ingredient = self[obj]
        if not isinstance(ingredient, filter_to_class):
            raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
        if set_descending:
            ingredient.ordering = 'desc'
        return ingredient
    elif isinstance(obj, filter_to_class):
        # Already a suitable Ingredient; pass it through unchanged.
        return obj
    else:
        raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
Find an Ingredient, optionally using the shelf. :param obj: A string or Ingredient :param filter_to_class: The Ingredient subclass that obj must be an instance of :param constructor: An optional callable for building Ingredients from obj :return: An Ingredient of subclass `filter_to_class`
def Account(self):
    """Return the Account object for the account containing this server."""
    return clc.v2.Account(alias=self.alias, session=self.session)
Return account object for account containing this server. >>> clc.v2.Server("CA3BTDICNTRLM01").Account() <clc.APIv2.account.Account instance at 0x108789878> >>> print _ BTDI
def _filehandle(self):
    """Return a filehandle to the tailed file, positioned at the current
    offset.

    Reopens the file when no handle exists or the current one is closed,
    preferring a detected rotated logfile over the configured filename.
    Gzipped files are opened via gzip; plain files are opened
    line-buffered.  On the first run with ``read_from_end`` set (no
    offset file yet), seeks to EOF instead of the stored offset.
    """
    if not self._fh or self._is_closed():
        filename = self._rotated_logfile or self.filename
        if filename.endswith('.gz'):
            self._fh = gzip.open(filename, 'r')
        else:
            # buffering=1 -> line-buffered handle.
            self._fh = open(filename, "r", 1)
        if self.read_from_end and not exists(self._offset_file):
            # No offset recorded yet: start tailing from the end.
            self._fh.seek(0, os.SEEK_END)
        else:
            self._fh.seek(self._offset)

    return self._fh
Return a filehandle to the file being tailed, with the position set to the current offset.
def _fnop_style(schema, op, name):
    """Set operator parameter *name* to the Python style marker of *schema*.

    Common SPL schemas carry no style: any stale parameter is removed.
    Otherwise the style is encoded as 'pending', 'tuple', 'dict' or
    'namedtuple:<name>'; unknown styles leave the operator untouched.
    """
    if is_common(schema):
        # Common schemas have no Python style; drop any stale parameter.
        if name in op.params:
            del op.params[name]
        return
    if _is_pending(schema):
        ntp = 'pending'
    elif schema.style is tuple:
        ntp = 'tuple'
    elif schema.style is _spl_dict:
        ntp = 'dict'
    elif _is_namedtuple(schema.style) and hasattr(schema.style, '_splpy_namedtuple'):
        # Only namedtuples created by splpy carry the marker attribute.
        ntp = 'namedtuple:' + schema.style._splpy_namedtuple
    else:
        # Unrecognized style: leave parameters unchanged.
        return
    op.params[name] = ntp
Set an operator's parameter representing the style of this schema.
def bucket(things, key):
    """Group *things* into a mapping of ``key(thing) -> list of things``."""
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
Return a map of key -> list of things.
def clear(self, tag=None):
    """Delete scheduled jobs carrying *tag*, or every job when *tag* is None.

    :param tag: identifier selecting the subset of jobs to delete
    """
    if tag is None:
        del self.jobs[:]
        return
    # In-place slice assignment keeps the same list object alive.
    self.jobs[:] = [job for job in self.jobs if tag not in job.tags]
Deletes scheduled jobs marked with the given tag, or all jobs if tag is omitted. :param tag: An identifier used to identify a subset of jobs to delete
def trigger(self, event: str, *args: T.Any, **kw: T.Any) -> bool:
    """Invoke every handler subscribed to *event*.

    Returns True when at least one handler ran, False otherwise.
    """
    # Snapshot the handler list so callbacks may (un)subscribe safely.
    handlers = list(self._events.get(event, []))
    if not handlers:
        return False
    for handler in handlers:
        handler(*args, **kw)
    return True
Triggers all handlers which are subscribed to an event. Returns True when there were callbacks to execute, False otherwise.
def _applytfms(args):
    """Apply ANTs' antsApplyTransforms to one input image.

    All inputs arrive zipped in a single tuple so the function is
    digestible by multiprocessing's map:
    ``(in_file, in_xform, ifargs, index, newpath)``.

    Returns ``(out_file, cmdline)``.
    """
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix
    from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms

    in_file, in_xform, ifargs, index, newpath = args
    # Derive a unique, indexed output filename next to newpath.
    out_file = fname_presuffix(in_file, suffix='_xform-%05d' % index,
                               newpath=newpath, use_ext=True)

    copy_dtype = ifargs.pop('copy_dtype', False)
    xfm = ApplyTransforms(
        input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs)
    xfm.terminal_output = 'allatonce'
    xfm.resource_monitor = False
    runtime = xfm.run().runtime

    if copy_dtype:
        # ANTs may upcast the data; restore the input file's dtype.
        nii = nb.load(out_file)
        in_dtype = nb.load(in_file).get_data_dtype()
        if in_dtype != nii.get_data_dtype():
            nii.set_data_dtype(in_dtype)
            nii.to_filename(out_file)

    return (out_file, runtime.cmdline)
Applies ANTs' antsApplyTransforms to the input image. All inputs are zipped in one tuple to make it digestible by multiprocessing's map
def atleast_1d(*arrs):
    r"""Convert inputs to arrays with at least one dimension, preserving units.

    Thin wrapper around `numpy.atleast_1d` that strips pint magnitudes
    before the call and reattaches the original units afterwards.

    (Fix: a stray bare ``r`` token — the remnant of a stripped raw
    docstring prefix — previously raised NameError at call time; it is
    restored here as the raw-docstring prefix.)

    Parameters
    ----------
    arrs : arbitrary positional arguments
        Input arrays to be converted if necessary

    Returns
    -------
    `pint.Quantity` or array, or a list of them for multiple inputs.
    """
    mags = [a.magnitude if hasattr(a, 'magnitude') else a for a in arrs]
    orig_units = [a.units if hasattr(a, 'units') else None for a in arrs]
    ret = np.atleast_1d(*mags)
    if len(mags) == 1:
        if orig_units[0] is not None:
            return units.Quantity(ret, orig_units[0])
        return ret
    return [units.Quantity(m, u) if u is not None else m
            for m, u in zip(ret, orig_units)]
r"""Convert inputs to arrays with at least one dimension. Scalars are converted to 1-dimensional arrays, whilst other higher-dimensional inputs are preserved. This is a thin wrapper around `numpy.atleast_1d` to preserve units. Parameters ---------- arrs : arbitrary positional arguments Input arrays to be converted if necessary Returns ------- `pint.Quantity` A single quantity or a list of quantities, matching the number of inputs.
def set_default_backend(name: str):
    """Choose the default plotting backend.

    :raises RuntimeError: for the discontinued 'bokeh' backend, or for
        any name not present in the supported ``backends`` registry.
    """
    global _default_backend
    if name == "bokeh":
        raise RuntimeError("Support for bokeh has been discontinued. At some point, we may return to support holoviews.")
    # Fix: idiomatic membership test (`name not in`, was `not name in`).
    if name not in backends:
        raise RuntimeError("Backend {0} is not supported and cannot be set as default.".format(name))
    _default_backend = name
Choose a default backend.
def lint(relative_path_to_file, contents, linter_functions, **kwargs):
    r"""Lint file contents with every registered linter function.

    *relative_path_to_file* is the path of the linted file relative to the
    source root; *contents* is the raw text (with \n line endings).
    Returns (code, error) tuples sorted by line, with suppressed errors
    filtered out.

    (Fix: a stray bare ``r`` token — the remnant of a stripped raw
    docstring prefix — previously raised NameError at call time; it is
    restored here as the raw-docstring prefix.)
    """
    lines = contents.splitlines(True)
    errors = list()
    for (code, info) in linter_functions.items():
        # A linter may return a single error or a list of them.
        error = info.function(relative_path_to_file, lines, kwargs)
        if error:
            if isinstance(error, list):
                errors.extend([(code, e) for e in error])
            else:
                errors.append((code, error))

    errors = [e for e in errors
              if not _error_is_suppressed(e[1], e[0], lines)]
    return sorted(errors, key=lambda e: e[1].line)
r"""Actually lints some file contents. relative_path_to_file should contain the relative path to the file being linted from the root source directory. Contents should be a raw string with \n's.
def register_ignore(self, origins, destination):
    """Compile *origins* into regexes and register them as ignore rules
    for *destination*, then regenerate the routing table.

    :param origins: one origin pattern or an iterable of them
    :param destination: node the origins should point to
    :returns: the registered (compiled) patterns for *destination*

    Note: rules are added with ``setdefault`` — an existing entry for
    *destination* is kept unchanged.
    """
    if not isinstance(origins, list):
        origins = [origins]
    compiled = [re.compile(origin) for origin in origins]
    self.ignore_regexes.setdefault(destination, compiled)
    self.regenerate_routes()
    return self.ignore_regexes[destination]
Add routes to the ignore dictionary :param origins: a number of origins to register :type origins: :py:class:`str` or iterable of :py:class:`str` :param destination: where the origins should point to :type destination: :py:class:`str` Ignore dictionary takes the following form:: {'node_a': set(['node_b', 'node_c']), 'node_b': set(['node_d'])}
def launch_browser(attempt_launch_browser=True):
    """Decide whether a web browser should be launched.

    Returns False when launching was not requested, when Linux has no
    display server, when the resolved browser is text-mode, or when no
    browser is available at all.
    """
    _DISPLAY_VARIABLES = ['DISPLAY', 'WAYLAND_DISPLAY', 'MIR_SOCKET']
    _WEBBROWSER_NAMES_BLACKLIST = [
        'www-browser', 'lynx', 'links', 'elinks', 'w3m']

    import webbrowser
    should_launch = attempt_launch_browser
    # On Linux, a browser only makes sense with a display server present.
    if should_launch and 'linux' in sys.platform and not any(
            os.getenv(var) for var in _DISPLAY_VARIABLES):
        should_launch = False
    try:
        browser = webbrowser.get()
        # Text-mode browsers are useless for our purposes.
        if getattr(browser, 'name', None) in _WEBBROWSER_NAMES_BLACKLIST:
            should_launch = False
    except webbrowser.Error:
        should_launch = False

    return should_launch
Decide if we should launch a browser
def get_bind_data(zone_id, profile):
    """Export the zone *zone_id* in BIND-compatible format.

    :param zone_id: zone to export
    :param profile: the profile key used to build the driver connection
    :return: zone data as a BIND-format string
    """
    conn = _get_driver(profile=profile)
    return conn.export_zone_to_bind_format(conn.get_zone(zone_id))
Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1
def print_most_common(counter, number=5, tab=1):
    """Print the *number* most common entries of *counter*, one per line,
    each indented by *tab* tab characters.  (Python 2 print statement.)"""
    for key, count in counter.most_common(number):
        print "{0}{1} - {2}".format('\t'*tab, key, count)
print the most common elements of a counter
def delete(self, item):
    """Delete *item* (an object or its ID) via the REST API."""
    uri = "/{0}/{1}".format(self.uri_base, utils.get_id(item))
    return self._delete(uri)
Deletes the specified item.
def on(self):
    """Turn the alsa_sink sink on by subscribing to MUSIC_DELIVERY.

    Done automatically at instantiation; only needed again after a call
    to :meth:`off`.
    """
    # Guard against double subscription: there must be no existing
    # music-delivery listener before attaching ours.
    assert spotifyconnect._session_instance.player.num_listeners(
        spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
    spotifyconnect._session_instance.player.on(
        spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
Turn on the alsa_sink sink. This is done automatically when the sink is instantiated, so you'll only need to call this method if you ever call :meth:`off` and want to turn the sink back on.
def is_address_in_network(network, address):
    """Report whether *address* falls within the CIDR *network*.

    :param network: CIDR presentation format, e.g. '192.168.1.0/24'
    :param address: bare IPv4/IPv6 address, e.g. '192.168.1.1'
    :returns: True when the address lies inside the network
    :raises ValueError: when either argument is malformed
    """
    try:
        net = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
    try:
        addr = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)
    return addr in net
Determine whether the provided address is within a network range. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param address: An individual IPv4 or IPv6 address without a net mask or subnet prefix. For example, '192.168.1.1'. :returns boolean: Flag indicating whether address is in network.
def get_class(import_path=None):
    """Import and return the class named by the dotted *import_path*.

    Largely based on django.core.files.storage's get_storage_class.
    Raises ImproperlyConfigured when the path is missing, malformed, the
    module cannot be imported, or the class is not defined in it.
    """
    from django.core.exceptions import ImproperlyConfigured
    if import_path is None:
        raise ImproperlyConfigured('No class path specified.')
    module, sep, classname = import_path.rpartition('.')
    if not sep:
        # No dot at all: cannot split into module + class name.
        raise ImproperlyConfigured("%s isn't a module." % import_path)
    try:
        mod = import_module(module)
    except ImportError as e:
        raise ImproperlyConfigured('Error importing module %s: "%s"' %
                                   (module, e))
    try:
        return getattr(mod, classname)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" class.' %
                                   (module, classname))
Largely based on django.core.files.storage's get_storage_class
def prt_line_detail(self, prt, line):
    """Split a tab-separated *line* and print its values via the detail
    helper in a readable format."""
    self._prt_line_detail(prt, line.split('\t'))
Print line header and values in a readable format.
def mount(self):
    """Mount the repository if it is not already mounted.

    On non-OS X platforms the mount point directory is created first
    when missing; the actual mount is delegated to ``self._mount()``.
    """
    if not self.is_mounted():
        # OS X manages mount points itself; elsewhere ensure the
        # directory exists before mounting.
        if not is_osx():
            if not os.path.exists(self.connection["mount_point"]):
                os.mkdir(self.connection["mount_point"])
        self._mount()
Mount the repository.
def is_background_knowledge(stmt):
    """Return True if the Statement is supported only by background knowledge.

    True when at least one evidence carries a background section type and
    no evidence carries a non-background section; evidence without
    epistemics is ignored.
    """
    any_background = False
    for ev in stmt.evidence:
        epi = ev.epistemics
        if epi is None:
            continue
        sec = epi.get('section_type')
        # Any non-background section disqualifies the whole statement.
        if sec is not None and sec not in background_secs:
            return False
        elif sec in background_secs:
            any_background = True
    return any_background
Return True if Statement is only supported by background knowledge.
def register_blueprint(self, blueprint, **options):
    """Register *blueprint* on this WebSockets app.

    A blueprint may be registered multiple times; only the first
    registration records it in ``self.blueprints``.  Registering two
    different blueprints under the same name is an error.
    """
    first_registration = blueprint.name not in self.blueprints
    if first_registration:
        self.blueprints[blueprint.name] = blueprint
    else:
        # Same name must mean the very same blueprint object.
        assert self.blueprints[blueprint.name] is blueprint, \
            'A blueprint\'s name collision occurred between %r and ' \
            '%r. Both share the same name "%s". Blueprints that ' \
            'are created on the fly need unique names.' % \
            (blueprint, self.blueprints[blueprint.name], blueprint.name)
    blueprint.register(self, options, first_registration)
Registers a blueprint on the WebSockets.
def _tree(domain, tld=False):
    """Return *domain* and each of its parent domains, excluding the TLD.

    The TLD is taken from *tld* when given; otherwise it is resolved via
    tldextract when available, falling back to a best-effort regex.

    :param domain: e.g. 'dc2.ams2.example.com'
    :param tld: a TLD string, or False to auto-detect
    :return: e.g. ['dc2.ams2.example.com', 'ams2.example.com', 'example.com']
    """
    domain = domain.rstrip('.')
    # NOTE(review): `assert` is stripped under -O; consider raising instead.
    assert '.' in domain, 'Provide a decent domain'
    if not tld:
        if HAS_TLDEXTRACT:
            tld = tldextract.extract(domain).suffix
        else:
            # Crude public-suffix approximation: optional well-known
            # second-level label plus the final label.
            tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group()
            log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld)
    res = [domain]
    while True:
        idx = domain.find('.')
        if idx < 0:
            break
        domain = domain[idx + 1:]
        if domain == tld:
            # Stop before including the bare TLD itself.
            break
        res.append(domain)
    return res
Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com']
def image_upload_to(self, filename):
    """Compute the upload path for the image field:
    ``UPLOAD_TO/YYYY/MM/DD/<slugified-name><ext>``."""
    base, extension = os.path.splitext(filename)
    now = timezone.now()
    return os.path.join(
        UPLOAD_TO,
        now.strftime('%Y'),
        now.strftime('%m'),
        now.strftime('%d'),
        '%s%s' % (slugify(base), extension))
Compute the upload path for the image field.
def create(self, identity_id, service, token):
    """POST a new service token for the given identity.

    :param identity_id: the ID of the identity the token belongs to
    :param service: the service the token is linked to
    :param token: the token provided by the service
    :return: dict of REST API output with headers attached
    """
    payload = {'service': service, 'token': token}
    endpoint = str(identity_id) + '/token'
    return self.request.post(endpoint, payload)
Create the token

:param identity_id: The ID of the identity to create the token for
:param service: The service that the token is linked to
:param token: The token provided by the service
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
    :class:`requests.exceptions.HTTPError`
def log_exception(fn_name, exception, retry_count, **kwargs):
    """External exception logging hook.

    Forwards the failed call's name, the exception, the retry count and
    any extra keyword context to the configured module-level hook.
    """
    _log_exception_hook(fn_name, exception, retry_count, **kwargs)
External exception logging hook.