code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_media_stream(self, media_item, format, quality):
    """Get the stream data for a given media item.

    @param crunchyroll.models.Media media_item
    @param int format
    @param int quality
    @return crunchyroll.models.MediaStream
    """
    config = self._ajax_api.VideoPlayer_GetStandardConfig(
        media_id=media_item.media_id,
        video_format=format,
        video_quality=quality)
    return MediaStream(config)
Get the stream data for a given media item @param crunchyroll.models.Media media_item @param int format @param int quality @return crunchyroll.models.MediaStream
def rmtree_or_file(path, ignore_errors=False, onerror=None):
    """Remove the target, whatever it is.

    ``shutil.rmtree`` fails on files or symlinks, so dispatch to the
    appropriate removal routine for directories vs. files/symlinks.
    """
    if ignore_errors and not os.path.exists(path):
        return
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
    else:
        os.unlink(path)
rmtree fails on files or symlinks. This removes the target, whatever it is.
def has_value(cls, value: int) -> bool:
    """True if the specified value exists in the int enum; otherwise, False."""
    for member in cls:
        if member.value == value:
            return True
    return False
True if specified value exists in int enum; otherwise, False.
def get_tokens(max_value):
    """Defines tokens.

    Args:
        max_value: the maximum numeric range for the token.

    Returns:
        list of string tokens in vocabulary.
    """
    vocab = {str(i) for i in range(max_value)}
    vocab.update(CodeOp.LITERALS)
    vocab.update(CodeOp.KEYWORDS)
    # Also include every individual character appearing in any token.
    vocab |= set("".join(vocab))
    return sorted(vocab)
Defines tokens. Args: max_value: the maximum numeric range for the token. Returns: list of string tokens in vocabulary.
def _get_node_key(self, node_dict_item): s = tuple(sorted(node_dict_item['sources'])) t = tuple(sorted(node_dict_item['targets'])) return (s, t)
Return a tuple of sorted sources and targets given a node dict.
def competition_submissions(self, competition):
    """Get the list of Submission for a particular competition.

    Parameters
    ==========
    competition: the name of the competition
    """
    raw = self.competitions_submissions_list_with_http_info(id=competition)
    submissions = self.process_response(raw)
    return [Submission(item) for item in submissions]
get the list of Submission for a particular competition Parameters ========== competition: the name of the competition
def from_string(cls, value):
    """Return single instance parsed from given accept header string."""
    match = cls.pattern.search(value)
    if match is None:
        raise ValueError('"%s" is not a valid media type' % value)
    weight = match.group('weight') or 1
    try:
        return cls(match.group('mime_type'), float(weight))
    except ValueError:
        # Fall back to treating the whole string as the mime type.
        return cls(value)
Return single instance parsed from given accept header string.
def ascii_printable(self, keysym):
    """Return False for keysyms of non-printable ascii characters, True otherwise.

    Note: keysyms 9-13 (tab through carriage return) are treated as printable
    here, matching the original control-flow.
    """
    non_printable = (
        0 <= keysym < 9
        or 13 < keysym < 32
        or keysym > 126
    )
    return not non_printable
If the keysym corresponds to a non-printable ascii character this will return False. If it is printable, then True will be returned. ascii 11 (vertical tab) and ascii 12 are printable, chr(11) and chr(12) will return '\x0b' and '\x0c' respectively.
def init_cmu(filehandle=None):
    """Initialize the module's pronunciation data.

    This function is called automatically the first time you attempt to use
    another function in the library that requires loading the pronunciation
    data from disk.  You can call this function manually to control when and
    how the pronunciation data is loaded.

    :param filehandle: a filehandle with CMUdict-formatted data
    :returns: None
    """
    global pronunciations, lookup, rhyme_lookup
    if pronunciations is not None:
        # Already initialized; nothing to do.
        return
    if filehandle is None:
        filehandle = cmudict.dict_stream()
    pronunciations = parse_cmu(filehandle)
    filehandle.close()
    lookup = collections.defaultdict(list)
    for word, phones in pronunciations:
        lookup[word].append(phones)
    rhyme_lookup = collections.defaultdict(list)
    for word, phones in pronunciations:
        rp = rhyming_part(phones)
        if rp is not None:
            rhyme_lookup[rp].append(word)
Initialize the module's pronunciation data. This function is called automatically the first time you attempt to use another function in the library that requires loading the pronunciation data from disk. You can call this function manually to control when and how the pronunciation data is loaded (e.g., you're using this module in a web application and want to load the data asynchronously). :param filehandle: a filehandle with CMUdict-formatted data :returns: None
def with_device(
        self,
        new_device: devices.Device,
        qubit_mapping: Callable[[ops.Qid], ops.Qid] = lambda e: e,
) -> 'Circuit':
    """Maps the current circuit onto a new device, and validates.

    Args:
        new_device: The new device that the circuit should be on.
        qubit_mapping: How to translate qubits from the old device into
            qubits on the new device.

    Returns:
        The translated circuit.
    """
    translated_moments = [
        ops.Moment(op.transform_qubits(qubit_mapping)
                   for op in moment.operations)
        for moment in self._moments
    ]
    return Circuit(moments=translated_moments, device=new_device)
Maps the current circuit onto a new device, and validates. Args: new_device: The new device that the circuit should be on. qubit_mapping: How to translate qubits from the old device into qubits on the new device. Returns: The translated circuit.
def _check_update_fw(self, tenant_id, drvr_name):
    """Update the Firewall config by calling the driver.

    Calls the device manager routine to update the device with the
    modified FW cfg, but only once the FW config is complete.
    """
    tenant = self.fwid_attr[tenant_id]
    if not tenant.is_fw_complete():
        return
    fw_dict = tenant.get_fw_dict()
    self.modify_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
Update the Firewall config by calling the driver. This function calls the device manager routine to update the device with modified FW cfg.
def on_subscribe(self):
    """Decorate a callback function to handle subscriptions.

    **Usage:**::

        @mqtt.on_subscribe()
        def handle_subscribe(client, userdata, mid, granted_qos):
            print('Subscription id {} granted with qos {}.'
                  .format(mid, granted_qos))
    """
    def register(handler):
        # Install the handler on the underlying client and return it
        # unchanged so the decorated name stays usable.
        self.client.on_subscribe = handler
        return handler
    return register
Decorate a callback function to handle subscriptions. **Usage:**:: @mqtt.on_subscribe() def handle_subscribe(client, userdata, mid, granted_qos): print('Subscription id {} granted with qos {}.' .format(mid, granted_qos))
def _format_background(background):
    """Formats the background section.

    :param background: the background content or file.
    :type background: str or file
    :returns: the background content.
    :rtype: str
    """
    if os.path.isfile(background):
        with open(background, "r") as i_file:
            lines = i_file.read().splitlines()
    else:
        lines = background.splitlines()
    parts = []
    for line in lines:
        if line == "":
            # A blank line becomes a LaTeX paragraph break.
            parts.append(r"\\" + "\n\n")
        else:
            parts.append(latex.wrap_lines(latex.sanitize_tex(line)))
    return "".join(parts)
Formats the background section :param background: the background content or file. :type background: str or file :returns: the background content. :rtype: str
def published(self, request=None):
    """Returns the published documents in the current language.

    :param request: A Request instance.
    """
    language = getattr(request, 'LANGUAGE_CODE', get_language())
    if not language:
        return self.model.objects.none()
    return (
        self.get_queryset()
        .filter(
            translations__is_published=True,
            translations__language_code=language,
        )
        .filter(
            models.Q(category__isnull=True) |
            models.Q(category__is_published=True))
    )
Returns the published documents in the current language. :param request: A Request instance.
def acquire_account(self, account=None, owner=None):
    """Waits until an account becomes available, then locks and returns it.
    If an account is not passed, the next available account is returned.

    :type  account: Account
    :param account: The account to be acquired, or None.
    :type  owner: object
    :param owner: An optional descriptor for the owner.
    :rtype:  :class:`Account`
    :return: The account that was acquired.
    """
    with self.unlock_cond:
        if len(self.accounts) == 0:
            raise ValueError('account pool is empty')
        if account:
            # Wait for the specific requested account to be released.
            while account not in self.unlocked_accounts:
                self.unlock_cond.wait()
            self.unlocked_accounts.remove(account)
        else:
            # Wait for any account to become available.
            while len(self.unlocked_accounts) == 0:
                self.unlock_cond.wait()
            account = self.unlocked_accounts.popleft()
        if owner is not None:
            self.owner2account[owner].append(account)
            self.account2owner[account] = owner
        account.acquire(False)
        self.unlock_cond.notify_all()
        return account
Waits until an account becomes available, then locks and returns it. If an account is not passed, the next available account is returned. :type account: Account :param account: The account to be acquired, or None. :type owner: object :param owner: An optional descriptor for the owner. :rtype: :class:`Account` :return: The account that was acquired.
def as_dictionary(self, is_proof=True):
    """Return the DDO as a JSON dict.

    :param is_proof: if False then do not include the 'proof' element.
    :return: dict
    """
    if self._created is None:
        self._created = DDO._get_timestamp()
    data = {
        '@context': DID_DDO_CONTEXT_URL,
        'id': self._did,
        'created': self._created,
    }
    if self._public_keys:
        data['publicKey'] = [pk.as_dictionary() for pk in self._public_keys]
    if self._authentications:
        data['authentication'] = [auth for auth in self._authentications]
    if self._services:
        data['service'] = [srv.as_dictionary() for srv in self._services]
    if self._proof and is_proof:
        data['proof'] = self._proof
    return data
Return the DDO as a JSON dict. :param is_proof: if False then do not include the 'proof' element. :return: dict
def parse_component_by_typename(self, node, type_):
    """Parses components defined directly by component name.

    @param node: Node containing the <Component> element
    @type node: xml.etree.Element

    @param type_: Type of this component.
    @type type_: string

    @raise ParseError: Raised when the component does not have an id.
    """
    id_ = node.lattrib['id'] if 'id' in node.lattrib else node.tag
    type_ = node.lattrib['type'] if 'type' in node.lattrib else node.tag
    component = Component(id_, type_)
    if self.current_component:
        component.set_parent_id(self.current_component.id)
        self.current_component.add_child(component)
    else:
        self.model.add_component(component)
    # Every attribute other than id/type becomes a parameter.
    for key in node.attrib:
        if key.lower() not in ['id', 'type']:
            component.set_parameter(key, node.attrib[key])
    # Recurse into nested components with this one as the parent.
    previous_component = self.current_component
    self.current_component = component
    self.process_nested_tags(node, 'component')
    self.current_component = previous_component
Parses components defined directly by component name. @param node: Node containing the <Component> element @type node: xml.etree.Element @param type_: Type of this component. @type type_: string @raise ParseError: Raised when the component does not have an id.
def write_pre_script(self, fh):
    """Write the pre script for the job, if there is one.

    @param fh: descriptor of open DAG file.
    """
    if not self.__pre_script:
        return
    args = ' '.join(self.__pre_script_args)
    fh.write(
        'SCRIPT PRE ' + str(self) + ' ' + self.__pre_script + ' ' +
        args + '\n')
Write the pre script for the job, if there is one @param fh: descriptor of open DAG file.
def deserialize(serial_data: dict) -> 'Response':
    """Converts a serialized dictionary response to a Response object."""
    r = Response(serial_data.get('id'))
    r.data.update(serial_data.get('data', {}))
    r.ended = serial_data.get('ended', False)
    r.failed = not serial_data.get('success', True)
    # Rehydrate each message list and append to whatever the Response
    # already holds.
    for message_type in ('errors', 'warnings', 'messages'):
        messages = [
            ResponseMessage(**data)
            for data in serial_data.get(message_type, [])
        ]
        setattr(r, message_type, getattr(r, message_type) + messages)
    return r
Converts a serialized dictionary response to a Response object
def get_instance(self, payload):
    """Build an instance of UserInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.chat.v2.service.user.UserInstance
    :rtype: twilio.rest.chat.v2.service.user.UserInstance
    """
    service_sid = self._solution['service_sid']
    return UserInstance(self._version, payload, service_sid=service_sid, )
Build an instance of UserInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.user.UserInstance :rtype: twilio.rest.chat.v2.service.user.UserInstance
def get_projected_fantasy_defense_game_stats_by_week(self, season, week):
    """Projected Fantasy Defense Game Stats by Week."""
    endpoint = "FantasyDefenseProjectionsByGame/{season}/{week}"
    return self._method_call(endpoint, "projections",
                             season=season, week=week)
Projected Fantasy Defense Game Stats by Week
def fileinfo(fileobj, filename=None, content_type=None, existing=None):
    """Extract the actual file object, filename and content_type from the input.

    This is used by the create and replace methods to correctly deduce their
    parameters from the available information when possible.
    """
    info = _FileInfo(fileobj, filename, content_type)
    return info.get_info(existing)
Tries to extract from the given input the actual file object, filename and content_type This is used by the create and replace methods to correctly deduce their parameters from the available information when possible.
def calc_qar_v1(self):
    """Calculate the discharge responses of the different AR processes.

    For each of the ``nmb`` response functions, accumulates the inner
    product of its AR coefficients (a row of ``ar_coefs``) with its
    logged values (the matching row of ``logout``) into ``qar``.
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    for i in range(der.nmb):
        flu.qar[i] = 0.
        for j in range(der.ar_order[i]):
            flu.qar[i] += der.ar_coefs[i, j] * log.logout[i, j]
Calculate the discharge responses of the different AR processes. Required derived parameters: |Nmb| |AR_Order| |AR_Coefs| Required log sequence: |LogOut| Calculated flux sequence: |QAR| Examples: Assume there are four response functions, involving zero, one, two, and three AR coefficients respectively: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(4) >>> derived.ar_order.shape = 4 >>> derived.ar_order = 0, 1, 2, 3 >>> derived.ar_coefs.shape = (4, 3) >>> logs.logout.shape = (4, 3) >>> fluxes.qar.shape = 4 The coefficients of the different AR processes are stored in separate rows of the 2-dimensional parameter `ar_coefs`. Note the special case of the first AR process of zero order (first row), which involves no autoregressive memory at all: >>> derived.ar_coefs = ((nan, nan, nan), ... (1.0, nan, nan), ... (0.8, 0.2, nan), ... (0.5, 0.3, 0.2)) The "memory values" of the different AR processes are defined as follows (one row for each process). The values of the last time step are stored in the first column, the values of the second to last time step in the second column, and so on: >>> logs.logout = ((nan, nan, nan), ... (1.0, nan, nan), ... (2.0, 3.0, nan), ... (4.0, 5.0, 6.0)) Applying method |calc_qar_v1| is equivalent to calculating the inner product of the different rows of both matrices: >>> model.calc_qar_v1() >>> fluxes.qar qar(0.0, 1.0, 2.2, 4.7)
def missing_node_cache(prov_dir, node_list, provider, opts):
    """Check list of nodes to see if any nodes which were previously known
    about in the cache have been removed from the node list.

    The event fire will only run if configured to do so in the main Salt
    Cloud configuration file (normally /etc/salt/cloud).

    .. code-block:: yaml

        diff_cache_events: True

    .. versionadded:: 2014.7.0
    """
    cached_nodes = [os.path.splitext(entry)[0]
                    for entry in os.listdir(prov_dir)]
    fire = 'diff_cache_events' in opts and opts['diff_cache_events']
    for node in cached_nodes:
        if node in node_list:
            continue
        delete_minion_cachedir(node, provider, opts)
        if fire:
            fire_event(
                'event',
                'cached node missing from provider',
                'salt/cloud/{0}/cache_node_missing'.format(node),
                args={'missing node': node},
                sock_dir=opts.get(
                    'sock_dir',
                    os.path.join(__opts__['sock_dir'], 'master')),
                transport=opts.get('transport', 'zeromq')
            )
Check list of nodes to see if any nodes which were previously known about in the cache have been removed from the node list. This function will only run if configured to do so in the main Salt Cloud configuration file (normally /etc/salt/cloud). .. code-block:: yaml diff_cache_events: True .. versionadded:: 2014.7.0
def compute_header_hmac_hash(context):
    """Compute HMAC-SHA256 hash of header.

    Used to prevent header tampering.
    """
    # Inner key material: SHA-512 of master seed + transformed key + 0x01.
    inner = hashlib.sha512(
        context._.header.value.dynamic_header.master_seed.data +
        context.transformed_key +
        b'\x01'
    ).digest()
    # HMAC key: SHA-512 of eight 0xff bytes followed by the inner digest.
    hmac_key = hashlib.sha512(b'\xff' * 8 + inner).digest()
    return hmac.new(hmac_key, context._.header.data, hashlib.sha256).digest()
Compute HMAC-SHA256 hash of header. Used to prevent header tampering.
def rtt_write(self, buffer_index, data):
    """Writes data to the RTT buffer.

    This method will write at most len(data) bytes to the specified RTT
    buffer.

    Args:
      self (JLink): the ``JLink`` instance
      buffer_index (int): the index of the RTT buffer to write to
      data (list): the list of bytes to write to the RTT buffer

    Returns:
      The number of bytes successfully written to the RTT buffer.

    Raises:
      JLinkRTTException: if the underlying ``JLINK_RTTERMINAL_Write``
        call fails.
    """
    size = len(data)
    native_buf = (ctypes.c_ubyte * size)(*bytearray(data))
    written = self._dll.JLINK_RTTERMINAL_Write(buffer_index, native_buf, size)
    if written < 0:
        raise errors.JLinkRTTException(written)
    return written
Writes data to the RTT buffer. This method will write at most len(data) bytes to the specified RTT buffer. Args: self (JLink): the ``JLink`` instance buffer_index (int): the index of the RTT buffer to write to data (list): the list of bytes to write to the RTT buffer Returns: The number of bytes successfully written to the RTT buffer. Raises: JLinkRTTException if the underlying JLINK_RTTERMINAL_Write call fails.
def delete_error_message(sender, instance, name, source, target, **kwargs):
    """Delete error message if instance state changed from erred."""
    if source == StateMixin.States.ERRED:
        instance.error_message = ''
        instance.save(update_fields=['error_message'])
Delete error message if instance state changed from erred
def parse_row(self):
    """Parses a row, cell-by-cell, filling ``parsed_row`` with field names
    mapped to the cleaned field values.
    """
    fields = self.mapping
    for index, cell in enumerate(self.row[:len(fields)]):
        field_name, field_type = fields[str(index)]
        self.parsed_row[field_name] = self.clean_cell(cell, field_type)
Parses a row, cell-by-cell, returning a dict of field names to the cleaned field values.
def peer_retrieve(key, relation_name='cluster'):
    """Retrieve a named key from peer relation `relation_name`.

    :param key: Attribute name to fetch from the peer relation.
    :param relation_name: Name of the peer relation to inspect.
    :raises ValueError: If no relation with the given name exists.
    """
    cluster_rels = relation_ids(relation_name)
    if not cluster_rels:
        # The original implicit string concatenation was missing a space,
        # producing "Unable to detectpeer relation ..."; fixed here.
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    cluster_rid = cluster_rels[0]
    return relation_get(attribute=key, rid=cluster_rid, unit=local_unit())
Retrieve a named key from peer relation `relation_name`.
def download(self, song):
    """Download a song from a Google Music library.

    Parameters:
        song (dict): A song dict.

    Returns:
        tuple: Song content as bytestring, suggested filename.
    """
    response = self._call(mm_calls.Export, self.uploader_id, song['id'])
    disposition = response.headers['Content-Disposition']
    suggested_filename = unquote(
        disposition.split("filename*=UTF-8''")[-1])
    return (response.body, suggested_filename)
Download a song from a Google Music library. Parameters: song (dict): A song dict. Returns: tuple: Song content as bytestring, suggested filename.
def _handle_empty(self, user, response): if len(response.keys()) == 0: response = self.show_user(user) if len(response) == 0: response = self.show_user(user) return response
Apollo likes to return empty user arrays, even when you REALLY want a user response back... like creating a user.
def update_repository_config_acl(namespace, config, snapshot_id, acl_updates):
    """Set configuration permissions.

    The configuration should exist in the methods repository.

    Args:
        namespace (str): Configuration namespace
        config (str): Configuration name
        snapshot_id (int): snapshot_id of the method
        acl_updates (list(dict)): List of access control updates

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/setConfigACL
    """
    uri = "configurations/{0}/{1}/{2}/permissions".format(
        namespace, config, snapshot_id)
    return __post(uri, json=acl_updates)
Set configuration permissions. The configuration should exist in the methods repository. Args: namespace (str): Configuration namespace config (str): Configuration name snapshot_id (int): snapshot_id of the method acl_updates (list(dict)): List of access control updates Swagger: https://api.firecloud.org/#!/Method_Repository/setConfigACL
def all_linked_artifacts_exist(self):
    """All of the artifact paths for this resolve point to existing files."""
    if not self.has_resolved_artifacts:
        return False
    return all(os.path.isfile(path)
               for path in self.resolved_artifact_paths)
All of the artifact paths for this resolve point to existing files.
def show_letter(letter, text_color=None, back_color=None):
    """Displays a single letter on the LED matrix.

    letter
        The letter to display
    text_color
        The color in which the letter is shown. Defaults to
        '[255, 255, 255]' (white).
    back_color
        The background color of the display. Defaults to '[0, 0, 0]'
        (black).

    CLI Example:

    .. code-block:: bash

        salt 'raspberry' sensehat.show_letter O
        salt 'raspberry' sensehat.show_letter X '[255, 0, 0]'
        salt 'raspberry' sensehat.show_letter B '[0, 0, 255]' '[255, 255, 0]'
    """
    fg = text_color or [255, 255, 255]
    bg = back_color or [0, 0, 0]
    _sensehat.show_letter(letter, fg, bg)
    return {'letter': letter}
Displays a single letter on the LED matrix. letter The letter to display text_color The color in which the letter is shown. Defaults to '[255, 255, 255]' (white). back_color The background color of the display. Defaults to '[0, 0, 0]' (black). CLI Example: .. code-block:: bash salt 'raspberry' sensehat.show_letter O salt 'raspberry' sensehat.show_letter X '[255, 0, 0]' salt 'raspberry' sensehat.show_letter B '[0, 0, 255]' '[255, 255, 0]'
def get_as_nullable_datetime(self, key):
    """Converts map element into a Date or returns None if conversion is not possible.

    :param key: an index of element to get.

    :return: Date value of the element or None if conversion is not supported.
    """
    raw = self.get(key)
    return DateTimeConverter.to_nullable_datetime(raw)
Converts map element into a Date or returns None if conversion is not possible. :param key: an index of element to get. :return: Date value of the element or None if conversion is not supported.
def safe_process_files(path, files, args, state):
    """Process a number of files in a directory.

    Catches any exception from the processing and checks if we should fail
    directly or keep going.

    :returns: False if processing should stop, True otherwise.
    """
    for fn in files:
        full_fn = os.path.join(path, fn)
        try:
            if not process_file(path, fn, args, state):
                return False
        except Exception:
            # Python-3-compatible form (was `except Exception, e`); the
            # bound exception was unused -- the traceback is reported.
            sys.stderr.write("error: %s\n%s\n" % (
                full_fn, traceback.format_exc()))
            state.log_failed(full_fn)
            if state.should_quit():
                return False
    return True
Process a number of files in a directory. Catches any exception from the processing and checks if we should fail directly or keep going.
def _init_titles(self):
    """Init Titles if not defined."""
    super(ModelRestApi, self)._init_titles()
    class_name = self.datamodel.model_name
    for attr, prefix in (('list_title', 'List '),
                         ('add_title', 'Add '),
                         ('edit_title', 'Edit '),
                         ('show_title', 'Show ')):
        if not getattr(self, attr):
            setattr(self, attr, prefix + self._prettify_name(class_name))
    self.title = self.list_title
Init Titles if not defined
def create_detector(self, detector):
    """Creates a new detector.

    Args:
        detector (object): the detector model object. Will be serialized
            as JSON.

    Returns:
        dictionary of the response (created detector model).
    """
    url = self._u(self._DETECTOR_ENDPOINT_SUFFIX)
    resp = self._post(url, data=detector)
    resp.raise_for_status()
    return resp.json()
Creates a new detector. Args: detector (object): the detector model object. Will be serialized as JSON. Returns: dictionary of the response (created detector model).
def get_sections_2d(self):
    """Get 2-D list of sections and hdrgos sets actually used in grouping."""
    sections_act = []
    hdrgos_act_all = self.get_hdrgos()
    hdrgos_in_sections = set()
    if self.hdrobj.sections:
        for section_name, hdrgos_all_lst in self.hdrobj.sections:
            hdrgos_act_set = set(hdrgos_all_lst).intersection(hdrgos_act_all)
            if hdrgos_act_set:
                hdrgos_in_sections |= hdrgos_act_set
                # Keep the section's original ordering, dropping duplicates.
                seen = set()
                hdrgos_act_lst = []
                for hdrgo in hdrgos_all_lst:
                    if hdrgo in hdrgos_act_set and hdrgo not in seen:
                        hdrgos_act_lst.append(hdrgo)
                        seen.add(hdrgo)
                sections_act.append((section_name, hdrgos_act_lst))
        # Any active hdrgos not covered by a section go into the default one.
        hdrgos_act_rem = hdrgos_act_all.difference(hdrgos_in_sections)
        if hdrgos_act_rem:
            sections_act.append((self.hdrobj.secdflt, hdrgos_act_rem))
    else:
        sections_act.append((self.hdrobj.secdflt, hdrgos_act_all))
    return sections_act
Get 2-D list of sections and hdrgos sets actually used in grouping.
def incomplete(transaction):
    """Notifies incomplete blockwise exchange.

    :type transaction: Transaction
    :param transaction: the transaction that owns the response
    :rtype : Transaction
    :return: the edited transaction
    """
    transaction.block_transfer = True
    response = Response()
    response.destination = transaction.request.source
    response.token = transaction.request.token
    response.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
    transaction.response = response
    return transaction
Notifies incomplete blockwise exchange. :type transaction: Transaction :param transaction: the transaction that owns the response :rtype : Transaction :return: the edited transaction
def _logger_levels(self): return { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, }
Return log levels.
def GetFeedItems(client, feed):
    """Returns the Feed Items for a given Feed.

    Args:
      client: an AdWordsClient instance.
      feed: the Feed we are retrieving Feed Items from.

    Returns:
      The Feed Items associated with the given Feed.
    """
    feed_item_service = client.GetService('FeedItemService', 'v201809')
    selector = {
        'fields': ['FeedItemId', 'AttributeValues'],
        'predicates': [
            {
                'field': 'Status',
                'operator': 'EQUALS',
                'values': ['ENABLED']
            },
            {
                'field': 'FeedId',
                'operator': 'EQUALS',
                'values': [feed['id']]
            }
        ],
        'paging': {
            'startIndex': 0,
            'numberResults': PAGE_SIZE
        }
    }
    feed_items = []
    while True:
        page = feed_item_service.get(selector)
        if 'entries' in page:
            feed_items.extend(page['entries'])
        # Advance to the next page; stop once past the total count.
        selector['paging']['startIndex'] += PAGE_SIZE
        if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
            break
    return feed_items
Returns the Feed Items for a given Feed. Args: client: an AdWordsClient instance. feed: the Feed we are retrieving Feed Items from. Returns: The Feed Items associated with the given Feed.
def compute_offset_to_first_complete_codon(
        offset_to_first_complete_reference_codon,
        n_trimmed_from_reference_sequence):
    """Transfer the reading frame from the reference transcripts to the
    variant sequence once it has been aligned to the ReferenceContext.

    Parameters
    ----------
    offset_to_first_complete_reference_codon : int

    n_trimmed_from_reference_sequence : int

    Returns an offset into the variant sequence that starts from a
    complete codon.
    """
    trimmed = n_trimmed_from_reference_sequence
    codon_offset = offset_to_first_complete_reference_codon
    if trimmed <= codon_offset:
        # Trimming stopped before the first codon; just shift the offset.
        return codon_offset - trimmed
    # Trimming cut into the first codon; realign to the next codon boundary.
    overshoot = trimmed - codon_offset
    return (3 - overshoot % 3) % 3
Once we've aligned the variant sequence to the ReferenceContext, we need to transfer reading frame from the reference transcripts to the variant sequences. Parameters ---------- offset_to_first_complete_reference_codon : int n_trimmed_from_reference_sequence : int Returns an offset into the variant sequence that starts from a complete codon.
def get_time_buckets(start, end):
    """Get the time buckets spanned by the start and end times."""
    bucket_size = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
    first_bucket = int(start / bucket_size)
    last_bucket = int(end / bucket_size)
    return list(range(first_bucket, last_bucket + 1))
get the time buckets spanned by the start and end times
def get_recent_async(self, count, callback):
    """Similar to `get_recent` except instead of returning an iterable,
    passes each dict to the given function which must accept a single
    argument. Returns the request.

    `callback` (mandatory) (function) instead of returning an iterable,
    pass each dict (as described above) to the given function which must
    accept a single argument. Nothing is returned.
    """
    validate_nonnegative_int(count, 'count')
    Validation.callable_check(callback, allow_none=True)
    request_evt = self._client._request_sub_recent(self.subid, count=count)
    self._client._add_recent_cb_for(request_evt, callback)
    return request_evt
Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which must accept a single argument. Returns the request. `callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the given function which must accept a single argument. Nothing is returned.
def _cs_path_exists(fspath): if not os.path.exists(fspath): return False abspath = os.path.abspath(fspath) directory, filename = os.path.split(abspath) return filename in os.listdir(directory)
Case-sensitive path existence check >>> sdist_add_defaults._cs_path_exists(__file__) True >>> sdist_add_defaults._cs_path_exists(__file__.upper()) False
def create_slug(self):
    """Creates slug, checks if slug is unique, and loops if not."""
    name = self.slug_source
    counter = 0
    while True:
        if counter:
            candidate = slugify('{0} {1}'.format(name, str(counter)))
        else:
            candidate = slugify(name)
        try:
            # A hit means the slug is taken by another object; try the
            # next numeric suffix.
            self.__class__.objects.exclude(pk=self.pk).get(slug=candidate)
        except ObjectDoesNotExist:
            return candidate
        counter += 1
Creates slug, checks if slug is unique, and loops if not
def walk_data(cls, dist, path='/'):
    """Yields filename, stream for files identified as data in the distribution."""
    for rel_fn in filter(None, dist.resource_listdir(path)):
        full_fn = os.path.join(path, rel_fn)
        if dist.resource_isdir(full_fn):
            # Recurse into subdirectories.
            for item in cls.walk_data(dist, full_fn):
                yield item
        else:
            # Strip the leading '/' from the yielded name.
            yield full_fn[1:], dist.get_resource_stream(
                dist._provider, full_fn)
Yields filename, stream for files identified as data in the distribution
def get_fw_extractor(fw_file):
    """Gets the firmware extractor object fine-tuned for specified type

    :param fw_file: compact firmware file to be extracted from
    :raises: InvalidInputError, for unsupported file types
    :returns: FirmwareImageExtractor object
    """
    fw_img_extractor = FirmwareImageExtractor(fw_file)
    extension = fw_img_extractor.fw_file_ext.lower()
    if extension == '.scexe':
        fw_img_extractor._do_extract = types.MethodType(
            _extract_scexe_file, fw_img_extractor)
    elif extension == '.rpm':
        fw_img_extractor._do_extract = types.MethodType(
            _extract_rpm_file, fw_img_extractor)
    elif extension in RAW_FIRMWARE_EXTNS:
        # Raw firmware needs no extraction; return the file as-is.
        def _no_op_extract(self):
            return fw_img_extractor.fw_file, False
        fw_img_extractor.extract = types.MethodType(
            _no_op_extract, fw_img_extractor)
    else:
        raise exception.InvalidInputError(
            'Unexpected compact firmware file type: %s' % fw_file)
    return fw_img_extractor
Gets the firmware extractor object fine-tuned for specified type :param fw_file: compact firmware file to be extracted from :raises: InvalidInputError, for unsupported file types :returns: FirmwareImageExtractor object
def get(self, path, params=None):
    """Make a GET request, optionally including parameters, to a path.

    The path of the request is the full URL.

    Parameters
    ----------
    path : str
        The URL to request
    params : DataQuery, optional
        The query to pass when making the request

    Returns
    -------
    resp : requests.Response
        The server's response to the request

    Raises
    ------
    HTTPError
        If the server returns anything other than a 200 (OK) code

    See Also
    --------
    get_query, get
    """
    resp = self._session.get(path, params=params)
    if resp.status_code == 200:
        return resp
    # Use the short reason phrase rather than a full HTML error page.
    if resp.headers.get('Content-Type', '').startswith('text/html'):
        text = resp.reason
    else:
        text = resp.text
    raise requests.HTTPError('Error accessing {0}\n'
                             'Server Error ({1:d}: {2})'.format(
                                 resp.request.url, resp.status_code, text))
Make a GET request, optionally including a parameters, to a path. The path of the request is the full URL. Parameters ---------- path : str The URL to request params : DataQuery, optional The query to pass when making the request Returns ------- resp : requests.Response The server's response to the request Raises ------ HTTPError If the server returns anything other than a 200 (OK) code See Also -------- get_query, get
def graph_from_adjacency_matrix(matrix, node_prefix='', directed=False):
    """Creates a basic graph out of an adjacency matrix.

    The matrix has to be a list of rows of values representing an adjacency
    matrix. The values can be anything: bool, int, float, as long as they
    can evaluate to True or False.

    :param matrix: list of rows (lists) of truthy/falsy edge indicators.
    :param node_prefix: string prepended to the 1-based node numbers.
    :param directed: if False, only the upper triangle of each row is read
        and an undirected graph is built.
    :returns: the constructed Dot graph.
    """
    graph = Dot(graph_type='digraph' if directed else 'graph')
    for node_orig, row in enumerate(matrix, start=1):
        if directed:
            skip = 0
            r = row
        else:
            # Only look at the upper triangle to avoid duplicate edges.
            # (Fixed: the original used matrix.index(row), which returns the
            # first matching row and is wrong when rows are duplicated.)
            skip = node_orig - 1
            r = row[skip:]
        for offset, e in enumerate(r):
            if e:
                node_dest = skip + 1 + offset
                # Fixed: node names must be strings; the original
                # concatenated str + int, raising TypeError.
                graph.add_edge(
                    Edge('%s%s' % (node_prefix, node_orig),
                         '%s%s' % (node_prefix, node_dest)))
    return graph
Creates a basic graph out of an adjacency matrix. The matrix has to be a list of rows of values representing an adjacency matrix. The values can be anything: bool, int, float, as long as they can evaluate to True or False.
def _get_args(self, kwargs):
    """Discard all keywords which aren't function-specific from the kwargs.

    :param kwargs:
    :return:
    """
    cleaned_kwargs = salt.utils.args.clean_kwargs(**kwargs)
    return [], cleaned_kwargs
Discard all keywords which aren't function-specific from the kwargs. :param kwargs: :return:
def update_fp(self, fp, length):
    """Update the Inode to use a different file object and length.

    Parameters:
     fp - A file object that contains the data for this Inode.
     length - The length of the data.
    Returns:
     Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'Inode is not yet initialized')
    self.data_fp = fp
    self.data_length = length
    self.fp_offset = 0
    self.original_data_location = self.DATA_IN_EXTERNAL_FP
Update the Inode to use a different file object and length. Parameters: fp - A file object that contains the data for this Inode. length - The length of the data. Returns: Nothing.
def extra(name: str, desc: str) -> Callable:
    """Decorator for slave channel's "additional features" interface.

    Args:
        name (str): A human readable name for the function.
        desc (str): A short description and usage of it. Use
            ``{function_name}`` in place of the function name in the
            description.

    Returns:
        The decorated method.
    """
    def attr_dec(f):
        # Tag the function so the framework can discover it.
        setattr(f, "extra_fn", True)
        setattr(f, "name", name)
        setattr(f, "desc", desc)
        return f
    return attr_dec
Decorator for slave channel's "additional features" interface. Args: name (str): A human readable name for the function. desc (str): A short description and usage of it. Use ``{function_name}`` in place of the function name in the description. Returns: The decorated method.
def load_characters(self):
    """Fetches the MAL media characters page and sets the current media's
    character attributes.

    :rtype: :class:`.Media`
    :return: current media object.
    """
    url = (u'http://myanimelist.net/' + self.__class__.__name__.lower() +
           u'/' + str(self.id) + u'/' + utilities.urlencode(self.title) +
           u'/characters')
    characters_page = self.session.session.get(url).text
    self.set(self.parse_characters(
        utilities.get_clean_dom(characters_page)))
    return self
Fetches the MAL media characters page and sets the current media's character attributes. :rtype: :class:`.Media` :return: current media object.
def deprecated(message):
    """Decorator for deprecating functions and methods.

    ::

        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass
    """
    import functools

    def f__(f):
        # functools.wraps copies __name__/__doc__ and updates __dict__ as
        # the original did by hand, and additionally preserves __module__,
        # __qualname__ and sets __wrapped__.
        @functools.wraps(f)
        def f_(*args, **kwargs):
            from warnings import warn
            warn(message, category=DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        return f_
    return f__
Decorator for deprecating functions and methods. :: @deprecated("'foo' has been deprecated in favour of 'bar'") def foo(x): pass
def _get_balance(self):
    """Get to know how much you totally have and how much you get today."""
    resp = self.session.get(self.balance_url, verify=False)
    soup = BeautifulSoup(resp.text, 'html.parser')
    summary = soup.select(
        "table.data tr:nth-of-type(2)")[0].text.strip().split('\n')
    total, today = summary[-2:]
    logging.info('%-26sTotal:%-8s', today, total)
    return '\n'.join([u"Today: {0}".format(today),
                      "Total: {0}".format(total)])
Get to know how much you totally have and how much you get today.
def text_search(self, text, sort=None, offset=100, page=1):
    """Search in aquarius using text query.

    Given the string aquarius will do a full-text query to search in all
    documents.

    Currently implemented are the MongoDB and Elastic Search drivers.

    For a detailed guide on how to search, see the MongoDB driver
    documentation:
    https://docs.mongodb.com/manual/reference/operator/query/text/

    And the Elastic Search documentation:
    https://www.elastic.co/guide/en/elasticsearch/guide/current/full-text-search.html

    :param text: String to be search.
    :param sort: 1/-1 to sort ascending or descending.
    :param offset: Integer with the number of elements displayed per page.
    :param page: Integer with the number of page.
    :return: List of DDO instance
    """
    assert page >= 1, f'Invalid page value {page}. Required page >= 1.'
    payload = {"text": text, "sort": sort, "offset": offset, "page": page}
    response = self.requests_session.get(
        f'{self.url}/query', params=payload, headers=self._headers)
    if response.status_code != 200:
        raise Exception(f'Unable to search for DDO: {response.content}')
    return self._parse_search_response(response.content)
Search in aquarius using text query. Given the string aquarius will do a full-text query to search in all documents. Currently implemented are the MongoDB and Elastic Search drivers. For a detailed guide on how to search, see the MongoDB driver documentation: https://docs.mongodb.com/manual/reference/operator/query/text/ And the Elastic Search documentation: https://www.elastic.co/guide/en/elasticsearch/guide/current/full-text-search.html Other drivers are possible according to each implementation. :param text: String to be search. :param sort: 1/-1 to sort ascending or descending. :param offset: Integer with the number of elements displayed per page. :param page: Integer with the number of page. :return: List of DDO instance
def get(name, import_str=False):
    """Helper function to use inside the package.

    Reads `name` from the project settings, falling back to (or, for dict
    settings, shallow-merging with) the package defaults.
    """
    default_value = getattr(default_settings, name)
    try:
        value = getattr(settings, name)
    except AttributeError:
        if name in default_settings.required_attrs:
            raise Exception('You must set ' + name + ' in your settings.')
        value = None
    if isinstance(default_value, dict) and value:
        # Shallow-merge user overrides into the defaults.
        default_value.update(value)
        value = default_value
    else:
        if value is None:
            value = default_value
        value = import_from_str(value) if import_str else value
    return value
Helper function to use inside the package.
def append_field(self, fieldname):
    """Mark a field as present in the Rock Ridge records.

    Parameters:
     fieldname - The name of the field to mark as present; should be one
                 of 'PX', 'PN', 'SL', 'NM', 'CL', 'PL', 'RE', or 'TF'.
    Returns:
     Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('RR record not yet initialized!')
    # Bit position of each field in rr_flags.
    field_bits = {
        'PX': 0, 'PN': 1, 'SL': 2, 'NM': 3,
        'CL': 4, 'PL': 5, 'RE': 6, 'TF': 7,
    }
    if fieldname not in field_bits:
        raise pycdlibexception.PyCdlibInternalError('Unknown RR field name %s' % (fieldname))
    self.rr_flags |= (1 << field_bits[fieldname])
Mark a field as present in the Rock Ridge records. Parameters: fieldname - The name of the field to mark as present; should be one of 'PX', 'PN', 'SL', 'NM', 'CL', 'PL', 'RE', or 'TF'. Returns: Nothing.
def entropy_bits(
    lst: Union[
        List[Union[int, str, float, complex]],
        Tuple[Union[int, str, float, complex]]
    ]
) -> float:
    """Calculate the entropy of a wordlist or a numerical range.

    Keyword arguments:
    lst -- A wordlist as list or tuple, or a numerical range as a list:
           (minimum, maximum)
    """
    if not isinstance(lst, (tuple, list)):
        raise TypeError('lst must be a list or a tuple')
    # A two-number sequence is treated as a (minimum, maximum) range.
    is_numeric_pair = (
        len(lst) == 2
        and isinstance(lst[0], (int, float))
        and isinstance(lst[1], (int, float))
    )
    if is_numeric_pair:
        return calc_entropy_bits_nrange(lst[0], lst[1])
    return calc_entropy_bits(lst)
Calculate the entropy of a wordlist or a numerical range. Keyword arguments: lst -- A wordlist as list or tuple, or a numerical range as a list: (minimum, maximum)
def _brightness(x, change:uniform):
    "Apply `change` in brightness of image `x`."
    shift = scipy.special.logit(change)
    return x.add_(shift)
Apply `change` in brightness of image `x`.
def GenerateConfigFile(load_hook, dump_hook, **kwargs) -> ConfigFile:
    """Generates a ConfigFile factory using the specified hooks.

    These hooks should be functions, and have one argument. When a hook is
    called, the ConfigFile object is passed to it. Use this to load your
    data from the fd object, or request, or whatever.

    Returns a function that builds ConfigFile objects.
    """
    def ConfigFileGenerator(filename, safe_load: bool=True):
        return ConfigFile(fd=filename, load_hook=load_hook,
                          dump_hook=dump_hook, safe_load=safe_load, **kwargs)
    return ConfigFileGenerator
Generates a ConfigFile object using the specified hooks. These hooks should be functions, and have one argument. When a hook is called, the ConfigFile object is passed to it. Use this to load your data from the fd object, or request, or whatever. This returns a ConfigFile object.
def rollback(cls, bigchain, new_height, txn_ids):
    """Looks for election and vote transactions inside the block and
    cleans up the database artifacts possibly created in `process_blocks`.

    Part of the `end_block`/`commit` crash recovery.
    """
    # Drop any election records that were written for this height.
    bigchain.delete_elections(new_height)

    transactions = [bigchain.get_transaction(tx_id) for tx_id in txn_ids]
    for election_id in cls._get_votes(transactions):
        election = bigchain.get_transaction(election_id)
        election.on_rollback(bigchain, new_height)
Looks for election and vote transactions inside the block and cleans up the database artifacts possibly created in `process_blocks`. Part of the `end_block`/`commit` crash recovery.
async def _bind_key_to_queue(self, routing_key: AnyStr, queue_name: AnyStr) -> None:
    """Bind to queue with specified routing key.

    :param routing_key: Routing key to bind with.
    :param queue_name: Name of the queue
    :return: Result of the broker's queue_bind call.
    """
    logger.info("Binding key='%s'", routing_key)
    return await self._channel.queue_bind(
        exchange_name=self._exchange_name,
        queue_name=queue_name,
        routing_key=routing_key,
    )
Bind to queue with specified routing key. :param routing_key: Routing key to bind with. :param queue_name: Name of the queue :return: The result of the broker's ``queue_bind`` call
def import_enum(dest, src, name):
    """Import Enum `name` from Registry `src` to Registry `dest`.

    :param Registry dest: Destination Registry
    :param Registry src: Source Registry
    :param str name: Name of Enum to import
    """
    # Copy the reference for the named enum across registries.
    enum = src.enums[name]
    dest.enums[name] = enum
Import Enum `name` from Registry `src` to Registry `dest`. :param Registry dest: Destination Registry :param Registry src: Source Registry :param str name: Name of Enum to import
def create(self, username, password, tags=''):
    """Create User.

    :param str username: Username
    :param str password: Password
    :param str tags: Comma-separate list of tags (e.g. monitoring)
    """
    payload = json.dumps({
        'password': password,
        'tags': tags
    })
    return self.http_client.put(API_USER % username, payload=payload)
Create User. :param str username: Username :param str password: Password :param str tags: Comma-separate list of tags (e.g. monitoring) :returns: The HTTP response of the PUT request
def _process_key(evt):
    """Helper to convert from wx keycode to vispy keycode"""
    code = evt.GetKeyCode()
    if code in KEYMAP:
        return KEYMAP[code], ''
    # Lowercase ASCII letters map onto their uppercase counterparts.
    if 97 <= code <= 122:
        code -= 32
    if 32 <= code <= 127:
        return keys.Key(chr(code)), chr(code)
    return None, None
Helper to convert from wx keycode to vispy keycode
def format_number(
        x, use_rounding=True, is_population=False, coefficient=1):
    """Format a number according to the standards.

    :param x: A number to be formatted in a locale friendly way.
    :param use_rounding: Flag to enable a rounding.
    :param is_population: Flag if the number is population; only relevant
        when use_rounding is enabled.
    :param coefficient: Divide the result after the rounding.
    :returns: A locale friendly formatted string, e.g. 1,000,000.
    """
    if use_rounding:
        x = rounding(x, is_population)
    # Scale down (floor division) after the optional rounding step.
    x //= coefficient
    return add_separators(x)
Format a number according to the standards. :param x: A number to be formatted in a locale friendly way. :type x: int :param use_rounding: Flag to enable a rounding. :type use_rounding: bool :param is_population: Flag if the number is population. It needs to be used with enable_rounding. :type is_population: bool :param coefficient: Divide the result after the rounding. :type coefficient:float :returns: A locale friendly formatted string e.g. 1,000,0000.00 representing the original x. If a ValueError exception occurs, x is simply returned. :rtype: basestring
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
    """A middleware that parses the body of json requests and encodes the
    json responses.

    NOTE: this middleware exists just for backward compatibility; it only
    accepts list or dictionary outputs even though the JSON specification
    allows other top-level values.  Prefer `wrap_json_body` and
    `wrap_json_response` instead.
    """
    if func is None:
        # Called with keyword arguments only: act as a decorator factory.
        return functools.partial(
            wrap_json, encoder=encoder, preserve_raw_body=preserve_raw_body
        )
    return wrap_json_response(
        wrap_json_body(func, preserve_raw_body=preserve_raw_body),
        encoder=encoder,
    )
A middleware that parses the body of json requests and encodes the json responses. NOTE: this middleware exists just for backward compatibility, but it has some limitations in terms of response body encoding because it only accept list or dictionary outputs and json specification allows store other values also. It is recommended use the `wrap_json_body` and wrap_json_response` instead of this.
def _tokens_to_subtoken(self, tokens):
    """Converts a list of tokens to a list of subtoken strings.

    Args:
      tokens: a list of strings.
    Returns:
      a flat list of subtoken strings.
    """
    subtokens = []
    for token in tokens:
        escaped = _escape_token(token, self._alphabet)
        subtokens.extend(self._escaped_token_to_subtoken_strings(escaped))
    return subtokens
Converts a list of tokens to a list of subtoken strings. Args: tokens: a list of strings. Returns: a list of subtoken strings.
def show_member(self, member, **_params):
    """Fetches information of a certain load balancer member."""
    path = self.member_path % (member)
    return self.get(path, params=_params)
Fetches information of a certain load balancer member.
def owned_expansions(self):
    """List of expansions owned by the player."""
    owned = {}
    for location in self.expansion_locations:
        # The first townhall close enough to the expansion location claims it.
        claimed = None
        for townhall in self.townhalls:
            if townhall.position.distance_to(location) < self.EXPANSION_GAP_THRESHOLD:
                claimed = townhall
                break
        if claimed:
            owned[location] = claimed
    return owned
List of expansions owned by the player.
def refresh_context(self):
    """Get the default context of the user and save it"""
    user_model = self.model('res.user')
    self.context = user_model.get_preferences(True)
    return self.context
Get the default context of the user and save it
def record_to_objects(self, preference=None):
    """Create objects from files, or merge the files into the objects."""
    from ambry.orm.file import File

    for file_entry in self.list_records():
        # An explicit preference argument overrides the per-record one.
        pref = preference if preference else file_entry.record.preference

        if pref == File.PREFERENCE.FILE:
            self._bundle.logger.debug(' Cleaning objects for file {}'.format(file_entry.path))
            file_entry.clean_objects()

        if pref in (File.PREFERENCE.FILE, File.PREFERENCE.MERGE):
            self._bundle.logger.debug(' rto {}'.format(file_entry.path))
            file_entry.record_to_objects()
Create objects from files, or merge the files into the objects.
def _makeflags(self): if self.meta.makeflags in ["on", "ON"]: cpus = multiprocessing.cpu_count() os.environ["MAKEFLAGS"] = "-j{0}".format(cpus)
Set variable MAKEFLAGS with the numbers of processors
def _to_base36(number): if number < 0: raise ValueError("Cannot encode negative numbers") chars = "" while number != 0: number, i = divmod(number, 36) chars = _alphabet[i] + chars return chars or "0"
Convert a positive integer to a base36 string. Taken from Stack Overflow and modified.
def elements(self):
    """Return the entries of ``ct`` whose payload is an Element instance."""
    return [entry[1] for entry in ct
            if isinstance(entry[1], datapoint.Element.Element)]
Return a list of the elements which are Element instances
def _observed_name(field, name):
    """Adjust field name to reflect `dump_to` and `load_from` attributes.

    :param Field field: A marshmallow field.
    :param str name: Field name
    :rtype: str
    """
    if MARSHMALLOW_VERSION_INFO[0] >= 3:
        return field.data_key or name
    # marshmallow 2: fall back through dump_to, then load_from, then name.
    dump_to = getattr(field, "dump_to", None)
    load_from = getattr(field, "load_from", None)
    return dump_to or load_from or name
Adjust field name to reflect `dump_to` and `load_from` attributes. :param Field field: A marshmallow field. :param str name: Field name :rtype: str
def cmd_output_add(self, args):
    """add new output"""
    # args[0] is the connection string for the new output,
    # e.g. "udp:1.2.3.4:14550" -- TODO confirm accepted formats.
    device = args[0]
    print("Adding output %s" % device)
    try:
        # Open an output-only MAVLink connection that mirrors our own
        # source system/component ids.
        conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system)
        conn.mav.srcComponent = self.settings.source_component
    except Exception:
        print("Failed to connect to %s" % device)
        return
    self.mpstate.mav_outputs.append(conn)
    try:
        # Best-effort: register the fd with the child-process fd list;
        # some connection types have no usable .port/fileno().
        mp_util.child_fd_list_add(conn.port.fileno())
    except Exception:
        pass
add new output
def set_config(self, config):
    """Update the component's configuration.

    Use :py:meth:`get_config` to get a copy of the component's
    configuration, update that copy, then call :py:meth:`set_config` to
    update the component.  This enables the configuration to be changed
    in a threadsafe manner while the component is running, and allows
    several values to be changed at once.

    :param ConfigParent config: New configuration.
    """
    # Queue a private deep copy so later mutation by the caller is harmless.
    snapshot = copy.deepcopy(config)
    self._configmixin_queue.append(snapshot)
    self.new_config()
Update the component's configuration. Use the :py:meth:`get_config` method to get a copy of the component's configuration, update that copy then call :py:meth:`set_config` to update the component. This enables the configuration to be changed in a threadsafe manner while the component is running, and allows several values to be changed at once. :param ConfigParent config: New configuration.
def _warn_about_problematic_credentials(credentials):
    """Determines if the credentials are problematic.

    Credentials from the Cloud SDK that are associated with Cloud SDK's
    project are problematic because they may not have APIs enabled and
    have limited quota.  If this is the case, warn about it.
    """
    from google.auth import _cloud_sdk

    # Only Cloud SDK's own client ID is known to carry these limitations.
    is_cloud_sdk_creds = credentials.client_id == _cloud_sdk.CLOUD_SDK_CLIENT_ID
    if is_cloud_sdk_creds:
        warnings.warn(_CLOUD_SDK_CREDENTIALS_WARNING)
Determines if the credentials are problematic. Credentials from the Cloud SDK that are associated with Cloud SDK's project are problematic because they may not have APIs enabled and have limited quota. If this is the case, warn about it.
def capture_logger(name):
    """Context manager to capture a logger output with a StringIO stream."""
    import logging

    target = logging.getLogger(name)
    # Python 2 ships StringIO as its own module; fall back to io on Python 3.
    try:
        import StringIO
        stream = StringIO.StringIO()
    except ImportError:
        from io import StringIO
        stream = StringIO()
    capture_handler = logging.StreamHandler(stream)
    target.addHandler(capture_handler)
    try:
        yield stream
    finally:
        # Always detach the handler, even if the caller's block raised.
        target.removeHandler(capture_handler)
Context manager to capture a logger output with a StringIO stream.
def _init_connection(self):
    """Create initial connection pool"""
    if not self.servers:
        raise RuntimeError("No server defined")

    # Pick one server at random; its scheme decides which transport the
    # pool uses, but the pool itself gets every server of that scheme.
    chosen = random.choice(self.servers)
    if chosen.scheme in ["http", "https"]:
        http_servers = [s for s in self.servers
                        if s.scheme in ["http", "https"]]
        self.connection = http_connect(
            http_servers,
            timeout=self.timeout,
            basic_auth=self.basic_auth,
            max_retries=self.max_retries,
            retry_time=self.retry_time)
        return
    elif chosen.scheme == "thrift":
        thrift_servers = [s for s in self.servers if s.scheme == "thrift"]
        self.connection = thrift_connect(
            thrift_servers,
            timeout=self.timeout,
            max_retries=self.max_retries,
            retry_time=self.retry_time)
Create initial connection pool
def brake_on(self):
    """Set the Brakes of Herkulex

    In braked mode, position control and velocity control will not work;
    enable torque before that.

    Args:
     none
    """
    # RAM write request: set the torque-control register to 0x40 (brake).
    packet = [0x0A, self.servoid, RAM_WRITE_REQ, TORQUE_CONTROL_RAM, 0x01, 0x40]
    send_data(packet)
Set the Brakes of Herkulex In braked mode, position control and velocity control will not work, enable torque before that Args: none
def DiffPrimitiveArrays(self, oldObj, newObj):
    """Diff two primitive arrays"""
    if len(oldObj) != len(newObj):
        __Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
                      % (len(oldObj), len(newObj)))
        return False
    if self._ignoreArrayOrder:
        # Order-insensitive comparison; None/empty arrays compare as empty.
        left = frozenset(oldObj) if oldObj else frozenset()
        right = frozenset(newObj) if newObj else frozenset()
        match = left == right
    else:
        match = all(a == b for a, b in zip(oldObj, newObj))
    if not match:
        __Log__.debug(
            'DiffPrimitiveArrays: One of the elements do not match.')
        return False
    return True
Diff two primitive arrays
def main_btn_clicked(self, widget, data=None):
    """Button switches to Dev Assistant GUI main window"""
    self.remove_link_button()
    # Carry the debugging flag over to the main window.
    payload = {'debugging': self.debugging}
    self.run_window.hide()
    self.parent.open_window(widget, payload)
Button switches to Dev Assistant GUI main window
def transmit(self, channel, message):
    """Send the message to Slack.

    :param channel: channel or user to whom the message should be sent.
       If a ``thread`` attribute is present, that thread ID is used.
    :param str message: message to send.
    """
    # Try a named channel first, then fall back to a direct-message channel.
    target = self.slack.server.channels.find(channel)
    if not target:
        target = self._find_user_channel(username=channel)
    expanded = self._expand_references(message)
    target.send_message(expanded, thread=getattr(channel, 'thread', None))
Send the message to Slack. :param channel: channel or user to whom the message should be sent. If a ``thread`` attribute is present, that thread ID is used. :param str message: message to send.
def mont_pub_to_ed_pub(cls, mont_pub):
    """Derive a Twisted Edwards public key from given Montgomery public key.

    :param mont_pub: A bytes-like object encoding the public key with
        length MONT_PUB_KEY_SIZE.
    :returns: A bytes-like object encoding the public key with length
        ED_PUB_KEY_SIZE.
    """
    if not isinstance(mont_pub, bytes):
        raise TypeError("Wrong type passed for the mont_pub parameter.")
    if len(mont_pub) != cls.MONT_PUB_KEY_SIZE:
        raise ValueError("Invalid value passed for the mont_pub parameter.")
    ed_pub = cls._mont_pub_to_ed_pub(bytearray(mont_pub))
    return bytes(ed_pub)
Derive a Twisted Edwards public key from given Montgomery public key. :param mont_pub: A bytes-like object encoding the public key with length MONT_PUB_KEY_SIZE. :returns: A bytes-like object encoding the public key with length ED_PUB_KEY_SIZE.
def _mount(device):
    """Mount the device in a temporary place."""
    mountpoint = tempfile.mkdtemp()
    res = __states__['mount.mounted'](mountpoint, device=device,
                                      fstype='btrfs', opts='subvol=/',
                                      persist=False)
    if not res['result']:
        log.error('Cannot mount device %s in %s', device, mountpoint)
        _umount(mountpoint)
        return None
    return mountpoint
Mount the device in a temporary place.
def _post_compute(self): self._x_labels, self._y_labels = self._y_labels, self._x_labels self._x_labels_major, self._y_labels_major = ( self._y_labels_major, self._x_labels_major ) self._x_2nd_labels, self._y_2nd_labels = ( self._y_2nd_labels, self._x_2nd_labels ) self.show_y_guides, self.show_x_guides = ( self.show_x_guides, self.show_y_guides )
After computations transpose labels
def convert_to_ip(self):
    """Convert the Data Collection to IP units."""
    converted_values, ip_unit = self._header.data_type.to_ip(
        self._values, self._header.unit)
    self._values = converted_values
    self._header._unit = ip_unit
Convert the Data Collection to IP units.
def getL2Representations(self):
    """Returns the active representation in L2."""
    representations = []
    for region in self.L2Regions:
        pooler = region.getSelf()._pooler
        representations.append(set(pooler.getActiveCells()))
    return representations
Returns the active representation in L2.
def fix_post_relative_url(rel_url):
    """Fix post relative url to a standard, uniform format.

    Possible input:
    - 2016/7/8/my-post
    - 2016/07/08/my-post.html
    - 2016/8/09/my-post/
    - 2016/8/09/my-post/index
    - 2016/8/09/my-post/index.htm
    - 2016/8/09/my-post/index.html

    :param rel_url: relative url to fix
    :return: fixed relative url, or None if cannot recognize
    """
    m = re.match(
        r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/'
        r'(?P<post_name>[^/]+?)'
        r'(?:(?:\.html)|(?:/(?P<index>index(?:\.html?)?)?))?$',
        rel_url
    )
    if m is None:
        return None

    try:
        post_date = date(year=int(m.group('year')),
                         month=int(m.group('month')),
                         day=int(m.group('day')))
    except (TypeError, ValueError):
        # Matched the pattern but is not a real calendar date (e.g. month 13).
        return None

    tail = 'index.html' if m.group('index') else ''
    return '/'.join((post_date.strftime('%Y/%m/%d'), m.group('post_name'), tail))
Fix post relative url to a standard, uniform format. Possible input: - 2016/7/8/my-post - 2016/07/08/my-post.html - 2016/8/09/my-post/ - 2016/8/09/my-post/index - 2016/8/09/my-post/index.htm - 2016/8/09/my-post/index.html :param rel_url: relative url to fix :return: fixed relative url, or None if cannot recognize
def _canonicalize_fraction(cls, non_repeating, repeating): if repeating == []: return (non_repeating, repeating) repeat_len = len(repeating) indices = range(len(non_repeating), -1, -repeat_len) end = next( i for i in indices if non_repeating[(i - repeat_len):i] != repeating ) indices = range(min(repeat_len - 1, end), 0, -1) index = next( (i for i in indices \ if repeating[-i:] == non_repeating[(end-i):end]), 0 ) return ( non_repeating[:(end - index)], repeating[-index:] + repeating[:-index] )
If the same fractional value can be represented by stripping repeating part from ``non_repeating``, do it. :param non_repeating: non repeating part of fraction :type non_repeating: list of int :param repeating: repeating part of fraction :type repeating: list of int :returns: new non_repeating and repeating parts :rtype: tuple of list of int * list of int Complexity: O(len(non_repeating))
def run(self):
    """Starts recipes execution."""
    self._prepare()
    for recipe in self._recipes:
        # Without --yes, confirm each recipe interactively (default "yes").
        should_run = True
        if not self.arguments.yes:
            should_run = pypro.console.ask_bool(
                'Run %s.%s' % (recipe.module, recipe.name), "yes")
        if should_run:
            recipe.run(self, self.arguments)
    if self.arguments.verbose:
        pypro.console.out('Thanks for using pypro. Support this project at https://github.com/avladev/pypro')
Starts recipes execution.
def _shuffle(y, labels, random_state): if labels is None: ind = random_state.permutation(len(y)) else: ind = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) return y[ind]
Return a shuffled copy of y eventually shuffle among same labels.
def load():
    """| Load the configuration file.
    | Add dynamically configuration to the module.

    :rtype: None
    """
    config = ConfigParser.RawConfigParser(DEFAULTS)
    # Close the config file deterministically instead of leaking the
    # handle opened inline (original passed an unclosed open() result).
    conf_file = open(CONF_PATH)
    try:
        config.readfp(conf_file)
    finally:
        conf_file.close()
    # Expose every section as a module-level dict of its options.
    for section in config.sections():
        globals()[section] = {}
        for key, val in config.items(section):
            globals()[section][key] = val
| Load the configuration file. | Add dynamically configuration to the module. :rtype: None
def normal(obj, params, **kwargs):
    """Evaluates the normal vector of the curves or surfaces at the input
    parameter values.

    This function is designed to evaluate normal vectors of the B-Spline
    and NURBS shapes at single or multiple parameter positions.

    :param obj: input geometry
    :type obj: abstract.Curve or abstract.Surface
    :param params: parameters
    :type params: float, list or tuple
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    normalize = kwargs.get('normalize', True)
    if isinstance(obj, abstract.Curve):
        # A sequence of parameters means multiple evaluation points.
        if isinstance(params, (list, tuple)):
            return ops.normal_curve_single_list(obj, params, normalize)
        return ops.normal_curve_single(obj, params, normalize)
    if isinstance(obj, abstract.Surface):
        # Surface params come as (u, v); a float first item means one pair.
        if isinstance(params[0], float):
            return ops.normal_surface_single(obj, params, normalize)
        return ops.normal_surface_single_list(obj, params, normalize)
Evaluates the normal vector of the curves or surfaces at the input parameter values. This function is designed to evaluate normal vectors of the B-Spline and NURBS shapes at single or multiple parameter positions. :param obj: input geometry :type obj: abstract.Curve or abstract.Surface :param params: parameters :type params: float, list or tuple :return: a list containing "point" and "vector" pairs :rtype: tuple
def apply_mask(data: bytes, mask: bytes) -> bytes:
    """Apply masking to the data of a WebSocket message.

    ``data`` and ``mask`` are bytes-like objects.

    Return :class:`bytes`.
    """
    if len(mask) != 4:
        raise ValueError("mask must contain 4 bytes")
    masked = bytearray(data)
    for i in range(len(masked)):
        # i & 3 cycles through the four mask bytes.
        masked[i] ^= mask[i & 3]
    return bytes(masked)
Apply masking to the data of a WebSocket message. ``data`` and ``mask`` are bytes-like objects. Return :class:`bytes`.