code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _add_fragment(cls, syncmap, identifier, lines, begin, end, language=None):
    """Append a timed text fragment to *syncmap*.

    :param syncmap: the SyncMap to append to
    :param identifier: the fragment identifier
    :param lines: the lines of the text (list of string)
    :param begin: the begin time (TimeValue)
    :param end: the end time (TimeValue)
    :param language: optional language code
    """
    text = TextFragment(identifier=identifier, lines=lines, language=language)
    syncmap.add_fragment(SyncMapFragment(text_fragment=text, begin=begin, end=end))
Add a new fragment to ``syncmap``. :param syncmap: the syncmap to append to :type syncmap: :class:`~aeneas.syncmap.SyncMap` :param identifier: the identifier :type identifier: string :param lines: the lines of the text :type lines: list of string :param begin: the begin time :type begin: :class:`~aeneas.exacttiming.TimeValue` :param end: the end time :type end: :class:`~aeneas.exacttiming.TimeValue` :param language: the language :type language: string
def fix_times(self):
    """Repair missing point times in the recording, in place.

    Some recordings contain points whose 'time' is None; each such point
    inherits the most recent known time (or the minimum known time,
    clamped to 0, when it occurs before any known time), so nothing
    breaks after loading the handwritten recording.
    """
    pointlist = self.get_pointlist()
    # Collect only the known times: on Python 3, min() raises TypeError
    # when the list contains None, which is exactly the broken case this
    # function is meant to repair.
    times = [point['time'] for stroke in pointlist
             for point in stroke if point['time'] is not None]
    times_min = max(min(times), 0) if times else 0
    for i, stroke in enumerate(pointlist):
        for j, point in enumerate(stroke):
            if point['time'] is None:
                pointlist[i][j]['time'] = times_min
            else:
                times_min = point['time']
    self.raw_data_json = json.dumps(pointlist)
Some recordings have wrong times. Fix them so that nothing after loading a handwritten recording breaks.
def _restore_vxlan_entries(self, switch_ip, vlans):
    """Replay vlan/vn-segment pairs onto a Nexus switch.

    Pops up to CREATE_VLAN_BATCH entries from *vlans* (mutating the
    caller's list) and sends them to the switch in config batches of
    CREATE_VLAN_SEND_SIZE entries each.
    """
    count = 1
    conf_str = ''
    vnsegment_sent = 0
    path_str, conf_str = self.driver.start_create_vlan()
    while vnsegment_sent < const.CREATE_VLAN_BATCH and vlans:
        vlan_id, vni = vlans.pop(0)
        conf_str = self.driver.get_create_vlan(
            switch_ip, vlan_id, vni, conf_str)
        if (count == const.CREATE_VLAN_SEND_SIZE):
            # batch is full: close it out and push it to the switch
            conf_str = self.driver.end_create_vlan(conf_str)
            self.driver.send_edit_string(switch_ip, path_str, conf_str)
            vnsegment_sent += count
            conf_str = ''
            count = 1
        else:
            count += 1
    if conf_str:
        # flush the final partial batch
        # NOTE(review): at this point `count` is one larger than the number
        # of entries accumulated in the partial batch, so the summary (and
        # the loop bound) may overcount by one — confirm intent.
        vnsegment_sent += count
        conf_str = self.driver.end_create_vlan(conf_str)
        self.driver.send_edit_string(switch_ip, path_str, conf_str)
        conf_str = ''
    LOG.debug("Switch %s VLAN vn-segment replay summary: %d",
              switch_ip, vnsegment_sent)
Restore vxlan entries on a Nexus switch.
def status_color(status):
    """Return the terminal color for *status*: green if truthy, red otherwise."""
    return c.Fore.GREEN if status else c.Fore.RED
Return the appropriate status color.
def from_yaml_to_list(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> TList[T]:
    """Deserialize a YAML string (a sequence of mappings) into a TList of instances.

    :param data: YAML string
    :param force_snake_case: transform keys to snake_case (PEP8 compliant) if True
    :param force_cast: cast values forcibly if True
    :param restrict: prohibit extra parameters if True
    :return: list of instances
    """
    return cls.from_dicts(util.load_yaml(data),
                          force_snake_case=force_snake_case,
                          force_cast=force_cast,
                          restrict=restrict)
From yaml string to list of instance :param data: Yaml string :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: List of instance Usage: >>> from owlmixin.samples import Human >>> humans: TList[Human] = Human.from_yaml_to_list(''' ... - id: 1 ... name: Tom ... favorites: ... - name: Apple ... - id: 2 ... name: John ... favorites: ... - name: Orange ... ''') >>> humans[0].name 'Tom' >>> humans[1].name 'John' >>> humans[0].favorites[0].name 'Apple'
def _str(obj): values = [] for name in obj._attribs: val = getattr(obj, name) if isinstance(val, str): val = repr(val) val = str(val) if len(str(val)) < 10 else "(...)" values.append((name, val)) values = ", ".join("{}={}".format(k, v) for k, v in values) return "{}({})".format(obj.__class__.__name__, values)
Show nicely the generic object received.
def poll(poll, msg, server):
    """Create a Slack poll: post the question plus answers, then seed reactions.

    *poll* is the raw argument string (a question followed by answers,
    shlex-quoted). Returns an error string on bad input, otherwise None.
    """
    # normalize smart quotes / em-dashes that chat clients insert
    poll = remove_smart_quotes(poll.replace(u"\u2014", u"--"))
    try:
        args = ARGPARSE.parse_args(shlex.split(poll)).poll
    except ValueError:
        # shlex.split raises ValueError on unbalanced quotes
        return ERROR_INVALID_FORMAT
    if not 2 < len(args) < len(POLL_EMOJIS) + 1:
        # need a question plus at least 2 and at most len(POLL_EMOJIS) answers
        return ERROR_WRONG_NUMBER_OF_ARGUMENTS
    result = ["Poll: {}\n".format(args[0])]
    for emoji, answer in zip(POLL_EMOJIS, args[1:]):
        result.append(":{}: {}\n".format(emoji, answer))
    msg_posted = server.slack.post_message(
        msg['channel'], "".join(result), as_user=server.slack.username)
    ts = json.loads(msg_posted)["ts"]
    # add one reaction per answer so users can vote with a single click
    for i in range(len(args) - 1):
        server.slack.post_reaction(msg['channel'], ts, POLL_EMOJIS[i])
Given a question and answers, present a poll
def unpack(self, struct):
    """Read exactly ``struct.size`` bytes from the stream and unpack them.

    Returns the tuple of decoded values. The underlying ``read`` raises
    UnderflowDecodeError when it cannot supply enough bytes.

    Parameters
    ----------
    struct: struct.Struct
    """
    raw = self.read(struct.size)
    return struct.unpack(raw)
Read as many bytes as are required to extract struct then unpack and return a tuple of the values. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to extract the bytes. Parameters ---------- struct: struct.Struct Returns ------- tuple Tuple of extracted values.
def find_behind_subscriptions(self, request):
    """Kick off the async find_behind_subscriptions celery task.

    The task scans active subscriptions and records a BehindSubscription
    for each one lagging where it should be. Responds 202 with the task id.
    """
    task_id = find_behind_subscriptions.delay()
    return Response(
        {"accepted": True, "task_id": str(task_id)},
        status=status.HTTP_202_ACCEPTED
    )
Starts a celery task that looks through active subscriptions to find and subscriptions that are behind where they should be, and adds a BehindSubscription for them.
def calculate_length_statistics(source_iterables: Sequence[Iterable[Any]],
                                target_iterable: Iterable[Any],
                                max_seq_len_source: int,
                                max_seq_len_target: int) -> 'LengthStatistics':
    """Compute mean/std of target-to-source length ratios of a parallel corpus.

    Pairs where either side exceeds its maximum length are skipped. The
    source length is taken from the first source factor only.

    :param source_iterables: Source sequence readers.
    :param target_iterable: Target sequence reader.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :return: LengthStatistics with the pair count and the mean and standard
        deviation of the length ratios.
    """
    mean_and_variance = OnlineMeanAndVariance()
    for sources, target in parallel_iter(source_iterables, target_iterable):
        source_len = len(sources[0])
        target_len = len(target)
        if source_len > max_seq_len_source or target_len > max_seq_len_target:
            # skip pairs that would be filtered out of training anyway
            continue
        length_ratio = target_len / source_len
        mean_and_variance.update(length_ratio)
    return LengthStatistics(mean_and_variance.count,
                            mean_and_variance.mean,
                            mean_and_variance.std)
Returns mean and standard deviation of target-to-source length ratios of parallel corpus. :param source_iterables: Source sequence readers. :param target_iterable: Target sequence reader. :param max_seq_len_source: Maximum source sequence length. :param max_seq_len_target: Maximum target sequence length. :return: The number of sentences as well as the mean and standard deviation of target to source length ratios.
def check_error(when='periodic check'):
    """Raise RuntimeError if OpenGL has accumulated any errors.

    Drains glGetError() (the GL queues one error per flag) and raises a
    single RuntimeError carrying all codes; *when* tags the message so the
    developer can tell which checkpoint fired.

    Parameters
    ----------
    when : str
        Shown in the exception to help determine when the check was done.
    """
    errors = []
    while True:
        err = glGetError()
        # stop on "no error", or when the driver keeps repeating the same
        # code (guards against an infinite loop on a stuck error state)
        if err == GL_NO_ERROR or (errors and err == errors[-1]):
            break
        errors.append(err)
    if errors:
        msg = ', '.join([repr(ENUM_MAP.get(e, e)) for e in errors])
        err = RuntimeError('OpenGL got errors (%s): %s' % (when, msg))
        # attach machine-readable details for callers that inspect them
        err.errors = errors
        err.err = errors[-1]
        raise err
Check this from time to time to detect GL errors. Parameters ---------- when : str Shown in the exception to help the developer determine when this check was done.
def titleize(text):
    """Return *text* with every word capitalized, for a nicer looking title.

    The input is lowercased first; words are split on single spaces and
    empty chunks (from runs of spaces) are dropped.
    """
    if not text:
        return text
    words = [word[0].upper() + word[1:]
             for word in text.lower().split(" ") if word]
    return " ".join(words)
Capitalizes all the words and replaces some characters in the string to create a nicer looking title.
def bind(self, queue, exchange, routing_key='', nowait=True, arguments=None,
         ticket=None, cb=None):
    """Bind *queue* to *exchange* with *routing_key*.

    When a callback *cb* is supplied the bind is forced synchronous so the
    callback can fire on bind-ok.

    :param queue: queue name
    :param exchange: exchange name
    :param routing_key: routing key for the binding
    :param nowait: do not wait for bind-ok (downgraded when cb is set)
    :param arguments: optional binding arguments table (dict)
    :param ticket: access ticket; defaults to self.default_ticket
    :param cb: callback invoked when bind-ok is received
    """
    # NOTE: previously `arguments={}` — a mutable default argument; use a
    # None sentinel instead (callers passing a dict are unaffected).
    if arguments is None:
        arguments = {}
    nowait = nowait and self.allow_nowait() and not cb
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_shortstr(exchange).\
        write_shortstr(routing_key).\
        write_bit(nowait).\
        write_table(arguments)
    self.send_frame(MethodFrame(self.channel_id, 50, 20, args))
    if not nowait:
        self._bind_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_bind_ok)
bind to a queue.
def set_imap_cb(self, w, index):
    """Callback: apply the intensity map chosen in the preferences pane."""
    chosen = imap.get_names()[index]
    self.t_.set(intensity_map=chosen)
This callback is invoked when the user selects a new intensity map from the preferences pane.
def yaml_parse(yamlstr):
    """Parse a template string: JSON fast path, then YAML.

    JSON is tried first (it is a YAML subset and faster to parse); on
    failure the string is parsed as YAML with a multi-constructor
    registered for '!'-prefixed intrinsic function tags.
    """
    try:
        return json.loads(yamlstr)
    except ValueError:
        # NOTE(review): this registers the constructor on the global
        # SafeLoader on every call — harmless but repeated; confirm.
        yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
        return yaml.safe_load(yamlstr)
Parse a yaml string
def _remove_person_from_group(person, group):
    """Propagate a person's removal from *group* to the account datastores.

    Every account of *person* is detached from the group itself and from
    each project and institute linked to that group.
    """
    # imported here to avoid an import cycle with the datastores module
    from karaage.datastores import remove_accounts_from_group
    from karaage.datastores import remove_accounts_from_project
    from karaage.datastores import remove_accounts_from_institute
    # NOTE(review): a_list is the related manager, not a queryset —
    # presumably the datastore helpers iterate it; confirm.
    a_list = person.account_set
    remove_accounts_from_group(a_list, group)
    for project in group.project_set.all():
        remove_accounts_from_project(a_list, project)
    for institute in group.institute_set.all():
        remove_accounts_from_institute(a_list, institute)
Call datastores after removing a person from a group.
def jsd(p, q):
    """Per-column Jensen-Shannon divergence between dataframes *p* and *q*.

    Both inputs must be probability distributions (non-negative, columns
    summing to 1); otherwise NaN is returned. JSD is the average of the
    KL divergences of each input against the midpoint distribution.
    """
    try:
        _check_prob_dist(p)
        _check_prob_dist(q)
    except ValueError:
        return np.nan
    midpoint = 0.5 * (p + q)
    return 0.5 * kld(p, midpoint) + 0.5 * kld(q, midpoint)
Finds the per-column JSD between dataframes p and q Jensen-Shannon divergence of two probability distrubutions pandas dataframes, p and q. These distributions are usually created by running binify() on the dataframe. Parameters ---------- p : pandas.DataFrame An nbins x features DataFrame. q : pandas.DataFrame An nbins x features DataFrame. Returns ------- jsd : pandas.Series Jensen-Shannon divergence of each column with the same names between p and q Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError
def get_state_and_verify(self):
    """Return the run progress state stored in the Git config.

    Raises ValueError if the retrieved state is not of a form that
    cherry_picker would have stored in the config.
    """
    try:
        state = get_state()
    except KeyError as ke:
        # Unknown state name: build a stand-in object exposing a .name
        # attribute so the error message below can still render it.
        class state:
            name = str(ke.args[0])
    if state not in self.ALLOWED_STATES:
        raise ValueError(
            f"Run state cherry-picker.state={state.name} in Git config "
            "is not known.\nPerhaps it has been set by a newer "
            "version of cherry-picker. Try upgrading.\n"
            "Valid states are: "
            f'{", ".join(s.name for s in self.ALLOWED_STATES)}. '
            "If this looks suspicious, raise an issue at "
            "https://github.com/python/core-workflow/issues/new.\n"
            "As the last resort you can reset the runtime state "
            "stored in Git config using the following command: "
            "`git config --local --remove-section cherry-picker`"
        )
    return state
Return the run progress state stored in the Git config. Raises ValueError if the retrieved state is not of a form that cherry_picker would have stored in the config.
def entity_tags_form(self, entity, ns=None):
    """Construct a form class with a tags field bound to namespace *ns*.

    When *ns* is omitted, the entity's default namespace is used.
    """
    if ns is None:
        ns = self.entity_default_ns(entity)
    tags_field = TagsField(label=_l("Tags"), ns=ns)
    return type("EntityNSTagsForm", (_TagsForm,), {"tags": tags_field})
Construct a form class with a field for tags in namespace `ns`.
def pawns_at(self, x, y):
    """Yield each pawn whose shape contains the point (x, y)."""
    for piece in self.pawn.values():
        if piece.collide_point(x, y):
            yield piece
Iterate over pawns that collide the given point.
def model_funcpointers(vk, model):
    """Map each function pointer name to the struct that uses it.

    Fills ``model['funcpointers'] = {pfn_name: struct_name}``. When more
    than one struct references a pointer, the last one in registry order
    wins.
    """
    model['funcpointers'] = {}
    all_types = vk['registry']['types']['type']
    funcs = [t for t in all_types if t.get('@category') == 'funcpointer']
    structs = [t for t in all_types if t.get('@category') == 'struct']
    for func in funcs:
        pfn_name = func['name']
        for struct in structs:
            if 'member' not in struct:
                continue
            if any(member['type'] == pfn_name for member in struct['member']):
                model['funcpointers'][pfn_name] = struct['@name']
Fill the model with function pointer model['funcpointers'] = {'pfn_name': 'struct_name'}
def get_developer_package(path, format=None):
    """Create a developer package.

    Args:
        path (str): Path to dir containing package definition file.
        format (str): Package definition file format, detected if None.

    Returns:
        `DeveloperPackage`.
    """
    # imported lazily to avoid a circular import at module load time
    from rez.developer_package import DeveloperPackage
    return DeveloperPackage.from_path(path, format=format)
Create a developer package. Args: path (str): Path to dir containing package definition file. format (str): Package definition file format, detected if None. Returns: `DeveloperPackage`.
def enable_logging(self, bucket_name, object_prefix=""):
    """Enable access logging for this bucket.

    See https://cloud.google.com/storage/docs/access-logs

    :type bucket_name: str
    :param bucket_name: name of bucket in which to store access logs
    :type object_prefix: str
    :param object_prefix: prefix for access log filenames
    """
    self._patch_property(
        "logging",
        {"logBucket": bucket_name, "logObjectPrefix": object_prefix},
    )
Enable access logging for this bucket. See https://cloud.google.com/storage/docs/access-logs :type bucket_name: str :param bucket_name: name of bucket in which to store access logs :type object_prefix: str :param object_prefix: prefix for access log filenames
def get_assessment_basic_authoring_session_for_bank(self, bank_id, proxy):
    """Get the ``OsidSession`` for the assessment authoring service of a bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of a bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentBasicAuthoringSession)
    raise:  Unimplemented - ``supports_assessment_basic_authoring()``
            is ``false``
    """
    if not self.supports_assessment_basic_authoring():
        raise errors.Unimplemented()
    return sessions.AssessmentBasicAuthoringSession(bank_id, proxy, self._runtime)
Gets the ``OsidSession`` associated with the assessment authoring service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of a bank arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentBasicAuthoringSession) - an ``AssessmentBasicAuthoringSession`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_basic_authoring()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_basic_authoring()`` and ``supports_visible_federation()`` is ``true``.*
def search_location_check(cls, location):
    """Validate a search-location mapping with exactly the expected keys.

    *location* must be a mapping whose keys are exactly
    _LOCATION_SEARCH_ARGS ('lat', 'long', 'radius'); the coordinates are
    checked via cls.location_check and the radius must be a number in
    (0, 20038] (presumably kilometers — confirm). Raises ValueError
    otherwise.
    """
    if not (isinstance(location, Mapping)
            and set(location.keys()) == _LOCATION_SEARCH_ARGS):
        raise ValueError('Search location should be mapping with keys: %s'
                         % _LOCATION_SEARCH_ARGS)
    cls.location_check(location['lat'], location['long'])
    radius = location['radius']
    if not (isinstance(radius, number_types) and 0 < radius <= 20038):
        raise ValueError("Radius: '{radius}' is invalid".format(radius=radius))
Core.Client.request_search location parameter should be a dictionary that contains 'lat', 'long' and 'radius' floats
def get_account_holds(self, account_id, **kwargs):
    """Get holds on an account via the paginated holds endpoint.

    Returns a generator that may issue multiple HTTP requests while
    iterated.

    Args:
        account_id (str): Account id to get holds of.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Hold information for the account.
    """
    endpoint = '/accounts/{}/holds'.format(account_id)
    return self._send_paginated_message(endpoint, params=kwargs)
Get holds on an account. This method returns a generator which may make multiple HTTP requests while iterating through it. Holds are placed on an account for active orders or pending withdraw requests. As an order is filled, the hold amount is updated. If an order is canceled, any remaining hold is removed. For a withdraw, once it is completed, the hold is removed. The `type` field will indicate why the hold exists. The hold type is 'order' for holds related to open orders and 'transfer' for holds related to a withdraw. The `ref` field contains the id of the order or transfer which created the hold. Args: account_id (str): Account id to get holds of. kwargs (dict): Additional HTTP request parameters. Returns: generator(list): Hold information for the account. Example:: [ { "id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f", "account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3", "created_at": "2014-11-06T10:34:47.123456Z", "updated_at": "2014-11-06T10:40:47.123456Z", "amount": "4.23", "type": "order", "ref": "0a205de4-dd35-4370-a285-fe8fc375a273", }, { ... } ]
def add_edges(self, from_idx, to_idx, weight=1, symmetric=False, copy=False):
    """Add all from->to edges; abstract here, must be overridden by subclasses.

    weight may be a scalar or 1d array. If symmetric=True, implementations
    also add to->from edges with the same weights.
    """
    raise NotImplementedError()
Adds all from->to edges. weight may be a scalar or 1d array. If symmetric=True, also adds to->from edges with the same weights.
def volumes(self):
    """Return the EBS volumes attached to this instance.

    Queries resources of type 'aws_ebs_volume' whose 'attachments'
    property JSON-contains this instance's id.

    Returns:
        `list` of `EBSVolume`
    """
    return [
        EBSVolume(res) for res in db.Resource.join(
            ResourceProperty,
            Resource.resource_id == ResourceProperty.resource_id
        ).filter(
            Resource.resource_type_id == ResourceType.get('aws_ebs_volume').resource_type_id,
            ResourceProperty.name == 'attachments',
            func.JSON_CONTAINS(ResourceProperty.value, func.JSON_QUOTE(self.id))
        ).all()
    ]
Returns a list of the volumes attached to the instance Returns: `list` of `EBSVolume`
def get_default_options(self):
    """Get a dictionary of default options as used with run.

    Parsing the usage string against an empty argv makes docopt yield each
    option's default (False for flags, otherwise None or the declared
    default).

    Returns
    -------
    dict
        Keys of the form '--beat_interval' mapped to their defaults.
    """
    _, _, internal_usage = self.get_usage()
    args = docopt(internal_usage, [])
    return {k: v for k, v in args.items() if k.startswith('--')}
Get a dictionary of default options as used with run. Returns ------- dict A dictionary containing option keys of the form '--beat_interval'. Their values are boolean if the option is a flag, otherwise None or its default value.
def is_matching(cls, file_path):
    """Return whether *file_path* is an ndata file.

    A matching file has the '.ndata' extension, exists, and is a zip whose
    directory contains only 'data.npy' and/or 'metadata.json'. Always
    returns a bool (the old code implicitly returned None on several
    paths).
    """
    if not (file_path.endswith(".ndata") and os.path.exists(file_path)):
        return False
    try:
        # open read-only: a match check must not require write access
        # (previously "r+b", which fails on read-only files)
        with open(file_path, "rb") as fp:
            local_files, dir_files, eocd = parse_zip(fp)
        contains_data = b"data.npy" in dir_files
        contains_metadata = b"metadata.json" in dir_files
        file_count = contains_data + contains_metadata
        return file_count != 0 and len(dir_files) == file_count
    except Exception as e:
        logging.error("Exception parsing ndata file: %s", file_path)
        logging.error(str(e))
    return False
Return whether the given absolute file path is an ndata file.
async def create_connection(self):
    """Initiate a connection, via the configured proxy if one is set."""
    # the proxy object mirrors the event loop's create_connection API
    connector = self.proxy or self.loop
    return await connector.create_connection(
        self.session_factory, self.host, self.port, **self.kwargs)
Initiate a connection.
def put_request_payment(self, bucket, payer):
    """Set request payment configuration on bucket to payer.

    @param bucket: The name of the bucket.
    @param payer: The name of the payer.
    @return: A C{Deferred} that will fire with the result of the request.
    """
    data = RequestPayment(payer).to_xml()
    details = self._details(
        method=b"PUT",
        url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
        body=data,
    )
    d = self._submit(self._query_factory(details))
    return d
Set request payment configuration on bucket to payer. @param bucket: The name of the bucket. @param payer: The name of the payer. @return: A C{Deferred} that will fire with the result of the request.
def running(concurrent=False):
    """Return descriptions of state.* functions already running on this minion.

    Used to prevent multiple state calls from being run at the same time;
    returns an empty list when *concurrent* is True (the check is skipped).

    CLI Example:

    .. code-block:: bash

        salt '*' state.running
    """
    ret = []
    if concurrent:
        return ret
    active = __salt__['saltutil.is_running']('state.*')
    for data in active:
        err = (
            'The function "{0}" is running as PID {1} and was started at '
            '{2} with jid {3}'
        ).format(
            data['fun'],
            data['pid'],
            salt.utils.jid.jid_to_time(data['jid']),
            data['jid'],
        )
        ret.append(err)
    return ret
Return a list of strings that contain state return data if a state function is already running. This function is used to prevent multiple state calls from being run at the same time. CLI Example: .. code-block:: bash salt '*' state.running
def check(line, queries):
    """Check whether any of *queries* appears as a token of *line*.

    The line is stripped, '.' characters are replaced with spaces, and the
    result is split on whitespace. Returns (matching queries, tab-split
    line) on a match, otherwise (empty set, False).
    """
    stripped = line.strip()
    tokens = stripped.replace('.', ' ').split()
    matches = set(tokens).intersection(queries)
    if matches:
        return matches, stripped.split('\t')
    return matches, False
Check whether at least one of *queries* occurs as a token of *line*; returns (matching queries, tab-split line) on a match, otherwise (empty set, False).
def to_dict(self):
    """Return a dict of all public attributes (names starting with '_' are skipped)."""
    return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
to_dict will clean all protected and private properties
def set_cpu_status(self, status):
    """Set the virtual CPU status on the server.

    :param status: one of snap7.snap7types.cpu_statuses
    """
    assert status in snap7.snap7types.cpu_statuses, 'unknown cpu state %s' % status
    logger.debug("setting cpu status to %s" % status)
    return self.library.Srv_SetCpuStatus(self.pointer, status)
Sets the Virtual CPU status.
def PDBasXMLwithSymwithPolarH(self, id):
    """Fetch structure *id* from the WHATIF web service with polar hydrogens added.

    The raw HTTP response is kept on self.raw; the parsed structure is
    returned. NOTE: Python 2 only (print statement, urllib.urlopen).
    """
    print _WARNING
    h_s_xml = urllib.urlopen("http://www.cmbi.ru.nl/wiwsd/rest/PDBasXMLwithSymwithPolarH/id/" + id)
    self.raw = h_s_xml
    p = self.parser
    h_s_smcra = p.read(h_s_xml, 'WHATIF_Output')
    return h_s_smcra
Adds Hydrogen Atoms to a Structure.
def keys_to_datetime(obj, *keys):
    """Convert the string values at *keys* in dict *obj* to DateTime, in place.

    Keys that are absent, or whose values are not strings, are skipped.

    Args:
        obj (dict): the JSON-like ``dict`` object to modify inplace.
        keys (str): keys whose values are converted.

    Returns:
        dict: ``obj`` inplace.
    """
    if not keys:
        return obj
    for key in keys:
        if key not in obj:
            continue
        value = obj[key]
        if isinstance(value, string_types):
            obj[key] = parse_datetime(value)
    return obj
Converts all the keys in an object to DateTime instances. Args: obj (dict): the JSON-like ``dict`` object to modify inplace. keys (str): keys of the object being converted into DateTime instances. Returns: dict: ``obj`` inplace. >>> keys_to_datetime(None) is None True >>> keys_to_datetime({}) {} >>> a = {} >>> id(keys_to_datetime(a)) == id(a) True >>> a = {'one': '2016-06-06T19:41:43.039284', 'two': '2016-06-06T19:41:43.039284'} >>> keys_to_datetime(a) == a True >>> keys_to_datetime(a, 'one')['one'] datetime.datetime(2016, 6, 6, 19, 41, 43, 39284) >>> keys_to_datetime(a, 'one')['two'] '2016-06-06T19:41:43.039284'
def get_crawler_stats(self):
    """Gather spider, machine and queue stats into a single dict.

    @return: A dict of stats
    """
    self.logger.debug("Gathering crawler stats")
    return {
        'spiders': self.get_spider_stats()['spiders'],
        'machines': self.get_machine_stats()['machines'],
        'queue': self.get_queue_stats()['queues'],
    }
Gather crawler stats @return: A dict of stats
def _assert_validators(self, validators):
    """Assert that all validators in the list are satisfied.

    Validators run in creation (insertion_index) order; the first failure
    stops the run and is re-raised as IllegalFlagValueError carrying the
    offending flag values.

    Args:
      validators: Iterable(validators.Validator), validators to be verified.

    Raises:
      AttributeError: Raised if a validator works with a non-existing flag.
      IllegalFlagValueError: Raised if validation fails for at least one
        validator.
    """
    for validator in sorted(
        validators, key=lambda validator: validator.insertion_index):
        try:
            validator.verify(self)
        except _exceptions.ValidationError as e:
            message = validator.print_flags_with_values(self)
            raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))
Asserts if all validators in the list are satisfied. It asserts validators in the order they were created. Args: validators: Iterable(validators.Validator), validators to be verified. Raises: AttributeError: Raised if validators work with a non-existing flag. IllegalFlagValueError: Raised if validation fails for at least one validator.
def start(st_reg_number):
    """Validate a Sergipe state registration number.

    The number must be exactly 9 digits; the last digit is a mod-11 check
    digit computed over the first 8 digits with weights 9 down to 2
    (check values 10 and 11 map to 0).
    """
    if len(st_reg_number) != 9:
        return False
    total = sum(int(digit) * weight
                for digit, weight in zip(st_reg_number[:-1], range(9, 1, -1)))
    check = 11 - (total % 11)
    if check in (10, 11):
        check = 0
    return check == int(st_reg_number[-1])
Checks the validity of a state registration number for the Sergipe state
def getvalue(self):
    """Return the file content; always bytes.

    Strategy 0 delegates directly; otherwise the delegate is flushed,
    rewound and read, with text output encoded as UTF-8.
    """
    if self.strategy == 0:
        return self._delegate.getvalue()
    self._delegate.flush()
    self._delegate.seek(0)
    data = self._delegate.read()
    if isinstance(data, six.binary_type):
        return data
    return data.encode('utf-8')
Get value of file. Work around for second strategy. Always returns bytes
def copy(self):
    """Return a copy of the version (token and separator lists are copied)."""
    other = Version(None)
    other.tokens = self.tokens[:]
    other.seps = self.seps[:]
    return other
Returns a copy of the version.
def _fix_dynamic_class_lookup(cls, pstfx): extnm = '_' + cls.__name__ + '_' + pstfx mdl = sys.modules[cls.__module__] setattr(mdl, extnm, cls) if hasattr(cls, '__qualname__'): cls.__qualname__ = extnm else: cls.__name__ = extnm
Fix name lookup problem that prevents pickling of dynamically defined classes. Parameters ---------- cls : class Dynamically generated class to which fix is to be applied pstfx : string Postfix that can be used to identify dynamically generated classes that are equivalent by construction
def dump(self, f, indent=''):
    """Dump this section and its children to the file-like object *f*."""
    header = "%s&%s %s" % (indent, self.__name, self.section_parameters)
    print(header.rstrip(), file=f)
    self.dump_children(f, indent)
    print("%s&END %s" % (indent, self.__name), file=f)
Dump this section and its children to a file-like object
def _build_category_tree(slug, reference=None, items=None): if items is None: items = [] for key in reference: category = reference[key] if category["parent"] == slug: children = _build_category_tree(category["nicename"], reference=reference) category["children"] = children items.append(category) return items
Builds a recursive tree with category relations as children.
def _update_display(self, event=None): try: if self._showvalue: self.display_value(self.scale.get()) if self._tickinterval: self.place_ticks() except IndexError: pass
Redisplay the ticks and the label so that they adapt to the new size of the scale.
def apply_obb(self):
    """Transform the path so its OBB is axis aligned with the OBB center
    at the origin.

    Only supported for single-body paths; returns the transform matrix
    that was applied.

    Raises
    ------
    ValueError
        If the path has more than one root (multibody geometry).
    """
    if len(self.root) == 1:
        matrix, bounds = polygons.polygon_obb(
            self.polygons_closed[self.root[0]])
        self.apply_transform(matrix)
        return matrix
    else:
        raise ValueError('Not implemented for multibody geometry')
Transform the current path so that its OBB is axis aligned and OBB center is at the origin.
def save_agent_profile(self, profile):
    """Save an agent profile doc to the LRS via PUT.

    The profile content is sent with its content type (defaulting to
    application/octet-stream) and an If-Match header when an etag is set.

    :param profile: Agent profile doc to be saved
    :type profile: :class:`tincan.documents.agent_profile_document.AgentProfileDocument`
    :return: LRS Response object with the saved agent profile doc as content
    :rtype: :class:`tincan.lrs_response.LRSResponse`
    """
    request = HTTPRequest(
        method="PUT",
        resource="agents/profile",
        content=profile.content,
    )
    if profile.content_type is not None:
        request.headers["Content-Type"] = profile.content_type
    else:
        request.headers["Content-Type"] = "application/octet-stream"
    if profile.etag is not None:
        request.headers["If-Match"] = profile.etag
    request.query_params = {
        "profileId": profile.id,
        "agent": profile.agent.to_json(self.version)
    }
    lrs_response = self._send_request(request)
    # echo the saved document back on the response for the caller
    lrs_response.content = profile
    return lrs_response
Save an agent profile doc to the LRS :param profile: Agent profile doc to be saved :type profile: :class:`tincan.documents.agent_profile_document.AgentProfileDocument` :return: LRS Response object with the saved agent profile doc as content :rtype: :class:`tincan.lrs_response.LRSResponse`
def gatherBy(self, func):
    'Generate only rows for which the given func returns True.'
    # start from the cursor row and wrap around, so gathering begins with
    # what the user is currently looking at
    for i in rotate_range(len(self.rows), self.cursorRowIndex):
        try:
            r = self.rows[i]
            if func(r):
                yield r
        except Exception:
            # deliberately best-effort: rows whose predicate errors are skipped
            pass
Generate only rows for which the given func returns True.
def would_move_be_promotion(self):
    """Return True if moving to the end location would promote the pawn.

    A truthy color promotes on rank 7, a falsy color on rank 0.
    """
    rank = self._end_loc.rank
    if self.color:
        return rank == 7
    return rank == 0
Finds if move from current location would be a promotion
def exists(self, key, fresh=False):
    """Check if a setting key exists (and was not deleted).

    :param key: the name of the setting variable (matched upper-cased)
    :param fresh: if the key should be taken from the source directly
    :return: Boolean
    """
    normalized = key.upper()
    if normalized in self._deleted:
        return False
    value = self.get(normalized, fresh=fresh, default=missing)
    return value is not missing
Check if key exists :param key: the name of setting variable :param fresh: if key should be taken from source direclty :return: Boolean
def set_logger(self, log_level=logging.INFO):
    """Configure stderr logging at *log_level* and attach a module logger.

    Also quiets the chatty 'requests' logger down to WARNING.
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        stream=sys.stderr,
        level=log_level)
    self.logger = logging.getLogger(__name__)
    logging.getLogger('requests').setLevel(logging.WARNING)
Configure the logger with log_level.
def asdict(self) -> Dict[str, Union[Dict, Union[str, Dict]]]:
    """Return a dictionary describing the method.

    Combines the service name with the serialized signature; usable for
    dumping the information into a JSON file.
    """
    description = {"service": self.service.name}
    description.update(self.signature.serialize())
    return description
Return a dictionary describing the method. This can be used to dump the information into a JSON file.
def decode(obj, content_type):
    """Decode an object encoded in one of the default content types.

    Args:
        obj (object): object to be decoded.
        content_type (str): content type to be used.

    Returns:
        np.array: decoded object.

    Raises:
        UnsupportedFormatError: if no decoder is registered for content_type.
    """
    try:
        decoder = _decoders_map[content_type]
    except KeyError:
        # Only a missing registry entry means "unsupported". The decoder
        # call is kept outside the try: previously a KeyError raised
        # inside the decoder itself was misreported as an unsupported
        # format, masking the real error.
        raise _errors.UnsupportedFormatError(content_type)
    return decoder(obj)
Decode an object encoded in one of the default content types to a numpy array. Args: obj (object): to be decoded. content_type (str): content type to be used. Returns: np.array: decoded object.
def AddMonths(start_date, months):
    """Return *start_date* advanced by *months* calendar months.

    The day of month is preserved, clamped to the last day of the target
    month (e.g. Jan 31 + 1 month -> Feb 28/29). The previous implementation
    added the length of each intermediate month in days, which drifted the
    day of month for dates near the end of a month (Jan 31 + 1 month gave
    Mar 2/3).

    Args:
        start_date: date The date months are being added to.
        months: int The number of months to add.

    Returns:
        A date equal to the start date incremented by the given number of
        months.
    """
    total = start_date.month - 1 + months
    year = start_date.year + total // 12
    month = total % 12 + 1
    # clamp to the length of the target month
    day = min(start_date.day, calendar.monthrange(year, month)[1])
    return start_date.replace(year=year, month=month, day=day)
A simple convenience utility for adding months to a given start date. This increments the months by adding the number of days in the current month to the current month, for each month. Args: start_date: date The date months are being added to. months: int The number of months to add. Returns: A date equal to the start date incremented by the given number of months.
def _check_uuid_fmt(self):
    """Check .uuid_fmt against UUIDField.FORMATS.

    Raises FieldValueRangeException if the format is not valid.
    """
    if self.uuid_fmt not in UUIDField.FORMATS:
        raise FieldValueRangeException(
            "Unsupported uuid_fmt ({})".format(self.uuid_fmt))
Checks .uuid_fmt, and raises an exception if it is not valid.
def setOverlayRaw(self, ulOverlayHandle, pvBuffer, unWidth, unHeight, unDepth):
    """Provide overlay data as a raw stream of bytes.

    There is an upper bound on the data that can be sent, and this can
    only be called by the overlay's renderer process.
    """
    return self.function_table.setOverlayRaw(
        ulOverlayHandle, pvBuffer, unWidth, unHeight, unDepth)
Separate interface for providing the data as a stream of bytes, but there is an upper bound on data that can be sent. This function can only be called by the overlay's renderer process.
def mv(hdfs_src, hdfs_dst):
    """Move a file on hdfs.

    :param hdfs_src: Source (str)
    :param hdfs_dst: Destination (str)
    :raises: IOError: If unsuccessful
    """
    cmd = "hadoop fs -mv %s %s" % (hdfs_src, hdfs_dst)
    # the helper validates the return code and raises on failure; the
    # previously-unpacked rcode/stdout/stderr locals were never used
    _checked_hadoop_fs_command(cmd)
Move a file on hdfs :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful
def make_analogous_scheme(self, angle=30, mode='ryb'):
    """Return two colors analogous to this one.

    The hue is shifted by +/- *angle* degrees, on the RYB artistic wheel
    when mode='ryb' (the hue is converted there and back) or directly in
    HSL space for 'rgb'.

    Args:
      :angle: The angle between the hues of the created colors and this one.
      :mode: Which color wheel to use for the generation (ryb/rgb).

    Returns:
      A tuple of two grapefruit.Colors analogous to this one (alpha and
      white reference are preserved).
    """
    h, s, l = self.__hsl
    if mode == 'ryb':
        h = rgb_to_ryb(h)
    # keep h - angle non-negative before the modulo
    h += 360
    h1 = (h - angle) % 360
    h2 = (h + angle) % 360
    if mode == 'ryb':
        h1 = ryb_to_rgb(h1)
        h2 = ryb_to_rgb(h2)
    return (Color((h1, s, l), 'hsl', self.__a, self.__wref),
            Color((h2, s, l), 'hsl', self.__a, self.__wref))
Return two colors analogous to this one. Args: :angle: The angle between the hues of the created colors and this one. :mode: Select which color wheel to use for the generation (ryb/rgb). Returns: A tuple of grapefruit.Colors analogous to this one. >>> c1 = Color.from_hsl(30, 1, 0.5) >>> c2, c3 = c1.make_analogous_scheme(angle=60, mode='rgb') >>> c2.hsl (330.0, 1.0, 0.5) >>> c3.hsl (90.0, 1.0, 0.5) >>> c2, c3 = c1.make_analogous_scheme(angle=10, mode='rgb') >>> c2.hsl (20.0, 1.0, 0.5) >>> c3.hsl (40.0, 1.0, 0.5)
def get_cust_cols(path):
    """Load and validate custom column definitions from *path*.

    The file must contain a Python literal list of dicts; each dict needs
    the keys title/id/sType/visible plus either 'jsonxs' or 'tpl'. Exits
    the program with an error on stderr if anything is invalid.
    """
    required_keys = ["title", "id", "sType", "visible"]
    with open(path, 'r') as f:
        try:
            cust_cols = ast.literal_eval(f.read())
        except Exception as err:
            sys.stderr.write("Invalid custom columns file: {}\n".format(path))
            sys.stderr.write("{}\n".format(err))
            sys.exit(1)
    for col in cust_cols:
        absent = [key for key in required_keys if key not in col]
        if absent:
            sys.stderr.write("Missing required key '{}' in custom "
                             "column {}\n".format(absent[0], col))
            sys.exit(1)
        if "jsonxs" not in col and "tpl" not in col:
            sys.stderr.write("You need to specify 'jsonxs' or 'tpl' "
                             "for custom column {}\n".format(col))
            sys.exit(1)
    return cust_cols
Load custom column definitions.
def ok(self):
    """Store the selected color (with alpha when enabled) and close the dialog."""
    rgb, hsv, hexa = self.square.get()
    if self.alpha_channel:
        # the hex field includes the alpha; append it to the rgb tuple too
        hexa = self.hexa.get()
        rgb = rgb + (self.alpha.get(),)
    self.color = rgb, hsv, hexa
    self.destroy()
Validate color selection and destroy dialog.
def create_config(self, name, data, labels=None):
    """Create a config.

    Args:
        name (string): Name of the config
        data (bytes or str): Config data to be stored (str is UTF-8 encoded)
        labels (dict): A mapping of labels to assign to the config

    Returns (dict): ID of the newly created config
    """
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    # the API expects the payload base64-encoded, as a text string on py3
    data = base64.b64encode(data)
    if six.PY3:
        data = data.decode('ascii')
    body = {
        'Data': data,
        'Name': name,
        'Labels': labels
    }
    url = self._url('/configs/create')
    return self._result(
        self._post_json(url, data=body), True
    )
Create a config Args: name (string): Name of the config data (bytes): Config data to be stored labels (dict): A mapping of labels to assign to the config Returns (dict): ID of the newly created config
def getClient(self):
    """Return the Client this Batch is attached to.

    Prefers the schema's 'Client' field; falls back to the acquisition
    parent when that parent provides IClient. Implicitly returns None
    when no client can be found.
    """
    client = self.Schema().getField('Client').get(self)
    if client:
        return client
    client = self.aq_parent
    if IClient.providedBy(client):
        return client
Retrieves the Client for which the current Batch is attached to Tries to retrieve the Client from the Schema property, but if not found, searches for linked ARs and retrieve the Client from the first one. If the Batch has no client, returns None.
def _get_home():
    """Find the user's home directory if possible; otherwise return None.

    Tries os.path.expanduser first (decoding bytes on Python 2 / Windows),
    then the HOME, USERPROFILE and TMP environment variables, returning the
    first value that is an existing directory.

    :see: http://mail.python.org/pipermail/python-list/2005-February/325395.html
    This function is copied from matplotlib version 1.4.3, Jan 2016
    """
    try:
        if six.PY2 and sys.platform == 'win32':
            path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding())
        else:
            path = os.path.expanduser("~")
    except ImportError:
        # NOTE(review): presumably guards expanduser importing a module
        # (e.g. pwd) that may be unavailable on some platforms — confirm.
        pass
    else:
        if os.path.isdir(path):
            return path
    for evar in ('HOME', 'USERPROFILE', 'TMP'):
        path = os.environ.get(evar)
        if path is not None and os.path.isdir(path):
            return path
    return None
Find user's home directory if possible. Otherwise, returns None. :see: http://mail.python.org/pipermail/python-list/2005-February/325395.html This function is copied from matplotlib version 1.4.3, Jan 2016
def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    """Return the first managed object reference whose *property_name*
    equals *property_value*, or whose stringified reference id matches.

    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The value of the property to match.
    property_name
        The object property compared against property_value. Defaults to
        ``name``.
    container_ref
        Optional reference to the managed object to search under (Folder,
        Datacenter, ComputeResource, Resource Pool or HostSystem). Defaults
        to the inventory rootFolder.
    """
    object_list = get_mors_with_properties(service_instance, object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        # compare against the bare reference id with surrounding quotes
        # stripped — TODO(review): confirm the stringified reference format
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    return None
Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder.
def _unpack(self, content): if self.compressor: try: content = self.compressor.decompress(content) except CompressError: pass if self.serializer: content = self.serializer.deserialize(content) return content
unpack cache using serializer and compressor
def clear(self, objtype=None):
    r"""Clear objects from the project, entirely or selectively.

    Parameters
    ----------
    objtype : list of strings, optional
        Object type(s) to remove: *'network'*, *'geometry'*, *'phase'*,
        *'physics'*, or *'algorithm'* (abbreviations like *'geom'* also
        work). If empty or omitted, all objects are removed.
    """
    # NOTE: the previous signature used a mutable default (objtype=[]);
    # None is used as the sentinel instead. Callers passing [] behave
    # identically.
    if not objtype:
        super().clear()
        return
    names = [obj.name for obj in self]
    for name in names:
        try:
            obj = self[name]
            for t in objtype:
                if obj._isa(t):
                    self.purge_object(obj)
        except KeyError:
            # object already removed, e.g. purged alongside an earlier one
            pass
r""" Clears objects from the project entirely or selectively, depending on the received arguments. Parameters ---------- objtype : list of strings A list containing the object type(s) to be removed. If no types are specified, then all objects are removed. To clear only objects of a specific type, use *'network'*, *'geometry'*, *'phase'*, *'physics'*, or *'algorithm'*. It's also possible to use abbreviations, like *'geom'*.
def create_user(self, claims):
    """Create the user if it doesn't exist yet.

    Args:
        claims (dict): claims from the access token

    Returns:
        django.contrib.auth.models.User: A Django user
    """
    username = claims[settings.USERNAME_CLAIM]
    usermodel = get_user_model()
    user, created = usermodel.objects.get_or_create(**{
        usermodel.USERNAME_FIELD: username,
    })
    if created or not user.password:
        # SSO-provisioned users must not be able to log in locally.
        # NOTE(review): set_unusable_password only mutates the in-memory
        # instance; confirm whether a user.save() is expected here.
        user.set_unusable_password()
    if created:
        # Bug fix: previously this logged "has been created" even for
        # pre-existing users that merely lacked a password.
        logger.debug("User '%s' has been created.", username)
    return user
Create the user if it doesn't exist yet Args: claims (dict): claims from the access token Returns: django.contrib.auth.models.User: A Django user
def NewFromJSON(data):
    """Create a new Shake instance from a JSON dict.

    Args:
        data (dict): JSON dictionary representing a Shake.

    Returns:
        A Shake instance.
    """
    shake = Shake(
        id=data.get('id'),
        name=data.get('name'),
        url=data.get('url'),
        thumbnail_url=data.get('thumbnail_url'),
        description=data.get('description'),
        type=data.get('type'),
        created_at=data.get('created_at'),
        updated_at=data.get('updated_at'))
    owner_data = data.get('owner')
    if owner_data:
        shake.owner = User.NewFromJSON(owner_data)
    return shake
Create a new Shake instance from a JSON dict. Args: data (dict): JSON dictionary representing a Shake. Returns: A Shake instance.
def from_design_day_properties(cls, name, day_type, location, analysis_period,
                               dry_bulb_max, dry_bulb_range, humidity_type,
                               humidity_value, barometric_p, wind_speed, wind_dir,
                               sky_model, sky_properties):
    """Create a design day object from various key properties.

    Args:
        name: A text string to set the name of the design day.
        day_type: Choose from 'SummerDesignDay', 'WinterDesignDay' or other
            EnergyPlus days.
        location: Location for the design day.
        analysis_period: Analysis period for the design day.
        dry_bulb_max: Maximum dry bulb temperature over the design day (in C).
        dry_bulb_range: Dry bulb range over the design day (in C).
        humidity_type: Type of humidity to use. Choose from Wetbulb, Dewpoint,
            HumidityRatio, Enthalpy.
        humidity_value: The value of the condition above.
        barometric_p: Barometric pressure in Pa.
        wind_speed: Wind speed over the design day in m/s.
        wind_dir: Wind direction over the design day in degrees.
        sky_model: Type of solar model to use. Choose from ASHRAEClearSky,
            ASHRAETau.
        sky_properties: A list of properties describing the sky above.
            For ASHRAEClearSky this is a single value for clearness.
            For ASHRAETau, this is the tau_beam and tau_diffuse.

    Raises:
        ValueError: If ``sky_model`` is not a recognized model name.
    """
    dry_bulb_condition = DryBulbCondition(
        dry_bulb_max, dry_bulb_range)
    humidity_condition = HumidityCondition(
        humidity_type, humidity_value, barometric_p)
    wind_condition = WindCondition(
        wind_speed, wind_dir)
    if sky_model == 'ASHRAEClearSky':
        sky_condition = OriginalClearSkyCondition.from_analysis_period(
            analysis_period, sky_properties[0])
    elif sky_model == 'ASHRAETau':
        sky_condition = RevisedClearSkyCondition.from_analysis_period(
            analysis_period, sky_properties[0], sky_properties[-1])
    else:
        # Bug fix: an unknown sky_model previously fell through and raised an
        # opaque UnboundLocalError on the return statement below.
        raise ValueError(
            "sky_model must be 'ASHRAEClearSky' or 'ASHRAETau', "
            "got {!r}".format(sky_model))
    return cls(name, day_type, location, dry_bulb_condition,
               humidity_condition, wind_condition, sky_condition)
Create a design day object from various key properties. Args: name: A text string to set the name of the design day day_type: Choose from 'SummerDesignDay', 'WinterDesignDay' or other EnergyPlus days location: Location for the design day analysis_period: Analysis period for the design day dry_bulb_max: Maximum dry bulb temperature over the design day (in C). dry_bulb_range: Dry bulb range over the design day (in C). humidity_type: Type of humidity to use. Choose from Wetbulb, Dewpoint, HumidityRatio, Enthalpy humidity_value: The value of the condition above. barometric_p: Barometric pressure in Pa. wind_speed: Wind speed over the design day in m/s. wind_dir: Wind direction over the design day in degrees. sky_model: Type of solar model to use. Choose from ASHRAEClearSky, ASHRAETau sky_properties: A list of properties describing the sky above. For ASHRAEClearSky this is a single value for clearness For ASHRAETau, this is the tau_beam and tau_diffuse
def get_query(self, query_params=None):
    """Return a URL query string for an API request.

    Adds ``idSite`` (and ``token_auth`` when an API token is set) to the
    parameters; list values are joined with commas.

    :param query_params: optional dict of parameters. Note: the dict is
        mutated in place (``idSite``/``token_auth`` are added), matching
        the original behavior.
    :return: the query string without a leading '?'
    """
    if query_params is None:
        query_params = {}
    query_params['idSite'] = self.site_id
    if self.api_token is not None:
        query_params['token_auth'] = self.api_token
    parts = []
    # Bug fix: dict.iteritems() is Python 2 only; items() works on both.
    # Also builds the string with join instead of quadratic concatenation,
    # which drops the trailing '&' trimming.
    for key, value in query_params.items():
        if isinstance(value, list):
            value = ','.join(value)
        parts.append('{}={}'.format(key, value))
    return '&'.join(parts)
Return a query string
def to_type(cls, typename):
    """Convert a type name to its type ID; return None when unknown."""
    # Invert the TYPE_NAMES mapping (id -> name becomes name -> id).
    inverse = {name: type_id for type_id, name in cls.TYPE_NAMES.items()}
    return inverse.get(typename)
Converts a type name to its type ID. Returns None if the name is unknown
def MAH(z, zi, Mi, **cosmo):
    """Calculate mass accretion history by looping ``acc_rate`` over the
    redshift steps 'z' for a halo of mass 'Mi' at redshift 'zi'.

    Parameters
    ----------
    z : float / numpy array
        Redshift(s) to output the MAH over. Note zi < z always.
    zi : float
        Redshift of the initial halo.
    Mi : float
        Halo mass at redshift 'zi'.
    cosmo : dict
        Dictionary of cosmological parameters.

    Returns
    -------
    (dMdt, Mz) : float / numpy arrays of the same size as 'z'
        Accretion rate [Msol/yr] and halo mass [Msol] at redshift 'z'.
    """
    redshifts = np.array(z, ndmin=1, dtype=float)
    dMdt_out = np.empty_like(redshifts)
    Mz_out = np.empty_like(redshifts)
    for idx, zval in enumerate(redshifts):
        dMdt_out[idx], Mz_out[idx] = acc_rate(zval, zi, Mi, **cosmo)
    return (dMdt_out, Mz_out)
Calculate mass accretion history by looping function acc_rate over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi' Parameters ---------- z : float / numpy array Redshift to output MAH over. Note zi<z always zi : float Redshift Mi : float Halo mass at redshift 'zi' cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- (dMdt, Mz) : float / numpy arrays of equivalent size to 'z' Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
def _generic_format(self, raid_config, controller=None): logical_drives = raid_config["LogicalDrives"] logical_disks = [] controller = controller for ld in logical_drives: prop = {'size_gb': ld['CapacityGiB'], 'raid_level': ld['Raid'].strip('Raid'), 'root_device_hint': { 'wwn': '0x' + ld['VolumeUniqueIdentifier']}, 'controller': controller, 'physical_disks': ld['DataDrives'], 'volume_name': ld['LogicalDriveName']} logical_disks.append(prop) return logical_disks
Convert redfish data of current raid config to generic format. :param raid_config: Raid configuration dictionary :param controller: Array controller model in post_create read else None :returns: current raid config.
def _hasExplicitOid(store, table): return any(info[1] == 'oid' for info in store.querySchemaSQL( 'PRAGMA *DATABASE*.table_info({})'.format(table)))
Does the given table have an explicit oid column?
def do_reset_ids(concatenated_meta_df, data_df, concat_direction):
    """Reset ids in concatenated metadata and data dfs to unique integers and
    save the old ids in a metadata column. Note that the dataframes are
    modified in-place.

    Args:
        concatenated_meta_df (pandas df)
        data_df (pandas df)
        concat_direction (string): 'horiz' or 'vert'

    Returns:
        None (dfs modified in-place)

    Raises:
        ValueError: if concat_direction is not 'horiz' or 'vert'.
    """
    if concat_direction == "horiz":
        assert concatenated_meta_df.index.equals(data_df.columns), (
            "cids in concatenated_meta_df do not agree with cids in data_df.")
        reset_ids_in_meta_df(concatenated_meta_df)
        data_df.columns = pd.Index(concatenated_meta_df.index.values)
    elif concat_direction == "vert":
        assert concatenated_meta_df.index.equals(data_df.index), (
            "rids in concatenated_meta_df do not agree with rids in data_df.")
        reset_ids_in_meta_df(concatenated_meta_df)
        data_df.index = pd.Index(concatenated_meta_df.index.values)
    else:
        # Bug fix: an unrecognized direction used to be silently ignored,
        # leaving the dataframes untouched with no indication of the mistake.
        raise ValueError("concat_direction must be 'horiz' or 'vert', "
                         "got {!r}".format(concat_direction))
Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column. Note that the dataframes are modified in-place. Args: concatenated_meta_df (pandas df) data_df (pandas df) concat_direction (string): 'horiz' or 'vert' Returns: None (dfs modified in-place)
def send_frame(self, frame):
    """Send the data frame and return the number of bytes written.

    frame: frame data created by ABNF.create_frame

    >>> ws = create_connection("ws://echo.websocket.org/")
    >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
    >>> ws.send_frame(frame)
    """
    if self.get_mask_key:
        frame.get_mask_key = self.get_mask_key
    data = frame.format()
    length = len(data)
    trace("send: " + repr(data))
    # Write under the lock; _send may accept fewer bytes than offered,
    # so keep sending the remainder until everything is out.
    with self.lock:
        remaining = data
        while remaining:
            sent = self._send(remaining)
            remaining = remaining[sent:]
    return length
Send the data frame. frame: frame data created by ABNF.create_frame >>> ws = create_connection("ws://echo.websocket.org/") >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT) >>> ws.send_frame(frame) >>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0) >>> ws.send_frame(frame) >>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1) >>> ws.send_frame(frame)
def serialize_seeds(seeds, block):
    """Serialize the seeds in a peer instruction XBlock to XML.

    Args:
        seeds (lxml.etree.Element): The <seeds> XML element.
        block (PeerInstructionXBlock): The XBlock with configuration to
            serialize.

    Returns:
        None
    """
    for seed_dict in block.seeds:
        seed_elem = etree.SubElement(seeds, 'seed')
        # Stored answers are 0-based; the XML 'option' attribute is 1-based.
        option_number = seed_dict.get('answer', 0) + 1
        seed_elem.set('option', unicode(option_number))
        seed_elem.text = seed_dict.get('rationale', '')
Serialize the seeds in peer instruction XBlock to xml Args: seeds (lxml.etree.Element): The <seeds> XML element. block (PeerInstructionXBlock): The XBlock with configuration to serialize. Returns: None
def keys_to_values(self, keys):
    """Return the items in the keystore whose keys appear in `keys`."""
    result = {}
    for key, value in self.data.items():
        if key in keys:
            result[key] = value
    return result
Return the items in the keystore with keys in `keys`.
def copy(self):
    """Make a copy of self and return.

    :return: A new ValueSet object.
    :rtype: ValueSet
    """
    duplicate = ValueSet(bits=self.bits)
    # Shallow-copy the internal containers so the copies diverge safely.
    duplicate._regions = self._regions.copy()
    duplicate._region_base_addrs = self._region_base_addrs.copy()
    duplicate._reversed = self._reversed
    duplicate._si = self._si.copy()
    return duplicate
Make a copy of self and return. :return: A new ValueSet object. :rtype: ValueSet
def visit_BoolOp(self, node):
    """Return type may come from any boolop operand, so concatenate the
    visit results of every operand into one list."""
    combined = []
    for operand in node.values:
        combined.extend(self.visit(operand))
    return combined
Return type may come from any boolop operand.
def dimers(primer1, primer2, concentrations=(5e-7, 3e-11)):
    """Calculate expected fraction of primer dimers.

    :param primer1: Forward primer.
    :type primer1: coral.DNA
    :param primer2: Reverse primer.
    :type primer2: coral.DNA
    :param concentrations: concentrations for the primers and the template,
        in that order. Defaults are those for PCR with 1kb template.
        (Bug fix: the default used to be a mutable list; a tuple is
        equivalent here and cannot be accidentally shared/mutated.)
    :type concentrations: sequence of two floats
    :returns: Fraction of dimers versus the total amount of primer added.
    :rtype: float
    """
    nupack = coral.analysis.NUPACK([primer1.primer(),
                                    primer2.primer(),
                                    primer1.primer().reverse_complement(),
                                    primer2.primer().reverse_complement()])
    # Both primers share one concentration; both template strands the other.
    primer_concs = [concentrations[0]] * 2
    template_concs = [concentrations[1]] * 2
    concs = primer_concs + template_concs
    nupack_concs = nupack.concentrations(2, conc=concs)
    dimer_conc = nupack_concs['concentrations'][5]
    return dimer_conc / concs[0]
Calculate expected fraction of primer dimers. :param primer1: Forward primer. :type primer1: coral.DNA :param primer2: Reverse primer. :type primer2: coral.DNA :param template: DNA template. :type template: coral.DNA :param concentrations: list of concentrations for primers and the template. Defaults are those for PCR with 1kb template. :type concentrations: list :returns: Fraction of dimers versus the total amount of primer added. :rtype: float
def _handle_entity(self, token):
    """Handle a case where an HTML entity is at the head of the tokens.

    Consumes the entity's remaining tokens from ``self._tokens`` and
    returns the corresponding HTMLEntity node.
    """
    # The incoming ``token`` argument is immediately overwritten: pop the
    # next token to decide between a numeric and a named entity.
    token = self._tokens.pop()
    if isinstance(token, tokens.HTMLEntityNumeric):
        # Numeric entity: the next token is either the hex marker or the
        # text token holding the digits.
        token = self._tokens.pop()
        if isinstance(token, tokens.HTMLEntityHex):
            text = self._tokens.pop()
            self._tokens.pop()  # discard the entity-end token
            return HTMLEntity(text.text, named=False, hexadecimal=True,
                              hex_char=token.char)
        self._tokens.pop()  # discard the entity-end token
        return HTMLEntity(token.text, named=False, hexadecimal=False)
    self._tokens.pop()  # discard the entity-end token
    return HTMLEntity(token.text, named=True, hexadecimal=False)
Handle a case where an HTML entity is at the head of the tokens.
def unlink_parent_dir(path: Path) -> None:
    """Remove a file and, if its parent directory is then empty, remove the
    directory as well."""
    logger.info(f"unlink {str(path)}")
    path.unlink()
    parent = path.parent
    try:
        parent.rmdir()
    except OSError as oe:
        # Directory not empty (or otherwise not removable) — best effort only.
        logger.debug(f"Did not remove {str(parent)}: {str(oe)}")
    else:
        logger.info(f"rmdir {str(parent)}")
Remove a file and if the dir is empty remove it
def validate_meta(meta):
    """Validate a model's Linguist ``Meta`` attribute.

    :param meta: the ``linguist`` meta dict declared on the model
    :raises TypeError: if ``meta`` is not a dict
    :raises KeyError: if a required key ('identifier' or 'fields') is missing
    :raises ImproperlyConfigured: if ``fields`` is not a list or tuple
    """
    if not isinstance(meta, dict):
        raise TypeError('Model Meta "linguist" must be a dict')
    required_keys = ("identifier", "fields")
    for key in required_keys:
        if key not in meta:
            # Bug fix: the message and key were previously passed as two
            # separate arguments, so the '%s' was never interpolated.
            raise KeyError(
                'Model Meta "linguist" dict requires %s to be defined' % key)
    if not isinstance(meta["fields"], (list, tuple)):
        raise ImproperlyConfigured(
            "Linguist Meta's fields attribute must be a list or tuple"
        )
Validates Linguist Meta attribute.
def _consume_add_and_get_tag(self, consume_rpc_result): consumer_tag = consume_rpc_result['consumer_tag'] self._channel.add_consumer_tag(consumer_tag) return consumer_tag
Add the tag to the channel and return it. :param dict consume_rpc_result: :rtype: str
def process_post_category(self, bulk_mode, api_category):
    """Create or update a Category related to a post.

    :param bulk_mode: If True, minimize db operations by consulting the
        ref_data_map cache before hitting the database.
    :param api_category: the API data for the Category
    :return: the Category object, or None if it could not be obtained
    """
    if bulk_mode:
        category = self.ref_data_map["categories"].get(api_category["ID"])
        if category:
            # Bug fix: this path previously fell through to a check of the
            # unbound ``created`` variable and raised NameError.
            return category
    category, created = Category.objects.get_or_create(
        site_id=self.site_id,
        wp_id=api_category["ID"],
        defaults=self.api_object_data("category", api_category))
    if category and not created:
        # Existing row: refresh it with the latest API data.
        self.update_existing_category(category, api_category)
    if category:
        self.ref_data_map["categories"][api_category["ID"]] = category
    return category
Create or update a Category related to a post. :param bulk_mode: If True, minimize db operations by bulk creating post objects :param api_category: the API data for the Category :return: the Category object
def ij_jlk_to_ilk(A, B):
    """Faster version of einsum 'ij,jlk->ilk': collapse B's trailing axes,
    matrix-multiply, then restore the 3-D shape."""
    j, l, k = B.shape
    flattened = B.reshape(j, l * k)
    return A.dot(flattened).reshape(A.shape[0], l, k)
Faster version of einsum 'ij,jlk->ilk'
def get_validator_change(cls, bigchain):
    """Return the validator set change from the most recent approved block,
    or None when no block exists yet.

    :return: {
        'height': <block_height>,
        'validators': <validator_set>
    }
    """
    latest = bigchain.get_latest_block()
    return None if latest is None else bigchain.get_validator_change(latest['height'])
Return the validator set from the most recent approved block :return: { 'height': <block_height>, 'validators': <validator_set> }
def empty(self):
    """Indicator whether the frame is empty.

    True if any axis is of length 0 (i.e. the frame holds no items).
    Note a frame containing only NaNs is NOT considered empty.

    Returns
    -------
    bool
    """
    for axis in self._AXIS_ORDERS:
        if len(self._get_axis(axis)) == 0:
            return True
    return False
Indicator whether DataFrame is empty. True if DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If DataFrame is empty, return True, if not return False. See Also -------- Series.dropna DataFrame.dropna Notes ----- If DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True
def _model_to_dict(obj):
    """Convert object model to dict.

    Args:
        obj: Object model.

    Returns:
        dict: Converted model.
    """
    result = _properties_model_to_dict(obj.properties)
    # Optional attributes: include them only when present and truthy.
    for attribute in ('metadata', 'snapshot'):
        value = getattr(obj, attribute, None)
        if value:
            result[attribute] = value
    return result
Convert object model to dict. Args: obj: Object model. Returns: dict: Converted model.
def backend_routing(self, context):
    """Return the targeted backend module and remember which frontend made
    the request.

    :type context: satosa.context.Context
    :rtype: satosa.backends.base.BackendModule
    :param context: The request context
    :return: backend
    """
    satosa_logging(logger, logging.DEBUG,
                   "Routing to backend: %s " % context.target_backend,
                   context.state)
    backend_instance = self.backends[context.target_backend]["instance"]
    # Stash the originating frontend so the response can be routed back.
    context.state[STATE_KEY] = context.target_frontend
    return backend_instance
Returns the targeted backend and an updated state :type context: satosa.context.Context :rtype satosa.backends.base.BackendModule :param context: The request context :return: backend
def get_api_field_data(self):
    """Field data to serialize for use on the front-end side; for example it
    will include the widget's input type when the widget defines one."""
    field = self.setup_field()
    return {
        'class': field.__class__.__name__,
        'widget': {
            'class': field.widget.__class__.__name__
        },
        # Some widgets (e.g. composite ones) define no input_type.
        'input_type': getattr(field.widget, 'input_type', None),
    }
Field data to serialize for use on front-end side, for example will include choices available for a choice field
def __update_throughput(table_name, key_name, read_units, write_units):
    """Update throughput on the DynamoDB table.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    """
    # Cleanup: the previous ``try: ... except JSONResponseError: raise``
    # wrappers only re-raised the same exception, which is identical to not
    # catching it at all; the exception still propagates to the caller.
    current_ru = dynamodb.get_provisioned_table_read_units(table_name)
    current_wu = dynamodb.get_provisioned_table_write_units(table_name)
    table_status = dynamodb.get_table_status(table_name)
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        # Throughput can only be changed while the table is ACTIVE.
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return
    if get_table_option(key_name, 'always_decrease_rw_together'):
        read_units, write_units = __calculate_always_decrease_rw_values(
            table_name, read_units, current_ru, write_units, current_wu)
    if read_units == current_ru and write_units == current_wu:
        logger.info('{0} - No changes to perform'.format(table_name))
        return
    dynamodb.update_table_provisioning(
        table_name, key_name, int(read_units), int(write_units))
Update throughput on the DynamoDB table :type table_name: str :param table_name: Name of the DynamoDB table :type key_name: str :param key_name: Configuration option key name :type read_units: int :param read_units: New read unit provisioning :type write_units: int :param write_units: New write unit provisioning
def num_species(self):
    """Return the number of distinct species (sequence names) found over
    all records.

    Bug fix: the previous ``reduce(lambda x, y: set(x) | set(y), ...)``
    returned the raw (possibly duplicated) name list unchanged when there
    was exactly one record, and raised TypeError when there were none.
    Names are now always deduplicated and an empty record list yields 0.
    """
    all_names = set()
    for rec in self.records:
        all_names.update(rec.get_names())
    return len(all_names)
Returns the number of species found over all records
def consumer_partitions_for_topic(consumer, topic):
    """Return a list of all TopicPartitions for a given topic.

    Arguments:
        consumer: an initialized KafkaConsumer
        topic: a topic name to fetch TopicPartitions for

    :returns: list(TopicPartition): the TopicPartitions belonging to the
        topic (empty when the topic has no partitions / does not exist)
    """
    partitions = consumer.partitions_for_topic(topic)
    if partitions is None:
        logging.error(
            "No partitions found for topic {}. Maybe it doesn't exist?".format(topic),
        )
        return []
    return [TopicPartition(topic, partition) for partition in partitions]
Returns a list of all TopicPartitions for a given topic. Arguments: consumer: an initialized KafkaConsumer topic: a topic name to fetch TopicPartitions for :returns: list(TopicPartition): A list of TopicPartitions that belong to the given topic
def _config():
    """Get configuration items for URL, Username and Password.

    Each item is looked up under both the dotted and colon-separated key
    forms; raises CommandExecutionError when no status URL is configured.
    """
    def _get_option(dot_key, colon_key):
        # First truthy value wins; falls back to the colon-form lookup.
        return (__salt__['config.get'](dot_key) or
                __salt__['config.get'](colon_key))

    status_url = _get_option('nagios.status_url', 'nagios:status_url')
    if not status_url:
        raise CommandExecutionError('Missing Nagios URL in the configuration.')
    return {
        'url': status_url,
        'username': _get_option('nagios.username', 'nagios:username'),
        'password': _get_option('nagios.password', 'nagios:password'),
    }
Get configuration items for URL, Username and Password
def scramble_string(self, length):
    """Return a random string of the requested length."""
    if length > 5:
        return fake.text(length)
    # Short strings: fake.text has a minimum length, so build letter by letter.
    return ''.join(fake.random_letter() for _ in range(length))
Return random string
def filter_embeddings(embeddings, vocab, dim):
    """Build an embedding matrix restricted to the given vocabulary.

    Args:
        embeddings (dict): a dictionary mapping word -> numpy array.
        vocab (dict): word_index lookup table.
        dim (int): dimensionality of the embedding vectors.

    Returns:
        numpy array: an array of word embeddings (rows for out-of-embedding
        words stay zero), or None when ``embeddings`` is not a dict.
    """
    if not isinstance(embeddings, dict):
        return None
    matrix = np.zeros([len(vocab), dim])
    for word, word_idx in vocab.items():
        if word in embeddings:
            matrix[word_idx] = embeddings[word]
    return matrix
Loads word vectors into a numpy array restricted to the given vocabulary. Args: embeddings (dict): a dictionary mapping words to numpy arrays. vocab (dict): word_index lookup table. dim (int): dimensionality of the embedding vectors. Returns: numpy array: an array of word embeddings, or None if ``embeddings`` is not a dict.