code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def expireat(key, timestamp, host=None, port=None, db=None, password=None):
    '''
    Set a key's expiration to the given UNIX timestamp.

    CLI Example:

    .. code-block:: bash

        salt '*' redis.expireat foo 1400000000
    '''
    # Delegate to the redis client's EXPIREAT command.
    conn = _connect(host, port, db, password)
    return conn.expireat(key, timestamp)
Set a key's expiry at a given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000
def secret_hex(self, secret_hex):
    """Sets the secret_hex of this PreSharedKey.

    The secret of the pre-shared key in hexadecimal. It is not case
    sensitive; 4a is the same as 4A, and it is allowed with or without 0x
    in the beginning. The minimum length of the secret is 128 bits and
    maximum 256 bits.

    :param secret_hex: The secret_hex of this PreSharedKey.
    :type: str
    :raises ValueError: if the value is None or not valid hexadecimal.
    """
    if secret_hex is None:
        raise ValueError("Invalid value for `secret_hex`, must not be `None`")
    # The previous `secret_hex is not None` guard here was redundant:
    # the check above already raised for None.
    if not re.search('^(0[xX])?[0-9a-fA-F]{32,64}$', secret_hex):
        raise ValueError("Invalid value for `secret_hex`, must be a follow pattern or equal to `/^(0[xX])?[0-9a-fA-F]{32,64}$/`")
    self._secret_hex = secret_hex
Sets the secret_hex of this PreSharedKey. The secret of the pre-shared key in hexadecimal. It is not case sensitive; 4a is same as 4A, and it is allowed with or without 0x in the beginning. The minimum length of the secret is 128 bits and maximum 256 bits. :param secret_hex: The secret_hex of this PreSharedKey. :type: str
def parameterize_path(path, parameters):
    """Format a path with a provided dictionary of parameters.

    Parameters
    ----------
    path : string
        Path with optional parameters, as a python format string
    parameters : dict
        Arbitrary keyword arguments to fill in the path
    """
    mapping = {} if parameters is None else parameters
    try:
        return path.format(**mapping)
    except KeyError as key_error:
        raise PapermillMissingParameterException(
            "Missing parameter {}".format(key_error))
Format a path with a provided dictionary of parameters Parameters ---------- path : string Path with optional parameters, as a python format string parameters : dict Arbitrary keyword arguments to fill in the path
def discrete_index(self, indices):
    """Get elements by discrete indices.

    :param indices: list of discrete indices
    :return: list of the elements at those indices
    """
    return [self[position] for position in indices]
get elements by discrete indices :param indices: list discrete indices :return: elements
def del_cookie(self, name: str, *, domain: Optional[str]=None, path: str='/') -> None:
    """Delete cookie.

    Creates a new empty, already-expired cookie so clients drop it.
    """
    # Drop any pending Set-Cookie for this name first.
    self._cookies.pop(name, None)
    # An empty value with max_age=0 and an epoch expiry forces deletion.
    self.set_cookie(
        name,
        '',
        max_age=0,
        expires="Thu, 01 Jan 1970 00:00:00 GMT",
        domain=domain,
        path=path,
    )
Delete cookie. Creates new empty expired cookie.
def modules_and_args(modules=True, states=False, names_only=False):
    """Walk the Salt install tree and return the functions therein as well
    as their arguments.

    :param modules: Walk the modules directory if True
    :param states: Walk the states directory if True
    :param names_only: Return only a sorted list of the callable functions
        instead of a dictionary with arguments
    :return: OrderedDict of name -> argument list, or a sorted name list
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    state_dir = os.path.join(os.path.dirname(module_dir), 'states')
    search_dirs = []
    if modules:
        search_dirs.append(module_dir)
    if states:
        search_dirs.append(state_dir)
    found = _mods_with_args(search_dirs)
    if names_only:
        return sorted(found.keys())
    return OrderedDict(sorted(found.items()))
Walk the Salt install tree and return a dictionary or a list of the functions therein as well as their arguments. :param modules: Walk the modules directory if True :param states: Walk the states directory if True :param names_only: Return only a list of the callable functions instead of a dictionary with arguments :return: An OrderedDict with callable function names as keys and lists of arguments as values (if ``names_only``==False) or simply an ordered list of callable function nanes (if ``names_only``==True). CLI Example: (example truncated for brevity) .. code-block:: bash salt myminion baredoc.modules_and_args myminion: ---------- [...] at.atrm: at.jobcheck: at.mod_watch: - name at.present: - unique_tag - name - timespec - job - tag - user at.watch: - unique_tag - name - timespec - job - tag - user [...]
def common_items_ratio(pronac, dt):
    """Verify whether a project is an outlier in terms of common items.

    Calculates the common items on projects in a cultural segment,
    collects the project's uncommon items, and flags the project as an
    outlier when its common-items ratio falls below the segment norm.

    :param pronac: project identifier (PRONAC).
    :param dt: unused here; kept for the metric-function call convention.
    :return: dict with outlier flag, ratio, expected mean/std and item details.
    """
    segment_id = get_segment_id(str(pronac))
    # Per-segment statistics (mean/std of the common-items ratio).
    metrics = data.common_items_metrics.to_dict(orient='index')[segment_id]
    ratio = common_items_percentage(pronac, segment_common_items(segment_id))
    # Projects below mean - 1.5 * std are flagged as outliers.
    k = 1.5
    threshold = metrics['mean'] - k * metrics['std']
    uncommon_items = get_uncommon_items(pronac)
    # Rows of this project that use one of its uncommon items.
    pronac_filter = data.all_items['PRONAC'] == pronac
    uncommon_items_filter = (
        data.all_items['idPlanilhaItens']
        .isin(uncommon_items)
    )
    items_filter = (pronac_filter & uncommon_items_filter)
    filtered_items = (
        data
        .all_items[items_filter]
        .drop_duplicates(subset='idPlanilhaItens')
    )
    uncommon_items = add_info_to_uncommon_items(filtered_items, uncommon_items)
    return {
        'is_outlier': ratio < threshold,
        'valor': ratio,
        'maximo_esperado': metrics['mean'],
        'desvio_padrao': metrics['std'],
        'items_incomuns': uncommon_items,
        'items_comuns_que_o_projeto_nao_possui': get_common_items_not_present(pronac),
    }
Calculates the common items on projects in a cultural segment, calculates the uncommon items on projects in a cultural segment and verify if a project is an outlier compared to the other projects in his segment.
def extend(self, elts):
    """Adds elts to the tasks.

    Args:
        elts (Sequence): an iterable of elements that can be appended to
            the task's bundle_field.

    Returns:
        Event: an event that can be used to wait on the response.
    """
    # Copy so later caller mutations don't affect the queued batch.
    batch = elts[:]
    self._in_deque.append(batch)
    pending_event = self._event_for(batch)
    self._event_deque.append(pending_event)
    return pending_event
Adds elts to the tasks. Args: elts (Sequence): a iterable of elements that can be appended to the task's bundle_field. Returns: Event: an event that can be used to wait on the response.
def local_manager_rule(self):
    """Return rule for local manager, or None when no gid is set."""
    gid = self.local_manager_gid
    if not gid:
        return None
    settings = self.root['settings']['ugm_localmanager'].attrs
    return settings[gid]
Return rule for local manager.
def vote_random(candidates, votes, n_winners):
    """Select random winners from the candidates.

    This voting method bypasses the given votes completely.

    :param candidates: All candidates in the vote
    :param votes: Votes from the agents (ignored)
    :param int n_winners: The number of vote winners
    """
    pool = list(candidates)
    shuffle(pool)
    winners = pool[:min(n_winners, len(pool))]
    # Every winner gets a neutral 0.0 score.
    return [(candidate, 0.0) for candidate in winners]
Select random winners from the candidates. This voting method bypasses the given votes completely. :param candidates: All candidates in the vote :param votes: Votes from the agents :param int n_winners: The number of vote winners
def stop_all(self):
    """Stop all nodes.

    Queues every node's stop routine into a pool so that at most three
    stops run concurrently, then yields until all of them complete.
    """
    pool = Pool(concurrency=3)
    for node in self.nodes.values():
        pool.append(node.stop)
    yield from pool.join()
Stop all nodes
def load_extra(cls, filename):
    """Loads extra JSON configuration parameters from a file on the
    filesystem.

    Args:
        filename: str, the filename to open.

    Returns:
        bool: True if the extra configuration parameters were read.
    """
    try:
        with open(filename, 'rb') as configuration_file:
            cls.load_extra_data(configuration_file.read())
        # Report success on stderr so it doesn't pollute stdout.
        sys.stderr.write("Config successfully loaded from {0:s}\n".format(
            filename))
        return True
    except IOError:
        return False
Loads extra JSON configuration parameters from a file on the filesystem. Args: filename: str, the filename to open. Returns: bool: True if the extra configuration parameters were read.
def forward(self):
    """Go forward in history if possible.

    Return the current item after going forward, or None when already at
    the newest entry.
    """
    at_newest = self._index >= len(self._history) - 1
    if at_newest:
        return None
    self._index += 1
    self._check_index()
    return self.current_item
Go forward in history if possible. Return the current item after going forward.
def parse_args(self, ctx, args):
    """Check if the first argument is an existing command.

    When it is, an empty placeholder is prepended so the group's normal
    parsing treats the token as a subcommand.
    """
    starts_with_command = bool(args) and args[0] in self.commands
    if starts_with_command:
        args.insert(0, '')
    super(OptionalGroup, self).parse_args(ctx, args)
Check if the first argument is an existing command.
def merge(dest, src, merge_lists=False, in_place=True):
    """defaults.merge — deep merge of dicts in formulas.

    merge_lists : False
        If True, it will also merge lists instead of replacing their items.
    in_place : True
        If True, it will merge into dest dict; if not, it will make a new
        copy from that dict and return it.

    CLI Example:

    .. code-block:: bash

        salt '*' default.merge a=b d=e
    """
    target = dest if in_place else copy.deepcopy(dest)
    return dictupdate.update(target, src, merge_lists=merge_lists)
defaults.merge Allows deep merging of dicts in formulas. merge_lists : False If True, it will also merge lists instead of replace their items. in_place : True If True, it will merge into dest dict, if not it will make a new copy from that dict and return it. CLI Example: .. code-block:: bash salt '*' default.merge a=b d=e It is more typical to use this in a templating language in formulas, instead of directly on the command-line.
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
    """Returns the induced subgraph where only the nodes from the given
    sets are included."""
    kept_edges = [
        ((u, v), value)
        for (u, v), value in self._edges.items()
        if u in left and v in right
    ]
    return BipartiteGraph(kept_edges)
Returns the induced subgraph where only the nodes from the given sets are included.
def samefile(path1, path2):
    """Return True if both pathname arguments refer to the same file or
    directory. Equivalent to "os.path.samefile".

    Args:
        path1 (path-like object): Path or URL.
        path2 (path-like object): Path or URL.

    Returns:
        bool: True if same file or directory.
    """
    path1, path1_is_storage = format_and_is_storage(path1)
    path2, path2_is_storage = format_and_is_storage(path2)
    # Both local paths: defer to the OS implementation.
    if not path1_is_storage and not path2_is_storage:
        return os_path_samefile(path1, path2)
    # One local, one storage: cannot be the same object.
    if not path1_is_storage or not path2_is_storage:
        return False
    with handle_os_exceptions():
        system = get_instance(path1)
        if system is not get_instance(path2):
            return False
        return system.relpath(path1) == system.relpath(path2)
Return True if both pathname arguments refer to the same file or directory. Equivalent to "os.path.samefile". Args: path1 (path-like object): Path or URL. path2 (path-like object): Path or URL. Returns: bool: True if same file or directory.
def get_change_price(self, plan_old, plan_new, period):
    """Calculates total price of plan change.

    Returns None if no payment is required (invalid period, or the new
    plan does not cost more per day than the old one).
    """
    if period is None or period < 1:
        return None
    old_daily = self._calculate_day_cost(plan_old, period)
    new_daily = self._calculate_day_cost(plan_new, period)
    daily_difference = new_daily - old_daily
    if daily_difference <= 0:
        return self._calculate_final_price(period, None)
    return self._calculate_final_price(period, daily_difference)
Calculates total price of plan change. Returns None if no payment is required.
def truncated_normal_expval(mu, tau, a, b):
    """Expected value of the truncated normal distribution.

    :Parameters:
      - `mu` : Mean of the distribution.
      - `tau` : Precision of the distribution (1/sigma**2, tau > 0).
      - `a` : Left bound of the distribution.
      - `b` : Right bound of the distribution.
    """
    # Standard normal pdf values at the bounds.
    phia = np.exp(normal_like(a, mu, tau))
    phib = np.exp(normal_like(b, mu, tau))
    sigma = 1. / np.sqrt(tau)
    # CDF at the bounds; an infinite upper bound has CDF exactly 1.
    Phia = utils.normcdf((a - mu) / sigma)
    Phib = 1.0 if b == np.inf else utils.normcdf((b - mu) / sigma)
    return (mu + (phia - phib) / (Phib - Phia))[0]
Expected value of the truncated normal distribution. .. math:: E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T} where .. math:: T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi \left(\frac{A-\mu}{\sigma}\right) \\ \varphi_1 &= \varphi\left(\frac{A-\mu}{\sigma}\right) \\ \varphi_2 &= \varphi\left(\frac{B-\mu}{\sigma}\right) \\ and :math:`\varphi = N(0,1)` and :math:`\tau = 1/\sigma^2`. :Parameters: - `mu` : Mean of the distribution. - `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0). - `a` : Left bound of the distribution. - `b` : Right bound of the distribution.
def print_evaluation(period=1, show_stdv=True):
    """Create a callback that prints evaluation results.

    We print the evaluation results every **period** iterations and on
    the first and the last iterations.

    Parameters
    ----------
    period : int
        The period to log the evaluation results
    show_stdv : bool, optional
        Whether show stdv if provided

    Returns
    -------
    callback : function
        A callback that print evaluation every period iterations.
    """
    def callback(env):
        """Print the evaluation result list for eligible iterations."""
        if env.rank != 0 or (not env.evaluation_result_list) or period is False or period == 0:
            return
        iteration = env.iteration
        due = (iteration % period == 0
               or iteration + 1 == env.begin_iteration
               or iteration + 1 == env.end_iteration)
        if due:
            message = '\t'.join(
                [_fmt_metric(result, show_stdv)
                 for result in env.evaluation_result_list])
            rabit.tracker_print('[%d]\t%s\n' % (iteration, message))
    return callback
Create a callback that print evaluation result. We print the evaluation results every **period** iterations and on the first and the last iterations. Parameters ---------- period : int The period to log the evaluation results show_stdv : bool, optional Whether show stdv if provided Returns ------- callback : function A callback that print evaluation every period iterations.
def checkInstalledBrew(package, similar=True, speak=True, speakSimilar=True):
    """Checks if a given package is installed on homebrew.

    :return: tuple of (installed, list of similarly named packages)
    """
    brew_list = subprocess.check_output(['brew', 'list']).split()
    installed = package in brew_list
    # Only look for near matches when the exact package is missing.
    similar = [] if installed else [pkg for pkg in brew_list if package in pkg]
    if speak:
        speakInstalledPackages(package, "homebrew", installed, similar, speakSimilar)
    return (installed, similar)
checks if a given package is installed on homebrew
def stratified_kfold(df, n_folds):
    """Create stratified k-folds from an indexed dataframe.

    The dataframe is expected to carry a 2-level (group, session) index;
    sessions are shuffled within each group and split evenly across the
    folds so every fold keeps the group proportions.

    :param df: dataframe with a two-level (group, session) index.
    :param n_folds: number of folds to produce.
    :return: list of dataframes, one per fold.
    """
    sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
    sessions.apply(lambda x: np.random.shuffle(x))
    folds = []
    for i in range(n_folds):
        # Bug fix: use integer division for slice bounds; the original
        # `len(x) / n_folds` yields floats, which raise TypeError as
        # slice indices on Python 3.
        idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) // n_folds):(i + 1) * (len(x) // n_folds)]))
        idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
        folds.append(df.loc[idx])
    return folds
Create stratified k-folds from an indexed dataframe
def _get_stddevs(self, C, stddev_types, stddev_shape):
    """Returns the standard deviations given in Table 2.

    :param C: coefficient dict with 'sigtot', 'sig1' and 'sig2' entries.
    :param stddev_types: requested standard deviation types.
    :param stddev_shape: shape of the returned arrays.
    """
    # Map each supported stddev type onto its coefficient key.
    coefficient_for = {
        const.StdDev.TOTAL: "sigtot",
        const.StdDev.INTRA_EVENT: "sig2",
        const.StdDev.INTER_EVENT: "sig1",
    }
    stddevs = []
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        key = coefficient_for.get(stddev_type)
        if key is not None:
            # Broadcast the scalar coefficient to the requested shape.
            stddevs.append(C[key] + np.zeros(stddev_shape))
    return stddevs
Returns the standard deviations given in Table 2
def read_data(self, timeout=10.0):
    """Read blocks of raw PCM data from the file.

    Yields chunks pulled from the stdout reader queue; an empty (falsy)
    chunk signals end of stream. If nothing arrives within `timeout`
    seconds, raises ReadTimeoutError, including any stderr output
    captured from ffmpeg to aid debugging.
    """
    start_time = time.time()
    while True:
        data = None
        try:
            data = self.stdout_reader.queue.get(timeout=timeout)
            if data:
                yield data
            else:
                # Empty sentinel: the producer is done.
                break
        except queue.Empty:
            end_time = time.time()
            if not data:
                if end_time - start_time >= timeout:
                    raise ReadTimeoutError('ffmpeg output: {}'.format(
                        ''.join(self.stderr_reader.queue.queue)
                    ))
                else:
                    # Woke up before the deadline; keep waiting.
                    start_time = end_time
                    continue
Read blocks of raw PCM data from the file.
def parse_template_json(self, template_json):
    """Parse a template from a json.

    :param template_json: json dict
    """
    doc = template_json
    assert doc['type'] == self.DOCUMENT_TYPE, ''
    # Required fields.
    self.template_id = doc['templateId']
    self.template = doc['serviceAgreementTemplate']
    # Optional metadata.
    self.name = doc.get('name')
    self.creator = doc.get('creator')
Parse a template from a json. :param template_json: json dict
def create(self, name, plugin_name, plugin_version, flavor_id, description=None, volumes_per_node=None, volumes_size=None, node_processes=None, node_configs=None, floating_ip_pool=None, security_groups=None, auto_security_group=None, availability_zone=None, volumes_availability_zone=None, volume_type=None, image_id=None, is_proxy_gateway=None, volume_local_to_instance=None, use_autoconfig=None, shares=None, is_public=None, is_protected=None, volume_mount_prefix=None, boot_from_volume=None, boot_volume_type=None, boot_volume_availability_zone=None, boot_volume_local_to_instance=None):
    """Create a Node Group Template.

    The required attributes go into the request body directly; every
    optional argument is forwarded positionally to ``_do_create``.
    """
    # Mandatory attributes of a node group template.
    data = {
        'name': name,
        'plugin_name': plugin_name,
        'plugin_version': plugin_version,
        'flavor_id': flavor_id,
        'node_processes': node_processes
    }
    # NOTE(review): the optional arguments are passed positionally —
    # confirm the ordering against _do_create's signature when editing.
    return self._do_create(data, description, volumes_per_node, volumes_size, node_configs, floating_ip_pool, security_groups, auto_security_group, availability_zone, volumes_availability_zone, volume_type, image_id, is_proxy_gateway, volume_local_to_instance, use_autoconfig, shares, is_public, is_protected, volume_mount_prefix, boot_from_volume, boot_volume_type, boot_volume_availability_zone, boot_volume_local_to_instance)
Create a Node Group Template.
def validate_empty_attributes(fully_qualified_name: str, spec: Dict[str, Any], *attributes: str) -> List[EmptyAttributeError]:
    """Validates to ensure that a set of attributes do not contain empty
    values.

    :param fully_qualified_name: name of the spec being validated.
    :param spec: mapping of attribute name to value.
    :param attributes: attribute names that must be non-empty.
    :return: one EmptyAttributeError per attribute with a falsy value.
    """
    errors = []
    for attribute in attributes:
        if not spec.get(attribute, None):
            errors.append(EmptyAttributeError(fully_qualified_name, spec, attribute))
    return errors
Validates to ensure that a set of attributes do not contain empty values
def remove_input(urls, preserves, verbose = False):
    """Attempt to delete all files identified by the URLs in urls except
    any that are the same as the files in the preserves list.

    :param urls: iterable of URLs naming files to delete.
    :param preserves: paths whose files must not be deleted.
    :param verbose: when True, report each removal on stderr.
    """
    for path in map(url2path, urls):
        if any(os.path.samefile(path, preserve) for preserve in preserves):
            continue
        if verbose:
            # Bug fix: replaced the Python 2 ``print >>sys.stderr``
            # statement with the Python 3 print function.
            print("removing \"%s\" ..." % path, file=sys.stderr)
        try:
            os.remove(path)
        except OSError:
            # Best-effort removal: ignore files that cannot be deleted,
            # but no longer swallow unrelated exceptions (was bare except).
            pass
Attempt to delete all files identified by the URLs in urls except any that are the same as the files in the preserves list.
def _diff_group_position(group):
    """Generate a unified diff position line for a diff group.

    Each group entry is (old_line, new_line, payload); a tuple payload is
    a conflict carrying the replaced old/new line sequences, anything
    else is a single shared line.
    """
    old_start = group[0][0]
    new_start = group[0][1]
    old_length = 0
    new_length = 0
    for _, _, payload in group:
        if isinstance(payload, tuple):
            replaced, replacement = payload
            old_length += len(replaced)
            new_length += len(replacement)
        else:
            # A shared line counts once on each side.
            old_length += 1
            new_length += 1
    # Unified diff convention: zero-length hunks are positioned before
    # the line, otherwise the start becomes 1-based.
    if old_length:
        old_start += 1
    if new_length:
        new_start += 1
    return color.LineNumber('@@ -%s,%s +%s,%s @@' % (old_start, old_length, new_start, new_length))
Generate a unified diff position line for a diff group
def assign(self, link_type, product, linked_product, data=None, identifierType=None):
    """Assign a product link.

    :param link_type: type of link, one of 'cross_sell', 'up_sell',
        'related' or 'grouped'
    :param product: ID or SKU of product
    :param linked_product: ID or SKU of linked product
    :param data: dictionary of link data (position, qty, etc.), e.g.
        {'position': '0', 'qty': 1}
    :param identifierType: Defines whether the product or SKU value is
        passed in the "product" parameter.
    :return: boolean
    """
    call_args = [link_type, product, linked_product, data, identifierType]
    return bool(self.call('catalog_product_link.assign', call_args))
Assign a product link :param link_type: type of link, one of 'cross_sell', 'up_sell', 'related' or 'grouped' :param product: ID or SKU of product :param linked_product: ID or SKU of linked product :param data: dictionary of link data, (position, qty, etc.) Example: { 'position': '0', 'qty': 1} :param identifierType: Defines whether the product or SKU value is passed in the "product" parameter. :return: boolean
def __compress_attributes(self, dic):
    """Convert attributes that are single-item lists of strings into
    plain strings.

    LDAP client libraries return lists for every attribute, even when
    only one value is possible; this flattens those, decoding to UTF-8
    where that succeeds. Attributes in the binary blacklist below, and
    any multi-valued attribute, are omitted from the result entirely.

    :param dic: raw attribute dict from the LDAP search result.
    :return: dict with single-item text attributes flattened to strings.
    """
    # NOTE: Python 2 idioms (iteritems, types.ListType) — this module
    # targets Python 2.
    result = {}
    for k, v in dic.iteritems():
        if isinstance(v, types.ListType) and len(v) == 1:
            # Attributes known to hold binary data must not be decoded.
            if k not in ('msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash', 'msExchBlockedSendersHash', 'replicationSignature', 'msExchSafeRecipientsHash', 'sIDHistory', 'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates', 'msExchMasterAccountSid', 'msExchPreviousAccountSid', 'msExchUMPinChecksum', 'userSMIMECertificate', 'userCertificate', 'userCert', 'msExchDisabledArchiveGUID', 'msExchUMPinChecksum', 'msExchUMSpokenName', 'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto', 'msExchMailboxGuid'):
                try:
                    result[k] = v[0].decode('utf-8')
                except Exception as e:
                    # Fall back to the raw value when decoding fails.
                    logging. error("Failed to decode attribute: %s -- %s" % (k, e))
                    result[k] = v[0]
    return result
Convert every attribute that is a single-item list of strings into a plain string. LDAP always returns lists, even when only a single value is possible. :param dic: :return:
def get_new_broks(self):
    """Iter over all hosts, services and contacts to add their new broks
    to the internal lists.

    :return: None
    """
    # Drain broks from every host and service, clearing each source.
    for item in self.all_my_hosts_and_services():
        for brok in item.broks:
            self.add(brok)
        item.broks = []
    # Same draining for contacts.
    for contact in self.contacts:
        for brok in contact.broks:
            self.add(brok)
        contact.broks = []
Iter over all hosts and services to add new broks in internal lists :return: None
def get_relationship_dicts(self):
    """Given GO DAG relationships, return summaries per GO ID.

    NOTE(review): appears unfinished — for non-empty relationships it
    only prints debug output and implicitly returns None; confirm the
    intended return value before relying on this.
    """
    if not self.relationships:
        return None
    for goid, goobj in self.go2obj.items():
        for reltyp, relset in goobj.relationship.items():
            # Forward relationship targets for this relationship type.
            relfwd_goids = set(o.id for o in relset)
            print("CountRelativesInit RELLLLS", goid, goobj.id, reltyp, relfwd_goids)
Given GO DAG relationships, return summaries per GO ID.
def unpack(cls, msg, client, server, request_id):
    """Parse message and return an `OpInsert`.

    Takes the client message as bytes, the client and server socket
    objects, and the client request id.
    """
    # Leading 4 bytes are the flags integer.
    flags, = _UNPACK_INT(msg[:4])
    namespace, offset = _get_c_string(msg, 4)
    # Everything after the namespace is a stream of BSON documents.
    documents = bson.decode_all(msg[offset:], CODEC_OPTIONS)
    return cls(*documents, namespace=namespace, flags=flags,
               _client=client, request_id=request_id, _server=server)
Parse message and return an `OpInsert`. Takes the client message as bytes, the client and server socket objects, and the client request id.
def protocol(alias_name, default=None, allow_none=False):
    """Get the protocol from the docker link alias or return the default.

    Args:
        alias_name: The docker link alias
        default: The default value if the link isn't available
        allow_none: If the return value can be `None` (i.e. optional)

    Examples:
        Assuming a Docker link was created with ``docker --link postgres:db``
        and the resulting environment variable is
        ``DB_PORT=tcp://172.17.0.82:5432``:

        >>> envitro.docker.protocol('DB')
        tcp
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    try:
        return _split_docker_link(alias_name)[0]
    except KeyError as err:
        # A falsy default with allow_none=False re-raises the lookup error.
        if default or allow_none:
            return default
        raise err
Get the protocol from the docker link alias or return the default. Args: alias_name: The docker link alias default: The default value if the link isn't available allow_none: If the return value can be `None` (i.e. optional) Examples: Assuming a Docker link was created with ``docker --link postgres:db`` and the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``. >>> envitro.docker.protocol('DB') tcp
def _all_inner(self, fields, limit, order_by, offset):
    """Yields all records for the query, following pagination links.

    :return: yields validated response content, page by page
    """
    params = self._get_formatted_query(fields, limit, order_by, offset)
    response = self.session.get(self._get_table_url(), params=params)
    yield self._get_content(response)
    # Follow 'next' links (RFC 5988 Link headers) until exhausted.
    while 'next' in response.links:
        self.url_link = response.links['next']['url']
        response = self.session.get(self.url_link)
        yield self._get_content(response)
Yields all records for the query and follows links if present on the response after validating :return: List of records with content
def debug_file(self):
    """Debug current script."""
    self.switch_to_plugin()
    editor = self.get_current_editor()
    if editor is not None:
        # Notify listeners that a debug session is starting.
        editor.sig_debug_start.emit()
    self.run_file(debug=True)
Debug current script
def publish(cls, message, client_filter=None):
    """Publish messages to subscribers.

    Args:
        message: The message to publish.
        client_filter: A filter function to call passing in each client.
            Only clients for whom the function returns True will have the
            message sent to them.
    """
    with cls._lock:
        recipients = (
            client for client in cls.subscribers
            if (not client_filter) or client_filter(client)
        )
        for client in recipients:
            client.send(message)
Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them.
def read(self):
    """We have been called to read!

    As a consumer, continue to read for the length of the packet and
    then pass it to the callback. Returns the full packet, or None when
    nothing (or only an empty \\x00 packet) was received.
    """
    header = self.dev.read()
    if len(header) == 0:
        self.log.warning("READ : Nothing received")
        return
    if header == b'\x00':
        self.log.warning("READ : Empty packet (Got \\x00)")
        return
    packet = bytearray(header)
    # The first byte is the remaining payload length.
    payload = self.dev.read(packet[0])
    packet.extend(bytearray(payload))
    self.log.info("READ : %s" % self.format_packet(packet))
    self.do_callback(packet)
    return packet
We have been called to read! As a consumer, continue to read for the length of the packet and then pass to the callback.
def raw(text):
    """Returns a raw string representation of text, expanding every
    character found in ``escape_dict``."""
    pieces = []
    for char in text:
        try:
            pieces.append(escape_dict[char])
        except KeyError:
            # Characters without an escape mapping pass through unchanged.
            pieces.append(char)
    return ''.join(pieces)
Returns a raw string representation of text
def class_repr(value):
    """Returns a representation of the value class.

    Arguments
    ---------
    value
        A class or a class instance

    Returns
    -------
    str
        The "module.name" representation of the value class.

    Example
    -------
    >>> from datetime import date
    >>> class_repr(date)
    'datetime.date'
    >>> class_repr(date.today())
    'datetime.date'
    """
    cls = value if isinstance(value, type) else value.__class__
    return '{0}.{1}'.format(cls.__module__, cls.__name__)
Returns a representation of the value class. Arguments --------- value A class or a class instance Returns ------- str The "module.name" representation of the value class. Example ------- >>> from datetime import date >>> class_repr(date) 'datetime.date' >>> class_repr(date.today()) 'datetime.date'
def find_function(self, name):
    """Find the Function by its name.

    :raises LookupError: when no deffunction with that name exists.
    """
    handle = lib.EnvFindDeffunction(self._env, name.encode())
    if handle == ffi.NULL:
        raise LookupError("Function '%s' not found" % name)
    return Function(self._env, handle)
Find the Function by its name.
def communicate_through(self, file):
    """Setup communication through a file.

    :rtype: AYABInterface.communication.Communication
    :raises ValueError: if a communication channel is already open.
    """
    if self._communication is not None:
        raise ValueError("Already communicating.")
    channel = Communication(
        file, self._get_needle_positions, self._machine,
        [self._on_message_received],
        right_end_needle=self.right_end_needle,
        left_end_needle=self.left_end_needle)
    self._communication = channel
    return channel
Setup communication through a file. :rtype: AYABInterface.communication.Communication
def parse(html_string, wrapper=Parser, *args, **kwargs):
    """Parse html with wrapper.

    :param html_string: raw HTML text to parse.
    :param wrapper: class wrapping the parsed lxml document tree
        (defaults to ``Parser``).
    :return: ``wrapper`` instance built from the parsed tree.
    """
    # Bug fix: honor the ``wrapper`` argument — the original always
    # instantiated Parser, silently ignoring the parameter.
    return wrapper(lxml.html.fromstring(html_string), *args, **kwargs)
Parse html with wrapper
def nl_object_alloc(ops):
    """Allocate a new object of kind specified by the operations handle.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/object.c#L54

    Positional arguments:
    ops -- cache operations handle (nl_object_ops class instance).

    Returns:
    New nl_object class instance or None.
    """
    obj = nl_object()
    nl_init_list_head(obj.ce_list)
    obj.ce_ops = ops
    # Run the type-specific constructor when one is registered.
    if ops.oo_constructor:
        ops.oo_constructor(obj)
    _LOGGER.debug('Allocated new object 0x%x', id(obj))
    return obj
Allocate a new object of kind specified by the operations handle. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/object.c#L54 Positional arguments: ops -- cache operations handle (nl_object_ops class instance). Returns: New nl_object class instance or None.
def stop_recording():
    """Stops the global recording of events and returns a list of the
    events captured.

    :raises ValueError: if recording was never started.
    """
    global _recording
    if not _recording:
        raise ValueError('Must call "start_recording" before.')
    events_queue, hook_handle = _recording
    unhook(hook_handle)
    return list(events_queue.queue)
Stops the global recording of events and returns a list of the events captured.
def wrap_threading_start(start_func):
    """Wrap the start function from thread.

    Puts the tracer information in the threading object so the spawned
    thread inherits the current OpenCensus context.
    """
    def _start_with_context(self):
        # Capture the caller's tracing context before the thread starts.
        self._opencensus_context = (
            execution_context.get_opencensus_full_context()
        )
        return start_func(self)
    return _start_with_context
Wrap the start function from thread. Put the tracer informations in the threading object.
def bytes_array(self):
    """Get the param as an array of raw byte strings.

    Requires a 2-D param; dimensions are (item_length, item_count).
    """
    assert len(self.dimensions) == 2, \
        '{}: cannot get value as bytes array!'.format(self.name)
    item_len, count = self.dimensions
    return [self.bytes[index * item_len:(index + 1) * item_len]
            for index in range(count)]
Get the param as an array of raw byte strings.
def tag(tagname, content='', attrs=None):
    """Helper for programmatically building HTML tags.

    Note that this barely does any escaping, and will happily spit out
    dangerous user input if used as such.

    :param tagname: Tag name of the DOM element we want to return.
    :param content: Optional content of the DOM element. If `None`, then
        the element is self-closed. By default, the content is an empty
        string. Supports iterables like generators.
    :param attrs: Optional dictionary-like collection of attributes for
        the DOM element.
    """
    attrs_str = attrs and ' '.join(_generate_dom_attrs(attrs))
    open_tag = tagname
    if attrs_str:
        open_tag += ' ' + attrs_str
    # None content means a self-closing element.
    if content is None:
        return literal('<%s />' % open_tag)
    # NOTE: basestring — this helper targets Python 2.
    content = ''.join(iterate(content, unless=(basestring, literal)))
    return literal('<%s>%s</%s>' % (open_tag, content, tagname))
Helper for programmatically building HTML tags. Note that this barely does any escaping, and will happily spit out dangerous user input if used as such. :param tagname: Tag name of the DOM element we want to return. :param content: Optional content of the DOM element. If `None`, then the element is self-closed. By default, the content is an empty string. Supports iterables like generators. :param attrs: Optional dictionary-like collection of attributes for the DOM element. Example:: >>> tag('div', content='Hello, world.') u'<div>Hello, world.</div>' >>> tag('script', attrs={'src': '/static/js/core.js'}) u'<script src="/static/js/core.js"></script>' >>> tag('script', attrs=[('src', '/static/js/core.js'), ('type', 'text/javascript')]) u'<script src="/static/js/core.js" type="text/javascript"></script>' >>> tag('meta', content=None, attrs=dict(content='"quotedquotes"')) u'<meta content="\\\\"quotedquotes\\\\"" />' >>> tag('ul', (tag('li', str(i)) for i in xrange(3))) u'<ul><li>0</li><li>1</li><li>2</li></ul>'
def position_input(obj, visible=False):
    """Template tag to return an input field for the position of the
    object.

    Creates an ObjectPosition for ``obj`` on first use.
    """
    if not obj.generic_position.all():
        ObjectPosition.objects.create(content_object=obj)
    return {
        'obj': obj,
        'visible': visible,
        'object_position': obj.generic_position.all()[0],
    }
Template tag to return an input field for the position of the object.
def list_databases(self):
    """Runs the ``\\list`` command and returns a list of column values
    with information about all databases."""
    raw_lines = output_lines(self.exec_psql('\\list'))
    return [row.split('|') for row in raw_lines]
Runs the ``\\list`` command and returns a list of column values with information about all databases.
def random_stats(self, all_stats, race, ch_class):
    """Create stats based on the character's class and race.

    Looks up base values in the class table, adds the race bonus, and
    clamps the result to [1, 10] — except health stats, which have no
    upper cap.
    """
    stat_names = [entry['stat'] for entry in all_stats]
    res = {name: 0 for name in stat_names}
    cur_stat = 0
    for stat in stat_names:
        # NOTE(review): cur_stat intentionally carries over between
        # stats when no class row matches, mirroring the original
        # lookup behavior.
        for row in self.classes.dat:
            if row['name'] == ch_class:
                cur_stat = int(row[stat])
        for row in self.races.dat:
            if row['name'] == race:
                cur_stat += int(row[stat])
        if cur_stat < 1:
            cur_stat = 1
        elif cur_stat > 10:
            if stat not in ('Health', 'max_health'):
                cur_stat = 10
        res[stat] = cur_stat
    return res
create random stats based on the characters class and race This looks up the tables from CharacterCollection to get base stats and applies a close random fit
def _LogRecord_msg():
    """Overrides logging.LogRecord.msg attribute to ensure variable
    content is stored as unicode."""
    def _get_msg(self):
        return self.__msg

    def _set_msg(self, value):
        # Normalize any incoming value to unicode on assignment.
        self.__msg = to_unicode(value)

    logging.LogRecord.msg = property(_get_msg, _set_msg)
Overrides logging.LogRecord.msg attribute to ensure variable content is stored as unicode.
def with_pattern(pattern, regex_group_count=None):
    r"""Attach a regular expression pattern matcher to a custom type
    converter function.

    This annotates the type converter with the :attr:`pattern` attribute.

    :param pattern: regular expression pattern (as text)
    :param regex_group_count: Indicates how many regex-groups are in pattern.
    :return: wrapped function
    """
    def annotate(func):
        # Only annotate; the callable behavior is untouched.
        func.pattern = pattern
        func.regex_group_count = regex_group_count
        return func
    return annotate
Attach a regular expression pattern matcher to a custom type converter function. This annotates the type converter with the :attr:`pattern` attribute. EXAMPLE: >>> import parse >>> @parse.with_pattern(r"\d+") ... def parse_number(text): ... return int(text) is equivalent to: >>> def parse_number(text): ... return int(text) >>> parse_number.pattern = r"\d+" :param pattern: regular expression pattern (as text) :param regex_group_count: Indicates how many regex-groups are in pattern. :return: wrapped function
def log_game_start(self, players, terrain, numbers, ports):
    """Begin a game.

    Erase the log, set the timestamp, set the players, and write the log
    header. The robber is assumed to start on the desert (or off-board).

    :param players: iterable of catan.game.Player objects
    :param terrain: list of 19 catan.board.Terrain objects.
    :param numbers: list of 19 catan.board.HexNumber objects.
    :param ports: list of catan.board.Port objects.
    """
    self.reset()
    self._set_players(players)
    # Header: version line, timestamp, players, then the board layout.
    self._logln('{} v{}'.format(__name__, __version__))
    self._logln('timestamp: {0}'.format(self.timestamp_str()))
    self._log_players(players)
    self._log_board_terrain(terrain)
    self._log_board_numbers(numbers)
    self._log_board_ports(ports)
    self._logln('...CATAN!')
Begin a game. Erase the log, set the timestamp, set the players, and write the log header. The robber is assumed to start on the desert (or off-board). :param players: iterable of catan.game.Player objects :param terrain: list of 19 catan.board.Terrain objects. :param numbers: list of 19 catan.board.HexNumber objects. :param ports: list of catan.board.Port objects.
def read(path):
    """Read a secret from the Vault REST endpoint.

    :param path: secret path below the configured base path.
    :return: the secret's data dict.
    :raises Exception: when Vault responds with a non-2xx status.
    """
    url = '{}/{}/{}'.format(
        settings.VAULT_BASE_URL.rstrip('/'),
        settings.VAULT_BASE_SECRET_PATH.strip('/'),
        path.lstrip('/'))
    headers = {'X-Vault-Token': settings.VAULT_ACCESS_TOKEN}
    resp = requests.get(url, headers=headers)
    if not resp.ok:
        log.error('Failed VAULT GET request: %s %s', resp.status_code, resp.text)
        raise Exception('Failed Vault GET request: {} {}'.format(resp.status_code, resp.text))
    return resp.json()['data']
Read a secret from Vault REST endpoint
def set(self, key, value, confidence=100):
    """Defines the given value with the given confidence, unless the same
    key is already defined with a higher (or equal) confidence level.

    ``None`` values are ignored entirely.
    """
    if value is None:
        return
    if key in self.info:
        old_confidence, _ = self.info[key]
        # Never downgrade an entry recorded with higher confidence.
        if old_confidence >= confidence:
            return
    self.info[key] = (confidence, value)
Defines the given value with the given confidence, unless the same value is already defined with a higher confidence level.
def savemat(file_name, mdict, appendmat=True, format='7.3', oned_as='row', store_python_metadata=True, action_for_matlab_incompatible='error', marshaller_collection=None, truncate_existing=False, truncate_invalid_matlab=False, **keywords):
    """Save a dictionary of python types to a MATLAB MAT file.

    Versions '4'/'5' are dispatched wholesale to ``scipy.io.savemat``;
    the HDF5-based '7.3' format is written by this package via
    ``writes``. See the module-level documentation for the full
    parameter descriptions.
    """
    # Older formats: SciPy handles everything, including `appendmat`.
    if float(format) < 7.3:
        import scipy.io
        scipy.io.savemat(file_name, mdict, appendmat=appendmat, format=format, oned_as=oned_as, **keywords)
        return
    if appendmat and not file_name.endswith('.mat'):
        file_name = file_name + '.mat'
    # matlab_compatible is forced on: the output must be a valid MAT file.
    options = Options(store_python_metadata=store_python_metadata, \
        matlab_compatible=True, oned_as=oned_as, \
        action_for_matlab_incompatible=action_for_matlab_incompatible, \
        marshaller_collection=marshaller_collection)
    writes(mdict=mdict, filename=file_name, truncate_existing=truncate_existing, truncate_invalid_matlab=truncate_invalid_matlab, options=options)
Save a dictionary of python types to a MATLAB MAT file. Saves the data provided in the dictionary `mdict` to a MATLAB MAT file. `format` determines which kind/vesion of file to use. The '7.3' version, which is HDF5 based, is handled by this package and all types that this package can write are supported. Versions 4 and 5 are not HDF5 based, so everything is dispatched to the SciPy package's ``scipy.io.savemat`` function, which this function is modelled after (arguments not specific to this package have the same names, etc.). Parameters ---------- file_name : str or file-like object Name of the MAT file to store in. The '.mat' extension is added on automatically if not present if `appendmat` is set to ``True``. An open file-like object can be passed if the writing is being dispatched to SciPy (`format` < 7.3). mdict : dict The dictionary of variables and their contents to store in the file. appendmat : bool, optional Whether to append the '.mat' extension to `file_name` if it doesn't already end in it or not. format : {'4', '5', '7.3'}, optional The MATLAB mat file format to use. The '7.3' format is handled by this package while the '4' and '5' formats are dispatched to SciPy. oned_as : {'row', 'column'}, optional Whether 1D arrays should be turned into row or column vectors. store_python_metadata : bool, optional Whether or not to store Python type information. Doing so allows most types to be read back perfectly. Only applicable if not dispatching to SciPy (`format` >= 7.3). action_for_matlab_incompatible: str, optional The action to perform writing data that is not MATLAB compatible. The actions are to write the data anyways ('ignore'), don't write the incompatible data ('discard'), or throw a ``TypeNotMatlabCompatibleError`` exception. marshaller_collection : MarshallerCollection, optional Collection of marshallers to disk to use. Only applicable if not dispatching to SciPy (`format` >= 7.3). 
truncate_existing : bool, optional Whether to truncate the file if it already exists before writing to it. truncate_invalid_matlab : bool, optional Whether to truncate a file if the file doesn't have the proper header (userblock in HDF5 terms) setup for MATLAB metadata to be placed. **keywords : Additional keywords arguments to be passed onto ``scipy.io.savemat`` if dispatching to SciPy (`format` < 7.3). Raises ------ ImportError If `format` < 7.3 and the ``scipy`` module can't be found. NotImplementedError If writing a variable in `mdict` is not supported. exceptions.TypeNotMatlabCompatibleError If writing a type not compatible with MATLAB and `action_for_matlab_incompatible` is set to ``'error'``. Notes ----- Writing the same data and then reading it back from disk using the HDF5 based version 7.3 format (the functions in this package) or the older format (SciPy functions) can lead to very different results. Each package supports a different set of data types and converts them to and from the same MATLAB types differently. See Also -------- loadmat : Equivelent function to do reading. scipy.io.savemat : SciPy function this one models after and dispatches to. Options writes : Function used to do the actual writing.
def write(self, file_or_filename):
    """Writes the case data to file.

    :param file_or_filename: open file object, or a path to open in
        binary write mode.
    :return: the file object written to (closed when opened here).
    """
    # NOTE: ``basestring`` — this module targets Python 2.
    if isinstance(file_or_filename, basestring):
        file = None
        try:
            file = open(file_or_filename, "wb")
        # Bug fix: the legacy ``except Exception, detail`` comma syntax
        # is replaced with ``as`` (valid on Python 2.6+ and Python 3).
        except Exception as detail:
            logger.error("Error opening %s." % detail)
        finally:
            if file is not None:
                self._write_data(file)
                file.close()
    else:
        file = file_or_filename
        self._write_data(file)
    return file
Writes the case data to file.
def copytree_hardlink(source, dest): copy2 = shutil.copy2 try: shutil.copy2 = os.link shutil.copytree(source, dest) finally: shutil.copy2 = copy2
Recursively copy a directory ala shutils.copytree, but hardlink files instead of copying. Available on UNIX systems only.
def info(ctx):
    """Display status of YubiKey Slots."""
    controller = ctx.obj['controller']
    device = ctx.obj['dev']
    slot_one, slot_two = controller.slot_status
    click.echo('Slot 1: {}'.format('programmed' if slot_one else 'empty'))
    click.echo('Slot 2: {}'.format('programmed' if slot_two else 'empty'))
    if device.is_fips:
        fips_state = 'Yes' if controller.is_in_fips_mode else 'No'
        click.echo('FIPS Approved Mode: {}'.format(fips_state))
Display status of YubiKey Slots.
def prepare_editable_requirement(
        self,
        req,
        require_hashes,
        use_user_site,
        finder
):
    """Prepare an editable requirement (e.g. ``pip install -e .``).

    :param req: the requirement being prepared; must be editable.
    :param require_hashes: whether hash-checking mode is active; editable
        installs have no single file to hash, so this raises.
    :param use_user_site: whether the install targets the user site;
        forwarded to the existence check.
    :param finder: the package finder used during build preparation.
    :returns: an abstract distribution ready for installation.
    :raises InstallationError: if ``require_hashes`` is set.
    """
    assert req.editable, "cannot prepare a non-editable req as editable"
    logger.info('Obtaining %s', req)
    with indent_log():
        if require_hashes:
            raise InstallationError(
                'The editable requirement %s cannot be installed when '
                'requiring hashes, because there is no single file to '
                'hash.' % req
            )
        req.ensure_has_source_dir(self.src_dir)
        # Check out / refresh the editable source tree; when downloads are
        # being saved, leave the checkout pristine for archiving below.
        req.update_editable(not self._download_should_save)
        abstract_dist = make_abstract_dist(req)
        with self.req_tracker.track(req):
            abstract_dist.prep_for_dist(finder, self.build_isolation)
        if self._download_should_save:
            req.archive(self.download_dir)
        req.check_if_exists(use_user_site)
    return abstract_dist
Prepare an editable requirement
def Handle_Search(self, msg):
    """Handle a search request.

    :param msg: the received search message
    :type msg: dict
    :returns: the JSON-encoded reply
    :rtype: str
    """
    term = msg['object']['searchTerm']
    found = self.db.searchForItem(term)
    payload = {
        "received search": term,
        "results": found,
    }
    reply = {"status": "OK", "type": "search", "object": payload}
    return json.dumps(reply)
Handle a search. :param msg: the received search :type msg: dict :returns: The message to reply with :rtype: str
def _set_mtu_to_nics(self, conf): for dom_name, dom_spec in conf.get('domains', {}).items(): for idx, nic in enumerate(dom_spec.get('nics', [])): net = self._get_net(conf, dom_name, nic) mtu = net.get('mtu', 1500) if mtu != 1500: nic['mtu'] = mtu
For all the nics of all the domains in the conf that have MTU set, save the MTU on the NIC definition. Args: conf (dict): Configuration spec to extract the domains from Returns: None
def team_info():
    """Returns a list of team information dictionaries."""
    league = __get_league_object()
    teams = league.find('teams').findall('team')
    # Copy each team element's attributes into a plain dict.
    return [dict(team.attrib) for team in teams]
Returns a list of team information dictionaries
def run(self, *args, **kw):
    """Execute the task via the package-supplied run function.

    This may be overridden by a subclass. GUI-only keywords (``mode``,
    ``_save``) are stripped before delegating.
    """
    if self._runFunc is None:
        raise taskpars.NoExecError('No way to run task "'+self.__taskName+\
            '". You must either override the "run" method in your '+ \
            'ConfigObjPars subclass, or you must supply a "run" '+ \
            'function in your package.')
    # Drop keywords that only make sense to the GUI layer.
    kw.pop('mode', None)
    kw.pop('_save', None)
    return self._runFunc(self, *args, **kw)
This may be overridden by a subclass.
def _get_condition_json(self, index): condition = self.condition_data[index] condition_log = { 'name': condition[0], 'value': condition[1], 'type': condition[2], 'match': condition[3] } return json.dumps(condition_log)
Method to generate json for logging audience condition. Args: index: Index of the condition. Returns: String: Audience condition JSON.
def iter_orgs(username, number=-1, etag=None):
    """List the organizations associated with ``username``.

    :param str username: (required), login of the user
    :param int number: (optional), number of orgs to return. Default: -1,
        return all
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: iterator of organizations, or an empty list when no
        username is given
    """
    if not username:
        return []
    return gh.iter_orgs(username, number, etag)
List the organizations associated with ``username``. :param str username: (required), login of the user :param int number: (optional), number of orgs to return. Default: -1, return all of the issues :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Organization <github3.orgs.Organization>`
def replace_file(from_file, to_file):
    """Replace *to_file* with a copy of *from_file*.

    Any pre-existing *to_file* is removed first; a missing target is not
    an error.
    """
    try:
        os.remove(to_file)
    except OSError:
        # Target absent (or not removable); copy() will surface any
        # genuine problem.
        pass
    copy(from_file, to_file)
Replaces to_file with from_file
def post(self, request, *args, **kwargs):
    """Handle POST requests.

    Validates the submitted form and formsets; saves if valid,
    re-displays the page with errors if invalid.
    """
    self.object = self.get_object()
    form_class = self.get_form_class()
    form = self.get_form(form_class)
    formsets = self.get_formsets(form, saving=True)
    # Run each formset's validation (is_valid() populates the error
    # state used when re-rendering).
    valid_formsets = True
    for formset in formsets.values():
        if not formset.is_valid():
            valid_formsets = False
            break
    # NOTE(review): `valid_formsets` is computed but never used below --
    # `self.is_valid(form, formsets)` decides the outcome instead.
    # Confirm whether the loop is only kept for its validation side
    # effects or whether `valid_formsets` should feed into the check.
    if self.is_valid(form, formsets):
        return self.form_valid(form, formsets)
    else:
        adminForm = self.get_admin_form(form)
        adminFormSets = self.get_admin_formsets(formsets)
        context = {
            'adminForm': adminForm,
            'formsets': adminFormSets,
            'obj': self.object,
        }
        return self.form_invalid(form=form, **context)
Method for handling POST requests. Validates the submitted form and formsets. Saves if valid, re-displays the page with errors if invalid.
def pdb(self):
    """Start the python debugger.

    Calling pdb won't do anything in a multithread context.
    """
    if self.embed_disabled:
        self.warning_log("Pdb is disabled when runned from the grid runner because of the multithreading")
        return False
    runner_cfg = BROME_CONFIG['runner']
    if runner_cfg['play_sound_on_pdb']:
        say(runner_cfg['sound_on_pdb'])
    set_trace()
Start the Python debugger. Calling pdb won't do anything in a multithreaded context.
def _confirm_constant(a): a = np.asanyarray(a) return np.isclose(a, 1.0).all(axis=0).any()
Confirm `a` has a column vector of 1s.
def approximant(self, index):
    """Return the name of the approximant to use at the given index.

    Raises ValueError when the table has no ``approximant`` column.
    """
    if 'approximant' in self.table.fieldnames:
        return self.table["approximant"][index]
    raise ValueError("approximant not found in input file and no "
                     "approximant was specified on initialization")
Return the name of the approximant to use at the given index
def _try_lookup(table, value, default = ""): try: string = table[ value ] except KeyError: string = default return string
try to get a string from the lookup table, return "" instead of key error
def connection_made(self, transport):
    """Device connection succeeded: start configuring the RTSP session.

    Sends the current method's message and schedules a time-out handle
    in case the device doesn't respond.
    """
    self.transport = transport
    message = self.method.message
    transport.write(message.encode())
    self.time_out_handle = self.loop.call_later(
        TIME_OUT_LIMIT, self.time_out)
Connect to device is successful. Start configuring RTSP session. Schedule time out handle in case device doesn't respond.
def create_cell_renderer_text(self, tree_view, title="title", assign=0, editable=False):
    """Create a CellRendererText column titled *title* and append it to
    *tree_view*, bound to model column *assign*."""
    cell = Gtk.CellRendererText()
    cell.set_property('editable', editable)
    tree_view.append_column(Gtk.TreeViewColumn(title, cell, text=assign))
Function creates a CellRendererText with title
def set_interval(self, timer_id, interval):
    """Set timer interval.

    Returns 0 if OK, -1 on failure. This method is slow; canceling the
    timer and adding a new one yields better performance.
    """
    handle = self._as_parameter_
    return lib.ztimerset_set_interval(handle, timer_id, interval)
Set timer interval. Returns 0 if OK, -1 on failure. This method is slow; canceling the timer and adding a new one yields better performance.
def _process(self, resource=None, data=None):
    """Process the current transaction.

    Sends an HTTP request to the PAYDUNYA API server: POST with a JSON
    body when there is data to send, GET otherwise.

    :param resource: API resource path appended to the endpoint.
    :param data: payload to send; falls back to ``self._data`` when
        falsy. (Previously defaulted to a shared mutable ``{}``; ``None``
        behaves identically since both are falsy and never mutated.)
    :returns: ``(True, response)`` on success, ``(False, response_text)``
        on an API-level failure, or ``(500, "Request Failed")`` when the
        HTTP status is not 200.
    """
    _data = data or self._data
    rsc_url = self.get_rsc_endpoint(resource)
    if _data:
        req = requests.post(rsc_url, data=json.dumps(_data),
                            headers=self.headers)
    else:
        req = requests.get(rsc_url, params=_data, headers=self.headers)
    if req.status_code != 200:
        return (500, "Request Failed")
    self._response = json.loads(req.text)
    # `00` was a confusing zero literal; compare against plain 0.
    if int(self._response['response_code']) == 0:
        return (True, self._response)
    return (False, self._response['response_text'])
Processes the current transaction Sends an HTTP request to the PAYDUNYA API server
def sample(self, data_1, data_2, sample_size=15000, blocked_proportion=.5, original_length_1=None, original_length_2=None):
    """Draw a random sample of record combinations from the two datasets
    and initialize active learning with it.

    Arguments:
        data_1 -- dict of records from the first dataset keyed by
            record_id, values being field-name -> value dicts
        data_2 -- dict of records from the second dataset, same form
        sample_size -- size of the sample to draw
        blocked_proportion -- fraction of the sample drawn from blocked
            pairs
    """
    self._checkData(data_1, data_2)
    learner = self.ActiveLearner(self.data_model)
    self.active_learner = learner
    learner.sample_product(data_1, data_2, blocked_proportion,
                           sample_size, original_length_1,
                           original_length_2)
Draws a random sample of combinations of records from the first and second datasets, and initializes active learning with this sample Arguments: data_1 -- Dictionary of records from first dataset, where the keys are record_ids and the values are dictionaries with the keys being field names data_2 -- Dictionary of records from second dataset, same form as data_1 sample_size -- Size of the sample to draw
def put(self, *args, **kwargs): self.inq.put((self._putcount, (args, kwargs))) self._putcount += 1
place a new item into the pool to be handled by the workers all positional and keyword arguments will be passed in as the arguments to the function run by the pool's workers
def avg_time(self, source=None):
    """Return the average time taken to scrape lyrics.

    If *source* (a name string or a callable) is given, average only that
    source's runtimes; otherwise average the non-zero runtimes across all
    sources.
    """
    if source is None:
        all_runtimes = [
            runtime
            for record in self.source_stats.values()
            for runtime in record.runtimes
            if runtime != 0
        ]
        return avg(all_runtimes)
    key = source.__name__ if callable(source) else source
    return avg(self.source_stats[key].runtimes)
Returns the average time taken to scrape lyrics. If a string or a function is passed as source, return the average time taken to scrape lyrics from that source, otherwise return the total average.
def random_seed_np_tf(seed):
    """Seed the numpy and tensorflow random number generators.

    :param seed: seed value; negative values are ignored
    :returns: True when the generators were seeded, False otherwise
    """
    if seed < 0:
        return False
    np.random.seed(seed)
    tf.set_random_seed(seed)
    return True
Seed numpy and tensorflow random number generators. :param seed: seed parameter
def beginningPage(R):
    """Extract the first page from record field 'PG'.

    Pages may not be plain numbers, so this returns the first token of a
    possible range, stripped of a leading 'suppl ' marker and any
    trailing semicolon.
    """
    page = R['PG']
    if page.startswith('suppl '):
        page = page[len('suppl '):]
    first_token = page.split(' ')[0]
    first_page = first_token.split('-')[0]
    return first_page.replace(';', '')
Pages may not be given as plain numbers, so this is the most accurate first-page value this function can extract.
async def request(self, method: base.String, data: Optional[Dict] = None, files: Optional[Dict] = None,
                  **kwargs) -> Union[List, Dict, base.Boolean]:
    """Make a request to the Telegram Bot API.

    https://core.telegram.org/bots/api#making-requests

    :param method: API method name
    :param data: request parameters
    :param files: files to upload
    :return: the parsed API result
    """
    # Thin delegation: connection, token, proxy and timeout settings come
    # from this bot instance; everything else passes straight through.
    return await api.make_request(self.session, self.__token, method, data, files,
                                  proxy=self.proxy, proxy_auth=self.proxy_auth, timeout=self.timeout,
                                  **kwargs)
Make an request to Telegram Bot API https://core.telegram.org/bots/api#making-requests :param method: API method :type method: :obj:`str` :param data: request parameters :type data: :obj:`dict` :param files: files :type files: :obj:`dict` :return: result :rtype: Union[List, Dict] :raise: :obj:`aiogram.exceptions.TelegramApiError`
def get_conversation_between(self, um_from_user, um_to_user):
    """Returns a conversation between two users.

    Includes messages sent in either direction, excluding those the
    respective party has deleted.
    """
    sent = Q(sender=um_from_user,
             recipients=um_to_user,
             sender_deleted_at__isnull=True)
    received = Q(sender=um_to_user,
                 recipients=um_from_user,
                 messagerecipient__deleted_at__isnull=True)
    return self.filter(sent | received)
Returns a conversation between two users
def vsepg(v1, v2, ndim):
    """Find the separation angle in radians between two double precision
    vectors of arbitrary dimension (zero if either vector is zero).

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsepg_c.html

    :param v1: First vector
    :param v2: Second vector
    :param ndim: The number of elements in v1 and v2.
    :return: separation angle in radians
    """
    c_v1 = stypes.toDoubleVector(v1)
    c_v2 = stypes.toDoubleVector(v2)
    c_ndim = ctypes.c_int(ndim)
    return libspice.vsepg_c(c_v1, c_v2, c_ndim)
Find the separation angle in radians between two double precision vectors of arbitrary dimension. This angle is defined as zero if either vector is zero. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsepg_c.html :param v1: First vector :type v1: Array of floats :param v2: Second vector :type v2: Array of floats :param ndim: The number of elements in v1 and v2. :type ndim: int :return: separation angle in radians :rtype: float
def read_dir_tree(self, file_hash):
    """Recursively read the directory structure beginning at *file_hash*.

    :returns: a nested dict of the form
        ``{'files': ..., 'dirs': {name: subtree, ...}}``.
    """
    # Fetch the serialized tree object for this hash.
    json_d = self.read_index_object(file_hash, 'tree')
    node = {'files' : json_d['files'], 'dirs' : {}}
    # NOTE: iteritems() is Python 2 only.
    for name, hsh in json_d['dirs'].iteritems():
        # Recurse into each sub-directory by its hash.
        node['dirs'][name] = self.read_dir_tree(hsh)
    return node
Recursively read the directory structure beginning at hash
def is_lossy(label):
    """Check a Label file for the compression type.

    :param label: label file to inspect with ``getkey``.
    :returns: True when INST_CMPRS_TYPE is 'LOSSY', False otherwise.
    """
    val = getkey(from_=label, keyword='INST_CMPRS_TYPE').decode().strip()
    # Direct comparison replaces the redundant if/else returning booleans.
    return val == 'LOSSY'
Check Label file for the compression type.
def _get_anchor(module_to_name, fullname):
    """Turn a full member name into an HTML anchor.

    The longest module-name prefix of *fullname* is removed to make the
    anchor.

    Raises:
        ValueError: If fullname uses characters invalid in an anchor.
    """
    if not _anchor_re.match(fullname):
        raise ValueError("'%s' is not a valid anchor" % fullname)
    anchor = fullname
    for module_name in module_to_name.values():
        prefix = module_name + "."
        if not fullname.startswith(prefix):
            continue
        suffix = fullname[len(prefix):]
        # Keep the shortest remainder, i.e. strip the longest prefix.
        if len(suffix) < len(anchor):
            anchor = suffix
    return anchor
Turn a full member name into an anchor. Args: module_to_name: Dictionary mapping modules to short names. fullname: Fully qualified name of symbol. Returns: HTML anchor string. The longest module name prefix of fullname is removed to make the anchor. Raises: ValueError: If fullname uses characters invalid in an anchor.
def union(self, rdds):
    """Build the union of a list of RDDs.

    Supports unions of RDDs with different serialized formats; when they
    differ, every RDD is reserialized with the default serializer first.
    """
    first_jrdd_deserializer = rdds[0]._jrdd_deserializer
    # Normalize serialization so the JVM-side union sees one format.
    if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
        rdds = [x._reserialize() for x in rdds]
    # Build a Java array of the underlying JavaRDDs for the JVM union.
    cls = SparkContext._jvm.org.apache.spark.api.java.JavaRDD
    jrdds = SparkContext._gateway.new_array(cls, len(rdds))
    for i in range(0, len(rdds)):
        jrdds[i] = rdds[i]._jrdd
    return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
Build the union of a list of RDDs. This supports unions() of RDDs with different serialized formats, although this forces them to be reserialized using the default serializer: >>> path = os.path.join(tempdir, "union-text.txt") >>> with open(path, "w") as testFile: ... _ = testFile.write("Hello") >>> textFile = sc.textFile(path) >>> textFile.collect() [u'Hello'] >>> parallelized = sc.parallelize(["World!"]) >>> sorted(sc.union([textFile, parallelized]).collect()) [u'Hello', 'World!']
def mark_time(times, msg=None):
    """Time measurement utility.

    Measures times of execution between subsequent calls. The current
    timestamp is appended to *times*; when *msg* is given and at least
    one earlier mark exists, the elapsed time since the previous mark is
    printed.

    :param times: list the timestamps are accumulated in (mutated).
    :param msg: optional label to print with the elapsed time.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is its
    # documented replacement for interval measurement. The Python 2
    # print statement is replaced by the print() function.
    now = time.perf_counter()
    times.append(now)
    if msg is not None and len(times) > 1:
        print(msg, times[-1] - times[-2])
Time measurement utility. Measures times of execution between subsequent calls using time.clock(). The time is printed if the msg argument is not None. Examples -------- >>> times = [] >>> mark_time(times) ... do something >>> mark_time(times, 'elapsed') elapsed 0.1 ... do something else >>> mark_time(times, 'elapsed again') elapsed again 0.05 >>> times [0.10000000000000001, 0.050000000000000003]
def _get_sorted_action_keys(self, keys_list): action_list = [] for key in keys_list: if key.startswith('action-'): action_list.append(key) action_list.sort() return action_list
This function returns only the elements starting with 'action-' in 'keys_list'. The returned list is sorted by the index appended to the end of each element
async def generate_image(self, imgtype, face=None, hair=None):
    """Generate a basic image using the auto-image endpoint of weeb.sh.

    This function is a coroutine.

    Parameters:
        imgtype: str - generation type: awooo, eyes, or won.
        face: str - only used with awooo type, color of face
        hair: str - only used with awooo type, color of hair/fur

    Return Type: image data
    """
    if not isinstance(imgtype, str):
        raise TypeError("type of 'imgtype' must be str.")
    if face and not isinstance(face, str):
        raise TypeError("type of 'face' must be str.")
    if hair and not isinstance(hair, str):
        raise TypeError("type of 'hair' must be str.")
    if (face or hair) and imgtype != 'awooo':
        raise InvalidArguments('\'face\' and \'hair\' are arguments only available on the \'awoo\' image type')
    # Assemble the query string from the provided options.
    query = [f'type={imgtype}']
    if face:
        query.append('face=' + face)
    if hair:
        query.append('hair=' + hair)
    url = 'https://api.weeb.sh/auto-image/generate?' + '&'.join(query)
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=self.__headers) as resp:
            if resp.status != 200:
                raise Exception((await resp.json())['message'])
            return await resp.read()
Generate a basic image using the auto-image endpoint of weeb.sh. This function is a coroutine. Parameters: imgtype: str - type of the generation to create, possible types are awooo, eyes, or won. face: str - only used with awooo type, defines color of face hair: str - only used with awooo type, defines color of hair/fur Return Type: image data
def get_tx_identity_info(self, tx_ac):
    """Return features associated with a single transcript.

    :param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
    :type tx_ac: str
    :raises HGVSDataNotAvailableError: when no row matches *tx_ac*.
    """
    rows = self._fetchall(self._queries['tx_identity_info'], [tx_ac])
    if not rows:
        raise HGVSDataNotAvailableError(
            "No transcript definition for (tx_ac={tx_ac})".format(tx_ac=tx_ac))
    return rows[0]
returns features associated with a single transcript. :param tx_ac: transcript accession with version (e.g., 'NM_199425.2') :type tx_ac: str # database output -[ RECORD 1 ]--+------------- tx_ac | NM_199425.2 alt_ac | NM_199425.2 alt_aln_method | transcript cds_start_i | 283 cds_end_i | 1003 lengths | {707,79,410} hgnc | VSX1
def add_item(self, item, **options):
    """Add a layer or table item to the export.

    :param Layer|Table item: The Layer or Table to add
    :rtype: self
    """
    # Record the item URL first, then any extra export options.
    self.items.append(dict({"item": item.url}, **options))
    return self
Add a layer or table item to the export. :param Layer|Table item: The Layer or Table to add :rtype: self
def _calculate_duration(start_time, finish_time): if not (start_time and finish_time): return 0 start = datetime.datetime.fromtimestamp(start_time) finish = datetime.datetime.fromtimestamp(finish_time) duration = finish - start decimals = float(("0." + str(duration.microseconds))) return duration.seconds + decimals
Calculates how long it took to execute the testcase.
def task_path(cls, project, location, queue, task):
    """Return a fully-qualified task string."""
    template = "projects/{project}/locations/{location}/queues/{queue}/tasks/{task}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        location=location,
        queue=queue,
        task=task,
    )
Return a fully-qualified task string.
def _filehandle(self):
    """Return a filehandle to the file being tailed.

    Reopens the file when it has rolled (rotated/truncated) since the
    last open; on the very first open only, seeks to the end so tailing
    starts at new data instead of replaying the whole file.
    """
    # Drop a stale handle if the underlying file rolled.
    if self._fh and self._has_file_rolled():
        try:
            self._fh.close()
        except Exception:
            # Best-effort close; failure must not prevent re-opening.
            pass
        self._fh = None
    if not self._fh:
        self._open_file(self.filename)
        if not self.opened_before:
            self.opened_before = True
            # First open ever: start tailing from the current end.
            self._fh.seek(0, os.SEEK_END)
    return self._fh
Return a filehandle to the file being tailed
def copy(self, **params):
    """Creates a new Route instance, substituting the requested
    parameters and carrying over the rest from this one."""
    merged = {
        name: params.get(name, getattr(self, name))
        for name in ('owner', 'priority', 'key', 'final')
    }
    return Route(**merged)
Creates a new instance of the Route, substituting the requested parameters.
def find_by(cls, **kwargs) -> 'Entity':
    """Find a specific entity record that matches one or more criteria.

    :param kwargs: named arguments consisting of attr_name and attr_value
        pairs to search on
    :raises ObjectNotFoundError: when no record matches the criteria
    """
    logger.debug(f'Lookup `{cls.__name__}` object with values '
                 f'{kwargs}')
    results = cls.query.filter(**kwargs).limit(1).all()
    if not results:
        raise ObjectNotFoundError(
            f'`{cls.__name__}` object with values {[item for item in kwargs.items()]} '
            f'does not exist.')
    # NOTE(review): `results` comes from `.all()`; accessing `.first` as an
    # attribute (not calling it) looks suspect -- confirm the query API
    # returns an object exposing `.first`, otherwise this should likely be
    # `results[0]`.
    return results.first
Find a specific entity record that matches one or more criteria. :param kwargs: named arguments consisting of attr_name and attr_value pairs to search on