code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def end_poll(args):
    """End an open poll (admin only).

    ``args.msg`` must hold the poll number. Returns a user-facing status
    message string for every outcome.
    """
    if not args.isadmin:
        return "Nope, not gonna do it."
    msg = args.msg
    if not msg:
        return "Syntax: !vote end <pollnum>"
    if not msg.isdigit():
        return "Not A Valid Positive Integer."
    target = get_open_poll(args.session, int(msg))
    if target is None:
        return "That poll doesn't exist or has already been deleted!"
    if target.active == 0:
        return "Poll already ended!"
    target.active = 0
    return "Poll ended!"
Ends a poll.
def _update(self, uri, body, **kwargs):
    """Send a PUT to `uri` with `body` and return the parsed response body.

    Runs the "modify_body_for_update" hooks on `body` first so subclasses
    can adjust the payload before it is sent to the API.
    """
    self.run_hooks("modify_body_for_update", body, **kwargs)
    resp, resp_body = self.api.method_put(uri, body=body)
    return resp_body
Handles the communication with the API when updating a specific resource managed by this class.
def get_release_id(self, package_name: str, version: str) -> bytes:
    """Return the 32-byte release identifier for `package_name`/`version`
    on the current registry.

    Validates both arguments and that a registry is set before delegating
    to the registry's lookup.
    """
    validate_package_name(package_name)
    validate_package_version(version)
    self._validate_set_registry()
    return self.registry._get_release_id(package_name, version)
Returns the 32 byte identifier of a release for the given package name and version, if they are available on the current registry.
def collect_ip6table(self, tablename):
    """Collect `ip6tables -nvL` output for `tablename`, but only when its
    kernel module (ip6table_<tablename>) is loaded."""
    module = "ip6table_" + tablename
    if not self.check_ext_prog("grep -q %s /proc/modules" % module):
        return
    self.add_cmd_output("ip6tables -t " + tablename + " -nvL")
Same as function above, but for ipv6
def validate_known_curve():
    """Visually validate SuperSmoother on y = sin(4x) over [-1, 1].

    Plots the smoothed result using both the slow and the fast
    basic-smoother implementations alongside the raw data, then shows the
    figure interactively.
    """
    plt.figure()
    N = 100
    x = numpy.linspace(-1, 1, N)
    y = numpy.sin(4 * x)
    smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmootherSlowUpdate
    smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother)
    plt.plot(x, smooth.smooth_result, label='Slow')
    smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmoother
    smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother)
    plt.plot(x, smooth.smooth_result, label='Fast')
    plt.plot(x, y, '.', label='data')
    plt.legend()
    plt.show()
Validate on a sin function.
def _micro_service_filter(cls):
    """Return True for strict MicroService subclasses, excluding the
    MicroService, ResponseMicroService and RequestMicroService bases
    themselves.

    :type cls: type
    :param cls: A class object
    :rtype: bool
    :return: True if match, else False
    """
    if not issubclass(cls, MicroService):
        return False
    bases = (MicroService, ResponseMicroService, RequestMicroService)
    return all(cls != base for base in bases)
Will only give a find on classes that is a subclass of MicroService, with the exception that the class is not allowed to be a direct ResponseMicroService or RequestMicroService. :type cls: type :rtype: bool :param cls: A class object :return: True if match, else false
def infos(self, type=None, failed=False):
    """Get infos that originate from this node.

    :param type: subclass of Info to filter on (default: Info itself).
    :param failed: True, False or "all" to filter on the failed flag.
    :raises TypeError: if `type` is not a subclass of Info.
    :raises ValueError: if `failed` is not True, False or "all".
    """
    if type is None:
        type = Info
    if not issubclass(type, Info):
        raise TypeError(
            "Cannot get infos of type {} "
            "as it is not a valid type.".format(type)
        )
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid vector failed".format(failed))
    if failed == "all":
        return type.query.filter_by(origin_id=self.id).all()
    else:
        return type.query.filter_by(origin_id=self.id, failed=failed).all()
Get infos that originate from this node. Type must be a subclass of :class:`~dallinger.models.Info`, the default is ``Info``. Failed can be True, False or "all".
def with_lock(lock, func, *args, **kwargs):
    """Run `func(*args, **kwargs)` while holding `lock` (Deferred-based).

    Acquires the lock, invokes the callable, and releases the lock whether
    the call succeeds or fails; the returned Deferred fires with the
    callable's result (or failure) once the release has completed.

    :param lock: A BasicLock instance
    :param func: a callable to execute while the lock is held
    """
    d = lock.acquire()

    def release_lock(result):
        # Release first, then pass the original result/failure through.
        deferred = lock.release()
        return deferred.addCallback(lambda x: result)

    def lock_acquired(lock):
        return defer.maybeDeferred(func, *args, **kwargs).addBoth(release_lock)

    d.addCallback(lock_acquired)
    return d
A 'context manager' for performing operations requiring a lock. :param lock: A BasicLock instance :type lock: silverberg.lock.BasicLock :param func: A callable to execute while the lock is held. :type func: function
def _convert_char_to_type(self, type_char):
    """Normalize a read typecode to its one-character string form.

    :param type_char: typecode read from the stream (int or str)
    :return: the typecode as a string (using chr)
    :raise RuntimeError: unknown typecode
    """
    code = type_char
    if type(type_char) is int:
        code = chr(type_char)
    if code in self.TYPECODES_LIST:
        return code
    raise RuntimeError(
        "Typecode {0} ({1}) isn't supported.".format(type_char, code)
    )
Ensures a read character is a typecode. :param type_char: Read typecode :return: The typecode as a string (using chr) :raise RuntimeError: Unknown typecode
def bitlist_to_int(bitlist: Sequence[int]) -> int:
    """Convert a sequence of bits (most significant first) to an integer.

    >>> bitlist_to_int([1, 0, 0])
    4
    """
    return int(''.join(map(str, bitlist)), 2)
Converts a sequence of bits to an integer. >>> from quantumflow.utils import bitlist_to_int >>> bitlist_to_int([1, 0, 0]) 4
def get_game_id(self, date):
    """Return the Game ID for the game played on `date`.

    `date` must match the GAME_DATE values of the game log (e.g.
    "01/06/16" or "Jan 06, 2016"); raises IndexError if no game matches.
    """
    df = self.get_game_logs()
    game_id = df[df.GAME_DATE == date].Game_ID.values[0]
    return game_id
Returns the Game ID associated with the date that is passed in. Parameters ---------- date : str The date associated with the game whose Game ID. The date that is passed in can take on a numeric format of MM/DD/YY (like "01/06/16" or "01/06/2016") or the expanded Month Day, Year format (like "Jan 06, 2016" or "January 06, 2016"). Returns ------- game_id : str The desired Game ID.
def phisheye_term_list(self, include_inactive=False, **kwargs):
    """List the PhishEye terms configured for this account.

    Not charged against the API usage limit. Terms must be set up in the
    PhishEye web interface beforehand; there is no API call to add them.
    """
    return self._results('phisheye_term_list', '/v1/phisheye/term-list',
                         include_inactive=include_inactive,
                         items_path=('terms', ), **kwargs)
Provides a list of terms that are set up for this account. This call is not charged against your API usage limit. NOTE: The terms must be configured in the PhishEye web interface: https://research.domaintools.com/phisheye. There is no API call to set up the terms.
def store_extra_keys(self, d: Dict[str, Any]) -> None:
    """Merge `d` into the stored extra keys.

    Values in `d` override existing keys. A fresh dict is bound, so prior
    references to the old `self.extra_keys` are not mutated.

    :param d: dictionary to merge with the current extra keys.
    :returns: None
    """
    # dict(...) already builds a brand-new mapping, so the extra .copy()
    # the original made was redundant work.
    self.extra_keys = dict(self.extra_keys, **d)
Store several extra values in the messaging storage. :param d: dictionary entry to merge with current self.extra_keys. :returns: None
def get_edge(self, src_or_list, dst=None):
    """Retrieve Edge instance(s) by endpoints.

    Accepts either a (src, dst) list/tuple as the first argument or the
    two endpoints as separate arguments. For undirected graphs the
    reversed pair is matched too. Returns a (possibly empty) list of
    Edge objects.
    """
    if isinstance( src_or_list, (list, tuple)) and dst is None:
        edge_points = tuple(src_or_list)
        edge_points_reverse = (edge_points[1], edge_points[0])
    else:
        edge_points = (src_or_list, dst)
        edge_points_reverse = (dst, src_or_list)
    match = list()
    if edge_points in self.obj_dict['edges'] or (
            self.get_top_graph_type() == 'graph' and
            edge_points_reverse in self.obj_dict['edges']):
        # Fall back to the reversed key when the direct one is absent
        # (undirected-graph case).
        edges_obj_dict = self.obj_dict['edges'].get(
            edge_points,
            self.obj_dict['edges'].get(edge_points_reverse, None))
        for edge_obj_dict in edges_obj_dict:
            match.append(
                Edge(edge_points[0], edge_points[1],
                     obj_dict=edge_obj_dict))
    return match
Retrieve an edge from the graph. Given an edge's source and destination, the corresponding Edge instance(s) will be returned. If one or more edges exist with that source and destination, a list of Edge instances is returned. An empty list is returned otherwise.
def print_search_results(self, search_results, buf=sys.stdout):
    """Format `search_results` and print each (text, style) line to `buf`.

    Args:
        search_results (list of `ResourceSearchResult`): results to print.
        buf: writable stream; defaults to stdout.
    """
    # NOTE(review): the default binds the sys.stdout object present at
    # import time; later reassignment of sys.stdout is not picked up.
    formatted_lines = self.format_search_results(search_results)
    pr = Printer(buf)
    for txt, style in formatted_lines:
        pr(txt, style)
Print formatted search results. Args: search_results (list of `ResourceSearchResult`): Search to format.
def backend_from_mime(mime):
    """Import and return the backend module registered for `mime`.

    Unknown mime types fall back to DEFAULT_MIME; the fallback triggers a
    warning under FULLTEXT_TESTING and a debug log otherwise.
    """
    try:
        mod_name = MIMETYPE_TO_BACKENDS[mime]
    except KeyError:
        msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
        if 'FULLTEXT_TESTING' in os.environ:
            warn(msg)
        else:
            LOGGER.debug(msg)
        mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
    mod = import_mod(mod_name)
    return mod
Determine backend module object from a mime string.
def _register_converter(cls, conv_func, conv_type):
    """Append a ConverterFunctionInfo for `conv_func` and re-sort the
    converter list. Triggered by the @converter_function decorator.

    The current list length is used as the converter's registration index.
    """
    cls.converters.append(ConverterFunctionInfo(conv_func, conv_type,
                                                len(cls.converters)))
    cls._sort_converters()
Triggered by the @converter_function decorator
def get_dict(*keys, **extras):
    """Build a dict of the requested request attributes plus `extras`.

    `keys` must be drawn from the known set (url, args, form, data,
    origin, headers, files, json, method); unknown keys trip the assert.
    """
    _keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files',
             'json', 'method')
    assert all(map(_keys.__contains__, keys))
    data = request.data
    form = semiflatten(request.form)
    # Best-effort JSON decode of the raw body; None when not valid JSON.
    try:
        _json = json.loads(data.decode('utf-8'))
    except (ValueError, TypeError):
        _json = None
    d = dict(
        url=get_url(request),
        args=semiflatten(request.args),
        form=form,
        data=json_safe(data),
        origin=request.headers.get('X-Forwarded-For', request.remote_addr),
        headers=get_headers(),
        files=get_files(),
        json=_json,
        method=request.method,
    )
    out_d = dict()
    for key in keys:
        out_d[key] = d.get(key)
    out_d.update(extras)
    return out_d
Returns request dict of given keys.
def _reply_json(self, json_payload, status_code=200):
    """Serialize `json_payload` and write it as the HTTP response body.

    Sends the headers with `status_code` first; the payload must be
    JSON-serializable.
    """
    self._send_headers(status_code=status_code)
    json_str = json.dumps(json_payload)
    # wfile is a binary stream; writing the unencoded str raises
    # TypeError on Python 3, so encode explicitly.
    self.wfile.write(json_str.encode('utf-8'))
Send `json_payload` as the HTTP response: write the headers with the given status code, then the JSON-serialized body. The payload must be a JSON-serializable data structure.
def get_instance(self, payload):
    """Build a UserBindingInstance from an API `payload`.

    :param dict payload: payload response from the API
    :rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
    """
    return UserBindingInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        user_sid=self._solution['user_sid'],
    )
Build an instance of UserBindingInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance :rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
def get_authenticated_person(self):
    """Build a Person for this authenticated account from the service data.

    Returns None (after a debug log) when the payload is missing,
    malformed, or lacks the expected fields.
    """
    try:
        output = self._get_data()
        self._logger.debug(output)
        # NOTE(review): the positional layout (output[9][1] as the display
        # name) is assumed from the service response format — confirm.
        person = Person([
            self.email,
            output[9][1],
            None, None, None, None,
            [
                None, None,
                self.email,
                self.email
            ],
            None, None, None, None, None, None, None,
        ])
    except (IndexError, TypeError, InvalidData):
        self._logger.debug('Missing essential info, cannot instantiate authenticated person')
        return None
    return person
Retrieves the person associated with this account
def user_roles_exists(name, roles, database, user=None, password=None,
                      host=None, port=None, authdb=None):
    """Check whether MongoDB user `name` has all of `roles` on `database`.

    `roles` may be a list of role names (assumed to apply to `database`)
    or of {'role': ..., 'db': ...} dicts. Returns True/False, or an error
    string when the roles are malformed or the connection fails.

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname
    """
    try:
        roles = _to_dict(roles)
    except Exception:
        return 'Roles provided in wrong format'
    users = user_list(user, password, host, port, database, authdb)
    if isinstance(users, six.string_types):
        return 'Failed to connect to mongo database'
    for user in users:
        if name == dict(user).get('user'):
            for role in roles:
                # Expand a bare role name to the long {'role', 'db'} form.
                if not isinstance(role, dict):
                    role = {'role': role, 'db': database}
                if role not in dict(user).get('roles', []):
                    return False
            return True
    return False
Checks if a user of a MongoDB database has specified roles CLI Examples: .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017 .. code-block:: bash salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
def _with_env(self, env):
    """Recordset counterpart of the ``with_env`` class method: browse the
    same ids in `env` and return the resulting recordset."""
    return self._browse(env, self._ids)
As the `with_env` class method but for recordset.
def canonical_query_string(self):
    """The canonical query string: every key=value pair from the query
    parameters (excluding X-Amz-Signature) joined with '&' in sorted
    order."""
    pairs = [
        "%s=%s" % (key, value)
        for key, values in iteritems(self.query_parameters)
        if key != _x_amz_signature
        for value in values
    ]
    return "&".join(sorted(pairs))
The canonical query string from the query parameters. This takes the query string from the request and joins the key=value pairs in sorted order, omitting the X-Amz-Signature parameter.
def readCfgJson(cls, working_path):
    """Read the cmWalk configuration of `working_path` from its json file.

    :param working_path: directory to read the configuration from.
    :return: the parsed configuration object, or None if the file does
        not exist.
    """
    path = os.path.join(working_path, cls.CFG_JSON_FILENAME)
    if not os.path.isfile(path):
        return None
    with open(path) as json_file:
        return json.load(json_file)
Read cmWalk configuration data of a working directory from a json file. :param working_path: working path for reading the configuration data. :return: the configuration data represented in a json object, None if the configuration files does not exist.
def GetRandomDatetime():
    """Return a random datetime within the next week, truncated to the
    minute (seconds and microseconds zeroed)."""
    offset = timedelta(seconds=random.randint(0, 60 * 60 * 24 * 7))
    return (datetime.today() + offset).replace(second=0, microsecond=0)
Return a datetime in the next week.
def stats(self, *args):
    """Run the memcached "stats" command and return the parsed result.

    Values are converted via STAT_TYPES (defaulting to int); values that
    fail conversion are left as the raw strings.

    Args:
        *args: extra string arguments to the "stats" command.

    Returns:
        A dict of the returned stats.
    """
    result = self._fetch_cmd(b'stats', args, False)
    for key, value in list(result.items()):
        converter = STAT_TYPES.get(key, int)
        try:
            result[key] = converter(value)
        except Exception:
            # Best effort: keep the raw value when conversion fails.
            pass
    return result
The memcached "stats" command. The returned keys depend on what the "stats" command returns. A best effort is made to convert values to appropriate Python types, defaulting to strings when a conversion cannot be made. Args: *arg: extra string arguments to the "stats" command. See the memcached protocol documentation for more information. Returns: A dict of the returned stats.
def analyze(self, scratch, **kwargs):
    """Run and return the results from the Animation plugin.

    Walks every script's block stream; when an animation block is seen,
    _check_animation consumes the run and hands back a replacement
    generator plus the counts to tally.

    :returns: {'animation': Counter} of findings.
    """
    results = Counter()
    for script in self.iter_scripts(scratch):
        gen = self.iter_blocks(script.blocks)
        name = 'start'
        level = None
        while name != '':
            if name in self.ANIMATION:
                gen, count = self._check_animation(name, level, gen)
                results.update(count)
            # ('', 0, '') is the exhausted-stream sentinel that ends the loop.
            name, level, _ = next(gen, ('', 0, ''))
    return {'animation': results}
Run and return the results from the Animation plugin.
def parse_top_level(self):
    """Parse contacts until the input is exhausted.

    Repeatedly parses one contact and eats trailing whitespace until no
    input is left or no further contact can be parsed. Returns the
    collected (key, value) pairs as a dict (later keys overwrite earlier
    ones).
    """
    pairs = []
    while not self.eos:
        parsed = self.parse_contact()
        if not parsed:
            break
        pairs.append(parsed)
        self.parse_whitespace()
    return dict(pairs)
The top-level parser loops: it parses a single contact, then eats all whitespace, until there is no more input left or no further contact can be parsed. The collected (key, value) pairs are stored and returned as a dict.
def is_in_shard(self, s):
    """Return True iff the string `s` belongs to this shard.

    :param string s: the string to check.
    """
    shard = self.compute_shard(s, self._nshards)
    return shard == self._shard
Returns True iff the string s is in this shard. :param string s: The string to check.
def _symlink_remote_lib(self, gopath, go_remote_lib, required_links):
    """Symlink the remote lib's source files into `gopath`.

    Yields the fetched source files for `go_remote_lib` to _symlink_lib,
    which creates the links (duplicating directory structure for package
    isolation) and records them in `required_links`.
    """
    def source_iter():
        remote_lib_source_dir = self.context.products.get_data('go_remote_lib_src')[go_remote_lib]
        # Only files directly in the source dir are yielded here.
        for path in os.listdir(remote_lib_source_dir):
            remote_src = os.path.join(remote_lib_source_dir, path)
            if os.path.isfile(remote_src):
                yield (remote_src, os.path.basename(path))
    return self._symlink_lib(gopath, go_remote_lib, source_iter(), required_links)
Creates symlinks from the given gopath to the source files of the given remote lib. Also duplicates directory structure leading to source files of package within gopath, in order to provide isolation to the package. Adds the symlinks to the source files to required_links.
def get_command_line(instance_type, env, message, data, mode, open_notebook,
                     command_str):
    """Reconstruct the full `floyd run` command line as a single string.

    Flags are only emitted for non-default values; data items given as
    name:mountpoint get their name part normalized first.
    """
    floyd_command = ["floyd", "run"]
    if instance_type:
        floyd_command.append('--' + INSTANCE_NAME_MAP[instance_type])
    if env and not env == DEFAULT_ENV:
        floyd_command += ["--env", env]
    if message:
        floyd_command += ["--message", shell_quote(message)]
    if data:
        for data_item in data:
            parts = data_item.split(':')
            if len(parts) > 1:
                data_item = normalize_data_name(parts[0], use_data_config=False) + ':' + parts[1]
            floyd_command += ["--data", data_item]
    if mode and mode != "job":
        floyd_command += ["--mode", mode]
    if mode == 'jupyter':
        if not open_notebook:
            floyd_command.append("--no-open")
    else:
        # Only non-jupyter modes carry an explicit command string.
        if command_str:
            floyd_command.append(shell_quote(command_str))
    return ' '.join(floyd_command)
Return a string representing the full floyd command entered in the command line
def _create_injector(self, injector):
    """Instantiate the batch injector named by `injector`.

    Only "block_info" is known; any other name raises
    UnknownBatchInjectorError.
    """
    if injector == "block_info":
        block_info_injector = importlib.import_module(
            "sawtooth_validator.journal.block_info_injector")
        return block_info_injector.BlockInfoInjector(
            self._state_view_factory, self._signer)
    raise UnknownBatchInjectorError(injector)
Returns a new batch injector
def make_avro_schema(i, loader):
    """All-in-one convenience: build the avro representation of `i` and
    register it into a fresh Names object, which is returned.

    Call make_avro() and make_avsc_object() separately if you need the
    intermediate result for diagnostic output.
    """
    names = Names()
    avro = make_avro(i, loader)
    make_avsc_object(convert_to_dict(avro), names)
    return names
All in one convenience function. Call make_avro() and make_avro_schema_from_avro() separately if you need the intermediate result for diagnostic output.
def to_positions(self):
    """Convert stored fractional displacements into absolute positions.

    No-op when the trajectory already holds positions. Otherwise the
    cumulative sum of the displacements is added to the base positions
    and the displacement flag is cleared.
    """
    if not self.coords_are_displacement:
        return
    displacements = np.cumsum(self.frac_coords, axis=0)
    self.frac_coords = self.base_positions + displacements
    self.coords_are_displacement = False
Converts fractional coordinates of trajectory into positions
def verify_counter(self, signature, counter):
    """Verify that `counter` is strictly greater than the stored counter
    for the device matching the signature's keyHandle.

    On success the stored counter is updated and persisted and True is
    returned; a stale/replayed counter or an unknown keyHandle yields a
    falsy result.
    """
    devices = self.__get_u2f_devices()
    for device in devices:
        if device['keyHandle'] != signature['keyHandle']:
            # BUG FIX: the original returned False as soon as the FIRST
            # device failed to match, never examining later devices.
            continue
        if counter > device['counter']:
            device['counter'] = counter
            self.__save_u2f_devices(devices)
            return True
        # Matching device but non-increasing counter: possible replay.
        return False
    return False
Verifies that counter value is greater than previous signature
def use_service(bundle_context, svc_reference):
    """Context manager body to safely use a service in a "with" block.

    Yields the service for `svc_reference` and ungets it on exit,
    swallowing BundleException raised during the release.

    :param bundle_context: the calling bundle context
    :param svc_reference: the reference of the service to use
    :return: the requested service (yielded)
    :raise BundleException: service not found
    :raise TypeError: invalid (None) service reference
    """
    if svc_reference is None:
        raise TypeError("Invalid ServiceReference")
    try:
        yield bundle_context.get_service(svc_reference)
    finally:
        try:
            bundle_context.unget_service(svc_reference)
        except pelix.constants.BundleException:
            # Best effort: the service may already be gone.
            pass
Utility context to safely use a service in a "with" block. It looks after the the given service and releases its reference when exiting the context. :param bundle_context: The calling bundle context :param svc_reference: The reference of the service to use :return: The requested service :raise BundleException: Service not found :raise TypeError: Invalid service reference
def _remove_dependency(self, dependlist, i, isSubroutine, anexec):
    """Remove from `anexec` the dependency at `dependlist[i]` whose
    argument list and subroutine-ness match the call signature.

    `dependlist[i + 1]` holds the raw argument text; it is cleaned via
    the first candidate's clean() before comparison. Only the first
    matching entry is deleted.
    """
    if dependlist[i] in anexec.dependencies:
        all_depends = anexec.dependencies[dependlist[i]]
        if len(all_depends) > 0:
            clean_args = all_depends[0].clean(dependlist[i + 1])
            for idepend in range(len(all_depends)):
                if (all_depends[idepend].argslist == clean_args
                        and all_depends[idepend].isSubroutine == isSubroutine):
                    del anexec.dependencies[dependlist[i]][idepend]
                    break
Removes the specified dependency from the executable if it exists and matches the call signature.
def get_distance_matrix(self):
    """Compute the symmetric matrix of pairwise geodetic distances (km)
    between all points of the mesh.

    Requires one-dimensional coordinate arrays; point depth is ignored.
    Zeros on the main diagonal. Quadratic space/time, so only suitable
    for meshes up to a few thousand points.

    :returns: two-dimensional square numpy.matrix of distances.
    """
    assert self.lons.ndim == 1
    # Reshape lons/lats to columns so broadcasting produces the full
    # N x N distance matrix in one call.
    distances = geodetic.geodetic_distance(
        self.lons.reshape(self.lons.shape + (1, )),
        self.lats.reshape(self.lats.shape + (1, )),
        self.lons, self.lats)
    # NOTE(review): numpy.matrix is deprecated in modern numpy; callers
    # appear to rely on the matrix type, so it is kept here.
    return numpy.matrix(distances, copy=False)
Compute and return distances between each pairs of points in the mesh. This method requires that the coordinate arrays are one-dimensional. NB: the depth of the points is ignored .. warning:: Because of its quadratic space and time complexity this method is safe to use for meshes of up to several thousand points. For mesh of 10k points it needs ~800 Mb for just the resulting matrix and four times that much for intermediate storage. :returns: Two-dimensional numpy array, square matrix of distances. The matrix has zeros on main diagonal and positive distances in kilometers on all other cells. That is, value in cell (3, 5) is the distance between mesh's points 3 and 5 in km, and it is equal to value in cell (5, 3). Uses :func:`openquake.hazardlib.geo.geodetic.geodetic_distance`.
def get(self, sid):
    """Construct a FieldContext for the field identified by `sid`.

    :param sid: the unique string that identifies the resource
    :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldContext
    """
    return FieldContext(
        self._version,
        assistant_sid=self._solution['assistant_sid'],
        task_sid=self._solution['task_sid'],
        sid=sid,
    )
Constructs a FieldContext :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.task.field.FieldContext :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldContext
def external_answer(self, *sequences):
    """Try to get the answer from known external libraries.

    Skipped entirely when external use is disabled or a custom test_func
    is installed. Candidate libraries are tried in order; any exception
    from a library is swallowed and the next one is tried. Returns None
    when no library produced an answer.
    """
    if not getattr(self, 'external', False):
        return
    if hasattr(self, 'test_func') and self.test_func is not self._ident:
        return
    libs = libraries.get_libs(self.__class__.__name__)
    for lib in libs:
        if not lib.check_conditions(self, *sequences):
            continue
        if not lib.get_function():
            continue
        prepared_sequences = lib.prepare(*sequences)
        try:
            return lib.func(*prepared_sequences)
        except Exception:
            # Deliberate best-effort: fall through to the next library.
            pass
Try to get answer from known external libraries.
def _snapshot(self) -> Dict[str, Any]:
    """Snapshot this collection by recursively snapshotting every child.

    :raises SnapshotError: wrapping any failure from a child snapshot.
    """
    try:
        state = {}
        for name, item in self._nested_items.items():
            state[name] = item._snapshot
        return state
    except Exception as e:
        raise SnapshotError('Error while creating snapshot for {}'.format(self._name)) from e
Implements snapshot for collections by recursively invoking snapshot of all child items
def voronoi(data, line_color=None, line_width=2, f_tooltip=None, cmap=None,
            max_area=1e4, alpha=220):
    """Draw the voronoi tessellation of the points in `data`.

    Appends a VoronoiLayer to the global layer list. `line_color` or
    `cmap` controls styling, `max_area` scales the area coloring, and
    `alpha` sets the color transparency.
    """
    from geoplotlib.layers import VoronoiLayer
    _global_config.layers.append(VoronoiLayer(data, line_color, line_width,
                                              f_tooltip, cmap, max_area, alpha))
Draw the voronoi tesselation of the points :param data: data access object :param line_color: line color :param line_width: line width :param f_tooltip: function to generate a tooltip on mouseover :param cmap: color map :param max_area: scaling constant to determine the color of the voronoi areas :param alpha: color alpha
def nvrtcCompileProgram(self, prog, options):
    """Compile the NVRTC program object into PTX with the given options.

    `options` is a list of option strings, marshalled into a C string
    array; raises via _throw_on_error when NVRTC reports a failure.
    """
    options_array = (c_char_p * len(options))()
    options_array[:] = encode_str_list(options)
    code = self._lib.nvrtcCompileProgram(prog, len(options), options_array)
    self._throw_on_error(code)
    return
Compiles the NVRTC program object into PTX, using the provided options array. See the NVRTC API documentation for accepted options.
def make_article_info_correspondences(self, article_info_div):
    """Append the article's correspondence (contact) info, if any, to
    `article_info_div`, one sub-div per <corresp> element."""
    corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
    if corresps:
        corresp_div = etree.SubElement(article_info_div, 'div',
                                       {'id': 'correspondence'})
        for corresp in corresps:
            sub_div = etree.SubElement(corresp_div, 'div',
                                       {'id': corresp.attrib['id']})
            append_all_below(sub_div, corresp)
Articles generally provide a first contact, typically an email address for one of the authors. This will supply that content.
def _check_ubridge_version(self, env=None):
    """Check the uBridge executable's version and store it.

    Runs `ubridge -v`, parses the reported version and enforces the
    per-platform minimum; raises UbridgeError when the version is
    missing, too old, or the executable cannot be run.
    """
    try:
        output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir, env=env)
        # Raw string: "\." in a normal string literal is an invalid
        # escape (SyntaxWarning on modern Python).
        match = re.search(r"ubridge version ([0-9a-z\.]+)", output)
        if match:
            self._version = match.group(1)
            # Windows/macOS builds lag behind, so they accept an older release.
            if sys.platform.startswith("win") or sys.platform.startswith("darwin"):
                minimum_required_version = "0.9.12"
            else:
                minimum_required_version = "0.9.14"
            if parse_version(self._version) < parse_version(minimum_required_version):
                raise UbridgeError("uBridge executable version must be >= {}".format(minimum_required_version))
        else:
            raise UbridgeError("Could not determine uBridge version for {}".format(self._path))
    except (OSError, subprocess.SubprocessError) as e:
        raise UbridgeError("Error while looking for uBridge version: {}".format(e))
Checks the ubridge executable's version, raising UbridgeError when it cannot be determined or is below the required per-platform minimum.
def edit_section(self, id, course_section_end_at=None, course_section_name=None,
                 course_section_restrict_enrollments_to_section_dates=None,
                 course_section_sis_section_id=None, course_section_start_at=None):
    """Edit (modify) an existing course section via PUT /api/v1/sections/{id}.

    Only the non-None course_section_* arguments are sent as form data.
    Returns the updated section as a single item.
    """
    path = {}
    data = {}
    params = {}
    path["id"] = id
    if course_section_name is not None:
        data["course_section[name]"] = course_section_name
    if course_section_sis_section_id is not None:
        data["course_section[sis_section_id]"] = course_section_sis_section_id
    if course_section_start_at is not None:
        data["course_section[start_at]"] = course_section_start_at
    if course_section_end_at is not None:
        data["course_section[end_at]"] = course_section_end_at
    if course_section_restrict_enrollments_to_section_dates is not None:
        data["course_section[restrict_enrollments_to_section_dates]"] = course_section_restrict_enrollments_to_section_dates
    self.logger.debug("PUT /api/v1/sections/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/sections/{id}".format(**path), data=data, params=params, single_item=True)
Edit a section. Modify an existing section.
def _readResponse(self):
    """Yield each row of the response until !done is received.

    :throws TrapError: if exactly one !trap is received.
    :throws MultiTrapError: if more than one !trap is received.
    """
    traps = []
    reply_word = None
    while reply_word != '!done':
        reply_word, words = self._readSentence()
        if reply_word == '!trap':
            traps.append(TrapError(**words))
        elif reply_word in ('!re', '!done') and words:
            yield words
    # Traps are raised only after the whole response has been consumed.
    if len(traps) > 1:
        raise MultiTrapError(*traps)
    elif len(traps) == 1:
        raise traps[0]
Yield each row of response untill !done is received. :throws TrapError: If one !trap is received. :throws MultiTrapError: If > 1 !trap is received.
def occurrence(self, file_name=None, path=None, date=None):
    """Add a file Occurrence to this indicator.

    Only valid for File indicators; returns None for any other type.

    Args:
        file_name (str, optional): file name for this occurrence.
        path (str, optional): file path for this occurrence.
        date (str, optional): datetime expression for this occurrence.

    Returns:
        obj: the new FileOccurrence instance, or None.
    """
    if self._indicator_data.get('type') != 'File':
        return None
    new_occurrence = FileOccurrence(file_name, path, date)
    self._occurrences.append(new_occurrence)
    return new_occurrence
Add a file Occurrence. Args: file_name (str, optional): The file name for this occurrence. path (str, optional): The file path for this occurrence. date (str, optional): The datetime expression for this occurrence. Returns: obj: An instance of Occurrence.
def get_answer_mdata():
    """Return the default metadata map for Answer.

    The single 'item' entry describes an osid.id.Id-valued, optional,
    writable, non-array field.
    """
    return {
        'item': {
            'element_label': {
                'text': 'item',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'accepts an osid.id.Id object',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
    }
Return default mdata map for Answer
def get_scheduling_block(sub_array_id, block_id):
    """Return one scheduling block instance of a sub-array.

    :returns: (block dict, HTTPStatus.OK) when `block_id` belongs to the
        sub-array, otherwise ({'error': ...}, HTTPStatus.NOT_FOUND).
    """
    block_ids = DB.get_sub_array_sbi_ids(sub_array_id)
    if block_id in block_ids:
        block = DB.get_block_details([block_id]).__next__()
        return block, HTTPStatus.OK
    return dict(error="unknown id"), HTTPStatus.NOT_FOUND
Return the scheduling block instance with `block_id` for the given sub-array, or a not-found error payload when the id does not belong to it.
def insert(self, text: str):
    """Undo-safe wrapper for the native ``insert`` method.

    |Args|

    * ``text`` (**str**): text to insert at the current position.

    |Returns|

    **None**
    """
    undoObj = UndoInsert(self, text)
    self.qteUndoStack.push(undoObj)
Undo safe wrapper for the native ``insert`` method. |Args| * ``text`` (**str**): text to insert at the current position. |Returns| **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
def ratio_to_delta(self, isos_ss, ratio, oneover=False):
    """Transform an isotope ratio into a delta value (per mil).

    Parameters
    ----------
    isos_ss : list or float
        Either a two-isotope list (e.g. ['N-14', 'N-15']) used to look up
        the solar-system ratio, or that ratio directly as a float.
    ratio : float
        Ratio of the isotopes to transform.
    oneover : bool
        Invert `ratio` before transforming (never the delta value).

    Returns
    -------
    float or None
        The delta value, or None (with a printed message) on bad input.
    """
    # isinstance instead of the `type(x) == float` anti-pattern.
    if isinstance(isos_ss, float):
        ss_ratio = isos_ss
    elif isinstance(isos_ss, list):
        ss_ratio = self.inut.isoratio_init(isos_ss)
    else:
        print('Check input of isos_ss into ratio_to_delta routine')
        return None
    if oneover:
        # old_div is unnecessary: the ratios here are floats, so plain
        # true division is equivalent.
        ratio = 1 / ratio
    return (ratio / ss_ratio - 1.) * 1000.
Transforms an isotope ratio into a delta value Parameters ---------- isos_ss: list or float list w/ isotopes, e.g., ['N-14','N-15'] OR the solar system ratio. ratio : float ratio of the isotopes to transform. oneover : boolean take the inverse of the ratio before transforming (never inverse of delta value!). The default is False. Returns ------- float delta value
def union(seq1=(), *seqs):
    r"""Return the set union of `seq1` and `seqs`, duplicates removed,
    order random (single-sequence calls preserve order).

    Examples:
        >>> union()
        []
        >>> union([1, 2, 3])
        [1, 2, 3]
    """
    # Fast path: with a single sequence, keep its order and just listify.
    if not seqs:
        return list(seq1)
    res = set(seq1)
    for seq in seqs:
        # set.update accepts any iterable directly; wrapping each one in
        # set() (as the original did) was wasted work.
        res.update(seq)
    return list(res)
r"""Return the set union of `seq1` and `seqs`, duplicates removed, order random. Examples: >>> union() [] >>> union([1,2,3]) [1, 2, 3] >>> union([1,2,3], {1:2, 5:1}) [1, 2, 3, 5] >>> union((1,2,3), ['a'], "bcd") ['a', 1, 2, 3, 'd', 'b', 'c'] >>> union([1,2,3], iter([0,1,1,1])) [0, 1, 2, 3]
def extra_html_properties(self, prefix=None, postfix=None, template_type=None):
    """Return extra html class names so individual apps can be styled
    separately.

    The returned string is injected unescaped into templates.
    """
    prefix = prefix or "django-plotly-dash"
    template_type = template_type or "iframe"
    post_part = "-%s" % postfix if postfix else ""
    slugified_id = self.slugified_id()
    return (
        "{prefix} {prefix}-{template_type} "
        "{prefix}-app-{slugified_id}{post_part}".format(
            prefix=prefix,
            template_type=template_type,
            slugified_id=slugified_id,
            post_part=post_part,
        )
    )
Return extra html properties to allow individual apps to be styled separately. The content returned from this function is injected unescaped into templates.
def merge_variables(variables, **kwargs):
    """Concatenate Variables along the row axis, grouping by name.

    Args:
        variables (list): Variables to merge; all Variables sharing a
            name are concatenated together.

    Returns:
        A list of merged Variables.
    """
    var_dict = OrderedDict()
    for v in variables:
        if v.name not in var_dict:
            var_dict[v.name] = []
        var_dict[v.name].append(v)
    # NOTE(review): this re-invokes `merge_variables` on each same-name
    # group; as written that recurses onto the identical grouping and
    # would not terminate — presumably a different merge_variables (e.g.
    # imported from another module) is intended here. Confirm.
    return [merge_variables(vars_, **kwargs)
            for vars_ in list(var_dict.values())]
Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables.
def delete_user_from_group(self, GroupID, UserID):
    """Delete user `UserID` from group `GroupID`."""
    # Lazy %-style logging arguments: the message is only interpolated
    # when the INFO level is actually emitted.
    log.info('Delete user %s from group %s', UserID, GroupID)
    self.put('groups/%s/delete_user/%s.json' % (GroupID, UserID))
Delete a user from a group.
def hour(self, value=None):
    """Corresponds to IDD Field `hour`.

    Args:
        value (int): value for IDD Field `hour`; 1 <= value <= 24.
            None is treated as a missing value and stored unchecked.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} need to be of type int '
                             'for field `hour`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 '
                             'for field `hour`')
        if value > 24:
            raise ValueError('value need to be smaller 24 '
                             'for field `hour`')
    self._hour = value
Corresponds to IDD Field `hour` Args: value (int): value for IDD Field `hour` value >= 1 value <= 24 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def check_staged(filename=None):
    """Check if there are 'changes to be committed' in the index.

    Uses `git diff-index --quiet --cached HEAD`; exit status 1 means
    staged changes exist, 0 means none, anything else is an error.
    """
    retcode, _, stdout = git['diff-index', '--quiet', '--cached',
                             'HEAD', filename].run(retcode=None)
    if retcode == 1:
        return True
    elif retcode == 0:
        return False
    else:
        raise RuntimeError(stdout)
Check if there are 'changes to be committed' in the index.
def _update_Prxy_diag(self):
    """Update `D`, `A`, `Ainv` from `Prxy`, `prx`.

    For each site r, the matrix Prxy[r] is symmetrized via a similarity
    transform with the prx[r]**0.5 weights, eigendecomposed with
    scipy.linalg.eigh, and the (un-symmetrized) eigen-basis change
    matrices plus eigenvalues are stored.
    """
    for r in range(self.nsites):
        pr_half = self.prx[r]**0.5
        pr_neghalf = self.prx[r]**-0.5
        # symm_pr = diag(pr_half) . Prxy[r] . diag(pr_neghalf), built by
        # row/column broadcasting with two transposes.
        symm_pr = (pr_half * (self.Prxy[r] * pr_neghalf).transpose()).transpose()
        (evals, evecs) = scipy.linalg.eigh(symm_pr)
        self.D[r] = evals
        self.Ainv[r] = evecs.transpose() * pr_half
        self.A[r] = (pr_neghalf * evecs.transpose()).transpose()
Update `D`, `A`, `Ainv` from `Prxy`, `prx`.
def get_face_colors(self, indexed=None):
    """Get the face colors.

    Parameters
    ----------
    indexed : str | None
        None returns the (Nf, 4) face-color array; 'faces' returns the
        same colors expanded to (Nf, 3, 4), one entry per vertex.

    Returns
    -------
    colors : ndarray
        The colors.
    """
    if indexed is None:
        return self._face_colors
    if indexed == 'faces':
        # Build (and cache) the per-vertex expansion lazily.
        if (self._face_colors_indexed_by_faces is None and
                self._face_colors is not None):
            Nf = self._face_colors.shape[0]
            self._face_colors_indexed_by_faces = np.empty(
                (Nf, 3, 4), dtype=self._face_colors.dtype)
            self._face_colors_indexed_by_faces[:] = \
                self._face_colors.reshape(Nf, 1, 4)
        return self._face_colors_indexed_by_faces
    # ValueError instead of the original bare Exception; it is still an
    # Exception subclass, so existing broad handlers keep working.
    raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
Get the face colors Parameters ---------- indexed : str | None If indexed is None, return (Nf, 4) array of face colors. If indexed=='faces', then instead return an indexed array (Nf, 3, 4) (note this is just the same array with each color repeated three times). Returns ------- colors : ndarray The colors.
def get_all_children(self):
    """Return all descendants (children, grandchildren, ...) in
    breadth-first order of discovery.

    :returns: all children including children of children
    :rtype: list of :class:`Reftrack`
    :raises: None
    """
    descendants = self._children[:]
    i = 0
    # Treat the list as a work queue: every node appended gets its own
    # children appended in turn.
    while i < len(descendants):
        descendants.extend(descendants[i]._children)
        i += 1
    return descendants
Get all children including children of children :returns: all children including children of children :rtype: list of :class:`Reftrack` :raises: None
def add(self, fact):
    """Create a VALID token for `fact` and send it to all children."""
    token = Token.valid(fact)
    MATCHER.debug("<BusNode> added %r", token)
    for child in self.children:
        child.callback(token)
Create a VALID token and send it to all children.
def stop_sync(self):
    """Synchronously stop this adapter.

    Disconnects every open connection (ignoring HardwareError from
    already-dead links), then shuts down the client and the connection
    manager.
    """
    conn_ids = self.conns.get_connections()
    # Copy the ids first: disconnecting mutates the underlying collection.
    for conn in list(conn_ids):
        try:
            self.disconnect_sync(conn)
        except HardwareError:
            pass
    self.client.disconnect()
    self.conns.stop()
Synchronously stop this adapter
def _start_check_timer(self):
    """(Re)arm the 2.5 s daemon timer that polls `check_complete`.

    Any previously running timer is cancelled first.
    """
    if self.timer:
        self.timer.cancel()
    timer = threading.Timer(2.5, self.check_complete)
    timer.daemon = True
    self.timer = timer
    timer.start()
Periodically checks to see if the task has completed.
def under_attack(col, queens):
    """Check whether a queen placed in column `col` on the next row is
    attacked by any already-placed queen.

    :param col: column number of the new queen
    :param queens: placed queens as (row, column) pairs
    :return: True iff the new queen is under attack
    """
    left, right = col - 1, col + 1
    for _, qcol in reversed(queens):
        # Same column, or on either diagonal for this distance.
        if qcol in (left, col, right):
            return True
        left -= 1
        right += 1
    return False
Checks if queen is under attack :param col: Column number :param queens: list of queens :return: True iff queen is under attack
def transformer_big():
    """HParams for the transformer "big" model on WMT.

    The base hparams with larger hidden/filter sizes, bigger batch, more
    attention heads and heavier layer-prepostprocess dropout.
    """
    hparams = transformer_base()
    hparams.hidden_size = 1024
    hparams.filter_size = 4096
    hparams.batch_size = 2048
    hparams.num_heads = 16
    hparams.layer_prepostprocess_dropout = 0.3
    return hparams
HParams for transformer big model on WMT.
def roll_mean(input, window):
    """Apply a rolling mean to an array.

    NaN samples are excluded from each window's average; a window with no
    valid samples yields NaN.

    :raises ValueError: when `window` exceeds the input length.
    """
    length = len(input)
    if window > length:
        raise ValueError('Out of bound')
    output = np.ndarray(length - window + 1, dtype=input.dtype)
    valid_count = 0
    running_sum = 0.
    # Prime the first window.
    for value in input[:window]:
        if value == value:  # value == value is False only for NaN
            valid_count += 1
            running_sum += value
    output[0] = np.nan if not valid_count else running_sum / valid_count
    # Slide: drop the tail sample, add the new one.
    tail = 0
    for value in input[window:]:
        dropped = input[tail]
        if dropped == dropped:
            running_sum -= dropped
            valid_count -= 1
        if value == value:
            valid_count += 1
            running_sum += value
        tail += 1
        output[tail] = np.nan if not valid_count else running_sum / valid_count
    return output
Apply a rolling mean function to an array. This is a simple rolling aggregation.
def match_set(self, tokens, item):
    """Match a set pattern: `item` must be a Set of exactly the pattern's
    size containing every constant in the pattern."""
    match, = tokens
    structural_checks = [
        "_coconut.isinstance(" + item + ", _coconut.abc.Set)",
        "_coconut.len(" + item + ") == " + str(len(match)),
    ]
    for check in structural_checks:
        self.add_check(check)
    for const in match:
        self.add_check(const + " in " + item)
Matches a set.
def process_python_symbol_data(oedata):
    """Return a sorted list of (key, definition name, fold level, token)
    for every class/function entry in `oedata`.

    Falsy entries and the 'found_cell_separators' bookkeeping key are
    skipped.
    """
    symbols = [
        (key, entry.def_name, entry.fold_level, entry.get_token())
        for key, entry in oedata.items()
        if entry and key != 'found_cell_separators'
        and entry.is_class_or_function()
    ]
    return sorted(symbols)
Returns a list with line number, definition name, fold and token.
def chars(self, length, name, value=None, terminator=None):
    """Add a char array field to the template.

    `length` is in bytes and may reference earlier numeric fields; the
    special value '*' means the length is encoded from the value and
    decoded as all available bytes. `value` (optional) is a plain string
    or a regular expression prefixed with 'REGEXP:'.
    """
    self._add_field(Char(length, name, value, terminator))
Add a char array to template. `length` is given in bytes and can refer to earlier numeric fields in template. Special value '*' in length means that length is encoded to length of value and decoded as all available bytes. `value` is optional. `value` could be either a "String" or a "Regular Expression" and if it is a Regular Expression it must be prefixed by 'REGEXP:'. Examples: | chars | 16 | field | Hello World! | | u8 | charLength | | chars | charLength | field | | chars | * | field | Hello World! | | chars | * | field | REGEXP:^{[a-zA-Z ]+}$ |
def add_argument(self, dest, nargs=1, obj=None):
    """Register a positional argument named ``dest``.

    ``obj`` identifies the option in the order list returned from the
    parser; it defaults to ``dest`` itself.
    """
    identifier = dest if obj is None else obj
    self._args.append(Argument(dest=dest, nargs=nargs, obj=identifier))
Adds a positional argument named `dest` to the parser. The `obj` can be used to identify the option in the order list that is returned from the parser.
def set_cover(self, file_name, content, create_page=True):
    """Set the cover image and, optionally, create the cover page.

    :Args:
      - file_name: file name of the cover image
      - content: content for the cover image
      - create_page: whether a cover HTML page should also be added
        (bool, default True)
    """
    cover_image = EpubCover(file_name=file_name)
    cover_image.content = content
    self.add_item(cover_image)
    if create_page:
        self.add_item(EpubCoverHtml(image_name=file_name))
    self.add_metadata(
        None, 'meta', '',
        OrderedDict([('name', 'cover'), ('content', 'cover-img')]))
Set cover and create cover document if needed. :Args: - file_name: file name of the cover page - content: Content for the cover image - create_page: Should cover page be defined. Defined as bool value (optional). Default value is True.
def write(self, fptr):
    """Write a JPEG 2000 Signature box to file.

    The box is always 12 bytes: a 4-byte big-endian length (12), the
    4-byte box type, and the 4-byte signature payload.

    Args:
        fptr: writable binary file-like object.
    """
    # Box type is 'jP' followed by TWO spaces (0x6A502020) per
    # ISO/IEC 15444-1; the previous single-space literal produced an
    # invalid box type.
    fptr.write(struct.pack('>I4s', 12, b'jP  '))
    fptr.write(struct.pack('>BBBB', *self.signature))
Write a JPEG 2000 Signature box to file.
def get_candidate_electoral_votes(self, candidate):
    """Get all electoral votes awarded to ``candidate`` in this election."""
    candidate_election = CandidateElection.objects.get(
        election=self, candidate=candidate
    )
    return candidate_election.electoral_votes.all()
Get all electoral votes for a candidate in this election.
def _get_service_connection(self, service, service_command=None, create=True, timeout_ms=None): connection = self._service_connections.get(service, None) if connection: return connection if not connection and not create: return None if service_command: destination_str = b'%s:%s' % (service, service_command) else: destination_str = service connection = self.protocol_handler.Open( self._handle, destination=destination_str, timeout_ms=timeout_ms) self._service_connections.update({service: connection}) return connection
Based on the service, get the AdbConnection for that service or create one if it doesn't exist :param service: :param service_command: Additional service parameters to append :param create: If False, don't create a connection if it does not exist :return:
def hide(self):
    """Simulate switching the display OFF by forcing the contrast to zero.

    The contrast in effect beforehand is captured once so a later restore
    can bring it back; repeated calls must not overwrite it with zero.
    """
    if self._prev_contrast is None:
        self._prev_contrast = self._contrast
    self.contrast(0x00)
Simulates switching the display mode OFF; this is achieved by setting the contrast level to zero.
def AND(classical_reg1, classical_reg2):
    """Produce an AND instruction.

    NOTE: The order of operands was reversed in pyQuil <= 1.9.

    :param classical_reg1: The first classical register, which gets modified.
    :param classical_reg2: The second classical register or immediate value.
    :return: A ClassicalAnd instance.
    """
    target, source = unpack_reg_val_pair(classical_reg1, classical_reg2)
    return ClassicalAnd(target, source)
Produce an AND instruction. NOTE: The order of operands was reversed in pyQuil <=1.9 . :param classical_reg1: The first classical register, which gets modified. :param classical_reg2: The second classical register or immediate value. :return: A ClassicalAnd instance.
def createObjectMachine(machineType, **kwargs):
    """Instantiate and return an object machine of the requested type.

    @param machineType (str) A supported ObjectMachine type
    @param kwargs (dict) Constructor arguments forwarded to the class.

    Raises RuntimeError for unrecognised types.
    """
    if machineType not in ObjectMachineTypes.getTypes():
        raise RuntimeError("Unknown model type: " + machineType)
    factory = getattr(ObjectMachineTypes, machineType)
    return factory(**kwargs)
Return an object machine of the appropriate type. @param machineType (str) A supported ObjectMachine type @param kwargs (dict) Constructor argument for the class that will be instantiated. Keyword parameters specific to each model type should be passed in here.
def find_best_matching_node(self, new, old_nodes):
    """Return the first old node whose class name matches ``new``'s class
    name, or None when nothing matches."""
    target = new.__class__.__name__
    matches = [node for node in old_nodes
               if node.__class__.__name__ == target]
    if self.debug:
        print("Found matches for {}: {} ".format(new, matches))
    return matches[0] if matches else None
Find the node that best matches the new node given the old nodes. If no good match exists return `None`.
def _call(self, x):
    """Return the functional's value at the point ``x``.

    Integrates exp(x) - 1 (weighted by the prior, when one is set) over
    the domain via an inner product with the constant-one element.
    """
    shifted = np.exp(x) - 1
    if self.prior is None:
        integrand = self.domain.element(shifted)
    else:
        integrand = self.prior * shifted
    return integrand.inner(self.domain.one())
Return the value in the point ``x``.
def periods(ts, phi=0.0):
    """Measure the period of each successive oscillation of a phase variable.

    An oscillation starts and ends when the phase passes ``phi`` (default
    zero) after completing a full cycle. If the timeseries begins or ends
    exactly at ``phi``, that first/last oscillation is included.

    Arguments:
      ts: Timeseries of an angle variable (radians); single variable, or
          multivariate (handled channel by channel).
      phi (float): phase at which each oscillation starts and ends.
    """
    ts = np.squeeze(ts)
    if ts.ndim > 1:
        # Multivariate: compute per-channel periods and concatenate.
        return np.hstack(
            [ts[..., i].periods(phi) for i in range(ts.shape[-1])])
    return np.diff(phase_crossings(ts, phi))
For a single variable timeseries representing the phase of an oscillator, measure the period of each successive oscillation. An individual oscillation is defined to start and end when the phase passes phi (by default zero) after completing a full cycle. If the timeseries begins (or ends) exactly at phi, then the first (or last) oscillation will be included. Arguments: ts: Timeseries (single variable) The timeseries of an angle variable (radians) phi (float): A single oscillation starts and ends at phase phi (by default zero).
def get_device_elements(self):
    """Fetch the device list XML and return its DOM ``device`` elements."""
    raw_xml = self._aha_request('getdevicelistinfos')
    document = xml.dom.minidom.parseString(raw_xml)
    _LOGGER.debug(document)
    return document.getElementsByTagName("device")
Get the DOM elements for the device list.
def compose_args(self, action_name, in_argdict):
    """Compose the argument list from an argument dictionary, honouring
    default values.

    Args:
        action_name (str): The name of the action to be performed.
        in_argdict (dict): Arguments as a dict, e.g.
            ``{'InstanceID': 0, 'Speed': 1}``.

    Returns:
        list: a list of ``(name, value)`` tuples in signature order.

    Raises:
        AttributeError: If this service does not support the action.
        ValueError: If the argument lists do not match the action signature.
    """
    for action in self.actions:
        if action.name == action_name:
            break
    else:
        raise AttributeError('Unknown Action: {0}'.format(action_name))
    # Reject arguments the action signature does not mention.
    unexpected = set(in_argdict) - \
        set(argument.name for argument in action.in_args)
    if unexpected:
        raise ValueError(
            "Unexpected argument '{0}'. Method signature: {1}"
            .format(next(iter(unexpected)), str(action))
        )
    composed = []
    for argument in action.in_args:
        name = argument.name
        if name in in_argdict:
            composed.append((name, in_argdict[name]))
            continue
        if name in self.DEFAULT_ARGS:
            composed.append((name, self.DEFAULT_ARGS[name]))
            continue
        if argument.vartype.default is not None:
            composed.append((name, argument.vartype.default))
            # BUG FIX: previously control fell through to the ValueError
            # below even after the default had been appended, so any call
            # relying on a vartype default always raised.
            continue
        raise ValueError(
            "Missing argument '{0}'. Method signature: {1}"
            .format(argument.name, str(action))
        )
    return composed
Compose the argument list from an argument dictionary, with respect for default values. Args: action_name (str): The name of the action to be performed. in_argdict (dict): Arguments as a dict, e.g. ``{'InstanceID': 0, 'Speed': 1}``. The values can be a string or something with a string representation. Returns: list: a list of ``(name, value)`` tuples. Raises: `AttributeError`: If this service does not support the action. `ValueError`: If the argument lists do not match the action signature.
def _current_size(self):
    """Current document count, including additions and deletions pending in
    the change map."""
    removed, added, _ = Watch._extract_changes(
        self.doc_map, self.change_map, None)
    return len(self.doc_map) - len(removed) + len(added)
Returns the current count of all documents, including the changes from the current changeMap.
def pass_control_back(self, primary, secondary):
    """Tell a potential controller the address control is passed back to.

    :param primary: primary address (0-30) of the controller sending the
        command.
    :param secondary: secondary address (0-30); None indicates the sender
        does not use extended addressing.
    """
    if secondary is None:
        self._write(('*PCB', Integer(min=0, max=30)), primary)
        return
    self._write(
        ('*PCB', [Integer(min=0, max=30), Integer(min=0, max=30)]),
        primary, secondary
    )
The address to which the control is to be passed back. Tells a potential controller device the address to which the control is to be passed back. :param primary: An integer in the range 0 to 30 representing the primary address of the controller sending the command. :param secondary: An integer in the range of 0 to 30 representing the secondary address of the controller sending the command. If it is missing, it indicates that the controller sending this command does not have extended addressing.
def return_type(rettype):
    """Decorator factory: coerce a service function's return value through
    ``rettype``.

    On ValueError during conversion the response is switched to HTTP 500
    with a JSON error body instead of letting the exception propagate.
    """
    def wrap(f):
        @functools.wraps(f)
        def converter(*pargs, **kwargs):
            raw = f(*pargs, **kwargs)
            try:
                return rettype(raw)
            except ValueError as e:
                http_status(500, "Return Value Conversion Failed")
                content_type("application/json")
                return {"error": str(e)}
        return converter
    return wrap
Decorate a function to automatically convert its return type to a string using a custom function. Web-based service functions must return text to the client. Tangelo contains default logic to convert many kinds of values into string, but this decorator allows the service writer to specify custom behavior falling outside of the default. If the conversion fails, an appropriate server error will be raised.
def read(self, *, level=0, alignment=1) -> bytes:
    """Read the content of the texture into a buffer.

    Keyword Args:
        level (int): The mipmap level.
        alignment (int): The byte alignment of the pixels.

    Returns:
        bytes
    """
    content = self.mglo.read(level, alignment)
    return content
Read the content of the texture into a buffer. Keyword Args: level (int): The mipmap level. alignment (int): The byte alignment of the pixels. Returns: bytes
def _get_real_instance_if_mismatch(vpc_info, ipaddr, instance, eni):
    """Return the instance that actually owns ``ipaddr`` when it differs
    from the supplied instance/ENI pair.

    Returns None when no IP address is given, or when the address belongs
    to the same instance and ENI that were passed in.
    """
    if not ipaddr:
        return None
    expected_instance_id = instance.id if instance else ""
    expected_eni_id = eni.id if eni else ""
    real_instance, real_eni = find_instance_and_eni_by_ip(vpc_info, ipaddr)
    if (real_instance.id == expected_instance_id
            and real_eni.id == expected_eni_id):
        return None
    return real_instance
Return the real instance for the given IP address, if that instance is different than the passed in instance or has a different eni. If the ipaddr belongs to the same instance and eni that was passed in then this returns None.
def write(self, face, data, viewport=None, *, alignment=1) -> None:
    """Update the content of one face of the texture.

    Args:
        face (int): The face to update.
        data (bytes or Buffer): The pixel data.
        viewport (tuple): The viewport.

    Keyword Args:
        alignment (int): The byte alignment of the pixels.
    """
    payload = data.mglo if type(data) is Buffer else data
    self.mglo.write(face, payload, viewport, alignment)
Update the content of the texture. Args: face (int): The face to update. data (bytes): The pixel data. viewport (tuple): The viewport. Keyword Args: alignment (int): The byte alignment of the pixels.
def random_ipv4(cidr='10.0.0.0/8'):
    """Return a random host IPv4 address from the given CIDR block.

    :key str cidr: CIDR block
    :returns: An IPv4 address from the given CIDR block
    :rtype: ipaddress.IPv4Address
    """
    try:
        # Python 2: ipaddress requires text (unicode) input.
        cidr_text = unicode(cidr)
    except NameError:
        cidr_text = cidr
    net = ipaddress.ip_network(cidr_text)
    lo = int(net.network_address) + 1
    hi = int(net.broadcast_address)
    return ipaddress.ip_address(random.randrange(lo, hi))
Return a random IPv4 address from the given CIDR block. :key str cidr: CIDR block :returns: An IPv4 address from the given CIDR block :rtype: ipaddress.IPv4Address
def _WriteOutputValues(self, output_values):
    """Writes values as a single comma-separated output line.

    Args:
        output_values (list[str]): output values; non-string entries are
            blanked and embedded commas are replaced with spaces.
    """
    for index, value in enumerate(output_values):
        if not isinstance(value, py2to3.STRING_TYPES):
            value = ''
        output_values[index] = value.replace(',', ' ')
    line = ','.join(output_values)
    self._output_writer.Write('{0:s}\n'.format(line))
Writes values to the output. Args: output_values (list[str]): output values.
def is_valid_sid_for_new_standalone(sysmeta_pyxb):
    """Assert that any SID in ``sysmeta_pyxb`` can be assigned to a new
    standalone object; raise IdentifierNotUnique otherwise."""
    sid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'seriesId')
    if d1_gmn.app.did.is_valid_sid_for_new_standalone(sid):
        return
    raise d1_common.types.exceptions.IdentifierNotUnique(
        0,
        'Identifier is already in use as {}. did="{}"'.format(
            d1_gmn.app.did.classify_identifier(sid), sid
        ),
        identifier=sid,
    )
Assert that any SID in ``sysmeta_pyxb`` can be assigned to a new standalone object.
def iter_ROOT_classes():
    """Yield every ROOT class listed in the online class index."""
    index_url = "http://root.cern.ch/root/html/ClassIndex.html"
    document = minidom.parse(urlopen(index_url))
    for span in document.getElementsByTagName("span"):
        if ("class", "typename") not in span.attributes.items():
            continue
        name = span.childNodes[0].nodeValue
        try:
            yield getattr(QROOT, name)
        except AttributeError:
            # Listed in the index but not available in this ROOT build.
            pass
Iterator over all available ROOT classes
def prune_volumes(self, filters=None):
    """Delete unused volumes.

    Args:
        filters (dict): Filters to process on the prune list.

    Returns:
        (dict): A dict containing a list of deleted volume names and the
            amount of disk space reclaimed in bytes.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    params = {'filters': utils.convert_filters(filters)} if filters else {}
    url = self._url('/volumes/prune')
    return self._result(self._post(url, params=params), True)
Delete unused volumes Args: filters (dict): Filters to process on the prune list. Returns: (dict): A dict containing a list of deleted volume names and the amount of disk space reclaimed in bytes. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def _upload(self, items: Iterable[Tuple[str, str]]) -> None:
    """Upload a collection of (local path, remote key) pairs to S3.

    :param items: pairs of local source path and destination key; the
        bucket prefix is already part of each key.
    """
    for src, key in items:
        logger.info(f'Uploading {src} to {key}')
        mimetype = mimetypes.guess_type(src)[0]
        if mimetype is None:
            logger.warning(f'Could not guess MIME type for {src}')
            mimetype = 'application/octet-stream'
        logger.debug(f'Deduced MIME type: {mimetype}')
        self._bucket.upload_file(
            src, key, ExtraArgs={'ContentType': mimetype})
Upload a collection of paths to S3. :param items: An iterable of pairs containing the local path of the file to upload, and the remote path to upload it to. The prefix will be appended to each remote path.
def user(self, id, expand=None):
    """Get a user Resource from the server.

    :param id: ID of the user to get
    :type id: str
    :param expand: Extra information to fetch inside each resource
    :type expand: Optional[Any]
    :rtype: User
    """
    params = {} if expand is None else {'expand': expand}
    resource = User(self._options, self._session)
    resource.find(id, params=params)
    return resource
Get a user Resource from the server. :param id: ID of the user to get :type id: str :param expand: Extra information to fetch inside each resource :type expand: Optional[Any] :rtype: User
def write_pad_codewords(buff, version, capacity, length):
    """Write pad codewords iff the data does not fill the symbol capacity.

    :param buff: The byte buffer.
    :param int version: The (Micro) QR Code version.
    :param int capacity: Total capacity of the symbol (incl. error
        correction).
    :param int length: Length of the data bit stream.
    """
    if version in (consts.VERSION_M1, consts.VERSION_M3):
        # M1 / M3 symbols are padded with zero bits only.
        buff.extend([0] * (capacity - length))
        return
    # Alternate the two standard pad codewords (0xEC, 0x11) as bit tuples.
    pads = ((1, 1, 1, 0, 1, 1, 0, 0), (0, 0, 0, 1, 0, 0, 0, 1))
    for i in range(capacity // 8 - length // 8):
        buff.extend(pads[i % 2])
\ Writes the pad codewords iff the data does not fill the capacity of the symbol. :param buff: The byte buffer. :param int version: The (Micro) QR Code version. :param int capacity: The total capacity of the symbol (incl. error correction) :param int length: Length of the data bit stream.
def _unzip_handle(handle):
    """Transparently gunzip ``handle``, whether it is a filename string or
    an already-open file handle."""
    if isinstance(handle, basestring):
        return _gzip_open_filename(handle)
    return _gzip_open_handle(handle)
Transparently unzip the file handle
def on_origin(self, *args):
    """Re-point whenever the origin widget moves or resizes.

    If the origin is not assigned yet, retry on the next frame.
    """
    if self.origin is None:
        Clock.schedule_once(self.on_origin, 0)
        return
    self.origin.bind(
        pos=self._trigger_repoint, size=self._trigger_repoint
    )
Make sure to redraw whenever the origin moves.