code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def preserve_namespace(newns=None):
    """Context-manager generator that restores the current namespace.

    :param newns: name of a namespace to set on entry; the original
        namespace is restored afterwards. If None, does not set a namespace.
    :type newns: str | None
    :returns: None
    :rtype: None
    :raises: None
    """
    # Remember the absolute name of the current namespace.
    original = cmds.namespaceInfo(an=True)
    try:
        cmds.namespace(set=newns)
        yield
    finally:
        # Always switch back, even if the wrapped block raised.
        cmds.namespace(set=original)
def make_dir_structure(base_dir):
    """Make the build directory structure under ``base_dir``."""
    def ensure_dir(*parts):
        # Create one subdirectory, refusing to clobber a plain file.
        path = join(base_dir, *parts)
        if exists(path) and not isdir(path):
            raise IOError("File '{}' exists but is not a directory ".format(path))
        if not exists(path):
            makedirs(path)

    for subdir in (DOWNLOAD_DIR, PACKAGE_DIR, OLD_DIR):
        ensure_dir(subdir)
def paste(self):
    """Paste text from the clipboard into this edit."""
    clipboard_text = QApplication.clipboard().text()
    if self.isRichTextEditEnabled():
        super(XTextEdit, self).paste()
    else:
        # Plain-text mode: reduce the clipboard contents to ascii first.
        self.insertPlainText(projex.text.toAscii(clipboard_text))
def download_image(self, handle, dest):
    """Copy the file behind ``handle`` to ``dest``.

    Args:
        handle (str): path to copy over
        dest (str): path to copy to

    Returns:
        None
    """
    source = self._prefixed(handle)
    shutil.copyfile(source, dest)
def get_job_id_from_name(self, job_name):
    """Retrieve the first RUNNING job ID matching the given name.

    Returns None when no running job carries ``job_name``.
    """
    summaries = self._client.list_jobs(
        jobQueue=self._queue, jobStatus='RUNNING')['jobSummaryList']
    for job in summaries:
        if job['jobName'] == job_name:
            return job['jobId']
    return None
def filter_options(cls, kwargs, keys):
    """Make optional kwargs valid and optimized for each template engine.

    :param kwargs: keyword arguments to process
    :param keys: optional argument names

    >>> Engine.filter_options(dict(aaa=1, bbb=2), ("aaa", ))
    {'aaa': 1}
    >>> Engine.filter_options(dict(bbb=2), ("aaa", ))
    {}
    """
    return {k: v for k, v in filter_kwargs(keys, kwargs)}
async def _request(self, method, *args, **kwargs):
    """Send a request to the device and await the response.

    Serialises requests: the Modbus protocol ignores any request sent
    while another is being processed, so the driver assumes there is
    only one client instance and waits for in-flight requests.

    :raises TimeoutError: when not connected or the device is silent.
    :raises ConnectionError: on a pymodbus connection failure.
    """
    if not self.open:
        await self._connect()
    # Poll until any in-flight request completes.
    while self.waiting:
        await asyncio.sleep(0.1)
    if self.client.protocol is None or not self.client.protocol.connected:
        raise TimeoutError("Not connected to device.")
    try:
        future = getattr(self.client.protocol, method)(*args, **kwargs)
    except AttributeError:
        raise TimeoutError("Not connected to device.")
    self.waiting = True
    try:
        return await asyncio.wait_for(future, timeout=self.timeout)
    except asyncio.TimeoutError as e:
        if self.open:
            # Tell pymodbus the connection is gone before marking closed.
            if hasattr(self, 'modbus'):
                self.client.protocol_lost_connection(self.modbus)
            self.open = False
        raise TimeoutError(e)
    except pymodbus.exceptions.ConnectionException as e:
        raise ConnectionError(e)
    finally:
        self.waiting = False
def get_all_pipelines(app=''):
    """Get a list of all the Pipelines in *app*.

    Args:
        app (str): Name of Spinnaker Application.

    Returns:
        Parsed JSON payload of Pipelines returned by Gate.
    """
    url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)
    found = response.json()
    LOG.debug('Pipelines:\n%s', found)
    return found
def set_cookie(response, name, value, expiry_seconds=None, secure=False):
    """Set-cookie wrapper taking the expiry time as a number of seconds.

    Ensures the cookie name is correctly encoded for the response object.
    """
    if expiry_seconds is None:
        expiry_seconds = 90 * 24 * 60 * 60  # default: 90 days
    when = datetime.utcnow() + timedelta(seconds=expiry_seconds)
    expires = datetime.strftime(when, "%a, %d-%b-%Y %H:%M:%S GMT")
    try:
        response.set_cookie(name, value, expires=expires, secure=secure)
    except (KeyError, TypeError):
        # Some response implementations want a byte-string cookie name.
        response.set_cookie(name.encode('utf-8'), value, expires=expires,
                            secure=secure)
def _extra_stats(self): return ['loglr'] + \ ['{}_optimal_snrsq'.format(det) for det in self._data] + \ ['{}_matchedfilter_snrsq'.format(det) for det in self._data]
Adds ``loglr``, ``optimal_snrsq`` and matched filter snrsq in each detector to the default stats.
def histogram(title, title_x, title_y, x, bins_x):
    """Plot a basic histogram of ``x`` with the given titles and bins."""
    plt.figure()
    plt.hist(x, bins_x)
    # Apply the axis labels and title in one pass.
    for setter, text in ((plt.xlabel, title_x),
                         (plt.ylabel, title_y),
                         (plt.title, title)):
        setter(text)
def add_node_from_data(self, node: BaseEntity) -> BaseEntity:
    """Add an entity to the graph, wiring up its structural edges.

    Returns the node (whether newly added or already present).
    """
    assert isinstance(node, BaseEntity)
    if node in self:
        return node
    self.add_node(node)
    if VARIANTS in node:
        self.add_has_variant(node.get_parent(), node)
    elif MEMBERS in node:
        for member in node[MEMBERS]:
            self.add_has_component(node, member)
    elif PRODUCTS in node and REACTANTS in node:
        # Reactions: connect every reactant and product.
        for reactant in node[REACTANTS]:
            self.add_has_reactant(node, reactant)
        for product in node[PRODUCTS]:
            self.add_has_product(node, product)
    return node
def state_dict(self) -> Dict[str, Any]:
    """Return the scheduler state as a ``dict`` (everything except the
    optimizer reference)."""
    state = dict(self.__dict__)
    state.pop('optimizer', None)
    return state
def wiggleFileHandleToProtocol(self, fileHandle):
    """Return a continuous protocol object satisfying the query
    parameters, built from the given wiggle file handle."""
    for record in fileHandle:
        self.readWiggleLine(record)
    return self._data
def conv2d(self, x_in: Connection, w_in: Connection, receptive_field_size,
           filters_number, stride=1, padding=1, name=""):
    """Compute a 2-D convolution given 4-D input and filter tensors."""
    # Unfold the input into columns so the convolution becomes a matmul
    # (im2col-style — assumes tensor_3d_to_cols does that; confirm).
    cols = self.tensor_3d_to_cols(x_in, receptive_field_size,
                                  stride=stride, padding=padding)
    product = self.transpose(self.matrix_multiply(cols, w_in), 0, 2, 1)
    output = self.reshape(
        product, (-1, filters_number, receptive_field_size, receptive_field_size))
    output.name = name
    return output
def url_add_params(url, **kwargs):
    """Add query parameters to a URL, preserving existing query and fragment.

    >>> url_add_params('http://example.com/', a=1, b=3)
    'http://example.com/?a=1&b=3'
    >>> url_add_params('http://example.com/?c=8', a=1, b=3)
    'http://example.com/?c=8&a=1&b=3'
    >>> url_add_params('http://example.com/#/irock', a=1, b=3)
    'http://example.com/?a=1&b=3#/irock'
    >>> url_add_params('http://example.com/?id=10#/irock', a=1, b=3)
    'http://example.com/?id=10&a=1&b=3#/irock'
    """
    parsed_url = urlparse.urlsplit(url)
    params = urlparse.parse_qsl(parsed_url.query)
    # ``iteritems()`` was Python-2 only; ``items()`` works on both.
    params.extend(kwargs.items())
    parsed_url = list(parsed_url)
    parsed_url[3] = urllib.urlencode(params)
    return urlparse.urlunsplit(parsed_url)
def name(self):
    """Return the deftemplate name as a Python string."""
    raw = lib.EnvGetDeftemplateName(self._env, self._tpl)
    return ffi.string(raw).decode()
def get_cert_serial(cert_file):
    """Get the serial number of a certificate file.

    cert_file
        The certificate file to find the serial for

    CLI Example:

    .. code-block:: bash

        salt '*' certutil.get_cert_serial <certificate name>
    """
    cmd = "certutil.exe -silent -verify {0}".format(cert_file)
    out = __salt__['cmd.run'](cmd)
    matches = re.search(r":\s*(\w*)\r\n\r\n", out)
    if matches is None:
        return None
    return matches.groups()[0].strip()
def cut_references(text_lines):
    """Return the text lines with the reference section removed.

    Mutates ``text_lines`` in place when a reference section is found.
    """
    ref_sect_start = find_reference_section(text_lines)
    if ref_sect_start is None:
        current_app.logger.warning("Found no references to remove.")
        return text_lines
    start = ref_sect_start["start_line"]
    end = find_end_of_reference_section(
        text_lines, start, ref_sect_start["marker"],
        ref_sect_start["marker_pattern"])
    del text_lines[start:end + 1]
    return text_lines
def _check_iscsi_rest_patch_allowed(self):
    """Check whether PATCH is supported on the iSCSI BIOS resource.

    :returns: iscsi url.
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the command is not
        supported on the server.
    """
    headers, bios_uri, bios_settings = self._check_bios_resource()
    if not ('links' in bios_settings and 'iScsi' in bios_settings['links']):
        msg = ('"links/iScsi" section in bios'
               ' does not exist')
        raise exception.IloCommandNotSupportedError(msg)
    iscsi_uri = bios_settings['links']['iScsi']['href']
    status, headers, settings = self._rest_get(iscsi_uri)
    if status != 200:
        msg = self._get_extended_error(settings)
        raise exception.IloError(msg)
    if not self._operation_allowed(headers, 'PATCH'):
        # Fall back to the dedicated iSCSI settings resource and verify
        # that it supports PATCH instead.
        headers, iscsi_uri, settings = (
            self._get_iscsi_settings_resource(settings))
        self._validate_if_patch_supported(headers, iscsi_uri)
    return iscsi_uri
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):
    """Component-wise operation with no broadcasting.

    Args:
        tf_fn: a component-wise function taking n tf.Tensor inputs and
            producing a tf.Tensor output
        xs: n Tensors
        output_dtype: an optional dtype
        grad_function: an optional python function
        name: an optional string

    Returns:
        a Tensor
    """
    return slicewise(tf_fn,
                     xs,
                     output_dtype=output_dtype,
                     splittable_dims=xs[0].shape.dims,
                     grad_function=grad_function,
                     name=name or "cwise")
def encode_conjure_union_type(cls, obj):
    """Encode a conjure union into a JSON-serialisable dict."""
    # Find the attribute whose field definition matches the active member.
    attribute = None
    for attr, field_definition in obj._options().items():
        if field_definition.identifier == obj.type:
            attribute = attr
            break
    if attribute is None:
        raise ValueError(
            "could not find attribute for union "
            + "member {0} of type {1}".format(obj.type, obj.__class__)
        )
    defined_field_definition = obj._options()[attribute]
    return {
        "type": obj.type,
        defined_field_definition.identifier: cls.do_encode(
            getattr(obj, attribute)),
    }
def handle_message(self, msg):
    """Store a new lint message for later use.

    .. seealso:: :meth:`~JsonExtendedReporter.on_close`
    """
    entry = {
        'type': msg.category,
        'module': msg.module,
        'obj': msg.obj,
        'line': msg.line,
        'column': msg.column,
        'path': msg.path,
        'symbol': msg.symbol,
        'message': str(msg.msg) or '',
        'message-id': msg.msg_id,
    }
    self._messages.append(entry)
def save(self):
    """Save the UFO, deleting any muted glyphs first.

    Returns a ``(success, error_report)`` tuple.
    """
    for glyph_name in self.mutedGlyphsNames:
        if glyph_name not in self.font:
            continue
        if self.logger:
            self.logger.info("removing muted glyph %s", glyph_name)
        del self.font[glyph_name]
    # Make sure the destination directory exists before writing.
    directory = os.path.dirname(os.path.normpath(self.path))
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    try:
        self.font.save(os.path.abspath(self.path), self.ufoVersion)
    except defcon.DefconError as error:
        if self.logger:
            self.logger.exception("Error generating.")
        return False, error.report
    return True, None
def to_dict(self):
    """Convert the current Task into a dictionary.

    :return: python dictionary of the task description
    """
    # Output keys whose private attribute is simply '_' + key.
    simple_keys = (
        'uid', 'name', 'state', 'state_history', 'pre_exec', 'executable',
        'arguments', 'post_exec', 'cpu_reqs', 'gpu_reqs', 'lfs_per_process',
        'upload_input_data', 'copy_input_data', 'link_input_data',
        'move_input_data', 'copy_output_data', 'move_output_data',
        'download_output_data', 'stdout', 'stderr', 'exit_code', 'path',
        'tag',
    )
    task_desc_as_dict = {key: getattr(self, '_' + key) for key in simple_keys}
    task_desc_as_dict['parent_stage'] = self._p_stage
    task_desc_as_dict['parent_pipeline'] = self._p_pipeline
    return task_desc_as_dict
def add_component_definition(self, definition):
    """Add a ComponentDefinition to the document.

    :raises ValueError: if a definition with the same identity exists.
    """
    identity = definition.identity
    if identity in self._components:
        raise ValueError("{} has already been defined".format(identity))
    self._components[identity] = definition
def featureclass_to_json(fc):
    """Convert a feature class to JSON (requires ArcPy)."""
    if not arcpyFound:
        raise Exception("ArcPy is required to use this function")
    description = arcpy.Describe(fc)
    # Tables have no geometry and go through the recordset path.
    if description.dataType in ("Table", "TableView"):
        return recordset_to_json(table=fc)
    return arcpy.FeatureSet(fc).JSON
def format_ret(return_set, as_json=False):
    """Decouple result formatting; allow modifications to the return type.

    Returns a list of ``{"ip": address}`` dicts, or its JSON encoding
    when ``as_json`` is true.
    """
    ret_list = [{"ip": address} for address in return_set]
    if as_json:
        return json.dumps(ret_list)
    return ret_list
def from_content(cls, content):
    """Create a Guild from the HTML content of the guild's page.

    Parameters
    -----------
    content: :class:`str`
        The HTML content of the page.

    Returns
    ----------
    :class:`Guild`
        The guild contained in the page, or None if it doesn't exist.

    Raises
    ------
    InvalidContent
        If content is not the HTML of a guild's page.
    """
    if "An internal error has occurred" in content:
        return None
    parsed_content = parse_tibiacom_content(content)
    try:
        name_header = parsed_content.find('h1')
        guild = Guild(name_header.text.strip())
    except AttributeError:
        raise InvalidContent("content does not belong to a Tibia.com guild page.")
    if not guild._parse_logo(parsed_content):
        raise InvalidContent("content does not belong to a Tibia.com guild page.")
    info_container = parsed_content.find("div", id="GuildInformationContainer")
    for parser in (guild._parse_guild_info,
                   guild._parse_application_info,
                   guild._parse_guild_homepage,
                   guild._parse_guild_guildhall,
                   guild._parse_guild_disband_info):
        parser(info_container)
    guild._parse_guild_members(parsed_content)
    # The first listed member is the leader and owns the guildhall.
    if guild.guildhall and guild.members:
        guild.guildhall.owner = guild.members[0].name
    return guild
def _get_dbt_columns_from_bq_table(self, table):
    """Translate BQ SchemaField dicts into dbt BigQueryColumn objects."""
    return [
        self.Column(col.name,
                    self.Column.translate_type(col.field_type),
                    col.fields,
                    col.mode)
        for col in table.schema
    ]
def _accumulate_random(count, found, oldthing, newthing): if randint(1, count + found) <= found: return count + found, newthing else: return count + found, oldthing
This performs on-line random selection. We have a stream of objects o_1,c_1; o_2,c_2; ... where there are c_i equivalent objects like o_1. We'd like to pick a random object o uniformly at random from the list [o_1]*c_1 + [o_2]*c_2 + ... (actually, this algorithm allows arbitrary positive weights, not necessarily integers) without spending the time&space to actually create that list. Luckily, the following works: thing = None c_tot for o_n, c_n in things: c_tot += c_n if randint(1,c_tot) <= c_n: thing = o_n This function is written in an accumulator format, so it can be used one call at a time: EXAMPLE: > thing = None > count = 0 > for i in range(10): > c = 10-i > count, thing = accumulate_random(count,c,thing,i) INPUTS: count: integer, sum of weights found before newthing found: integer, weight for newthing oldthing: previously selected object (will never be selected if count == 0) newthing: incoming object OUTPUT: (newcount, pick): newcount is count+found, pick is the newly selected object.
def load(cls, filename, gzipped, byteorder='big'):
    """Read, parse and return the file at the specified location.

    ``gzipped`` indicates whether the file is gzip-compressed;
    ``byteorder`` selects big- or little-endian parsing.
    """
    opener = gzip.open if gzipped else open
    with opener(filename, 'rb') as buff:
        return cls.from_buffer(buff, byteorder)
def dfs_recursive(graph, node, seen):
    """DFS marking the connected component of ``node`` (recursive).

    :param graph: directed graph in listlist or listdict format
    :param int node: vertex to start graph exploration from
    :param seen: boolean table, set to True for every vertex of the
        connected component containing node
    :complexity: `O(|V|+|E|)`
    """
    seen[node] = True
    for successor in graph[node]:
        if not seen[successor]:
            dfs_recursive(graph, successor, seen)
def init_app(self, app):
    """Register this extension with the flask app.

    :param app: A flask application
    """
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    app.extensions['flask-jwt-simple'] = self
    self._set_default_configuration_options(app)
    self._set_error_handler_callbacks(app)
    # NOTE(review): presumably set so our JWT errors reach the registered
    # handlers instead of generic 500s — confirm.
    app.config['PROPAGATE_EXCEPTIONS'] = True
def construct_ctcp(*parts):
    """Construct a CTCP message from the given parts.

    The escape character itself is escaped *before* the NUL/newline/CR
    substitutions are applied; the previous order escaped it last, which
    double-escaped the escape characters the earlier substitutions had
    just inserted, corrupting the quoting.
    """
    message = ' '.join(parts)
    # Escape pre-existing escape characters first so the markers inserted
    # below survive intact.
    message = message.replace(CTCP_ESCAPE_CHAR,
                              CTCP_ESCAPE_CHAR + CTCP_ESCAPE_CHAR)
    message = message.replace('\0', CTCP_ESCAPE_CHAR + '0')
    message = message.replace('\n', CTCP_ESCAPE_CHAR + 'n')
    message = message.replace('\r', CTCP_ESCAPE_CHAR + 'r')
    return CTCP_DELIMITER + message + CTCP_DELIMITER
def _worker_status(target, worker, activation, profile='default',
                   tgt_type='glob'):
    """Check if ``worker`` is in ``activation`` state on the targeted
    load balancers.

    Returns a dict with:
        result      - False if no server answered the published command
        errors      - servers that could not find the worker
        wrong_state - servers where the worker was not in ``activation``
    """
    ret = {
        'result': True,
        'errors': [],
        'wrong_state': [],
    }
    status = __salt__['publish.publish'](
        target, 'modjk.worker_status', [worker, profile], tgt_type
    )
    # No minion responded at all.
    if not status:
        ret['result'] = False
        return ret
    for balancer, state in status.items():
        if not state:
            ret['errors'].append(balancer)
        elif state['activation'] != activation:
            ret['wrong_state'].append(balancer)
    return ret
def to_chords(progression, key='C'):
    """Convert a list of chord functions (or a single string) to chords.

    Examples:
    >>> to_chords(['I', 'V7'])
    [['C', 'E', 'G'], ['G', 'B', 'D', 'F']]
    >>> to_chords('I7')
    [['C', 'E', 'G', 'B']]

    Accidentals can prefix a numeral to diminish or augment it (bIV, #I),
    and any chord-module shorthand can suffix it (Im7, IVdim7, ...); the
    two can be combined (#vii7, #iidim7). Note that the suffix 7 follows
    the classical convention: I7 yields a major seventh chord; use Idom7
    for a dominant seventh.
    """
    if isinstance(progression, str):
        progression = [progression]
    result = []
    for chord in progression:
        (roman_numeral, acc, suffix) = parse_string(chord)
        if roman_numeral not in numerals:
            return []
        if suffix == '7' or suffix == '':
            roman_numeral += suffix
            r = chords.__dict__[roman_numeral](key)
        else:
            r = chords.__dict__[roman_numeral](key)
            r = chords.chord_shorthand[suffix](r[0])
        # List comprehensions instead of map(): on Python 3, map() returns
        # a lazy iterator, which would break the repeated rewriting below
        # and leak a non-list into the result.
        while acc < 0:
            r = [notes.diminish(note) for note in r]
            acc += 1
        while acc > 0:
            r = [notes.augment(note) for note in r]
            acc -= 1
        result.append(r)
    return result
def _validate_open_msg(self, open_msg):
    """Validate a BGP OPEN message against current application settings.

    The parsing modules validate context-free aspects of OPEN; here we
    check the peer AS (honouring the four-octet AS number capability)
    and the BGP version.
    """
    assert open_msg.type == BGP_MSG_OPEN
    opt_param_cap_map = open_msg.opt_param_cap_map
    remote_as = open_msg.my_as
    # Prefer the four-octet AS number capability when the peer sent one.
    cap4as = opt_param_cap_map.get(BGP_CAP_FOUR_OCTET_AS_NUMBER, None)
    if cap4as is None:
        # Without the capability, AS_TRANS in the header is invalid.
        if remote_as == AS_TRANS:
            raise bgp.BadPeerAs()
        self.cap_four_octet_as_number = False
    else:
        remote_as = cap4as.as_number
        self.cap_four_octet_as_number = True
    if remote_as != self._peer.remote_as:
        raise bgp.BadPeerAs()
    if open_msg.version != BGP_VERSION_NUM:
        raise bgp.UnsupportedVersion(BGP_VERSION_NUM)
def makepipebranch(idf, bname):
    """Make a branch containing a single adiabatic pipe.

    Uses standard inlet/outlet names derived from the branch name.
    """
    pipe_name = "%s_pipe" % (bname,)
    apipe = makepipecomponent(idf, pipe_name)
    abranch = idf.newidfobject("BRANCH", Name=bname)
    abranch.Component_1_Object_Type = 'Pipe:Adiabatic'
    abranch.Component_1_Name = pipe_name
    abranch.Component_1_Inlet_Node_Name = apipe.Inlet_Node_Name
    abranch.Component_1_Outlet_Node_Name = apipe.Outlet_Node_Name
    abranch.Component_1_Branch_Control_Type = "Bypass"
    return abranch
def pad(a, desiredlength):
    """Zero-pad an n-dimensional array along axis 0 up to ``desiredlength``.

    Returns ``a`` unchanged if it is already long enough; lists come back
    as lists, arrays as arrays.
    """
    if len(a) >= desiredlength:
        return a
    was_list = isinstance(a, list)
    arr = np.array(a)
    pad_shape = list(arr.shape)
    pad_shape[0] = desiredlength - len(arr)
    padded = np.concatenate([arr, np.zeros(pad_shape, dtype=arr.dtype)])
    return padded.tolist() if was_list else padded
def check_dependee_exists(self, depender, dependee, dependee_id):
    """Check whether a depended-on module is available.

    Returns an explanatory error string when it is not, '' otherwise.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    if dependee is None:
        return 'module: \n\n' + dependee_id + '\n\nnot found in paths: ' + str(self.host['shutit_module_path']) + ' but needed for ' + depender.module_id + '\nCheck your --shutit_module_path setting and ensure that all modules configured to be built are in that path setting, eg "--shutit_module_path /path/to/other/module/:."\n\nAlso check that the module is configured to be built with the correct module id in that module\'s configs/build.cnf file.\n\nSee also help.'
    return ''
def build_current_graph():
    """Read the current state of SQL items from the project apps.

    Returns:
        (SQLStateGraph) Current project state graph.
    """
    graph = SQLStateGraph()
    for app_name, config in apps.app_configs.items():
        try:
            module = import_module(
                '.'.join((config.module.__name__, SQL_CONFIG_MODULE)))
            sql_items = module.sql_items
        except (ImportError, AttributeError):
            # App without a SQL config module: nothing to collect.
            continue
        for sql_item in sql_items:
            graph.add_node((app_name, sql_item.name), sql_item)
            for dep in sql_item.dependencies:
                graph.add_lazy_dependency((app_name, sql_item.name), dep)
    graph.build_graph()
    return graph
def attach_protocol(self, proto):
    """Set this object up to track ``proto``.

    Returns a Deferred that fires once tracking is established; raises
    RuntimeError if we already have a protocol.
    """
    if self._protocol is not None:
        raise RuntimeError("Already have a protocol.")
    self.save()
    self.__dict__['_protocol'] = proto
    # NOTE(review): '_accept_all_' appears to gate attribute interception
    # elsewhere in this class — confirm before relying on it.
    del self.__dict__['_accept_all_']
    self.__dict__['post_bootstrap'] = defer.Deferred()
    if proto.post_bootstrap:
        proto.post_bootstrap.addCallback(self.bootstrap)
    return self.__dict__['post_bootstrap']
def to_line_protocol(self):
    """Convert this metric to a single line of InfluxDB line protocol."""
    tags = self.get_output_tags()
    tag_part = "," + tags if tags else ''
    return u"{0}{1} {2}{3}".format(
        self.get_output_measurement(),
        tag_part,
        self.get_output_values(),
        self.get_output_timestamp()
    )
def do_tagg(self, arglist: List[str]):
    """Create an HTML tag, using arglist instead of argparser."""
    if len(arglist) < 2:
        self.perror("tagg requires at least 2 arguments")
        return
    tag, content = arglist[0], arglist[1:]
    self.poutput('<{0}>{1}</{0}>'.format(tag, ' '.join(content)))
def parseFASTAFilteringCommandLineOptions(args, reads):
    """Examine parsed FASTA filtering command-line options and return
    filtered reads.

    @param args: An argparse namespace, as returned by the argparse
        C{parse_args} function.
    @param reads: A C{Reads} instance to filter.
    @return: The filtered C{Reads} instance.
    """
    # Range strings are converted to zero-based index sets up front.
    keepSequences = (
        parseRangeString(args.keepSequences, convertToZeroBased=True)
        if args.keepSequences else None)
    removeSequences = (
        parseRangeString(args.removeSequences, convertToZeroBased=True)
        if args.removeSequences else None)
    return reads.filter(
        minLength=args.minLength,
        maxLength=args.maxLength,
        whitelist=set(args.whitelist) if args.whitelist else None,
        blacklist=set(args.blacklist) if args.blacklist else None,
        whitelistFile=args.whitelistFile,
        blacklistFile=args.blacklistFile,
        titleRegex=args.titleRegex,
        negativeTitleRegex=args.negativeTitleRegex,
        keepSequences=keepSequences,
        removeSequences=removeSequences,
        head=args.head,
        removeDuplicates=args.removeDuplicates,
        removeDuplicatesById=args.removeDuplicatesById,
        randomSubset=args.randomSubset,
        trueLength=args.trueLength,
        sampleFraction=args.sampleFraction,
        sequenceNumbersFile=args.sequenceNumbersFile)
def wait(self, *, timeout=None):
    """Block until all jobs in the group have finished or the timeout
    expires.

    Parameters:
        timeout(int): The maximum amount of time, in ms, to wait.
            Defaults to 10 seconds.
    """
    # Drain the results iterator; only completion matters here.
    for _result in self.get_results(block=True, timeout=timeout):
        pass
def _hack_namedtuple(cls): name = cls.__name__ fields = cls._fields def __reduce__(self): return (_restore, (name, fields, tuple(self))) cls.__reduce__ = __reduce__ cls._is_namedtuple_ = True return cls
Make class generated by namedtuple picklable
def get_gravatar(email, size=80, rating='g', default=None, protocol=PROTOCOL):
    """Return the Gravatar URL for ``email``.

    The address is stripped, lowercased and md5-hashed; '&' in the
    result is HTML-escaped.
    """
    gravatar_protocols = {'http': 'http://www', 'https': 'https://secure'}
    email_hash = md5(email.strip().lower().encode('utf-8')).hexdigest()
    url = '%s.gravatar.com/avatar/%s' % (gravatar_protocols[protocol],
                                         email_hash)
    options = {'s': size, 'r': rating}
    if default:
        options['d'] = default
    url = '%s?%s' % (url, urlencode(options))
    return url.replace('&', '&amp;')
def get_authenticity_token(self, url=_SIGNIN_URL):
    """Return an authenticity_token, mandatory for signing in.

    :raises ValueError: if the token cannot be found in the page.
    """
    res = self.client._get(url=url, expected_status_code=200)
    soup = BeautifulSoup(res.text, _DEFAULT_BEAUTIFULSOUP_PARSER)
    selection = soup.select(_AUTHENTICITY_TOKEN_SELECTOR)
    try:
        authenticity_token = selection[0].get("content")
    except IndexError:
        # Narrowed from a bare ``except:``: only an empty selection is an
        # expected failure here; anything else should propagate.
        raise ValueError(
            "authenticity_token not found in {} with {}\n{}".format(
                _SIGNIN_URL, _AUTHENTICITY_TOKEN_SELECTOR, res.text))
    return authenticity_token
def _updateEndpoints(self, *args, **kwargs):
    """Update all endpoints except the one from which this slot was called.

    NOTE(review): probably not completely thread-safe; a lock may be
    needed around the ``ignoreEvents`` flag.
    """
    sender = self.sender()
    if self.ignoreEvents:
        return
    self.ignoreEvents = True
    for binding in self.bindings.values():
        if binding.instanceId == id(sender):
            continue  # never echo the update back to its origin
        if args:
            binding.setter(*args, **kwargs)
        else:
            # No payload: pull the current value from the sender's binding.
            binding.setter(self.bindings[id(sender)].getter())
    self.ignoreEvents = False
def _generateGUID(slnfile, name): m = hashlib.md5() m.update(bytearray(ntpath.normpath(str(slnfile)) + str(name),'utf-8')) solution = m.hexdigest().upper() solution = "{" + solution[:8] + "-" + solution[8:12] + "-" + solution[12:16] + "-" + solution[16:20] + "-" + solution[20:32] + "}" return solution
This generates a dummy GUID for the sln file to use. It is based on the MD5 signatures of the sln filename plus the name of the project. It basically just needs to be unique, and not change with each invocation.
def visit_constant(self, node, parent):
    """Visit a Constant node by returning a fresh instance of Const."""
    # Position attributes may be absent on synthetic nodes.
    lineno = getattr(node, "lineno", None)
    col_offset = getattr(node, "col_offset", None)
    return nodes.Const(node.value, lineno, col_offset, parent)
def mid(self, value):
    """Set the MID of the message.

    :type value: Integer
    :param value: the MID (an unsigned 16-bit value, 0..65535)
    :raise AttributeError: if value is not int or cannot be
        represented on 16 bits.
    """
    # The previous check (``value > 65536``) wrongly accepted 65536
    # itself and every negative value, neither of which fits in the
    # 16 bits the docstring promises.
    if not isinstance(value, int) or not 0 <= value <= 65535:
        raise AttributeError
    self._mid = value
def update_room(self, stream_id, room_definition):
    """Update a room definition.

    Returns the ``(status_code, response)`` pair from the REST call.
    """
    endpoint = 'pod/v2/room/' + str(stream_id) + '/update'
    payload = json.dumps(room_definition)
    status_code, response = self.__rest__.POST_query(endpoint, payload)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
def coerce(cls, key, value):
    """Convert plain dictionaries to MutationDict."""
    if isinstance(value, MutationDict):
        return value
    if isinstance(value, dict):
        return MutationDict(value)
    # Anything else: defer to the base Mutable coercion.
    return Mutable.coerce(key, value)
def get_screenshot_as_file(self, filename):
    """Save a PNG screenshot of the current window to ``filename``.

    Returns False if there is any IOError, else returns True.
    Use full paths in your filename.

    :Args:
        - filename: The full path you wish to save your screenshot to.
          This should end with a `.png` extension.

    :Usage:
        driver.get_screenshot_as_file('/Screenshots/foo.png')
    """
    if not filename.lower().endswith('.png'):
        warnings.warn("name used for saved screenshot does not match file "
                      "type. It should end with a `.png` extension",
                      UserWarning)
    png = self.get_screenshot_as_png()
    try:
        with open(filename, 'wb') as f:
            f.write(png)
    except IOError:
        return False
    finally:
        # Free the (potentially large) image buffer promptly.
        del png
    return True
def _get_lonely_contract(self):
    """Get the contract number when the account has only one contract.

    :raises PyHydroQuebecError: if the main page or the contract number
        cannot be found.
    """
    contracts = {}
    try:
        raw_res = yield from self._session.get(MAIN_URL,
                                               timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not get main page")
    content = yield from raw_res.text()
    soup = BeautifulSoup(content, 'html.parser')
    info_node = soup.find("div", {"class": "span3 contrat"})
    if info_node is None:
        raise PyHydroQuebecError("Can not found contract")
    research = re.search("Contrat ([0-9]{4} [0-9]{5})", info_node.text)
    if research is not None:
        # Strip the space out of the contract number.
        contracts[research.group(1).replace(" ", "")] = None
    if contracts == {}:
        raise PyHydroQuebecError("Can not found contract")
    return contracts
def unit_vector(self):
    """Return the unit vector (this vector scaled to magnitude 1)."""
    magnitude = self.magnitude
    return Point2D(self.x / magnitude, self.y / magnitude)
def _open_for_read(self):
    """Open the remote file in read mode via the file_download endpoint."""
    ownerid, datasetid = parse_dataset_key(self._dataset_key)
    url = '{}/file_download/{}/{}/{}'.format(
        self._query_host, ownerid, datasetid, self._file_name)
    response = requests.get(
        url,
        headers={
            'User-Agent': self._user_agent,
            'Authorization': 'Bearer {}'.format(self._config.auth_token)
        },
        stream=True)
    try:
        response.raise_for_status()
    except Exception as e:
        raise RestApiError(cause=e)
    self._read_response = response
def resizeToContents(self):
    """Resize this edit based on the contents of its text."""
    if not self._toolbar.isVisible():
        super(XCommentEdit, self).resizeToContents()
        return
    doc_height = self.document().documentLayout().documentSize().height()
    edit = self._attachmentsEdit
    if self._attachments:
        # Show the attachment tags just above the toolbar.
        edit.move(2, self.height() - edit.height() - 31)
        edit.setTags(sorted(self._attachments.keys()))
        edit.show()
        offset = 34 + edit.height()
    else:
        edit.hide()
        offset = 34
    self.setFixedHeight(doc_height + offset)
    self._toolbar.move(2, self.height() - 32)
def _get_all_merges(routing_table):
    """Yield possible sets of routing-table entries to merge.

    Yields
    ------
    :py:class:`~.Merge`
    """
    seen = set()
    for index, entry in enumerate(routing_table):
        if index in seen:
            continue  # already part of an earlier merge
        # Group this entry with every later entry sharing its route.
        group = {index}
        group |= {
            j for j, other in enumerate(routing_table[index + 1:],
                                        start=index + 1)
            if entry.route == other.route
        }
        seen |= group
        if len(group) > 1:
            yield _Merge(routing_table, group)
def _prepare_errcheck():
    """Install errcheck hooks on all wrapped GLFW functions.

    The errcheck re-raises any exception that a callback stored in the
    ``_exc_info_from_callback`` global; all ``_GLFW*fun`` callback types
    are also rewrapped so callbacks go through the exception decorator.
    """
    def errcheck(result, *args):
        global _exc_info_from_callback
        if _exc_info_from_callback is not None:
            exc = _exc_info_from_callback
            _exc_info_from_callback = None
            _reraise(exc[1], exc[2])
        return result

    for symbol in dir(_glfw):
        if symbol.startswith('glfw'):
            getattr(_glfw, symbol).errcheck = errcheck

    _globals = globals()
    for symbol in _globals:
        if symbol.startswith('_GLFW') and symbol.endswith('fun'):
            # Bind the current cfunctype as a default to avoid the
            # late-binding closure pitfall.
            def wrapper_cfunctype(func, cfunctype=_globals[symbol]):
                return cfunctype(_callback_exception_decorator(func))
            _globals[symbol] = wrapper_cfunctype
def parseSetEnv(l):
    """Parse strings of the form "NAME=VALUE" or "NAME" into a dict.

    Bare "NAME" entries map to None; an empty name raises ValueError.

    :type l: list[str]
    :rtype: dict[str,str]

    >>> parseSetEnv([])
    {}
    >>> parseSetEnv(['a'])
    {'a': None}
    >>> parseSetEnv(['a='])
    {'a': ''}
    >>> parseSetEnv(['a=b=c'])
    {'a': 'b=c'}
    >>> parseSetEnv(['=1'])
    Traceback (most recent call last):
    ...
    ValueError: Empty name
    """
    parsed = dict()
    for item in l:
        name, sep, value = item.partition('=')
        if not name:
            raise ValueError('Empty name')
        # No '=' at all means "unset marker": value is None, not ''.
        parsed[name] = value if sep else None
    return parsed
def CommitAll(close=None):
    """Commit the pending transaction on every registered engine's session.

    :param close: deprecated; ignored apart from emitting a
        DeprecationWarning when truthy.
    """
    if close:
        warnings.simplefilter('default')
        warnings.warn("close parameter will not need at all.", DeprecationWarning)
    for k, v in engine_manager.items():
        # create=False: only commit sessions that already exist; do not
        # create one just to commit it.
        session = v.session(create=False)
        if session:
            session.commit()
Commit all transactions according to Local.conn
def get_config(self):
    """Copy RabbitMQ settings from ``self.config`` onto the instance.

    Only options present in the config are applied.  ``rmq_port`` and
    ``rmq_heartbeat_interval`` are coerced to ``int`` and
    ``rmq_durable`` to ``bool``; the remaining options are stored
    unchanged.
    """
    # option name -> coercion callable (None keeps the raw value).
    option_casts = (
        ('rmq_port', int),
        ('rmq_user', None),
        ('rmq_password', None),
        ('rmq_vhost', None),
        ('rmq_exchange_type', None),
        ('rmq_durable', bool),
        ('rmq_heartbeat_interval', int),
    )
    for option, cast in option_casts:
        if option in self.config:
            raw = self.config[option]
            setattr(self, option, cast(raw) if cast else raw)
Get and set config options from config file
def get_string(string):
    """Return *string*, dereferencing it first if it names a file.

    If *string* contains a ``/`` and is an existing file, the file is
    read and its stripped lines are joined with spaces into the result;
    otherwise *string* is returned as-is.  A result that is empty after
    stripping yields ``None``.
    """
    truestring = string
    if string is not None:
        if '/' in string:
            if os.path.isfile(string):
                # NOTE(review): bare except silently falls back to the
                # raw string on any read failure, but it also swallows
                # KeyboardInterrupt/SystemExit — consider narrowing.
                try:
                    with open_(string,'r') as f:
                        truestring = ' '.join(line.strip() for line in f)
                except:
                    pass
        if truestring.strip() == '':
            truestring = None
    return truestring
This function checks if a path was given as string, and tries to read the file and return the string.
def set_default_decoder_parameters():
    """Wraps openjp2 library function opj_set_default_decoder_parameters.

    Sets decoding parameters to default values.

    Returns
    -------
    dparam : DecompressionParametersType
        Decompression parameters.
    """
    ARGTYPES = [ctypes.POINTER(DecompressionParametersType)]
    OPENJP2.opj_set_default_decoder_parameters.argtypes = ARGTYPES
    OPENJP2.opj_set_default_decoder_parameters.restype = ctypes.c_void_p
    dparams = DecompressionParametersType()
    # The C function fills the struct in place through the pointer.
    OPENJP2.opj_set_default_decoder_parameters(ctypes.byref(dparams))
    return dparams
Wraps openjp2 library function opj_set_default_decoder_parameters. Sets decoding parameters to default values. Returns ------- dparam : DecompressionParametersType Decompression parameters.
def _safe_minmax(values): isfinite = np.isfinite(values) if np.any(isfinite): values = values[isfinite] minval = np.min(values) maxval = np.max(values) return minval, maxval
Calculate min and max of array with guards for nan and inf.
def commit_api(api):
    """Commit to a particular Qt binding API.

    Forbids importing the competing bindings so that subsequent
    dangerous imports raise ImportError.
    """
    if api == QT_API_PYSIDE:
        for banned in ('PyQt4', 'PyQt5'):
            ID.forbid(banned)
    else:
        ID.forbid('PySide')
Commit to a particular API, and trigger ImportErrors on subsequent dangerous imports
def setdefault(self, key, value):
    """Atomic store-conditional.

    Stores *value* at *key* only if *key* is not already present; the
    test-and-set is performed under ``self.lock`` so it cannot race
    with other threads.  Returns the existing value if one was found,
    otherwise the newly stored *value*.
    """
    with self.lock:
        if key in self:
            return self.getitem(key)
        else:
            self.setitem(key, value)
            return value
Atomic store conditional. Stores _value_ into dictionary at _key_, but only if _key_ does not already exist in the dictionary. Returns the old value found or the new value.
def release(self):
    """Release a lock, whether read or write.

    A negative ``rwlock`` means a writer holds the lock (reset to 0);
    otherwise one reader is subtracted.  Waiting writers are preferred:
    one writer is woken when the lock becomes free and writers are
    queued, else all waiting readers are woken.
    """
    self.monitor.acquire()
    if self.rwlock < 0:
        # Writer held the lock; clear the sentinel.
        self.rwlock = 0
    else:
        self.rwlock -= 1
    # Decide who to wake while still inside the monitor.
    wake_writers = self.writers_waiting and self.rwlock == 0
    wake_readers = self.writers_waiting == 0
    # Notify outside the monitor to keep the critical section short.
    self.monitor.release()
    if wake_writers:
        self.writers_ok.acquire()
        self.writers_ok.notify()
        self.writers_ok.release()
    elif wake_readers:
        self.readers_ok.acquire()
        self.readers_ok.notifyAll()
        self.readers_ok.release()
Release a lock, whether read or write.
def wrap(cls, meth):
    """Wrap a connection-opening coroutine in this class.

    The returned coroutine forwards all arguments to *meth* and wraps
    the awaited socket in ``cls`` before returning it.
    """
    async def opener(*args, **kwargs):
        return cls(await meth(*args, **kwargs))
    return opener
Wraps a connection opening method in this class.
def get_all_changes(self, *args, **kwds):
    """Compute refactoring changes for every project.

    Returns a list of ``(project, changes)`` tuples, one per project,
    where ``changes`` is produced by the corresponding refactoring's
    ``get_changes``.

    NOTE(review): ``args``/``kwds`` are rebound by
    ``_resources_for_args`` on every iteration, so each project sees
    arguments already translated for the previous projects —
    presumably intentional cross-project resource mapping; confirm
    before refactoring.
    """
    result = []
    for project, refactoring in zip(self.projects, self.refactorings):
        args, kwds = self._resources_for_args(project, args, kwds)
        result.append((project, refactoring.get_changes(*args, **kwds)))
    return result
Get a project to changes dict
def invite_user(self, user_id):
    """Invite a user to this room.

    Args:
        user_id (str): Matrix ID of the user to invite.

    Returns:
        boolean: Whether invitation was sent; False when the API call
        fails with a MatrixRequestError.
    """
    try:
        self.client.api.invite_user(self.room_id, user_id)
        return True
    except MatrixRequestError:
        return False
Invite a user to this room. Returns: boolean: Whether invitation was sent.
def add(self, key):
    """Add `key` as an item to this OrderedSet, then return its index.

    If `key` is already present, return the index it already had.
    """
    # Fast path: already known.
    if key in self.map:
        return self.map[key]
    index = len(self.items)
    self.items.append(key)
    self.map[key] = index
    return index
Add `key` as an item to this OrderedSet, then return its index. If `key` is already in the OrderedSet, return the index it already had.
def learningCurve(expPath, suite):
    """Print per-iteration test error, total correct, elapsed time and
    entropy for one experiment as a grid table.

    :param expPath: experiment path understood by ``suite.get_value``.
    :param suite: experiment suite providing ``get_value``.
    """
    print("\nLEARNING CURVE ================",expPath,"=====================")
    try:
        headers = ["testerror", "totalCorrect", "elapsedTime", "entropy"]
        result = suite.get_value(expPath, 0, headers, "all")
        info = []
        for i, v in enumerate(zip(result["testerror"], result["totalCorrect"],
                                  result["elapsedTime"], result["entropy"])):
            # Elapsed time is truncated to whole seconds for display.
            info.append([i, v[0], v[1], int(v[2]), v[3]])
        headers.insert(0, "iteration")
        print(tabulate(info, headers=headers, tablefmt="grid"))
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate; any load/format failure falls back to a message.
    except Exception:
        print("Couldn't load experiment", expPath)
Print the test and overall noise errors from each iteration of this experiment
def decompile(input_, file_, output, format_, jar, limit, decompiler):
    """Decompile an APK and create Control Flow Graphs.

    Exactly one of *input_* (option) or *file_* (positional) must name
    the APK; exits with status 1 otherwise.  The loaded session is
    handed to ``export_apps_to_format`` for the actual output.

    Example:

    \b
    $ androguard resources.arsc
    """
    from androguard import session
    if file_ and input_:
        print("Can not give --input and positional argument! "
              "Please use only one of them!", file=sys.stderr)
        sys.exit(1)
    if not input_ and not file_:
        print("Give one file to decode!", file=sys.stderr)
        sys.exit(1)
    if input_:
        fname = input_
    else:
        fname = file_
    s = session.Session()
    with open(fname, "rb") as fd:
        s.add(fname, fd.read())
    export_apps_to_format(fname, s, output, limit, jar, decompiler, format_)
Decompile an APK and create Control Flow Graphs. Example: \b $ androguard resources.arsc
def update(self):
    """Generate the output from the matrix.

    Draws one filled circle per RGB triple in ``self.matrix`` onto
    ``self.screen``, laying dots out row-major over a
    ``self.width`` x ``self.height`` grid of ``self.dotsize``-pixel
    cells.
    """
    pixels = len(self.matrix)
    for x in range(self.width):
        for y in range(self.height):
            # Offset of this dot's RGB triple in the flat matrix.
            pixel = y * self.width * 3 + x * 3
            if pixel < pixels:
                # NOTE(review): `self.dotsize / 2` is a float on
                # Python 3 while pygame expects integer coords/radius —
                # confirm dotsize is even or this targets Python 2.
                pygame.draw.circle(self.screen,
                                   (self.matrix[pixel],
                                    self.matrix[pixel + 1],
                                    self.matrix[pixel + 2]),
                                   (x * self.dotsize + self.dotsize / 2,
                                    y * self.dotsize + self.dotsize / 2),
                                   self.dotsize / 2,
                                   0)
Generate the output from the matrix.
def _mark_lines(lines, sender):
    """Mark message lines with markers to distinguish signature lines.

    Markers:

    * e - empty line
    * s - line identified as signature
    * t - other i.e. ordinary text line

    >>> _mark_lines(['Some text', '', 'Bob'], 'Bob')
    'tes'
    """
    # Removed the unnecessary `global EXTRACTOR` declaration: the name
    # is only read here, never rebound.
    candidate = get_signature_candidate(lines)
    # Everything defaults to ordinary text; only the trailing candidate
    # region may be re-marked below.
    markers = list('t' * len(lines))
    for i, line in reversed(list(enumerate(candidate))):
        # Index of this candidate line within the full message.
        j = len(lines) - len(candidate) + i
        if not line.strip():
            markers[j] = 'e'
        elif is_signature_line(line, sender, EXTRACTOR):
            markers[j] = 's'
    return "".join(markers)
Mark message lines with markers to distinguish signature lines. Markers: * e - empty line * s - line identified as signature * t - other i.e. ordinary text line >>> mark_message_lines(['Some text', '', 'Bob'], 'Bob') 'tes'
def extract_root_meta(cls, serializer, resource):
    """Collect root-level meta from a serializer's ``get_root_meta``
    hook, if the serializer defines one.

    List serializers are unwrapped to their child and ``many`` is set
    accordingly.
    """
    many = hasattr(serializer, 'child')
    if many:
        serializer = serializer.child
    meta = {}
    hook = getattr(serializer, 'get_root_meta', None)
    if hook:
        json_api_meta = hook(resource, many)
        assert isinstance(json_api_meta, dict), 'get_root_meta must return a dict'
        meta.update(json_api_meta)
    return meta
Calls a `get_root_meta` function on a serializer, if it exists.
def find_range_ix_in_section_list(start, end, section_list):
    """Return [first_ix, last_ix + 1] of the sections overlapping [start, end].

    ``section_list`` holds the start points of consecutive sections,
    its last element being the end point of the final section.  Ranges
    entirely outside the list yield [0, 0]; ranges extending past
    either end are clamped.

    Parameters
    ----------
    start : float
        The start of the desired range.
    end : float
        The end of the desired range.
    section_list : sortedcontainers.SortedList
        Start points of consecutive sections (last item = overall end).

    Returns
    -------
    list
        Two-element index range into ``section_list``.
    """
    # Entirely before the first or after the last section: empty range.
    if start > section_list[-1] or end < section_list[0]:
        return [0, 0]
    if start < section_list[0]:
        start_section = section_list[0]
    else:
        start_section = find_point_in_section_list(start, section_list)
    if end > section_list[-1]:
        # Clamp to the start point of the final section.
        end_section = section_list[-2]
    else:
        end_section = find_point_in_section_list(end, section_list)
    return [
        section_list.index(start_section),
        section_list.index(end_section)+1]
Returns the index range all sections belonging to the given range. The given list is assumed to contain start points of consecutive sections, except for the final point, assumed to be the end point of the last section. For example, the list [5, 8, 30, 31] is interpreted as the following list of sections: [5-8), [8-30), [30-31]. As such, this function will return [5,8] for the range (7,9) and [5,8,30] while for (7, 30). Parameters --------- start : float The start of the desired range. end : float The end of the desired range. section_list : sortedcontainers.SortedList A list of start points of consecutive sections. Returns ------- iterable The index range of all sections belonging to the given range. Example ------- >>> from sortedcontainers import SortedList >>> seclist = SortedList([5, 8, 30, 31]) >>> find_range_ix_in_section_list(3, 4, seclist) [0, 0] >>> find_range_ix_in_section_list(6, 7, seclist) [0, 1] >>> find_range_ix_in_section_list(7, 9, seclist) [0, 2] >>> find_range_ix_in_section_list(7, 30, seclist) [0, 3] >>> find_range_ix_in_section_list(7, 321, seclist) [0, 3] >>> find_range_ix_in_section_list(4, 321, seclist) [0, 3]
def calculate(price, to_code, **kwargs):
    """Convert *price* from the default currency to *to_code*.

    :param price: amount expressed in the default currency.
    :param to_code: code of the target currency.
    :param kwargs: forwarded to ``convert``; ``qs`` (a currencies
        queryset) defaults to the active currencies.
    """
    qs = kwargs.get('qs', get_active_currencies_qs())
    kwargs['qs'] = qs
    # The default currency is defined by the queryset itself.
    default_code = qs.default().code
    return convert(price, default_code, to_code, **kwargs)
Converts a price in the default currency to another currency
def query(self, sql, args=None, many=None, as_dict=False):
    """Run a raw SQL query on a pooled connection.

    :param sql string: the sql statement like 'select * from %s'
    :param args list: when None, dbi ``execute(sql)`` is used, else dbi
        ``execute(sql, args)``; should be a tuple or list of lists.
    :param many int: when set (> 0), return a generator yielding rows
        in batches; cursor/connection cleanup is then owned by the
        generator helper, not this method.
    :param as_dict bool: when True each row is a dict, otherwise a tuple.
    :raises DBError: wrapping any driver-level exception.
    """
    con = self.pool.pop()
    c = None
    try:
        c = con.cursor(as_dict)
        # Lazy %-style args: formatting is skipped when DEBUG is off.
        LOGGER.debug("Query sql: %s args: %s", sql, args)
        c.execute(sql, args)
        if many and many > 0:
            return self._yield(con, c, many)
        else:
            return c.fetchall()
    except Exception as e:
        # Fixed typo in the log message ("Qeury" -> "Query").
        LOGGER.error("Error Query on %s", str(e))
        raise DBError(e.args[0], e.args[1])
    finally:
        # For streaming (`many`) the generator closes the cursor and
        # returns the connection; otherwise do it here.
        many or (c and c.close())
        many or (con and self.pool.push(con))
The connection raw sql query, used when selecting from or showing tables to
fetch records; it is compatible with the dbi execute method.
:param sql string: the sql statement like 'select * from %s'
:param args list: when set to None, dbi execute(sql) will be used, else dbi
execute(sql, args); the args keep the original rules, it should be a tuple
or list of list
:param many int: when set, the query method will return a generator that
iterates over the rows
:param as_dict bool: when true, the type of each row will be dict, otherwise
tuple
def rotation(self):
    """Rotation of the phone.

    0: normal
    1: home key on the right
    2: home key on the top
    3: home key on the left
    """
    # An explicitly configured rotation wins over device queries.
    if self.screen_rotation in (0, 1, 2, 3):
        return self.screen_rotation
    return self.adb_device.rotation() or self.info['displayRotation']
Rotation of the phone
0: normal
1: home key on the right
2: home key on the top
3: home key on the left
def push(self, message, device=None, title=None, url=None, url_title=None,
         priority=None, timestamp=None, sound=None):
    """Push the notification via the Pushover API; return the Requests
    response.

    :param message: the message body (required).
    :param device: target a single device name instead of all devices.
    :param title: message title; the app's name is used when omitted.
    :param url: supplementary URL shown with the message.
    :param url_title: title for the supplementary URL.
    :param priority: -1 quiet, 1 high priority (bypasses quiet hours),
        2 additionally requires user confirmation.
    :param timestamp: Unix timestamp shown instead of receipt time.
    :param sound: name of a client-supported sound overriding the
        user's default.
    """
    api_url = 'https://api.pushover.net/1/messages.json'
    payload = {
        'token': self.api_token,
        'user': self.user,
        'message': message,
        'device': device,
        'title': title,
        'url': url,
        'url_title': url_title,
        'priority': priority,
        'timestamp': timestamp,
        'sound': sound
    }
    # requests drops params whose value is None, so unset options are
    # simply omitted from the request.
    return requests.post(api_url, params=payload)
Pushes the notification, returns the Requests response. Arguments: message -- your message Keyword arguments: device -- your user's device name to send the message directly to that device, rather than all of the user's devices title -- your message's title, otherwise your app's name is used url -- a supplementary URL to show with your message url_title -- a title for your supplementary URL, otherwise just the URL is shown priority -- send as --1 to always send as a quiet notification, 1 to display as high--priority and bypass the user's quiet hours, or 2 to also require confirmation from the user timestamp -- a Unix timestamp of your message's date and time to display to the user, rather than the time your message is received by our API sound -- the name of one of the sounds supported by device clients to override the user's default sound choice.
def mail_partial_json(self):
    """Return the JSON of the parsed partial mail.

    If a date is present it is rewritten as ISO-8601 first.  Output is
    indented and keeps non-ASCII characters.

    NOTE(review): reads ``self.mail_partial`` but writes
    ``self._mail_partial`` — presumably ``mail_partial`` is a property
    backed by ``_mail_partial``; confirm before refactoring.
    """
    if self.mail_partial.get("date"):
        self._mail_partial["date"] = self.date.isoformat()
    return json.dumps(self.mail_partial, ensure_ascii=False, indent=2)
Return the JSON of mail parsed partial
def download_next_song_cache(self):
    """Download the next song in the queue to the cache.

    Uses a copy of the global ``ydl_opts`` with the cache output
    template; download failures are ignored (best effort).
    """
    if len(self.queue) == 0:
        return
    cache_ydl_opts = dict(ydl_opts)
    cache_ydl_opts["outtmpl"] = self.output_format_next
    with youtube_dl.YoutubeDL(cache_ydl_opts) as ydl:
        try:
            url = self.queue[0][0]
            ydl.download([url])
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; download errors remain best-effort.
        except Exception:
            pass
Downloads the next song in the queue to the cache
def _has_valid_token(self): return bool(self.token and (self.expires > datetime.datetime.now()))
This only checks the token's existence and expiration. If it has been invalidated on the server, this method may indicate that the token is valid when it might actually not be.
def build_branch(self):
    """Get the build branch of the first DUT.

    :return: build branch or None if not found
    """
    # Guard clauses: no DUT information, or the first DUT has no build.
    if not len(self.dutinformation) > 0:
        return None
    first_build = self.dutinformation.get(0).build
    if first_build is None:
        return None
    return first_build.branch
get build branch. :return: build branch or None if not found
def check_validity(self):
    """Raise TypeError/ValueError if any invalid attribute is found.

    Validates pianoroll (2-D numpy array of bools/numbers with 128
    pitch columns), program (int in 0..127), is_drum (bool) and name
    (string).
    """
    # pianoroll: 2-D (time, 128) array of bools or numbers.
    if not isinstance(self.pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be a numpy array.")
    dtype_ok = (np.issubdtype(self.pianoroll.dtype, np.bool_)
                or np.issubdtype(self.pianoroll.dtype, np.number))
    if not dtype_ok:
        raise TypeError("The data type of `pianoroll` must be np.bool_ or "
                        "a subdtype of np.number.")
    if self.pianoroll.ndim != 2:
        raise ValueError("`pianoroll` must have exactly two dimensions.")
    if self.pianoroll.shape[1] != 128:
        raise ValueError("The length of the second axis of `pianoroll` "
                         "must be 128.")
    # program: MIDI program number.
    if not isinstance(self.program, int):
        raise TypeError("`program` must be int.")
    if not 0 <= self.program <= 127:
        raise ValueError("`program` must be in between 0 to 127.")
    if not isinstance(self.is_drum, bool):
        raise TypeError("`is_drum` must be bool.")
    if not isinstance(self.name, string_types):
        raise TypeError("`name` must be a string.")
Raise error if any invalid attribute found.
def get_identities(self, identity=None, attrs=None):
    """Get identities matching name and attrs of the user, as a list.

    :param identity: a zobjects.Identity or an identity name (string);
        on a name match a single-element list is returned immediately.
    :param attrs: dict of attributes; identities whose tags match any
        given attribute value are collected.
    :returns: list of zobjects.Identity (possibly empty).

    NOTE(review): when `identity` is given but never matches, the
    empty `wanted_identities` list is returned and `attrs` is ignored
    because of the if/elif inside the loop — confirm this fall-through
    is intended.
    """
    resp = self.request('GetIdentities')
    if 'identity' in resp:
        identities = resp['identity']
        # The API returns a bare dict when there is only one identity.
        if type(identities) != list:
            identities = [identities]
        if identity or attrs:
            wanted_identities = []
            for u_identity in [
                    zobjects.Identity.from_dict(i) for i in identities]:
                if identity:
                    if isinstance(identity, zobjects.Identity):
                        if u_identity.name == identity.name:
                            return [u_identity]
                    else:
                        if u_identity.name == identity:
                            return [u_identity]
                elif attrs:
                    for attr, value in attrs.items():
                        if (attr in u_identity._a_tags
                                and u_identity._a_tags[attr] == value):
                            wanted_identities.append(u_identity)
            return wanted_identities
        else:
            return [zobjects.Identity.from_dict(i) for i in identities]
    else:
        return []
Get identities matching name and attrs of the user, as a list :param: zobjects.Identity or identity name (string) :param: attrs dict of attributes to return only identities matching :returns: list of zobjects.Identity
def GetSubFileEntryByName(self, name, case_sensitive=True):
    """Retrieves a sub file entry by name.

    Args:
      name (str): name of the file entry.
      case_sensitive (Optional[bool]): True if the name is case sensitive.

    Returns:
      FileEntry: a file entry or None if not available.
    """
    lowered = name.lower()
    first_insensitive_match = None
    for candidate in self.sub_file_entries:
        # An exact match always wins, regardless of case sensitivity.
        if candidate.name == name:
            return candidate
        if (not case_sensitive and first_insensitive_match is None
                and candidate.name.lower() == lowered):
            first_insensitive_match = candidate
    return first_insensitive_match
Retrieves a sub file entry by name. Args: name (str): name of the file entry. case_sensitive (Optional[bool]): True if the name is case sensitive. Returns: FileEntry: a file entry or None if not available.
def _full_axis_reduce(self, axis, func, alternate_index=None):
    """Apply a reduction that needs knowledge of an entire axis.

    Args:
        axis: 0 to reduce down columns, otherwise reduce across rows.
        func: reduction applied via ``map_across_full_axis``; takes a
            Manager.
        alternate_index: optional replacement for the surviving
            index/columns of the result.

    Returns:
        A new query compiler whose reduced dimension is labelled
        "__reduced__".
    """
    result = self.data.map_across_full_axis(axis, func)
    if axis == 0:
        # Column-wise reduction: a single row remains.
        columns = alternate_index if alternate_index is not None else self.columns
        return self.__constructor__(result, index=["__reduced__"], columns=columns)
    else:
        # Row-wise reduction: a single column remains.
        index = alternate_index if alternate_index is not None else self.index
        return self.__constructor__(result, index=index, columns=["__reduced__"])
Applies map that reduce Manager to series but require knowledge of full axis. Args: func: Function to reduce the Manager by. This function takes in a Manager. axis: axis to apply the function to. alternate_index: If the resulting series should have an index different from the current query_compiler's index or columns. Return: Pandas series containing the reduced data.
def _has_bad_coords(root, stream):
    """Data-cleanup predicate: True when a packet carries dummy coordinates.

    dc3.broker test packets always have dummy RA/Dec; GCN packets are
    dummies when flagged "unavailable/inappropriate" in Coords_String.
    """
    # dc3 broker test packets always carry dummy positions.
    if stream == "com.dc3/dc3.broker":
        return True
    # Only GCN packets need the Coords_String inspection.
    if stream.split('/')[0] != 'nasa.gsfc.gcn':
        return False
    toplevel_params = vp.get_toplevel_params(root)
    if "Coords_String" in toplevel_params:
        return (toplevel_params["Coords_String"]['value'] ==
                "unavailable/inappropriate")
    return False
Predicate function encapsulating 'data clean up' filter code. Currently minimal, but these sort of functions tend to grow over time. Problem 1: Some of the GCN packets have an RA /Dec equal to (0,0) in the WhereWhen, and a flag in the What signifying that those are actually dummy co-ords. (This is used for time-stamping an event which is not localised). So, we don't load those positions, to avoid muddying the database corpus. Problem 2: com.dc3/dc3.broker#BrokerTest packets have dummy RA/Dec values, with no units specified. (They're also marked role=test, so it's not such a big deal, but it generates a lot of debug-log churn.)
def check_existing_filename(filename, onlyfiles=True):
    """Ensure that given filename is a valid, existing file.

    :param filename: path to validate.
    :param onlyfiles: when True, also require a regular file.
    :raises PatoolError: if the path is missing, unreadable or (with
        onlyfiles) not a regular file.
    """
    if not os.path.exists(filename):
        problem = "file `%s' was not found"
    elif not os.access(filename, os.R_OK):
        problem = "file `%s' is not readable"
    elif onlyfiles and not os.path.isfile(filename):
        problem = "`%s' is not a file"
    else:
        return
    raise PatoolError(problem % filename)
Ensure that given filename is a valid, existing file.
def do_create(marfile, files, compress, productversion=None, channel=None,
              signing_key=None, signing_algorithm=None):
    """Create a new MAR file.

    :param marfile: path of the MAR archive to write.
    :param files: iterable of file paths to add to the archive.
    :param compress: compression mode forwarded to ``MarWriter.add``.
    :param productversion: optional product version metadata.
    :param channel: optional update channel metadata.
    :param signing_key: optional key used to sign the archive.
    :param signing_algorithm: optional signing algorithm name.
    """
    # The loop variable used to shadow the open file handle `f`;
    # renamed to keep the archive handle unambiguous.
    with open(marfile, 'w+b') as fileobj:
        with MarWriter(fileobj, productversion=productversion, channel=channel,
                       signing_key=signing_key,
                       signing_algorithm=signing_algorithm,
                       ) as m:
            for path in files:
                m.add(path, compress=compress)
Create a new MAR file.
def interfaces_info():
    """Returns interfaces data keyed by interface name.

    netifaces address-family constants are replaced by readable keys
    ('link', 'ipv4', 'ipv6'); unknown families are kept unchanged.
    """
    family_names = {
        netifaces.AF_LINK: 'link',
        netifaces.AF_INET: 'ipv4',
        netifaces.AF_INET6: 'ipv6',
    }
    info = {}
    for iface in netifaces.interfaces():
        addrs = netifaces.ifaddresses(iface)
        info[iface] = {family_names.get(k, k): v for k, v in addrs.items()}
    return info
Returns interfaces data.
def start(self):
    """Start all the processes.

    Launches the configured actions first, then the message fetcher.
    """
    Global.LOGGER.info("starting the flow manager")
    self._start_actions()
    self._start_message_fetcher()
    Global.LOGGER.debug("flow manager started")
Start all the processes
def tabs_or_spaces(physical_line, indent_char):
    """Never mix tabs and spaces.

    Flags the first character of the leading indentation that differs
    from the file's dominant indent character, returning the offset and
    the E101 message; returns None when the indentation is consistent.
    """
    leading = indent_match(physical_line).group(1)
    for position, character in enumerate(leading):
        if character != indent_char:
            return position, "E101 indentation contains mixed spaces and tabs"
Never mix tabs and spaces. The most popular way of indenting Python is with spaces only. The second-most popular way is with tabs only. Code indented with a mixture of tabs and spaces should be converted to using spaces exclusively. When invoking the Python command line interpreter with the -t option, it issues warnings about code that illegally mixes tabs and spaces. When using -tt these warnings become errors. These options are highly recommended!