code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def readInfoElement(self, infoElement, instanceObject):
    """Read an ``<info>`` element and register its location on *instanceObject*.

    :param infoElement: the parsed ``<info>`` XML element.
    :param instanceObject: object receiving the info location via ``addInfo``.
    """
    infoLocation = self.locationFromElement(infoElement)
    instanceObject.addInfo(infoLocation, copySourceName=self.infoSource)
Read the info element. :: <info> <location/> </info>
def matlab_compatible(name):
    """Make a channel name compatible with MATLAB variable naming.

    Disallowed characters are replaced with underscores, a name that does
    not start with an ASCII letter is prefixed with ``M_``, and the result
    is truncated to 60 characters.

    Parameters
    ----------
    name : str
        channel name

    Returns
    -------
    compatible_name : str
        channel name compatible with MATLAB
    """
    compatible_name = "".join(
        ch if ch in ALLOWED_MATLAB_CHARS else "_" for ch in name
    )
    # Guard against empty input: the original indexed compatible_name[0]
    # and raised IndexError for name == "".
    if not compatible_name or compatible_name[0] not in string.ascii_letters:
        compatible_name = "M_" + compatible_name
    return compatible_name[:60]
make a channel name compatible with Matlab variable naming Parameters ---------- name : str channel name Returns ------- compatible_name : str channel name compatible with Matlab
def close(self):
    """Close the i2c connection and forget the file descriptor."""
    descriptor = self.fd
    if descriptor:
        os.close(descriptor)
        self.fd = None
Close the i2c connection.
def LEB128toint(LEBinput):
    """Convert an unsigned LEB128 value, given as a hex string, to an int.

    LEB128 stores the value as little-endian 7-bit groups: every byte
    except the last has its high (continuation) bit set.

    :param LEBinput: hex string of the encoded value, e.g. ``"E58E26"``.
    :return: the decoded non-negative integer.
    :raises ValueError: if the input is not valid hex or the continuation
        bits are malformed (the original used bare ``assert`` statements,
        which are stripped under ``python -O``).
    """
    data = bytes.fromhex(LEBinput)
    result = 0
    last_index = len(data) - 1
    for index, byte in enumerate(data):
        if index == last_index:
            if byte >= 128:
                raise ValueError("final LEB128 byte must not set the continuation bit")
        elif byte < 128:
            raise ValueError("non-final LEB128 byte must set the continuation bit")
        # Little-endian: byte i contributes its low 7 bits at position 7*i.
        result |= (byte & 0x7F) << (7 * index)
    return result
Convert unsigned LEB128 hex to integer
def inspect(self, **kwargs):
    """Plot the SCF cycle results with matplotlib.

    Returns `matplotlib` figure, None if some error occurred.
    """
    try:
        scf_cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
    except IOError:
        # Output file may not exist yet; treat as "nothing to plot".
        return None
    if scf_cycle is not None:
        # Default the plot title to this object's string representation.
        if "title" not in kwargs:
            kwargs["title"] = str(self)
        return scf_cycle.plot(**kwargs)
    return None
Plot the SCF cycle results with matplotlib. Returns `matplotlib` figure, None if some error occurred.
async def async_init(self):
    """Handle here the asynchronous part of the init: create the Redis pool."""
    # NOTE(review): passing loop= explicitly is deprecated in modern asyncio;
    # confirm whether the pinned aioredis version still requires it.
    self.pool = await aioredis.create_pool(
        (self.host, self.port),
        db=self.db_id,
        minsize=self.min_pool_size,
        maxsize=self.max_pool_size,
        loop=asyncio.get_event_loop(),
    )
Handle here the asynchronous part of the init.
def account(self, id):
    """Fetch account information by user `id`.

    Does not require authentication.

    Returns a `user dict`_.
    """
    account_id = self.__unpack_id(id)
    endpoint = '/api/v1/accounts/{0}'.format(str(account_id))
    return self.__api_request('GET', endpoint)
Fetch account information by user `id`. Does not require authentication. Returns a `user dict`_.
async def await_rpc(self, address, rpc_id, *args, **kwargs):
    """Send an RPC from inside the EmulationLoop and await its response.

    The RPC is queued for the dispatch task and this coroutine blocks
    until it finishes. **Must only be called from inside the EmulationLoop.**

    Args:
        address (int): The address of the tile that has the RPC.
        rpc_id (int or RPCDeclaration): The 16-bit id of the rpc to call.
        *args: Any required arguments for the RPC as python objects.
        **kwargs: Only ``arg_format`` and ``resp_format`` are supported.

    Returns:
        list: The decoded response members ([] when no resp_format is known).
    """
    self.verify_calling_thread(True, "await_rpc must be called from **inside** the event loop")
    # An RPCDeclaration bundles the numeric id with its payload formats.
    if isinstance(rpc_id, RPCDeclaration):
        arg_format = rpc_id.arg_format
        resp_format = rpc_id.resp_format
        rpc_id = rpc_id.rpc_id
    else:
        arg_format = kwargs.get('arg_format', None)
        resp_format = kwargs.get('resp_format', None)
    arg_payload = b''
    if arg_format is not None:
        arg_payload = pack_rpc_payload(arg_format, args)
    self._logger.debug("Sending rpc to %d:%04X, payload=%s", address, rpc_id, args)
    response = AwaitableResponse()
    self._rpc_queue.put_rpc(address, rpc_id, arg_payload, response)
    try:
        resp_payload = await response.wait(1.0)
    except RPCRuntimeError as err:
        # Runtime errors carry a binary payload; decode it like a response.
        resp_payload = err.binary_error
    if resp_format is None:
        return []
    resp = unpack_rpc_payload(resp_format, resp_payload)
    return resp
Send an RPC from inside the EmulationLoop. This is the primary method by which tasks running inside the EmulationLoop dispatch RPCs. The RPC is added to the queue of waiting RPCs to be drained by the RPC dispatch task and this coroutine will block until it finishes. **This method must only be called from inside the EmulationLoop** Args: address (int): The address of the tile that has the RPC. rpc_id (int): The 16-bit id of the rpc we want to call *args: Any required arguments for the RPC as python objects. **kwargs: Only two keyword arguments are supported: - arg_format: A format specifier for the argument list - result_format: A format specifier for the result Returns: list: A list of the decoded response members from the RPC.
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
    """Return an object with matching indices as ``other``.

    Equivalent to ``.reindex(index=other.index, columns=other.columns, ...)``:
    the axes of ``other`` plus the fill options are collected into one
    kwargs dict and forwarded to :meth:`reindex`.
    """
    d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
                                   copy=copy, limit=limit,
                                   tolerance=tolerance)
    return self.reindex(**d)
Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. .. versionadded:: 0.21.0 (list-like tolerance) Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. 
Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium
def get_script_extension_property(value, is_bytes=False):
    """Get `SCX` property."""
    table = unidata.ascii_script_extensions if is_bytes else unidata.unicode_script_extensions
    aliases = unidata.unicode_alias['script']
    if value.startswith('^'):
        # Negated property: resolve the alias of the bare name, keep the '^'.
        name = value[1:]
        key = '^' + aliases.get(name, name)
    else:
        key = aliases.get(value, value)
    return table[key]
Get `SCX` property.
def save_model(self, file_name='model.cx'):
    """Save the assembled CX network in a file.

    Parameters
    ----------
    file_name : Optional[str]
        The name of the file to save the CX network to. Default: model.cx
    """
    with open(file_name, 'wt') as fh:
        fh.write(self.print_cx())
Save the assembled CX network in a file. Parameters ---------- file_name : Optional[str] The name of the file to save the CX network to. Default: model.cx
def letter_set(self):
    """Return the letter set of this node as a list of single characters."""
    buffer = ctypes.create_string_buffer(MAX_CHARS)
    cgaddag.gdg_letter_set(self.gdg, self.node, buffer)
    return list(buffer.value.decode("ascii"))
Return the letter set of this node.
def _from_keras_log_format(data, **kwargs):
    """Plot accuracy and loss from a pandas dataframe in Keras CSV log format.

    Args:
        data: Pandas dataframe in the format of the Keras CSV log.
        **kwargs: Forwarded to ``plot`` (e.g. output_dir_path).
    """
    validation = pd.DataFrame(data[['epoch']])
    validation['acc'] = data['val_acc']
    validation['loss'] = data['val_loss']
    validation['data'] = 'validation'
    training = pd.DataFrame(data[['acc', 'loss', 'epoch']])
    training['data'] = 'training'
    combined = pd.concat([training, validation], sort=False)
    plot(combined, **kwargs)
Plot accuracy and loss from a panda's dataframe. Args: data: Panda dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resultings plots should end up.
def get_xritdecompress_outfile(stdout):
    """Analyse xRITDecompress output and return the decompressed file name.

    Scans *stdout* (an iterable of ``bytes`` lines) for a
    ``Decompressed file: <name>`` entry. Returns ``b''`` when no such
    entry exists or a line without a colon is encountered first.
    """
    for line in stdout:
        parts = line.split(b':', 1)
        if len(parts) != 2:
            # Malformed line: stop scanning, as the original did.
            break
        key, value = (part.strip() for part in parts)
        if key == b'Decompressed file':
            return value
    return b''
Analyse the output of the xRITDecompress command call and return the file.
def stop_job(self, job_id, array_id = None):
    """Reset the given job (and matching array jobs) to 'submitted' when they
    are currently 'executing', 'queued' or 'waiting'.

    :param job_id: id of the job to reset.
    :param array_id: optional specific array job; when None, all array jobs
        of the job are reset.
    """
    self.lock()
    job, array_job = self._job_and_array(job_id, array_id)
    if job is not None:
        if job.status in ('executing', 'queued', 'waiting'):
            logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id))
            job.status = 'submitted'
        if array_job is not None and array_job.status in ('executing', 'queued', 'waiting'):
            logger.debug("Reset array job '%s' in the database", array_job)
            array_job.status = 'submitted'
        if array_job is None:
            # No specific array job requested: reset every eligible one.
            for array_job in job.array:
                if array_job.status in ('executing', 'queued', 'waiting'):
                    logger.debug("Reset array job '%s' in the database", array_job)
                    array_job.status = 'submitted'
    self.session.commit()
    self.unlock()
Resets the status of the given job (and its array jobs) to 'submitted' when they are labeled as 'executing', 'queued' or 'waiting'.
def small_integer(self, column, auto_increment=False, unsigned=False):
    """Create a new small integer column on the table.

    :param column: The column
    :type column: str
    :type auto_increment: bool
    :type unsigned: bool
    :rtype: Fluent
    """
    options = {"auto_increment": auto_increment, "unsigned": unsigned}
    return self._add_column("small_integer", column, **options)
Create a new small integer column on the table. :param column: The column :type column: str :type auto_increment: bool :type unsigned: bool :rtype: Fluent
def once(self, event, callback):
    'Define a callback to handle the first event emitted by the server'
    # Record the event name first (presumably consumed elsewhere to
    # unregister the handler after its first firing — confirm), then
    # register the callback like a normal handler.
    self._once_events.add(event)
    self.on(event, callback)
Define a callback to handle the first event emitted by the server
def by_login(cls, session, login, local=True):
    """Get a user from a given login.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param login: the user login
    :type login: unicode
    :return: the associated user, or None
    :rtype: :class:`pyshop.models.User`
    """
    user = cls.first(session,
                     where=((cls.login == login),
                            (cls.local == local),)
                     )
    # NOTE(review): the login is re-checked for an exact match — presumably
    # to guard against case-insensitive DB collations; confirm.
    return user if user and user.login == login else None
Get a user from a given login. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param login: the user login :type login: unicode :return: the associated user :rtype: :class:`pyshop.models.User`
def _is_exception_rule(self, element): if element[0].isdigit() and element[-1].isdigit(): return True if len(element) > 1 and element[0].isdigit() and element[-2].isdigit() and element[-1].isalpha(): return True if len(element) == 1 and element.isalpha(): return True return False
Check for "exception rule". Address elements will be appended onto a new line on the lable except for when the penultimate lable line fulfils certain criteria, in which case the element will be concatenated onto the penultimate line. This method checks for those criteria. i) First and last characters of the Building Name are numeric (eg '1to1' or '100:1') ii) First and penultimate characters are numeric, last character is alphabetic (eg '12A') iii) Building Name has only one character (eg 'A')
def _register_default_option(nsobj, opt):
    """Register a default ConfigOption value if it doesn't exist.

    If it does exist, only the description is updated (when changed).

    :param nsobj: namespace object owning the config items.
    :param opt: the option holding name/type/default_value/description.
    """
    item = ConfigItem.get(nsobj.namespace_prefix, opt.name)
    if not item:
        logger.info('Adding {} ({}) = {} to {}'.format(
            opt.name, opt.type, opt.default_value, nsobj.namespace_prefix
        ))
        item = ConfigItem()
        item.namespace_prefix = nsobj.namespace_prefix
        item.key = opt.name
        item.value = opt.default_value
        item.type = opt.type
        item.description = opt.description
        nsobj.config_items.append(item)
    else:
        if item.description != opt.description:
            logger.info('Updating description of {} / {}'.format(item.namespace_prefix, item.key))
            item.description = opt.description
    # Stage the new/updated item in the session (commit happens elsewhere).
    db.session.add(item)
Register default ConfigOption value if it doesn't exist. If does exist, update the description if needed
def _transform_field(field): if isinstance(field, bool): return TRUE if field else FALSE elif isinstance(field, (list, dict)): return json.dumps(field, sort_keys=True, ensure_ascii=False) else: return field
transform field for displaying
def splits(cls, datasets, batch_sizes=None, **kwargs):
    """Create Iterator objects for multiple splits of a dataset.

    Arguments:
        datasets: Tuple of Dataset objects corresponding to the splits. The
            first such object should be the train set.
        batch_sizes: Tuple of batch sizes to use for the different splits,
            or None to use the same batch_size for all splits.
        Remaining keyword arguments: Passed to the constructor of the
            iterator class being used.
    """
    if batch_sizes is None:
        # A single 'batch_size' kwarg is replicated across every split.
        batch_sizes = [kwargs.pop('batch_size')] * len(datasets)
    # Only the first split is the training set.
    return tuple(
        cls(datasets[index], batch_size=batch_sizes[index],
            train=(index == 0), **kwargs)
        for index in range(len(datasets))
    )
Create Iterator objects for multiple splits of a dataset. Arguments: datasets: Tuple of Dataset objects corresponding to the splits. The first such object should be the train set. batch_sizes: Tuple of batch sizes to use for the different splits, or None to use the same batch_size for all splits. Remaining keyword arguments: Passed to the constructor of the iterator class being used.
def _check_cb(cb_): if cb_ is not None: if hasattr(cb_, '__call__'): return cb_ else: log.error('log_callback is not callable, ignoring') return lambda x: x
If the callback is None or is not callable, return a lambda that returns the value passed.
def dedent(s):
    """Remove common leading whitespace from every line of *s* except the first.

    The first line is kept as-is and ``textwrap.dedent`` is applied to the
    remainder.

    :param s: the string to dedent.
    :return: the dedented string. A string containing no newline is
        returned unchanged (the original appended a spurious trailing
        newline in that case).
    """
    head, sep, tail = s.partition('\n')
    if not sep:
        # No newline: nothing to dedent and nothing to re-join.
        return s
    return "{head}\n{tail}".format(head=head, tail=textwrap.dedent(tail))
Removes the common leading indentation from every line of a string except the first.
def update(self, request, datum):
    """Switches the action verbose name, if needed."""
    if not getattr(self, 'action_present', False):
        return
    self.verbose_name = self._get_action_name()
    self.verbose_name_plural = self._get_action_name('plural')
Switches the action verbose name, if needed.
def normalize(rp_pyxb):
    """Normalize a ReplicationPolicy PyXB type in place.

    Blocked nodes override preferred nodes: any node present in both lists
    is removed from the preferred list, and both lists are then sorted
    alphabetically.

    Args:
        rp_pyxb : ReplicationPolicy PyXB object
            The object will be normalized in place.
    """
    def sort(r, a):
        # Sort one of the node lists in place by PyXB value.
        d1_common.xml.sort_value_list_pyxb(_get_attr_or_list(r, a))
    # Drop any preferred node that is also blocked.
    rp_pyxb.preferredMemberNode = set(_get_attr_or_list(rp_pyxb, 'pref')) - set(
        _get_attr_or_list(rp_pyxb, 'block')
    )
    sort(rp_pyxb, 'block')
    sort(rp_pyxb, 'pref')
Normalize a ReplicationPolicy PyXB type in place. The preferred and blocked lists are sorted alphabetically. As blocked nodes override preferred nodes, and any node present in both lists is removed from the preferred list. Args: rp_pyxb : ReplicationPolicy PyXB object The object will be normalized in place.
def get_analysis_element(self, element, sep='|'):
    """The list of analysis elements of ``words`` layer.

    Parameters
    ----------
    element: str
        The name of the element, for example "lemma", "postag".
    sep: str
        The separator for ambiguous analyses (default: "|"), since
        morphological analysis cannot always yield unambiguous results.
    """
    return [self.__get_key(word[ANALYSIS], element, sep) for word in self.words]
The list of analysis elements of ``words`` layer. Parameters ---------- element: str The name of the element, for example "lemma", "postag". sep: str The separator for ambiguous analysis (default: "|"). As morphological analysis cannot always yield unambiguous results, we return ambiguous values separated by the pipe character as default.
def authenticate(self, password):
    """Decrypt document with a password.

    :param password: the password to try.
    :return: result of the underlying _fitz call (truthy on success).
    :raises ValueError: if the document is closed.
    """
    if self.isClosed:
        raise ValueError("operation illegal for closed doc")
    val = _fitz.Document_authenticate(self, password)
    if val:
        # Successful decryption: mark as no longer encrypted and
        # (re)load the document data.
        self.isEncrypted = 0
        self.initData()
        self.thisown = True
    return val
Decrypt document with a password.
def cmd_gimbal_status(self, args):
    '''show gimbal status'''
    messages = self.master.messages
    if 'GIMBAL_REPORT' not in messages:
        print("No GIMBAL_REPORT messages")
        return
    print(messages['GIMBAL_REPORT'])
show gimbal status
def subtract_months(self, months: int) -> datetime:
    """Subtract *months* from the current value and return the new value."""
    self.value -= relativedelta(months=months)
    return self.value
Subtracts a number of months from the current value
def crypt(password, cost=2):
    """Hash a password with PBKDF2.

    result sample:
    $pbkdf2-256-1$8$FRakfnkgpMjnqs1Xxgjiwgycdf68be9b06451039cc0f7075ec1c369fa36f055b1705ec7a

    The returned string is broken down into
    - The algorithm and version used
    - The cost factor (iterations = cost * 500)
    - The salt
    - The hashed password
    """
    salt = _generate_salt(cost)
    # NOTE(review): cost * 500 iterations is very low by modern PBKDF2
    # standards — confirm this is acceptable for the threat model.
    hashed = pbkdf2.pbkdf2_hex(password, salt, cost * 500)
    return "$pbkdf2-256-1$" + str(cost) + "$" + salt.decode("utf-8") + "$" + hashed
Hash a password result sample: $pbkdf2-256-1$8$FRakfnkgpMjnqs1Xxgjiwgycdf68be9b06451039cc\ 0f7075ec1c369fa36f055b1705ec7a The returned string is broken down into - The algorithm and version used - The cost factor, number of iterations over the hash - The salt - The password
def union(self, *others, **kwargs):
    """Returns the union of variants in a several VariantCollection objects."""
    collections = (self,) + others
    return self._combine_variant_collections(
        combine_fn=set.union,
        variant_collections=collections,
        kwargs=kwargs)
Returns the union of variants in a several VariantCollection objects.
def pad_to_3d(a):
    """Convert 1- or 2-dimensional cartesian vectors into a 3-dimensional
    representation, with the additional coordinates set to zero.

    Parameters
    ----------
    a: array, shape (n, d), d < 3

    Returns
    -------
    ap: array, shape (n, 3)
    """
    padded = np.zeros((len(a), 3), dtype=a.dtype)
    padded[:, :a.shape[-1]] = a
    return padded
Return 1- or 2-dimensional cartesian vectors, converted into a 3-dimensional representation, with additional dimensional coordinates assumed to be zero. Parameters ---------- a: array, shape (n, d), d < 3 Returns ------- ap: array, shape (n, 3)
def get_point_source_fluxes(self, id, energies, tag=None):
    """Get the fluxes from the id-th point source.

    :param id: id (insertion position) of the source.
    :param energies: energies at which you need the flux.
    :param tag: a tuple (integration variable, a, b) specifying an optional
        integration to perform (see class docs).
    :return: fluxes
    """
    # list() is required on Python 3, where dict.values() returns a view
    # that is not subscriptable (the original `.values()[id]` only worked
    # on Python 2).
    source = list(self._point_sources.values())[id]
    return source(energies, tag=tag)
Get the fluxes from the id-th point source :param id: id of the source :param energies: energies at which you need the flux :param tag: a tuple (integration variable, a, b) specifying the integration to perform. If this parameter is specified then the returned value will be the average flux for the source computed as the integral between a and b over the integration variable divided by (b-a). The integration variable must be an independent variable contained in the model. If b is None, then instead of integrating the integration variable will be set to a and the model evaluated in a. :return: fluxes
def validate(self, command, token, team_id, method):
    """Validate request queries with registerd commands.

    :param command: command parameter from request
    :param token: token parameter from request
    :param team_id: team_id parameter from request
    :param method: the request method
    :raises SlackError: on unknown command, disallowed method or bad token.
    """
    key = (team_id, command)
    if key not in self._commands:
        raise SlackError('Command {0} is not found in team {1}'.format(
            command, team_id))
    func, expected_token, methods, kwargs = self._commands[key]
    if method not in methods:
        raise SlackError('{} request is not allowed'.format(method))
    if token != expected_token:
        raise SlackError('Your token {} is invalid'.format(token))
Validate request queries with registerd commands :param command: command parameter from request :param token: token parameter from request :param team_id: team_id parameter from request :param method: the request method
def act(self, action):
    """Take one action for one step.

    :param action: index into the player's control mapping.
    :return: (observation, reward, terminal, info) gym-style tuple.
    """
    action = int(action)
    # NOTE(review): asserts are stripped under `python -O`; consider
    # raising ValueError for real input validation.
    assert isinstance(action, int)
    assert action < self.actions_num, "%r (%s) invalid"%(action, type(action))
    # Release all buttons, then press the ones mapped to this action.
    for k in self.world_layer.buttons:
        self.world_layer.buttons[k] = 0
    for key in self.world_layer.player.controls[action]:
        if key in self.world_layer.buttons:
            self.world_layer.buttons[key] = 1
    self.step()
    observation = self.world_layer.get_state()
    reward = self.world_layer.player.get_reward()
    terminal = self.world_layer.player.game_over
    info = {}
    return observation, reward, terminal, info
Take one action for one step
def listener(messages):
    """Called by TeleBot when new messages arrive; echoes text messages."""
    for message in messages:
        if message.content_type != 'text':
            continue
        chat = message.chat
        print(str(chat.first_name) + " [" + str(chat.id) + "]: " + message.text)
When new messages arrive TeleBot will call this function.
def find_cookie(self):
    """Find a list of all cookies applicable to the request's domain.

    A cookie applies when its ``domain`` attribute is empty and it was set
    by the origin host, or when its domain occurs in the origin host name.

    :return: list of matching cookie objects from the cookie jar.
    """
    matches = []
    origin_domain = self.request_object.dest_addr
    for cookie, source_host in self.cookiejar:
        for morsel in cookie.values():
            cover_domain = morsel['domain']
            if cover_domain == '':
                # Host-only cookie: only valid for the host that set it.
                if origin_domain == source_host:
                    matches.append(cookie)
            else:
                # BUG FIX: str.find() returns -1 (not False) on a miss, so
                # the original `if nend is not False` was always true and
                # every domain-scoped cookie matched regardless of domain.
                if origin_domain.lower().find(cover_domain.lower()) != -1:
                    matches.append(cookie)
    return matches
Find a list of all cookies for a given domain
def is_rpm_package_installed(pkg):
    """Checks if a particular rpm package is installed (via fabric sudo).

    Runs ``rpm -q <pkg>``: return code 0 means installed, 1 means not
    installed; anything else prints the result and aborts.

    :param pkg: package name to query.
    :return: True if installed, False if not.
    :raises SystemExit: on an unexpected rpm return code.
    """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        result = sudo("rpm -q %s" % pkg)
        if result.return_code == 0:
            return True
        elif result.return_code == 1:
            return False
        else:
            # Unexpected failure (e.g. rpm itself broken): show and abort.
            print(result)
            raise SystemExit()
checks if a particular rpm package is installed
def branches(self):
    """Get basic block branches as a list of (target, kind) tuples."""
    labeled = (
        (self._taken_branch, 'taken'),
        (self._not_taken_branch, 'not-taken'),
        (self._direct_branch, 'direct'),
    )
    return [(target, kind) for target, kind in labeled if target]
Get basic block branches.
def pipe_sort(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that sorts the input source according to the specified key.

    Not loopable. Not lazy. (Python 2 code: relies on ``imap`` and the
    ``cmp=`` keyword of ``sorted``.)

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipe2py.modules pipe like object (iterable of items)
    kwargs -- other inputs, e.g. to feed terminals for rule values
    conf : {'KEY': [{'field': {...}, 'dir': {...}}]}

    Returns
    -------
    _OUTPUT : generator of sorted items
    """
    test = kwargs.pop('pass_if', None)
    _pass = utils.get_pass(test=test)
    key_defs = imap(DotDict, utils.listize(conf['KEY']))
    get_value = partial(utils.get_value, **kwargs)
    parse_conf = partial(utils.parse_conf, parse_func=get_value, **kwargs)
    keys = imap(parse_conf, key_defs)
    # A '-' prefix marks a descending sort on that field.
    order = ('%s%s' % ('-' if k.dir == 'DESC' else '', k.field) for k in keys)
    comparers = map(get_comparer, order)
    cmp_func = partial(multikeysort, comparers=comparers)
    # If the pass-through test fires, emit the input unchanged.
    _OUTPUT = _INPUT if _pass else iter(sorted(_INPUT, cmp=cmp_func))
    return _OUTPUT
An operator that sorts the input source according to the specified key. Not loopable. Not lazy. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) kwargs -- other inputs, e.g. to feed terminals for rule values conf : { 'KEY': [ { 'field': {'type': 'text', 'value': 'title'}, 'dir': {'type': 'text', 'value': 'DESC'} } ] } Returns ------- _OUTPUT : generator of sorted items
def _parse_cpe_name(cpe): part = { 'o': 'operating system', 'h': 'hardware', 'a': 'application', } ret = {} cpe = (cpe or '').split(':') if len(cpe) > 4 and cpe[0] == 'cpe': if cpe[1].startswith('/'): ret['vendor'], ret['product'], ret['version'] = cpe[2:5] ret['phase'] = cpe[5] if len(cpe) > 5 else None ret['part'] = part.get(cpe[1][1:]) elif len(cpe) == 13 and cpe[1] == '2.3': ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]] ret['part'] = part.get(cpe[2]) return ret
Parse CPE_NAME data from the os-release Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe :param cpe: :return:
def doc_children(self, doctype, limiters=None):
    """Finds all grand-children of this element's docstrings that match
    the specified doctype.

    :param doctype: doctype to collect from each docstring's children.
    :param limiters: optional list of doctypes; when non-empty, only
        docstrings with those doctypes are searched. (Changed from a
        mutable default ``[]`` to ``None`` — backward compatible, the
        list was only ever read.)
    :return: list of matching grand-children.
    """
    result = []
    for doc in self.docstring:
        # `not limiters` covers both None and an empty list, matching the
        # original `len(limiters) == 0` check.
        if not limiters or doc.doctype in limiters:
            result.extend(doc.children(doctype))
    return result
Finds all grand-children of this element's docstrings that match the specified doctype. If 'limiters' is specified, only docstrings with those doctypes are searched.
def wr_row_mergeall(self, worksheet, txtstr, fmt, row_idx):
    """Merge all columns of *row_idx*, place *txtstr* in the widened cell,
    and return the next row index."""
    last_col = len(self.hdrs) - 1
    worksheet.merge_range(row_idx, 0, row_idx, last_col, txtstr, fmt)
    return row_idx + 1
Merge all columns and place text string in widened cell.
def cli(context, host, username, password):
    """FritzBox SmartHome Tool

    \b
    Provides the following functions:
    - A easy to use library for querying SmartHome actors
    - This CLI tool for testing
    - A carbon client for pipeing data into graphite
    """
    # Store a shared FritzBox connection on the (click) context for subcommands.
    context.obj = FritzBox(host, username, password)
FritzBox SmartHome Tool \b Provides the following functions: - A easy to use library for querying SmartHome actors - This CLI tool for testing - A carbon client for pipeing data into graphite
def example_reading_spec(self):
    """Data fields to store on disk and their decoders."""
    # Discretized rewards are stored as ints, continuous ones as floats.
    processed_reward_type = tf.float32
    if self.is_processed_rewards_discrete:
        processed_reward_type = tf.int64
    data_fields = {
        TIMESTEP_FIELD: tf.FixedLenFeature((1,), tf.int64),
        RAW_REWARD_FIELD: tf.FixedLenFeature((1,), tf.float32),
        PROCESSED_REWARD_FIELD: tf.FixedLenFeature((1,), processed_reward_type),
        DONE_FIELD: tf.FixedLenFeature((1,), tf.int64),
        OBSERVATION_FIELD: self.observation_spec,
        ACTION_FIELD: self.action_spec,
    }
    # Each field decodes to the identically-named tensor.
    data_items_to_decoders = {
        field: tf.contrib.slim.tfexample_decoder.Tensor(field)
        for field in data_fields
    }
    return data_fields, data_items_to_decoders
Data fields to store on disk and their decoders.
def prepare_auth(self, auth, url=''):
    """Prepares the given HTTP auth data."""
    # No auth given: try extracting basic-auth credentials from the URL.
    if auth is None:
        url_auth = get_auth_from_url(self.url)
        auth = url_auth if any(url_auth) else None
    if auth:
        if isinstance(auth, tuple) and len(auth) == 2:
            # A (user, pass) tuple is shorthand for HTTP basic auth.
            auth = HTTPBasicAuth(*auth)
        # Auth callables return a modified request; fold its state back in.
        r = auth(self)
        self.__dict__.update(r.__dict__)
        # The body may have changed (e.g. auth signing); recompute length.
        self.prepare_content_length(self.body)
Prepares the given HTTP auth data.
def _prime_group_perm_caches(self): perm_cache = self._get_group_cached_perms() self.group._authority_perm_cache = perm_cache self.group._authority_perm_cache_filled = True
Prime the group cache and put them on the ``self.group``. In addition add a cache filled flag on ``self.group``.
def _prettify_response(self, response):
    """Prettify the HTML response.

    :param response: A Flask Response object.
    :return: the same response, with prettified HTML body when applicable.
    """
    if response.content_type == 'text/html; charset=utf-8':
        ugly = response.get_data(as_text=True)
        soup = BeautifulSoup(ugly, 'html.parser')
        pretty = soup.prettify(formatter='html')
        # set_data requires passthrough mode to be off.
        response.direct_passthrough = False
        response.set_data(pretty)
    return response
Prettify the HTML response. :param response: A Flask Response object.
def field_keyword_for_the_layer(self):
    """Return the proper keyword for field for the current layer.

    :returns: the field keyword
    :rtype: str
    :raises InvalidParameterError: for an unsupported layer purpose.
    """
    layer_purpose_key = self.step_kw_purpose.selected_purpose()['key']
    if layer_purpose_key == layer_purpose_aggregation['key']:
        # Aggregation layers have no subcategory.
        return get_compulsory_fields(layer_purpose_key)['key']
    elif layer_purpose_key in [
            layer_purpose_exposure['key'], layer_purpose_hazard['key']]:
        # Exposure/hazard layers also need the selected subcategory.
        layer_subcategory_key = \
            self.step_kw_subcategory.selected_subcategory()['key']
        return get_compulsory_fields(
            layer_purpose_key, layer_subcategory_key)['key']
    else:
        raise InvalidParameterError
Return the proper keyword for field for the current layer. :returns: the field keyword :rtype: str
def persistent_attributes(self):
    """Attributes stored at the Persistence level of the skill lifecycle.

    Fetched lazily from the persistence adapter on first access, then cached.

    :return: persistent_attributes retrieved from persistence adapter
    :rtype: Dict[str, object]
    :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
        if trying to get persistent attributes without persistence adapter
    """
    if not self._persistence_adapter:
        raise AttributesManagerException(
            "Cannot get PersistentAttributes without Persistence adapter")
    if not self._persistent_attributes_set:
        self._persistence_attributes = (
            self._persistence_adapter.get_attributes(
                request_envelope=self._request_envelope))
        self._persistent_attributes_set = True
    return self._persistence_attributes
Attributes stored at the Persistence level of the skill lifecycle. :return: persistent_attributes retrieved from persistence adapter :rtype: Dict[str, object] :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException` if trying to get persistent attributes without persistence adapter
def build_blast_cmd(self, fname, dbname):
    """Return the BLASTN command for *fname* against database *dbname*."""
    blastn = self.funcs.blastn_func
    return blastn(fname, dbname, self.outdir, self.exes.blast_exe)
Return BLASTN command
def _set_visible(self, visibility, grid_index=None): if grid_index is None: for ax in self.flat_grid: ax.set_visible(visibility) else: if grid_index < 0 or grid_index >= len(self.grids): raise IndexError('Valid indices : 0 to {}'.format(len(self.grids) - 1)) for ax in self.grids[grid_index]: ax.set_visible(visibility)
Sets the visibility property of all axes.
def load(self, geojson, uri=None, db=None, collection=None):
    """Load geojson file into mongodb instance.

    :param geojson: path to the geojson file to load.
    :param uri: MongoDB connection URI.
    :param db: target database name.
    :param collection: target collection name.
    """
    logging.info("Mongo URI: {0}".format(uri))
    logging.info("Mongo DB: {0}".format(db))
    logging.info("Mongo Collection: {0}".format(collection))
    logging.info("Geojson File to be loaded: {0}".format(geojson))
    mongo = MongoGeo(db=db, collection=collection, uri=uri)
    # Stream features from the file straight into the collection.
    GeoJSONLoader().load(geojson, mongo.insert)
Load geojson file into mongodb instance
def add_path(G, data, one_way):
    """Add a path to the graph.

    Parameters
    ----------
    G : networkx multidigraph
    data : dict
        the attributes of the path. NOTE: mutated in place — 'nodes' is
        removed and 'oneway' is added before use as edge data.
    one_way : bool
        if this path is one-way or if it is bi-directional

    Returns
    -------
    None
    """
    path_nodes = data['nodes']
    del data['nodes']
    data['oneway'] = one_way
    # Consecutive node pairs form the directed edges of the path.
    path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))
    G.add_edges_from(path_edges, **data)
    if not one_way:
        # Bi-directional: also add every edge in the opposite direction.
        path_edges_opposite_direction = [(v, u) for u, v in path_edges]
        G.add_edges_from(path_edges_opposite_direction, **data)
Add a path to the graph. Parameters ---------- G : networkx multidigraph data : dict the attributes of the path one_way : bool if this path is one-way or if it is bi-directional Returns ------- None
def example_lab_to_xyz():
    """This function shows a simple conversion of an Lab color to an XYZ color."""
    print("=== Simple Example: Lab->XYZ ===")
    # NOTE(review): L=0.903 is unusually small for an L*a*b* lightness
    # (typically 0-100) — confirm this example value is intentional.
    lab = LabColor(0.903, 16.296, -2.22)
    print(lab)
    xyz = convert_color(lab, XYZColor)
    print(xyz)
    print("=== End Example ===\n")
This function shows a simple conversion of an Lab color to an XYZ color.
def throw(self, typ, val=None, tb=None):
    """Throw an exception into the origin fiber.

    The exception is thrown the next time the event loop runs. No-op if
    the hub is gone or the fiber has already finished.
    """
    if self._hub is None or not self._fiber.is_alive():
        return
    self._hub.run_callback(self._fiber.throw, typ, val, tb)
    # Drop the references so this object cannot be used to throw twice.
    self._hub = self._fiber = None
Throw an exception into the origin fiber. The exception is thrown the next time the event loop runs.
def get_initkwargs(cls, form_list, initial_dict=None, instance_dict=None, condition_dict=None, *args, **kwargs):
    """Creates a dict with all needed parameters for the form wizard instances.

    (Python 2 code: uses ``unicode``, ``itervalues`` and ``SortedDict``.)

    * `form_list` - list of form classes or (`step_name`, `form_class`)
      tuples; plain classes get zero-based counter step names.
    * `initial_dict` - initial data dictionaries keyed by step name.
    * `instance_dict` - instance objects keyed by step name (ModelForms only).
    * `condition_dict` - booleans or callables keyed by step name.
    """
    kwargs.update({
        'initial_dict': initial_dict or {},
        'instance_dict': instance_dict or {},
        'condition_dict': condition_dict or {},
    })
    init_form_list = SortedDict()
    assert len(form_list) > 0, 'at least one form is needed'
    # Normalize the form list into an ordered mapping of step name -> form.
    for i, form in enumerate(form_list):
        if isinstance(form, (list, tuple)):
            init_form_list[unicode(form[0])] = form[1]
        else:
            init_form_list[unicode(i)] = form
    # Any FileField requires a configured file storage so uploads survive
    # between wizard steps.
    for form in init_form_list.itervalues():
        if issubclass(form, formsets.BaseFormSet):
            form = form.form
        for field in form.base_fields.itervalues():
            if (isinstance(field, forms.FileField) and not hasattr(cls, 'file_storage')):
                raise NoFileStorageConfigured
    kwargs['form_list'] = init_form_list
    return kwargs
Creates a dict with all needed parameters for the form wizard instances. * `form_list` - is a list of forms. The list entries can be single form classes or tuples of (`step_name`, `form_class`). If you pass a list of forms, the formwizard will convert the class list to (`zero_based_counter`, `form_class`). This is needed to access the form for a specific step. * `initial_dict` - contains a dictionary of initial data dictionaries. The key should be equal to the `step_name` in the `form_list` (or the str of the zero based counter - if no step_names added in the `form_list`) * `instance_dict` - contains a dictionary of instance objects. This list is only used when `ModelForm`s are used. The key should be equal to the `step_name` in the `form_list`. Same rules as for `initial_dict` apply. * `condition_dict` - contains a dictionary of boolean values or callables. If the value of for a specific `step_name` is callable it will be called with the formwizard instance as the only argument. If the return value is true, the step's form will be used.
def call_many(self, callback, args):
    """Invoke *callback* once per argument tuple; return the last result.

    *callback* may be a method name on ``self`` instead of a callable.
    """
    if isinstance(callback, str):
        callback = getattr(self, callback)
    result = None
    for call_args in args:
        result = callback(*call_args)
    return result
Run ``callback`` once for each tuple of arguments in ``args``; if ``callback`` is a string it is looked up as a method on ``self``. Returns the result of the last call (None when ``args`` is empty).
def _enough_time_has_passed(self, FPS): if FPS == 0: return False else: earliest_time = self.last_update_time + (1.0 / FPS) return time.time() >= earliest_time
For limiting how often frames are computed.
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
    """Helper to create a parse action for converting a parsed datetime
    string to a Python ``datetime.datetime``.

    Params -
    - fmt - format to be passed to datetime.strptime
      (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)

    Example::

        dt_expr = pyparsing_common.iso8601_datetime.copy()
        dt_expr.setParseAction(pyparsing_common.convertToDatetime())
        print(dt_expr.parseString("1999-12-31T23:59:59.999"))

    prints::

        [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
    """
    def cvt_fn(s,l,t):
        try:
            return datetime.strptime(t[0], fmt)
        except ValueError as ve:
            # Surface bad input as a ParseException at the token's location.
            raise ParseException(s, l, str(ve))
    return cvt_fn
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
def get_position(self, rst_tree, node_id=None): if node_id is None: node_id = self.root_id if node_id in rst_tree.edu_set: return rst_tree.edus.index(node_id) return min(self.get_position(rst_tree, child_node_id) for child_node_id in rst_tree.child_dict[node_id])
Get the linear position of an element of this DGParentedTree in an RSTTree. If ``node_id`` is given, this will return the position of the subtree with that node ID. Otherwise, the position of the root of this DGParentedTree in the given RSTTree is returned.
def _get_rate(self, value):
    """Return the rate in Hz from the short int *value* (0 means stopped)."""
    if value == 0:
        return 0
    return MINIMAL_RATE_HZ * math.exp(value * self._get_factor())
Return the rate in Hz from the short int value
def _combined_wildcards_iter(flatterm: Iterator[TermAtom]) -> Iterator[TermAtom]:
    """Combine consecutive wildcards in a flatterm into a single one.

    Adjacent plain wildcards are merged by summing their minimum counts;
    the merged wildcard is fixed-size only if both parts were. Symbol
    wildcards and all other atoms pass through unchanged.
    """
    last_wildcard = None  # pending wildcard not yet emitted
    for term in flatterm:
        if isinstance(term, Wildcard) and not isinstance(term, SymbolWildcard):
            if last_wildcard is not None:
                # Merge this wildcard into the pending one.
                new_min_count = last_wildcard.min_count + term.min_count
                new_fixed_size = last_wildcard.fixed_size and term.fixed_size
                last_wildcard = Wildcard(new_min_count, new_fixed_size)
            else:
                last_wildcard = Wildcard(term.min_count, term.fixed_size)
        else:
            # Non-wildcard atom: flush any pending wildcard first.
            if last_wildcard is not None:
                yield last_wildcard
                last_wildcard = None
            yield term
    if last_wildcard is not None:
        yield last_wildcard
Combine consecutive wildcards in a flatterm into a single one.
def _find_path(self, search_dirs, file_name): for dir_path in search_dirs: file_path = os.path.join(dir_path, file_name) if os.path.exists(file_path): return file_path return None
Search for the given file, and return the path. Returns None if the file is not found.
def validate_deprecation_semver(version_string, version_description):
    """Validates that version_string is a valid semver. If so, returns that semver.
    Raises an error otherwise.

    :param str version_string: A pantsbuild.pants version which affects some deprecated entity.
    :param str version_description: A string used in exception messages to describe what the
                                    `version_string` represents.
    :rtype: `packaging.version.Version`
    :raises DeprecationApplicationError: if the version_string parameter is invalid.
    """
    if version_string is None:
        raise MissingSemanticVersionError('The {} must be provided.'.format(version_description))
    if not isinstance(version_string, six.string_types):
        raise BadSemanticVersionError('The {} must be a version string.'.format(version_description))
    # Only the parse itself can raise InvalidVersion, so the try is limited
    # to that call; the semantic checks below raise their own error types.
    try:
        parsed = Version(version_string)
    except InvalidVersion as e:
        raise BadSemanticVersionError('The given {} {} is not a valid version: '
                                      '{}'.format(version_description, version_string, e))
    if len(parsed.base_version.split('.')) != 3:
        raise BadSemanticVersionError('The given {} is not a valid version: '
                                      '{}'.format(version_description, version_string))
    if not parsed.is_prerelease:
        raise NonDevSemanticVersionError('The given {} is not a dev version: {}\n'
                                         'Features should generally be removed in the first `dev` release '
                                         'of a release cycle.'.format(version_description, version_string))
    return parsed
Validates that version_string is a valid semver. If so, returns that semver. Raises an error otherwise. :param str version_string: A pantsbuild.pants version which affects some deprecated entity. :param str version_description: A string used in exception messages to describe what the `version_string` represents. :rtype: `packaging.version.Version` :raises DeprecationApplicationError: if the version_string parameter is invalid.
def get_composition(self):
    """Gets the Composition corresponding to this asset.

    return: (osid.repository.Composition) - the composiiton
    raise: IllegalState - ``is_composition()`` is ``false``
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    if not bool(self._my_map['compositionId']):
        raise errors.IllegalState('composition empty')
    repository_mgr = self._get_provider_manager('REPOSITORY')
    if not repository_mgr.supports_composition_lookup():
        raise errors.OperationFailed('Repository does not support Composition lookup')
    session = repository_mgr.get_composition_lookup_session(
        proxy=getattr(self, "_proxy", None))
    # Federated view so compositions in child repositories are visible too.
    session.use_federated_repository_view()
    return session.get_composition(self.get_composition_id())
Gets the Composition corresponding to this asset. return: (osid.repository.Composition) - the composiiton raise: IllegalState - ``is_composition()`` is ``false`` raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
def _get_appointee(id):
    """Fetch the appointee resource for ``id`` and return the parsed
    restclients.models.hrp.AppointeePerson object.
    """
    url = "%s%s.json" % (URL_PREFIX, id)
    return process_json(get_resource(url))
Return a restclients.models.hrp.AppointeePerson object
def debug(self, msg, indent=0, **kwargs):
    """Indent ``msg`` by ``indent`` and forward it to ``self.logger.debug``."""
    indented_msg = self._indent(msg, indent)
    return self.logger.debug(indented_msg, **kwargs)
invoke ``self.logger.debug``
def rlw_filter(name, location, size, unsize):
    """Filter rlw repository data, keeping only entries whose location's
    last path component matches the running architecture (any i*86
    architecture is normalized to i486).
    """
    arch = _meta_.arch
    if arch.startswith("i") and arch.endswith("86"):
        arch = "i486"
    fname, flocation, fsize, funsize = [], [], [], []
    for n, l, s, u in zip(name, location, size, unsize):
        if l.split("/")[-1] == arch:
            fname.append(n)
            flocation.append(l)
            fsize.append(s)
            funsize.append(u)
    return [fname, flocation, fsize, funsize]
Filter rlw repository data
def post(self, path, data=None):
    """Perform a POST request against the API.

    :param path: URL path appended to ``API_URL``.
    :param data: JSON-serializable request body; defaults to an empty object.
    :return: whatever ``self._check_response`` produces for the response.
    """
    # Avoid the mutable-default-argument anti-pattern; an empty dict is the
    # effective default body, created fresh on each call.
    if data is None:
        data = {}
    response = requests.post(API_URL + path,
                             data=json.dumps(data),
                             headers=self._set_headers())
    return self._check_response(response, self.post, path, data)
Perform POST Request
def fi_business_id(business_id):
    """Validate a Finnish Business ID (``NNNNNNN-C`` with weighted checksum).

    :param business_id: business_id to validate
    """
    if not business_id or not re.match(business_id_pattern, business_id):
        return False
    weights = [7, 9, 10, 5, 8, 4, 2]
    digits = (int(ch) for ch in business_id[:7])
    given_checksum = int(business_id[8])
    remainder = sum(w * d for w, d in zip(weights, digits)) % 11
    if remainder == 0:
        return given_checksum == 0
    return 11 - remainder == given_checksum
Validate a Finnish Business ID. Each company in Finland has a distinct business id. For more information see `Finnish Trade Register`_ .. _Finnish Trade Register: http://en.wikipedia.org/wiki/Finnish_Trade_Register Examples:: >>> fi_business_id('0112038-9') # Fast Monkeys Ltd True >>> fi_business_id('1234567-8') # Bogus ID ValidationFailure(func=fi_business_id, ...) .. versionadded:: 0.4 .. versionchanged:: 0.5 Method renamed from ``finnish_business_id`` to ``fi_business_id`` :param business_id: business_id to validate
def update_on_event(self, e):
    """Handle a single pygame event: window close, quit keys, and resize."""
    if e.type == QUIT:
        self.running = False
        return
    if e.type == KEYDOWN:
        # Escape or Alt+F4 both terminate the main loop.
        if e.key == K_ESCAPE or (e.key == K_F4 and e.mod & KMOD_ALT):
            self.running = False
        return
    if e.type == VIDEORESIZE:
        self.SCREEN_SIZE = e.size
        self.screen = self.new_screen()
Process a single event.
def init_app(self, app):
    """Register the extension with the application.

    Args:
        app (flask.Flask): The application to register with.
    """
    # Every URL rule the app creates from now on is a NavigationRule bound
    # back to this copilot instance.
    app.url_rule_class = partial(NavigationRule, copilot=self)
    # Make this extension's context available to all rendered templates.
    app.context_processor(self.inject_context)
Register the extension with the application. Args: app (flask.Flask): The application to register with.
def get_edge_list(self):
    """Return the list of Edge instances composing the graph."""
    return [Edge(obj_dict=obj_d)
            for obj_dict_list in self.obj_dict['edges'].values()
            for obj_d in obj_dict_list]
Get the list of Edge instances. This method returns the list of Edge instances composing the graph.
def configure_client(
        cls, address: Union[str, Tuple[str, int], Path] = 'localhost',
        port: int = 6379, db: int = 0, password: str = None,
        ssl: Union[bool, str, SSLContext] = False,
        **client_args) -> Dict[str, Any]:
    """Assemble the keyword arguments for a Redis client.

    :param address: IP address, host name or path to a UNIX socket
    :param port: port number to connect to (ignored for UNIX sockets)
    :param db: database number to connect to
    :param password: password used if the server requires authentication
    :param ssl: ``bool``, an :class:`~ssl.SSLContext`, or a reference to one
    :param client_args: extra keyword arguments for the client factory
    """
    assert check_argument_types()
    # Normalize the address: Path objects become socket-path strings, host
    # names become (host, port) pairs, and existing "/..." strings pass through.
    if isinstance(address, Path):
        address = str(address)
    elif isinstance(address, str) and not address.startswith('/'):
        address = (address, port)
    client_args.update(
        address=address,
        db=db,
        password=password,
        ssl=resolve_reference(ssl))
    return client_args
Configure a Redis client. :param address: IP address, host name or path to a UNIX socket :param port: port number to connect to (ignored for UNIX sockets) :param db: database number to connect to :param password: password used if the server requires authentication :param ssl: one of the following: * ``False`` to disable SSL * ``True`` to enable SSL using the default context * an :class:`~ssl.SSLContext` instance * a ``module:varname`` reference to an :class:`~ssl.SSLContext` instance * name of an :class:`~ssl.SSLContext` resource :param client_args: extra keyword arguments passed to :func:`~aioredis.create_redis_pool`
def build_authorization_endpoint(self, request, disable_sso=None):
    """This function returns the ADFS authorization URL.

    Args:
        request(django.http.request.HttpRequest): A django Request object
        disable_sso(bool): Whether to disable single sign-on and force the
            ADFS server to show a login prompt.

    Returns:
        str: The redirect URI
    """
    self.load_config()
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)
    if not redirect_to:
        redirect_to = django_settings.LOGIN_REDIRECT_URL
    # The post-login destination rides along in the OAuth `state` parameter,
    # so it is base64-encoded to survive the round trip through ADFS.
    redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()
    query = QueryDict(mutable=True)
    query.update({
        "response_type": "code",
        "client_id": settings.CLIENT_ID,
        "resource": settings.RELYING_PARTY_ID,
        "redirect_uri": self.redirect_uri(request),
        "state": redirect_to,
    })
    if self._mode == "openid_connect":
        query["scope"] = "openid"
    # prompt=login forces a credentials form even when an SSO session exists;
    # an explicit disable_sso argument overrides the global setting.
    if (disable_sso is None and settings.DISABLE_SSO) or disable_sso is True:
        query["prompt"] = "login"
    return "{0}?{1}".format(self.authorization_endpoint, query.urlencode())
This function returns the ADFS authorization URL. Args: request(django.http.request.HttpRequest): A django Request object disable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt. Returns: str: The redirect URI
def registerTrailingStop(self, tickerId, orderId=0, quantity=1, lastPrice=0,
                         trailPercent=100., trailAmount=0., parentId=0, **kwargs):
    """Add a trailing stop for ``tickerId`` to the monitor list and return
    the newly registered entry.
    """
    entry = {
        "orderId": orderId,
        "parentId": parentId,
        "lastPrice": lastPrice,
        "trailAmount": trailAmount,
        "trailPercent": trailPercent,
        "quantity": quantity,
        # minimum tick size for the contract, used when trailing the stop
        "ticksize": self.contractDetails(tickerId)["m_minTick"],
    }
    self.trailingStops[tickerId] = entry
    return entry
adds trailing stop to monitor list
def inc(self):
    """Atomically reserve and return the next index for a new entry."""
    # `with` guarantees the lock is released even if an exception occurs
    # between acquire and release; the manual acquire()/release() pair could
    # leave the lock held forever on an unexpected error.
    with self.lock:
        cur = self.counter
        self.counter += 1
        return cur
Get index for new entry.
async def amiUsage(self, *args, **kwargs):
    """See the list of AMIs and their usage.

    List AMIs and their usage by returning a list of objects in the form:
    {
      region: string
      volumetype: string
      lastused: timestamp
    }

    This method is ``experimental``
    """
    # Thin generated wrapper: routing and argument handling are driven by
    # the static `funcinfo` table consumed by _makeApiCall.
    return await self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs)
See the list of AMIs and their usage List AMIs and their usage by returning a list of objects in the form: { region: string volumetype: string lastused: timestamp } This method is ``experimental``
def listdir(self, target_directory):
    """Return a list of file names in target_directory.

    Args:
        target_directory: Path to the target directory within the fake
            filesystem.

    Returns:
        A list of file names within the target directory in arbitrary order.

    Raises:
        OSError: if the target is not a directory.
    """
    resolved = self.resolve_path(target_directory, allow_fd=True)
    directory = self.confirmdir(resolved)
    return list(directory.contents.keys())
Return a list of file names in target_directory. Args: target_directory: Path to the target directory within the fake filesystem. Returns: A list of file names within the target directory in arbitrary order. Raises: OSError: if the target is not a directory.
def rand_crop(*args, padding_mode='reflection', p:float=1.):
    "Randomized version of `crop_pad`."
    # `rand_pos` is a module-level mapping splatted into the call —
    # presumably randomized row/col position ranges; confirm against its
    # definition elsewhere in the module.
    return crop_pad(*args, **rand_pos, padding_mode=padding_mode, p=p)
Randomized version of `crop_pad`.
def _set_tag(self, tag=None, tags=None, value=True): existing_tags = self._requirements.get("tags") if tags and not tag: existing_tags = merge(existing_tags, tags) self._requirements["tags"] = existing_tags elif tag and not tags: existing_tags[tag] = value self._requirements["tags"] = existing_tags
Sets the value of a specific tag or merges existing tags with a dict of new tags. Either tag or tags must be None. :param tag: Tag which needs to be set. :param tags: Set of tags which needs to be merged with existing tags. :param value: Value to set for the new tag named by ``tag``. :return: Nothing
def is_date(self):
    """Determine whether this record is a DATE (``Y-M-D`` shaped value).

    On success the record's ``type``/``len`` metadata is updated and True
    is returned; otherwise the implicit return value is None (falsy).
    """
    expected_type = DATA_TYPES['date']['type']
    text = str(self.data)
    # Exactly two dashes also implies at least one dash is present.
    if type(self.data) is not expected_type or text.count('-') != 2:
        return None
    y, m, d = text.split('-')
    if int(y) in YEARS and int(m) in MONTHS and int(d) in DAYS:
        self.type = 'DATE'
        self.len = None
        return True
Determine if a data record is of type DATE.
def pixel_data(self):
    """Returns the pixel data stored in the Image object.

    Returns
    -------
    out : numpy.array
        The pixel data as a (height, width, channels) uint8 array;
        single-channel images are squeezed to (height, width).
    """
    from .. import extensions as _extensions
    # Pre-allocate the destination buffer; the C extension fills it in place
    # through the raw pointer and stride information.
    data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8)
    _extensions.image_load_to_numpy(self, data.ctypes.data, data.strides)
    if self.channels == 1:
        # Drop the trailing channel axis for grayscale images.
        data = data.squeeze(2)
    return data
Returns the pixel data stored in the Image object. Returns ------- out : numpy.array The pixel data of the Image object. It returns a multi-dimensional numpy array, where the shape of the array represents the shape of the image (height, weight, channels). See Also -------- width, channels, height Examples -------- >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg') >>> image_array = img.pixel_data
def entry_verifier(entries, regex, delimiter):
    """Check each entry against ``regex`` for validity.

    If an entry does not match, the entry and the regex are split on
    ``delimiter`` and each segment is re-checked individually so the raised
    error pinpoints the offending part.

    Args:
        entries (list): List of entries to check with regex
        regex (str): Regular expression to compare entries with
        delimiter (str): Character to split entry and regex by

    Raises:
        FormatError: Class containing regex match error data
    """
    cregex = re.compile(regex)
    # sys.version_info is the robust way to test the major version; parsing
    # sys.version can break on builds with unusual version strings.
    decoder = 'unicode-escape' if sys.version_info[0] == 3 else 'string-escape'
    dedelimiter = codecs.decode(delimiter, decoder)
    for entry in entries:
        if re.match(cregex, entry):
            continue
        # Narrow down the failure: anchor and test each segment separately.
        for part, (regex_segment, entry_segment) in enumerate(
                zip(regex.split(delimiter), entry.split(dedelimiter))):
            # startswith/endswith are safe on empty segments, unlike the
            # previous [0]/[-1] indexing which raised IndexError.
            if not regex_segment.startswith('^'):
                regex_segment = '^' + regex_segment
            if not regex_segment.endswith('$'):
                regex_segment += '$'
            if not re.match(regex_segment, entry_segment):
                raise FormatError(template=regex_segment,
                                  subject=entry_segment, part=part)
Checks each entry against regex for validity, If an entry does not match the regex, the entry and regex are broken down by the delimiter and each segment is analyzed to produce an accurate error message. Args: entries (list): List of entries to check with regex regex (str): Regular expression to compare entries with delimiter (str): Character to split entry and regex by, used to check parts of entry and regex to narrow in on the error Raises: FormatError: Class containing regex match error data Example: >>> regex = r'^>.+\\n[ACGTU]+\\n$' >>> entry = [r'>entry1\\nAGGGACTA\\n'] >>> entry_verifier(entry, regex, '\\n')
def inverse_tile_2d(input, k_x, k_y, name):
    """An inverse tiling ("passthrough"-style) layer.

    :param input: Input tensor of shape [batch, h, w, c]
        (requires h % k_y == 0 and w % k_x == 0).
    :param k_x: The tiling factor in x direction [int].
    :param k_y: The tiling factor in y direction [int].
    :param name: The name of the layer (used as variable scope).
    :return: Tensor produced by the reshape/transpose sequence below; the
        paired docstring advertises shape
        [batch, h / k_y, w / k_x, c * k_x * k_y].
    """
    batch_size, h, w, c = input.get_shape().as_list()
    if batch_size is None:
        batch_size = -1
    assert w % k_x == 0 and h % k_y == 0
    # NOTE(review): with a *static* batch_size the first reshape target has
    # k_x * k_y times more elements than the input, which TF would reject;
    # with a dynamic batch (-1) the batch axis silently absorbs the factor.
    # Confirm the intended shapes against the advertised output shape.
    with tf.variable_scope(name) as scope:
        tmp = input
        tmp = tf.reshape(tmp, (batch_size, int(h * k_y), w, int(c * k_x)))
        tmp = tf.transpose(tmp, [0, 2, 1, 3])
        tmp = tf.reshape(tmp, (batch_size, w, h, int(c * k_y * k_x)))
        tmp = tf.transpose(tmp, [0, 2, 1, 3])
        return tmp
An inverse tiling layer. An inverse to the tiling layer can be of great use, since you can keep the resolution of your output low, but harness the benefits of the resolution of a higher level feature layer. If you insist on a source you can call it very lightly inspired by yolo9000 "passthrough layer". :param input: Your input tensor. (Assert input.shape[1] % k_y = 0 and input.shape[2] % k_x = 0) :param k_x: The tiling factor in x direction [int]. :param k_y: The tiling factor in y direction [int]. :param name: The name of the layer. :return: The output tensor of shape [batch_size, inp.height / k_y, inp.width / k_x, inp.channels * k_x * k_y].
def remote_sys_name_uneq_store(self, remote_system_name):
    """Store the remote system name if it differs from the cached value.

    Returns True when the stored value was updated, False otherwise.
    """
    if remote_system_name == self.remote_system_name:
        return False
    self.remote_system_name = remote_system_name
    return True
This function saves the system name, if different from stored.
def get_node(self, key):
    """Return the node stored under ``key`` (rooted at ``self.root``),
    or None if no such node exists.
    """
    self._check_if_open()
    # Keys are always addressed absolutely from the root group.
    normalized = key if key.startswith('/') else '/' + key
    try:
        return self._handle.get_node(self.root, normalized)
    except _table_mod.exceptions.NoSuchNodeError:
        return None
return the node with the key or None if it does not exist
def _crc16_checksum(bytes): crc = 0x0000 polynomial = 0x1021 for byte in bytes: for i in range(8): bit = (byte >> (7 - i) & 1) == 1 c15 = (crc >> 15 & 1) == 1 crc <<= 1 if c15 ^ bit: crc ^= polynomial return crc & 0xFFFF
Returns the CRC-16 checksum of bytearray bytes Ported from Java implementation at: http://introcs.cs.princeton.edu/java/61data/CRC16CCITT.java.html Initial value changed to 0x0000 to match Stellar configuration.
def get(self, *args, **kwargs):
    """Pop the next RPC payload from the control buffer (call from main thread).

    All arguments are forwarded unchanged to the underlying buffer's ``get``.
    """
    payload = self.buf.get(*args, **kwargs)
    logger.debug("Removing RPC payload from ControlBuffer queue: %s", payload)
    return payload
Call from main thread.
def get_one(self, criteria):
    """Return the first item matching ``criteria``, or None when there is
    no match or the lookup fails.
    """
    try:
        items = list(self._get_with_criteria(criteria, limit=1))
        return items[0]
    except Exception:
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; limit the catch to ordinary errors while
        # preserving the return-None-on-failure contract.
        return None
return one item
def _list_response(self, response): if type(response) is list: return response if type(response) is dict: return [response]
This method check if the response is a dict and wrap it into a list. If the response is already a list, it returns the response directly. This workaround is necessary because the API doesn't return a list if only one item is found.
def get_Generic_parameters(tp, generic_supertype):
    """Retrieve the type values from ``tp`` that correspond to parameters
    defined by ``generic_supertype`` (``tp`` must be a subclass of it).

    E.g. ``get_Generic_parameters(tp, typing.Mapping)`` is equivalent to
    ``get_Mapping_key_value(tp)`` except for the error message.
    """
    try:
        params = _select_Generic_superclass_parameters(tp, generic_supertype)
    except TypeError:
        params = None
    if params is None:
        raise TypeError("%s has no proper parameters defined by %s."%
                        (type_str(tp), type_str(generic_supertype)))
    return tuple(params)
tp must be a subclass of generic_supertype. Retrieves the type values from tp that correspond to parameters defined by generic_supertype. E.g. get_Generic_parameters(tp, typing.Mapping) is equivalent to get_Mapping_key_value(tp) except for the error message. Note that get_Generic_itemtype(tp) is not exactly equal to get_Generic_parameters(tp, typing.Container), as that method additionally contains treatment for typing.Tuple and typing.Iterable.
def validate(self):
    """Validates whether a configuration is valid.

    :rtype: bool
    :raises ConfigurationError: if the access token is missing, contains
        disallowed (whitespace) characters, has the wrong length, or if
        the base url is not a valid URI.
    """
    token = self.access_token
    if token is None:
        raise ConfigurationError('No access token provided. '
                                 'Set your access token during client initialization using: '
                                 '"basecrm.Client(access_token= <YOUR_PERSONAL_ACCESS_TOKEN>)"')
    if re.search(r'\s', token):
        raise ConfigurationError('Provided access token is invalid '
                                 'as it contains disallowed characters. '
                                 'Please double-check you access token.')
    if len(token) != 64:
        raise ConfigurationError('Provided access token is invalid '
                                 'as it has invalid length. '
                                 'Please double-check your access token.')
    if not self.base_url or not re.match(self.URL_REGEXP, self.base_url):
        raise ConfigurationError('Provided base url is invalid '
                                 'as it not a valid URI. '
                                 'Please make sure it incldues the schema part, '
                                 'both http and https are accepted, '
                                 'and the hierarchical part')
    return True
Validates whether a configuration is valid. :rtype: bool :raises ConfigurationError: if no ``access_token`` provided. :raises ConfigurationError: if provided ``access_token`` is invalid - contains disallowed characters. :raises ConfigurationError: if provided ``access_token`` is invalid - has invalid length. :raises ConfigurationError: if provided ``base_url`` is invalid.
def set_tag(self, key, value, update_session=True):
    """Create the tag ``key`` or update its value.

    Args:
        key (str): Key of the tag
        value (str): Value of the tag
        update_session (bool): Automatically add the change to the
            SQLAlchemy session. Default: True

    Returns:
        `bool`: True if a tag was created or changed, False when the
        stored value already matches.
    """
    # Scan from the end so that, if duplicate keys ever exist, the last one
    # wins — matching the previous dict-comprehension lookup.
    existing = next((t for t in reversed(self.tags) if t.key == key), None)
    if existing is not None:
        if existing.value == value:
            return False
        existing.value = value
        tag = existing
    else:
        tag = Tag()
        tag.resource_id = self.id
        tag.key = key
        tag.value = value
        self.tags.append(tag)
    if update_session:
        db.session.add(tag)
    return True
Create or set the value of the tag with `key` to `value`. Returns `True` if the tag was created or updated or `False` if there were no changes to be made. Args: key (str): Key of the tag value (str): Value of the tag update_session (bool): Automatically add the change to the SQLAlchemy session. Default: True Returns: `bool`
def get_history(self, filters=(), pagesize=15, offset=0):
    """Get recent events.

    Args:
        filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
            'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK'
        pagesize (int): Number of events to display
        offset (int): Skip pagesize * offset first events
    """
    try:
        response = requests.get(
            urls.history(self._giid),
            headers={
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Cookie': 'vid={}'.format(self._vid)},
            params={
                "offset": int(offset),
                "pagesize": int(pagesize),
                "notificationCategories": filters})
    except requests.exceptions.RequestException as ex:
        # Wrap transport-level failures in the library's own error type.
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
Get recent events Args: filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION', 'TECHNICAL', 'SOS', 'WARNING', 'LOCK', 'UNLOCK' pagesize (int): Number of events to display offset (int): Skip pagesize * offset first events
def _HandleLegacy(self, args, token=None):
    """Retrieves the context for a hunt.

    Args:
        args: Request args carrying the hunt id.
        token: ACL token passed through to the AFF4 open.

    Returns:
        An ApiGetHuntContextResult with the hunt's context and state.
    """
    hunt_obj = aff4.FACTORY.Open(
        args.hunt_id.ToURN(), aff4_type=implementation.GRRHunt, token=token)
    if isinstance(hunt_obj.context, rdf_hunts.HuntContext):
        # The context is already a typed HuntContext: return it directly with
        # an empty data-object state and pass the hunt args along.
        state = api_call_handler_utils.ApiDataObject()
        result = ApiGetHuntContextResult(context=hunt_obj.context, state=state)
        result.args = hunt_obj.args
        return result
    else:
        # Otherwise convert the raw context into a data object and expose it
        # as the state (no typed context available).
        context = api_call_handler_utils.ApiDataObject().InitFromDataObject(
            hunt_obj.context)
        return ApiGetHuntContextResult(state=context)
Retrieves the context for a hunt.
def deprecated(replacement_description):
    """States that a method is deprecated.

    :param replacement_description: Describes what must be used instead.
    :return: a decorator returning the original object with a modified
        docstring (classes are returned untouched).
    """
    def decorate(fn_or_class):
        if isinstance(fn_or_class, type):
            # Classes keep their docstring unchanged.
            pass
        else:
            try:
                # `or ''` prevents the literal string "None" from being
                # embedded when the function has no docstring.
                fn_or_class.__doc__ = "This API point is obsolete. %s\n\n%s" % (
                    replacement_description,
                    fn_or_class.__doc__ or '',
                )
            except AttributeError:
                # Some callables (e.g. builtins) have a read-only __doc__.
                pass
        return fn_or_class
    return decorate
States that method is deprecated. :param replacement_description: Describes what must be used instead. :return: the original method with modified docstring.
def build_napp_package(napp_name):
    """Build the .napp file to be sent to the napps server.

    Files with ignored extensions and well-known scratch directories
    (e.g. ``__pycache__``) in the current working directory are excluded.

    Args:
        napp_name (str): Name of the napp; the archive is built as
            ``<napp_name>.napp``.

    Return:
        file_payload (binary): Open binary file object with the napp package
            contents (the on-disk archive is removed after opening).
    """
    ignored_extensions = {'.swp', '.pyc', '.napp'}
    ignored_dirs = {'__pycache__', '.git', '.tox'}

    # Build the keep-list up front. The previous version mutated the list
    # while iterating it (which skips entries) and compared dot-less
    # `rsplit` extensions against the dotted ignore list (which never
    # matched); os.path.splitext keeps the leading dot.
    files = []
    for entry in os.listdir():
        if os.path.isfile(entry) and \
                os.path.splitext(entry)[1] in ignored_extensions:
            continue
        if os.path.isdir(entry) and entry in ignored_dirs:
            continue
        files.append(entry)

    napp_filename = napp_name + '.napp'
    with tarfile.open(napp_filename, 'x:xz') as napp_file:
        for local_f in files:
            napp_file.add(local_f)
    file_payload = open(napp_filename, 'rb')
    # Safe on POSIX: the open handle keeps the data readable after unlink.
    os.remove(napp_filename)
    return file_payload
Build the .napp file to be sent to the napps server. Args: napp_name (str): Name of the napp; the archive is built as <napp_name>.napp Return: file_payload (binary): The binary representation of the napp package that will be POSTed to the napp server.