Dataset schema: each record has two string fields.
  code      — string, lengths 51 to 2.38k
  docstring — string, lengths 4 to 15.2k
def f2tc(f, base=25):
    """Convert a frame count to an ``HH:MM:SS:FF`` timecode string.

    Args:
        f: Frame count (anything accepted by ``int()``).
        base: Frames per second (default 25).

    Returns:
        str: Timecode ``"HH:MM:SS:FF"``, or ``"--:--:--:--"`` when ``f``
        cannot be converted to an integer.
    """
    try:
        f = int(f)
    except (TypeError, ValueError):
        # Was a bare except: only conversion failures are expected here.
        return "--:--:--:--"
    hh = int((f / base) / 3600)
    mm = int(((f / base) / 60) - (hh * 60))
    ss = int((f / base) - (hh * 3600) - (mm * 60))
    ff = int(f - (hh * 3600 * base) - (mm * 60 * base) - (ss * base))
    return "{:02d}:{:02d}:{:02d}:{:02d}".format(hh, mm, ss, ff)
Converts frames to timecode
def undecorate(cls, function):
    """Strip the validator decoration from ``function``.

    Returns the underlying function when ``function`` is validated,
    otherwise returns ``function`` unchanged.
    """
    if not cls.is_function_validated(function):
        return function
    return cls.get_function_validator(function).function
Remove validator decoration from a function. The `function` argument is the function to be cleaned from the validator decorator.
def remove_event(self, func_name: str, event: str) -> None:
    """Remove a subscribed function (by name) from ``event``.

    :param func_name: The name of the function to be removed.
    :param event: The name of the event.
    :raise EventDoesntExist: if no function named ``func_name`` is
        subscribed to ``event``.
    """
    remaining = self._events[event].copy()
    for subscriber in self._event_funcs(event):
        if subscriber.__name__ == func_name:
            remaining.remove(subscriber)
    # Nothing was removed: the name was never subscribed.
    if self._events[event] == remaining:
        raise EventDoesntExist(
            "function doesn't exist inside event {} ".format(event))
    self._events[event] = remaining
Removes a subscribed function from a specific event. :param func_name: The name of the function to be removed. :type func_name: str :param event: The name of the event. :type event: str :raise EventDoesntExist: if func_name doesn't exist in event.
def archs(self, _args):
    """Print the target architectures available to be built for."""
    header = ('{Style.BRIGHT}Available target architectures are:'
              '{Style.RESET_ALL}').format(Style=Out_Style)
    print(header)
    for target_arch in self.ctx.archs:
        print('    {}'.format(target_arch.arch))
List the target architectures available to be built for.
def mean_values(self):
    """The mean value vector while respecting log transform.

    Returns
    -------
    mean_values : pandas.Series
    """
    vals = self.pst.parameter_data.parval1.copy()
    if self.istransformed:
        vals[self.log_indexer] = np.log10(vals[self.log_indexer])
    return vals
the mean value vector while respecting log transform Returns ------- mean_values : pandas.Series
def getThirdPartyLibCmakeFlags(self, libs):
    """Retrieve the CMake flags for building against the Unreal-bundled
    versions of the given third-party libraries.

    Leading options in ``libs``: ``--multiline`` selects multi-line
    output formatting; ``--nodefaults`` disables platform defaults.
    """
    if libs[0] == '--multiline':
        fmt = PrintingFormat.multiLine()
        libs = libs[1:]
    else:
        fmt = PrintingFormat.singleLine()
    platformDefaults = True
    if libs[0] == '--nodefaults':
        platformDefaults = False
        libs = libs[1:]
    details = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults)
    CMakeCustomFlags.processLibraryDetails(details)
    return details.getCMakeFlags(self.getEngineRoot(), fmt)
Retrieves the CMake invocation flags for building against the Unreal-bundled versions of the specified third-party libraries
def serialize(self, attr, obj, accessor=None, **kwargs):
    """Pull the value for ``attr`` from ``obj``, apply this field's
    formatting and return the result.

    :param str attr: The attribute or key to get from the object.
    :param obj: The object to pull the key from.
    :param callable accessor: Function used to pull values from ``obj``.
    :param dict kwargs: Field-specific keyword arguments.
    :raise ValidationError: In case of formatting problem.
    """
    if self._CHECK_ATTRIBUTE:
        value = self.get_value(obj, attr, accessor=accessor)
        # Fall back to the field's default when the attribute is missing.
        if value is missing_ and hasattr(self, 'default'):
            default = self.default
            value = default() if callable(default) else default
        # Still missing: return the sentinel so the caller can skip the field.
        if value is missing_:
            return value
    else:
        # Field does not read from the object at all.
        value = None
    return self._serialize(value, attr, obj, **kwargs)
Pulls the value for the given key from the object, applies the field's formatting and returns the result. :param str attr: The attribute or key to get from the object. :param str obj: The object to pull the key from. :param callable accessor: Function used to pull values from ``obj``. :param dict kwargs: Field-specific keyword arguments. :raise ValidationError: In case of formatting problem
def open(fn, expand_includes=True, include_comments=False, include_position=False, **kwargs):
    """Load a Mapfile from ``fn`` into a Python dictionary.

    Parameters
    ----------
    fn: string
        Path to the (possibly partial) Mapfile.
    expand_includes: boolean
        Load any ``INCLUDE`` files in the Mapfile.
    include_comments: boolean
        Include or discard comment strings - *experimental*.
    include_position: boolean
        Include token positions in the output.

    Returns
    -------
    dict
        The Mapfile in the mappyfile dictionary format.
    """
    parser = Parser(expand_includes=expand_includes,
                    include_comments=include_comments, **kwargs)
    tree = parser.parse_file(fn)
    transformer = MapfileToDict(include_position=include_position,
                                include_comments=include_comments, **kwargs)
    return transformer.transform(tree)
Load a Mapfile from the supplied filename into a Python dictionary. Parameters ---------- fn: string The path to the Mapfile, or partial Mapfile expand_includes: boolean Load any ``INCLUDE`` files in the MapFile include_comments: boolean Include or discard comment strings from the Mapfile - *experimental* include_position: boolean Include the position of the Mapfile tokens in the output Returns ------- dict A Python dictionary representing the Mapfile in the mappyfile format Example ------- To open a Mapfile from a filename and return it as a dictionary object:: d = mappyfile.open('mymap.map') Notes ----- Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
def get_connectable_volume_templates(self, start=0, count=-1, filter='', query='', sort=''):
    """Get storage volume templates available on the specified networks.

    Returns an empty collection when no templates meet the requested
    connectivity criteria.

    Returns:
        list: Storage volume templates.
    """
    endpoint = "{}/connectable-volume-templates".format(self.URI)
    query_uri = self._client.build_query_uri(
        start=start, count=count, filter=filter,
        query=query, sort=sort, uri=endpoint)
    return self._client.get(query_uri)
Gets the storage volume templates that are available on the specified networks based on the storage system port's expected network connectivity. If there are no storage volume templates that meet the specified connectivity criteria, an empty collection will be returned. Returns: list: Storage volume templates.
def parse_groups(self, group, params):
    """Parse a single IAM group and fetch additional information."""
    # Already processed this group — nothing to do.
    if group['GroupName'] in self.groups:
        return
    api_client = params['api_client']
    group['id'] = group.pop('GroupId')
    group['name'] = group.pop('GroupName')
    group['arn'] = group.pop('Arn')
    group['users'] = self.__fetch_group_users(api_client, group['name'])
    policies = self.__get_inline_policies(api_client, 'group', group['id'], group['name'])
    if policies:
        group['inline_policies'] = policies
        group['inline_policies_count'] = len(policies)
    self.groups[group['id']] = group
Parse a single IAM group and fetch additional information
def transaction_fail(self, name):
    """Roll back one level of the current transaction.

    At nesting depth 1 the real rollback (``_transaction_fail``) runs;
    at deeper levels ``_transaction_failing`` is notified instead.

    :param name: transaction name; must be non-empty
    :raises ValueError: if ``name`` is empty
    """
    if not name:
        raise ValueError("Transaction name cannot be empty")
    if self.transaction_count > 0:
        logger.debug("{}. Failing transaction {}".format(self.transaction_count, name))
        if self.transaction_count == 1:
            self._transaction_fail()
        else:
            self._transaction_failing(name)
        # NOTE(review): decrement assumed to be inside the depth>0 guard —
        # confirm against the original indentation.
        self.transaction_count -= 1
rollback a transaction if currently in one name -- str -- the name of the transaction being failed; raises ValueError if empty
def sample(self, hash, limit=None, offset=None):
    """Return an object representing the sample identified by ``hash``,
    or an empty object if that sample is not found.
    """
    uri = self._uris['sample'].format(hash)
    return self.get_parse(uri, {'limit': limit, 'offset': offset})
Return an object representing the sample identified by the input hash, or an empty object if that sample is not found
def strict_deps_for_target(self, target, predicate=None):
    """Get the dependencies of `target` filtered by `predicate`,
    accounting for 'strict_deps'.

    With 'strict_deps' on, targets only see their immediate BUILD-file
    dependencies; otherwise the transitive closure is used.
    NB: This includes the current target in the result.
    """
    if self._native_build_settings.get_strict_deps_value_for_target(target):
        strict_deps = target.strict_dependencies(DependencyContext())
        if predicate:
            filtered_deps = list(filter(predicate, strict_deps))
        else:
            filtered_deps = strict_deps
        deps = [target] + filtered_deps
    else:
        deps = self.context.build_graph.transitive_subgraph_of_addresses(
            [target.address], predicate=predicate)
        # NOTE(review): filter() with predicate=None drops falsy items and
        # returns a lazy iterator in Python 3 — confirm this is intended.
        deps = filter(predicate, deps)
    return deps
Get the dependencies of `target` filtered by `predicate`, accounting for 'strict_deps'. If 'strict_deps' is on, instead of using the transitive closure of dependencies, targets will only be able to see their immediate dependencies declared in the BUILD file. The 'strict_deps' setting is obtained from the result of `get_compile_settings()`. NB: This includes the current target in the result.
def rdf_source(self, aformat="turtle"):
    """Serialize the graph using the requested format.

    Unsupported formats yield an apologetic message rather than raising.
    """
    if aformat and aformat not in self.SUPPORTED_FORMATS:
        return "Sorry. Allowed formats are %s" % str(self.SUPPORTED_FORMATS)
    if aformat == "dot":
        return self.__serializedDot()
    return self.rdflib_graph.serialize(format=aformat)
Serialize graph using the format required
def create_primary_zone_by_upload(self, account_name, zone_name, bind_file):
    """Create a new primary zone by uploading a bind file.

    Arguments:
        account_name -- The name of the account that will contain this zone.
        zone_name -- The name of the zone. It must be unique.
        bind_file -- The file to upload.
    """
    zone_properties = {"name": zone_name, "accountName": account_name, "type": "PRIMARY"}
    primary_zone_info = {"forceImport": True, "createType": "UPLOAD"}
    zone_data = {"properties": zone_properties, "primaryCreateInfo": primary_zone_info}
    # BUG FIX: the uploaded file handle was never closed; keep it open
    # only for the duration of the request.
    with open(bind_file, 'rb') as bind_fd:
        files = {'zone': ('', json.dumps(zone_data), 'application/json'),
                 'file': ('file', bind_fd, 'application/octet-stream')}
        return self.rest_api_connection.post_multi_part("/v1/zones", files)
Creates a new primary zone by uploading a bind file Arguments: account_name -- The name of the account that will contain this zone. zone_name -- The name of the zone. It must be unique. bind_file -- The file to upload.
def _md5_compare(self, file_path, checksum, block_size=2 ** 13): with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B", unit_scale=True)) as progress: md5 = hashlib.md5() with open(file_path, "rb") as f: while True: block_data = f.read(block_size) if not block_data: break md5.update(block_data) progress.update(len(block_data)) return md5.hexdigest().lower() == checksum.lower()
Compare a given MD5 checksum with one calculated from a file.
def unmarshal_event(self, data: str, response_type):
    """Return the K8s watch event ``data`` (a JSON string) as a dict.

    The raw payload is preserved under 'raw_object'; when
    ``response_type`` is given, 'object' is replaced by the deserialized
    API object, and the latest resourceVersion is recorded for watch
    bookkeeping. Error events are returned untouched.
    """
    js = json.loads(data)
    js['raw_object'] = js['object']
    if js['type'].lower() == 'error':
        return js
    if response_type is not None:
        # The client's deserializer expects an object with a .data attribute.
        js['object'] = self._api_client.deserialize(
            response=SimpleNamespace(data=json.dumps(js['raw_object'])),
            response_type=response_type
        )
        # Typed objects expose .metadata; custom resources stay plain dicts.
        # NOTE(review): this tracking block is assumed to be nested inside
        # the response_type branch — confirm against original indentation.
        if hasattr(js['object'], 'metadata'):
            self.resource_version = js['object'].metadata.resource_version
        elif (isinstance(js['object'], dict) and 'metadata' in js['object']
              and 'resourceVersion' in js['object']['metadata']):
            self.resource_version = js['object']['metadata']['resourceVersion']
    return js
Return the K8s response `data` in JSON format.
def find_filter_class(filtername):
    """Look up a filter class by name. Return None if not found."""
    if filtername in FILTERS:
        return FILTERS[filtername]
    matches = (cls for name, cls in find_plugin_filters() if name == filtername)
    return next(matches, None)
Lookup a filter by name. Return None if not found.
def verify_database(trusted_consensus_hash, consensus_block_height, untrusted_working_dir, trusted_working_dir, start_block=None, expected_snapshots={}):
    """Verify that a database is consistent with a known-good consensus hash.

    Return True if valid. Return False if not.
    """
    # NOTE(review): mutable default for expected_snapshots — safe only if
    # callees never mutate it; confirm.
    db = BlockstackDB.get_readwrite_instance(trusted_working_dir)
    consensus_impl = virtualchain_hooks
    return virtualchain.state_engine_verify(trusted_consensus_hash, consensus_block_height, consensus_impl,
                                            untrusted_working_dir, db,
                                            start_block=start_block,
                                            expected_snapshots=expected_snapshots)
Verify that a database is consistent with a known-good consensus hash. Return True if valid. Return False if not
async def get_default(cls):
    """Get the 'default' Fabric for the MAAS."""
    data = await cls._handler.read(id=cls._default_fabric_id)
    return cls(data)
Get the 'default' Fabric for the MAAS.
def normalize_shape(shape):
    """Normalize the `shape` argument to a tuple of ints.

    An integer becomes a 1-tuple; any iterable of numbers becomes a
    tuple of ints. Raises TypeError when `shape` is None.
    """
    if shape is None:
        raise TypeError('shape is None')
    if isinstance(shape, numbers.Integral):
        return (int(shape),)
    return tuple(int(dim) for dim in shape)
Convenience function to normalize the `shape` argument.
def create_files(filedef, cleanup=True):
    """Contextmanager that creates a directory structure from a yaml
    description.

    Yields the temporary directory (which is also the cwd inside the
    context); the previous cwd is always restored, and the directory is
    removed afterwards unless ``cleanup`` is False.
    """
    cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    try:
        Filemaker(tmpdir, filedef)
        # Removed dead code: the original had `if not cleanup: pass` here.
        os.chdir(tmpdir)
        yield tmpdir
    finally:
        os.chdir(cwd)
        if cleanup:
            shutil.rmtree(tmpdir, ignore_errors=True)
Contextmanager that creates a directory structure from a yaml description.
def get_appended_name(name, columns):
    """Append numbers to a name until it no longer conflicts with the
    other names in a column.

    Loops a preset number of times to avoid an infinite loop; falls back
    to ``"<name>-99"`` when every candidate is taken.

    :param str name: Variable name in question
    :param dict columns: Columns listed by variable name
    :return str: Non-conflicting (possibly appended) variable name
    """
    # BUG FIX: the original returned "<name>-99" even when there was no
    # conflict at all; a conflict-free name is returned unchanged.
    if name not in columns:
        return name
    for suffix in range(1, 11):
        candidate = "{}-{}".format(name, suffix)
        if candidate not in columns:
            return candidate
    logger_misc.warn("get_appended_name: Too many loops: Tried to get appended name but something looks wrong")
    return name + "-99"
Append numbers to a name until it no longer conflicts with the other names in a column. Necessary to avoid overwriting columns and losing data. Loop a preset amount of times to avoid an infinite loop. There shouldn't ever be more than two or three identical variable names in a table. :param str name: Variable name in question :param dict columns: Columns listed by variable name :return str: Appended variable name
def tracking_save(sender, instance, raw, using, update_fields, **kwargs):
    """Post save: detect creation or changes and log them.

    We need post_save so the created object (with pk) is available.
    """
    if _has_changed(instance):
        # No original pk means the save was a create, not an update.
        if instance._original_fields['pk'] is None:
            _create_create_tracking_event(instance)
        else:
            _create_update_tracking_event(instance)
    if _has_changed_related(instance):
        _create_update_tracking_related_event(instance)
    # Re-baseline only after every relevant event has been emitted.
    if _has_changed(instance) or _has_changed_related(instance):
        _set_original_fields(instance)
Post save, detect creation or changes and log them. We need post_save to have the object for a create.
def clear_state(self):
    """Reset the recipe state to its initial empty layout."""
    self.state = {
        'steps': [],
        'current_step': None,
        'scope': [],
        'counters': {},
        'strings': {},
    }
    for matcher in self.matchers:
        self.state[matcher] = {
            'pending': {},
            'actions': [],
            'counters': {},
            'strings': {},
            'recipe': False,
        }
Clear the recipe state.
def get_prev_block_hash(block_representation, coin_symbol='btc', api_key=None):
    """Take a block_representation and return the previous block hash."""
    overview = get_block_overview(block_representation=block_representation,
                                  coin_symbol=coin_symbol,
                                  txn_limit=1,
                                  api_key=api_key)
    return overview['prev_block']
Takes a block_representation and returns the previous block hash
def DbAddServer(self, argin):
    """Create a device server process entry in the database.

    :param argin: Str[0] = full device server name, followed by
        alternating device name / Tango class name pairs.
    :type argin: tango.DevVarStringArray
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbAddServer()")
    if len(argin) < 3 or not len(argin) % 2:
        self.warn_stream("DataBase::AddServer(): incorrect number of input arguments ")
        th_exc(DB_IncorrectArguments,
               "incorrect no. of input arguments, needs at least 3 (server,device,class)",
               "DataBase::AddServer()")
    server_name = argin[0]
    # BUG FIX: '/' is float division in Python 3 and range() would raise
    # TypeError; use floor division.
    for i in range((len(argin) - 1) // 2):
        d_name, klass_name = argin[i * 2 + 1], argin[i * 2 + 2]
        ret, dev_name, dfm = check_device_name(d_name)
        if not ret:
            th_exc(DB_IncorrectDeviceName,
                   "device name (" + d_name + ") syntax error (should be [tango:][//instance/]domain/family/member)",
                   "DataBase::AddServer()")
        self.db.add_device(server_name, (dev_name, dfm), klass_name)
Create a device server process entry in database :param argin: Str[0] = Full device server name Str[1] = Device(s) name Str[2] = Tango class name Str[n] = Device name Str[n + 1] = Tango class name :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid
def transfer(self, transfer_payload=None, *, from_user, to_user):
    """Transfer this entity to another owner on the backing persistence
    layer.

    Args:
        transfer_payload (dict): Payload for the transfer
        from_user: Current owner (model defined by the persistence layer)
        to_user: New owner (model defined by the persistence layer)

    Returns:
        str: Id of the resulting transfer action

    Raises:
        EntityNotYetPersistedError: If the entity has no persist_id yet
    """
    if self.persist_id is None:
        raise EntityNotYetPersistedError(('Entities cannot be transferred '
                                          'until they have been '
                                          'persisted'))
    return self.plugin.transfer(self.persist_id, transfer_payload,
                                from_user=from_user, to_user=to_user)
Transfer this entity to another owner on the backing persistence layer Args: transfer_payload (dict): Payload for the transfer from_user (any): A user based on the model specified by the persistence layer to_user (any): A user based on the model specified by the persistence layer Returns: str: Id of the resulting transfer action on the persistence layer Raises: :exc:`~.EntityNotYetPersistedError`: If the entity being transferred is not associated with an id on the persistence layer (:attr:`~Entity.persist_id`) yet :exc:`~.EntityNotFoundError`: If the entity could not be found on the persistence layer :exc:`~.EntityTransferError`: If the entity fails to be transferred on the persistence layer :exc:`~.PersistenceError`: If any other unhandled error in the plugin occurred
def cluster_path(cls, project, instance, cluster):
    """Return a fully-qualified cluster resource string."""
    template = "projects/{project}/instances/{instance}/clusters/{cluster}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        instance=instance,
        cluster=cluster,
    )
Return a fully-qualified cluster string.
def symmetrized(self):
    """Return a generally symmetrized tensor: the average of this tensor's
    transposes over all possible permutations of its indices.
    """
    orderings = list(itertools.permutations(range(self.rank)))
    total = sum(np.transpose(self, order) for order in orderings)
    return total / len(orderings)
Returns a generally symmetrized tensor, calculated by taking the sum of the tensor and its transpose with respect to all possible permutations of indices
def count(self):
    """Return the number of hits matching the query and filters.

    Note that only the actual number is returned.
    """
    # Reuse the total from an already-executed search when available.
    if hasattr(self, '_response'):
        return self._response.hits.total
    es = connections.get_connection(self._using)
    # count=True strips parts that don't apply to counting from the body.
    d = self.to_dict(count=True)
    return es.count(
        index=self._index,
        body=d,
        **self._params
    )['count']
Return the number of hits matching the query and filters. Note that only the actual number is returned.
def update_room(room):
    """Update a MUC room's configuration on the XMPP server.

    Rooms on a custom server are not managed and are left untouched.
    """
    if room.custom_server:
        return
    def _update_room(xmpp):
        # Join first: room configuration requires presence in the room.
        muc = xmpp.plugin['xep_0045']
        muc.joinMUC(room.jid, xmpp.requested_jid.user)
        muc.configureRoom(room.jid, _set_form_values(xmpp, room, muc.getRoomConfig(room.jid)))
    current_plugin.logger.info('Updating room %s', room.jid)
    _execute_xmpp(_update_room)
Updates a MUC room on the XMPP server.
def typecounter(table, field):
    """Count the number of values found for each Python type.

    The `field` argument can be a field name or index (starting from
    zero).

    :return: a Counter mapping type name -> occurrence count
    """
    # The original wrapped the increment in `except IndexError: pass`,
    # but Counter item access never raises IndexError — dead code removed.
    return Counter(v.__class__.__name__ for v in values(table, field))
Count the number of values found for each Python type. >>> import petl as etl >>> table = [['foo', 'bar', 'baz'], ... ['A', 1, 2], ... ['B', u'2', '3.4'], ... [u'B', u'3', u'7.8', True], ... ['D', u'xyz', 9.0], ... ['E', 42]] >>> etl.typecounter(table, 'foo') Counter({'str': 5}) >>> etl.typecounter(table, 'bar') Counter({'str': 3, 'int': 2}) >>> etl.typecounter(table, 'baz') Counter({'str': 2, 'int': 1, 'float': 1, 'NoneType': 1}) The `field` argument can be a field name or index (starting from zero).
def absolute_uri(self, location=None, scheme=None, **query):
    """Build an absolute URI from ``location`` and variables available
    in this request.

    If no ``location`` is specified, the relative URI is built from
    :meth:`full_path`.
    """
    if is_absolute_uri(location):
        if scheme:
            raise ValueError('Absolute location with scheme not valid')
        return iri_to_uri(location)
    if location or location is None:
        location = self.full_path(location, **query)
    if not scheme:
        scheme = 'https' if self.is_secure else 'http'
    return '%s://%s%s' % (scheme, self.get_host(), location)
Builds an absolute URI from ``location`` and variables available in this request. If no ``location`` is specified, the relative URI is built from :meth:`full_path`.
def min(self):
    """Returns the minimum representable value in this data type.

    Raises:
        TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if self.is_quantized or self.base_dtype in (
        bool,
        string,
        complex64,
        complex128,
    ):
        raise TypeError("Cannot find minimum value of %s." % self)
    # np.finfo/np.iinfo raise for dtypes outside their domain. The bare
    # excepts are narrowed so unrelated errors (e.g. KeyboardInterrupt)
    # are no longer swallowed.
    try:
        return np.finfo(self.as_numpy_dtype()).min
    except (TypeError, ValueError):
        try:
            return np.iinfo(self.as_numpy_dtype()).min
        except (TypeError, ValueError):
            if self.base_dtype == bfloat16:
                return _np_bfloat16(float.fromhex("-0x1.FEp127"))
            raise TypeError("Cannot find minimum value of %s." % self)
Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.
def flat_list(input_list):
    r"""Given a list of nested lists of arbitrary depth, returns a single
    level or 'flat' list.
    """
    if not isinstance(input_list, list):
        return [input_list]
    flattened = []
    for item in input_list:
        flattened.extend(flat_list(item))
    return flattened
r""" Given a list of nested lists of arbitrary depth, returns a single level or 'flat' list.
def putParamset(self, paramset, data=None):
    """Write values to a device paramset.

    A "putted" paramset must not contain all keys available in the
    specified paramset, just the ones which are writable and should be
    changed.

    :param paramset: name of the paramset (must be known for this device)
    :param data: mapping of keys to the new values
    :return: True on success, False otherwise
    """
    # BUG FIX: the default was the mutable literal {}, shared across calls.
    if data is None:
        data = {}
    try:
        if paramset in self._PARAMSETS and data:
            self._proxy.putParamset(self._ADDRESS, paramset, data)
            self.updateParamsets()
            return True
        else:
            return False
    except Exception as err:
        LOG.error("HMGeneric.putParamset: Exception: " + str(err))
        return False
Some devices act upon changes to paramsets. A "putted" paramset must not contain all keys available in the specified paramset, just the ones which are writable and should be changed.
def gateways_info():
    """Return gateway data keyed by IP protocol version."""
    data = netifaces.gateways()
    results = {'default': {}}
    for label, family in (('ipv4', netifaces.AF_INET),
                          ('ipv6', netifaces.AF_INET6)):
        with suppress(KeyError):
            results[label] = data[family]
            results['default'][label] = data['default'][family]
    return results
Returns gateways data.
def library_hierarchy_depth(self):
    """Calculate the library hierarchy depth.

    Counting starts at the current library state, so with no upper
    library state the depth is one.

    :return: library hierarchy depth
    :rtype: int
    """
    depth = 1
    upper = self.get_next_upper_library_root_state()
    while upper is not None:
        depth += 1
        upper = upper.parent.get_next_upper_library_root_state()
    return depth
Calculates the library hierarchy depth Counting starts at the current library state. So if the there is no upper library state the depth is one. :return: library hierarchy depth :rtype: int
def token_cache_pkgs(source=None, release=None):
    """Determine additional packages needed for token caching.

    @param source: source string for charm
    @param release: release of OpenStack currently deployed
    @returns List of packages to enable token caching
    """
    if enable_memcache(source=source, release=release):
        return ['memcached', 'python-memcache']
    return []
Determine additional packages needed for token caching @param source: source string for charm @param release: release of OpenStack currently deployed @returns List of package to enable token caching
def verify_connectivity(config):
    """Verify connectivity to the satellite server.

    Returns the 'remote_leaf' value from branch_info on success, and
    False on any connection or parse failure.
    """
    logger.debug("Verifying Connectivity")
    ic = InsightsConnection(config)
    try:
        branch_info = ic.get_branch_info()
    except requests.ConnectionError as e:
        logger.debug(e)
        logger.debug("Failed to connect to satellite")
        return False
    except LookupError as e:
        logger.debug(e)
        logger.debug("Failed to parse response from satellite")
        return False
    try:
        # NOTE(review): returns the leaf value itself, not literal True.
        remote_leaf = branch_info['remote_leaf']
        return remote_leaf
    except LookupError as e:
        logger.debug(e)
        logger.debug("Failed to find accurate branch_info")
        return False
Verify connectivity to satellite server
def _inject_patched_examples(self, existing_item, patched_item):
    """Inject patched examples into the original item's examples.

    Every patched example must correspond to a pre-existing example.
    """
    existing_examples = existing_item.examples
    for key, patched_example in patched_item.examples.items():
        if key not in existing_examples:
            error_msg = 'Example defined in patch {} must correspond to a pre-existing example.'
            raise InvalidSpec(error_msg.format(
                quote(patched_item.name)),
                patched_example.lineno, patched_example.path)
        existing_examples[key].fields.update(patched_example.fields)
Injects patched examples into original examples.
def validate_argmin_with_skipna(skipna, args, kwargs):
    """Validate 'Series.argmin' arguments when called via numpy.

    numpy passes 'out' as the third positional parameter, which lands in
    'skipna'; it is accepted only if it is an ndarray or None. Returns
    the effective boolean 'skipna'.
    """
    skipna, args = process_skipna(skipna, args)
    validate_argmin(args, kwargs)
    return skipna
If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean
def next(self):
    """Deprecated holdover from when queries were iterators rather than
    iterables.

    @return: one element of massaged data.
    """
    if self._selfiter is None:
        warnings.warn(
            "Calling 'next' directly on a query is deprecated. "
            "Perhaps you want to use iter(query).next(), or something "
            "more expressive like store.findFirst or store.findOrCreate?",
            DeprecationWarning, stacklevel=2)
        self._selfiter = self.__iter__()
    # NOTE: .next() is the Python 2 iterator protocol; this code predates
    # Python 3's __next__.
    return self._selfiter.next()
This method is deprecated, a holdover from when queries were iterators, rather than iterables. @return: one element of massaged data.
def rollback(self, release):
    """Roll the app back to the given release version.

    Returns the resulting (latest) release.
    """
    self._h._http_resource(
        method='POST',
        resource=('apps', self.name, 'releases'),
        data={'rollback': release},
    )
    return self.releases[-1]
Rolls back the release to the given version.
def most_recent_submission(project, group):
    """Return the most recent submission for the group and project."""
    return (Submission.query_by(project=project, group=group)
            .order_by(Submission.created_at.desc()).first())
Return the most recent submission for the user and project id.
def params_for(prefix, kwargs):
    """Extract parameters that belong to a given sklearn module prefix
    from ``kwargs``.

    Examples
    --------
    >>> kwargs = {'encoder__a': 3, 'encoder__b': 4, 'decoder__a': 5}
    >>> params_for('encoder', kwargs)
    {'a': 3, 'b': 4}
    """
    full_prefix = prefix if prefix.endswith('__') else prefix + '__'
    offset = len(full_prefix)
    return {name[offset:]: value
            for name, value in kwargs.items()
            if name.startswith(full_prefix)}
Extract parameters that belong to a given sklearn module prefix from ``kwargs``. This is useful to obtain parameters that belong to a submodule. Examples -------- >>> kwargs = {'encoder__a': 3, 'encoder__b': 4, 'decoder__a': 5} >>> params_for('encoder', kwargs) {'a': 3, 'b': 4}
def joint_entropy_calc(classes, table, POP):
    """Calculate joint entropy from a confusion matrix.

    :param classes: confusion matrix classes
    :param table: confusion matrix table (dict)
    :param POP: population (dict)
    :return: joint entropy as float, or the string "None" on error
    """
    try:
        total = 0
        for actual in classes:
            for predicted in classes:
                p_prime = table[actual][predicted] / POP[actual]
                if p_prime != 0:
                    total += p_prime * math.log(p_prime, 2)
        return -total
    except Exception:
        return "None"
Calculate joint entropy. :param classes: confusion matrix classes :type classes : list :param table: confusion matrix table :type table : dict :param POP: population :type POP : dict :return: joint entropy as float
def create_placement_group(self, name, strategy='cluster'):
    """Create a new placement group within the current region.

    :param name: name of the new placement group
    :param strategy: placement strategy (currently only "cluster")
    :return: True if successful
    """
    request_params = {'GroupName': name, 'Strategy': strategy}
    return self.get_status('CreatePlacementGroup', request_params, verb='POST')
Create a new placement group for your account. This will create the placement group within the region you are currently connected to. :type name: string :param name: The name of the new placement group :type strategy: string :param strategy: The placement strategy of the new placement group. Currently, the only acceptable value is "cluster". :rtype: bool :return: True if successful
def _Enum(docstring, *names): enums = dict(zip(names, range(len(names)))) reverse = dict((value, key) for key, value in enums.iteritems()) enums['reverse_mapping'] = reverse enums['__doc__'] = docstring return type('Enum', (object,), enums)
Utility to generate enum classes used by annotations. Args: docstring: Docstring for the generated enum class. *names: Enum names. Returns: A class that contains enum names as attributes.
def setdatastrs(self, label, unit, format, coord_sys):
    """Set the dataset standard string attributes.

    Equivalent to setting the 'long_name', 'units', 'format' and
    'coordsys' attributes, in that order.

    C library equivalent: SDsetdatastrs
    """
    status = _C.SDsetdatastrs(self._id, label, unit, format, coord_sys)
    _checkErr('setdatastrs', status, 'cannot execute')
Set the dataset standard string type attributes. Args:: label dataset label (attribute 'long_name') unit dataset unit (attribute 'units') format dataset format (attribute 'format') coord_sys dataset coordinate system (attribute 'coordsys') Returns:: None Those strings are part of the so-called standard SDS attributes. Calling 'setdatastrs' is equivalent to setting the following attributes, which correspond to the method parameters, in order:: long_name, units, format, coordsys C library equivalent: SDsetdatastrs
def get_local_ip_address(target):
    """Get the local IP address used to reach ``target``.

    "Connects" a UDP socket to the target (no packets are actually sent)
    and reads the kernel-chosen local address. Returns '' on failure.
    """
    ip_adr = ''
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect((target, 8000))
            ip_adr = s.getsockname()[0]
        finally:
            # The original leaked the socket on error; always close it.
            s.close()
    except OSError:
        # Was a bare except: only socket/resolution errors are expected.
        pass
    return ip_adr
Get the local ip address to access one specific target.
def hideEvent(self, event):
    """Reimplemented to disconnect signal handlers and the event filter
    when the call tip is hidden.
    """
    super(CallTipWidget, self).hideEvent(event)
    self._text_edit.cursorPositionChanged.disconnect(
        self._cursor_position_changed)
    self._text_edit.removeEventFilter(self)
Reimplemented to disconnect signal handlers and event filter.
def importpath(path, error_text=None):
    """Import a value by the specified dotted ``path``.

    The value can represent a module, class, object, attribute or
    method. If ``error_text`` is not None, failures raise
    ImproperlyConfigured with a user-friendly message; otherwise the
    original ImportError is re-raised.
    """
    result = None
    attrs = []
    parts = path.split('.')
    exception = None
    # Peel trailing components off until an importable module is found.
    while parts:
        try:
            result = __import__('.'.join(parts), {}, {}, [''])
        except ImportError as e:
            if exception is None:
                exception = e
            attrs = parts[-1:] + attrs
            parts = parts[:-1]
        else:
            break
    # Walk the remaining components as attributes.
    for attr in attrs:
        try:
            result = getattr(result, attr)
        except (AttributeError, ValueError):
            if error_text is not None:
                # BUG FIX: the message read 'can import' although it
                # reports a failure.
                raise ImproperlyConfigured('Error: %s cannot import "%s"' % (
                    error_text, path))
            raise exception
    return result
Import value by specified ``path``. Value can represent module, class, object, attribute or method. If ``error_text`` is not None and import will raise ImproperlyConfigured with user friendly text.
def list_principals():
    """Get all principals.

    CLI Example:

    .. code-block:: bash

        salt 'kde.example.com' kerberos.list_principals
    """
    cmd = __execute_kadmin('list_principals')
    if cmd['retcode'] != 0 or cmd['stderr']:
        return {
            'comment': cmd['stderr'].splitlines()[-1],
            'result': False,
        }
    # First stdout line is the kadmin banner; the rest are principals.
    return {'principals': cmd['stdout'].splitlines()[1:]}
Get all principals CLI Example: .. code-block:: bash salt 'kde.example.com' kerberos.list_principals
def parse(self, **global_args):
    """Entry point to parsing a BUILD file.

    Args:
        **global_args: Variables to include in the parsing environment.
    """
    if self.build_file not in ParseContext._parsed:
        butcher_context = {}
        # Seed the evaluation context by executing the preamble snippets.
        for str_to_exec in self._strs_to_exec:
            ast = compile(str_to_exec, '<string>', 'exec')
            exec_function(ast, butcher_context)
        with ParseContext.activate(self):
            startdir = os.path.abspath(os.curdir)
            try:
                # BUILD files are evaluated with cwd set to their directory.
                os.chdir(self.build_file.path_on_disk)
                # Re-check: activating the context may have parsed us already.
                if self.build_file not in ParseContext._parsed:
                    ParseContext._parsed.add(self.build_file)
                    eval_globals = copy.copy(butcher_context)
                    eval_globals.update(
                        {'ROOT_DIR': self.build_file.path_on_disk,
                         '__file__': 'bogus please fix this'})
                    eval_globals.update(global_args)
                    exec_function(self.build_file.code, eval_globals)
            finally:
                os.chdir(startdir)
Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment.
def set_type(self, type):
    """Set the frame type that we are querying."""
    frame_type = str(type)
    self.add_var_opt('type', frame_type)
    self.__type = frame_type
    self.__set_output()
sets the frame type that we are querying
def logger(self):
    """Lazily build and cache the application logger.

    Verbose mode configures a debug console logger; otherwise webhook
    handlers (Discord/Slack/HipChat) are attached for each configured
    webhook.
    """
    if self._logger:
        return self._logger
    else:
        log_builder = p_logging.ProsperLogger(
            self.PROGNAME,
            self.config.get_option('LOGGING', 'log_path'),
            config_obj=self.config
        )
        if self.verbose:
            log_builder.configure_debug_logger()
        else:
            # Identify the emitting host/version in webhook messages.
            id_string = '({platform}--{version})'.format(
                platform=platform.node(),
                version=self.VERSION
            )
            if self.config.get_option('LOGGING', 'discord_webhook'):
                log_builder.configure_discord_logger(
                    custom_args=id_string
                )
            if self.config.get_option('LOGGING', 'slack_webhook'):
                log_builder.configure_slack_logger(
                    custom_args=id_string
                )
            if self.config.get_option('LOGGING', 'hipchat_webhook'):
                log_builder.configure_hipchat_logger(
                    custom_args=id_string
                )
        self._logger = log_builder.get_logger()
        return self._logger
uses "global logger" for logging
def dispatch(self, *args, **kwargs):
    """Check that user signup is allowed before even bothering to
    dispatch or do other processing; redirect to the disallowed URL
    otherwise.
    """
    if not self.registration_allowed():
        return HttpResponseRedirect(force_text(self.disallowed_url))
    return super(RegistrationView, self).dispatch(*args, **kwargs)
Check that user signup is allowed before even bothering to dispatch or do other processing.
def _get_path_from_parent(self, parent):
    """Return a list of PathInfos containing the path from the parent
    model to the current model, or an empty list if parent is not an
    ancestor of the current model.
    """
    # Prefer the framework-provided implementation when available.
    if hasattr(self, 'get_path_from_parent'):
        return self.get_path_from_parent(parent)
    if self.model is parent:
        return []
    model = self.concrete_model
    # get_base_chain() yields the ancestry chain (or None when unrelated).
    chain = model._meta.get_base_chain(parent) or []
    chain.reverse()
    chain.append(model)
    path = []
    # Follow each parent link's reverse path info down the chain.
    for i, ancestor in enumerate(chain[:-1]):
        child = chain[i + 1]
        link = child._meta.get_ancestor_link(ancestor)
        path.extend(link.get_reverse_path_info())
    return path
Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model.
def Prod(a, axis, keep_dims):
    """Prod reduction op.

    Returns a 1-tuple holding the product of ``a`` along ``axis``
    (ndarray axes are converted to a tuple for numpy).
    """
    if isinstance(axis, np.ndarray):
        axis = tuple(axis)
    return np.prod(a, axis=axis, keepdims=keep_dims),
Prod reduction op.
def normalizeGlyphTopMargin(value):
    """Normalize a glyph top margin.

    * **value** must be a :ref:`type-int-float` or `None`.
    * Returned value is the same type as the input value.
    """
    if value is None or isinstance(value, (int, float)):
        return value
    raise TypeError("Glyph top margin must be an :ref:`type-int-float`, "
                    "not %s." % type(value).__name__)
Normalizes glyph top margin. * **value** must be a :ref:`type-int-float` or `None`. * Returned value is the same type as the input value.
def parse_remote(cls, filename):
    """Parse a remote filename into blob information."""
    match = cls._URL_FORMAT.search(filename)
    return cls._REMOTE_FILE(
        "blob",
        storage=match.group("storage"),
        container=match.group("container"),
        blob=match.group("blob"),
    )
Parses a remote filename into blob information.
def relocate(source, destination, move=False):
    """Adjust the virtual environment settings and optionally move it.

    Args:
        source (str): Path to the existing virtual environment.
        destination (str): Desired path of the virtual environment.
        move (bool): Whether or not to actually move the files.
            Default False.
    """
    venv = api.VirtualEnvironment(source)
    if move:
        venv.move(destination)
    else:
        venv.relocate(destination)
    return None
Adjust the virtual environment settings and optional move it. Args: source (str): Path to the existing virtual environment. destination (str): Desired path of the virtual environment. move (bool): Whether or not to actually move the files. Default False.
def add_to_sources(self, action, doc_source):
    """Store the document source locally, keyed by index/type/id."""
    index_map = self.sources.setdefault(action["_index"], {})
    type_map = index_map.setdefault(action["_type"], {})
    type_map[action["_id"]] = doc_source
Store sources locally
def override_build_kwarg(workflow, k, v, platform=None):
    """Override a build-kwarg for worker builds.

    ``platform=None`` applies the override to all worker builds.
    """
    workspace = workflow.plugin_workspace.setdefault(OrchestrateBuildPlugin.key, {})
    override_kwargs = workspace.setdefault(WORKSPACE_KEY_OVERRIDE_KWARGS, {})
    platform_kwargs = override_kwargs.setdefault(platform, {})
    platform_kwargs[k] = v
Override a build-kwarg for all worker builds
def fave(self, deviationid, folderid=""):
    """Add a deviation to favourites.

    :param deviationid: Id of the Deviation to favourite
    :param folderid: Optional UUID of the Collection folder to add the
        favourite into
    """
    # BUG FIX: 'is not' compares identity, which is unreliable for string
    # literals (and a SyntaxWarning in modern Python); use !=.
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
    post_data = {'deviationid': deviationid}
    if folderid:
        post_data['folderid'] = folderid
    return self._req('/collections/fave', post_data=post_data)
Add deviation to favourites :param deviationid: Id of the Deviation to favourite :param folderid: Optional UUID of the Collection folder to add the favourite into
def save_metadata(self, metadata):
    """Save metadata to the store and return its content hash.

    Returns None for empty metadata. Raises StoreException for the
    reserved system key or unserializable metadata.
    """
    if metadata in (None, {}):
        return None
    if SYSTEM_METADATA in metadata:
        raise StoreException("Not allowed to store %r in metadata" % SYSTEM_METADATA)
    # Write to a temp object first, then move into place keyed by hash.
    path = self.temporary_object_path(str(uuid.uuid4()))
    with open(path, 'w') as fd:
        try:
            # Canonical JSON (sorted keys, compact separators) so equal
            # metadata always hashes identically.
            json.dump(metadata, fd, sort_keys=True, separators=(',', ':'))
        except (TypeError, ValueError):
            raise StoreException("Metadata is not serializable")
    metahash = digest_file(path)
    self._move_to_store(path, metahash)
    return metahash
Save metadata to the store.
def values_for_column(self, column_name, limit=10000):
    """Run a query against SQLA to retrieve sample values for a column.

    :param column_name: name of the column; must exist in ``self.columns``
    :param limit: maximum number of distinct values to fetch (a falsy
        value disables the limit)
    :return: list of raw values from the single selected column
    """
    cols = {col.column_name: col for col in self.columns}
    target_col = cols[column_name]
    tp = self.get_template_processor()
    qry = (
        select([target_col.get_sqla_col()])
        .select_from(self.get_from_clause(tp))
        .distinct()
    )
    if limit:
        qry = qry.limit(limit)
    if self.fetch_values_predicate:
        # Optional extra WHERE clause, rendered through the template
        # processor before being applied.
        tp = self.get_template_processor()
        qry = qry.where(tp.process_template(self.fetch_values_predicate))
    engine = self.database.get_sqla_engine()
    # Render to a literal SQL string so it can be mutated by config hooks.
    sql = '{}'.format(
        qry.compile(engine, compile_kwargs={'literal_binds': True}),
    )
    sql = self.mutate_query_from_config(sql)
    df = pd.read_sql_query(sql=sql, con=engine)
    return [row[0] for row in df.to_records(index=False)]
Runs query against sqla to retrieve some sample values for the given column.
def setsockopt(self, *sockopts):
    """Add socket options to set on the connection.

    Accepts either a single list/tuple of ``(level, option, value)``
    triples, or the three values passed as positional arguments.
    """
    # isinstance is the idiomatic (and subclass-friendly) type check,
    # replacing ``type(x) in (list, tuple)``.
    if isinstance(sockopts[0], (list, tuple)):
        for sock_opt in sockopts[0]:
            level, option, value = sock_opt
            self.connection.sockopts.add((level, option, value))
    else:
        level, option, value = sockopts
        self.connection.sockopts.add((level, option, value))
Add socket options to set
def replace_pattern(tokens, new_pattern):
    """Replace, in a RegexLexer token dict, every pattern whose token
    matches ``new_pattern``'s token with ``new_pattern`` itself."""
    target_token = new_pattern[1]
    for state in tokens.values():
        for idx, entry in enumerate(state):
            if isinstance(entry, tuple) and entry[1] == target_token:
                state[idx] = new_pattern
Given a RegexLexer token dictionary 'tokens', replace all patterns that match the token specified in 'new_pattern' with 'new_pattern'.
def drawPolyline(self, points):
    """Draw several connected line segments through ``points``.

    Emits a PDF move-to ("m") operator for the first point unless the
    pen is already there, then line-to ("l") operators for the rest.
    Updates the bounding rect for every point and returns the last
    point, which also becomes ``self.lastPoint``.
    """
    for i, p in enumerate(points):
        if i == 0:
            if not (self.lastPoint == Point(p)):
                # Only move the pen if it is not already at the start.
                self.draw_cont += "%g %g m\n" % JM_TUPLE(Point(p) * self.ipctm)
                self.lastPoint = Point(p)
        else:
            self.draw_cont += "%g %g l\n" % JM_TUPLE(Point(p) * self.ipctm)
        self.updateRect(p)
    self.lastPoint = Point(points[-1])
    return self.lastPoint
Draw several connected line segments.
def links(self, base_link, current_page) -> dict:
    """Return JSON pagination links for the given page.

    :param base_link: base URL path; surrounding slashes are normalized
    :param current_page: zero-based page index
    :return: dict with 'self', 'prev', 'next', 'first' and 'last' links
        ('prev'/'next' are None at the boundaries)
    """
    # Fixed: ``is not 0`` is an identity check on an int literal, which
    # only works by accident of CPython small-int caching; use != / ==.
    max_pages = self.max_pages - 1 if \
        self.max_pages > 0 else self.max_pages
    base_link = '/%s' % (base_link.strip("/"))
    self_page = current_page
    prev = current_page - 1 if current_page != 0 else None
    prev_link = '%s/page/%s/%s' % (base_link, prev, self.limit) if \
        prev is not None else None
    next = current_page + 1 if current_page < max_pages else None
    next_link = '%s/page/%s/%s' % (base_link, next, self.limit) if \
        next is not None else None
    first = 0
    last = max_pages
    return {
        'self': '%s/page/%s/%s' % (base_link, self_page, self.limit),
        'prev': prev_link,
        'next': next_link,
        'first': '%s/page/%s/%s' % (base_link, first, self.limit),
        'last': '%s/page/%s/%s' % (base_link, last, self.limit),
    }
Return JSON pagination links for the given page.
def close(self):
    """Shut down and close the connection socket, if one is open."""
    log.debug("Closing socket connection for %s:%d" % (self.host, self.port))
    sock = self._sock
    if not sock:
        log.debug("No socket found to close!")
        return
    try:
        sock.shutdown(socket.SHUT_RDWR)
    except socket.error:
        # Peer may already be gone; closing is all that matters here.
        pass
    sock.close()
    self._sock = None
Shutdown and close the connection socket
def format_national_number_with_carrier_code(numobj, carrier_code):
    """Format a number in national format for dialing using the specified
    carrier.

    The carrier-code is always used regardless of whether the phone
    number already has a preferred domestic carrier code stored. If
    carrier_code is an empty string, the number is returned in national
    format without any carrier code.

    Arguments:
    numobj -- The phone number to be formatted
    carrier_code -- The carrier selection code to be used

    Returns the formatted phone number in national format for dialing
    using the carrier as specified in the carrier_code.
    """
    country_code = numobj.country_code
    nsn = national_significant_number(numobj)
    if not _has_valid_country_calling_code(country_code):
        # Unknown calling code: fall back to the bare national number.
        return nsn
    region_code = region_code_for_country_code(country_code)
    metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code)
    formatted_number = _format_nsn(nsn, metadata, PhoneNumberFormat.NATIONAL, carrier_code)
    formatted_number = _maybe_append_formatted_extension(numobj, metadata, PhoneNumberFormat.NATIONAL, formatted_number)
    formatted_number = _prefix_number_with_country_calling_code(country_code, PhoneNumberFormat.NATIONAL, formatted_number)
    return formatted_number
Format a number in national format for dialing using the specified carrier. The carrier-code will always be used regardless of whether the phone number already has a preferred domestic carrier code stored. If carrier_code contains an empty string, returns the number in national format without any carrier code. Arguments: numobj -- The phone number to be formatted carrier_code -- The carrier selection code to be used Returns the formatted phone number in national format for dialing using the carrier as specified in the carrier_code.
def add_key_path(key_proto, *path_elements):
    """Add path elements to the given datastore.Key proto message.

    Path elements come as (kind1, id1/name1, ..., kindN, idN/nameN); a
    trailing lone kind (no id/name) leaves the key incomplete.
    NOTE: uses the Python 2 ``long``/``basestring`` builtins.

    Raises:
        TypeError: the given id or name has the wrong type.
    Returns:
        the same datastore.Key proto for complete keys.
        NOTE(review): the incomplete-key branch does a bare ``return``
        (yields None), unlike the complete-key path — confirm intended.
    """
    for i in range(0, len(path_elements), 2):
        # Take one (kind, id_or_name) pair per iteration.
        pair = path_elements[i:i+2]
        elem = key_proto.path.add()
        elem.kind = pair[0]
        if len(pair) == 1:
            return
        id_or_name = pair[1]
        if isinstance(id_or_name, (int, long)):
            elem.id = id_or_name
        elif isinstance(id_or_name, basestring):
            elem.name = id_or_name
        else:
            raise TypeError(
                'Expected an integer id or string name as argument %d; '
                'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name)))
    return key_proto
Add path elements to the given datastore.Key proto message. Args: key_proto: datastore.Key proto message. *path_elements: list of ancestors to add to the key. (kind1, id1/name1, ..., kindN, idN/nameN), the last 2 elements represent the entity key, if no terminating id/name: the key will be an incomplete key. Raises: TypeError: the given id or name has the wrong type. Returns: the same datastore.Key. Usage: >>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name datastore.Key(...) >>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id datastore.Key(...) >>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete datastore.Key(...) >>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete datastore.Key(...)
def publish(self):
    """Publish last changes and return the new zone serial."""
    response = self.put(
        '/REST/Zone/%s' % (self.zone,), data={'publish': True})
    return response.content['data']['serial']
Publish last changes.
def addHost(self, name=None):
    """Add a new host node to the topology, auto-naming it 'hN' if needed."""
    if name is None:
        # Keep consuming the counter until an unused name is found.
        candidate = 'h' + str(self.__hnum)
        self.__hnum += 1
        while candidate in self.__nxgraph:
            candidate = 'h' + str(self.__hnum)
            self.__hnum += 1
        name = candidate
    self.__addNode(name, Host)
    return name
Add a new host node to the topology.
def parse_instancepath(self, tup_tree):
    """Parse an INSTANCEPATH element and return the instance path it
    represents as a CIMInstanceName object.

    ::

        <!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)>

    Raises CIMXMLParseError when the element does not have exactly the
    two required child elements.
    """
    self.check_node(tup_tree, 'INSTANCEPATH')
    k = kids(tup_tree)
    if len(k) != 2:
        raise CIMXMLParseError(
            _format("Element {0!A} has invalid number of child elements "
                    "{1!A} (expecting two child elements "
                    "(NAMESPACEPATH, INSTANCENAME))", name(tup_tree), k),
            conn_id=self.conn_id)
    host, namespace = self.parse_namespacepath(k[0])
    inst_path = self.parse_instancename(k[1])
    # Attach the namespace-path information to the instance name.
    inst_path.host = host
    inst_path.namespace = namespace
    return inst_path
Parse an INSTANCEPATH element and return the instance path it represents as a CIMInstanceName object. :: <!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)>
def download(self, path, file):
    """Download the remote file at ``path`` to the local ``file``.

    Raises YaDiskException on any non-200 response.
    """
    resp = self._sendRequest("GET", path)
    if resp.status_code != 200:
        raise YaDiskException(resp.status_code, resp.content)
    with open(file, "wb") as fh:
        fh.write(resp.content)
Download remote file to disk.
def bind(self, study, **kwargs):
    """Return a copy of the AcquiredSpec bound to the given study.

    Parameters
    ----------
    study : Study
        A study to bind the fileset spec to (should happen in the study
        __init__)
    """
    if self.default is None:
        # Fixed: the message previously had three '{}' placeholders for
        # two arguments, so formatting raised IndexError instead of the
        # intended ArcanaError.
        raise ArcanaError(
            "Attempted to bind '{}' to {} but only acquired specs with "
            "a default value should be bound to studies".format(
                self.name, study))
    if self._study is not None:
        # Already bound; reuse this instance.
        bound = self
    else:
        bound = copy(self)
        bound._study = study
        bound._default = bound.default.bind(study)
    return bound
Returns a copy of the AcquiredSpec bound to the given study Parameters ---------- study : Study A study to bind the fileset spec to (should happen in the study __init__)
def parse_xml_file(self, fileobj, id_generator=None):
    """Parse an open XML file and return the usage id of its root node."""
    root = etree.parse(fileobj).getroot()
    return self._usage_id_from_node(root, None, id_generator)
Parse an open XML file, returning a usage id.
def from_api(cls, **kwargs):
    """Create a new instance from API arguments.

    camelCase keys are switched to snake_case before instantiation, and
    each value is parsed into its declared property type. Unexpected
    properties are logged and dropped rather than raising.

    Returns:
        BaseModel: Instantiated model using the API values.
    """
    vals = cls.get_non_empty_vals({
        cls._to_snake_case(k): v for k, v in kwargs.items()
    })
    remove = []
    for attr, val in vals.items():
        try:
            vals[attr] = cls._parse_property(attr, val)
        except HelpScoutValidationException:
            # Collect unknown attributes; deleting while iterating the
            # dict would raise RuntimeError.
            remove.append(attr)
            logger.info(
                'Unexpected property received in API response',
                exc_info=True,
            )
    for attr in remove:
        del vals[attr]
    return cls(**cls.get_non_empty_vals(vals))
Create a new instance from API arguments. This will switch camelCase keys into snake_case for instantiation. It will also identify any ``Instance`` or ``List`` properties, and instantiate the proper objects using the values. The end result being a fully Objectified and Pythonified API response. Returns: BaseModel: Instantiated model using the API values.
def discover_settings(conf_base=None):
    """Discover custom settings for the ZMQ build.

    Defaults are merged with, in increasing priority: the saved config
    (when ``conf_base`` is given), setup.cfg arguments, and environment
    arguments.
    """
    settings = dict(
        zmq_prefix='',
        libzmq_extension=False,
        no_libzmq_extension=False,
        skip_check_zmq=False,
        build_ext={},
        bdist_egg={},
    )
    if sys.platform.startswith('win'):
        settings['have_sys_un_h'] = False
    if conf_base:
        merge(settings, load_config('config', conf_base))
    for overrides in (get_cfg_args(), get_eargs()):
        merge(settings, overrides)
    return settings
Discover custom settings for ZMQ path
def detect_regions(bam_in, bed_file, out_dir, prefix):
    """Detect regions using the first CoRaL module (coverageBed counts).

    Returns the path to the score-fixed loci coverage file.
    """
    bed_file = _reorder_columns(bed_file)
    counts_reads_cmd = ("coverageBed -s -counts -b {bam_in} "
                        "-a {bed_file} | sort -k4,4 "
                        "> {out_dir}/loci.cov")
    with utils.chdir(out_dir):
        # Fixed: the previous call also passed min/max_trimmed_read_len
        # keyword args, which are undefined names here (NameError) and
        # are not referenced by the template anyway.
        run(counts_reads_cmd.format(**locals()), "Run counts_reads")
    loci_file = _fix_score_column(op.join(out_dir, "loci.cov"))
    return loci_file
Detect regions using first CoRaL module
def assert_valid_input(cls, tag):
    """Raise TypeError unless ``tag`` is a valid BeautifulSoup Tag/document."""
    if not cls.is_tag(tag):
        # Fixed typo in the error message ("recieved" -> "received").
        raise TypeError("Expected a BeautifulSoup 'Tag', but instead received type {}".format(type(tag)))
Check if valid input tag or document.
def _cast_to_type(self, value): if not isinstance(value, dict): self.fail('invalid', value=value) return value
Raise error if the value is not a dict
def cleanup(output_root):
    """Remove generated reST output at ``output_root``.

    Handles both a directory (removed recursively) and a single file;
    nonexistent paths are silently ignored.
    """
    if not os.path.exists(output_root):
        return
    if os.path.isdir(output_root):
        rmtree(output_root)
    else:
        os.remove(output_root)
Remove any reST files which were generated by this extension
def _push_frontier(self, early_frontier: Dict[ops.Qid, int],
                   late_frontier: Dict[ops.Qid, int],
                   update_qubits: Iterable[ops.Qid] = None
                   ) -> Tuple[int, int]:
    """Insert empty moments to separate two frontiers.

    After inserting n_new moments:
        for q in late_frontier: early_frontier[q] <= late_frontier[q] + n_new
        for q in update_qubits: early_frontier[q] identifies the same
        moment as before (its index may have shifted).

    Args:
        early_frontier: The earlier frontier; updated in place for
            ``update_qubits`` to account for the inserted moments.
        late_frontier: The later frontier. Not modified.
        update_qubits: Qubits whose early_frontier entries to update;
            defaults to those in early_frontier but not late_frontier.

    Returns:
        (insertion index, number of inserted moments), or (0, 0) if no
        moments were needed.
    """
    if update_qubits is None:
        update_qubits = set(early_frontier).difference(late_frontier)
    # Largest gap by which any early entry overshoots its late entry.
    n_new_moments = (max(early_frontier.get(q, 0) - late_frontier[q]
                         for q in late_frontier)
                     if late_frontier else 0)
    if n_new_moments > 0:
        insert_index = min(late_frontier.values())
        self._moments[insert_index:insert_index] = (
            [ops.Moment()] * n_new_moments)
        for q in update_qubits:
            # Only entries at/after the insertion point shift.
            if early_frontier.get(q, 0) > insert_index:
                early_frontier[q] += n_new_moments
        return insert_index, n_new_moments
    return (0, 0)
Inserts moments to separate two frontiers. After insertion of n_new moments, the following holds: for q in late_frontier: early_frontier[q] <= late_frontier[q] + n_new for q in update_qubits: early_frontier[q] still identifies the same moment as before (though its index may have changed if that moment comes after those inserted). Args: early_frontier: The earlier frontier. For qubits not in the later frontier, this is updated to account for the newly inserted moments. late_frontier: The later frontier. This is not modified. update_qubits: The qubits for which to update early_frontier to account for the newly inserted moments. Returns: (index at which new moments were inserted, how many new moments were inserted) if new moments were indeed inserted. (0, 0) otherwise.
def field_exists(self, well_x, well_y, field_x, field_y):
    """Check if a field exists in the ScanFieldArray.

    Returns True when ``self.field(...)`` yields a field, False otherwise.
    """
    # PEP 8: compare to None with identity, not equality.
    return self.field(well_x, well_y, field_x, field_y) is not None
Check if field exists ScanFieldArray.
def _get_candidates(self): candidates = np.where(self.dpp_vector == 0) return None if len(candidates[0]) == 0 else candidates[0]
Finds the pipelines that are not yet tried. Returns: np.array: Indices corresponding to columns in ``dpp_matrix`` that haven't been tried on ``X``. ``None`` if all pipelines have been tried on X.
async def send_frame(self, frame):
    """Send frame to API via connection.

    Lazily (re)connects first: on a fresh connection the version is
    fetched, UTC time is set and the house status monitor is enabled
    before the frame is written.
    """
    if not self.connection.connected:
        await self.connect()
        await self.update_version()
        await set_utc(pyvlx=self)
        await house_status_monitor_enable(pyvlx=self)
    self.connection.write(frame)
Send frame to API via connection.
def get_enrollment(self, id):
    """Retrieve an enrollment by id (useful to check its type and
    related metadata)."""
    return self.client.get(self._url('enrollments/{}'.format(id)))
Retrieves an enrollment. Useful to check its type and related metadata. Args: id (str): The id of the enrollment to retrieve See: https://auth0.com/docs/api/management/v2#!/Guardian/get_enrollments_by_id
def validate_request_certificate(headers, data):
    """Ensure the certificate and signature in the request headers are
    truly from Amazon and verify correctly.

    :param headers: Dictionary (or sufficiently dictionary-like) map of
        request headers.
    :param data: Raw POST data attached to this request.
    :returns: True if certificate verification succeeds, False otherwise.
    """
    if 'SignatureCertChainUrl' not in headers or \
            'Signature' not in headers:
        log.error('invalid request headers')
        return False
    cert_url = headers['SignatureCertChainUrl']
    sig = base64.b64decode(headers['Signature'])
    cert = _get_certificate(cert_url)
    if not cert:
        return False
    try:
        crypto.verify(cert, sig, data, 'sha1')
        return True
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; verification failures still return False.
    except Exception:
        log.error('invalid request signature')
        return False
Ensure that the certificate and signature specified in the request headers are truely from Amazon and correctly verify. Returns True if certificate verification succeeds, False otherwise. :param headers: Dictionary (or sufficiently dictionary-like) map of request headers. :param data: Raw POST data attached to this request.
def handle_exception(self, exception):
    """Dispatch an unhandled exception to a redirect or a render.

    The exception is redirected only when its ``can_redirect`` attribute
    (default True) is truthy AND a ``redirect_uri`` is set on self;
    otherwise it is rendered to the browser.
    """
    redirectable = getattr(exception, "can_redirect", True)
    if redirectable and getattr(self, "redirect_uri", None):
        return self.redirect_exception(exception)
    return self.render_exception(exception)
Handle a unspecified exception and return the correct method that should be used for handling it. If the exception has the `can_redirect` property set to False, it is rendered to the browser. Otherwise, it will be redirected to the location provided in the `RedirectUri` object that is associated with the request.
def __clean_and_tokenize(self, doc_list):
    """Method to clean and tokenize the document list.

    :param doc_list: Document list to clean and tokenize.
    :return: Cleaned and tokenized document list (one token list per doc).
    """
    # Drop missing and over-long descriptions before tokenizing.
    doc_list = filter(
        lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN,
        doc_list,
    )
    cleaned_doc_list = list()
    # Alphabetic-only tokens; digits and punctuation are discarded.
    tokenizer = RegexpTokenizer(r"[a-zA-Z]+")
    stopwords = self.__get_words_to_ignore()
    dict_words = self.__get_words_to_consider()
    for doc in doc_list:
        lower = doc.lower()
        tokens = tokenizer.tokenize(lower)
        # Keep only known dictionary words that are not stopwords.
        tokens = [tok for tok in tokens if tok in dict_words]
        tokens = [tok for tok in tokens if tok not in stopwords]
        tokens = [tok for tok in tokens if tok is not None]
        cleaned_doc_list.append(tokens)
    return cleaned_doc_list
Method to clean and tokenize the document list. :param doc_list: Document list to clean and tokenize. :return: Cleaned and tokenized document list.
def cacheback(lifetime=None, fetch_on_miss=None, cache_alias=None, job_class=None, task_options=None, **job_class_kwargs):
    """Decorate a function to cache its return value.

    :lifetime: How long to cache items for
    :fetch_on_miss: Whether to perform a synchronous fetch when no cached
        result is found
    :cache_alias: The Django cache alias to store the result into.
    :job_class: The class to use for running the cache refresh job.
        Defaults to FunctionJob.
    :job_class_kwargs: Any extra kwargs to pass to the job_class
        constructor.
    """
    if job_class is None:
        job_class = FunctionJob
    # A single shared job instance services every call to the decorated
    # function.
    job = job_class(lifetime=lifetime, fetch_on_miss=fetch_on_miss, cache_alias=cache_alias, task_options=task_options, **job_class_kwargs)

    def _wrapper(fn):
        @wraps(fn, assigned=available_attrs(fn))
        def __wrapper(*args, **kwargs):
            return job.get(fn, *args, **kwargs)
        # Expose the originals for introspection/testing.
        __wrapper.fn = fn
        __wrapper.job = job
        return __wrapper
    return _wrapper
Decorate function to cache its return value. :lifetime: How long to cache items for :fetch_on_miss: Whether to perform a synchronous fetch when no cached result is found :cache_alias: The Django cache alias to store the result into. :job_class: The class to use for running the cache refresh job. Defaults using the FunctionJob. :job_class_kwargs: Any extra kwargs to pass to job_class constructor. Useful with custom job_class implementations.
def from_str(cls, s):
    r"""Return a FmtStr representing input.

    The str() of a FmtStr is guaranteed to produce the same FmtStr.
    Other input with escape sequences may not be preserved.

    >>> fmtstr("|"+fmtstr("hey", fg='red', bg='blue')+"|")
    '|'+on_blue(red('hey'))+'|'
    >>> fmtstr('|\x1b[31m\x1b[44mhey\x1b[49m\x1b[39m|')
    '|'+on_blue(red('hey'))+'|'
    """
    if '\x1b[' in s:
        try:
            tokens_and_strings = parse(s)
        except ValueError:
            # Unparseable escape data: strip the ANSI codes, keep text.
            return FmtStr(Chunk(remove_ansi(s)))
        else:
            chunks = []
            cur_fmt = {}
            for x in tokens_and_strings:
                if isinstance(x, dict):
                    # A parsed escape sequence updates the running format.
                    cur_fmt.update(x)
                elif isinstance(x, (bytes, unicode)):
                    # Text span: snapshot the current (non-None) format.
                    atts = parse_args('', dict((k, v) for k, v in cur_fmt.items() if v is not None))
                    chunks.append(Chunk(x, atts=atts))
                else:
                    raise Exception("logic error")
            return FmtStr(*chunks)
    else:
        return FmtStr(Chunk(s))
r""" Return a FmtStr representing input. The str() of a FmtStr is guaranteed to produced the same FmtStr. Other input with escape sequences may not be preserved. >>> fmtstr("|"+fmtstr("hey", fg='red', bg='blue')+"|") '|'+on_blue(red('hey'))+'|' >>> fmtstr('|\x1b[31m\x1b[44mhey\x1b[49m\x1b[39m|') '|'+on_blue(red('hey'))+'|'
def run(path, code=None, params=None, ignore=None, select=None, **meta):
    """Check code complexity with Radon.

    :return list: list of error dicts for blocks whose cyclomatic
        complexity exceeds the configured threshold.
    """
    complexity = params.get('complexity', 10)
    no_assert = params.get('no_assert', False)
    show_closures = params.get('show_closures', False)
    visitor = ComplexityVisitor.from_code(code, no_assert=no_assert)
    blocks = visitor.blocks
    if show_closures:
        blocks = add_inner_blocks(blocks)
    # Fixed: iterate ``blocks`` (not ``visitor.blocks``) so the inner
    # blocks added for show_closures are actually reported.
    # NOTE(review): 'number' (R709) and the text prefix (R701) disagree
    # — confirm which code is intended.
    return [
        {'lnum': block.lineno, 'col': block.col_offset, 'type': 'R',
         'number': 'R709',
         'text': 'R701: %s is too complex %d' % (block.name,
                                                 block.complexity)}
        for block in blocks
        if block.complexity > complexity
    ]
Check code with Radon. :return list: List of errors.
def _Pluralize(value, unused_context, args): if len(args) == 0: s, p = '', 's' elif len(args) == 1: s, p = '', args[0] elif len(args) == 2: s, p = args else: raise AssertionError if value > 1: return p else: return s
Formatter to pluralize words.