code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def main(args=None):
    """Entry point for the tag CLI.

    Parses arguments from the command line when none are supplied, so the
    CLI can also be driven programmatically (e.g. from tests).
    """
    if args is None:
        args = tag.cli.parser().parse_args()
    assert args.cmd in mains
    mains[args.cmd](args)
Entry point for the tag CLI. Isolated as a method so that the CLI can be called by other Python code (e.g. for testing), in which case the arguments are passed to the function. If no arguments are passed to the function, parse them from the command line.
def reset_tasks(self, request, context):
    """Clear every task captured so far; reply with an Empty message."""
    _log_request(request, context)
    memory = self.listener.memory
    memory.clear_tasks()
    return clearly_pb2.Empty()
Resets all captured tasks.
def pauli_expansion(
        val: Any,
        *,
        default: Union[value.LinearDict[str], TDefault] = RaiseTypeErrorIfNotProvided,
        atol: float = 1e-9
) -> Union[value.LinearDict[str], TDefault]:
    """Return the coefficients of ``val`` expanded in the Pauli basis.

    Args:
        val: The value whose Pauli expansion is to be returned.
        default: Returned when ``val`` supports neither a ``_pauli_expansion_``
            method nor a unitary; if unset, TypeError is raised instead.
        atol: Coefficients with absolute value below this are dropped.

    Raises:
        TypeError: no way to obtain an expansion and no default provided.
    """
    # Prefer the object's own _pauli_expansion_ protocol method if present.
    method = getattr(val, '_pauli_expansion_', None)
    expansion = NotImplemented if method is None else method()
    if expansion is not NotImplemented:
        return expansion.clean(atol=atol)
    # Fall back to expanding the object's unitary matrix, if it has one.
    matrix = unitary(val, default=None)
    if matrix is None:
        if default is RaiseTypeErrorIfNotProvided:
            raise TypeError('No Pauli expansion for object {} of type {}'
                            .format(val, type(val)))
        return default
    # For a 2**n x 2**n matrix: n = bit_length(2**n) - 1.
    num_qubits = matrix.shape[0].bit_length() - 1
    basis = operator_spaces.kron_bases(operator_spaces.PAULI_BASIS,
                                       repeat=num_qubits)
    expansion = operator_spaces.expand_matrix_in_orthogonal_basis(matrix, basis)
    return expansion.clean(atol=atol)
Returns coefficients of the expansion of val in the Pauli basis. Args: val: The value whose Pauli expansion is to returned. default: Determines what happens when `val` does not have methods that allow Pauli expansion to be obtained (see below). If set, the value is returned in that case. Otherwise, TypeError is raised. atol: Ignore coefficients whose absolute value is smaller than this. Returns: If `val` has a _pauli_expansion_ method, then its result is returned. Otherwise, if `val` has a small unitary then that unitary is expanded in the Pauli basis and coefficients are returned. Otherwise, if default is set to None or other value then default is returned. Otherwise, TypeError is raised. Raises: TypeError if `val` has none of the methods necessary to obtain its Pauli expansion and no default value has been provided.
def get_validation_errors(data, schema=None):
    """Yield jsonschema Draft-4 validation errors for ``data``.

    ``schema`` may be a dict, a schema name (str), or None (taken from
    ``data['$schema']``).
    """
    resolved_schema = _load_schema_for_record(data, schema)
    validator = Draft4Validator(
        resolved_schema,
        resolver=LocalRefResolver.from_schema(resolved_schema),
        format_checker=inspire_format_checker,
    )
    return validator.iter_errors(data)
Validation errors for a given record. Args: data (dict): record to validate. schema (Union[dict, str]): schema to validate against. If it is a string, it is interpreted as the name of the schema to load (e.g. ``authors`` or ``jobs``). If it is ``None``, the schema is taken from ``data['$schema']``. If it is a dictionary, it is used directly. Yields: jsonschema.exceptions.ValidationError: validation errors. Raises: SchemaNotFound: if the given schema was not found. SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was found in ``data``. jsonschema.SchemaError: if the schema is invalid.
def get(self, uri, query=None, **kwargs):
    """Issue a GET request for ``uri`` via :meth:`fetch`."""
    method = 'get'
    return self.fetch(method, uri, query, **kwargs)
make a GET request
def export(self, directory, revision=None):
    """Export the complete tree from the local VCS repository.

    :param directory: Destination directory (string); created if missing.
    :param revision: Revision to export (string or None, defaulting to
        :attr:`default_revision`).
    """
    target = os.path.abspath(directory)
    self.create()
    timer = Timer()
    rev = revision or self.default_revision
    logger.info("Exporting revision '%s' in %s to %s ..",
                rev, format_path(self.local), target)
    self.context.execute('mkdir', '-p', target)
    self.context.execute(*self.get_export_command(target, rev))
    logger.debug("Took %s to pull changes from remote %s repository.",
                 timer, self.friendly_name)
Export the complete tree from the local version control repository. :param directory: The directory where the tree should be exported (a string). :param revision: The revision to export (a string or :data:`None`, defaults to :attr:`default_revision`).
def _do_get(self, uri, **kwargs):
    """Convenience method for GET requests against the ScaleIO gateway.

    :param uri: path appended to the API URL.
    :return: decoded JSON body of the response.
    :raises RuntimeError: on HTTP error status or communication failure.
    """
    scaleioapi_get_headers = {'Content-type': 'application/json', 'Version': '1.0'}
    self.logger.debug("_do_get() " + "{}/{}".format(self._api_url, uri))
    # Preserved header-sniffing hook (``iteritems`` was Python-2-only).
    for key, value in kwargs.items():
        if key == 'headers':
            scaleio_get_headersvalue = value
    try:
        # Keep the Response object: the old code called .json() first and then
        # read .status_code off the dict, which always raised AttributeError
        # and was swallowed by a bare except.
        response = self._im_session.get(
            "{}/{}".format(self._api_url, uri), **kwargs)
    except Exception:
        raise RuntimeError("_do_get() - Communication error with ScaleIO gateway")
    if response.status_code != requests.codes.ok:
        # Format the status code instead of concatenating int to str.
        raise RuntimeError(
            "_do_get() - HTTP response error {}".format(response.status_code))
    return response.json()
Convenient method for GET requests. Returns the HTTP response value from a GET request.
def q(self, val):
    """Setter for ``q``; also refreshes the cumulative sum ``Q``."""
    arr = np.asarray(val)
    self._q = arr
    self.Q = cumsum(val)
Setter method for q.
def _num_values(self, vdr_dict): values = 1 for x in range(0, vdr_dict['num_dims']): if (vdr_dict['dim_vary'][x] != 0): values = values * vdr_dict['dim_sizes'][x] return values
Returns the number of values in a record, using a given VDR dictionary. Multiplies the dimension sizes of each dimension, if it is varying.
def mmGetPermanencesPlot(self, title=None):
    """Return a Plot of column permanences.

    @param title an optional title for the figure
    @return (Plot) 2D array plot: one row per column, one entry per input
    """
    plot = Plot(self, title)
    data = numpy.zeros((self.getNumColumns(), self.getNumInputs()))
    for i in xrange(self.getNumColumns()):
        # getPermanence fills row i in place with column i's permanence values.
        self.getPermanence(i, data[i])
    plot.add2DArray(data, xlabel="Permanences", ylabel="Column")
    return plot
Returns plot of column permanences. @param title an optional title for the figure @return (Plot) plot
def name_inner_event(cls):
    """Class decorator: set ``cls.Event._event_name`` to '<cls name>.Event'.

    Warns when ``cls`` has no inner ``Event`` class.
    """
    _missing = object()
    inner = getattr(cls, 'Event', _missing)
    if inner is not _missing:
        inner._event_name = '{}.Event'.format(cls.__name__)
    else:
        warnings.warn('Class {} does not have a inner Event'.format(cls))
    return cls
Decorator that renames the inner class ``cls.Event`` from 'Event' to '<cls name>.Event'.
def jsonl(self, jsonl_file):
    """Read and parse a jsonl input (path or open stream) into a Sequence.

    Each line must hold one valid JSON value.

    :param jsonl_file: path or file containing jsonl content
    :return: Sequence wrapping the parsed values
    """
    if isinstance(jsonl_file, str):
        opener = get_read_function(jsonl_file, self.disable_compression)
        source = opener(jsonl_file)
    else:
        source = jsonl_file
    return self(source).map(jsonapi.loads).cache(delete_lineage=True)
Reads and parses the input of a jsonl file stream or file. Jsonl formatted files must have a single valid json value on each line which is parsed by the python json module. >>> seq.jsonl('examples/chat_logs.jsonl').first() {u'date': u'10/09', u'message': u'hello anyone there?', u'user': u'bob'} :param jsonl_file: path or file containing jsonl content :return: Sequence wrapping jsonl file
def sub(value, arg):
    """Subtract ``arg`` from ``value``, trying numeric coercion first and
    falling back to the raw operands; returns '' when nothing works."""
    try:
        lhs, rhs = handle_float_decimal_combinations(
            valid_numeric(value), valid_numeric(arg), '-')
        return lhs - rhs
    except (ValueError, TypeError):
        pass
    try:
        return value - arg
    except Exception:
        return ''
Subtract the arg from the value.
def new_remove_attribute_transaction(self, ont_id: str, pub_key: 'Union[str, bytes]', attrib_key: str,
                                     b58_payer_address: str, gas_limit: int, gas_price: int):
    """Build a Transaction that removes an attribute from an OntId.

    :param ont_id: the OntId whose attribute will be removed.
    :param pub_key: hexadecimal public key as str, or raw bytes.
    :param attrib_key: key of the attribute to remove.
    :param b58_payer_address: base58 address of the fee payer.
    :param gas_limit: gas limit for the transaction.
    :param gas_price: gas price for the transaction.
    :return: an unsigned Transaction object.
    :raises SDKException: if ``pub_key`` is neither str nor bytes.
    """
    # Fixed annotation: the previous ``str or bytes`` evaluated to just ``str``
    # at def time, silently dropping bytes from the declared type.
    if isinstance(pub_key, str):
        bytes_pub_key = bytes.fromhex(pub_key)
    elif isinstance(pub_key, bytes):
        bytes_pub_key = pub_key
    else:
        raise SDKException(ErrorCode.params_type_error('a bytes or str type of public key is required.'))
    args = dict(ontid=ont_id.encode('utf-8'), attrib_key=attrib_key.encode('utf-8'), pk=bytes_pub_key)
    tx = self.__generate_transaction('removeAttribute', args, b58_payer_address, gas_limit, gas_price)
    return tx
This interface is used to generate a Transaction object which is used to remove attribute. :param ont_id: OntId. :param pub_key: the hexadecimal public key in the form of string. :param attrib_key: a string which is used to indicate which attribute we want to remove. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which is used to remove attribute.
def rest_del(self, url, params=None, session=None, verify=True, cert=None):
    """Perform a DELETE request to ``url`` with the given requests session.

    Returns a ``(body_text, status_code)`` tuple.
    """
    response = session.delete(url, params=params, verify=verify, cert=cert)
    return response.text, response.status_code
Perform a DELETE request to url with requests.session
def decode_keys(store, encoding='utf-8'):
    """Decode any bytes keys of ``store`` to str, in place, and return it.

    Parameters
    ----------
    store : dict
        Dictionary with data; non-bytes keys are untouched.

    Returns
    -------
    dict
        The same dict, with bytes keys replaced by decoded strings.
    """
    # Snapshot the keys first: mutating a dict while iterating its live
    # key view raises RuntimeError on Python 3.
    for key in list(store.keys()):
        if hasattr(key, 'decode'):
            decoded = key.decode(encoding)
            if key != decoded:
                store[decoded] = store.pop(key)
    return store
If a dictionary has keys that are bytes decode them to a str. Parameters --------- store : dict Dictionary with data Returns --------- result : dict Values are untouched but keys that were bytes are converted to ASCII strings. Example ----------- In [1]: d Out[1]: {1020: 'nah', b'hi': 'stuff'} In [2]: trimesh.util.decode_keys(d) Out[2]: {1020: 'nah', 'hi': 'stuff'}
def linkorcopy(src, dst):
    """Hardlink ``src`` to ``dst`` if possible, otherwise copy it.

    :raises error.ButcherError: if ``src`` is not a regular file.
    """
    if not os.path.isfile(src):
        # Fixed: format args must be a tuple — the old ``% src, dst`` only
        # formatted src (a TypeError, since the string has two placeholders)
        # and passed dst as a second argument to the exception.
        raise error.ButcherError('linkorcopy called with non-file source. '
                                 '(src: %s dst: %s)' % (src, dst))
    elif os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    elif os.path.exists(dst):
        os.unlink(dst)
    elif not os.path.exists(os.path.dirname(dst)):
        os.makedirs(os.path.dirname(dst))
    try:
        os.link(src, dst)
        log.debug('Hardlinked: %s -> %s', src, dst)
    except OSError:
        # Cross-device or unsupported hardlink: fall back to a full copy.
        shutil.copy2(src, dst)
        log.debug('Couldn\'t hardlink. Copied: %s -> %s', src, dst)
Hardlink src file to dst if possible, otherwise copy.
def get_application_groups():
    """Return the system's applications organized into dashboard groups.

    Groups follow FLUENT_DASHBOARD_APP_GROUPS; a ``'*'`` models entry selects
    the default module with all known apps excluded.
    """
    groups = []
    for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
        kwargs = groupdict.copy()
        if '*' in groupdict['models']:
            fallback = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
            kwargs['exclude'] = ALL_KNOWN_APPS + list(kwargs.get('exclude', []))
            del kwargs['models']
        else:
            fallback = 'CmsAppIconList'
        module = groupdict.get('module', fallback)
        kwargs['module'] = MODULE_ALIASES.get(module, module)
        groups.append((title, kwargs))
    return groups
Return the applications of the system, organized in various groups. These groups are not connected with the application names, but rather with a pattern of applications.
def namedb_is_history_snapshot(history_snapshot):
    """Verify that the given dict is a history snapshot carrying every
    consensus field for its opcode.

    Returns True on success; raises AssertionError otherwise.
    """
    assert 'op' in history_snapshot.keys(), "BUG: no op given"
    opcode = op_get_opcode_name(history_snapshot['op'])
    assert opcode is not None, "BUG: unrecognized op '%s'" % history_snapshot['op']
    required = op_get_consensus_fields(opcode)
    missing = [field for field in required if field not in history_snapshot.keys()]
    assert len(missing) == 0, ("BUG: operation '%s' is missing the following fields: %s" % (opcode, ",".join(missing)))
    return True
Given a dict, verify that it is a history snapshot. It must have all consensus fields. Return True if so. Raise an exception if it doesn't.
def _forceRefreshAutoRange(self): enabled = self.autoRangeCti and self.autoRangeCti.configValue self.rangeMinCti.enabled = not enabled self.rangeMaxCti.enabled = not enabled self.model.emitDataChanged(self)
The min and max config items will be disabled if auto range is on.
def SmartUnicode(string):
    """Coerce ``string`` to a unicode text object.

    bytes are decoded as UTF-8 with errors ignored; anything else goes
    through str() (with the py2 native conversion where applicable).
    """
    if isinstance(string, Text):
        return string
    if isinstance(string, bytes):
        return string.decode("utf-8", "ignore")
    converted = str(string)
    return converted.__native__() if compatibility.PY2 else converted
Returns a unicode object. This function will always return a unicode object. It should be used to guarantee that something is always a unicode object. Args: string: The string to convert. Returns: a unicode object.
def update_footer(self):
    """Refresh the footer label with the selected field's type and a sample
    of its unique values; clear it when nothing is selected."""
    item = self.field_list.currentItem()
    if not item:
        self.footer_label.setText('')
        return
    name = item.data(Qt.UserRole)
    fields = self.layer.fields()
    field = fields.field(name)
    idx = fields.lookupField(name)
    sample = list(self.layer.uniqueValues(idx))[:10]
    preview = ', '.join(str(v) for v in sample)
    text = tr('Field type: {0}\n').format(field.typeName())
    text += tr('Unique values: {0}').format(preview)
    self.footer_label.setText(text)
Update footer when the field list changes.
def un(byts):
    """De-serialize a python object from msgpack bytes.

    Strings are decoded as UTF-8; ``surrogatepass`` tolerates malformed
    input strings.
    """
    return msgpack.loads(
        byts,
        use_list=False,
        raw=False,
        unicode_errors='surrogatepass',
    )
Use msgpack to de-serialize a python object. Args: byts (bytes): The bytes to de-serialize Notes: String objects are decoded using utf8 encoding. In order to handle potentially malformed input, ``unicode_errors='surrogatepass'`` is set to allow decoding bad input strings. Returns: obj: The de-serialized object
async def traverse(self, func):
    """Drive ``func`` with this executor's args, yielding every result.

    Async generator functions are iterated; plain coroutines yield once.
    """
    call_args = self.args
    if inspect.isasyncgenfunction(func):
        async for item in func(*call_args):
            yield item
    else:
        yield await func(*call_args)
Traverses an async function or generator, yielding each result. This function is private. The class should be used as an iterator instead of using this method.
def _replication_request(command, host=None, core_name=None, params=None):
    """PRIVATE: run a solr replication command and return the parsed response.

    :param command: replication command to execute.
    :param host: solr host to query (default from config).
    :param core_name: solr core name, or None for all cores.
    :param params: extra ``name=value`` strings to send.
    """
    opts = [] if params is None else params
    extra = ["command={0}".format(command)] + opts
    url = _format_url('replication', host=host, core_name=core_name, extra=extra)
    return _http_request(url)
PRIVATE METHOD Performs the requested replication command and returns a dictionary with success, errors and data as keys. The data object will contain the JSON response. command : str The replication command to execute. host : str (None) The solr host to query. __opts__['host'] is default core_name: str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. params : list<str> ([]) Any additional parameters you want to send. Should be a list of strings in name=value format. e.g. ['name=value'] Return: dict<str, obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
def template_thinning(self, inj_filter_rejector):
    """Remove templates from the bank that are far from all injections.

    Keeps only templates whose tau0 chirp time lies within
    ``chirp_time_window`` of at least one injection; no-op when the
    rejector is disabled or has no window configured.
    """
    if not inj_filter_rejector.enabled or \
            inj_filter_rejector.chirp_time_window is None:
        # Nothing to thin.
        return
    injection_parameters = inj_filter_rejector.injection_params.table
    fref = inj_filter_rejector.f_lower
    threshold = inj_filter_rejector.chirp_time_window
    m1 = self.table['mass1']
    m2 = self.table['mass2']
    # tau0 (Newtonian chirp time) for every template in the bank.
    tau0_temp, _ = pycbc.pnutils.mass1_mass2_to_tau0_tau3(m1, m2, fref)
    indices = []
    for inj in injection_parameters:
        tau0_inj, _ = \
            pycbc.pnutils.mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2, fref)
        # Templates close enough in chirp time to this injection survive.
        inj_indices = np.where(abs(tau0_temp - tau0_inj) <= threshold)[0]
        indices.append(inj_indices)
    # NOTE(review): assumes at least one injection is present;
    # np.concatenate([]) would raise ValueError — confirm upstream guarantees.
    indices_combined = np.concatenate(indices)
    indices_unique = np.unique(indices_combined)
    self.table = self.table[indices_unique]
Remove templates from bank that are far from all injections.
def create_logger(self):
    """Create and attach a logger instance for this object.

    The logger name defaults to "bors" unless ``self.name`` is set. The
    level comes from ``self.conf.get_log_level()`` or, failing that, from
    ``self.context['log_level']``; unknown or missing levels fall back to
    INFO.
    """
    name = getattr(self, "name", "bors")
    self.log = logging.getLogger(name)
    try:
        lvl = self.conf.get_log_level()
    except AttributeError:
        lvl = self.context.get("log_level", None)
    # getattr(logging, None) raises TypeError (attribute name must be a
    # string), so guard non-string levels before the lookup.
    level = getattr(logging, lvl, logging.INFO) if isinstance(lvl, str) else logging.INFO
    self.log.setLevel(level)
Generates a logger instance from the singleton
def _legacy_pkcs1_v1_5_encode_md5_sha1(M, emLen):
    """Legacy PKCS#1 v1.5 encoding with the concatenated MD5||SHA1 digest.

    Returns the emLen-byte encoded message, or None (with a warning) when
    emLen is too short to hold the digest plus padding.
    """
    M = bytes_encode(M)
    md5_hash = hashes.Hash(_get_hash("md5"), backend=default_backend())
    md5_hash.update(M)
    sha1_hash = hashes.Hash(_get_hash("sha1"), backend=default_backend())
    sha1_hash.update(M)
    # H = MD5(M) || SHA1(M): 16 + 20 = 36 bytes.
    H = md5_hash.finalize() + sha1_hash.finalize()
    if emLen < 36 + 11:
        # Minimum length: 36-byte digest plus 11 bytes of header/padding.
        warning("pkcs_emsa_pkcs1_v1_5_encode: "
                "intended encoded message length too short")
        return None
    # EM = 0x00 || 0x01 || PS (0xff padding) || 0x00 || H
    PS = b'\xff' * (emLen - 36 - 3)
    return b'\x00' + b'\x01' + PS + b'\x00' + H
Legacy method for PKCS1 v1.5 encoding with MD5-SHA1 hash.
def close_config(self):
    """Close the currently opened candidate configuration.

    Errors from the RPC are printed rather than raised (best-effort close).

    Example:

    .. code-block:: python

        dev.open_config()
        dev.close_config()
    """
    try:
        self.dev.rpc.close_configuration()
    except Exception as err:
        # ``print err`` was Python-2-only syntax (a SyntaxError on 3);
        # the call form behaves identically on both versions.
        print(err)
Closes the existing opened configuration Example: .. code-block:: python from pyJunosManager import JunosDevice dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper") dev.open() dev.open_config() dev.close_config() dev.close()
def create(self, access_tokens, days_requested, options=None):
    """Create an asset report.

    :param access_tokens: list of access tokens, one per Item to include.
    :param days_requested: days of transaction history to include.
    :param options: optional dict of extra options.
    """
    payload = {
        'access_tokens': access_tokens,
        'days_requested': days_requested,
        'options': options if options else {},
    }
    return self.client.post('/asset_report/create', payload)
Create an asset report. :param [str] access_tokens: A list of access tokens, one token for each Item to be included in the Asset Report. :param int days_requested: Days of transaction history requested to be included in the Asset Report. :param dict options: An optional dictionary. For more information on the options object, see the documentation site listed above.
def read_varnames(self, path="/"):
    """List the variable names stored in the group specified by ``path``."""
    if path != "/":
        return self.path2group[path].variables.keys()
    return self.rootgrp.variables.keys()
List of variable names stored in the group specified by path.
def set_list_attribute(self, other, trigger_klass, property_name):
    """Guarded append of ``other`` onto the named list attribute.

    Only acts when ``other`` is a ``trigger_klass`` instance.

    :raises AttributeError: if ``self`` lacks ``property_name``.
    :raises ValueError: if ``other`` is already in the list.
    """
    if not isinstance(other, trigger_klass):
        return
    if not hasattr(self, property_name):
        raise AttributeError("%s has no property %s" % (self.__class__.__name__, property_name))
    current = getattr(self, property_name, [])
    if other in current:
        raise ValueError("%s already exists in %s" % (other.__class__.__name__, self.__class__.__name__))
    current.append(other)
    setattr(self, property_name, current)
Used to set guard the setting of a list attribute, ensuring the same element is not added twice.
def pickle(self) -> None:
    """Serialize this Corpus object to ``<tgt_dir>/corpus.p``."""
    target = self.tgt_dir / "corpus.p"
    logger.debug("pickling %r object and saving it to path %s", self, target)
    with target.open("wb") as out:
        pickle.dump(self, out)
Pickles the Corpus object in a file in tgt_dir.
def linkUserToMostRecentCustomer(sender, **kwargs):
    """Signal handler: when a primary email address is confirmed, link its
    user to the matching unlinked Customer (via the customer's most recent
    completed registration) and backfill missing name fields.
    """
    email_address = kwargs.get('email_address', None)
    if not email_address or not email_address.primary or not email_address.verified:
        # Only react to confirmed primary addresses.
        return
    user = email_address.user
    if not hasattr(user, 'customer'):
        # Most recent completed registration by a customer with this email
        # that is not yet linked to any user.
        last_reg = Registration.objects.filter(
            customer__email=email_address.email,
            customer__user__isnull=True,
            dateTime__isnull=False
        ).order_by('-dateTime').first()
        if last_reg:
            customer = last_reg.customer
            customer.user = user
            customer.save()
            if not user.first_name and not user.last_name:
                # Backfill missing name details from the customer record.
                user.first_name = customer.first_name
                user.last_name = customer.last_name
                user.save()
If a new primary email address has just been confirmed, check if the user associated with that email has an associated customer object yet. If not, then look for the customer with that email address who most recently registered for something and that is not associated with another user. Automatically associate the User with with Customer, and if missing, fill in the user's name information with the Customer's name. This way, when a new or existing customer creates a user account, they are seamlessly linked to their most recent existing registration at the time they verify their email address.
def get_output_file(self, in_file, instance, field, **kwargs):
    """Create a temporary output file for the processed image.

    ``delete=False`` lets a regular `FileSystemStorage` move the file into
    place; cloud storages may prefer ``delete=True``.
    """
    suffix = '_%s_%s%s' % (get_model_name(instance), field.name, self.get_ext())
    return NamedTemporaryFile(mode='rb', suffix=suffix, delete=False)
Creates a temporary file. With regular `FileSystemStorage` it does not need to be deleted, instead the file is safely moved over. With other cloud based storage it is a good idea to set `delete=True`.
def reset_password(token):
    """View that handles a reset-password request for the given token.

    Flashes messages for invalid/expired tokens, re-sends instructions on
    expiry, and updates the password on a valid form submit.
    """
    expired, invalid, user = reset_password_token_status(token)
    if not user or invalid:
        invalid = True
        do_flash(*get_message('INVALID_RESET_PASSWORD_TOKEN'))
    if expired:
        # Token lapsed: send the user a fresh reset link before bouncing.
        send_reset_password_instructions(user)
        do_flash(*get_message('PASSWORD_RESET_EXPIRED', email=user.email,
                              within=_security.reset_password_within))
    if invalid or expired:
        return redirect(url_for('forgot_password'))
    form = _security.reset_password_form()
    if form.validate_on_submit():
        after_this_request(_commit)
        update_password(user, form.password.data)
        do_flash(*get_message('PASSWORD_RESET'))
        return redirect(get_url(_security.post_reset_view) or
                        get_url(_security.login_url))
    return _security.render_template(
        config_value('RESET_PASSWORD_TEMPLATE'),
        reset_password_form=form,
        reset_password_token=token,
        **_ctx('reset_password')
    )
View function that handles a reset password request.
def serial_assimilate(self, rootpath):
    """Assimilate the entire subdirectory structure under ``rootpath``
    serially, appending each drone result to ``self._data``.
    """
    valid_paths = []
    for (parent, subdirs, files) in os.walk(rootpath):
        valid_paths.extend(self._drone.get_valid_paths((parent, subdirs, files)))
    total = len(valid_paths)
    # Removed dead code: a local ``data`` list was declared but never
    # populated, so its trailing MontyDecoder loop could never run.
    for count, path in enumerate(valid_paths, 1):
        self._data.append(self._drone.assimilate(path))
        logger.info('{}/{} ({:.2f}%) done'.format(count, total, count / total * 100))
Assimilate the entire subdirectory structure in rootpath serially.
def process_data(data, models):
    """Convert ``data`` into processed data keyed by model datatag.

    Each model builds (and optionally coarse-grains) its slice of ``data``;
    results are collected into a BufferDict for MultiFitter use.
    """
    pdata = gvar.BufferDict()
    for model in MultiFitter.flatten_models(models):
        built = model.builddata(data)
        if model.ncg > 1:
            built = MultiFitter.coarse_grain(built, ncg=model.ncg)
        pdata[model.datatag] = built
    return pdata
Convert ``data`` to processed data using ``models``. Data from dictionary ``data`` is processed by each model in list ``models``, and the results collected into a new dictionary ``pdata`` for use in :meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`.
def add_escape(self, idx, char):
    """Translate the escape sequence ending at ``idx`` and emit its text.

    :param idx: index just past the escape sequence in ``self.format``.
    :param char: fallback character for unknown sequences.
    """
    sequence = self.format[self.str_begin:idx]
    self.fmt.append_text(self.fmt._unescape.get(sequence, char))
Translates and adds the escape sequence. :param idx: Provides the ending index of the escape sequence. :param char: The actual character that was escaped.
def hash_folder(folder, regex='[!_]*'):
    """Return ``{filename: urlsafe-b64 MD5 digest}`` for files in ``folder``.

    :param folder: the directory to compute the sums over.
    :param regex: glob pattern limiting which entries are hashed
        (the default skips names starting with an underscore).
    """
    file_hashes = {}
    for path in glob.glob(os.path.join(folder, regex)):
        if not os.path.isfile(path):
            continue
        # Open in binary mode: hashing text-mode content raises TypeError on
        # Python 3 (md5 needs bytes) and corrupts newlines on Windows.
        with open(path, 'rb') as handle:
            digest = hashlib.md5(handle.read()).digest()
        file_hashes[os.path.basename(path)] = urlsafe_b64encode(digest)
    return file_hashes
Get the md5 sum of each file in the folder and return to the user :param folder: the folder to compute the sums over :param regex: an expression to limit the files we match :return: Note: by default we will hash every file in the folder Note: we will not match anything that starts with an underscore
def _match_excluded(self, filename, patterns):
    """Match via ``_wcparse._match_real`` directly, skipping the usual
    ``exists`` check."""
    return _wcparse._match_real(
        filename,
        patterns._include,
        patterns._exclude,
        patterns._follow,
        self.symlinks,
    )
Call match real directly to skip unnecessary `exists` check.
def xpointerNewRangeNodes(self, end):
    """Create a new xmlXPathObjectPtr of type range spanning this node and
    ``end``."""
    end__o = None if end is None else end._o
    ret = libxml2mod.xmlXPtrNewRangeNodes(self._o, end__o)
    if ret is None:
        raise treeError('xmlXPtrNewRangeNodes() failed')
    return xpathObjectRet(ret)
Create a new xmlXPathObjectPtr of type range using 2 nodes
def get_tags_of_invoice_per_page(self, invoice_id, per_page=1000, page=1):
    """Fetch one page of tags for an invoice.

    :param invoice_id: the invoice id
    :param per_page: objects per page (default 1000)
    :param page: page number (default 1)
    :return: list
    """
    query = {'invoice_id': invoice_id}
    return self._get_resource_per_page(
        resource=INVOICE_TAGS,
        per_page=per_page,
        page=page,
        params=query,
    )
Get tags of invoice per page :param invoice_id: the invoice id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
def reflect_table(conn, table_name, schema='public'):
    """Reflect a table's columns and primary key into a Table object."""
    cols_meta = list(get_column_metadata(conn, table_name, schema=schema))
    pk_cols = list(get_primary_keys(conn, table_name, schema=schema))
    return Table(
        table_name,
        [Column(**meta) for meta in cols_meta],
        PrimaryKey(pk_cols),
        schema=schema,
    )
Reflect basic table attributes.
def set_states(self, states):
    """Restore updater states (optionally bundled with an optimizer) from a
    pickled blob and mark every state as not yet synced.

    NOTE: pickle.loads must only ever receive trusted input.
    """
    payload = pickle.loads(states)
    if isinstance(payload, tuple) and len(payload) == 2:
        self.states, self.optimizer = payload
    else:
        self.states = payload
    self.states_synced = dict.fromkeys(self.states.keys(), False)
Sets updater states.
def require_server(fn):
    """Decorator: abort unless the fab task was invoked with a server name.

    Decorated tasks must be called as ``fab <server name> <task name>``.
    """
    @wraps(fn)
    def inner(*args, **kwargs):
        if env.machine is None:
            abort(red('ERROR: You must provide a server name to call this'
                      ' task!'))
        return fn(*args, **kwargs)
    return inner
Checks if the user has called the task with a server name. Fabric tasks decorated with this decorator must be called like so:: fab <server name> <task name> If no server name is given, the task will not be executed.
def exception_message():
    """Build a dict describing the exception currently being handled."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    return {
        'exception': {
            'type': exc_type,
            'value': exc_value,
            'traceback': exc_tb,
        },
        'traceback': traceback.format_exception(exc_type, exc_value, exc_tb),
    }
Create a message with details on the exception.
def msvc14_get_vc_env(plat_spec):
    """Patched ``distutils._msvccompiler._get_vc_env`` supporting extra
    compilers (VC++ Build Tools 2015, VS 2017 / Build Tools 2017).

    Sets the environment without using vcvarsall.bat.

    Parameters
    ----------
    plat_spec: str
        Target architecture.

    Return
    ------
    environment: dict
    """
    try:
        # Try the stock implementation first.
        return get_unpatched(msvc14_get_vc_env)(plat_spec)
    except distutils.errors.DistutilsPlatformError:
        # Pass error to retry using an extra compiler below.
        pass
    try:
        return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
    except distutils.errors.DistutilsPlatformError as exc:
        # Enrich the error with download hints, then re-raise.
        _augment_exception(exc, 14.0)
        raise
Patched "distutils._msvccompiler._get_vc_env" for support extra compilers. Set environment without use of "vcvarsall.bat". Known supported compilers ------------------------- Microsoft Visual C++ 14.0: Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) Microsoft Visual Studio 2017 (x86, x64, arm, arm64) Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) Parameters ---------- plat_spec: str Target architecture. Return ------ environment: dict
def dropColumnsFromRabaObjTable(self, name, lstFieldsToKeep):
    """Remove columns from a RabaObj table by rebuilding it with only
    ``lstFieldsToKeep`` (which must not contain raba_id or json fields)."""
    if len(lstFieldsToKeep) == 0:
        raise ValueError("There are no fields to keep")
    tmp_name = name + '_copy'
    kept = ', '.join(lstFieldsToKeep)
    self.createTable(tmp_name, 'raba_id INTEGER PRIMARY KEY AUTOINCREMENT, json, %s' % (kept))
    copy_sql = "INSERT INTO %s SELECT %s FROM %s;" % (tmp_name, 'raba_id, json, %s' % kept, name)
    self.execute(copy_sql)
    self.dropTable(name)
    self.renameTable(tmp_name, name)
Removes columns from a RabaObj table. lstFieldsToKeep should not contain raba_id or json fileds
def _dstationarystate(self, k, param): if self._distributionmodel: return self.model.dstationarystate(k, param) else: return self.model.dstationarystate(param)
Returns the dstationarystate .
def calc_effective_permeability(self, inlets=None, outlets=None, domain_area=None, domain_length=None):
    r"""Calculate the effective permeability for this linear transport
    algorithm.

    Parameters
    ----------
    inlets, outlets : array_like, optional
        Pores where the inlet/outlet pressure boundary conditions were
        applied; inferred from the algorithm when omitted.
    domain_area, domain_length : scalar, optional
        Inlet-face area and inlet-to-outlet length; estimated (and usually
        underestimated) when omitted.
    """
    phase = self.project.phases()[self.settings['phase']]
    d_normal = self._calc_eff_prop(inlets=inlets, outlets=outlets, domain_area=domain_area, domain_length=domain_length)
    # Permeability = normalized transport coefficient x mean phase viscosity.
    K = d_normal * sp.mean(phase['pore.viscosity'])
    return K
r""" This calculates the effective permeability in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
def encode_basic_auth(username, password):
    """Encode ``username:password`` as an HTTP Basic Authorization value."""
    credentials = "{}:{}".format(username, password).encode("utf-8")
    token = b64encode(credentials).decode("utf-8")
    return "Basic {}".format(token)
Encode basic auth credentials.
def load(self, path: str, k: Optional[int] = None):
    """Load a lexicon from a NumPy array file.

    The top-k target ids for each source id are stored sorted by
    increasing target id.

    :param path: Path to the NumPy array file.
    :param k: Optionally load fewer entries per source than stored.
    """
    load_time_start = time.time()
    with open(path, 'rb') as inp:
        _lex = np.load(inp)
    # Number of entries per source actually stored on disk.
    loaded_k = _lex.shape[1]
    if k is not None:
        top_k = min(k, loaded_k)
        if k > loaded_k:
            logger.warning("Can not load top-%d translations from lexicon that "
                           "contains at most %d entries per source.", k, loaded_k)
    else:
        top_k = loaded_k
    self.lex = np.zeros((len(self.vocab_source), top_k), dtype=_lex.dtype)
    for src_id, trg_ids in enumerate(_lex):
        # Keep only the first top_k entries, sorted by target id.
        self.lex[src_id, :] = np.sort(trg_ids[:top_k])
    load_time = time.time() - load_time_start
    logger.info("Loaded top-%d lexicon from \"%s\" in %.4fs.", top_k, path, load_time)
Load lexicon from Numpy array file. The top-k target ids will be sorted by increasing target id. :param path: Path to Numpy array file. :param k: Optionally load less items than stored in path.
def get_ancestor_id_names(mention):
    """Return the HTML id attributes of the Mention's ancestors, root first.

    For a candidate, only its first Mention's ancestors are considered.

    :param mention: The Mention to evaluate
    :rtype: list of strings
    """
    span = _to_span(mention)
    id_names = []
    node = _get_node(span.sentence)
    while node is not None:
        id_names.insert(0, str(node.get("id")))
        node = node.getparent()
    return id_names
Return the HTML id's of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings
def update_lun(self, add_luns=None, remove_luns=None):
    """Update the CG's LUNs: add ``add_luns`` and remove ``remove_luns``.

    No-op (returning RESP_OK) when both lists are empty.
    """
    if not (add_luns or remove_luns):
        log.debug("Empty add_luns and remove_luns passed in, "
                  "skip update_lun.")
        return RESP_OK
    return self.modify(lun_add=self._prepare_luns_add(add_luns),
                       lun_remove=self._prepare_luns_remove(remove_luns, True))
Updates the LUNs in CG, adding the ones in `add_luns` and removing the ones in `remove_luns`
def install(replace_existing=False):
    """Install the DAPA programmer definition."""
    config = AutoBunch()
    config.name = 'DAPA'
    config.protocol = 'dapa'
    config.force = 'true'
    install_programmer('dapa', config, replace_existing=replace_existing)
install dapa programmer.
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):
    """Builds find specifications from artifact definitions.

    Args:
      artifact_filter_names (list[str]): names of artifact definitions used
          for filtering file system and Windows Registry key paths.
      environment_variables (Optional[list[EnvironmentVariableArtifact]]):
          environment variables.
    """
    find_specs = []
    for name in artifact_filter_names:
        definition = self._artifacts_registry.GetDefinitionByName(name)
        if not definition:
            # Unknown name: skip, but leave a trace for debugging.
            logger.debug('undefined artifact definition: {0:s}'.format(name))
            continue
        logger.debug('building find spec from artifact definition: {0:s}'.format(
            name))
        artifact_find_specs = self._BuildFindSpecsFromArtifact(
            definition, environment_variables)
        find_specs.extend(artifact_find_specs)
    # Partition the find specs by type onto the matching collection.
    for find_spec in find_specs:
        if isinstance(find_spec, file_system_searcher.FindSpec):
            self.file_system_find_specs.append(find_spec)
        elif isinstance(find_spec, registry_searcher.FindSpec):
            self.registry_find_specs.append(find_spec)
        else:
            logger.warning('Unsupported find specification type: {0:s}'.format(
                type(find_spec)))
Builds find specifications from artifact definitions. Args: artifact_filter_names (list[str]): names of artifact definitions that are used for filtering file system and Windows Registry key paths. environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables.
def _create_thumbnail(self, model_instance, thumbnail, image_name):
    """Resize ``thumbnail`` and persist it next to the full image."""
    resized = self._do_resize(thumbnail, self.thumbnail_size)
    full_name = self.generate_filename(model_instance, image_name)
    thumb_name = _get_thumbnail_filename(full_name)
    upload = self._get_simple_uploaded_file(resized, thumb_name)
    self.storage.save(thumb_name, upload)
Resizes and saves the thumbnail image
def set(self, key, value):
    """Set requirement ``key`` to ``value``.

    Tags are routed through ``_set_tag``; dict values merge into an
    existing dict requirement instead of replacing it.
    """
    if key == "tags":
        self._set_tag(tags=value)
        return
    existing = self._requirements.get(key)
    if isinstance(value, dict) and key in self._requirements and isinstance(existing, dict):
        self._requirements[key] = merge(existing, value)
    else:
        self._requirements[key] = value
Sets the value for a specific requirement. :param key: Name of requirement to be set :param value: Value to set for requirement key :return: Nothing, modifies requirement
def _start_new_worker_process(self, server_socket):
    """Fork a child worker listening on ``server_socket``; return the
    started Process handle."""
    from multiprocessing import Process
    child = Process(target=forked_child,
                    args=self._get_child_args(server_socket))
    child.start()
    return child
Start a new child worker process which will listen on the given socket and return a reference to the new process.
def _reverse_transform_column(self, table, metadata, table_name):
    """Reverse the transformation on one column of ``table``.

    Args:
        table (pandas.DataFrame): dataframe containing the column.
        metadata (dict): metadata for the column.
        table_name (str): name of the table in the original dataset.

    Returns:
        pandas.DataFrame: the reverse-transformed column (plus, when
        self.missing is set, a '?'-prefixed null-flag column), or None
        when the column is absent from ``table``.
    """
    column_name = metadata['name']
    if column_name not in table:
        return
    # Null-flag companion column uses a '?' prefix by convention.
    null_name = '?' + column_name
    content = pd.DataFrame(columns=[column_name], index=table.index)
    transformer = self.transformers[(table_name, column_name)]
    content[column_name] = transformer.reverse_transform(table[column_name].to_frame())
    # NOTE(review): this tests membership in the column's *values*, not in
    # table.columns — confirm that is the intended check.
    if self.missing and null_name in table[column_name]:
        content[null_name] = table.pop(null_name)
        null_transformer = transformers.NullTransformer(metadata)
        content[column_name] = null_transformer.reverse_transform(content)
    return content
Reverses the transformation on a column from table using the given parameters. Args: table (pandas.DataFrame): Dataframe containing column to transform. metadata (dict): Metadata for given column. table_name (str): Name of table in original dataset. Returns: pandas.DataFrame: Dataframe containing the transformed column. If self.missing=True, it will contain a second column containing 0 and 1 marking if that value was originally null or not. It will return None in the case the column is not in the table.
def get_new_members(self, results):
    """Yield a Member for each successful membership request.

    :param results: the results of a membership request check
    :return: the successful requests, as Member objects
    :rtype: generator
    """
    for result in results:
        # guid is metadata about the request, not a Member field; pop it
        # for construction and restore it afterwards.
        guid = result.pop('guid')
        yield Member(self.manager, self.group_id, **result)
        result['guid'] = guid
Return the newly added members. :param results: the results of a membership request check :type results: :class:`list` :return: the successful requests, as :class:`~groupy.api.memberships.Members` :rtype: generator
def change_customer_nc_users_quota(sender, structure, user, role, signal, **kwargs):
    """Recalculate nc_user_count quota usage on structure role grant/revoke."""
    allowed_signals = (signals.structure_role_granted,
                       signals.structure_role_revoked)
    assert signal in allowed_signals, \
        'Handler "change_customer_nc_users_quota" has to be used only with structure_role signals'
    assert sender in (Customer, Project), \
        'Handler "change_customer_nc_users_quota" works only with Project and Customer models'
    # A Project signal affects its owning customer's quota.
    customer = structure if sender == Customer else structure.customer
    users = customer.get_users()
    customer.set_quota_usage(Customer.Quotas.nc_user_count, users.count())
Modify nc_user_count quota usage on structure role grant or revoke
def bounds(self):
    """Return the image rectangle in pixels, as a shapely Polygon."""
    points = [self.image_corner(kind) for kind in self.corner_types()]
    return Polygon([[point.x, point.y] for point in points])
Return image rectangle in pixels, as shapely.Polygon.
def grab_literal(template, l_del):
    """Split the leading literal text off *template* at the left delimiter.

    Returns ``(literal, remainder)``.  When the delimiter is absent, the
    whole template is the literal and the remainder is empty.  Newlines
    consumed are counted into the module-level line tracker.
    """
    global _CURRENT_LINE
    try:
        literal, remainder = template.split(l_del, 1)
    except ValueError:
        # No delimiter anywhere in the template.
        return (template, '')
    _CURRENT_LINE += literal.count('\n')
    return (literal, remainder)
Parse a literal from the template
def _get_scalexy(self, ims_width, ims_height): cell_attributes = self.code_array.cell_attributes[self.key] angle = cell_attributes["angle"] if abs(angle) == 90: scale_x = self.rect[3] / float(ims_width) scale_y = self.rect[2] / float(ims_height) else: scale_x = self.rect[2] / float(ims_width) scale_y = self.rect[3] / float(ims_height) return scale_x, scale_y
Returns scale_x, scale_y for bitmap display
def sign_nonce(key, nonce):
    """Sign the server nonce from the WWW-Authenticate header with an authKey.

    :param key: secret key (bytes)
    :param nonce: server-supplied nonce (bytes)
    :return: raw HMAC-SHA256 digest (bytes)
    """
    # The standard-library hmac/hashlib produce a digest identical to
    # Cryptodome's HMAC/SHA256, removing the third-party dependency.
    import hashlib
    import hmac
    return hmac.new(key, nonce, digestmod=hashlib.sha256).digest()
Sign the server nonce from the WWW-Authenticate header with an authKey.
def nn_allocmsg(size, type):
    """Allocate a nanomsg message of *size* bytes; None on failure."""
    pointer = _nn_allocmsg(size, type)
    return None if pointer is None else _create_message(pointer, size)
allocate a message
def file_iterator(filehandle, verbose=False):
    """Iterate over a file and yield stripped, non-empty lines.

    :param filehandle: an open file-like object, or a filename to open.
    :param verbose: if True, show progress while reading (only possible
        for real files with a ``.name`` attribute, not streams).
    """
    # Accept a filename as well as an open handle; isinstance is the
    # idiomatic check (the original compared type(...).__name__ == "str").
    if isinstance(filehandle, str):
        filehandle = open(filehandle)
    if verbose:
        try:
            pind = ProgressIndicator(
                totalToDo=os.path.getsize(filehandle.name),
                messagePrefix="completed",
                messageSuffix="of processing " + filehandle.name)
        except AttributeError:
            # Streams (e.g. stdin) have no .name; warn and disable progress.
            sys.stderr.write("BEDIterator -- warning: " +
                             "unable to show progress for stream")
            verbose = False
    for line in filehandle:
        line = line.rstrip('\n')
        if verbose:
            pind.done = filehandle.tell()
            pind.showProgress()
        if line == "":
            continue
        yield line
Iterate over a file and yield stripped lines. Optionally show progress.
def install_client_interceptors(client_interceptors=()):
    """Install client interceptors for the patchers.

    :param client_interceptors: a list of dotted class names to install.
    """
    if not _valid_args(client_interceptors):
        raise ValueError('client_interceptors argument must be a list')
    from ..http_client import ClientInterceptors
    for name in client_interceptors:
        logging.info('Loading client interceptor %s', name)
        interceptor_cls = _load_symbol(name)
        logging.info('Adding client interceptor %s', name)
        ClientInterceptors.append(interceptor_cls())
Install client interceptors for the patchers. :param client_interceptors: a list of client interceptors to install. Should be a list of classes
def wait_for_at_least_one_message(self, channel):
    """Read from the SSH channel until at least one complete msgpack
    message can be decoded, then return all decoded messages.

    Returns False when the remote side closed the channel; re-raises
    any other read error after reporting it via connection_error().
    """
    unpacker = msgpack.Unpacker(encoding='utf-8')
    while True:
        try:
            start = time.time()
            chunk = self.ssh_channel[channel].recv(1024)
            end = time.time()
            # Track recent read throughput (bytes/second) in a bounded
            # sliding window: once past 20 samples, drop the oldest 10.
            self.read_speeds.append( len(chunk) / (end-start) )
            if len(self.read_speeds) > 20:
                self.read_speeds = self.read_speeds[10:]
            if chunk == b'':
                # Empty read means the remote end closed the connection.
                self.connection_error(channel, 'Connection broken w')
                return False
        except Exception as error:
            self.connection_error(channel, error)
            raise
        # Feed bytes to the streaming unpacker; it yields only messages
        # that are complete, so partial frames keep the loop going.
        unpacker.feed(chunk)
        messages = [m for m in unpacker]
        if messages:
            return messages
Reads until we receive at least one message we can unpack. Return all found messages.
def branch_exists(self, branch):
    """Return True if *branch* exists in the repository, else False."""
    try:
        # rev-parse fails (non-zero exit) for unknown refs.
        git(self.gitdir, self.gitwd, "rev-parse", branch)
        return True
    except sh.ErrorReturnCode:
        return False
Return True if the given branch exists in the repository, False otherwise.
def RunOnce(self):
    """Main CronHook method: start an async cron worker when enabled."""
    if not config.CONFIG["Cron.active"]:
        return
    self.cron_worker = CronWorker()
    self.cron_worker.RunAsync()
Main CronHook method.
def update_letter_comment(self, letter_comment_id, letter_comment_dict):
    """Update a letter comment via a PUT request.

    :param letter_comment_id: the letter comment id
    :param letter_comment_dict: dict of fields to update
    :return: dict
    """
    return self._create_put_request(
        resource=LETTER_COMMENTS,
        billomat_id=letter_comment_id,
        send_data=letter_comment_dict,
    )
Updates a letter comment :param letter_comment_id: the letter comment id :param letter_comment_dict: dict :return: dict
def set_colour(self, r, g, b):
    """Set colour of an rgb bulb.

    Args:
        r(int): Value for the colour red as int from 0-255.
        g(int): Value for the colour green as int from 0-255.
        b(int): Value for the colour blue as int from 0-255.
    """
    # Validate each channel in r, g, b order (same order and messages
    # as always).
    for value, name in ((r, "red"), (g, "green"), (b, "blue")):
        if not 0 <= value <= 255:
            raise ValueError(
                "The value for {} needs to be between 0 and 255.".format(name))
    hexvalue = BulbDevice._rgb_to_hexvalue(r, g, b)
    payload = self.generate_payload(SET, {
        self.DPS_INDEX_MODE: self.DPS_MODE_COLOUR,
        self.DPS_INDEX_COLOUR: hexvalue})
    return self._send_receive(payload)
Set colour of an rgb bulb. Args: r(int): Value for the colour red as int from 0-255. g(int): Value for the colour green as int from 0-255. b(int): Value for the colour blue as int from 0-255.
def new_stats_exporter(options=None, interval=None):
    """Create a StackdriverStatsExporter and start its transport thread.

    Falls back to default auth when *options* is None (which may raise
    ``google.auth.exceptions.DefaultCredentialsError``); raises
    ValueError when the project id is blank.

    :type options: :class:`Options`
    :type interval: int or float
    :rtype: :class:`StackdriverStatsExporter`
    """
    if options is None:
        _, project_id = google.auth.default()
        options = Options(project_id=project_id)
    if str(options.project_id).strip() == "":
        raise ValueError(ERROR_BLANK_PROJECT_ID)
    info = client_info.ClientInfo(client_library_version=get_user_agent_slug())
    exporter = StackdriverStatsExporter(
        client=monitoring_v3.MetricServiceClient(client_info=info),
        options=options,
    )
    # Kick off the background thread that periodically exports stats.
    transport.get_exporter_thread(stats.stats, exporter, interval=interval)
    return exporter
Get a stats exporter and running transport thread. Create a new `StackdriverStatsExporter` with the given options and start periodically exporting stats to stackdriver in the background. Fall back to default auth if `options` is null. This will raise `google.auth.exceptions.DefaultCredentialsError` if default credentials aren't configured. See `opencensus.metrics.transport.get_exporter_thread` for details on the transport thread. :type options: :class:`Options` :param exporter: Options to pass to the exporter :type interval: int or float :param interval: Seconds between export calls. :rtype: :class:`StackdriverStatsExporter` :return: The newly-created exporter.
def start_event_loop(self, timeout=0):
    """Start a blocking event loop (for ginput / waitforbuttonpress).

    Blocks until a callback triggers stop_event_loop() or *timeout*
    seconds elapse; timeout <= 0 means never time out.  This is distinct
    from the main GUI event loop, which is always running.

    Raises RuntimeError if an event loop is already running.
    """
    if hasattr(self, '_event_loop'):
        raise RuntimeError("Event loop already running")
    id = wx.NewId()
    timer = wx.Timer(self, id=id)
    if timeout > 0:
        # One-shot timer fires EVT_TIMER after `timeout` seconds, which
        # is bound to stop_event_loop to break out of Run().
        timer.Start(timeout*1000, oneShot=True)
        bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id)
    # Run() blocks here until stop_event_loop exits the loop.
    self._event_loop = wx.EventLoop()
    self._event_loop.Run()
    timer.Stop()
Start an event loop. This is used to start a blocking event loop so that interactive functions, such as ginput and waitforbuttonpress, can wait for events. This should not be confused with the main GUI event loop, which is always running and has nothing to do with this. Call signature:: start_event_loop(self,timeout=0) This call blocks until a callback function triggers stop_event_loop() or *timeout* is reached. If *timeout* is <=0, never timeout. Raises RuntimeError if event loop is already running.
def set_parameters(self, **args):
    """Set the processor's stored parameters.

    Each keyword must name an entry of ``self.PARAMETERS`` and carry a
    value of the same type as the declared example value.

    :return: tuple ``(not_used, not_given)`` — sets of keyword names
        that did not match any parameter, and of parameters that were
        not supplied.
    :raises Exception: when a value's type does not match the declared
        parameter type.
    """
    for name, declared in self.PARAMETERS.items():
        new_value = args.get(name)
        # `is not None` is the correct identity test; `!= None` invokes
        # the value's __eq__, which can misbehave for custom types.
        if new_value is not None:
            if not _same_type(new_value, declared):
                raise Exception(
                    "On processor {0}, argument {1} takes something like {2}, but {3} was given".format(self, name, declared, new_value))
            setattr(self, name, new_value)
    not_used = set(args.keys()).difference(set(self.PARAMETERS.keys()))
    not_given = set(self.PARAMETERS.keys()).difference(set(args.keys()))
    return not_used, not_given
sets the processor stored parameters
def visit_FunctionDef(self, node):
    """Visit a function node and mark it as non-async."""
    func = self.get_function_node(node)
    if func is None:
        return
    func._async = False
Visit a function node.
def help(self):
    """Return the full help message for the step wizard.

    :returns: A message object containing brand, heading and content.
    :rtype: m.Message
    """
    message = m.Message()
    for part in (m.Brand(), self.help_heading(), self.help_content()):
        message.add(part)
    return message
Return full help message for the step wizard. :returns: A message object contains help text. :rtype: m.Message
def read_welcome_message(self):
    """Read and validate the server welcome message.

    Coroutine.
    """
    response = yield from self._control_stream.read_reply()
    self.raise_if_not_match(
        'Server ready',
        ReplyCodes.service_ready_for_new_user,
        response,
    )
Read the welcome message. Coroutine.
def segment_pofiles(configuration, locale):
    """Segment all the pofiles for *locale*.

    Returns the set of all segment filenames written.
    """
    written = set()
    for filename, segments in configuration.segment.items():
        path = configuration.get_messages_dir(locale) / filename
        written.update(segment_pofile(path, segments))
    return written
Segment all the pofiles for `locale`. Returns a set of filenames, all the segment files written.
def load(self, filepath):
    """Load track keys from the binary track file at *filepath*.

    Format: big-endian int32 key count, then per key a packed
    (int32 row, float32 value, int8 kind) record.
    """
    with open(filepath, 'rb') as fd:
        (num_keys,) = struct.unpack(">i", fd.read(4))
        for _ in range(num_keys):
            row, value, kind = struct.unpack('>ifb', fd.read(9))
            self.keys.append(TrackKey(row, value, kind))
Load the track file
def rdirichlet(theta, size=1):
    """Dirichlet random variates.

    Draws gamma variates and normalizes them, dropping the last
    (redundant) component.  A scalar ``theta`` degenerates to 1.
    """
    # range exists on both Python 2 and 3; xrange does not exist on 3.
    gammas = np.vstack([rgamma(theta, 1) for i in range(size)])
    if size > 1 and np.size(theta) > 1:
        return (gammas.T / gammas.sum(1))[:-1].T
    elif np.size(theta) > 1:
        return (gammas[0] / gammas[0].sum())[:-1]
    else:
        return 1.
Dirichlet random variates.
def newline(self):
    """Write the end-of-line sequence and reset the remaining line room."""
    self.write_str(self.eol)
    self.room = self.maxlinelen
Write eol, then start new line.
def _reformat(p, buf): if numpy.ndim(buf) != 1: raise ValueError("Buffer ``buf`` must be 1-d.") if hasattr(p, 'keys'): ans = _gvar.BufferDict(p) if ans.size != len(buf): raise ValueError( "p, buf size mismatch: %d, %d"%(ans.size, len(buf))) ans = _gvar.BufferDict(ans, buf=buf) else: if numpy.size(p) != len(buf): raise ValueError( "p, buf size mismatch: %d, %d"%(numpy.size(p), len(buf))) ans = numpy.array(buf).reshape(numpy.shape(p)) return ans
Apply format of ``p`` to data in 1-d array ``buf``.
def _get(self, key, identity='image'):
    """Deserializing, prefix-adding wrapper around ``_get_raw``.

    Returns None when no value is stored under the prefixed key.
    """
    raw = self._get_raw(add_prefix(key, identity))
    if not raw:
        return None
    if identity == 'image':
        return deserialize_image_file(raw)
    return deserialize(raw)
Deserializing, prefix wrapper for _get_raw
def depth_first_iter(self, self_first=True):
    """Yield this node and every node below it, depth first.

    With ``self_first`` False, children are yielded before self.
    """
    if self_first:
        yield self
    # Iterate a snapshot so mutation during traversal is tolerated.
    for child in list(self.children):
        for descendant in child.depth_first_iter(self_first):
            yield descendant
    if not self_first:
        yield self
Iterate over nodes below this node, optionally yielding children before self.
def get_primary_text(self, item_url):
    """Retrieve the primary text for the given item from the cache.

    :param item_url: the URL of the item, or an Item object
    :returns: the primary text
    :raises ValueError: if the primary text is not in the cache
    """
    cursor = self.conn.cursor()
    cursor.execute("SELECT * FROM primary_texts WHERE item_url=?",
                   (str(item_url),))
    row = cursor.fetchone()
    cursor.close()
    if row is None:
        raise ValueError("Item not present in cache")
    return row[1]
Retrieve the primary text for the given item from the cache. :type item_url: String or Item :param item_url: the URL of the item, or an Item object :rtype: String :returns: the primary text :raises: ValueError if the primary text is not in the cache
def yahoo(base, target):
    """Fetch the base→target exchange rate from Yahoo's CSV quote API."""
    api_url = 'http://download.finance.yahoo.com/d/quotes.csv'
    params = {
        'e': '.csv',
        'f': 'sl1d1t1',
        's': '{0}{1}=X'.format(base, target),
    }
    response = requests.get(api_url, params=params, timeout=1)
    # CSV row is "symbol,rate,date,time"; the rate is the second field.
    rate = response.text.split(',', 2)[1]
    return decimal.Decimal(rate)
Parse data from Yahoo.
def check_status(self):
    """Check the status of the tasks."""
    # First pass: refresh the status of every task that is neither
    # finished (S_OK) nor locked.
    for task in self:
        if task.status in (task.S_OK, task.S_LOCKED):
            continue
        task.check_status()
    # Second pass: promote unsubmitted tasks whose dependencies have
    # all completed to the READY state.
    for task in self:
        if task.status == task.S_LOCKED:
            continue
        if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status):
            task.set_status(task.S_READY, "Status set to Ready")
Check the status of the tasks.
def fileids(self):
    """Return the full paths of the files in this corpus directory."""
    return [os.path.join(self.path, name) for name in os.listdir(self.path)]
Returns files from SemEval2007 Coarse-grain All-words WSD task.
def convert(model, input_features, output_features):
    """Convert a scikit-learn Normalizer to the protobuf spec.

    Parameters
    ----------
    model: Normalizer
        A fitted Normalizer.
    input_features: str
        Name of the input column.
    output_features: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
    """
    if not _HAS_SKLEARN:
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
    _sklearn_util.check_expected_type(model, Normalizer)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'norm'))
    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    spec = _set_transform_interface_params(spec, input_features, output_features)
    # Map sklearn norm names onto the protobuf enum; unknown norms leave
    # the default normType untouched (same as the original if/elif chain).
    norm_types = {
        'l1': _proto__normalizer.L1,
        'l2': _proto__normalizer.L2,
        'max': _proto__normalizer.LMax,
    }
    if model.norm in norm_types:
        spec.normalizer.normType = norm_types[model.norm]
    return _MLModel(spec)
Convert a normalizer model to the protobuf spec. Parameters ---------- model: Normalizer A Normalizer. input_features: str Name of the input column. output_features: str Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model
def load_pascal(image_set, year, devkit_path, shuffle=False):
    """Load one or more pascal voc datasets as a single database.

    Parameters
    ----------
    image_set : str
        comma-separated set names (train, trainval, ...)
    year : str
        comma-separated years (2007, 2012, ...)
    devkit_path : str
        root directory of dataset
    shuffle : bool
        whether to shuffle the initial image list

    Returns
    -------
    Imdb (or ConcatDB when several sets are combined)
    """
    sets = [s.strip() for s in image_set.split(',')]
    assert sets, "No image_set specified"
    years = [y.strip() for y in year.split(',')]
    assert years, "No year specified"
    # Broadcast a single year across sets, or a single set across years.
    if len(sets) > 1 and len(years) == 1:
        years = years * len(sets)
    if len(sets) == 1 and len(years) > 1:
        sets = sets * len(years)
    assert len(sets) == len(years), "Number of sets and year mismatch"
    imdbs = [PascalVoc(s, y, devkit_path, shuffle, is_train=True)
             for s, y in zip(sets, years)]
    return ConcatDB(imdbs, shuffle) if len(imdbs) > 1 else imdbs[0]
wrapper function for loading pascal voc dataset Parameters: ---------- image_set : str train, trainval... year : str 2007, 2012 or combinations splitted by comma devkit_path : str root directory of dataset shuffle : bool whether to shuffle initial list Returns: ---------- Imdb
def get_icon_for(self, brain_or_object):
    """Build the navigation portlet <img> tag for a brain or object.

    Prefers the "_big" variant of the type icon when it is traversable;
    returns an empty string when the type has no icon at all.
    """
    portal_types = api.get_tool("portal_types")
    fti = portal_types.getTypeInfo(api.get_portal_type(brain_or_object))
    icon = fti.getIcon()
    if not icon:
        return ""
    big_icon = icon.replace(".png", "_big.png")
    # Fall back to the regular icon when the big variant does not exist.
    if self.context.restrictedTraverse(big_icon, None) is None:
        big_icon = None
    portal_url = api.get_url(api.get_portal())
    title = api.get_title(brain_or_object)
    html_tag = "<img title='{}' src='{}/{}' width='16' />".format(
        title, portal_url, big_icon or icon)
    logger.info("Generated Icon Tag for {}: {}".format(
        api.get_path(brain_or_object), html_tag))
    return html_tag
Get the navigation portlet icon for the brain or object The cache key ensures that the lookup is done only once per domain name
def platform_from_version(major, minor):
    """Return the minimum platform name able to load class-file version
    major.minor, or None if no known platform range matches."""
    version = (major, minor)
    for low, high, name in _platforms:
        if low <= version <= high:
            return name
    return None
returns the minimum platform version that can load the given class version indicated by major.minor or None if no known platforms match the given version
def crval(self):
    """Get the world coordinate of the reference pixel.

    Falls back to the RA-DEG/DEC-DEG header keywords when the WCS is
    unusable.

    @rtype: float, float
    @raise KeyError: when neither the WCS nor the RA/DEC keywords are
        available.
    """
    try:
        return self.wcs.crval1, self.wcs.crval2
    except Exception as ex:
        logging.debug("Couldn't get CRVAL from WCS: {}".format(ex))
        logging.debug("Trying RA/DEC values")
        try:
            return (float(self['RA-DEG']), float(self['DEC-DEG']))
        except KeyError as ke:
            # BUG FIX: the original constructed this KeyError but never
            # raised it, so the method silently returned None when the
            # fallback keywords were missing.
            raise KeyError(
                "Can't build CRVAL1/2 missing keyword: {}".format(ke.args[0]))
Get the world coordinate of the reference pixel. @rtype: float, float
def get_datetime_issue_in_progress(self, issue):
    """Return the most recent time the issue transitioned to 'In Progress'.

    Walks the changelog from newest to oldest and parses the timestamp
    of the first matching status transition; returns None when the
    issue never entered that state.
    """
    for history in reversed(issue.changelog.histories):
        for item in history.items:
            if item.field == 'status' and item.toString == "In Progress":
                return dateutil.parser.parse(history.created)
If the issue is in progress, gets that most recent time that the issue became 'In Progress'
def detachAcceptMsOriginating():
    """DETACH ACCEPT (mobile originating), Section 9.4.6.2."""
    header = TpPd(pd=0x3)
    msg_type = MessageType(mesType=0x6)
    spare = ForceToStandbyAndSpareHalfOctets()
    return header / msg_type / spare
DETACH ACCEPT Section 9.4.6.2
def targetMed(self):
    """Return the middle byte of the target address for broadcast
    (All-Link Cleanup) messages, else None."""
    if self.target.addr is None or not self._messageFlags.isBroadcast:
        return None
    return self.target.bytes[1]
Return the middle byte of the target message property. Used in All-Link Cleanup message types.