code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_single_int_autoincrement_colname(table_: Table) -> Optional[str]:
    """
    Return the name of the table's single integer ``AUTOINCREMENT`` column,
    or ``None`` if there is not exactly one such column.

    A warning is logged when more than one autoincrement column is found.
    """
    autoinc_count = 0
    integer_autoinc_names = []
    for column in table_.columns:
        if not column.autoincrement:
            continue
        autoinc_count += 1
        if is_sqlatype_integer(column.type):
            integer_autoinc_names.append(column.name)
    if autoinc_count > 1:
        log.warning("Table {!r} has {} autoincrement columns",
                    table_.name, autoinc_count)
    if autoinc_count == 1 and len(integer_autoinc_names) == 1:
        return integer_autoinc_names[0]
    return None
If a table has a single integer ``AUTOINCREMENT`` column, this will return its name; otherwise, ``None``. - It's unlikely that a database has >1 ``AUTOINCREMENT`` field anyway, but we should check. - SQL Server's ``IDENTITY`` keyword is equivalent to MySQL's ``AUTOINCREMENT``. - Verify against SQL Server: .. code-block:: sql SELECT table_name, column_name FROM information_schema.columns WHERE COLUMNPROPERTY(OBJECT_ID(table_schema + '.' + table_name), column_name, 'IsIdentity') = 1 ORDER BY table_name; ... http://stackoverflow.com/questions/87747 - Also: .. code-block:: sql sp_columns 'tablename'; ... which is what SQLAlchemy does (``dialects/mssql/base.py``, in :func:`get_columns`).
def get_target_from_spec(self, spec, relative_to=''):
    """Convert ``spec`` into an address and return the result of ``get_target``.

    :API: public
    """
    address = Address.parse(spec, relative_to=relative_to)
    return self.get_target(address)
Converts `spec` into an address and returns the result of `get_target` :API: public
def ftp_walk(ftpconn: FTP, rootpath=''):
    """Recursively traverse an FTP directory, like ``os.walk``.

    Yields ``(dirpath, dirnames, filenames)`` tuples starting at
    ``rootpath``. A directory that cannot be listed (permission denied)
    prunes that subtree silently.

    :param ftpconn: an open ``ftplib.FTP`` connection
    :param rootpath: directory to start walking from
    """
    current_directory = rootpath
    try:
        directories, files = directory_listing(ftpconn, current_directory)
    except ftplib.error_perm:
        # No permission to list this directory: skip the whole subtree.
        return
    yield current_directory, directories, files
    for name in directories:
        new_path = os.path.join(current_directory, name)
        # ``yield from`` replaces the manual re-yield loop; the original
        # ``for ... else: return`` was redundant (the else clause of a
        # for-loop without break always runs, and a bare return at the
        # end of a generator is a no-op).
        yield from ftp_walk(ftpconn, rootpath=new_path)
Recursively traverse an FTP directory to discover directory listings.
def noisy_wrap(__func: Callable) -> Callable:
    """Decorator that enables DebugPrint for the duration of a call.

    Args:
        __func: Function to wrap.

    Returns:
        Wrapped function. DebugPrint is always disabled again afterwards,
        even if the wrapped function raises. Unlike the original, the
        wrapper now propagates the wrapped function's return value
        (previously it was silently discarded), and ``functools.wraps``
        preserves the wrapped function's metadata.
    """
    import functools

    @functools.wraps(__func)
    def wrapper(*args, **kwargs):
        DebugPrint.enable()
        try:
            return __func(*args, **kwargs)
        finally:
            DebugPrint.disable()
    return wrapper
Decorator to enable DebugPrint for a given function. Args: __func: Function to wrap Returns: Wrapped function
def shrank(self, block=None, percent_diff=0, abs_diff=1):
    """Return whether the block now has fewer nets than last time.

    :param Block block: block to check (defaults to ``self.block``)
    :param Number percent_diff: percentage difference threshold
    :param int abs_diff: absolute difference threshold
    :return: True if the current net count is at or below the threshold
        derived from the previously recorded count.
    """
    target = self.block if block is None else block
    net_count = len(target.logic)
    # Threshold combines a relative and an absolute shrink requirement.
    threshold = self.prev_nets * (1 - percent_diff) - abs_diff
    improved = net_count <= threshold
    self.prev_nets = net_count  # remember for the next comparison
    return improved
Returns whether a block has less nets than before :param Block block: block to check (if changed) :param Number percent_diff: percentage difference threshold :param int abs_diff: absolute difference threshold :return: boolean This function checks whether the change in the number of nets is greater than the percentage and absolute difference thresholds.
def advance(self):
    """Increment the cursor and load the character at the new position.

    Sets ``self.char`` to ``None`` once the cursor runs past the end of
    ``self.raw``.
    """
    self.cursor += 1
    self.char = self.raw[self.cursor] if self.cursor < len(self.raw) else None
Increments the cursor position.
def insert_tile(self, tile_info):
    """Add or replace an entry in the tile cache.

    Args:
        tile_info (TileInfo): The newly registered tile.
    """
    for index, existing in enumerate(self.registered_tiles):
        if existing.slot == tile_info.slot:
            # Same slot: replace in place instead of appending a duplicate.
            self.registered_tiles[index] = tile_info
            return
    self.registered_tiles.append(tile_info)
Add or replace an entry in the tile cache. Args: tile_info (TileInfo): The newly registered tile.
def loglevel(level):
    """Convert any representation of ``level`` to an int appropriately.

    :type level: int or str
    :rtype: int

    >>> loglevel('DEBUG') == logging.DEBUG
    True
    >>> loglevel(10)
    10
    >>> loglevel(None)
    Traceback (most recent call last):
        ...
    ValueError: None is not a proper log level.
    """
    if isinstance(level, int):
        return level
    if isinstance(level, str):
        # Look the named level up on the logging module (e.g. 'debug' -> 10).
        return getattr(logging, level.upper())
    raise ValueError('{0!r} is not a proper log level.'.format(level))
Convert any representation of `level` to an int appropriately. :type level: int or str :rtype: int >>> loglevel('DEBUG') == logging.DEBUG True >>> loglevel(10) 10 >>> loglevel(None) Traceback (most recent call last): ... ValueError: None is not a proper log level.
def write_port(self, port, value):
    """Use a whole port as a bus and write a byte to it.

    :param port: Name of the port ('A' or 'B')
    :param value: Value to write (0-255)
    :raises AttributeError: if ``port`` is neither 'A' nor 'B'
    """
    if port not in ('A', 'B'):
        raise AttributeError('Port {} does not exist, use A or B'.format(port))
    if port == 'A':
        self.GPIOA = value
    else:
        self.GPIOB = value
    self.sync()  # push the new register values to the hardware
Use a whole port as a bus and write a byte to it. :param port: Name of the port ('A' or 'B') :param value: Value to write (0-255)
def GetRequestFormatMode(request, method_metadata):
    """Returns JSON format mode corresponding to a given request and method."""
    if request.path.startswith("/api/v2/"):
        return JsonMode.PROTO3_JSON_MODE

    if request.args.get("strip_type_info", ""):
        return JsonMode.GRR_TYPE_STRIPPED_JSON_MODE

    # Per-method option: strip root types when the metadata entry matching
    # this HTTP method says so.
    for http_method, unused_url, options in method_metadata.http_methods:
        if http_method != request.method:
            continue
        if options.get("strip_root_types", False):
            return JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE

    return JsonMode.GRR_JSON_MODE
Returns JSON format mode corresponding to a given request and method.
def read_ncstream_err(fobj):
    """Read an NcStream error from a file-like object and raise it.

    :param fobj: file-like object positioned at a serialized ``stream.Error``
    :raises RuntimeError: always, carrying the decoded error message
    """
    error_proto = read_proto_object(fobj, stream.Error)
    raise RuntimeError(error_proto.message)
Handle reading an NcStream error from a file-like object and raise as error.
def kill_all_processes(self, check_alive=True, allow_graceful=False):
    """Kill all of the processes.

    Note that this is slower than necessary because it calls kill, wait,
    kill, wait, ... instead of kill, kill, ..., wait, wait, ...

    Args:
        check_alive (bool): Raise an exception if any of the processes
            were already dead.
        allow_graceful (bool): Attempt a graceful termination before a
            forced kill -- presumably; TODO confirm against
            _kill_process_type.
    """
    # Kill the raylet first so workers are taken down before the other
    # services they depend on.
    if ray_constants.PROCESS_TYPE_RAYLET in self.all_processes:
        self._kill_process_type(
            ray_constants.PROCESS_TYPE_RAYLET,
            check_alive=check_alive,
            allow_graceful=allow_graceful)
    # Snapshot the keys: _kill_process_type may mutate all_processes
    # while we iterate -- NOTE(review): inferred from the list() copy.
    for process_type in list(self.all_processes.keys()):
        self._kill_process_type(
            process_type,
            check_alive=check_alive,
            allow_graceful=allow_graceful)
Kill all of the processes. Note that this is slower than necessary because it calls kill, wait, kill, wait, ... instead of kill, kill, ..., wait, wait, ... Args: check_alive (bool): Raise an exception if any of the processes were already dead.
def _validate_jpx_compatibility(self, boxes, compatibility_list): JPX_IDS = ['asoc', 'nlst'] jpx_cl = set(compatibility_list) for box in boxes: if box.box_id in JPX_IDS: if len(set(['jpx ', 'jpxb']).intersection(jpx_cl)) == 0: msg = ("A JPX box requires that either 'jpx ' or 'jpxb' " "be present in the ftype compatibility list.") raise RuntimeError(msg) if hasattr(box, 'box') != 0: self._validate_jpx_compatibility(box.box, compatibility_list)
If there is a JPX box then the compatibility list must also contain 'jpx '.
def send_multipart(self, *args, **kwargs):
    """Wrap send_multipart to prevent state_changed on each partial send.

    The ``__in_send_multipart`` flag marks that a multipart send is in
    progress; a single ``__state_changed()`` fires after the whole send
    finishes (or fails), instead of once per message part.
    """
    self.__in_send_multipart = True
    try:
        msg = super(GreenSocket, self).send_multipart(*args, **kwargs)
    finally:
        # Always clear the flag and emit exactly one state change,
        # even when the underlying send raises.
        self.__in_send_multipart = False
        self.__state_changed()
    return msg
wrap send_multipart to prevent state_changed on each partial send
def SetExpression(self, expression):
    """Set the expression.

    :param expression: a ``lexer.Expression`` instance stored as this
        operator's single argument.
    :raises errors.ParseError: if ``expression`` is not a lexer.Expression.
    """
    if not isinstance(expression, lexer.Expression):
        raise errors.ParseError(
            'Expected expression, got {0:s}.'.format(expression))
    self.args = [expression]
Set the expression.
def _read_config(correlation_id, path, parameters):
    """Read a configuration file, parameterize it, and wrap it in ConfigParams.

    :param correlation_id: (optional) transaction id to trace execution
        through the call chain.
    :param path: a path to the configuration file.
    :param parameters: values used to parameterize the configuration.
    :return: a new ConfigParams object with the configuration values.
    """
    reader = YamlConfigReader(path)
    raw_config = reader._read_object(correlation_id, parameters)
    return ConfigParams.from_value(raw_config)
Reads configuration from a file, parameterizes it with given values and returns a new ConfigParams object. :param correlation_id: (optional) transaction id to trace execution through call chain. :param path: a path to configuration file. :param parameters: values to parameterize the configuration. :return: ConfigParams configuration.
def show_instance(name, call=None):
    """Show the details from Parallels concerning an instance.

    :param name: name of the VE to query
    :param call: must be 'action' (i.e. invoked with -a or --action)
    :raises SaltCloudSystemExit: if not called as an action
    :return: dict of instance attributes keyed by XML tag
    """
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    items = query(action='ve', command=name)

    ret = {}
    for item in items:
        if 'text' in item.__dict__:
            ret[item.tag] = item.text
        else:
            ret[item.tag] = item.attrib
        # Use the public ElementTree iteration API instead of the private
        # ``_children`` attribute (renamed/removed in newer Python versions).
        children = list(item)
        if children:
            ret[item.tag] = {}
            for child in children:
                ret[item.tag][child.tag] = child.attrib

    __utils__['cloud.cache_node'](ret, __active_provider_name__, __opts__)
    return ret
Show the details from Parallels concerning an instance
def view_pmap(token, dstore):
    """Display the mean ProbabilityMap associated to a given source group name.

    :param token: string of the form 'pmap:<grp>'
    :param dstore: datastore containing 'csm_info'
    :return: string representation of the mean ProbabilityMap
    """
    grp = token.split(':')[1]  # the group name after 'pmap:'
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    pgetter = getters.PmapGetter(dstore, rlzs_assoc)
    # Removed the dead ``pmap = {}`` assignment that was immediately
    # overwritten by get_mean().
    pmap = pgetter.get_mean(grp)
    return str(pmap)
Display the mean ProbabilityMap associated to a given source group name
def ll(self, folder="", begin_from_file="", num=-1, all_grant_data=False):
    """List files with their permissions, like ``ls -lah`` in Linux.

    Thin wrapper around :meth:`ls` with ``get_grants=True``.

    Parameters
    ----------
    folder : string
        Path to file on S3.
    begin_from_file : string, optional
        Which file to start listing from; useful for paging through
        large result sets.
    num : integer, optional
        Number of results to return; by default all results are returned.
    all_grant_data : Boolean, optional
        Return more detailed file permission data.
    """
    return self.ls(
        folder=folder,
        begin_from_file=begin_from_file,
        num=num,
        get_grants=True,
        all_grant_data=all_grant_data,
    )
Get the list of files and permissions from S3. This is similar to LL (ls -lah) in Linux: List of files with permissions. Parameters ---------- folder : string Path to file on S3 num: integer, optional number of results to return, by default it returns all results. begin_from_file : string, optional which file to start from on S3. This is usedful in case you are iterating over lists of files and you need to page the result by starting listing from a certain file and fetching certain num (number) of files. all_grant_data : Boolean, optional More detailed file permission data will be returned. Examples -------- >>> from s3utils import S3utils >>> s3utils = S3utils( ... AWS_ACCESS_KEY_ID = 'your access key', ... AWS_SECRET_ACCESS_KEY = 'your secret key', ... AWS_STORAGE_BUCKET_NAME = 'your bucket name', ... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose ... ) >>> import json >>> # We use json.dumps to print the results more readable: >>> my_folder_stuff = s3utils.ll("/test/") >>> print(json.dumps(my_folder_stuff, indent=2)) { "test/myfolder/": [ { "name": "owner's name", "permission": "FULL_CONTROL" } ], "test/myfolder/em/": [ { "name": "owner's name", "permission": "FULL_CONTROL" } ], "test/myfolder/hoho/": [ { "name": "owner's name", "permission": "FULL_CONTROL" } ], "test/myfolder/hoho/.DS_Store": [ { "name": "owner's name", "permission": "FULL_CONTROL" }, { "name": null, "permission": "READ" } ], "test/myfolder/hoho/haha/": [ { "name": "owner's name", "permission": "FULL_CONTROL" } ], "test/myfolder/hoho/haha/ff": [ { "name": "owner's name", "permission": "FULL_CONTROL" }, { "name": null, "permission": "READ" } ], "test/myfolder/hoho/photo.JPG": [ { "name": "owner's name", "permission": "FULL_CONTROL" }, { "name": null, "permission": "READ" } ], }
def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f):
    """Generate a Fourier domain sine-Gaussian.

    Parameters
    ----------
    amp: float
        Amplitude of the sine-Gaussian
    quality: float
        The quality factor
    central_frequency: float
        The central frequency of the sine-Gaussian
    fmin: float
        The minimum frequency to generate the sine-Gaussian over
    fmax: float
        The maximum frequency to generate the sine-Gaussian; determines
        the length of the output vector
    delta_f: float
        The size of the frequency step

    Returns
    -------
    sg: pycbc.types.FrequencySeries
        A Fourier domain sine-Gaussian
    """
    kmin = int(round(fmin / delta_f))
    kmax = int(round(fmax / delta_f))
    freqs = numpy.arange(kmin, kmax) * delta_f
    # Gaussian envelope width in time, set by the quality factor.
    tau = quality / 2 / numpy.pi / central_frequency
    norm = amp * numpy.pi ** 0.5 / 2 * tau
    envelope = norm * numpy.exp(
        -(numpy.pi * tau * (freqs - central_frequency)) ** 2.0)
    envelope *= 1 + numpy.exp(-quality ** 2.0 * freqs / central_frequency)
    out = numpy.zeros(kmax, dtype=numpy.complex128)
    out[kmin:kmax] = envelope
    return pycbc.types.FrequencySeries(out, delta_f=delta_f)
Generate a Fourier domain sine-Gaussian Parameters ---------- amp: float Amplitude of the sine-Gaussian quality: float The quality factor central_frequency: float The central frequency of the sine-Gaussian fmin: float The minimum frequency to generate the sine-Gaussian. This determines the length of the output vector. fmax: float The maximum frequency to generate the sine-Gaussian delta_f: float The size of the frequency step Returns ------- sg: pycbc.types.Frequencyseries A Fourier domain sine-Gaussian
def from_xmldict(cls, xml_dict):
    """Create an `Author` from datacite3 metadata converted by `xmltodict`.

    Parameters
    ----------
    xml_dict : :class:`collections.OrderedDict`
        A `dict`-like object mapping XML content for a single record
        (i.e., the contents of the ``record`` tag in OAI-PMH XML),
        typically generated by :mod:`xmltodict`.
    """
    extra = {}
    if 'affiliation' in xml_dict:
        extra['affiliation'] = xml_dict['affiliation']
    return cls(xml_dict['creatorName'], **extra)
Create an `Author` from a datacite3 metadata converted by `xmltodict`. Parameters ---------- xml_dict : :class:`collections.OrderedDict` A `dict`-like object mapping XML content for a single record (i.e., the contents of the ``record`` tag in OAI-PMH XML). This dict is typically generated from :mod:`xmltodict`.
def plot_pauli_transfer_matrix(ptransfermatrix, ax, labels, title):
    """Visualize the Pauli Transfer Matrix of a process.

    :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix
    :param ax: The matplotlib axes.
    :param labels: The labels for the operator basis states.
    :param title: The title for the plot
    :return: The modified axis object.
    :rtype: AxesSubplot
    """
    image = ax.imshow(ptransfermatrix, interpolation="nearest",
                      cmap=rigetti_3_color_cm, vmin=-1, vmax=1)
    plt.colorbar(image, ax=ax)
    basis_size = len(labels)
    ax.set_xticks(range(basis_size))
    ax.set_yticks(range(basis_size))
    ax.set_xlabel("Input Pauli Operator", fontsize=20)
    ax.set_ylabel("Output Pauli Operator", fontsize=20)
    ax.set_title(title, fontsize=25)
    ax.set_xticklabels(labels, rotation=45)
    ax.set_yticklabels(labels)
    ax.grid(False)
    return ax
Visualize the Pauli Transfer Matrix of a process. :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix :param ax: The matplotlib axes. :param labels: The labels for the operator basis states. :param title: The title for the plot :return: The modified axis object. :rtype: AxesSubplot
def cardinality(self):
    """Count the strings accepted by the FSM.

    Consider the FSM as a set of strings and return the cardinality of
    that set.

    :raises OverflowError: if the language is infinite (a live cycle is
        reached while counting).
    """
    counts = {}

    def count_from(state):
        # Dead states contribute no strings.
        if not self.islive(state):
            counts[state] = 0
            return 0
        if state in counts:
            if counts[state] is None:
                # Revisited a state still being counted: a live cycle
                # exists, hence infinitely many strings.
                raise OverflowError(state)
            return counts[state]
        counts[state] = None  # mark in-progress for cycle detection
        total = 1 if state in self.finals else 0
        for symbol in self.map.get(state, {}):
            total += count_from(self.map[state][symbol])
        counts[state] = total
        return total

    return count_from(self.initial)
Consider the FSM as a set of strings and return the cardinality of that set, or raise an OverflowError if there are infinitely many
def get_data_source_bulk_request(self, rids, limit=5):
    """Fetch each data source and its datapoints for a particular device.

    :param rids: list of resource ids to fetch
    :param limit: maximum number of datapoints per data source
    :return: decoded JSON response on success, otherwise {}
    """
    headers = {
        'User-Agent': self.user_agent(),
        'Content-Type': self.content_type(),
    }
    headers.update(self.headers())
    url = '{0}/data-sources/[{1}]/data?limit={2}'.format(
        self.portals_url(), ",".join(rids), limit)
    r = requests.get(url, headers=headers, auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    print("Something went wrong: <{0}>: {1}".format(
        r.status_code, r.reason))
    return {}
This grabs each datasource and its multiple datapoints for a particular device.
def json_dumps(self, data, **options):
    """Wrapper around `json.dumps` that uses a special JSON encoder.

    Keyword ``options`` override the defaults (sorted keys, 2-space
    indent).
    """
    params = {'sort_keys': True, 'indent': 2}
    params.update(options)
    # BUG FIX: the original compared version components as *strings*
    # (e.g. ['10', '0'] < ['2', '1', '3'] lexicographically), which is
    # wrong for multi-digit parts; compare integer tuples instead.
    # Stdlib json has no __version__ (simplejson defines it), so fall
    # back gracefully when it is absent.
    version = getattr(json, '__version__', None)
    if version is not None:
        try:
            version_info = tuple(int(part) for part in version.split('.')[:3])
        except ValueError:
            version_info = ()
        if version_info >= (2, 1, 3):
            # Avoid simplejson's use_decimal default (changes float handling).
            params.update({'use_decimal': False})
    return json.dumps(data, cls=DjangoJSONEncoder, **params)
Wrapper around `json.dumps` that uses a special JSON encoder.
def error(code, message, **kwargs):
    """Raise the exception registered for ``code`` and record it in the journal.

    :param code: error code previously registered with the Logger
    :param message: human-readable error message
    :raises: the exception type mapped to ``code``
    """
    mapping = Logger._error_code_to_exception
    assert code in mapping
    exc_type, domain = mapping[code]
    exception = exc_type(message, **kwargs)
    Logger._log(code, exception.message, ERROR, domain)
    raise exception
Call this to raise an exception and have it stored in the journal
def _get_elements(self, site):
    """Get the list of elements for a Site.

    Args:
        site (Site): Site to assess

    Returns:
        [Element]: List of elements
    """
    try:
        if isinstance(site.specie, Element):
            return [site.specie]
        return [Element(site.specie)]
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt). Sites without a single
        # ``specie`` (e.g. disordered sites) raise here, so fall back
        # to the full species list.
        return site.species.elements
Get the list of elements for a Site Args: site (Site): Site to assess Returns: [Element]: List of elements
def yaml_dump_hook(cfg, text: bool=False):
    """Dump all configuration data as YAML.

    :param cfg: object carrying the config, the target fd, and the dumper.
    :param text: when True, return the YAML document as a string instead
        of writing it to ``cfg.fd`` (in which case None is returned).
    """
    data = cfg.config.dump()
    if text:
        return yaml.dump(data, Dumper=cfg.dumper, default_flow_style=False)
    yaml.dump(data, cfg.fd, Dumper=cfg.dumper, default_flow_style=False)
Dumps all the data into a YAML file.
def user_exists(self, username):
    """Return whether a user with username ``username`` exists.

    :param str username: username of user
    :return: whether a user with the specified username exists
    :rtype: bool
    :raises NetworkFailure: if there is an error communicating with the
        server
    """
    return self._get("/users/{}".format(username)).ok
Returns whether a user with username ``username`` exists. :param str username: username of user :return: whether a user with the specified username exists :rtype: bool :raises NetworkFailure: if there is an error communicating with the server
def create_url(artist, song):
    """Create the URL in the LyricWikia format."""
    return '{base}/wiki/{artist}:{song}'.format(
        base=__BASE_URL__, artist=urlize(artist), song=urlize(song))
Create the URL in the LyricWikia format
def detach(self):
    """Detach this element from its parent.

    Removes this element from the parent's child list and clears
    ``self.parent``. A no-op when there is no parent.

    @return: This element, removed from its parent's child list and with
        I{parent}=I{None}.
    @rtype: L{Element}
    """
    parent = self.parent
    if parent is not None:
        if self in parent.children:
            parent.children.remove(self)
        self.parent = None
    return self
Detach from parent. @return: This element removed from its parent's child list and I{parent}=I{None} @rtype: L{Element}
def abort(self):
    """Abort a running command.

    Synchronous commands cannot be aborted; for those the current object
    is returned unchanged.

    @return: A new ApiCommand object with the updated information.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
        return self
    resp = self._get_resource_root().post(self._path() + '/abort')
    return ApiCommand.from_json_dict(resp, self._get_resource_root())
Abort a running command. @return: A new ApiCommand object with the updated information.
def get_squares(self, player=None):
    """Return the board squares, optionally filtered to one player.

    :param player: if given (truthy), return the indices of squares that
        belong to that player; otherwise return the raw squares list.
    """
    if not player:
        return self.squares
    return [index for index, owner in enumerate(self.squares)
            if owner == player]
squares that belong to a player
def __fetch_heatmap_data_from_profile(self):
    """Create heatmap data from profile information.

    Reads the profiled source file into ``self.pyfile.lines`` and builds
    ``self.pyfile.data``, an (n_lines, 1) array whose entries are the
    total recorded time per source line (0.0 for unprofiled lines).
    """
    # Read the source lines, stripping the trailing newline; the leading
    # space is presumably padding for display alignment -- TODO confirm.
    with open(self.pyfile.path, "r") as file_to_read:
        for line in file_to_read:
            self.pyfile.lines.append(" " + line.strip("\n"))

    # Total number of lines in the file.
    self.pyfile.length = len(self.pyfile.lines)

    # Per-line profile data keyed by line number.
    # NOTE(review): assumes each entry value iterates as (_, time) pairs
    # -- confirm against __get_line_profile_data().
    line_profiles = self.__get_line_profile_data()

    # Build one data point per source line (profile keys are 1-indexed
    # line numbers).
    arr = []
    for line_num in range(1, self.pyfile.length + 1):
        if line_num in line_profiles:
            # Sum the recorded times for this line across all entries.
            line_times = [
                ltime for _, ltime in line_profiles[line_num].values()
            ]
            arr.append([sum(line_times)])
        else:
            arr.append([0.0])
    self.pyfile.data = np.array(arr)
Method to create heatmap data from profile information.
def create_set(self, set_id, etype, entities):
    """Create a set of entities and upload it to FireCloud.

    Args:
        set_id (str): name of the new set
        etype (str): one of {"sample", "pair", "participant"}
        entities: iterable of firecloud.Entity objects, all of type
            ``etype``

    Raises:
        ValueError: if ``etype`` is unsupported, or an entity's type
            does not match ``etype``.
    """
    if etype not in {"sample", "pair", "participant"}:
        raise ValueError("Unsupported entity type:" + str(etype))

    # TSV payload header: membership:<etype>_set_id\t<etype>_id
    payload = "membership:" + etype + "_set_id\t" + etype + "_id\n"
    for entity in entities:
        if entity.etype != etype:
            raise ValueError(
                "Entity type '" + entity.etype + "' does not match "
                + "set type '" + etype + "'"
            )
        payload += set_id + '\t' + entity.entity_id + '\n'

    r = fapi.upload_entities(self.namespace, self.name,
                             payload, self.api_url)
    fapi._check_response_code(r, 201)
Create a set of entities and upload to FireCloud. Args etype (str): one of {"sample, "pair", "participant"} entities: iterable of firecloud.Entity objects.
def _to_unicode(self, data, encoding, errors='strict'):
    """Decode a byte string into Unicode, honoring any leading BOM.

    If ``data`` starts with a recognized byte-order mark, the BOM is
    stripped and the corresponding encoding overrides ``encoding``;
    otherwise ``data`` is decoded with the given ``encoding``.

    %encoding is a string recognized by encodings.aliases.

    NOTE: Python 2 code (uses the ``unicode`` builtin and byte-string
    literals).
    """
    # UTF-16BE BOM; the next-two-bytes check ensures a UTF-32 BOM is
    # not misread as UTF-16.
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        encoding = 'utf-16be'
        data = data[2:]
    # UTF-16LE BOM (and not actually the UTF-32LE BOM ff fe 00 00).
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        encoding = 'utf-16le'
        data = data[2:]
    # UTF-8 BOM.
    elif data[:3] == '\xef\xbb\xbf':
        encoding = 'utf-8'
        data = data[3:]
    # UTF-32BE BOM.
    elif data[:4] == '\x00\x00\xfe\xff':
        encoding = 'utf-32be'
        data = data[4:]
    # UTF-32LE BOM.
    elif data[:4] == '\xff\xfe\x00\x00':
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding, errors)
    return newdata
Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases
def get_collection(source, name, collection_format, default):
    """Get the collection named ``name`` from ``source``, decoded per
    ``collection_format``.

    Separator-based formats (those in COLLECTION_SEP) split a single
    value; 'brackets' reads repeated ``name[]`` keys; any other format
    reads repeated ``name`` keys.
    """
    if collection_format in COLLECTION_SEP:
        separator = COLLECTION_SEP[collection_format]
        raw = source.get(name, None)
        return default if raw is None else raw.split(separator)
    lookup = name + '[]' if collection_format == 'brackets' else name
    return source.getall(lookup, default)
Get the collection named `name` from the given `source`, formatted according to `collection_format`.
def close(self):
    """Tell all the workers to quit.

    A no-op when called from a worker process; only the master sends the
    shutdown message to each worker.
    """
    if self.is_worker():
        return
    for worker in self.workers:
        # A None payload on tag 0 is the shutdown signal.
        self.comm.send(None, worker, 0)
Tell all the workers to quit.
def get_param_names(cls):
    """Return a list of plottable CBC parameter variables.

    A parameter is any attribute of the class implemented as a property.

    Returns:
        list of str: names of all property attributes, in the order
        reported by ``inspect.getmembers`` (alphabetical).
    """
    # isinstance (rather than ``type(...) == property``) also accepts
    # subclasses of property, per standard type-checking practice.
    return [name for name, value in inspect.getmembers(cls)
            if isinstance(value, property)]
Returns a list of plottable CBC parameter variables
def getWifiState(self):
    """Gets the Wi-Fi enabled state.

    Runs ``dumpsys wifi`` on the device and matches the first line of
    output against the enabled/disabled regexes.

    @return: One of WIFI_STATE_ENABLED, WIFI_STATE_DISABLED or
             WIFI_STATE_UNKNOWN (the transitional *-ING states listed in
             the original docs are never returned by this code).
    """
    # NOTE: Python 2 code (print-to-stderr chevron syntax below).
    result = self.device.shell('dumpsys wifi')
    if result:
        # Only the first line of the dumpsys output carries the state.
        state = result.splitlines()[0]
        if self.WIFI_IS_ENABLED_RE.match(state):
            return self.WIFI_STATE_ENABLED
        elif self.WIFI_IS_DISABLED_RE.match(state):
            return self.WIFI_STATE_DISABLED
        # Unrecognized state line: report it for debugging.
        print >> sys.stderr, "UNKNOWN WIFI STATE:", state
    return self.WIFI_STATE_UNKNOWN
Gets the Wi-Fi enabled state. @return: One of WIFI_STATE_DISABLED, WIFI_STATE_DISABLING, WIFI_STATE_ENABLED, WIFI_STATE_ENABLING, WIFI_STATE_UNKNOWN
def register(model, fields, restrict_to=None, manager=None, properties=None, contexts=None):
    """Tell vinaigrette which fields on a Django model should be translated.

    Arguments:
        model -- The relevant model class
        fields -- A list or tuple of field names, e.g. ['name', 'nickname']
        restrict_to -- Optional. A django.db.models.Q object representing
            the subset of objects to collect translation strings from.
        manager -- Optional. A manager reference (e.g. Person.objects) to
            use when collecting translation strings.
        properties -- A dictionary of read-only properties composed of
            more than one field, e.g. {'full_name': ['first_name', 'last_name']}
        contexts -- A dictionary of (pgettext) contexts per field, e.g.
            {'name': 'db category name'}

    Note that restrict_to and manager are only used when collecting
    translation strings; gettext lookups are always performed on the
    registered fields for all objects of the model.
    """
    if not contexts:
        contexts = {}
    # Record the registration in the module-level registry.
    global _REGISTRY
    _REGISTRY[model] = {
        'fields': fields,
        'contexts': contexts,
        'restrict_to': restrict_to,
        'manager': manager,
        'properties': properties,
    }
    # Replace each translated field with a descriptor that performs the
    # gettext lookup (with an optional per-field pgettext context).
    for field in fields:
        setattr(model, field, VinaigretteDescriptor(field, contexts.get(field, None)))
    # Accessor for the raw database value, bypassing the descriptor.
    model.untranslated = lambda self, fieldname: self.__dict__[fieldname]
    # Hook save so stored values stay untranslated around the save cycle
    # -- presumably; TODO confirm against the signal handlers.
    pre_save.connect(_vinaigrette_pre_save, sender=model)
    post_save.connect(_vinaigrette_post_save, sender=model)
Tell vinaigrette which fields on a Django model should be translated. Arguments: model -- The relevant model class fields -- A list or tuple of field names. e.g. ['name', 'nickname'] restrict_to -- Optional. A django.db.models.Q object representing the subset of objects to collect translation strings from. manager -- Optional. A reference to a manager -- e.g. Person.objects -- to use when collecting translation strings. properties -- A dictionary of "read only" properties that are composed by more that one field e.g. {'full_name': ['first_name', 'last_name']} contexts -- A dictionary including any (pgettext) context that may need to be applied to each field. e.g. {'name': 'db category name', 'description': 'db detailed category description'} Note that both restrict_to and manager are only used when collecting translation strings. Gettext lookups will always be performed on relevant fields for all objects on registered models.
def open(filename, frame='unspecified'):
    """Create a Point from data saved in a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load data from.
    frame : :obj:`str`
        The frame to apply to the created point.

    Returns
    -------
    :obj:`Point`
        A point created from the data in the file.
    """
    # NOTE: shadows the builtin ``open``; kept for API compatibility.
    return Point(BagOfPoints.load_data(filename), frame)
Create a Point from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created point. Returns ------- :obj:`Point` A point created from the data in the file.
def _normalize_number_values(self, parameters): for key, value in parameters.items(): if isinstance(value, (int, float)): parameters[key] = str(Decimal(value).normalize(self._context))
Assures equal precision for all number values
def get_index_line(self, lnum):
    """Return the index information for a 1-indexed line number.

    Prints an error to stderr and exits the program when ``lnum`` is out
    of range (mirroring the original behavior).
    """
    if lnum < 1:
        sys.stderr.write("ERROR: line number should be greater than zero\n")
        sys.exit()
    if lnum > len(self._lines):
        sys.stderr.write("ERROR: too far this line nuber is not in index\n")
        sys.exit()
    return self._lines[lnum - 1]
Take the 1-indexed line number and return its index information
def get_parent_device(self):
    """Retrieve the parent device's instance ID string.

    Returns an empty string when there is no parent instance ID or the
    Windows Configuration Manager lookup fails.
    """
    if not self.parent_instance_id:
        return ""
    # Fixed-size TCHAR buffer sized to the CM API's maximum device ID length.
    dev_buffer_type = winapi.c_tchar * MAX_DEVICE_ID_LEN
    dev_buffer = dev_buffer_type()
    try:
        # CM_Get_Device_ID returns 0 (CR_SUCCESS) on success.
        if winapi.CM_Get_Device_ID(self.parent_instance_id, byref(dev_buffer), MAX_DEVICE_ID_LEN, 0) == 0:
            return dev_buffer.value
        return ""
    finally:
        # Explicitly drop the ctypes buffer objects.
        del dev_buffer
        del dev_buffer_type
Retrieve the parent device's instance ID string.
def auth_password(self, username, password, event=None, fallback=True):
    """Authenticate to the server using a password, over the encrypted link.

    If ``event`` is passed, this returns immediately and the event is
    triggered when authentication succeeds or fails; otherwise the call
    blocks until the attempt completes, raising on failure. If the server
    rejects plain password auth but supports "keyboard-interactive" and
    ``fallback`` is True, an interactive attempt is made that answers the
    single prompt with ``password``.

    :param str username: the username to authenticate as
    :param basestring password: the password to authenticate with
    :param .threading.Event event: optional event triggered when the
        attempt completes (successfully or not)
    :param bool fallback: try keyboard-interactive if password auth is
        not supported by the server
    :return: list of auth types permissible for the next stage of
        authentication (normally empty)
    :raises: `.BadAuthenticationType`, `.AuthenticationException`,
        `.SSHException` (only when no event was passed in)
    """
    # Never send a password before key exchange secures the link.
    if (not self.active) or (not self.initial_kex_done):
        raise SSHException("No existing session")
    if event is None:
        my_event = threading.Event()
    else:
        my_event = event
    self.auth_handler = AuthHandler(self)
    self.auth_handler.auth_password(username, password, my_event)
    if event is not None:
        # Caller wants to wait on the event themselves.
        return []
    try:
        return self.auth_handler.wait_for_response(my_event)
    except BadAuthenticationType as e:
        # Password auth rejected: optionally fall back to
        # keyboard-interactive, answering its prompt with the password.
        if not fallback or ("keyboard-interactive" not in e.allowed_types):
            raise
        try:
            def handler(title, instructions, fields):
                if len(fields) > 1:
                    # More than one prompt cannot be answered automatically.
                    raise SSHException("Fallback authentication failed.")
                if len(fields) == 0:
                    # A follow-up request with zero fields just needs an
                    # empty response.
                    return []
                return [password]
            return self.auth_interactive(username, handler)
        except SSHException:
            # Fallback failed too: re-raise the original error.
            raise e
Authenticate to the server using a password. The username and password are sent over an encrypted link. If an ``event`` is passed in, this method will return immediately, and the event will be triggered once authentication succeeds or fails. On success, `is_authenticated` will return ``True``. On failure, you may use `get_exception` to get more detailed error information. Since 1.1, if no event is passed, this method will block until the authentication succeeds or fails. On failure, an exception is raised. Otherwise, the method simply returns. Since 1.5, if no event is passed and ``fallback`` is ``True`` (the default), if the server doesn't support plain password authentication but does support so-called "keyboard-interactive" mode, an attempt will be made to authenticate using this interactive mode. If it fails, the normal exception will be thrown as if the attempt had never been made. This is useful for some recent Gentoo and Debian distributions, which turn off plain password authentication in a misguided belief that interactive authentication is "more secure". (It's not.) If the server requires multi-step authentication (which is very rare), this method will return a list of auth types permissible for the next step. Otherwise, in the normal case, an empty list is returned. 
:param str username: the username to authenticate as :param basestring password: the password to authenticate with :param .threading.Event event: an event to trigger when the authentication attempt is complete (whether it was successful or not) :param bool fallback: ``True`` if an attempt at an automated "interactive" password auth should be made if the server doesn't support normal password auth :return: list of auth types permissible for the next stage of authentication (normally empty) :raises: `.BadAuthenticationType` -- if password authentication isn't allowed by the server for this user (and no event was passed in) :raises: `.AuthenticationException` -- if the authentication failed (and no event was passed in) :raises: `.SSHException` -- if there was a network error
def _discover(self):
    """Find and install all extensions.

    Loads every ``yamlsettings10`` entry point; callables are invoked
    to produce the extension instance before being added.
    """
    for entry_point in pkg_resources.iter_entry_points('yamlsettings10'):
        extension = entry_point.load()
        if callable(extension):
            extension = extension()
        self.add(extension)
Find and install all extensions
def by_email_address(cls, email):
    """Return the user object whose email address is ``email``, or None."""
    query = DBSession.query(cls).filter_by(email_address=email)
    return query.first()
Return the user object whose email address is ``email``.
def LogLikelihood(self, data):
    """Compute the log likelihood of the data.

    Selects a random vector of probabilities from this distribution and
    evaluates the data against its first ``len(data)`` components.

    Returns:
        float: log probability (``-inf`` when the data is longer than
        the parameter vector).
    """
    m = len(data)
    if self.n < m:
        return float('-inf')
    probs = self.Random()
    log_terms = numpy.log(probs[:m]) * data
    return log_terms.sum()
Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability
def safe_translation_getter(self, field, default=None, language_code=None, any_language=False):
    """Fetch a translated property, with a default when translations are missing.

    Returns ``default`` when both the translation and the fallback
    language are missing. With ``any_language=True``, other languages are
    also searched for a usable value (useful for e.g. "title" fields).

    :param field: name of the translated field
    :param default: value (or callable producing one) returned when no
        translation is found
    :param language_code: optional language to query instead of the
        current one
    :param any_language: when True, fall back to any available translation
    """
    meta = self._parler_meta._get_extension_by_field(field)

    if language_code and language_code != self._current_language:
        # Explicit language requested: query that translation directly
        # (with fallback enabled).
        try:
            tr_model = self._get_translated_model(language_code, meta=meta, use_fallback=True)
            return getattr(tr_model, field)
        except TranslationDoesNotExist:
            pass
    else:
        # Default path: the attribute access goes through the translated
        # field descriptor for the current language.
        try:
            return getattr(self, field)
        except TranslationDoesNotExist:
            pass

    if any_language:
        # Last resort: any translation that has this field set.
        translation = self._get_any_translated_model(meta=meta)
        if translation is not None:
            try:
                return getattr(translation, field)
            except KeyError:
                pass

    # A callable default is evaluated lazily (versionchanged 1.5).
    if callable(default):
        return default()
    else:
        return default
Fetch a translated property, and return a default value when both the translation and fallback language are missing. When ``any_language=True`` is used, the function also looks into other languages to find a suitable value. This feature can be useful for "title" attributes for example, to make sure there is at least something being displayed. Also consider using ``field = TranslatedField(any_language=True)`` in the model itself, to make this behavior the default for the given field. .. versionchanged 1.5:: The *default* parameter may also be a callable.
def str2hashalgo(description):
    """Convert a hash algorithm name (per the OATH specifications) to a
    Python object implementing the digest interface.

    :param description: the name of the hash algorithm, e.g. ``'SHA1'``
    :rtype: a hash algorithm class constructor
    :raises ValueError: if hashlib provides no such algorithm
    """
    algorithm = getattr(hashlib, description.lower(), None)
    if callable(algorithm):
        return algorithm
    raise ValueError('Unknown hash algorithm %s' % description)
Convert the name of a hash algorithm as described in the OATH specifications, to a python object handling the digest algorithm interface, PEP-xxx. :param description the name of the hash algorithm, example :rtype: a hash algorithm class constructor
def get_or_create_candidate_election(self, row, election, candidate, party):
    """Update or create the CandidateElection for a given election.

    Delegates to the election model's ``update_or_create_candidate``
    method, passing the party's aggregation flag and the row's
    uncontested flag.
    """
    return election.update_or_create_candidate(
        candidate,
        party.aggregate_candidates,
        row["uncontested"],
    )
For a given election, this function updates or creates the CandidateElection object using the model method on the election.
def get_cameras_rules(self):
    """Return the camera rules, or None when the event query fails."""
    event = self.publish_and_get_event("rules")
    if not event:
        return None
    return event.get('properties')
Return the camera rules.
def recursive(self):
    """True when at least one member is of the same type as this CustomType."""
    own_name = self.name.lower()
    return any(
        member.kind is not None and member.kind.lower() == own_name
        for member in self.members.values()
    )
When True, this CustomType has at least one member that is of the same type as itself.
def ip(addr, version=None):
    """Convert a raw string to a canonical IP address string.

    Normalizes leading zeros, zero-grouping and case, e.g.
    '2001:0dB8:85a3::8A2e:0370:7334' -> '2001:db8:85a3::8a2e:370:7334'.

    :param addr: the raw string containing the IP address
    :param version: (optional) insist on a specific IP version (4 or 6)
    :raises ValueError: if ``addr`` is not of the requested version
    :return: the IP address in a standard, lowercase format
    """
    parsed = IPAddress(addr)
    if version and parsed.version != version:
        raise ValueError("{} is not an ipv{} address".format(addr, version))
    return py23_compat.text_type(parsed)
Converts a raw string to a valid IP address. Optional version argument will detect that \ object matches specified version. Motivation: the groups of the IP addresses may contain leading zeros. IPv6 addresses can \ sometimes contain uppercase characters. E.g.: 2001:0dB8:85a3:0000:0000:8A2e:0370:7334 has \ the same logical value as 2001:db8:85a3::8a2e:370:7334. However, their values as strings are \ not the same. :param addr: the raw string containing the value of the IP Address :param version: (optional) insist on a specific IP address version. :type version: int. :return: a string containing the IP Address in a standard format (no leading zeros, \ zeros-grouping, lowercase) Example: .. code-block:: python >>> ip('2001:0dB8:85a3:0000:0000:8A2e:0370:7334') u'2001:db8:85a3::8a2e:370:7334'
def _try_convert_to_int_index(cls, data, copy, name, dtype):
    """Attempt to convert an array of data into an integer index.

    Parameters
    ----------
    data : The data to convert.
    copy : Whether to copy the data or not.
    name : The name of the index returned.
    dtype : Target dtype hint; unsigned dtypes skip the signed attempt.

    Returns
    -------
    int_index : data converted to either an Int64Index or a UInt64Index

    Raises
    ------
    ValueError if the conversion was not successful.
    """
    from .numeric import Int64Index, UInt64Index
    if not is_unsigned_integer_dtype(dtype):
        # Try signed int64 first, unless an unsigned dtype was requested
        # (in which case int64 could wrongly win over uint64).
        try:
            res = data.astype('i8', copy=False)
            # Round-trip equality check guards against lossy conversion
            # (overflow/truncation).
            if (res == data).all():
                return Int64Index(res, copy=copy, name=name)
        except (OverflowError, TypeError, ValueError):
            pass
    # int64 failed or was skipped: try uint64 for large positive values.
    try:
        res = data.astype('u8', copy=False)
        if (res == data).all():
            return UInt64Index(res, copy=copy, name=name)
    except (OverflowError, TypeError, ValueError):
        pass
    raise ValueError
Attempt to convert an array of data into an integer index. Parameters ---------- data : The data to convert. copy : Whether to copy the data or not. name : The name of the index returned. Returns ------- int_index : data converted to either an Int64Index or a UInt64Index Raises ------ ValueError if the conversion was not successful.
def dist_calc(loc1, loc2):
    """Calculate the distance in km between two points.

    Uses the flat Earth approximation. Better things are available for
    this, like `gdal <http://www.gdal.org/>`_.

    :type loc1: tuple
    :param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
    :type loc2: tuple
    :param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
    :returns: Distance between points in km.
    :rtype: float
    """
    earth_radius = 6371.009  # mean Earth radius in km
    dlat = np.radians(abs(loc1[0] - loc2[0]))
    dlong = np.radians(abs(loc1[1] - loc2[1]))
    ddepth = abs(loc1[2] - loc2[2])
    mean_lat = np.radians((loc1[0] + loc2[0]) / 2)
    # Horizontal distance on the flat-Earth approximation ...
    horizontal = earth_radius * np.sqrt(
        dlat ** 2 + (np.cos(mean_lat) * dlong) ** 2)
    # ... combined with the depth difference via Pythagoras.
    return np.sqrt(horizontal ** 2 + ddepth ** 2)
Function to calculate the distance in km between two points. Uses the flat Earth approximation. Better things are available for this, like `gdal <http://www.gdal.org/>`_. :type loc1: tuple :param loc1: Tuple of lat, lon, depth (in decimal degrees and km) :type loc2: tuple :param loc2: Tuple of lat, lon, depth (in decimal degrees and km) :returns: Distance between points in km. :rtype: float
def export_subprocess_info(bpmn_diagram, subprocess_params, output_element):
    """Add subprocess node attributes and children to an exported XML element.

    :param bpmn_diagram: BPMNDiagramGraph instance representing a BPMN
        process diagram,
    :param subprocess_params: dictionary with the given subprocess
        parameters,
    :param output_element: object representing the BPMN XML 'subprocess'
        element being populated.
    """
    output_element.set(consts.Consts.triggered_by_event,
                       subprocess_params[consts.Consts.triggered_by_event])
    if consts.Consts.default in subprocess_params \
            and subprocess_params[consts.Consts.default] is not None:
        output_element.set(consts.Consts.default,
                           subprocess_params[consts.Consts.default])

    subprocess_id = subprocess_params[consts.Consts.id]
    # Export every child node of the subprocess ...
    for node in bpmn_diagram.get_nodes_list_by_process_id(subprocess_id):
        BpmnDiagramGraphExport.export_node_data(
            bpmn_diagram, node[0], node[1], output_element)
    # ... and every flow belonging to it (params live at index 2).
    for flow in bpmn_diagram.get_flows_list_by_process_id(subprocess_id):
        BpmnDiagramGraphExport.export_flow_process_data(flow[2], output_element)
Adds Subprocess node attributes to exported XML element :param bpmn_diagram: BPMNDiagramGraph class instantion representing a BPMN process diagram, :param subprocess_params: dictionary with given subprocess parameters, :param output_element: object representing BPMN XML 'subprocess' element.
def load_csv(path, delimiter=','):
    """Load a CSV file from ``path`` and yield its rows as dicts.

    Usage:
        for row in load_csv('/path/to/file'):
            print(row)
    or
        list(load_csv('/path/to/file'))

    :param path: file path
    :param delimiter: CSV delimiter
    :return: a generator yielding one dict per CSV row
    :raises ClientException: if the file cannot be opened
    """
    try:
        # Text mode with newline='' is what the csv module requires on
        # Python 3; the original opened in binary ('rb'), which makes
        # DictReader fail with str/bytes errors.
        with open(path, newline='') as csvfile:
            for row in DictReader(csvfile, delimiter=delimiter):
                yield row
    except (OSError, IOError):
        raise ClientException("File not found: {}".format(path))
Load CSV file from path and yield CSV rows Usage: for row in load_csv('/path/to/file'): print(row) or list(load_csv('/path/to/file')) :param path: file path :param delimiter: CSV delimiter :return: a generator where __next__ is a row of the CSV
def set_servers(self, servers):
    """Instantiate a Protocol object for each given server.

    :param servers: a single server address or a list of addresses
    :type servers: str or list
    :return: Returns nothing
    :rtype: None
    """
    if isinstance(servers, six.string_types):
        servers = [servers]

    assert servers, "No memcached servers supplied"

    self._servers = []
    for server in servers:
        self._servers.append(Protocol(
            server=server,
            username=self.username,
            password=self.password,
            compression=self.compression,
            socket_timeout=self.socket_timeout,
            pickle_protocol=self.pickle_protocol,
            pickler=self.pickler,
            unpickler=self.unpickler,
        ))
Iter to a list of servers and instantiate Protocol class. :param servers: A list of servers :type servers: list :return: Returns nothing :rtype: None
def _create_extractors(col_params):
    """Create one extractor per column parameter.

    Args:
      col_params: List of ListSessionGroupsRequest.ColParam protobufs.

    Returns:
      A list of extractor functions aligned with ``col_params``.
    """
    return [_create_extractor(col_param) for col_param in col_params]
Creates extractors to extract properties corresponding to 'col_params'. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. Returns: A list of extractor functions. The ith element in the returned list extracts the column corresponding to the ith element of _request.col_params
def _cont_norm_running_quantile_regions_mp(wl, fluxes, ivars, q, delta_lambda,
                                           ranges, n_proc=2, verbose=False):
    """Continuum-normalize spectra chunk-by-chunk using a running quantile.

    Multi-processing variant of _cont_norm_running_quantile_regions().

    :param wl: wavelength array
    :param fluxes: 2-D flux array (spectra x pixels)
    :param ivars: 2-D inverse-variance array, same shape as fluxes
    :param q: quantile to use for the continuum estimate
    :param delta_lambda: width of the running window
    :param ranges: (nchunks, 2) array of [start, stop) pixel chunks
    :param n_proc: number of worker processes
    :param verbose: print per-chunk progress if True
    :return: (norm_fluxes, norm_ivars) arrays matching the input shapes
    """
    print("contnorm.py: continuum norm using running quantile")
    print("Taking spectra in %s chunks" % len(ranges))
    nchunks = len(ranges)
    norm_fluxes = np.zeros(fluxes.shape)
    norm_ivars = np.zeros(ivars.shape)
    # Bug fix: `xrange` is Python-2-only; `range` is equivalent here and
    # works on both interpreters.
    for i in range(nchunks):
        chunk = ranges[i, :]
        start = chunk[0]
        stop = chunk[1]
        if verbose:
            print('@Bo Zhang: Going to normalize Chunk [%d/%d], pixel:[%d, %d] ...'
                  % (i + 1, nchunks, start, stop))
        output = _cont_norm_running_quantile_mp(
            wl[start:stop], fluxes[:, start:stop],
            ivars[:, start:stop], q, delta_lambda,
            n_proc=n_proc, verbose=verbose)
        norm_fluxes[:, start:stop] = output[0]
        norm_ivars[:, start:stop] = output[1]
    return norm_fluxes, norm_ivars
Perform continuum normalization using running quantile, for spectrum that comes in chunks. The same as _cont_norm_running_quantile_regions(), but using multi-processing. Bo Zhang (NAOC)
def load_labeled_events(filename, delimiter=r'\s+'):
    r"""Import labeled time-stamp events from an annotation file.

    The file must have two columns: numeric event times and string labels.

    Parameters
    ----------
    filename : str
        Path to the annotation file
    delimiter : str
        Separator regular expression (default: any whitespace)

    Returns
    -------
    event_times : np.ndarray
        Array of event times (float)
    labels : list of str
        Label for each event
    """
    times, labels = load_delimited(filename, [float, str], delimiter)
    times = np.array(times)
    # Invalid event times are reported as a warning rather than an error.
    try:
        util.validate_events(times)
    except ValueError as err:
        warnings.warn(err.args[0])
    return times, labels
r"""Import labeled time-stamp events from an annotation file. The file should consist of two columns; the first having numeric values corresponding to the event times and the second having string labels for each event. This is primarily useful for processing labeled events which lack duration, such as beats with metric beat number or onsets with an instrument label. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- event_times : np.ndarray array of event times (float) labels : list of str list of labels
def load_profile(self, profile):
    """Load the named profile into the current minimum needs.

    :param profile: The profile's name
    :type profile: str
    """
    filename = '{}.json'.format(profile)
    profile_path = os.path.join(self.root_directory, 'minimum_needs', filename)
    self.read_from_file(profile_path)
Load a specific profile into the current minimum needs. :param profile: The profile's name :type profile: basestring, str
def _mapping(self):
    """Fetch the entire mapping for the configured index.

    Returns:
        dict: The full mapping for the index.
    """
    endpoint = "/unstable/index/{}/mapping".format(
        mdf_toolbox.translate_index(self.index))
    response = self.__search_client.get(endpoint)
    return response["mappings"]
Fetch the entire mapping for the specified index. Returns: dict: The full mapping for the index.
def route_to_alt_domain(request, url):
    """Rewrite ``url`` onto the configured alternative (websocket) domain.

    If no ``pyramid_notebook.alternative_domain`` setting is present the
    URL is returned unchanged.
    """
    settings = request.registry.settings
    alt_domain = settings.get("pyramid_notebook.alternative_domain", "").strip()
    if not alt_domain:
        return url
    return url.replace(request.host_url, alt_domain)
Route URL to a different subdomain. Used to rewrite URLs to point to websocket serving domain.
def terminate(library, session, degree, job_id):
    """Request a VISA session to terminate normal execution of an operation.

    Corresponds to the viTerminate function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: unique logical identifier to a session.
    :param degree: Constants.NULL
    :param job_id: operation identifier.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    status = library.viTerminate(session, degree, job_id)
    return status
Requests a VISA session to terminate normal execution of an operation. Corresponds to viTerminate function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param degree: Constants.NULL :param job_id: Specifies an operation identifier. :return: return value of the library call. :rtype: :class:`pyvisa.constants.StatusCode`
def _handleSelectAllAxes(self, evt):
    """Handle the 'select all axes' menu item by checking every axis entry."""
    if not self._axisId:
        return
    for axis_id in self._axisId:
        self._menu.Check(axis_id, True)
    self._toolbar.set_active(self.getActiveAxes())
    evt.Skip()
Called when the 'select all axes' menu item is selected.
def find_model(model_name, apps=settings.INSTALLED_APPS, fuzziness=0):
    """Find ``model_name`` among the indicated Django apps; return its class.

    Examples (apps containing a "miner" app):
        >>> find_model('WikiItem', 'miner')
        >>> find_model('Connection', 'miner')
        >>> find_model('InvalidModelName')
    """
    # A path-like name is returned untouched.
    if '/' in model_name:
        return model_name
    # "app.Model" style names imply the app to search.
    if not apps and isinstance(model_name, basestring) and '.' in model_name:
        apps = [model_name.split('.')[0]]
    for app in util.listify(apps or settings.INSTALLED_APPS):
        model = get_model(model=model_name, app=app, fuzziness=fuzziness)
        if model:
            return model
    return None
Find model_name among indicated Django apps and return Model class Examples: To find models in an app called "miner": >>> find_model('WikiItem', 'miner') >>> find_model('Connection', 'miner') >>> find_model('InvalidModelName')
def get_files(conn, aid: int) -> AnimeFiles:
    """Return the cached files for the given anime id.

    :raises ValueError: if no cached entry exists for ``aid``.
    """
    query = 'SELECT anime_files FROM cache_anime WHERE aid=?'
    with conn:
        row = conn.cursor().execute(query, (aid,)).fetchone()
        if row is None:
            raise ValueError('No cached files')
        return AnimeFiles.from_json(row[0])
Get cached files for anime.
def set_info_page(self):
    """Render the stored ``info_page`` HTML into the info widget."""
    if self.info_page is None:
        return
    base_url = QUrl.fromLocalFile(self.css_path)
    self.infowidget.setHtml(self.info_page, base_url)
Set current info_page.
def register(klass):
    """Register ``klass`` as an optimizer under its lowercased class name.

    Once registered, an instance can be created later via
    ``Optimizer.create_optimizer(name)``.

    Examples
    --------
    >>> @mx.optimizer.Optimizer.register
    ... class MyOptimizer(mx.optimizer.Optimizer):
    ...     pass
    """
    assert(isinstance(klass, type))
    name = klass.__name__.lower()
    existing = Optimizer.opt_registry.get(name)
    if existing is not None:
        # Re-registering an existing name is allowed but noisy.
        warnings.warn('WARNING: New optimizer %s.%s is overriding '
                      'existing optimizer %s.%s' % (
                          klass.__module__, klass.__name__,
                          existing.__module__, existing.__name__))
    Optimizer.opt_registry[name] = klass
    return klass
Registers a new optimizer. Once an optimizer is registered, we can create an instance of this optimizer with `create_optimizer` later. Examples -------- >>> @mx.optimizer.Optimizer.register ... class MyOptimizer(mx.optimizer.Optimizer): ... pass >>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer') >>> print(type(optim)) <class '__main__.MyOptimizer'>
def create_frvect(timeseries):
    """Build a `~frameCPP.FrVect` from a `TimeSeries`.

    Primarily intended to simplify writing data to GWF files.

    Parameters
    ----------
    timeseries : `TimeSeries`
        the input series

    Returns
    -------
    frvect : `~frameCPP.FrVect`
        the populated output vector
    """
    dx = timeseries.dx
    dims = frameCPP.Dimension(timeseries.size, dx.value, str(dx.unit), 0)
    vect = frameCPP.FrVect(
        timeseries.name or '',
        FRVECT_TYPE_FROM_NUMPY[timeseries.dtype.type],
        1, dims, str(timeseries.unit))
    # Data must be C-contiguous before being copied into the FrVect buffer.
    vect.GetDataArray()[:] = numpy.require(timeseries.value, requirements=['C'])
    return vect
Create a `~frameCPP.FrVect` from a `TimeSeries` This method is primarily designed to make writing data to GWF files a bit easier. Parameters ---------- timeseries : `TimeSeries` the input `TimeSeries` Returns ------- frvect : `~frameCPP.FrVect` the output `FrVect`
def file(ctx, data_dir, data_file):
    """Use the File SWAG Backend.

    Stores the backend type and file locations on ``ctx``, keeping any
    values that are already set.
    """
    # NOTE(review): the original guarded this assignment with
    # ``if not ctx.file`` while assigning ``ctx.data_file``; the parallel
    # ``data_dir`` branch below strongly suggests the intended guard is
    # ``ctx.data_file`` -- confirm against callers.
    if not ctx.data_file:
        ctx.data_file = data_file
    if not ctx.data_dir:
        ctx.data_dir = data_dir
    ctx.type = 'file'
Use the File SWAG Backend
def calc_temp(Data_ref, Data):
    """Calculate the temperature of a data set relative to a 300 K reference.

    Parameters
    ----------
    Data_ref : DataObject
        Reference data set, assumed to be at 300 K
    Data : DataObject
        Data set whose temperature is calculated; ``Data.T`` is set

    Returns
    -------
    T
        The calculated temperature of ``Data``
    """
    ratio = (Data.A * Data_ref.Gamma) / (Data_ref.A * Data.Gamma)
    Data.T = 300 * ratio
    return Data.T
Calculates the temperature of a data set relative to a reference. The reference is assumed to be at 300K. Parameters ---------- Data_ref : DataObject Reference data set, assumed to be 300K Data : DataObject Data object to have the temperature calculated for Returns ------- T : uncertainties.ufloat The temperature of the data set
def powered_up(self):
    """Return True when the card is "powered up" (every script check passes).

    An empty/missing script list means the card is never powered up.
    """
    scripts = self.data.scripts.powered_up
    if not scripts:
        return False
    return all(script.check(self) for script in scripts)
Returns True when the card is "powered up", i.e. all of its powered-up scripts pass their check.
def factors(number):
    """Find all positive factors of a number and return them as a sorted list.

    :type number: integer
    :param number: The number to find the factors for.
    :raises TypeError: if ``number`` is not an integer.
    """
    if not (isinstance(number, int)):
        raise TypeError(
            "Incorrect number type provided. Only integers are accepted.")
    # Trial-divide only up to sqrt(number): every divisor i found below the
    # root pairs with number // i above it -- O(sqrt n) instead of O(n).
    # For number <= 0 the loop never runs, matching the original's [].
    small, large = [], []
    i = 1
    while i * i <= number:
        if number % i == 0:
            small.append(i)
            partner = number // i
            if partner != i:
                large.append(partner)
        i += 1
    return small + large[::-1]
Find all of the factors of a number and return it as a list. :type number: integer :param number: The number to find the factors for.
def transcode(text, input=PREFERRED_ENCODING, output=PREFERRED_ENCODING):
    """Transcode a text string from cp437, preferring cp1252 output.

    Falls back to ``output``, and finally returns ``text`` unchanged when
    no conversion succeeds.
    """
    try:
        intermediate = text.decode("cp437")
    except UnicodeError:
        return text
    try:
        return intermediate.encode("cp1252")
    except UnicodeError:
        pass
    try:
        return intermediate.encode(output)
    except UnicodeError:
        return text
Transcode a text string
def cons(f, mindepth):
    """Yield, per cluster, the list of per-site base counts at covered sites.

    Clusters whose total read depth is below ``mindepth`` are skipped, and
    only sites whose summed counts meet ``mindepth`` are kept.
    """
    for data in ClustFile(f):
        names, seqs, nreps = zip(*data)
        if sum(nreps) < mindepth:
            continue
        stacked = stack([[seq, nrep] for name, seq, nrep in data])
        yield [counts[:4] for counts in stacked if sum(counts[:4]) >= mindepth]
Makes a list of lists of reads at each site
def __get_time_range(self, startDate, endDate):
    """Return (startDate, endDate), defaulting to last week's Monday-Friday."""
    today = date.today()
    last_monday = today - timedelta(days=today.weekday(), weeks=1)
    last_friday = last_monday + timedelta(days=4)
    return (startDate or str(last_monday),
            endDate or str(last_friday))
Return the (startDate, endDate) time range; missing values default to last week's Monday and Friday (ISO date strings).
def welcome_message():
    """Build the welcome message shown to first-run users.

    .. versionadded:: 4.3.0

    :returns: A message object containing helpful information.
    :rtype: messaging.message.Message
    """
    message = m.Message()
    for part in (m.Brand(), heading(), content()):
        message.add(part)
    return message
Welcome message for first running users. .. versionadded:: 4.3.0 :returns: A message object containing helpful information. :rtype: messaging.message.Message
def setdefault(obj, field, default):
    """Ensure ``obj.field`` exists, initialising it to ``default`` if missing."""
    current = getattr(obj, field, default)
    setattr(obj, field, current)
Set an object's field to default if it doesn't have a value
def user_list(**connection_args):
    """Return a list of (User, Host) rows from ``mysql.user``.

    Returns an empty list when no connection can be made or the query
    fails; in the failure case the error is stored in
    ``__context__['mysql.error']`` and logged.

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.user_list
    """
    dbc = _connect(**connection_args)
    if dbc is None:
        # Connection failure already recorded by _connect.
        return []
    cur = dbc.cursor(MySQLdb.cursors.DictCursor)
    try:
        qry = 'SELECT User,Host FROM mysql.user'
        _execute(cur, qry)
    except MySQLdb.OperationalError as exc:
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        # Expose the error to other Salt modules via the shared context.
        __context__['mysql.error'] = err
        log.error(err)
        return []
    results = cur.fetchall()
    log.debug(results)
    return results
Return a list of users on a MySQL server CLI Example: .. code-block:: bash salt '*' mysql.user_list
def get_beamarea_deg2(self, ra, dec):
    """Calculate the beam area at a sky position, in square degrees.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    area : float
        Beam area in square degrees (0 when no beam is defined there).
    """
    beam = self.get_beam(ra, dec)
    return 0 if beam is None else np.pi * beam.a * beam.b
Calculate the area of the beam in square degrees. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- area : float The area of the beam in square degrees.
def add_fields(self, **fields):
    """Attach new data fields to this struct instance.

    A fresh subclass carrying the fields replaces the instance's class,
    then each field object is initialised against the instance.
    """
    current_cls = self.__class__
    self.__class__ = type(current_cls.__name__, (current_cls,), fields)
    for field in fields.values():
        field.init_inst(self)
Add new data fields to this struct instance
def from_json(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> T:
    """Deserialize a JSON string into an instance of ``cls``.

    :param data: JSON string
    :param force_snake_case: transform keys to snake_case (PEP 8) if True
    :param force_cast: cast values forcibly if True
    :param restrict: prohibit extra parameters if True
    :return: the constructed instance
    """
    parsed = util.load_json(data)
    return cls.from_dict(parsed,
                         force_snake_case=force_snake_case,
                         force_cast=force_cast,
                         restrict=restrict)
From json string to instance :param data: Json string :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance Usage: >>> from owlmixin.samples import Human >>> human: Human = Human.from_json('''{ ... "id": 1, ... "name": "Tom", ... "favorites": [ ... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}}, ... {"name": "Orange"} ... ] ... }''') >>> human.id 1 >>> human.name 'Tom' >>> human.favorites[0].names_by_lang.get()["de"] 'Apfel'
def open_image(fname_or_instance: Union[str, IO[bytes]]):
    """Return an ``Image.Image`` for the given input.

    :param fname_or_instance: either the image's path/stream, or an
        ``Image.Image`` instance, which is passed through unchanged.
    """
    if not isinstance(fname_or_instance, Image.Image):
        fname_or_instance = Image.open(fname_or_instance)
    return fname_or_instance
Opens a Image and returns it. :param fname_or_instance: Can either be the location of the image as a string or the Image.Image instance itself.
def findHTMLMeta(stream):
    """Look for a meta http-equiv tag with the YADIS header name.

    @param stream: Source of the html text
    @type stream: Object that implements a read() method that works
        like file.read

    @return: The URI from which to fetch the XRDS document
    @rtype: str

    @raises MetaNotFound: raised with the content that was searched as
        the first parameter.
    """
    parser = YadisHTMLParser()
    chunks = []

    while 1:
        chunk = stream.read(CHUNK_SIZE)
        if not chunk:
            # End of input without finding the tag
            break
        chunks.append(chunk)
        try:
            parser.feed(chunk)
        except HTMLParseError:
            # Parser choked; consume the rest for the error report
            chunks.append(stream.read())
            break
        except ParseDone as why:
            # Python 3 port: 'except E, v' and indexing the exception
            # object (why[0]) are Python-2-only; use 'as' and .args.
            uri = why.args[0]
            if uri is None:
                # Tag was found but carried no URI
                chunks.append(stream.read())
                break
            else:
                return uri

    content = ''.join(chunks)
    raise MetaNotFound(content)
Look for a meta http-equiv tag with the YADIS header name. @param stream: Source of the html text @type stream: Object that implements a read() method that works like file.read @return: The URI from which to fetch the XRDS document @rtype: str @raises MetaNotFound: raised with the content that was searched as the first parameter.
def run(self):
    """Start the exchange: announce startup, then dispatch events until
    the poison pill arrives."""
    self.started_queue.put('STARTED')
    event = self.publisher_queue.get()
    while event != POISON_PILL:
        self.dispatch(event)
        event = self.publisher_queue.get()
Start the exchange
def get_edited_object(self, request):
    """Return the admin object currently being edited.

    Returns ``None`` if the request does not match an admin change view.
    """
    match = urls.resolve(request.path_info)
    if match.namespace != 'admin':
        return None
    url_name = match.url_name
    if not url_name or not url_name.endswith('_change'):
        return None
    name_match = RE_CHANGE_URL.match(url_name)
    if not name_match:
        return None
    # Django passes the pk either as a kwarg or a positional arg.
    if 'object_id' in match.kwargs:
        object_id = match.kwargs['object_id']
    else:
        object_id = match.args[0]
    return self.get_object_by_natural_key(
        name_match.group(1), name_match.group(2), object_id)
Return the object which is currently being edited. Returns ``None`` if the match could not be made.
def _process_hist(self, hist):
    """Subclassed to offset the histogram by the configured amount."""
    parent = super(SideHistogramPlot, self)
    edges, hvals, widths, lims, isdatetime = parent._process_hist(hist)
    offset = self.offset * lims[3]
    # Rescale in place, then shift, so the upper limit grows by `offset`.
    hvals *= 1 - self.offset
    hvals += offset
    adjusted_lims = lims[:3] + (lims[3] + offset,)
    return edges, hvals, widths, adjusted_lims, isdatetime
Subclassed to offset histogram by defined amount.
def _prep(e):
    """Normalise a photo dict in place and return it.

    Converts ``lastupdate`` to a datetime and, when all of farm/server/
    id/secret are present, builds a static-flickr URL under ``e['url']``.
    """
    if 'lastupdate' in e:
        e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
    for k in ['farm', 'server', 'id', 'secret']:
        if k not in e:
            return e
    e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (
        e["farm"], e["server"], e["id"], e["secret"])
    # Bug fix: the original fell off the end here, returning None on the
    # success path while the missing-key path returned ``e``.  Callers
    # relying on the mutation still work; the return is now consistent.
    return e
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
def cli(env):
    """List all active quotes on an account."""
    table = formatting.Table([
        'Id', 'Name', 'Created', 'Expiration', 'Status',
        'Package Name', 'Package Id',
    ])
    table.align['Name'] = 'l'
    table.align['Package Name'] = 'r'
    table.align['Package Id'] = 'l'

    manager = ordering.OrderingManager(env.client)
    for quote in manager.get_quotes():
        package = quote['order']['items'][0]['package']
        table.add_row([
            quote.get('id'),
            quote.get('name'),
            clean_time(quote.get('createDate')),
            clean_time(quote.get('modifyDate')),
            quote.get('status'),
            package.get('keyName'),
            package.get('id'),
        ])
    env.fout(table)
List all active quotes on an account
def scroll_mouse(self, mouse_x: int):
    """Scroll horizontally when the mouse nears the edges of the view.

    Used so an ROI selection keeps scrolling at the view's corners.

    :param mouse_x: current mouse x position
    """
    bar = self.horizontalScrollBar()
    view = self.view_rect()
    if mouse_x - view.x() > view.width():
        bar.setValue(bar.value() + 5)
    elif mouse_x < view.x():
        bar.setValue(bar.value() - 5)
Scrolls the mouse if ROI Selection reaches corner of view :param mouse_x: :return:
def format_index_array_attrs(series):
    """Format metadata attributes for an indexed array.

    Provides the metadata required by the (proposed) LIGO Common Data
    Format specification for series data in HDF5.
    """
    attrs = {}
    # One axis per dimension, at most two ('x', then 'y').
    for axis in ('x', 'y')[:series.ndim]:
        aunit = getattr(series, axis + 'unit')
        attrs[axis + 'unit'] = str(aunit)
        attrs[axis + '0'] = getattr(series, axis + '0').to(aunit).value
        attrs['d' + axis] = getattr(series, 'd' + axis).to(aunit).value
    return attrs
Format metadata attributes for an indexed array This function is used to provide the necessary metadata to meet the (proposed) LIGO Common Data Format specification for series data in HDF5.
def _builder_connect_signals(self, _dict):
    """Accumulate handlers for Gtk.Builder signal autoconnection.

    Called by controllers wanting to autoconnect their handlers with
    signals declared in the internal Gtk.Builder.  Handlers are
    accumulated and connected on the idle of the next GTK loop; after
    the autoconnection is done this method must not be called again.
    """
    assert not self.builder_connected, "Gtk.Builder not already connected"

    # The first batch of pending callbacks books the idle flush exactly once.
    if _dict and not self.builder_pending_callbacks:
        GLib.idle_add(self.__builder_connect_pending_signals)

    for name, handler in _dict.items():
        self.builder_pending_callbacks.setdefault(name, set()).add(handler)
Called by controllers which want to autoconnect their handlers with signals declared in internal Gtk.Builder. This method accumulates handlers, and books signal autoconnection later on the idle of the next occurring gtk loop. After the autoconnection is done, this method cannot be called anymore.
def sendRequest(self, extraHeaders=""):
    """Send the accumulated request to the configured URL.

    Posts multipart form-data when a document is attached, otherwise a
    form-encoded POST.

    :param extraHeaders: optional dict of additional request headers
    :return: the ``requests`` Response object, with encoding forced to UTF-8
    """
    self.addParam('src', 'mc-python')
    url = self._url

    if 'doc' in self._file:
        headers = {}
    else:
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}

    # Bug fix: the original tested ``extraHeaders is dict`` (always False,
    # an identity check against the dict *type*) and then assigned the
    # None return value of dict.update() back to ``headers``.  Extra
    # headers were therefore never sent.
    if isinstance(extraHeaders, dict):
        headers.update(extraHeaders)

    if 'doc' in self._file:
        result = requests.post(url=url, data=self._params,
                               files=self._file, headers=headers)
    else:
        result = requests.request("POST", url=url,
                                  data=urlencode(self._params),
                                  headers=headers)
    result.encoding = 'utf-8'
    return result
Sends a request to the URL specified and returns a response only if the HTTP code returned is OK :param extraHeaders: Allows to configure additional headers in the request :return: Response object set to None if there is an error
def pexpire(self, key, timeout):
    """Set a timeout on ``key``, in milliseconds.

    :raises TypeError: if timeout is not int
    """
    if not isinstance(timeout, int):
        raise TypeError("timeout argument must be int, not {!r}"
                        .format(timeout))
    return wait_convert(self.execute(b'PEXPIRE', key, timeout), bool)
Set a milliseconds timeout on key. :raises TypeError: if timeout is not int
def append_with_data(url, data):
    """Merge the key/value pairs in ``data`` into ``url``'s query string.

    Args:
        url (str): The URL to append.
        data (obj): OrderedDict of parameters to merge (``None`` leaves
            the URL untouched).

    Returns:
        str: The new URL.
    """
    if data is None:
        return url

    parts = list(urlparse(url))
    # keep_blank_values preserves parameters like "?flag=" on rebuild.
    query = OrderedDict(parse_qsl(parts[4], keep_blank_values=True))
    query.update(data)
    parts[4] = URLHelper.query_dict_to_string(query)
    return urlunparse(parts)
Append the given URL with the given data OrderedDict. Args: url (str): The URL to append. data (obj): The key value OrderedDict to append to the URL. Returns: str: The new URL.
def open_datasets(path, backend_kwargs={}, no_warn=False, **kwargs):
    """Open a GRIB file, grouping incompatible hypercubes into separate
    datasets via simple heuristics.
    """
    if not no_warn:
        warnings.warn("open_datasets is an experimental API, DO NOT RELY ON IT!",
                      FutureWarning)

    datasets = []
    try:
        datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))
    except DatasetBuildError as ex:
        # args[2] carries the alternative filter_by_keys candidates;
        # recurse once per candidate with the warning suppressed.
        for fbk in ex.args[2]:
            bks = backend_kwargs.copy()
            bks['filter_by_keys'] = fbk
            datasets.extend(open_datasets(path, backend_kwargs=bks,
                                          no_warn=True, **kwargs))
    return datasets
Open a GRIB file groupping incompatible hypercubes to different datasets via simple heuristics.