code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def credentials(self, area_uuid):
    """Obtain scoped AWS credentials for uploading directly to S3.

    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :return: dict containing an AWS AccessKey, SecretKey and SessionToken
    :rtype: dict
    :raises UploadApiException: if credentials could not be obtained
    """
    path = "/area/{uuid}/credentials".format(uuid=area_uuid)
    return self._make_request("post", path=path).json()
Get AWS credentials required to directly upload files to Upload Area in S3 :param str area_uuid: A RFC4122-compliant ID for the upload area :return: a dict containing an AWS AccessKey, SecretKey and SessionToken :rtype: dict :raises UploadApiException: if credentials could not be obtained
def OnPaste(self, event):
    """Clipboard paste event handler.

    Pastes into the focused wx.TextCtrl directly; otherwise pastes into
    the grid at the cursor position as a single undoable action.
    """
    data = self.main_window.clipboard.get_clipboard()
    focus = self.main_window.FindFocus()
    if isinstance(focus, wx.TextCtrl):
        focus.WriteText(data)
    else:
        key = self.main_window.grid.actions.cursor
        # Group the paste into one undo step
        with undo.group(_("Paste")):
            self.main_window.actions.paste(key, data)
        self.main_window.grid.ForceRefresh()
    # NOTE(review): flattened source is ambiguous — Skip() assumed to run
    # for both branches, per usual wx handler convention; confirm.
    event.Skip()
Clipboard paste event handler
def get_service_address(
    self,
    block_identifier: BlockSpecification,
    index: int,
) -> Optional[AddressHex]:
    """Get the address of a service by index.

    Returns None when the index is out of range (the contract call fails
    with BadFunctionCallOutput).
    """
    lookup = self.proxy.contract.functions.service_addresses(index)
    try:
        return lookup.call(block_identifier=block_identifier)
    except web3.exceptions.BadFunctionCallOutput:
        return None
Gets the address of a service by index. If index is out of range return None
def update_dependency_kinds(apps, schema_editor):
    """Update historical dependency kinds as they may be wrong.

    A dependency is 'io' when the parent's pk appears among the child's
    data inputs (scalar or list), otherwise 'subprocess'.
    """
    DataDependency = apps.get_model('flow', 'DataDependency')
    for dependency in DataDependency.objects.all():
        dependency.kind = 'subprocess'
        child = dependency.child
        parent = dependency.parent
        for field_schema, fields in iterate_fields(child.input, child.process.input_schema):
            name = field_schema['name']
            value = fields[name]
            field_type = field_schema.get('type', '')
            if field_type.startswith('data:'):
                if value == parent.pk:
                    dependency.kind = 'io'
                    break
            elif field_type.startswith('list:data:'):
                # BUG FIX: previously compared the whole list (`value`)
                # against parent.pk inside `for data in value`, so list
                # inputs never matched.
                if parent.pk in value:
                    dependency.kind = 'io'
                    break
        dependency.save()
Update historical dependency kinds as they may be wrong.
def crps_climo(self):
    """Calculate the climatological CRPS."""
    obs = self.errors["O"].values
    n = float(self.num_forecasts)
    o_bar = obs / n
    total = np.sum(n * o_bar ** 2 - o_bar * obs * 2.0 + self.errors["O_2"].values)
    return total / float(self.thresholds.size * self.num_forecasts)
Calculate the climatological CRPS.
def order_by(self, field, orientation='ASC'):
    """Record an ordering criterion (field and direction).

    A list is appended as-is; a bare field name is paired with
    ``orientation``. Returns self for chaining.
    """
    entry = field if isinstance(field, list) else [field, orientation]
    self.raw_order_by.append(entry)
    return self
Indica los campos y el criterio de ordenamiento
def pathName(self, pathName: str):
    """Path name setter.

    Sanitises the new name, creates the destination directory if needed
    and moves the previous on-disk contents to the new location, then
    notifies the owning directory of the move.

    @param pathName: New path name string.
    """
    if self.pathName == pathName:
        return
    pathName = self.sanitise(pathName)
    before = self.realPath
    after = self._realPath(pathName)
    # Refuse to clobber an existing file/directory at the destination
    assert (not os.path.exists(after))
    newRealDir = os.path.dirname(after)
    if not os.path.exists(newRealDir):
        os.makedirs(newRealDir, DirSettings.defaultDirChmod)
    shutil.move(before, after)
    oldPathName = self._pathName
    self._pathName = pathName
    self._directory()._fileMoved(oldPathName, self)
Path Name Setter Set path name with passed in variable, create new directory and move previous directory contents to new path name. @param pathName: New path name string. @type pathName: String
def run(self, reporter=None):
    """Run the benchmark functions found in the child class.

    This should generally not be overloaded.

    :param reporter: destination for titles/results; defaults to a
        ConsoleReporter.
    """
    if not reporter:
        reporter = ConsoleReporter()
    benchmarks = sorted(self._find_benchmarks())
    reporter.write_titles(map(self._function_name_to_title, benchmarks))
    for value in self.input():
        results = []
        for b in benchmarks:
            method = getattr(self, b)
            # getargspec was removed in Python 3.11; getfullargspec is the
            # drop-in replacement (also counts 'self' on bound methods).
            arg_count = len(inspect.getfullargspec(method)[0])
            if arg_count == 2:
                results.append(self._run_benchmark(method, value))
            elif arg_count == 1:
                results.append(self._run_benchmark(method))
        reporter.write_results(str(value), results)
This should generally not be overloaded. Runs the benchmark functions that are found in the child class.
def refuse_transfer(transfer, comment=None):
    """Refuse an incoming transfer request.

    Checks permission first, stamps the response time/responder, marks
    the transfer 'refused' and persists it.

    :param transfer: the transfer object being responded to
    :param comment: optional free-text response comment
    :returns: the saved transfer
    """
    TransferResponsePermission(transfer).test()
    transfer.responded = datetime.now()
    transfer.responder = current_user._get_current_object()
    transfer.status = 'refused'
    transfer.response_comment = comment
    transfer.save()
    return transfer
Refuse an incoming transfer request
def _qteGetLabelInstance(self):
    """Return an instance of a ``QLabel`` with the correct color scheme.

    |Args|
    * **None**

    |Returns|
    * **QLabel**
    """
    # NOTE: removed unused local `layout = self.layout()` — the layout
    # was fetched but never used.
    label = QtGui.QLabel(self)
    style = 'QLabel { background-color : white; color : blue; }'
    label.setStyleSheet(style)
    return label
Return an instance of a ``QLabel`` with the correct color scheme. |Args| * **None** |Returns| * **QLabel** |Raises| * **None**
def get_answers(self):
    """Gets the answers.

    return: (osid.assessment.AnswerList) - the answers
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    return AnswerList(
        self._my_map['answers'],
        runtime=self._runtime,
        proxy=self._proxy)
Gets the answers. return: (osid.assessment.AnswerList) - the answers raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
    """Send an email message via Flask-Mail.

    Args:
        recipient: Email address or tuple of (Name, Email-address).
        subject: Subject line.
        html_message: The message body in HTML.
        text_message: The message body in plain text.
        sender_email: sender address; combined with sender_name if given.
        sender_name: optional display name for the sender.

    Raises:
        EmailError: on SMTP connection or authentication failures.
    """
    sender = '"%s" <%s>' % (sender_name, sender_email) if sender_name else sender_email
    # Skip actually sending mail while running tests
    if not current_app.testing:
        try:
            from flask_mail import Message
            message = Message(
                subject,
                sender=sender,
                recipients=[recipient],
                html=html_message,
                body=text_message)
            self.mail.send(message)
        except (socket.gaierror, socket.error) as e:
            # Chain the original error so the root cause is not lost.
            raise EmailError('SMTP Connection error: Check your MAIL_SERVER and MAIL_PORT settings.') from e
        except smtplib.SMTPAuthenticationError as e:
            raise EmailError('SMTP Authentication error: Check your MAIL_USERNAME and MAIL_PASSWORD settings.') from e
Send email message via Flask-Mail. Args: recipient: Email address or tuple of (Name, Email-address). subject: Subject line. html_message: The message body in HTML. text_message: The message body in plain text.
def _format_alignment(self, a1, a2): html = [] for index, char in enumerate(a1): output = self._substitutes.get(char, char) if a2[index] == char: html.append('<span class="match">{}</span>'.format(output)) elif char != '-': html.append(output) return ''.join(html)
Returns `a1` marked up with HTML spans around characters that are also at the same index in `a2`. :param a1: text sequence from one witness :type a1: `str` :param a2: text sequence from another witness :type a2: `str` :rtype: `str`
def launch(self, resource):
    """Launch a new job.

    :param resource: :class:`jobs.Job <jobs.Job>` object
    :return: :class:`jobs.Job <jobs.Job>` object
    :rtype: jobs.Job
    """
    # Server-managed fields are excluded from the request payload.
    request_schema = JobSchema(exclude=('id', 'status', 'package_name', 'config_name', 'device_name', 'result_id', 'user_id', 'created', 'updated', 'automatic'))
    payload = self.service.encode(request_schema, resource)
    resp = self.service.create(self.base, payload)
    return self.service.decode(JobSchema(), resp)
Launch a new job. :param resource: :class:`jobs.Job <jobs.Job>` object :return: :class:`jobs.Job <jobs.Job>` object :rtype: jobs.Job
def normalize(self):
    """Shift the actor's center of mass to the origin and scale its
    average size to unit.

    Returns the updated mesh, or None when the actor has no coordinates.
    """
    cm = self.centerOfMass()
    coords = self.coordinates()
    if not len(coords):
        return
    pts = coords - cm
    xyz2 = np.sum(pts * pts, axis=0)
    # RMS distance from the center of mass defines the size scale
    scale = 1 / np.sqrt(np.sum(xyz2) / len(pts))
    t = vtk.vtkTransform()
    t.Scale(scale, scale, scale)
    t.Translate(-cm)
    tf = vtk.vtkTransformPolyDataFilter()
    tf.SetInputData(self.poly)
    tf.SetTransform(t)
    tf.Update()
    return self.updateMesh(tf.GetOutput())
Shift actor's center of mass at origin and scale its average size to unit.
def get_next_repository(self):
    """Gets the next ``Repository`` in this list.

    :return: the next ``Repository`` in this list
    :rtype: ``osid.repository.Repository``
    :raise: ``IllegalState`` -- no more elements available in this list
    :raise: ``OperationFailed`` -- unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    try:
        return next(self)
    except StopIteration:
        raise IllegalState('no more elements available in this list')
    except Exception:
        raise OperationFailed()
Gets the next ``Repository`` in this list. :return: the next ``Repository`` in this list. The ``has_next()`` method should be used to test that a next ``Repository`` is available before calling this method. :rtype: ``osid.repository.Repository`` :raise: ``IllegalState`` -- no more elements available in this list :raise: ``OperationFailed`` -- unable to complete request *compliance: mandatory -- This method must be implemented.*
def reduceByKeyLocally(self, func):
    """Merge the values for each key using an associative and commutative
    reduce function, returning the result to the master as a dict.

    Merging happens locally on each mapper first (like a MapReduce
    "combiner") before partition maps are merged on the driver.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    # Wrap func so a StopIteration inside it can't silently end iteration
    func = fail_on_stopiteration(func)

    def reducePartition(iterator):
        # Fold one partition into a single {key: reduced value} map.
        m = {}
        for k, v in iterator:
            m[k] = func(m[k], v) if k in m else v
        yield m

    def mergeMaps(m1, m2):
        # Merge two partition maps, reducing values for shared keys.
        for k, v in m2.items():
            m1[k] = func(m1[k], v) if k in m1 else v
        return m1
    return self.mapPartitions(reducePartition).reduce(mergeMaps)
Merge the values for each key using an associative and commutative reduce function, but return the results immediately to the master as a dictionary. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKeyLocally(add).items()) [('a', 2), ('b', 1)]
def getPassagePlus(self, reference=None):
    """Retrieve a passage and information around it and store it in the object.

    :param reference: Reference of the passage; when omitted the object's
        own URN is used.
    :rtype: CtsPassage
    :returns: Object representing the passage
    """
    if reference:
        urn = "{0}:{1}".format(self.urn, reference)
    else:
        urn = str(self.urn)
    response = xmlparser(self.retriever.getPassagePlus(urn=urn))
    passage = CtsPassage(urn=urn, resource=response, retriever=self.retriever)
    # Parse the reply label for passage metadata (citation etc.)
    passage._parse_request(response.xpath("//ti:reply/ti:label", namespaces=XPATH_NAMESPACES)[0])
    self.citation = passage.citation
    return passage
Retrieve a passage and informations around it and store it in the object :param reference: Reference of the passage :type reference: CtsReference or List of text_type :rtype: CtsPassage :returns: Object representing the passage :raises: *TypeError* when reference is not a list or a Reference
def resolve_context(self):
    """Return the union of all contexts entered but not yet exited.

    Later contexts override earlier ones on key collisions.
    """
    return {
        key: value
        for context in self.located_context.values()
        for key, value in context.items()
    }
Create a new dictionary that corresponds to the union of all of the contexts that have been entered but not exited at this point.
def initialise_logging(level: str, target: str, short_format: bool):
    """Initialise basic logging facilities.

    :param level: logging level name, e.g. 'INFO'
    :param target: destination passed through to create_handler
    :param short_format: emit only the message without timestamp/pid
    :raises SystemExit: if ``level`` is not a known logging attribute
    """
    if not hasattr(logging, level):
        raise SystemExit(
            "invalid log level %r, expected any of 'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'" % level
        )
    log_level = getattr(logging, level)
    fmt = '%(message)s' if short_format else '%(asctime)-15s (%(process)d) %(message)s'
    logging.basicConfig(
        level=log_level,
        format=fmt,
        datefmt='%Y-%m-%d %H:%M:%S',
        handlers=[create_handler(target=target)],
    )
Initialise basic logging facilities
def _list_interface_private_addrs(eni_desc): primary = eni_desc.get('privateIpAddress') if not primary: return None addresses = [primary] lst = eni_desc.get('privateIpAddressesSet', {}).get('item', []) if not isinstance(lst, list): return addresses for entry in lst: if entry.get('primary') == 'true': continue if entry.get('privateIpAddress'): addresses.append(entry.get('privateIpAddress')) return addresses
Returns a list of all of the private IP addresses attached to a network interface. The 'primary' address will be listed first.
def acceptText(self):
    """Emits the editing finished signals for this widget.

    No-op while the widget's signals are blocked.
    """
    if not self.signalsBlocked():
        self.textEntered.emit(self.toPlainText())
        self.htmlEntered.emit(self.toHtml())
        self.returnPressed.emit()
Emits the editing finished signals for this widget.
def process(self, line):
    """Step through the line and process each character.

    Appends the raw text, guarantees a trailing EOL, feeds each char to
    the current character handler (switching to escape handling after an
    escape char), and recurses on continuation lines until complete.
    """
    self.raw = self.raw + line
    try:
        if not line[-1] == self.eol_char:
            line = line + self.eol_char
    except IndexError:
        # Empty line: treat it as a bare end-of-line
        line = self.eol_char
    for char in line:
        if char == self.escape_char:
            # Remember the current handler and divert to escape processing
            self.last_process_char = self.process_char
            self.process_char = self.process_escape
            continue
        self.process_char(char)
    if not self.complete:
        # Statement spans multiple lines: prompt for a continuation line
        self.process(
            self.handler.readline(prompt=self.handler.CONTINUE_PROMPT)
        )
Step through the line and process each character
def get_var_dict_from_ctx(ctx: commands.Context, prefix: str = '_'):
    """Return the dict to be used in REPL for a given Context.

    Each key is prefixed with ``prefix`` to avoid clobbering user names.
    """
    env = {
        'author': ctx.author,
        'bot': ctx.bot,
        'channel': ctx.channel,
        'ctx': ctx,
        'find': discord.utils.find,
        'get': discord.utils.get,
        'guild': ctx.guild,
        'message': ctx.message,
        'msg': ctx.message
    }
    return {prefix + name: value for name, value in env.items()}
Returns the dict to be used in REPL for a given Context.
def pole(conic, plane):
    """Calculate the pole of a polar plane for a given conic section.

    Solves conic * v = plane and dehomogenizes the result.
    """
    homogeneous = dot(N.linalg.inv(conic), plane)
    return homogeneous[:-1] / homogeneous[-1]
Calculates the pole of a polar plane for a given conic section.
def create(cls, selective: typing.Optional[base.Boolean] = None):
    """Create new force reply.

    :param selective:
    :return:
    """
    kwargs = {'selective': selective}
    return cls(**kwargs)
Create new force reply :param selective: :return:
def register_postcmd_hook(self, func: Callable[[plugin.PostcommandData], plugin.PostcommandData]) -> None:
    """Register a hook to be called after the command function.

    The hook is validated against the post-command signature first.
    """
    hooks = self._postcmd_hooks
    self._validate_prepostcmd_hook(func, plugin.PostcommandData)
    hooks.append(func)
Register a hook to be called after the command function.
def search_subscriptions(self, **kwargs):
    """Search for all subscriptions by parameters.

    :raises DataFailureException: on any non-200 response
    """
    query = urlencode(sorted(kwargs.items()), doseq=True)
    url = "/notification/v1/subscription?{}".format(query)
    response = NWS_DAO().getURL(url, self._read_headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    data = json.loads(response.data)
    return [self._subscription_from_json(datum)
            for datum in data.get("Subscriptions", [])]
Search for all subscriptions by parameters
def _check_curtailment_target(curtailment, curtailment_target, curtailment_key): if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all(): message = 'Curtailment target not met for {}.'.format(curtailment_key) logging.error(message) raise TypeError(message)
Raises an error if curtailment target was not met in any time step. Parameters ----------- curtailment : :pandas:`pandas:DataFrame<dataframe>` Dataframe containing the curtailment in kW per generator and time step. Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the generator representatives. curtailment_target : :pandas:`pandas.Series<series>` The curtailment in kW that was to be distributed amongst the generators. Index of the series is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str` The technology and weather cell ID if :obj:`tuple` or only the technology if :obj:`str` the curtailment was specified for.
def generateCatalog(wcs, mode='automatic', catalog=None, src_find_filters=None, **kwargs):
    """Instantiate the Catalog class matching the requested source
    selection mode.

    A pre-built Catalog instance is passed through unchanged; otherwise
    an ImageCatalog ('automatic') or UserCatalog is constructed.
    """
    if isinstance(catalog, Catalog):
        return catalog
    if mode == 'automatic':
        return ImageCatalog(wcs, catalog, src_find_filters, **kwargs)
    return UserCatalog(wcs, catalog, **kwargs)
Function which determines what type of catalog object needs to be instantiated based on what type of source selection algorithm the user specified. Parameters ---------- wcs : obj WCS object generated by STWCS or PyWCS catalog : str or ndarray Filename of existing catalog or ndarray of image for generation of source catalog. kwargs : dict Parameters needed to interpret source catalog from input catalog with `findmode` being required. Returns ------- catalog : obj A Catalog-based class instance for keeping track of WCS and associated source catalog
def is_published(self):
    """Return True if a record is published.

    A record is published when it is citeable (enough information in
    ``publication_info``) or when it has a DOI plus a ``journal_title``
    (in press).

    Returns:
        bool: whether the record is published.
    """
    citeable = 'publication_info' in self.record and \
        is_citeable(self.record['publication_info'])
    submitted = 'dois' in self.record and any(
        'journal_title' in el
        for el in force_list(self.record.get('publication_info'))
    )
    return citeable or submitted
Return True if a record is published. We say that a record is published if it is citeable, which means that it has enough information in a ``publication_info``, or if we know its DOI and a ``journal_title``, which means it is in press. Returns: bool: whether the record is published. Examples: >>> record = { ... 'dois': [ ... {'value': '10.1016/0029-5582(61)90469-2'}, ... ], ... 'publication_info': [ ... {'journal_title': 'Nucl.Phys.'}, ... ], ... } >>> LiteratureReader(record).is_published True
def assert_not_in(self, actual_collection_or_string, unexpected_value, failure_message='Expected "{1}" not to be in "{0}"'):
    """Assert via webdriver_assert that ``unexpected_value`` is not
    contained in the given collection or string.

    The failure message is formatted with (collection, value).
    NOTE(review): uses the py2 ``unicode`` builtin — this block targets
    Python 2.
    """
    assertion = lambda: unexpected_value not in actual_collection_or_string
    self.webdriver_assert(assertion, unicode(failure_message).format(actual_collection_or_string, unexpected_value))
Calls smart_assert, but creates its own assertion closure using the expected and provided values with the 'not in' operator
def get_services(self):
    """Returns a list of FritzService-objects."""
    found = []
    namespaces = {'ns': self.namespace}
    for node in self.root.iterfind('.//ns:service', namespaces=namespaces):
        service = FritzService(
            node.find(self.nodename('serviceType')).text,
            node.find(self.nodename('controlURL')).text,
            node.find(self.nodename('SCPDURL')).text)
        found.append(service)
    return found
Returns a list of FritzService-objects.
def standings(self):
    """Get standings from the community's account.

    Scrapes the standings HTML table and returns a list of tab-separated
    strings: position, row div id, team name, and two stat columns.
    """
    headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain","User-Agent": user_agent}
    req = self.session.get('http://'+self.domain+'/standings.phtml',headers=headers).content
    soup = BeautifulSoup(req)
    table = soup.find('table',{'id':'tablestandings'}).find_all('tr')
    clasificacion = []
    # First row is the header; a side-effecting list comprehension fills
    # the result list from the remaining rows.
    [clasificacion.append(('%s\t%s\t%s\t%s\t%s')%(tablas.find('td').text,tablas.find('div')['id'],tablas.a.text,tablas.find_all('td')[3].text,tablas.find_all('td')[4].text)) for tablas in table[1:]]
    return clasificacion
Get standings from the community's account
def add_comment(self, text):
    """Comment on the submission using the specified text.

    :returns: A Comment object for the newly created comment.
    """
    response = self.reddit_session._add_comment(self.fullname, text)
    # Invalidate the cached listing so the new comment shows up next fetch
    self.reddit_session.evict(self._api_link)
    return response
Comment on the submission using the specified text. :returns: A Comment object for the newly created comment.
def page_index(request):
    """Index of all pages, grouped by the first letter of their name."""
    letters = {}
    for page in Page.query.order_by(Page.name):
        initial = page.name.capitalize()[0]
        letters.setdefault(initial, []).append(page)
    body = generate_template("page_index.html", letters=sorted(letters.items()))
    return Response(body)
Index of all pages.
def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs):
    """Delete a file system, permanently severing access to its contents.

    The file system must have no mount targets.

    filesystemid
        (string) - ID of the file system to delete.

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' boto_efs.delete_file_system filesystemid
    """
    conn = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
    conn.delete_file_system(FileSystemId=filesystemid)
Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid
def visit_binop(self, node, parent):
    """Visit a BinOp node by returning a fresh instance of it."""
    op_class = self._bin_op_classes[type(node.op)]
    newnode = nodes.BinOp(op_class, node.lineno, node.col_offset, parent)
    left = self.visit(node.left, newnode)
    right = self.visit(node.right, newnode)
    newnode.postinit(left, right)
    return newnode
visit a BinOp node by returning a fresh instance of it
def absolute(self):
    """Return an absolute version of this path.

    Works even if the path doesn't point to anything. No normalization
    is done: '.' and '..' are kept. Use resolve() for a canonical path.
    """
    if self.is_absolute():
        return self
    # Prepend the current working directory to the stored parts
    obj = self._from_parts([os.getcwd()] + self._parts, init=False)
    obj._init(template=self)
    return obj
Return an absolute version of this path. This function works even if the path doesn't point to anything. No normalization is done, i.e. all '.' and '..' will be kept along. Use resolve() to get the canonical path to a file.
def copy(self):
    """Make a new instance of this Token.

    The mutable data/context mappings are copied first so the clone is
    independent of the original.
    """
    cls = self.__class__
    return cls(self.tag, self.data.copy(), self.context.copy())
Make a new instance of this Token. This method makes a copy of the mutable part of the token before making the instance.
def facts(puppet=False):
    """Run facter and return the results as a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' puppet.facts
    """
    opt_puppet = '--puppet' if puppet else ''
    cmd_ret = __salt__['cmd.run_all']('facter {0}'.format(opt_puppet))
    if cmd_ret['retcode'] != 0:
        raise CommandExecutionError(cmd_ret['stderr'])
    ret = {}
    for line in cmd_ret['stdout'].splitlines():
        if not line:
            continue
        fact, value = _format_fact(line)
        if fact:
            ret[fact] = value
    return ret
Run facter and return the results CLI Example: .. code-block:: bash salt '*' puppet.facts
def _message_to_payload(cls, message): try: return json.loads(message.decode()) except UnicodeDecodeError: message = 'messages must be encoded in UTF-8' except json.JSONDecodeError: message = 'invalid JSON' raise cls._error(cls.PARSE_ERROR, message, True, None)
Returns a Python object or a ProtocolError.
def onerror(self, message, source, lineno, colno):
    """Called when an error occurs; echoes back the error details."""
    return message, source, lineno, colno
Called when an error occurs.
def package_regex_filter(config, message, pattern=None, *args, **kw):
    """All packages matching a regular expression.

    Include messages whose related packages match ``pattern``
    (e.g. *(maven|javapackages-tools|maven-surefire)*).
    Returns None (falsy) when no pattern is supplied.
    """
    # A keyword-supplied pattern takes precedence over the positional one
    pattern = kw.get('pattern', pattern)
    if pattern:
        packages = fmn.rules.utils.msg2packages(message, **config)
        # NOTE(review): pattern/package are encoded to bytes before
        # matching — presumably a py2 compatibility measure; confirm.
        regex = fmn.rules.utils.compile_regex(pattern.encode('utf-8'))
        return any([regex.search(p.encode('utf-8')) for p in packages])
All packages matching a regular expression Use this rule to include messages that relate to packages that match particular regular expressions (*i.e., (maven|javapackages-tools|maven-surefire)*).
def requires_open_handle(method):
    """Decorator to ensure a handle is open for certain methods.

    Keeps all "is this handle open" logic in is_closed().

    Args:
        method: A class method on a subclass of UsbHandle.

    Raises:
        HandleClosedError: If this handle has been closed.

    Returns:
        A wrapper that checks the handle before delegating to method.
    """
    @functools.wraps(method)
    def wrapper_requiring_open_handle(self, *args, **kwargs):
        if not self.is_closed():
            return method(self, *args, **kwargs)
        raise usb_exceptions.HandleClosedError()
    return wrapper_requiring_open_handle
Decorator to ensure a handle is open for certain methods. Subclasses should decorate their Read() and Write() with this rather than checking their own internal state, keeping all "is this handle open" logic in is_closed(). Args: method: A class method on a subclass of UsbHandle Raises: HandleClosedError: If this handle has been closed. Returns: A wrapper around method that ensures the handle is open before calling through to the wrapped method.
def configure_logger(self, tc_config_log_filename=None, tc_output_log_filename=None):
    """Configure selenium instance logger.

    :param tc_config_log_filename: test case specific logging config file
    :param tc_output_log_filename: test case specific output logger file
    """
    config_log_filename = DriverWrappersPool.get_configured_value('Config_log_filename', tc_config_log_filename, 'logging.conf')
    config_log_filename = os.path.join(DriverWrappersPool.config_directory, config_log_filename)
    # Only reconfigure when the config file actually changed
    if self.config_log_filename != config_log_filename:
        output_log_filename = DriverWrappersPool.get_configured_value('Output_log_filename', tc_output_log_filename, 'toolium.log')
        output_log_filename = os.path.join(DriverWrappersPool.output_directory, output_log_filename)
        # Escape backslashes so Windows paths survive fileConfig interpolation
        output_log_filename = output_log_filename.replace('\\', '\\\\')
        try:
            logging.config.fileConfig(config_log_filename, {'logfilename': output_log_filename}, False)
        except Exception as exc:
            print("[WARN] Error reading logging config file '{}': {}".format(config_log_filename, exc))
        self.config_log_filename = config_log_filename
        self.output_log_filename = output_log_filename
        self.logger = logging.getLogger(__name__)
Configure selenium instance logger :param tc_config_log_filename: test case specific logging config file :param tc_output_log_filename: test case specific output logger file
def num_frames(self, num_samples):
    """Return the number of frames used for a signal of length
    ``num_samples``, given the configured frame and hop sizes.
    """
    remainder = max(num_samples - self.frame_size, 0)
    return math.ceil(float(remainder) / float(self.hop_size)) + 1
Return the number of frames that will be used for a signal with the length of ``num_samples``.
def request(self, msgtype, msgid, method, params=None):
    """Handle an incoming call request.

    Dispatches ``method`` with ``params``; sends the response back
    immediately, or wires up callbacks when the result is a Deferred.
    """
    # BUG FIX: `params=[]` was a shared mutable default; a handler that
    # mutated it would leak state into later calls.
    if params is None:
        params = []
    result = None
    error = None
    try:
        result = self.dispatch.call(method, params)
    except Exception as e:
        error = (e.__class__.__name__, str(e))
    if isinstance(result, Deferred):
        result.add_callback(self._result, msgid)
        result.add_errback(self._error, msgid)
    else:
        self.send_response(msgid, error, result)
Handle an incoming call request.
def get_resource_mdata():
    """Return default mdata map for Resource.

    Provides metadata for the 'group' (BOOLEAN) and 'avatar' (ID)
    elements, using the default language/script/format types.
    """
    return {
        # Boolean flag marking a resource as a group
        'group': {
            'element_label': {
                'text': 'group',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'enter either true or false.',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_boolean_values': [None],
            'syntax': 'BOOLEAN',
        },
        # Id reference to the resource's avatar asset
        'avatar': {
            'element_label': {
                'text': 'avatar',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'accepts an osid.id.Id object',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
    }
Return default mdata map for Resource
def rating(self, value):
    """Updates the Indicator's rating.

    Args:
        value: the new rating value to send to the API.
    """
    if not self.can_update():
        # 910: entity cannot be updated in its current state
        self._tcex.handle_error(910, [self.type])
    request_data = {'rating': value}
    return self.tc_requests.update(
        self.api_type, self.api_sub_type, self.unique_id, request_data, owner=self.owner
    )
Updates the Indicators rating Args: value:
def _get_size(self):
    """Callable that returns the current `Size`, required by Vt100_Output.

    Falls back to 79x20 when no channel is attached.
    """
    if self._chan is None:
        return Size(rows=20, columns=79)
    width, height, _pixwidth, _pixheight = self._chan.get_terminal_size()
    return Size(rows=height, columns=width)
Callable that returns the current `Size`, required by Vt100_Output.
def have(cmd):
    """Determine whether supplied argument is a command on the PATH."""
    try:
        from shutil import which
    except ImportError:
        # Minimal backport of shutil.which for old Pythons.
        def which(cmd):
            def runnable(path):
                return (os.path.exists(path) and os.access(
                    path, os.F_OK | os.X_OK) and not os.path.isdir(path))
            if os.path.dirname(cmd):
                return cmd if runnable(cmd) else None
            checked = set()
            for directory in os.environ.get('PATH', os.defpath.lstrip(':')).split(':'):
                if directory in checked:
                    continue
                checked.add(directory)
                candidate = os.path.join(directory, cmd)
                if runnable(candidate):
                    return candidate
            return None
    return which(cmd) is not None
Determine whether supplied argument is a command on the PATH.
def _get_all_group_items(network_id):
    """Get all resource group items in the network, across all scenarios.

    Returns a dict of lists of item rows, keyed on scenario_id.
    """
    base_qry = db.DBSession.query(ResourceGroupItem)
    item_qry = base_qry.join(Scenario).filter(Scenario.network_id==network_id)
    x = time.time()
    # NOTE(review): mixes module-level `logging` and the `log` logger —
    # presumably unintentional; confirm which logger is intended.
    logging.info("Getting all items")
    all_items = db.DBSession.execute(item_qry.statement).fetchall()
    log.info("%s groups jointly retrieved in %s", len(all_items), time.time()-x)
    logging.info("items retrieved. Processing results...")
    x = time.time()
    item_dict = dict()
    # Bucket rows by scenario_id
    for item in all_items:
        items = item_dict.get(item.scenario_id, [])
        items.append(item)
        item_dict[item.scenario_id] = items
    logging.info("items processed in %s", time.time()-x)
    return item_dict
Get all the resource group items in the network, across all scenarios returns a dictionary of dict objects, keyed on scenario_id
def get_hash(input_string):
    """Return the movie hash derived from the input string.

    If the input is a symlink into a Kolekto tree, the basename of the
    link target is the movie hash; otherwise the input itself is used.
    The result is always lowercased.
    """
    if os.path.islink(input_string):
        # Previously os.path.split() left an unused `directory` local;
        # basename expresses the intent directly.
        input_string = os.path.basename(os.readlink(input_string))
    return input_string.lower()
Return the hash of the movie depending on the input string. If the input string looks like a symbolic link to a movie in a Kolekto tree, return its movies hash, else, return the input directly in lowercase.
def biopax_process_pc_pathsfromto():
    """Process PathwayCommons paths from-to genes, return INDRA Statements."""
    # CORS preflight: reply with an empty body
    if request.method == 'OPTIONS':
        return {}
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    source = body.get('source')
    target = body.get('target')
    bp = biopax.process_pc_pathsfromto(source, target)
    return _stmts_from_proc(bp)
Process PathwayCommons paths from-to genes, return INDRA Statements.
def set_wd_noise(self, wd_noise):
    """Add White Dwarf Background Noise.

    Args:
        wd_noise (bool or str): `yes`/True for WD-noise-only runs,
            `no`/False for none, `both` for runs with and without.

    Raises:
        ValueError: Input value is not one of the options.
    """
    token = str(wd_noise) if isinstance(wd_noise, bool) else wd_noise
    lowered = token.lower()
    if lowered in ('yes', 'true'):
        normalized = 'True'
    elif lowered in ('no', 'false'):
        normalized = 'False'
    elif lowered == 'both':
        normalized = 'Both'
    else:
        raise ValueError('wd_noise must be yes, no, True, False, or Both.')
    self.sensitivity_input.add_wd_noise = normalized
    return
Add White Dwarf Background Noise This adds the White Dwarf (WD) Background noise. This can either do calculations with, without, or with and without WD noise. Args: wd_noise (bool or str, optional): Add or remove WD background noise. First option is to have only calculations with the wd_noise. For this, use `yes` or True. Second option is no WD noise. For this, use `no` or False. For both calculations with and without WD noise, use `both`. Raises: ValueError: Input value is not one of the options.
def _maintain_dep_graph(self, p_todo):
    """Makes sure that the dependency graph is consistent according to
    the given todo.

    Registers the todo as a parent (via its 'id' tag) and links it to
    existing children, then links the todo to each of its own parents
    (via its 'p' tags).
    """
    dep_id = p_todo.tag_value('id')
    if dep_id:
        self._parentdict[dep_id] = p_todo
        self._depgraph.add_node(hash(p_todo))
        # Connect todos that already declare this todo as their parent
        for dep in \
                [dep for dep in self._todos if dep.has_tag('p', dep_id)]:
            self._add_edge(p_todo, dep, dep_id)
    for dep_id in p_todo.tag_values('p'):
        try:
            parent = self._parentdict[dep_id]
            self._add_edge(parent, p_todo, dep_id)
        except KeyError:
            # Parent not (yet) known; edge will be added when it appears
            pass
Makes sure that the dependency graph is consistent according to the given todo.
def od_reorder_keys(od, keys_in_new_order):
    """Reorder the keys in an OrderedDict ``od`` in-place.

    :raises KeyError: when the new key order is not a permutation of
        the existing keys.
    """
    if set(keys_in_new_order) != set(od.keys()):
        raise KeyError('Keys in the new order do not match existing keys')
    for key in keys_in_new_order:
        # pop+reassign moves the key to the end, building the new order
        od[key] = od.pop(key)
    return od
Reorder the keys in an OrderedDict ``od`` in-place.
def generate(self, text):
    """Try to get the generated file.

    Args:
        text: The text that you want to generate.
    """
    if not text:
        raise Exception("No text to speak")
    if len(text) >= self.MAX_CHARS:
        raise Exception("Number of characters must be less than 2000")
    query = self.__params.copy()
    query["text"] = text
    response = requests.get(self.TTS_URL, params=query, stream=False)
    self._data = response.iter_content()
Try to get the generated file. Args: text: The text that you want to generate.
def Gamma1_gasrad(beta):
    """Gamma1 for a mix of ideal gas and radiation.

    Hansen & Kawaler, page 177, Eqn. 3.110

    Parameters
    ----------
    beta : float
        Gas pressure fraction Pgas/(Pgas+Prad)
    """
    # old_div(2., 3.) on two floats is plain true division — drop the
    # py2 compatibility shim (past.utils.old_div dependency).
    Gamma3minus1 = (2.0 / 3.0) * (4. - 3. * beta) / (8. - 7. * beta)
    Gamma1 = beta + (4. - 3. * beta) * Gamma3minus1
    return Gamma1
Gamma1 for a mix of ideal gas and radiation Hansen & Kawaler, page 177, Eqn. 3.110 Parameters ---------- beta : float Gas pressure fraction Pgas/(Pgas+Prad)
def _run_notty(self, writer):
    """Pager run method for terminals that are not a tty.

    Draws successive pages until the last page is reached, then marks
    the pager dirty.
    """
    page_idx = page_offset = 0
    while True:
        npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
        if npage_idx == self.last_page:
            break
        page_idx = npage_idx
    self.dirty = self.STATE_DIRTY
    return
Pager run method for terminals that are not a tty.
def setiddname(cls, iddname, testing=False):
    """Set the path to the EnergyPlus IDD to be used by eppy.

    Parameters
    ----------
    iddname : str
        Path to the IDD file.
    testing : bool
        When True, suppress IDDAlreadySetError (useful in tests).

    Raises
    ------
    IDDAlreadySetError
        When a different IDD was already set and ``testing`` is False.
    """
    # Idiom fixes: `cls.iddname == None` -> `is None`,
    # `testing == False` -> `not testing`.
    if cls.iddname is None:
        cls.iddname = iddname
        cls.idd_info = None
        cls.block = None
    elif cls.iddname == iddname:
        pass
    else:
        if not testing:
            errortxt = "IDD file is set to: %s" % (cls.iddname,)
            raise IDDAlreadySetError(errortxt)
Set the path to the EnergyPlus IDD for the version of EnergyPlus which is to be used by eppy. Parameters ---------- iddname : str Path to the IDD file. testing : bool Flag to use if running tests since we may want to ignore the `IDDAlreadySetError`. Raises ------ IDDAlreadySetError
def offset(self):
    """Return offset to series data in file, if any.

    Only returns an offset when every page is final and all pages are
    stored contiguously; otherwise returns None.
    """
    if not self._pages:
        return None
    pos = 0
    for page in self._pages:
        if page is None:
            return None
        if not page.is_final:
            return None
        if not pos:
            # First page: remember where its contiguous data ends
            pos = page.is_contiguous[0] + page.is_contiguous[1]
            continue
        if pos != page.is_contiguous[0]:
            # Gap between pages: data is not one contiguous block
            return None
        pos += page.is_contiguous[1]
    page = self._pages[0]
    offset = page.is_contiguous[0]
    if (page.is_imagej or page.is_shaped) and len(self._pages) == 1:
        return offset
    # Sanity check: contiguous span must match the series' byte size
    if pos == offset + product(self.shape) * self.dtype.itemsize:
        return offset
    return None
Return offset to series data in file, if any.
def shutdown(self):
    """Send shutdown command and wait for the process to exit.

    Retries the shutdown command up to two times, waiting 5s for the
    process each time; raises ServersError when it never exits.
    """
    if not process.proc_alive(self.proc):
        return
    logger.info("Attempting to connect to %s", self.hostname)
    client = self.connection
    attempts = 2
    for i in range(attempts):
        logger.info("Attempting to send shutdown command to %s", self.hostname)
        try:
            client.admin.command("shutdown", force=True)
        except ConnectionFailure:
            # Expected: the server drops the connection while shutting down
            pass
        try:
            return process.wait_mprocess(self.proc, 5)
        except TimeoutError as exc:
            logger.info("Timed out waiting on process: %s", exc)
            continue
    raise ServersError("Server %s failed to shutdown after %s attempts" % (self.hostname, attempts))
Send shutdown command and wait for the process to exit.
def run(self):
    """Runs the unit test framework (nose) over ``self.test_args``.

    Returns True on passing and False on failure; re-raises ImportError
    when nose is not installed.
    """
    try:
        import nose
        return nose.run(argv=[sys.argv[0]] + list(self.test_args))
    except ImportError:
        print()
        print("*** Nose library missing. Please install it. ***")
        print()
        raise
Runs the unit test framework. Can be overridden to run anything. Returns True on passing and False on failure.
def parse_modes(params, mode_types=None, prefixes=''):
    """Return a modelist parsed from a MODE event.

    Args:
        params (list of str): Parameters from MODE event.
        mode_types (list): CHANMODES-like mode types (A, B, C, D).
        prefixes (str): PREFIX-like mode types.

    Returns:
        list of [direction, char, value] triples (value None when the
        mode takes no argument or none is available).
    """
    params = list(params)
    if params[0][0] not in '+-':
        raise Exception('first param must start with + or -')
    if mode_types is None:
        mode_types = ['', '', '', '']
    mode_string = params.pop(0)
    args = params
    assembled_modes = []
    direction = mode_string[0]
    for char in mode_string:
        if char in '+-':
            direction = char
            continue
        # Type A/B and prefix modes always take a parameter; type C only
        # when being set.  BUG FIX: due to operator precedence the
        # `len(args)` guard previously bound only to the type-C term, so
        # args.pop(0) raised IndexError when the argument was missing.
        takes_arg = (char in mode_types[0] or char in mode_types[1] or
                     char in prefixes or
                     (char in mode_types[2] and direction == '+'))
        if takes_arg and args:
            value = args.pop(0)
        else:
            value = None
        assembled_modes.append([direction, char, value])
    return assembled_modes
Return a modelist. Args: params (list of str): Parameters from MODE event. mode_types (list): CHANMODES-like mode types. prefixes (str): PREFIX-like mode types.
def timecall(fn=None, immediate=True, timer=time.time):
    """Wrap `fn` and print its execution time.

    Usable directly as ``@timecall`` or with arguments, e.g.
    ``@timecall(immediate=False)`` to print only a summary at program
    termination, or ``@timecall(timer=time.clock)`` to pick another timer.
    """
    if fn is None:
        # Called with arguments: return a decorator capturing them.
        return lambda f: timecall(f, immediate=immediate, timer=timer)
    timed = FuncTimer(fn, immediate=immediate, timer=timer)

    def wrapper(*args, **kw):
        return timed(*args, **kw)

    # Mirror the wrapped function's metadata on the wrapper.
    wrapper.__doc__ = fn.__doc__
    wrapper.__name__ = fn.__name__
    wrapper.__dict__ = fn.__dict__
    wrapper.__module__ = fn.__module__
    return wrapper
Wrap `fn` and print its execution time. Example:: @timecall def somefunc(x, y): time.sleep(x * y) somefunc(2, 3) will print the time taken by somefunc on every call. If you want just a summary at program termination, use @timecall(immediate=False) You can also choose a timing method other than the default ``time.time()``, e.g.: @timecall(timer=time.clock)
def find(self, path, match, flags):
    """Find every matching child path under ``path``.

    :param path: base path to search under
    :param match: regular expression matched against each child path
        relative to ``path``
    :param flags: flags passed to ``re.compile``
    :returns: generator yielding the full matching child paths
    """
    try:
        match = re.compile(match, flags)
    except sre_constants.error as ex:
        # Bad pattern: report it and yield nothing.
        print("Bad regexp: %s" % (ex))
        return
    offset = len(path)
    for cpath in Tree(self, path).get():
        # Match only the portion of the child path below ``path``.
        if match.search(cpath[offset:]):
            yield cpath
Find every matching child path under ``path``.
def set_parent(self, parent):
    """Set the parent reftrack object.

    If a parent gets deleted, the children will be deleted too.

    .. Note:: Once the parent is set, it cannot be set again!

    :param parent: the parent reftrack object
    :type parent: :class:`Reftrack` | None
    :returns: None
    :rtype: None
    :raises: AssertionError
    """
    assert self._parent is None or self._parent is parent,\
        "Cannot change the parent. Can only set from None."
    if parent and self._parent is parent:
        # Already set to this parent; nothing to do.
        return
    self._parent = parent
    if parent:
        refobjinter = self.get_refobjinter()
        refobj = self.get_refobj()
        # Only link the refobjects if not already linked.
        if refobj and not refobjinter.get_parent(refobj):
            refobjinter.set_parent(refobj, parent.get_refobj())
        self._parent.add_child(self)
    if not self.get_refobj():
        # No refobj yet: assign a fresh identifier.
        self.set_id(self.fetch_new_id())
    # Attach our tree item to the parent's item, or to the root item.
    pitem = self._parent._treeitem if self._parent else self.get_root().get_rootitem()
    self._treeitem.set_parent(pitem)
    self.fetch_alien()
Set the parent reftrack object If a parent gets deleted, the children will be deleted too. .. Note:: Once the parent is set, it cannot be set again! :param parent: the parent reftrack object :type parent: :class:`Reftrack` | None :returns: None :rtype: None :raises: AssertionError
def cleanup_custom_options(id, weakref=None):
    """Clean up unused custom option trees.

    Removes a custom tree once every object referencing the custom ``id``
    has been garbage collected or the tree is otherwise unreferenced.

    :param id: identifier of the custom options tree
    :param weakref: dead weakref to drop from the tracking list, if any
    """
    try:
        # While an options context is active, deletion is deferred.
        if Store._options_context:
            return
        weakrefs = Store._weakrefs.get(id, [])
        if weakref in weakrefs:
            weakrefs.remove(weakref)
        # Drop dead or re-assigned references; keep the live ones.
        refs = []
        for wr in list(weakrefs):
            r = wr()
            if r is None or r.id != id:
                weakrefs.remove(wr)
            else:
                refs.append(r)
        if not refs:
            # Nothing references this id anymore: purge from every backend.
            for bk in Store.loaded_backends():
                if id in Store._custom_options[bk]:
                    Store._custom_options[bk].pop(id)
        if not weakrefs:
            Store._weakrefs.pop(id, None)
    except Exception as e:
        # BUG FIX: the format arguments were swapped — the message printed
        # the exception where the id belongs and vice versa.
        raise Exception('Cleanup of custom options tree with id %s '
                        'failed with the following exception: %s, '
                        'an unreferenced orphan tree may persist in '
                        'memory' % (id, e))
Cleans up unused custom trees if all objects referencing the custom id have been garbage collected or tree is otherwise unreferenced.
def print_output(self, per_identity_data: 'RDD') -> None:
    """Write BTS output to stdout.

    If a window BTS was provided, the window BTS output is written;
    otherwise the streaming BTS output is written.

    WARNING - For large datasets this will be extremely slow.

    :param per_identity_data: Output of the `execute()` call.
    """
    if not self._window_bts:
        # No window BTS: dump each streaming BTS record as JSON.
        data = per_identity_data.flatMap(
            lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()])
    else:
        # Window BTS: dump (identity, window output) pairs as JSON.
        data = per_identity_data.map(
            lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder))
    # collect() pulls everything onto the driver — slow for large datasets.
    for row in data.collect():
        print(row)
Basic helper function to write data to stdout. If window BTS was provided then the window BTS output is written, otherwise, the streaming BTS output is written to stdout. WARNING - For large datasets this will be extremely slow. :param per_identity_data: Output of the `execute()` call.
def get_iam_policy(self):
    """Return the current IAM policy as a json-serialized string."""
    checker = AwsLimitChecker()
    policy = checker.get_required_iam_policy()
    # Stable, human-readable serialization.
    return json.dumps(policy, sort_keys=True, indent=2)
Return the current IAM policy as a json-serialized string
def key(self) -> Tuple[int, int]:
    """The unique identifier of the edge: (source index, target index)."""
    source_index = self._source.index
    target_index = self._target.index
    return (source_index, target_index)
The unique identifier of the edge consisting of the indexes of its source and target nodes.
def wait_for_startup(self, timeout=None):
    """Waits for PostgreSQL startup to complete or fail.

    :param timeout: seconds to wait before giving up; None waits indefinitely
    :returns: True if start was successful, False otherwise;
        None if cancelled or timed out
    """
    if not self.is_starting():
        # Should not happen: caller is expected to be in the starting state.
        logger.warning("wait_for_startup() called when not in starting state")
    while not self.check_startup_state_changed():
        with self._cancellable_lock:
            if self._is_cancelled:
                return None
        if timeout and self.time_in_state() > timeout:
            return None
        time.sleep(1)
    return self.state == 'running'
Waits for PostgreSQL startup to complete or fail. :returns: True if start was successful, False otherwise
def reset_highlights(self):
    """Remove red outlines from all buttons."""
    for dtype in ["specimens", "samples", "sites", "locations", "ages"]:
        wind = self.FindWindowByName(dtype + '_btn')
        # Detach the custom paint handler that draws the red outline.
        wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
    self.Refresh()
    # Hide the message sizer and re-fit the layout.
    self.bSizer_msg.ShowItems(False)
    self.hbox.Fit(self)
Remove red outlines from all buttons
def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
    """Draw random samples from a gamma distribution.

    Samples are distributed according to a gamma distribution parametrized
    by *alpha* (shape) and *beta* (scale).

    Parameters
    ----------
    alpha : float or NDArray, optional
        The shape of the gamma distribution. Should be greater than zero.
    beta : float or NDArray, optional
        The scale of the gamma distribution. Should be greater than zero.
        Default is equal to 1.
    shape : int or tuple of ints, optional
        The number of samples to draw per (alpha, beta) pair.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : NDArray, optional
        Store output to an existing NDArray.

    Returns
    -------
    NDArray
        Samples from the gamma distribution.
    """
    return _random_helper(_internal._random_gamma, _internal._sample_gamma, [alpha, beta], shape, dtype, ctx, out, kwargs)
Draw random samples from a gamma distribution. Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). Parameters ---------- alpha : float or NDArray, optional The shape of the gamma distribution. Should be greater than zero. beta : float or NDArray, optional The scale of the gamma distribution. Should be greater than zero. Default is equal to 1. shape : int or tuple of ints, optional The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. dtype : {'float16', 'float32', 'float64'}, optional Data type of output samples. Default is 'float32' ctx : Context, optional Device context of output. Default is current context. Overridden by `alpha.context` when `alpha` is an NDArray. out : NDArray, optional Store output to an existing NDArray. Returns ------- NDArray If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g., `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair. Examples -------- >>> mx.nd.random.gamma(1, 1) [ 1.93308783] <NDArray 1 @cpu(0)> >>> mx.nd.random.gamma(1, 1, shape=(2,)) [ 0.48216391 2.09890771] <NDArray 2 @cpu(0)> >>> alpha = mx.nd.array([1,2,3]) >>> beta = mx.nd.array([2,3,4]) >>> mx.nd.random.gamma(alpha, beta, shape=2) [[ 3.24343276 0.94137681] [ 3.52734375 0.45568955] [ 14.26264095 14.0170126 ]] <NDArray 3x2 @cpu(0)>
def _check(self):
    """Check for inactivity timeout and close the connection if needed."""
    _logger.debug('Check if timeout.')
    # This handle fired; forget it before (possibly) rescheduling.
    self._call_later_handle = None
    if self._touch_time is not None:
        difference = self._event_loop.time() - self._touch_time
        _logger.debug('Time difference %s', difference)
        if difference > self._timeout:
            # Idle too long: close and mark as timed out.
            self._connection.close()
            self._timed_out = True
    # Keep checking for as long as the connection stays open.
    if not self._connection.closed():
        self._schedule()
Check and close connection if needed.
def clean_html(self, html):
    """Apply ``Cleaner`` to an HTML string or document and return a cleaned
    string or document of the same type as the input.
    """
    result_type = type(html)
    if isinstance(html, six.string_types):
        doc = html_fromstring(html)
    else:
        # Work on a copy so the caller's document is left untouched.
        doc = copy.deepcopy(html)
    self(doc)
    # Serialize back to match the input type: bytes, text, or document.
    if issubclass(result_type, six.binary_type):
        return tostring(doc, encoding='utf-8')
    elif issubclass(result_type, six.text_type):
        return tostring(doc, encoding='unicode')
    else:
        return doc
Apply ``Cleaner`` to HTML string or document and return a cleaned string or document.
def delete(self, endpoint, **kwargs):
    """Send HTTP DELETE to the endpoint.

    :arg str endpoint: The endpoint to send to.
    :returns: JSON decoded result.
    :raises: requests.RequestException on timeout or connection error.
    """
    # Merge instance-level request options (copied so self.kwargs is untouched).
    kwargs.update(self.kwargs.copy())
    response = requests.delete(self.make_url(endpoint), **kwargs)
    return _decode_response(response)
Send HTTP DELETE to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error.
def trim_N_nucleotides(prefix, suffix):
    """Drop all occurrences of 'N' from prefix and suffix nucleotide strings.

    The prefix keeps only what follows its last 'N'; the suffix keeps only
    what precedes its first 'N'.

    :returns: (trimmed_prefix, trimmed_suffix) tuple
    """
    head, n_sep, kept = prefix.rpartition('N')
    if n_sep:
        logger.debug(
            "Trimming %d nucleotides from read prefix '%s'",
            len(head) + 1, prefix)
        prefix = kept
    kept, n_sep, tail = suffix.partition('N')
    if n_sep:
        logger.debug(
            "Trimming %d nucleotides from read suffix '%s'",
            len(n_sep) + len(tail), suffix)
        suffix = kept
    return prefix, suffix
Drop all occurrences of 'N' from prefix and suffix nucleotide strings by trimming.
def sizeof(self, context=None) -> int:
    """Return the size of the construct in bytes.

    :param context: Optional context dictionary.
    :raises SizeofError: if the size cannot be determined.
    """
    if context is None:
        context = Context()
    if not isinstance(context, Context):
        # Wrap a plain mapping in a proper Context.
        context = Context(context)
    try:
        return self._sizeof(context)
    except Error:
        # Library errors already carry the right type; re-raise as-is.
        raise
    except Exception as exc:
        # Normalize unexpected failures into the library's error type.
        raise SizeofError(str(exc))
Return the size of the construct in bytes. :param context: Optional context dictionary.
def helper_list(access_token, oid, path):
    """Helper function to list a URL path.

    Args:
        access_token (str): A valid Azure authentication token.
        oid (str): An OID; appended to the path as "('<oid>')" when non-empty.
        path (str): A URL Path.

    Returns:
        HTTP response. JSON body.
    """
    if oid != "":
        path = ''.join([path, "('", oid, "')"])
    endpoint = ''.join([ams_rest_endpoint, path])
    return do_ams_get(endpoint, path, access_token)
Helper Function to list a URL path. Args: access_token (str): A valid Azure authentication token. oid (str): An OID. path (str): A URL Path. Returns: HTTP response. JSON body.
async def scp_to(self, source, destination, user='ubuntu', proxy=False, scp_opts=''):
    """Transfer files to this unit.

    :param str source: Local path of file(s) to transfer
    :param str destination: Remote destination of transferred files
    :param str user: Remote username
    :param bool proxy: Proxy through the Juju API server
    :param scp_opts: Additional options to the `scp` command
    :type scp_opts: str or list
    """
    # Delegate to the machine hosting this unit.
    await self.machine.scp_to(source, destination, user=user, proxy=proxy, scp_opts=scp_opts)
Transfer files to this unit. :param str source: Local path of file(s) to transfer :param str destination: Remote destination of transferred files :param str user: Remote username :param bool proxy: Proxy through the Juju API server :param scp_opts: Additional options to the `scp` command :type scp_opts: str or list
def _write_to_cache(self, expr, res):
    """Store the cached result without indentation.

    Normalizes indentation with ``dedent`` before delegating to the base
    implementation.
    """
    res = dedent(res)
    super()._write_to_cache(expr, res)
Store the cached result without indentation, and without the keyname
def supported_auth_methods(self) -> List[str]:
    """Get all AUTH methods supported both by the server and by us.

    Preference order from ``AUTH_METHODS`` is preserved.
    """
    server_methods = self.server_auth_methods
    return [method for method in self.AUTH_METHODS if method in server_methods]
Get all AUTH methods supported by the both server and by us.
def clear(self):
    """Clear all the cache and release memory."""
    # Drop references held by every node so their objects can be collected.
    for node in self.dli():
        node.empty = True
        node.key = None
        node.value = None
    # Rebuild the circular doubly-linked list with a single sentinel node.
    self.head = _dlnode()
    self.head.next = self.head
    self.head.prev = self.head
    self.listSize = 1
    self.table.clear()
    # Reset statistics counters.
    self.hit_cnt = 0
    self.miss_cnt = 0
    self.remove_cnt = 0
Clear all the cache and release memory.
def _checkFileExists(self):
    """Verify that the underlying file exists.

    Sets the _exception attribute (via setException) if it does not.

    :returns: True if the file exists. If self._fileName is None,
        nothing is checked and True is returned.
    """
    if self._fileName and not os.path.exists(self._fileName):
        msg = "File not found: {}".format(self._fileName)
        logger.error(msg)
        self.setException(IOError(msg))
        return False
    else:
        return True
Verifies that the underlying file exists and sets the _exception attribute if not Returns True if the file exists. If self._fileName is None, nothing is checked and True is returned.
def reset(self, value=None):
    """Reset the start time of the interval to now or the specified value.

    :param value: explicit start time; when None, the current
        performance-counter time is used.
    """
    if value is None:
        # BUG FIX: time.clock() was deprecated since Python 3.3 and removed
        # in 3.8; time.perf_counter() is its documented replacement.
        value = time.perf_counter()
    self.start = value
    # Optionally restore the configured initial value.
    if self.value_on_reset:
        self.value = self.value_on_reset
Resets the start time of the interval to now or the specified value.
def get_recent_comments(number=5, template='zinnia/tags/comments_recent.html'):
    """Return the most recent comments.

    :param number: maximum number of comments to return
    :param template: template used to render the comments
    """
    # Only comments attached to published entries are eligible.
    entry_published_pks = map(smart_text, Entry.published.values_list('id', flat=True))
    content_type = ContentType.objects.get_for_model(Entry)
    # Public comments that are unflagged or approved by a moderator.
    comments = get_comment_model().objects.filter(
        Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
        content_type=content_type,
        object_pk__in=entry_published_pks,
        is_public=True).order_by('-pk')[:number]
    comments = comments.prefetch_related('content_object')
    return {'template': template, 'comments': comments}
Return the most recent comments.
def first_and_second_harmonic_function(phi, c):
    """Compute the first- and second-order harmonic function value used to
    calculate the corrections for ellipse fitting:

    .. math::
        f(phi) = c[0] + c[1]*\\sin(phi) + c[2]*\\cos(phi)
                 + c[3]*\\sin(2*phi) + c[4]*\\cos(2*phi)

    Parameters
    ----------
    phi : float or `~numpy.ndarray`
        Angle(s) along the elliptical path, measured from the semimajor axis.
    c : `~numpy.ndarray` of shape (5,)
        The five harmonic coefficients.

    Returns
    -------
    float or `~numpy.ndarray`
        Function value(s) at the given angle(s).
    """
    c0, a1, b1, a2, b2 = c[0], c[1], c[2], c[3], c[4]
    return (c0 + a1*np.sin(phi) + b1*np.cos(phi)
            + a2*np.sin(2*phi) + b2*np.cos(2*phi))
Compute the harmonic function value used to calculate the corrections for ellipse fitting. This function includes simultaneously both the first and second order harmonics: .. math:: f(phi) = c[0] + c[1]*\\sin(phi) + c[2]*\\cos(phi) + c[3]*\\sin(2*phi) + c[4]*\\cos(2*phi) Parameters ---------- phi : float or `~numpy.ndarray` The angle(s) along the elliptical path, going towards the positive y axis, starting coincident with the position angle. That is, the angles are defined from the semimajor axis that lies in the positive x quadrant. c : `~numpy.ndarray` of shape (5,) Array containing the five harmonic coefficients. Returns ------- result : float or `~numpy.ndarray` The function value(s) at the given input angle(s).
def get_mean_table(self, imt, rctx):
    """Return amplification factors for the mean, given the rupture and
    intensity measure type.

    :param imt: intensity measure type (PGA, PGV or SA)
    :param rctx: rupture context providing the magnitude ``mag``
    :returns:
        amplification table as an array of [Number Distances, Number Levels]
    """
    # BUG FIX: the original test was `imt.name in 'PGA PGV'`, a substring
    # check that also matched names like 'A' or 'GA P'; use explicit
    # membership instead.
    if imt.name in ("PGA", "PGV"):
        # Interpolate along the magnitude axis in log10 space.
        interpolator = interp1d(self.magnitudes,
                                numpy.log10(self.mean[imt.name]), axis=2)
        output_table = 10.0 ** (
            interpolator(rctx.mag).reshape(self.shape[0], self.shape[3]))
    else:
        # SA: interpolate along period first, then along magnitude.
        interpolator = interp1d(numpy.log10(self.periods),
                                numpy.log10(self.mean["SA"]), axis=1)
        period_table = interpolator(numpy.log10(imt.period))
        mag_interpolator = interp1d(self.magnitudes, period_table, axis=1)
        output_table = 10.0 ** mag_interpolator(rctx.mag)
    return output_table
Returns amplification factors for the mean, given the rupture and intensity measure type. :returns: amplification table as an array of [Number Distances, Number Levels]
def MatchBestComponentName(self, component):
    """Returns the name of the component which matches best our base listing.

    In order to do the best case insensitive matching we list the files in
    the base handler and return the base match for this component.

    Args:
        component: A component name which should be present in this directory.

    Returns:
        the best component name (as a new pathspec).
    """
    fd = self.OpenAsContainer()
    file_listing = set(fd.ListNames())
    # Prefer an exact match; otherwise fall back to a case-insensitive scan.
    if component not in file_listing:
        lower_component = component.lower()
        for x in file_listing:
            if lower_component == x.lower():
                component = x
                break
    if fd.supported_pathtype != self.pathspec.pathtype:
        # Handler switched path types: build a fresh pathspec.
        new_pathspec = rdf_paths.PathSpec(
            path=component, pathtype=fd.supported_pathtype)
    else:
        new_pathspec = self.pathspec.last.Copy()
        new_pathspec.path = component
    return new_pathspec
Returns the name of the component which matches best our base listing. In order to do the best case insensitive matching we list the files in the base handler and return the base match for this component. Args: component: A component name which should be present in this directory. Returns: the best component name.
def extract_columns(data, *cols):
    """Extract columns specified in the argument list.

    >>> extract_columns([[10, 20], [30, 40], [50, 60]], 0)
    [[10], [30], [50]]

    :param data: sequence of rows (indexable sequences)
    :param cols: column indices to extract from each row
    :returns: list of rows restricted to the requested columns
    :raises IndexError: if a requested column is missing from a row
    """
    out = []
    for row in data:
        try:
            out.append([row[c] for c in cols])
        except IndexError:
            # BUG FIX: the original message printed the partially-built
            # column list, which obscured the failing indices; report the
            # requested columns and the offending row instead.
            raise IndexError("data=%s cols=%s row=%s" % (data, cols, row))
    return out
Extract columns specified in the argument list. >>> chart_data.extract_columns([[10,20], [30,40], [50,60]], 0) [[10],[30],[50]]
def tas53(msg):
    """Aircraft true airspeed, BDS 5,3 message.

    Args:
        msg (String): 28 bytes hexadecimal message

    Returns:
        float: true airspeed in knots, or None if the status bit is unset
    """
    d = hex2bin(data(msg))
    # Bit 34 (index 33) is the status bit for the true airspeed field.
    if d[33] == '0':
        return None
    # 12-bit field at bits 35-46, 0.5 kt resolution.
    tas = bin2int(d[34:46]) * 0.5
    return round(tas, 1)
Aircraft true airspeed, BDS 5,3 message Args: msg (String): 28 bytes hexadecimal message Returns: float: true airspeed in knots
def print_result(overview, *names):
    """Print the result of a verisure request.

    With no ``names``, the full overview is dumped as indented JSON.
    Each name is a '/'-separated path selecting a sub-tree to dump.
    """
    def dump(obj):
        print(json.dumps(obj, indent=4, separators=(',', ': ')))

    if not names:
        dump(overview)
        return
    for name in names:
        node = overview
        # Walk down the '/'-separated path.
        for part in name.split('/'):
            node = node[part]
        dump(node)
Print the result of a verisure request
def full_scope(self):
    """Return the full scope for use with passing to engines transparently
    as a mapping.

    Returns
    -------
    vars : DeepChainMap
        All variables in this scope.
    """
    # Temporaries shadow resolvers, which shadow the regular scope.
    maps = [self.temps] + self.resolvers.maps + self.scope.maps
    return DeepChainMap(*maps)
Return the full scope for use with passing to engines transparently as a mapping. Returns ------- vars : DeepChainMap All variables in this scope.
def _clear_current_task(self):
    """Clear the task-related attributes on the current state.

    Used when switching WF to WF, so stale task information does not
    leak into the next workflow.
    """
    current = self.current
    current.task_name = None
    current.task_type = None
    current.task = None
Clear tasks related attributes, checks permissions While switching WF to WF, authentication and permissions are checked for new WF.
def parse(inp, format=None, encoding='utf-8', force_types=True):
    """Parse input from file-like object, unicode string or byte string.

    Args:
        inp: file-like object, unicode string or byte string with the markup
        format: explicitly override the guessed `inp` markup format
        encoding: `inp` encoding, defaults to utf-8
        force_types: if `True`, integers, floats, booleans and none/null
            are recognized and returned as proper types instead of strings;
            if `False`, everything is converted to strings;
            if `None`, backend return value is used

    Returns:
        parsed input (dict or list) containing unicode values

    Raises:
        AnyMarkupError if a problem occurs while parsing `inp`
    """
    proper_inp = inp
    if hasattr(inp, 'read'):
        proper_inp = inp.read()
    # Normalize everything to bytes for the format sniffer and backends.
    if isinstance(proper_inp, six.text_type):
        proper_inp = proper_inp.encode(encoding)
    # A file name, if available, helps guess the format from its extension.
    fname = None
    if hasattr(inp, 'name'):
        fname = inp.name
    fmt = _get_format(format, fname, proper_inp)
    proper_inp = six.BytesIO(proper_inp)
    try:
        res = _do_parse(proper_inp, fmt, encoding, force_types)
    except Exception as e:
        # Wrap any backend failure in the library's own exception type.
        raise AnyMarkupError(e, traceback.format_exc())
    if res is None:
        res = {}
    return res
Parse input from file-like object, unicode string or byte string. Args: inp: file-like object, unicode string or byte string with the markup format: explicitly override the guessed `inp` markup format encoding: `inp` encoding, defaults to utf-8 force_types: if `True`, integers, floats, booleans and none/null are recognized and returned as proper types instead of strings; if `False`, everything is converted to strings; if `None`, backend return value is used Returns: parsed input (dict or list) containing unicode values Raises: AnyMarkupError if a problem occurs while parsing `inp`
def get(self, section, option, as_list=False):
    """Fetch ``option`` from ``section``, optionally coercing to a list.

    The optional ``as_list`` argument ensures a list is returned, which is
    helpful when iterating over an option which may or may not be a multivar.
    """
    value = super(GitConfigParser, self).get(section, option)
    if as_list and not isinstance(value, list):
        value = [value]
    return value
Adds an optional "as_list" argument to ensure a list is returned. This is helpful when iterating over an option which may or may not be a multivar.
def deleted(self, base: pathlib.PurePath = pathlib.PurePath(), include_children: bool = True, include_directories: bool = True) -> Iterator[str]:
    """Find the paths of entities deleted between the left and right
    entities in this comparison.

    :param base: The base directory to recursively append to entities.
    :param include_children: Whether to recursively include children of
        deleted directories.  NOTE(review): not consulted in this body —
        presumably honoured by overriding/recursive implementations; confirm.
    :param include_directories: Whether to include directories in the
        returned iterable.  NOTE(review): likewise not consulted here.
    :return: An iterable of deleted paths.
    """
    if self.is_deleted:
        yield str(base / self.left.name)
Find the paths of entities deleted between the left and right entities in this comparison. :param base: The base directory to recursively append to entities. :param include_children: Whether to recursively include children of deleted directories. These are themselves deleted by definition, however it may be useful to the caller to list them explicitly. :param include_directories: Whether to include directories in the returned iterable. :return: An iterable of deleted paths.