code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_dropout(x, rate=0.0, init=True):
    """Apply dropout to ``x`` with the given rate.

    Dropout is skipped entirely during initialization/prediction
    (``init=True``) or when the rate is zero.

    :param x: 4-D tensor of activations, shape (NHWC).
    :param rate: dropout rate.
    :param init: True during initialization; disables dropout.
    :return: activations, with dropout applied when enabled.
    """
    skip_dropout = init or rate == 0
    if skip_dropout:
        return x
    return tf.layers.dropout(x, rate=rate, training=True)
Dropout x with dropout_rate = rate. Apply zero dropout during init or prediction time. Args: x: 4-D Tensor, shape=(NHWC). rate: Dropout rate. init: Initialization. Returns: x: activations after dropout.
def remove_quotes(self, value):
    """Remove one layer of matching surrounding quotes from ``value``
    and unescape any contained quotes of that same type.
    """
    if not value:
        return value
    first, last = value[0], value[-1]
    for quote in ('"', "'"):
        if first == quote and last == quote:
            return value[1:-1].replace('\\' + quote, quote)
    return value
Remove any surrounding quotes from a value and unescape any contained quotes of that type.
def insert_json(table=None, bulk_size=1000, concurrency=25, hosts=None, output_fmt=None):
    """Insert JSON lines fed into stdin into a Crate cluster.

    If no hosts are specified the statements are only printed.

    :param table: target table name.
    :param bulk_size: bulk size of the insert statements.
    :param concurrency: number of operations to run concurrently.
    :param hosts: hostname:port pairs of the Crate nodes.
    :param output_fmt: format used for the final stats output.
    """
    # No hosts -> dry-run mode: just print the statements.
    if not hosts:
        return print_only(table)
    # Lazily build bulk insert statements from stdin.
    queries = (to_insert(table, d) for d in dicts_from_stdin())
    bulk_queries = as_bulk_queries(queries, bulk_size)
    print('Executing inserts: bulk_size={} concurrency={}'.format(
        bulk_size, concurrency), file=sys.stderr)
    stats = Stats()
    with clients.client(hosts, concurrency=concurrency) as client:
        f = partial(aio.measure, stats, client.execute_many)
        try:
            aio.run_many(f, bulk_queries, concurrency)
        except clients.SqlException as e:
            raise SystemExit(str(e))
    try:
        print(format_stats(stats.get(), output_fmt))
    except KeyError:
        # No samples collected means stdin produced no data at all.
        if not stats.sampler.values:
            raise SystemExit('No data received via stdin')
        raise
Insert JSON lines fed into stdin into a Crate cluster. If no hosts are specified the statements will be printed. Args: table: Target table name. bulk_size: Bulk size of the insert statements. concurrency: Number of operations to run concurrently. hosts: hostname:port pairs of the Crate nodes
def merge_entities(self, from_entity_ids, to_entity_id, force=False, mount_point=DEFAULT_MOUNT_POINT):
    """Merge many entities into one entity.

    Supported methods: POST: /{mount_point}/entity/merge.

    :param from_entity_ids: entity IDs that need to be merged.
    :param to_entity_id: entity ID into which all the others are merged.
    :param force: follow the 'mine' strategy for merging MFA secrets.
    :param mount_point: the "path" the method/backend was mounted on.
    :return: the response of the request.
    """
    api_path = '/v1/{mount_point}/entity/merge'.format(mount_point=mount_point)
    payload = {
        'from_entity_ids': from_entity_ids,
        'to_entity_id': to_entity_id,
        'force': force,
    }
    return self._adapter.post(url=api_path, json=payload)
Merge many entities into one entity. Supported methods: POST: /{mount_point}/entity/merge. Produces: 204 (empty body) :param from_entity_ids: Entity IDs which need to be merged. :type from_entity_ids: array :param to_entity_id: Entity ID into which all the other entities need to get merged. :type to_entity_id: str | unicode :param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the same type both in entities that are merged from and in the entity into which all others are getting merged, secrets in the destination will be unaltered. If not set, this API will throw an error containing all the conflicts. :type force: bool :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
def delete(self):
    """Delete the object from the db if its pk is set.

    After deletion the pk attribute is cleared and every remaining
    non-None field is recorded as modified.

    :return: True if a delete was issued, False if pk was not set.
    """
    ret = False
    pk = self.pk
    if pk:
        pk_name = self.schema.pk.name
        self.query.is_field(pk_name, pk).delete()
        setattr(self, pk_name, None)
        self.reset_modified()
        for field_name in self.schema.fields:
            # `is not None` instead of `!= None` (identity check, PEP 8)
            if getattr(self, field_name, None) is not None:
                self.modified_fields.add(field_name)
        ret = True
    return ret
delete the object from the db if pk is set
def parse_xml(self, node):
    """Parse an Image Layer from an ElementTree xml node.

    :param node: ElementTree xml node
    :return: self
    """
    self._set_properties(node)
    attrs = node.attrib
    self.name = attrs.get('name')
    self.opacity = attrs.get('opacity', self.opacity)
    self.visible = attrs.get('visible', self.visible)
    image_node = node.find('image')
    self.source = image_node.attrib.get('source')
    self.trans = image_node.attrib.get('trans')
    return self
Parse an Image Layer from ElementTree xml node :param node: ElementTree xml node :return: self
def list_packages_in_eups_table(table_text):
    """List the names of packages required by an EUPS table file.

    Parameters
    ----------
    table_text : `str`
        The text content of an EUPS table file.

    Returns
    -------
    names : `list` [`str`]
        Package names required by the table file.
    """
    logger = logging.getLogger(__name__)
    setup_pattern = re.compile(r'setupRequired\((?P<name>\w+)\)')
    names = [match.group('name') for match in setup_pattern.finditer(table_text)]
    logger.debug('Packages listed in the table file: %r', names)
    return names
List the names of packages that are required by an EUPS table file. Parameters ---------- table_text : `str` The text content of an EUPS table file. Returns ------- names : `list` [`str`] List of package names that are required by the EUPS table file.
def set_device(cuda, local_rank):
    """Set the active device and return an instance of torch.device.

    :param cuda: if True, use cuda
    :param local_rank: local rank of the worker
    :return: the selected torch.device
    """
    if not cuda:
        return torch.device('cpu')
    torch.cuda.set_device(local_rank)
    return torch.device('cuda')
Sets device based on local_rank and returns instance of torch.device. :param cuda: if True: use cuda :param local_rank: local rank of the worker
def announcement_posted_hook(request, obj):
    """Runs whenever a new announcement is created, or a request is
    approved and posted.

    Posts to Twitter and sends notification emails when the
    announcement has notifications enabled.

    :param request: the current request.
    :param obj: the Announcement object.
    """
    logger.debug("Announcement posted")
    if obj.notify_post:
        logger.debug("Announcement notify on")
        announcement_posted_twitter(request, obj)
        try:
            notify_all = obj.notify_email_all
        except AttributeError:
            # Older objects may not carry the flag; default to targeted email.
            notify_all = False
        try:
            if notify_all:
                announcement_posted_email(request, obj, True)
            else:
                announcement_posted_email(request, obj)
        except Exception as e:
            # Surface the failure to the user, then re-raise.
            logger.error("Exception when emailing announcement: {}".format(e))
            messages.error(request, "Exception when emailing announcement: {}".format(e))
            raise e
    else:
        logger.debug("Announcement notify off")
Runs whenever a new announcement is created, or a request is approved and posted. obj: The Announcement object
def _detect_available_configs():
    """Return all currently used channels plus one currently unused channel.

    .. note::
        This method will run into problems if thousands of
        autodetected busses are used at once.
    """
    with channels_lock:
        detected = list(channels.keys())

    def _random_name():
        return "channel-{}".format(randint(0, 9999))

    candidate = _random_name()
    while candidate in detected:
        candidate = _random_name()
    detected += [candidate]
    return [
        {'interface': 'virtual', 'channel': name}
        for name in detected
    ]
Returns all currently used channels as well as one other currently unused channel. .. note:: This method will run into problems if thousands of autodetected busses are used at once.
def stop(self):
    """Stop the daemonized process.

    If the process is already stopped this call exits successfully.
    If the process cannot be stopped this call exits with code
    STOP_FAILED.
    """
    if self.pid is None:
        return None
    try:
        # Keep signalling until the process disappears, which raises.
        while True:
            self.send(signal.SIGTERM)
            time.sleep(0.1)
    except RuntimeError as err:
        # "No such process" means the process has exited: success.
        # (fixed typo: "Succesfully" -> "Successfully")
        if "No such process" in str(err):
            LOG.info("Successfully stopped the process.")
            return None
        LOG.exception("Failed to stop the process:")
        sys.exit(exit.STOP_FAILED)
    except TypeError as err:
        # pid became None mid-loop -> process already gone: success.
        if "an integer is required" in str(err):
            LOG.info("Successfully stopped the process.")
            return None
        LOG.exception("Failed to stop the process:")
        sys.exit(exit.STOP_FAILED)
Stop the daemonized process. If the process is already stopped this call should exit successfully. If the process cannot be stopped this call should exit with code STOP_FAILED.
def _parse_property(self, node):
    """Parse a property node.

    :param node: the property node
    :return: a (name, value) tuple
    :raise KeyError: a required attribute is missing
    """
    name = node.attrib[ATTR_NAME]
    vtype = node.attrib.get(ATTR_VALUE_TYPE, TYPE_STRING)
    try:
        first_child = next(iter(node))
        value = self._parse_value_node(vtype, first_child)
    except StopIteration:
        # No child element: the value lives in the node's attribute.
        value = self._convert_value(vtype, node.attrib[ATTR_VALUE])
    return name, value
Parses a property node :param node: The property node :return: A (name, value) tuple :raise KeyError: Attribute missing
def getRloc16(self):
    """Get the RLOC16 short address of the device.

    :return: the RLOC16 short address as an int.
    """
    # print() with a single argument behaves identically on Python 2
    # and 3; the original bare print statement was Python-2-only syntax.
    print('%s call getRloc16' % self.port)
    rloc16 = self.__sendCommand('rloc16')[0]
    return int(rloc16, 16)
get rloc16 short address
def is_subdomain(self, other):
    """Is self a subdomain of other?

    The notion of subdomain includes equality.
    @rtype: bool
    """
    (relation, _order, _nlabels) = self.fullcompare(other)
    # Membership test replaces the verbose if/return True/return False chain.
    return relation in (NAMERELN_SUBDOMAIN, NAMERELN_EQUAL)
Is self a subdomain of other? The notion of subdomain includes equality. @rtype: bool
def add_email_address(self, email, hidden=None):
    """Add email address.

    Args:
        :param email: email of the author.
        :type email: string
        :param hidden: if email is public or not.
        :type hidden: boolean
    """
    existing = get_value(self.obj, 'email_addresses', [])
    matching = [entry for entry in existing if entry.get('value') == email]
    if not matching:
        new_entry = {'value': email}
        if hidden is not None:
            new_entry['hidden'] = hidden
        self._append_to('email_addresses', new_entry)
    elif hidden is not None:
        # Email already present: only update its visibility in place.
        matching[0]['hidden'] = hidden
Add email address. Args: :param email: email of the author. :type email: string :param hidden: if email is public or not. :type hidden: boolean
def media_download(self, mxcurl, allow_remote=True):
    """Download raw media from the provided mxc URL.

    Args:
        mxcurl (str): mxc media URL.
        allow_remote (bool): whether the server may fetch remote media.
            Defaults to true if not provided.
    """
    # Guard clause: reject anything that is not an mxc URL up front.
    if not mxcurl.startswith('mxc://'):
        raise ValueError(
            "MXC URL '%s' did not begin with 'mxc://'" % mxcurl
        )
    query_params = {}
    if not allow_remote:
        query_params["allow_remote"] = False
    return self._send(
        "GET",
        mxcurl[6:],
        api_path="/_matrix/media/r0/download/",
        query_params=query_params,
        return_json=False,
    )
Download raw media from provided mxc URL. Args: mxcurl (str): mxc media URL. allow_remote (bool): indicates to the server whether it should attempt to fetch the media if it is deemed remote; when False, remote media is not fetched. Defaults to true if not provided.
def _call_to(self, from_node, to_func, ret_node, stmt_idx=None, ins_addr=None, return_to_outside=False):
    """Register an edge between the caller basic block and callee function.

    :param from_node: basic block that control flow leaves during the transition.
    :param to_func: the function being called.
    :param ret_node: basic block control flow returns to after the call, or None.
    :param stmt_idx: statement ID of this call.
    :param ins_addr: instruction address of this call.
    :param return_to_outside: whether the return target lies outside this function.
    """
    self._register_nodes(True, from_node)
    # Edge type distinguishes syscalls from ordinary calls.
    if to_func.is_syscall:
        self.transition_graph.add_edge(from_node, to_func, type='syscall', stmt_idx=stmt_idx, ins_addr=ins_addr)
    else:
        self.transition_graph.add_edge(from_node, to_func, type='call', stmt_idx=stmt_idx, ins_addr=ins_addr)
    if ret_node is not None:
        self._fakeret_to(from_node, ret_node, to_outside=return_to_outside)
    # Invalidate the cached local transition graph so it is rebuilt lazily.
    self._local_transition_graph = None
Registers an edge between the caller basic block and callee function. :param from_node: The basic block that control flow leaves during the transition. :type from_node: angr.knowledge.CodeNode :param to_func: The function that we are calling :type to_func: Function :param ret_node: The basic block that control flow should return to after the function call. :type ret_node: angr.knowledge.CodeNode or None :param stmt_idx: Statement ID of this call. :type stmt_idx: int, str or None :param ins_addr: Instruction address of this call. :type ins_addr: int or None
def content(self, value):
    """Set the actual HTML content after validating it.

    :param value: the actual HTML content.
    :type value: string
    """
    # Validation raises before anything is stored.
    self._validator.validate_message_dict(value)
    self._content = value
The actual HTML content. :param value: The actual HTML content. :type value: string
def transform(self, crs):
    """Transform Geometry from current CRS to target CRS.

    :param crs: target CRS
    :return: Geometry in target CRS
    """
    target_crs = CRS(crs)
    shape = self.geometry
    # Only reproject when the CRS actually changes.
    if target_crs is not self.crs:
        reproject = functools.partial(
            pyproj.transform, self.crs.projection(), target_crs.projection())
        shape = shapely.ops.transform(reproject, shape)
    return Geometry(shape, crs=target_crs)
Transforms Geometry from current CRS to target CRS :param crs: target CRS :type crs: constants.CRS :return: Geometry in target CRS :rtype: Geometry
def imbalance_check(P):
    """Check if the dataset is imbalanced.

    :param P: condition positive counts
    :type P: dict
    :return: is_imbalanced as bool
    """
    counts = list(P.values())
    max_value, min_value = max(counts), min(counts)
    # Guard against division by zero when the smallest class is empty.
    balance_ratio = max_value / min_value if min_value > 0 else max_value
    return balance_ratio > BALANCE_RATIO_THRESHOLD
Check if the dataset is imbalanced. :param P: condition positive :type P : dict :return: is_imbalanced as bool
def get_object(self, **kwargs):
    """Return the cached object if already set, otherwise fetch and cache it.

    Preprocessing of a view may set ``self.object`` before this is
    called; in that case no extra query is made.
    """
    cached = getattr(self, 'object', None)
    if cached:
        return cached
    self.object = super(CommonSingleObjectViewMixin, self).get_object(**kwargs)
    return self.object
Sometimes preprocessing of a view need to happen before the object attribute has been set for a view. In this case, just return the object if it has already been set when it's called down the road since there's no need to make another query.
def get_built_image_info(self):
    """Query docker about the built image.

    :return: dict with the image information
    :raises RuntimeError: when zero or multiple matching images exist
    """
    logger.info("getting information about built image '%s'", self.image)
    image_info = self.tasker.get_image_info_by_image_name(self.image)
    count = len(image_info)
    if count == 1:
        return image_info[0]
    if count == 0:
        logger.error("image '%s' not found", self.image)
        raise RuntimeError("image '%s' not found" % self.image)
    logger.error("multiple (%d) images found for image '%s'", count, self.image)
    raise RuntimeError("multiple (%d) images found for image '%s'" % (count, self.image))
query docker about built image :return dict
def _compile_qt_resources():
    """Compile the PyQt resources file, if one is configured."""
    if not config.QT_RES_SRC():
        return
    epab.utils.ensure_exe('pyrcc5')
    LOGGER.info('compiling Qt resources')
    elib_run.run(f'pyrcc5 {config.QT_RES_SRC()} -o {config.QT_RES_TGT()}')
Compiles PyQT resources file
def _findroot(self, x): if x.startswith("."): x = x[1:] if not x.endswith("."): x += "." max = 0 root = "." root_key = "" for k in six.iterkeys(self): if x.startswith(k + "."): if max < len(k): max = len(k) root = self[k] root_key = k return root, root_key, x[max:-1]
Internal MIBDict function used to find a partial OID
def _get_or_create_s3_bucket(s3, name): exists = True try: s3.meta.client.head_bucket(Bucket=name) except botocore.exceptions.ClientError as e: error_code = int(e.response["Error"]["Code"]) if error_code == 404: exists = False else: raise if not exists: s3.create_bucket(Bucket=name) return s3.Bucket(name)
Get an S3 bucket resource after making sure it exists
def _update_from_pb(self, instance_pb): if not instance_pb.display_name: raise ValueError("Instance protobuf does not contain display_name") self.display_name = instance_pb.display_name self.configuration_name = instance_pb.config self.node_count = instance_pb.node_count
Refresh self from the server-provided protobuf. Helper for :meth:`from_pb` and :meth:`reload`.
def reset(self):
    """Clear all cell activity in L4 and every L6a module."""
    for component in [self.L4] + list(self.L6aModules):
        component.reset()
Clear all cell activity.
def _subtract(start, stop, intervals):
    """Subtract intervals from a spanning interval.

    Yields the gaps in ``[start, stop]`` left between the (collapsed)
    ``intervals``.

    NOTE(review): when ``intervals`` is empty nothing is yielded at
    all (not even the full span) — presumably callers handle that
    case; confirm before relying on it.
    """
    remainder_start = start
    sub_stop = None
    for sub_start, sub_stop in _collapse(intervals):
        # Gap between the end of what we've consumed and the next interval.
        if remainder_start < sub_start:
            yield _Interval(remainder_start, sub_start)
        remainder_start = sub_stop
    # Tail gap after the last subtracted interval, if any.
    if sub_stop is not None and sub_stop < stop:
        yield _Interval(sub_stop, stop)
Subtract intervals from a spanning interval.
def add_relation(self, url_arr):
    """Add relationship.

    Records bidirectional relations between the current post/app (taken
    from secure cookies) and the target identified by ``url_arr[1]``.

    :param url_arr: [kind, uid] pair; kind is 'info' or 'post'.
    """
    # Bail out if the target post does not exist.
    if MPost.get_by_uid(url_arr[1]):
        pass
    else:
        return False
    last_post_id = self.get_secure_cookie('last_post_uid')
    if last_post_id:
        last_post_id = last_post_id.decode('utf-8')
    last_app_id = self.get_secure_cookie('use_app_uid')
    if last_app_id:
        last_app_id = last_app_id.decode('utf-8')
    # Weights 2/1 distinguish forward and backward relation directions.
    if url_arr[0] == 'info':
        if last_post_id:
            MRelation.add_relation(last_post_id, url_arr[1], 2)
            MRelation.add_relation(url_arr[1], last_post_id, 1)
    if url_arr[0] == 'post':
        if last_app_id:
            MRelation.add_relation(last_app_id, url_arr[1], 2)
            MRelation.add_relation(url_arr[1], last_app_id, 1)
Add relationship.
def cast(
    source: Union[DataType, str], target: Union[DataType, str], **kwargs
) -> DataType:
    """Attempt an implicit cast from source dtype to target dtype.

    :raises com.IbisTypeError: when the cast is not allowed.
    """
    source_dtype = dtype(source)
    target_dtype = dtype(target)
    if not castable(source_dtype, target_dtype, **kwargs):
        message = 'Datatype {} cannot be implicitly casted to {}'.format(
            source_dtype, target_dtype)
        raise com.IbisTypeError(message)
    return target_dtype
Attempts to implicitly cast from source dtype to target dtype
def isEquilateral(self):
    """True if all sides of the triangle are the same length.

    All equilateral triangles are also isosceles and acute.
    """
    return (nearly_eq(self.a, self.b)
            and nearly_eq(self.b, self.c)
            and nearly_eq(self.a, self.c))
True if all sides of the triangle are the same length. All equilateral triangles are also isosceles. All equilateral triangles are also acute.
def UninstallDriver(bundle_name):
    """Calls into the IOKit to unload a kext by its name.

    Args:
        bundle_name: The bundle identifier of the kernel extension as
            defined in Info.plist field CFBundleIdentifier.

    Returns:
        The error code from the library call. objc.OS_SUCCESS if
        successful.
    """
    km = objc.KextManager()
    cf_bundle_name = km.PyStringToCFString(bundle_name)
    status = km.iokit.KextManagerUnloadKextWithIdentifier(cf_bundle_name)
    # Release the CFString created above to avoid leaking it.
    km.dll.CFRelease(cf_bundle_name)
    return status
Calls into the IOKit to unload a kext by its name. Args: bundle_name: The bundle identifier of the kernel extension as defined in Info.plist field CFBundleIdentifier. Returns: The error code from the library call. objc.OS_SUCCESS if successfull.
def previous_minute(self, dt):
    """Given a dt, return the previous exchange minute.

    Parameters
    ----------
    dt: pd.Timestamp
        The dt for which to get the previous exchange minute.

    Returns
    -------
    pd.Timestamp
        The previous exchange minute.
    """
    position = previous_divider_idx(self._trading_minutes_nanos, dt.value)
    return self.all_minutes[position]
Given a dt, return the previous exchange minute. Raises KeyError if the given timestamp is not an exchange minute. Parameters ---------- dt: pd.Timestamp The dt for which to get the previous exchange minute. Returns ------- pd.Timestamp The previous exchange minute.
def run(self, handler):
    """Start bottle server on eventlet's WSGI server.

    :param handler: the WSGI application to serve.
    :raises RuntimeError: if eventlet.monkey_patch() was not applied.
    """
    import eventlet.patcher
    # eventlet requires monkey-patching before the server can run.
    if not eventlet.patcher.is_monkey_patched(os):
        msg = ("%s requires eventlet.monkey_patch() (before "
               "import)" % self.__class__.__name__)
        raise RuntimeError(msg)
    # Forward only the wsgi.server() keyword arguments that were provided.
    wsgi_args = {}
    for arg in ('log', 'environ', 'max_size', 'max_http_version',
                'protocol', 'server_event', 'minimum_chunk_size',
                'log_x_forwarded_for', 'custom_pool', 'keepalive',
                'log_output', 'log_format', 'url_length_limit', 'debug',
                'socket_timeout', 'capitalize_response_headers'):
        try:
            wsgi_args[arg] = self.options.pop(arg)
        except KeyError:
            pass
    if 'log_output' not in wsgi_args:
        wsgi_args['log_output'] = not self.quiet
    import eventlet.wsgi
    sock = self.options.pop('shared_socket', None) or self.get_socket()
    eventlet.wsgi.server(sock, handler, **wsgi_args)
Start bottle server.
def requirements(ctx):
    """Write the `requirements-agent-release.txt` file at the root of the
    repo listing all the Agent-based integrations pinned at the version
    they currently have in HEAD.
    """
    echo_info('Freezing check releases')
    checks = get_valid_checks()
    # The dev package itself is not shipped with the Agent.
    checks.remove('datadog_checks_dev')
    entries = []
    for check in checks:
        if check in AGENT_V5_ONLY:
            echo_info('Check `{}` is only shipped with Agent 5, skipping'.format(check))
            continue
        try:
            version = get_version_string(check)
            entries.append('{}\n'.format(get_agent_requirement_line(check, version)))
        except Exception as e:
            # Best-effort: report and keep processing the other checks.
            echo_failure('Error generating line: {}'.format(e))
            continue
    lines = sorted(entries)
    req_file = get_agent_release_requirements()
    write_file_lines(req_file, lines)
    echo_success('Successfully wrote to `{}`!'.format(req_file))
Write the `requirements-agent-release.txt` file at the root of the repo listing all the Agent-based integrations pinned at the version they currently have in HEAD.
def process_jwt(jwt):
    """Process a JSON Web Token without verifying it.

    Call this before :func:`verify_jwt` if you need access to the
    header or claims in the token before verifying it.

    :param jwt: The JSON Web Token to process.
    :rtype: tuple
    :returns: ``(header, claims)``
    """
    header_b64, claims_b64, _signature = jwt.split('.')
    parsed_header = json_decode(base64url_decode(header_b64))
    parsed_claims = json_decode(base64url_decode(claims_b64))
    return parsed_header, parsed_claims
Process a JSON Web Token without verifying it. Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key. :param jwt: The JSON Web Token to verify. :type jwt: str or unicode :rtype: tuple :returns: ``(header, claims)``
def oauth2_token_setter(remote, resp, token_type='', extra_data=None):
    """Set an OAuth2 token.

    :param remote: the remote application.
    :param resp: the response containing the access token.
    :param token_type: the token type. (Default: ``''``)
    :param extra_data: extra information. (Default: ``None``)
    :returns: a RemoteToken instance.
    """
    access_token = resp['access_token']
    return token_setter(
        remote,
        access_token,
        secret='',
        token_type=token_type,
        extra_data=extra_data,
    )
Set an OAuth2 token. The refresh_token can be used to obtain a new access_token after the old one is expired. It is saved in the database for long term use. A refresh_token will be present only if `access_type=offline` is included in the authorization code request. :param remote: The remote application. :param resp: The response. :param token_type: The token type. (Default: ``''``) :param extra_data: Extra information. (Default: ``None``) :returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
def main(symbol: str):
    """Display the balance for the given security symbol."""
    print("Displaying the balance for", symbol)
    with BookAggregate() as svc:
        security = svc.book.get(Commodity, mnemonic=symbol)
        sec_svc = SecurityAggregate(svc.book, security)
        shares_no = sec_svc.get_quantity()
        print("Quantity:", shares_no)
        avg_price = sec_svc.get_avg_price()
        print("Average price:", avg_price)
Displays the balance for the security symbol.
def _validate_config(self):
    """Ensure config keys exactly match REQUIRED_CONFIG_KEYS.

    No-op when no backend is set or no keys are required.

    :raises ValidationError: on missing or unrecognized keys.
    """
    if not self.backend:
        return
    # Truthiness instead of len(...) < 1 / len(...) > 0 comparisons.
    if not self.REQUIRED_CONFIG_KEYS:
        return
    self.config = self.config or {}
    required_keys = set(self.REQUIRED_CONFIG_KEYS)
    config_keys = set(self.config.keys())
    missing_required_keys = required_keys - config_keys
    unrecognized_keys = config_keys - required_keys
    if missing_required_keys:
        missing_keys_string = ', '.join(missing_required_keys)
        raise ValidationError(_('Missing required config keys: "%s"') % missing_keys_string)
    elif unrecognized_keys:
        unrecognized_keys_string = ', '.join(unrecognized_keys)
        raise ValidationError(_('Unrecognized config keys: "%s"') % unrecognized_keys_string)
ensure REQUIRED_CONFIG_KEYS are filled
async def text(self) -> str:
    """Return BODY as text using the encoding from ``.charset``.

    Falls back to utf-8 when no charset is set.
    """
    raw = await self.read()
    return raw.decode(self.charset or 'utf-8')
Return BODY as text using encoding from .charset.
def split_words(line):
    """Return the list of normalized words contained in a line."""
    spaced = _NORM_REGEX.sub(r'\1 \2', line)
    return [normalize(word) for word in _WORD_REGEX.split(spaced)]
Return the list of words contained in a line.
def on_unselect(self, item, action):
    """Add an action to run when an object is unfocused.

    :param item: either the item itself or its integer index.
    :param action: callable/action to register.
    """
    index = item if isinstance(item, int) else self.items.index(item)
    self._on_unselect[index] = action
Add an action to make when an object is unfocused.
def spread(self, m: Union[int, pd.Series]) -> Union[int, pd.Series]:
    """Spread out integer values to give smaller values more weight."""
    scaled = m * 111_111
    return scaled % self.TEN_DIGIT_MODULUS
Spreads out integer values to give smaller values more weight.
def get(self, bus_name, object_path=None, **kwargs):
    """Get a remote object.

    :param bus_name: name of the service that exposes this object; a
        leading "." auto-prepends org.freedesktop.
    :param object_path: path of the object; derived from bus_name when
        not provided.
    :return: ProxyObject implementing all Interfaces exposed by the
        remote object.
    :raises TypeError: on unexpected keyword arguments.
    :raises KeyError: if the object is missing or its introspection XML
        is invalid.
    """
    # Only "timeout" is an accepted keyword argument.
    for kwarg in kwargs:
        if kwarg not in ("timeout",):
            raise TypeError(self.__qualname__ + " got an unexpected keyword argument '{}'".format(kwarg))
    timeout = kwargs.get("timeout", None)
    bus_name = auto_bus_name(bus_name)
    object_path = auto_object_path(bus_name, object_path)
    # Introspect the remote object synchronously over DBus.
    ret = self.con.call_sync(
        bus_name, object_path,
        'org.freedesktop.DBus.Introspectable', "Introspect",
        None, GLib.VariantType.new("(s)"), 0,
        timeout_to_glib(timeout), None)
    if not ret:
        raise KeyError("no such object; you might need to pass object path as the 2nd argument for get()")
    xml, = ret.unpack()
    try:
        introspection = ET.fromstring(xml)
    except:
        # NOTE(review): bare except kept byte-identical; it masks all errors here.
        raise KeyError("object provides invalid introspection XML")
    return CompositeInterface(introspection)(self, bus_name, object_path)
Get a remote object. Parameters ---------- bus_name : string Name of the service that exposes this object. You may start with "." - then org.freedesktop will be automatically prepended. object_path : string, optional Path of the object. If not provided, bus_name translated to path format is used. Returns ------- ProxyObject implementing all the Interfaces exposed by the remote object. Note that it inherits from multiple Interfaces, so the method you want to use may be shadowed by another one, eg. from a newer version of the interface. Therefore, to interact with only a single interface, use: >>> bus.get("org.freedesktop.systemd1")["org.freedesktop.systemd1.Manager"] or simply >>> bus.get(".systemd1")[".Manager"] which will give you access to the one specific interface.
def delete_commit(self, commit):
    """Delete a commit.

    Params:
        * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.DeleteCommitRequest(commit=commit_from(commit))
    self.stub.DeleteCommit(request, metadata=self.metadata)
Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit.
def GetBEDnarrowPeakgz(URL_or_PATH_TO_file):
    """Reads a gz compressed BED narrow peak file from a web address or
    local file.

    NOTE(review): uses StringIO.StringIO and urllib2, so this is
    Python 2-only code as written.

    :param URL_or_PATH_TO_file: web address or path to local file
    :returns: a Pandas dataframe
    """
    if os.path.isfile(URL_or_PATH_TO_file):
        response=open(URL_or_PATH_TO_file, "r")
        compressedFile = StringIO.StringIO(response.read())
    else:
        response = urllib2.urlopen(URL_or_PATH_TO_file)
        compressedFile = StringIO.StringIO(response.read())
    decompressedFile = gzip.GzipFile(fileobj=compressedFile)
    out=decompressedFile.read().split("\n")
    out=[ s.split("\t") for s in out]
    out=pd.DataFrame(out)
    # Standard ENCODE narrowPeak column names.
    out.columns=["chrom","chromStart","chromEnd","name","score","strand","signalValue","-log10(pValue)","-log10(qvalue)","peak"]
    out["name"]=out.index.tolist()
    out["name"]="Peak_"+out["name"].astype(str)
    # Drop the trailing empty row produced by splitting on the final newline.
    out=out[:-1]
    return out
Reads a gz compressed BED narrow peak file from a web address or local file :param URL_or_PATH_TO_file: web address of path to local file :returns: a Pandas dataframe
def watermark(app, env):
    """Add watermark CSS to the Sphinx build when enabled in config."""
    if app.config.sphinxmark_enable is not True:
        return
    LOG.info('adding watermark...', nonl=True)
    buildpath, imagefile = getimage(app)
    cssname = buildcss(app, buildpath, imagefile)
    app.add_css_file(cssname)
    LOG.info(' done')
Add watermark.
def count_consonants(text):
    """Count number of occurrences of consonants in a given string."""
    consonants = config.AVRO_CONSONANTS
    return sum(1 for ch in text if ch.lower() in consonants)
Count number of occurrences of consonants in a given string
def list(self):
    """List the existing stacks in the indicated region.

    Pages through list_stacks results, printing every stack that is not
    DELETE_COMPLETE.

    Returns:
        True if True
    """
    # NOTE: method name shadows the builtin `list`; kept for interface compatibility.
    self._initialize_list()
    interested = True
    response = self._cloudFormation.list_stacks()
    print('Stack(s):')
    while interested:
        if 'StackSummaries' in response:
            for stack in response['StackSummaries']:
                stack_status = stack['StackStatus']
                if stack_status != 'DELETE_COMPLETE':
                    print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
        # Follow pagination until there is no NextToken.
        next_token = response.get('NextToken', None)
        if next_token:
            response = self._cloudFormation.list_stacks(NextToken=next_token)
        else:
            interested = False
    return True
List the existing stacks in the indicated region Args: None Returns: True if True Todo: Figure out what could go wrong and take steps to handle problems.
def _deriv_arctan2(y, x): r2 = x*x + y*y df_dy = x / r2 df_dx = -y / r2 return np.hstack([df_dy, df_dx])
Derivative of the arctan2 function
def _match_to_morph_parents(self, type, results):
    """Match the results for a given type to their parent.

    :param type: The parent type
    :type type: str
    :param results: The results to match to their parent
    :type results: Collection
    """
    # NOTE(review): parameter name "type" shadows the builtin; kept for
    # interface compatibility with existing callers.
    for result in results:
        if result.get_key() in self._dictionary.get(type, []):
            for model in self._dictionary[type][result.get_key()]:
                model.set_relation(
                    self._relation,
                    Result(result, self, model, related=result)
                )
Match the results for a given type to their parent. :param type: The parent type :type type: str :param results: The results to match to their parent :type results: Collection
def stop(self):
    """Stops the VPCS process.

    Old-style (``yield from``) asyncio coroutine: stops ubridge first,
    then terminates the VPCS process, escalating to kill on timeout.
    """
    yield from self._stop_ubridge()
    if self.is_running():
        self._terminate_process()
        if self._process.returncode is None:
            try:
                yield from wait_for_process_termination(self._process, timeout=3)
            except asyncio.TimeoutError:
                # Graceful termination failed; escalate to SIGKILL.
                if self._process.returncode is None:
                    try:
                        self._process.kill()
                    except OSError as e:
                        log.error("Cannot stop the VPCS process: {}".format(e))
                    if self._process.returncode is None:
                        log.warn('VPCS VM "{}" with PID={} is still running'.format(self._name, self._process.pid))
    self._process = None
    self._started = False
    yield from super().stop()
Stops the VPCS process.
def _replace_variables(data, variables): formatter = string.Formatter() return [formatter.vformat(item, [], variables) for item in data]
Replace the format variables in all items of data.
def get_check_result_brok(self):
    """Create a check_result brok.

    :return: Brok object
    :rtype: alignak.Brok
    """
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'check_result')
    brok_type = self.my_type + '_check_result'
    return Brok({'type': brok_type, 'data': data})
Create check_result brok :return: Brok object :rtype: alignak.Brok
def download_image(image_id, url, x1, y1, x2, y2, output_dir):
    """Downloads one image, crops it, resizes it and saves it locally.

    :param image_id: id used to build the output file name.
    :param url: source URL of the image.
    :param x1, y1, x2, y2: fractional (0..1) crop box coordinates.
    :param output_dir: directory in which the .png is written.
    :return: True on success (or if already downloaded), False on failure.
    """
    output_filename = os.path.join(output_dir, image_id + '.png')
    # Already downloaded: treat as success and skip the network call.
    if os.path.exists(output_filename):
        return True
    try:
        url_file = urlopen(url)
        if url_file.getcode() != 200:
            return False
        image_buffer = url_file.read()
        image = Image.open(BytesIO(image_buffer)).convert('RGB')
        w = image.size[0]
        h = image.size[1]
        # Fractional crop box is scaled to pixel coordinates.
        image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)))
        image = image.resize((299, 299), resample=Image.ANTIALIAS)
        image.save(output_filename)
    except IOError:
        return False
    return True
Downloads one image, crops it, resizes it and saves it locally.
def add(modname, features, required_version, installed_version=None, optional=False):
    """Add Spyder dependency.

    :raises ValueError: if the module was already registered.
    """
    global DEPENDENCIES
    for registered in DEPENDENCIES:
        if registered.modname == modname:
            raise ValueError(
                "Dependency has already been registered: %s" % modname)
    DEPENDENCIES += [Dependency(modname, features, required_version,
                                installed_version, optional)]
Add Spyder dependency
def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args):
    """Prompt user for confirmation and exit on negative reply.

    ``prompt`` and ``extra_args`` are passed unchanged to
    :func:`click.confirm`.

    :param str prompt: prompt string to display.
    :param int exitcode: program exit code if negative reply given.
    :param str msg: message to display before exiting.
    """
    if click.confirm(prompt, **extra_args):
        return True
    if msg:
        sys.stderr.write(msg)
        sys.stderr.write('\n')
    sys.exit(exitcode)
Prompt user for confirmation and exit on negative reply. Arguments `prompt` and `extra_args` will be passed unchanged to `click.confirm`:func: (which is used for actual prompting). :param str prompt: Prompt string to display. :param int exitcode: Program exit code if negative reply given. :param str msg: Message to display before exiting.
def get_questions(self, answered=None, honor_sequential=True, update=True):
    """Get all available questions for this section.

    if answered == False: only return next unanswered question
    if answered == True: only return next answered question
    if answered is None: return next question whether answered or not
    if honor_sequential == True: only return questions if section or
        part is set to sequential items
    """
    def update_question_list():
        # A question is answered when its latest response is not missing.
        latest_question_response = question_map['responses'][0]
        question_answered = False
        if 'missingResponse' not in latest_question_response:
            question_answered = True
        # Filter by the requested answered state (None means no filter).
        if answered is None or answered == question_answered:
            question_list.append(self.get_question(question_map=question_map))
        return question_answered
    prev_question_answered = True
    question_list = []
    if update:
        self._update_questions()
    for question_map in self._my_map['questions']:
        if self._is_question_sequential(question_map) and honor_sequential:
            # Sequential: only include while the previous question was answered.
            if prev_question_answered:
                prev_question_answered = update_question_list()
        else:
            update_question_list()
    # First access starts the section clock.
    if self._my_map['actualStartTime'] is None:
        self._my_map['actualStartTime'] = DateTime.utcnow()
    return QuestionList(question_list, runtime=self._runtime, proxy=self._proxy)
gets all available questions for this section if answered == False: only return next unanswered question if answered == True: only return next answered question if answered in None: return next question whether answered or not if honor_sequential == True: only return questions if section or part is set to sequential items
def ident():
    """Return the 3x3 identity matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ident_c.html

    :return: The 3x3 identity matrix.
    :rtype: 3x3-Element Array of floats
    """
    out = stypes.emptyDoubleMatrix()
    libspice.ident_c(out)
    return stypes.cMatrixToNumpy(out)
This routine returns the 3x3 identity matrix. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ident_c.html :return: The 3x3 identity matrix. :rtype: 3x3-Element Array of floats
def get_dict_repr(self):
    """Return a dictionary representation of this phase.

    Used for checksumming, in order to uniquely compare instance
    images against their requirements.
    """
    return {
        'phase_name': self.phase_name,
        'phase_type': self.phase_type,
        'actions': self.actions,
    }
Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements
def _get_dataset(self, dataset, name, color):
    """Encode a dataset as a chart dataset object-literal string.

    NOTE(review): the output is a JS-style object literal (trailing
    commas, raw tab), not strict JSON — presumably consumed by a
    charting library template; confirm before changing the format.

    :param dataset: list of data values.
    :param name: dataset label.
    :param color: background color, or None to use the shared palette.
    """
    global palette
    html = "{"
    html += '\t"label": "' + name + '",'
    if color is not None:
        html += '"backgroundColor": "' + color + '",\n'
    else:
        # Fall back to the module-level palette expression.
        html += '"backgroundColor": ' + palette + ',\n'
    html += '"data": ' + self._format_list(dataset) + ',\n'
    html += "}"
    return html
Encode a dataset
def remove_default_content(portal):
    """Remove default Plone contents."""
    logger.info("*** Delete Default Content ***")
    object_ids = portal.objectIds()
    # Build a concrete list: under Python 3, `filter` returns a lazy
    # iterator, which manage_delObjects may not accept as a sequence.
    delete_ids = [oid for oid in CONTENTS_TO_DELETE if oid in object_ids]
    portal.manage_delObjects(ids=delete_ids)
Remove default Plone contents
def unlink_reference(self, source, target):
    """Unlink the target from the source.

    :return: True when the backreference was removed, False otherwise.
    """
    target_uid = api.get_uid(target)
    key = self.get_relationship_key(target)
    backrefs = get_backreferences(source, relationship=None)
    if key not in backrefs:
        logger.warn(
            "Referenced object {} has no backreferences for the key {}"
            .format(repr(source), key))
        return False
    linked_uids = backrefs[key]
    if target_uid not in linked_uids:
        logger.warn("Target {} was not linked by {}"
                    .format(repr(target), repr(source)))
        return False
    linked_uids.remove(target_uid)
    return True
Unlink the target from the source
def prepare_parser(program):
    """Create and populate an argument parser.

    Registers the help command plus one subcommand module per entry in
    ``submodules``.
    """
    parser = ArgumentParser(
        description=PROG_DESCRIPTION, prog=program,
        formatter_class=HelpFormatter, add_help=False)
    # Custom minimal help action replaces argparse's default -h.
    parser.add_argument(
        "-h", "--help", action=MinimalHelpAction, help=argparse.SUPPRESS)
    submodules = (
        "nodes", "machines", "devices", "controllers", "fabrics", "vlans",
        "subnets", "spaces", "files", "tags", "users", "profiles", "shell",
    )
    cmd_help.register(parser)
    # Each submodule registers its own subcommands on the parser.
    for submodule in submodules:
        module = import_module("." + submodule, __name__)
        module.register(parser)
    parser.add_argument(
        '--debug', action='store_true', default=False,
        help=argparse.SUPPRESS)
    return parser
Create and populate an argument parser.
def make_operatorsetid(
        domain,
        version,
):
    """Construct an OperatorSetIdProto.

    Arguments:
        domain (string): The domain of the operator set id
        version (integer): Version of operator set id
    """
    result = OperatorSetIdProto()
    result.domain = domain
    result.version = version
    return result
Construct an OperatorSetIdProto. Arguments: domain (string): The domain of the operator set id version (integer): Version of operator set id
def add(self, string: (str, list)):
    """Clear the contents of the entry field(s) and insert ``string``.

    :param string: text for a single entry, or a list of texts (one
        per entry) when multiple entries exist.
    :raises ValueError: if the list length does not match the entries.
    """
    entries = self._entries
    if len(entries) == 1:
        entries[0].delete(0, 'end')
        entries[0].insert(0, string)
        return
    if len(string) != len(entries):
        raise ValueError('the "string" list must be '
                         'equal to the number of entries')
    for entry, text in zip(entries, string):
        entry.delete(0, 'end')
        entry.insert(0, text)
Clear the contents of the entry field and insert the contents of string. :param string: an str containing the text to display :return:
def try_run_inschek(pst):
    """Attempt to run INSCHEK for each instruction file / model output
    file pair in a pyemu.Pst.

    On success the INSCHEK-written .obf file populates
    ``pst.observation_data.obsval``.

    Parameters
    ----------
    pst : pyemu.Pst
    """
    for ins_file, out_file in zip(pst.instruction_files, pst.output_files):
        obf_df = _try_run_inschek(ins_file, out_file)
        if obf_df is None:
            continue
        pst.observation_data.loc[obf_df.index, "obsval"] = obf_df.obsval
attempt to run INSCHEK for each instruction file, model output file pair in a pyemu.Pst. If the run is successful, the INSCHEK written .obf file is used to populate the pst.observation_data.obsval attribute Parameters ---------- pst : (pyemu.Pst)
def process_keystroke(self, inp, idx, offset):
    """Process keystroke ``inp``, adjusting screen parameters.

    :param inp: return value of Terminal.inkey().
    :param idx: page index.
    :param offset: scrolling region offset of current page.
    :returns: tuple of next (idx, offset); (-1, -1) signals quit.
    """
    # inp.lower() can never equal u'Q', so the original membership test
    # against (u'q', u'Q') reduces to a single comparison.
    if inp.lower() == u'q':
        return (-1, -1)
    self._process_keystroke_commands(inp)
    return self._process_keystroke_movement(inp, idx, offset)
Process keystroke ``inp``, adjusting screen parameters. :param inp: return value of Terminal.inkey(). :type inp: blessed.keyboard.Keystroke :param idx: page index. :type idx: int :param offset: scrolling region offset of current page. :type offset: int :returns: tuple of next (idx, offset). :rtype: (int, int)
def balance(self, account_id=None):
    """Return balance information for a specific account.

    Official docs: https://monzo.com/docs/#read-balance

    :param account_id: Monzo account ID; may be omitted only when the
        user has exactly one account
    :raises ValueError: if no account_id is given and the user does not
        have exactly one account
    :returns: Monzo balance instance
    """
    if not account_id:
        # Fetch the account list once instead of calling the API
        # repeatedly for the length check and the lookup.
        accounts = self.accounts()
        if len(accounts) != 1:
            raise ValueError("You need to pass account ID")
        account_id = accounts[0].id
    response = self._get_response(
        method='get',
        endpoint='/balance',
        params={
            'account_id': account_id,
        },
    )
    return MonzoBalance(data=response.json())
Returns balance information for a specific account. Official docs: https://monzo.com/docs/#read-balance :param account_id: Monzo account ID :type account_id: str :raises: ValueError :returns: Monzo balance instance :rtype: MonzoBalance
def hexdigest(self, data=None):
    """Return the digest in hexadecimal form, for compatibility with
    hashlib."""
    from base64 import b16encode
    encoded = b16encode(self.digest(data))
    # Python 2 callers expect bytes; Python 3 callers get a str.
    return encoded if pyver == 2 else encoded.decode('us-ascii')
Returns digest in the hexadecimal form. For compatibility with hashlib
def start(self):
    """Open the microphone stream if needed and start listening."""
    if self.stream is None:
        from pyaudio import PyAudio, paInt16
        self.pa = PyAudio()
        # 16 kHz, mono, 16-bit signed input stream.
        self.stream = self.pa.open(
            16000, 1, paInt16, True, frames_per_buffer=self.chunk_size)
        self._wrap_stream_read(self.stream)
    self.engine.start()
    self.running = True
    self.is_paused = False
    # Handle predictions on a daemon thread so it never blocks shutdown.
    prediction_thread = Thread(target=self._handle_predictions)
    prediction_thread.daemon = True
    self.thread = prediction_thread
    prediction_thread.start()
Start listening from stream
def add_location_reminder(self, service, name, lat, long, trigger, radius):
    """Add a reminder to the task which activates at a given location.

    .. warning:: Requires Todoist premium.

    :param service: ``email``, ``sms`` or ``push`` for mobile.
    :param name: An alias for the location.
    :param lat: The location latitude.
    :param long: The location longitude.
    :param trigger: ``on_enter`` or ``on_leave``.
    :param radius: The radius around the location that is still
        considered the location.
    """
    _perform_command(self.project.owner, 'reminder_add', {
        'item_id': self.id,
        'service': service,
        'type': 'location',
        'name': name,
        'loc_lat': str(lat),
        'loc_long': str(long),
        'loc_trigger': trigger,
        'radius': radius,
    })
Add a reminder to the task which activates at a given location. .. warning:: Requires Todoist premium. :param service: ```email```, ```sms``` or ```push``` for mobile. :type service: str :param name: An alias for the location. :type name: str :param lat: The location latitude. :type lat: float :param long: The location longitude. :type long: float :param trigger: ```on_enter``` or ```on_leave```. :type trigger: str :param radius: The radius around the location that is still considered the location. :type radius: float >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.add_location_reminder('email', 'Leave Glasgow', ... 55.8580, 4.2590, 'on_leave', 100)
def _convert_method_settings_into_operations(method_settings=None):
    """Convert ``method_settings`` into a list of 'replace' patch operations.

    :param method_settings: mapping of method path -> {setting: value}
    :returns: list of operation dicts (empty when no settings are given)
    """
    operations = []
    if not method_settings:
        return operations
    # Iterate key/value pairs directly instead of .keys() + indexing.
    for method, settings in method_settings.items():
        for key, value in settings.items():
            if isinstance(value, bool):
                # The API expects booleans as lowercase strings.
                value = 'true' if value else 'false'
            operations.append({
                'op': 'replace',
                'path': method + _resolve_key(key),
                'value': value,
            })
    return operations
Helper to handle the conversion of method_settings to operations :param method_settings: :return: list of operations
def tzname_in_python2(myfunc):
    """Change unicode output into bytestrings in Python 2.

    The tzname() API changed in Python 3: it used to return bytes but
    was changed to return unicode strings.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original decorator discarded.
    @wraps(myfunc)
    def inner_func(*args, **kwargs):
        if PY3:
            return myfunc(*args, **kwargs)
        else:
            return myfunc(*args, **kwargs).encode()
    return inner_func
Change unicode output into bytestrings in Python 2 tzname() API changed in Python 3. It used to return bytes, but was changed to unicode strings
def avatar(self, size: int = 256) -> str:
    """Generate a random avatar link.

    :param size: Size of avatar.
    :return: Link to avatar.
    """
    token = self.password(hashed=True)
    return 'https://api.adorable.io/avatars/{0}/{1}.png'.format(size, token)
Generate a random avatar. :param size: Size of avatar. :return: Link to avatar.
def _generate_processing_blocks(start_id, min_blocks=0, max_blocks=4): processing_blocks = [] num_blocks = random.randint(min_blocks, max_blocks) for i in range(start_id, start_id + num_blocks): _id = 'sip-pb{:03d}'.format(i) block = dict(id=_id, resources_requirement={}, workflow={}) processing_blocks.append(block) return processing_blocks
Generate a number of Processing Blocks
def _set_led_value(self, group, val):
    """Turn the LED ``group`` on or off within the current bitmask and
    apply the updated bitmask."""
    self._set_led_bitmask(set_bit(self._value, group, bool(val)))
Set the LED value and confirm with a status check.
def _get_videos_for_filter(video_filter, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None):
    """Return a generator over the matching videos plus pagination context.

    Videos are sorted by ``sort_field`` in ``sort_dir`` direction, with
    ties broken by ``edx_video_id`` to ensure a total order.

    :param video_filter: dict of Django ORM filter kwargs for Video
    :param sort_field: optional enum whose ``.value`` is the model field name
    :param sort_dir: SortDirection.asc or SortDirection.desc
    :param pagination_conf: optional dict with 'videos_per_page' and
        'page_number' keys; when given only that page is returned
    :returns: (generator of serialized video dicts, pagination context dict)
    """
    videos = Video.objects.filter(**video_filter)
    paginator_context = {}
    if sort_field:
        # Secondary sort on edx_video_id guarantees a deterministic order.
        videos = videos.order_by(sort_field.value, "edx_video_id")
        if sort_dir == SortDirection.desc:
            videos = videos.reverse()
    if pagination_conf:
        videos_per_page = pagination_conf.get('videos_per_page')
        paginator = Paginator(videos, videos_per_page)
        # ``videos`` becomes a Page object; it is still iterable below.
        videos = paginator.page(pagination_conf.get('page_number'))
        paginator_context = {
            'current_page': videos.number,
            'total_pages': videos.paginator.num_pages,
            'items_on_one_page':videos_per_page
        }
    return (VideoSerializer(video).data for video in videos), paginator_context
Returns a generator expression that contains the videos found, sorted by the given field and direction, with ties broken by edx_video_id to ensure a total order.
def paintEvent(self, event):
    """Paint the widget, drawing the placeholder text when the editor is
    empty, unfocused and a placeholder is set.

    :param event: the Qt paint event
    :returns: None
    """
    # Delegate to the default painting unless the placeholder applies.
    if self.toPlainText() or self.hasFocus() or not self._placeholder:
        return super(JB_PlainTextEdit, self).paintEvent(event)
    painter = QtGui.QPainter(self.viewport())
    painter.setClipping(False)
    # Draw the placeholder with a half-transparent text color.
    color = self.palette().text().color()
    color.setAlpha(128)
    previous_pen = painter.pen()
    painter.setPen(color)
    painter.drawText(self.viewport().geometry(),
                     QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
                     self._placeholder)
    painter.setPen(previous_pen)
Paint the widget :param event: :type event: :returns: None :rtype: None :raises: None
def add_note(self, note, octave=None, dynamics=None):
    """Add a note to the container and keep the notes sorted low to high.

    The note can either be a string -- in which case the ``octave`` and
    ``dynamics`` arguments may also be used -- or a Note object.

    :raises UnexpectedObjectError: if ``note`` is not a string or Note
    :returns: the sorted list of notes
    """
    if dynamics is None:
        # Avoid the original shared mutable default argument ({}).
        dynamics = {}
    if isinstance(note, str):
        if octave is not None:
            note = Note(note, octave, dynamics)
        elif len(self.notes) == 0:
            # The first note defaults to octave 4.
            note = Note(note, 4, dynamics)
        else:
            # Keep the container ascending: place the new note at or
            # above the octave of the current highest note.
            if Note(note, self.notes[-1].octave) < self.notes[-1]:
                note = Note(note, self.notes[-1].octave + 1, dynamics)
            else:
                note = Note(note, self.notes[-1].octave, dynamics)
    if not hasattr(note, 'name'):
        raise UnexpectedObjectError("Object '%s' was not expected. "
                                    "Expecting a mingus.containers.Note object." % note)
    if note not in self.notes:
        self.notes.append(note)
        self.notes.sort()
    return self.notes
Add a note to the container and sorts the notes from low to high. The note can either be a string, in which case you could also use the octave and dynamics arguments, or a Note object.
def exec_request(endpoint, func, raise_for_status=False, **kwargs):
    """Send an https API request to SeAT.

    :param endpoint: API path fragment appended to the SeAT base URL
    :param func: name of the ``requests`` function to call ('get', ...)
    :param raise_for_status: re-raise HTTP errors instead of returning {}
    :returns: decoded JSON response, or {} on HTTP error
    """
    url = '{0}/api/v1/{1}'.format(settings.SEAT_URL, endpoint)
    headers = {'X-Token': settings.SEAT_XTOKEN, 'Accept': 'application/json'}
    try:
        logger.debug(headers)
        logger.debug(url)
        response = getattr(requests, func)(url, headers=headers, data=kwargs)
        response.raise_for_status()
        return response.json()
    except requests.HTTPError as e:
        if raise_for_status:
            raise e
        logger.exception("Error encountered while performing API request to SeAT with url {}".format(url))
        return {}
Send an https api request
def extract(body, sender):
    """Strip the signature from the body of the message.

    Returns the stripped body and the signature as a tuple.  If no
    signature is found the signature element of the tuple is None.
    """
    try:
        delimiter = get_delimiter(body)
        # The stripped body is what gets returned in the fallback too.
        body = body.strip()
        if has_signature(body, sender):
            lines = body.splitlines()
            markers = _mark_lines(lines, sender)
            text, signature = _process_marked_lines(lines, markers)
            if signature:
                text = delimiter.join(text)
                if text.strip():
                    return (text, delimiter.join(signature))
    except Exception:
        log.exception('ERROR when extracting signature with classifiers')
    return (body, None)
Strips signature from the body of the message. Returns stripped body and signature as a tuple. If no signature is found the corresponding returned value is None.
def pool_create(hypervisor, identifier, pool_path):
    """Create a storage pool.

    The following values are set in the XML configuration:
      * name
      * target/path
      * target/permission/label
    """
    path = os.path.join(pool_path, identifier)
    if not os.path.exists(path):
        os.makedirs(path)
    return hypervisor.storagePoolCreateXML(
        POOL_DEFAULT_CONFIG.format(identifier, path), 0)
Storage pool creation. The following values are set in the XML configuration: * name * target/path * target/permission/label
def _GenerateStopTimesTuples(self): stoptimes = self.GetStopTimes() for i, st in enumerate(stoptimes): yield st.GetFieldValuesTuple(self.trip_id)
Generator for rows of the stop_times file
def null_slice(slice_):
    """Return True if applying ``slice_`` would have no effect.

    :param slice_: anything convertible by ``as_slice``
    :returns: True for an all-pass mask or full slice, False otherwise
    """
    try:
        slice_ = as_slice(slice_)
    except TypeError:
        return False
    # A boolean mask that is all True selects everything.
    if isinstance(slice_, numpy.ndarray) and numpy.all(slice_):
        return True
    if isinstance(slice_, slice) and slice_ in (
        slice(None, None, None), slice(0, None, 1)
    ):
        return True
    # Explicit False instead of the original implicit None fall-through.
    return False
Returns True if a slice will have no effect
def is_valid_ipv6(ip):
    """Return True if the given ip is a valid IPv6 address."""
    # Collapse the not(...)/return False/return True pattern into a
    # single boolean expression.
    return bool(_ipv6_re.match(ip) or
                _ipv6_ipv4_re.match(ip) or
                _ipv6_abbr_re.match(ip) or
                _ipv6_ipv4_abbr_re.match(ip))
Return True if given ip is a valid IPv6 address.
def split(self, granularity_after_split, exclude_partial=True):
    """Split this period into sub-periods of the given granularity.

    :param granularity_after_split: the target Granularity member
    :param exclude_partial: drop partial periods at the start and end
    :raises ValueError: for an unknown granularity
    :returns: list of sub-periods
    """
    if granularity_after_split == Granularity.DAY:
        return self.get_days()
    elif granularity_after_split == Granularity.WEEK:
        return self.get_weeks(exclude_partial)
    elif granularity_after_split == Granularity.MONTH:
        return self.get_months(exclude_partial)
    elif granularity_after_split == Granularity.QUARTER:
        return self.get_quarters(exclude_partial)
    elif granularity_after_split == Granularity.HALF_YEAR:
        return self.get_half_years(exclude_partial)
    elif granularity_after_split == Granularity.YEAR:
        return self.get_years(exclude_partial)
    # ValueError is more precise than the original bare Exception and is
    # still caught by any caller catching Exception.
    raise ValueError("Invalid granularity: %s" % granularity_after_split)
Split a period into a given granularity. Optionally include partial periods at the start and end of the period.
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names): valid_indices = [index for index, grad_list in enumerate(grad_arrays) if grad_list[0] is not None] valid_grad_arrays = [grad_arrays[i] for i in valid_indices] valid_param_arrays = [param_arrays[i] for i in valid_indices] valid_param_names = [param_names[i] for i in valid_indices] size = len(valid_grad_arrays) start = 0 default_batch = '16' batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch)) while start < size: end = start + batch if start + batch < size else size kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start) kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start) start = end
Perform update of param_arrays from grad_arrays on NCCL kvstore.
def infer_ml_task(y):
    """Infer the machine learning task to select for.

    If the target vector only consists of integer-typed values or
    objects, the task is assumed to be 'classification'; otherwise
    'regression'.

    :param y: the target vector y (pandas.Series)
    :returns: 'classification' or 'regression'
    """
    # ``np.object`` was deprecated in NumPy 1.20 and removed in 1.24;
    # comparing against the builtin ``object`` is equivalent.
    if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == object:
        ml_task = 'classification'
    else:
        ml_task = 'regression'
    _logger.warning('Inferred {} as machine learning task'.format(ml_task))
    return ml_task
Infer the machine learning task to select for. The result will be either `'regression'` or `'classification'`. If the target vector only consists of integer typed values or objects, we assume the task is `'classification'`. Else `'regression'`. :param y: The target vector y. :type y: pandas.Series :return: 'classification' or 'regression' :rtype: str
def get_version_text(self):
    """Return the version information text from the device.

    Tries ``show version brief`` first and falls back to plain
    ``show version`` on platforms that do not support the brief form.

    :raises CommandError: if ``show version`` also fails
    """
    try:
        return self.device.send("show version brief", timeout=120)
    except CommandError:
        # Older platforms do not support the brief form; fall through.
        pass
    try:
        return self.device.send("show version", timeout=120)
    except CommandError as exc:
        exc.command = 'show version'
        raise
Return the version information from the device.
def get_gpubsub_publisher(config, metrics, changes_channel, **kw):
    """Get a GPubsubPublisher client.

    A factory function that validates configuration, creates an auth and
    pubsub API client, and returns a Google Pub/Sub Publisher provider.

    :param config: dict of Google Cloud Pub/Sub-related configuration
    :param metrics: an IMetricRelay implementation
    :param changes_channel: asyncio.Queue of messages to publish
    :param kw: additional keyword arguments for the Publisher
    :returns: a GPubsubPublisher instance
    """
    return gpubsub_publisher.GPubsubPublisherBuilder(
        config, metrics, changes_channel, **kw).build_publisher()
Get a GPubsubPublisher client. A factory function that validates configuration, creates an auth and pubsub API client, and returns a Google Pub/Sub Publisher provider. Args: config (dict): Google Cloud Pub/Sub-related configuration. metrics (obj): :interface:`IMetricRelay` implementation. changes_channel (asyncio.Queue): Queue to publish message to make corrections to Cloud DNS. kw (dict): Additional keyword arguments to pass to the Publisher. Returns: A :class:`GPubsubPublisher` instance.
def get_hub():
    """Return the thread-local Hub instance, creating it on first use."""
    try:
        return _local.hub
    except AttributeError:
        # Only the root fiber may create the hub.
        assert fibers.current().parent is None
        _local.hub = Hub()
        return _local.hub
Return the instance of the hub.
def computational_form(data):
    """Repackage a Series of numbers, Series, or DataFrames for calculation.

    Parameters
    ----------
    data : pandas.Series
        Series of numbers, Series, or DataFrames.

    Returns
    -------
    pandas.Series, DataFrame, or Panel
        Repacked data, aligned by indices, ready for calculation.
    """
    # NOTE(review): relies on the long-removed pandas.Panel and on the
    # Python 2 ``xrange`` builtin -- this block only runs on legacy
    # pandas / Python 2 environments.
    if isinstance(data.iloc[0], DataFrame):
        # Series of DataFrames -> 3-D Panel keyed by integer position.
        dslice = Panel.from_dict(dict([(i,data.iloc[i])
                                       for i in xrange(len(data))]))
    elif isinstance(data.iloc[0], Series):
        # Series of Series -> DataFrame, preserving the outer index.
        dslice = DataFrame(data.tolist())
        dslice.index = data.index
    else:
        # Already a plain Series of numbers.
        dslice = data
    return dslice
Input Series of numbers, Series, or DataFrames repackaged for calculation. Parameters ---------- data : pandas.Series Series of numbers, Series, DataFrames Returns ------- pandas.Series, DataFrame, or Panel repacked data, aligned by indices, ready for calculation
def gen_outputs(self, riskinput, monitor, epspath=None, hazard=None):
    """Group the assets per taxonomy and compute the outputs by using the
    underlying riskmodels. Yield one output per realization.

    :param riskinput: a RiskInput instance
    :param monitor: a monitor object used to measure the performance
    :param epspath: optional path to epsilons, forwarded to
        ``get_assets_by_taxo``
    :param hazard: optional precomputed hazard; when None it is read
        through the riskinput's hazard getter
    """
    self.monitor = monitor
    hazard_getter = riskinput.hazard_getter
    if hazard is None:
        # Read the hazard lazily, timing the operation.
        with monitor('getting hazard'):
            hazard_getter.init()
            hazard = hazard_getter.get_hazard()
    sids = hazard_getter.sids
    # This implementation supports exactly one site per riskinput.
    assert len(sids) == 1
    with monitor('computing risk', measuremem=False):
        assets_by_taxo = get_assets_by_taxo(riskinput.assets, epspath)
        # One output per realization, iterated in rlzi order.
        for rlzi, haz in sorted(hazard[sids[0]].items()):
            out = self.get_output(assets_by_taxo, haz, rlzi)
            yield out
Group the assets per taxonomy and compute the outputs by using the underlying riskmodels. Yield one output per realization. :param riskinput: a RiskInput instance :param monitor: a monitor object used to measure the performance
def serialize_date(attr, **kwargs):
    """Serialize a Date object into an ISO-8601 formatted string.

    :param attr: Date object (or ISO date string) to be serialized.
    :rtype: str
    """
    if isinstance(attr, str):
        # Accept pre-formatted ISO strings by parsing them first.
        attr = isodate.parse_date(attr)
    return '%04d-%02d-%02d' % (attr.year, attr.month, attr.day)
Serialize Date object into ISO-8601 formatted string. :param Date attr: Object to be serialized. :rtype: str
def auto_consume(func):
    """Decorator that consumes the current line after ``func`` returns.

    The first positional argument of the wrapped function is expected to
    expose a ``consume_line()`` method.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original decorator discarded.
    @wraps(func)
    def inner(*args, **kwargs):
        func(*args, **kwargs)
        args[0].consume_line()
    return inner
Decorator for auto consuming lines when leaving the function
def this(obj, **kwargs):
    """Print a series of debugging steps about ``obj`` to the user.

    Runs ``obj`` through the pipeline of inspection functions and prints
    the result of each one.
    """
    verbose = kwargs.get("verbose", True)
    banner = '{:=^30}'.format(" whatis.this? ")
    if verbose:
        print(banner)
    for step in pipeline:
        result = step(obj, **kwargs)
        if result is not None:
            print(result)
    if verbose:
        print(banner + '\n')
Prints series of debugging steps to user. Runs through pipeline of functions and print results of each.
def add_section(self, alias, section):
    """Add a sub-section to this section.

    :param alias: name under which the section is registered; must not
        contain the configured ``str_path_separator``
    :param section: the section object to attach
    :raises TypeError: if ``alias`` is not a string
    :raises ValueError: if ``alias`` contains the path separator
    """
    if not isinstance(alias, six.string_types):
        raise TypeError('Section name must be a string, got a {!r}'.format(type(alias)))
    # Validate the alias BEFORE mutating the tree: the original code
    # inserted the section first and left the invalid alias registered
    # when it raised ValueError.
    if self.settings.str_path_separator in alias:
        raise ValueError(
            'Section alias must not contain str_path_separator which is configured for this Config -- {!r} -- '
            'but {!r} does.'.format(self.settings.str_path_separator, alias)
        )
    self._tree[alias] = section
    section._section = self
    section._section_alias = alias
    self.dispatch_event(self.hooks.section_added_to_section, alias=alias, section=self, subject=section)
Add a sub-section to this section.
def update(self): con = self.subpars.pars.control self(con.relwb*con.nfk)
Update |WB| based on |RelWB| and |NFk|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(2) >>> lnk(ACKER) >>> relwb(0.2) >>> nfk(100.0, 200.0) >>> derived.wb.update() >>> derived.wb wb(20.0, 40.0)
def setup_pathing(self):
    """Format pathing for S3 deployments."""
    self.s3_version_uri = self._path_formatter(self.version)
    # The fixed aliases all share the same formatting rule.
    for alias in ("LATEST", "CANARY", "ALPHA", "MIRROR"):
        setattr(self, 's3_{0}_uri'.format(alias.lower()),
                self._path_formatter(alias))
Format pathing for S3 deployments.