code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def to_list(var):
    """Check that the given value is a list, converting it if it is not.

    ``None`` becomes an empty list, a string is split on newlines, and
    any other iterable is converted via ``list()``.

    Raises:
        ValueError: If the value cannot be converted to a list.
    """
    if var is None:
        return []
    if isinstance(var, str):
        return var.split('\n')
    if isinstance(var, list):
        return var
    try:
        return list(var)
    except TypeError:
        raise ValueError("{} cannot be converted to the list.".format(var))
def getattr(self, key):
    """This method gets attribute value of a Managed Object.

    NOTE(review): Python 2 only (``dict.has_key`` and the ``print``
    statement). The nesting below is reconstructed from a collapsed
    one-line source — confirm against the original UCS SDK.
    """
    # "classId" is always answered directly from the instance dict.
    if ((key == "classId") and (self.__dict__.has_key(key))):
        return self.__dict__[key]
    if UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId):
        # Known class: meta-declared properties come from __dict__,
        # everything else from the XtraProperty bag.
        if self.__dict__.has_key(key):
            if key in _ManagedObjectMeta[self.classId]:
                return self.__dict__[key]
            else:
                if self.__dict__.has_key('XtraProperty'):
                    if self.__dict__['XtraProperty'].has_key(key):
                        return self.__dict__['XtraProperty'][UcsUtils.WordU(key)]
                    else:
                        raise AttributeError(key)
                else:
                    print "No XtraProperty in mo:", self.classId, " key:", key
    else:
        # Unknown class: look only in XtraProperty; Dn/Rn default None.
        if self.__dict__['XtraProperty'].has_key(key):
            return self.__dict__['XtraProperty'][UcsUtils.WordU(key)]
        elif key == "Dn" or key == "Rn":
            return None
        else:
            raise AttributeError(key)
def info(name):
    """Return information for the specified user.

    This just returns dummy data so that salt states can work.

    :param str name: The name of the user account to show.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.info root
    """
    ret = {'name': name, 'passwd': '', 'lstchg': '', 'min': '',
           'max': '', 'warn': '', 'inact': '', 'expire': ''}
    data = __salt__['user.info'](name=name)
    if data:
        ret.update({'name': data['name'],
                    'passwd': 'Unavailable',
                    'lstchg': data['password_changed'],
                    'expire': data['expiration_date']})
    return ret
def from_response(cls, header_data, ignore_bad_cookies=False, ignore_bad_attributes=True):
    """Construct a Cookies object from response header data."""
    jar = cls()
    jar.parse_response(header_data,
                       ignore_bad_cookies=ignore_bad_cookies,
                       ignore_bad_attributes=ignore_bad_attributes)
    return jar
def get_current_release(self):
    """Get the release ID of the "current" deployment, or ``None`` if
    there is no current deployment.

    This method performs one network operation.

    :return: the current release ID
    :rtype: str
    """
    target = self._runner.run("readlink '{0}'".format(self._current))
    if target.failed:
        return None
    return os.path.basename(target.strip())
def projects(self):
    """Return a set of all projects in this list."""
    found = set()
    for todo_item in self._todos:
        found = found.union(todo_item.projects())
    return found
def set_option(self, key, subkey, value):
    """Sets the value of an option.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.
    :param value: New value for the option (type varies).

    :raise:
        :NotRegisteredError: If ``key`` or ``subkey`` do not define any
            option.
        :ValueError: If the targeted option is locked, if the provided
            value is not of the expected type, or if it is not among the
            accepted values for the option.
    """
    # Identifiers are normalized to lower case before lookup.
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    # Locked options cannot be modified.
    if df["locked"].values[0]:
        raise ValueError("{0}.{1} option is locked".format(key, subkey))
    # Type-check the new value against the option's declared type.
    ev.value_eval(value, df["type"].values[0])
    # Check the value against the option's accepted-values list.
    if not self.check_option(key, subkey, value):
        info = "{0}.{1} accepted options are: ".format(key, subkey)
        info += "[{}]".format(", ".join(df["values"].values[0]))
        raise ValueError(info)
    self.gc.loc[
        (self.gc["k1"] == key) & (self.gc["k2"] == subkey), "value"] = value
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
    """Create a html cache. Html string will be automatically compressed.

    :param directory: path for the cache directory.
    :param compress_level: 0 ~ 9, 9 is slowest and smallest.
    :param value_type_is_binary: whether stored values are raw bytes.
    :param kwargs: other arguments forwarded to ``diskcache.Cache``.
    :return: a ``diskcache.Cache()``
    """
    return diskcache.Cache(
        directory,
        disk=CompressedDisk,
        disk_compress_level=compress_level,
        disk_value_type_is_binary=value_type_is_binary,
        **kwargs
    )
def before_sleep_func_accept_retry_state(fn):
    """Wrap "before_sleep" function to accept "retry_state"."""
    if not six.callable(fn):
        return fn
    if func_takes_retry_state(fn):
        # Already new-style; nothing to adapt.
        return fn

    @_utils.wraps(fn)
    def wrapped_before_sleep_func(retry_state):
        # Legacy callback: warn once per call site and translate the
        # retry_state into the old (retry_object, sleep, last_result)
        # calling convention.
        warn_about_non_retry_state_deprecation(
            'before_sleep', fn, stacklevel=4)
        return fn(
            retry_state.retry_object,
            sleep=getattr(retry_state.next_action, 'sleep'),
            last_result=retry_state.outcome)
    return wrapped_before_sleep_func
def execute_script(self, string, args=None):
    """Execute script passed in to function

    @type string: str
    @value string: Script to execute
    @type args: dict
    @value args: Dictionary representing command line args
    @rtype: int
    @rtype: response code
    """
    result = None
    try:
        result = self.driver_wrapper.driver.execute_script(string, args)
        return result
    except WebDriverException:
        # NOTE(review): if execute_script raised, the assignment above
        # never completed, so ``result`` is still None here — the first
        # branch looks unreachable; confirm before removing.
        if result is not None:
            message = 'Returned: ' + str(result)
        else:
            message = "No message. Check your Javascript source: {}".format(string)
        raise WebDriverJavascriptException.WebDriverJavascriptException(self.driver_wrapper, message)
def load_output_meta(self):
    """Load descriptive output meta data from a JSON file in the input
    directory."""
    meta_path = os.path.join(self.options.inputdir, 'output.meta.json')
    with open(meta_path) as handle:
        return json.load(handle)
def getTagMapNearPosition(self, idx):
    """Return ASN.1 types that are allowed at or past given field
    position.

    Parameters
    ----------
    idx: :py:class:`int`
        Field index

    Returns
    -------
    : :class:`~pyasn1.type.tagmap.TagMap`
        Map of ASN.1 types allowed at given field position

    Raises
    ------
    : :class:`~pyasn1.error.PyAsn1Error`
        If given position is out of fields range
    """
    try:
        entry = self.__ambiguousTypes[idx]
    except KeyError:
        raise error.PyAsn1Error('Type position out of range')
    return entry.tagMap
def isAncestorOf(self, other):
    """Returns whether this Key is an ancestor of `other`.

    >>> john = Key('/Comedy/MontyPython/Actor:JohnCleese')
    >>> Key('/Comedy').isAncestorOf(john)
    True
    """
    if not isinstance(other, Key):
        raise TypeError('%s is not of type %s' % (other, Key))
    return other._string.startswith(self._string + '/')
def locationUpdatingAccept(MobileId_presence=0, FollowOnProceed_presence=0, CtsPermission_presence=0):
    """LOCATION UPDATING ACCEPT Section 9.2.13

    Builds the mandatory TP/message-type/local-area-id layers and then
    appends each optional IE whose presence flag is set to 1.
    """
    a = TpPd(pd=0x5)
    b = MessageType(mesType=0x02)
    c = LocalAreaId()
    packet = a / b / c
    # Fixed: use ``== 1`` instead of ``is 1`` — identity comparison with
    # an int literal is implementation-dependent (SyntaxWarning on
    # modern CPython).
    if MobileId_presence == 1:
        d = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
        packet = packet / d
    if FollowOnProceed_presence == 1:
        e = FollowOnProceed(ieiFOP=0xA1)
        packet = packet / e
    if CtsPermission_presence == 1:
        f = CtsPermissionHdr(ieiCP=0xA2, eightBitCP=0x0)
        packet = packet / f
    return packet
def _dict_mapping_to_pb(mapping, proto_type):
    """Convert a dict to protobuf.

    Args:
        mapping (dict): A dict that needs to be converted to protobuf.
        proto_type (str): The type of the Protobuf.

    Returns:
        An instance of the specified protobuf.
    """
    pb = getattr(trace_pb2, proto_type)()
    ParseDict(mapping, pb)
    return pb
def create_color_method(color, code):
    """Create a function for the given color.

    Done inside this function to keep the variables out of the main
    scope.
    """
    def func(self, content=''):
        # ``code`` is captured in this closure per color.
        return self._apply_color(code, content)
    # Install the method on Terminal under the color's name.
    setattr(Terminal, color, func)
def _regressors(self):
    """Get the supported regressors.

    Returns
    -------
    regressors : tuple
        The supported regressor classes (contains ``MLPRegressor`` when
        scikit-learn >= 0.18, otherwise empty).
    """
    supported = ()
    if self.sklearn_ver[:2] >= (0, 18):
        from sklearn.neural_network.multilayer_perceptron \
            import MLPRegressor
        supported += (MLPRegressor, )
    return supported
def LDA(x, labels, n=False):
    """Linear Discriminant Analysis function.

    **Args:**

    * `x` : input matrix (2d array), every row represents new sample

    * `labels` : list of labels (iterable), every item should be label
      for sample with corresponding index

    **Kwargs:**

    * `n` : number of features returned (integer) - how many columns
      should the output keep

    **Returns:**

    * new_x : matrix with reduced size (number of columns are equal `n`)
    """
    # Fixed: convert to an array BEFORE reading ``x.shape`` — the
    # original read x.shape first, which failed for plain nested lists
    # when ``n`` was not provided.
    try:
        x = np.array(x)
    except Exception:
        # Narrowed from a bare ``except``, which would also swallow
        # KeyboardInterrupt/SystemExit.
        raise ValueError('Impossible to convert x to a numpy array.')
    if not n:
        n = x.shape[1] - 1
    assert type(n) == int, "Provided n is not an integer."
    assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
    eigen_values, eigen_vectors = LDA_base(x, labels)
    # Order eigenvectors by descending eigenvalue and project the data.
    eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
    return eigen_order[:n].dot(x.T).T
def export_certificate(ctx, slot, format, certificate):
    """Export a X.509 certificate.

    Reads a certificate from one of the slots on the YubiKey.

    \b
    SLOT        PIV slot to read certificate from.
    CERTIFICATE File to write certificate to. Use '-' to use stdout.
    """
    controller = ctx.obj['controller']
    try:
        cert = controller.read_certificate(slot)
    except APDUError as e:
        if e.sw == SW.NOT_FOUND:
            ctx.fail('No certificate found.')
        else:
            logger.error('Failed to read certificate from slot %s', slot,
                         exc_info=e)
            # Fixed: re-raise instead of falling through — previously
            # execution continued to the write below with ``cert``
            # unbound, producing a NameError.
            raise
    certificate.write(cert.public_bytes(encoding=format))
def template(cls, userdata):
    """Create a template instance used for message callbacks."""
    normalized = cls.normalize(cls.create_empty(None), userdata)
    return Userdata(normalized)
def __doc_cmp(self, other):
    """Three-way comparison on document ids.

    ``None`` always compares greater (returns -1); two new documents
    compare equal.
    """
    if other is None:
        return -1
    if self.is_new and other.is_new:
        return 0
    if self.__docid < other.__docid:
        return -1
    if self.__docid > other.__docid:
        return 1
    return 0
def _getEngineVersionDetails(self):
    """Parses the JSON version details for the latest installed version
    of UE4."""
    version_file = os.path.join(
        self.getEngineRoot(), 'Engine', 'Build', 'Build.version')
    return json.loads(Utility.readFile(version_file))
def update_employee(emp_id, key=None, value=None, items=None):
    """Update one or more items for this employee. Specifying an empty
    value will clear it for that employee.

    CLI Examples:

        salt myminion bamboohr.update_employee 1138 nickname Curly
        salt myminion bamboohr.update_employee 1138 nickname ''
        salt myminion bamboohr.update_employee 1138 items='{"nickname": "Curly"}'
        salt myminion bamboohr.update_employee 1138 items='{"nickname": ""}'
    """
    if items is None:
        if key is None or value is None:
            return {'Error': 'At least one key/value pair is required'}
        items = {key: value}
    elif isinstance(items, six.string_types):
        items = salt.utils.yaml.safe_load(items)
    fields = ''.join(
        '<field id="{0}">{1}</field>'.format(name, items[name])
        for name in items
    )
    status, result = _query(
        action='employees',
        command=emp_id,
        data='<employee>{0}</employee>'.format(fields),
        method='POST',
    )
    return show_employee(emp_id, ','.join(items.keys()))
def set_default_from_schema(instance, schema):
    """Populate default values on an `instance` given a `schema`.

    Parameters
    ----------
    instance : dict
        instance to populate default values for
    schema : dict
        JSON schema with default values

    Returns
    -------
    instance : dict
        instance with populated default values
    """
    for field, spec in schema.get('properties', {}).items():
        if 'default' in spec:
            instance.setdefault(field, spec['default'])
        if 'properties' in spec:
            # Recurse into nested objects, creating them when absent.
            set_default_from_schema(instance.setdefault(field, {}), spec)
    return instance
def rangify(number_list):
    """Collapse a sorted list of integers into (start, end) ranges.

    Assumes the list is sorted.
    """
    if not number_list:
        return number_list
    ranges = []
    start = previous = number_list[0]
    for value in number_list[1:]:
        if value != previous + 1:
            # Gap found: close the current run.
            ranges.append((start, previous))
            start = value
        previous = value
    ranges.append((start, previous))
    return ranges
def download(self, files=None, destination=None, overwrite=False, callback=None):
    """Download file or files.

    :param files: file or files to download
    :param destination: destination path (defaults to users home directory)
    :param overwrite: replace existing files?
    :param callback: callback function that will receive total file size
        and written bytes as arguments
    :type files: ``list`` of ``dict`` with file data from filemail
    :type destination: ``str`` or ``unicode``
    :type overwrite: ``bool``
    :type callback: ``func``
    """
    if files is None:
        files = self.files
    elif not isinstance(files, list):
        files = [files]
    if destination is None:
        destination = os.path.expanduser('~')
    for entry in files:
        if not isinstance(entry, dict):
            raise FMBaseError('File must be a <dict> with file data')
        self._download(entry, destination, overwrite, callback)
def jsonp_wrap(callback_key='callback'):
    """Format response to jsonp and add a callback to JSON data - a
    jsonp request.

    :param callback_key: name of the request parameter that carries the
        JSONP callback function name.
    """
    def decorator_fn(f):
        @wraps(f)
        def jsonp_output_decorator(*args, **kwargs):
            task_data = _get_data_from_args(args)
            data = task_data.get_data()
            # The callback name is mandatory in the request data.
            if callback_key not in data:
                raise KeyError(
                    'Missing required parameter "{0}" for task.'.format(
                        callback_key))
            callback = data[callback_key]
            jsonp = f(*args, **kwargs)
            # In a web context, set the JavaScript mime type on the
            # response.
            if isinstance(JobContext.get_current_context(), WebJobContext):
                JobContext.get_current_context().add_responder(
                    MimeSetterWebTaskResponder('application/javascript'))
            # Wrap the payload in the requested callback invocation.
            jsonp = "{callback}({data})".format(callback=callback, data=jsonp)
            return jsonp
        return jsonp_output_decorator
    return decorator_fn
def read_motifs(infile=None, fmt="pwm", as_dict=False):
    """Read motifs from a file or stream or file-like object.

    Parameters
    ----------
    infile : string or file-like object, optional
        Motif database, filename of motif file or file-like object. If
        infile is not specified the default motifs as specified in the
        config file will be returned.
    fmt : string, optional
        Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or
        'align'.
    as_dict : boolean, optional
        Return motifs as a dictionary with motif_id, motif pairs.

    Returns
    -------
    motifs : list
        List of Motif instances. If as_dict is set to True, motifs is a
        dictionary.
    """
    if infile is None or isinstance(infile, six.string_types):
        # Resolve the (possibly default) motif file location and open it.
        with open(pwmfile_location(infile)) as handle:
            motifs = _read_motifs_from_filehandle(handle, fmt)
    else:
        motifs = _read_motifs_from_filehandle(infile, fmt)
    if as_dict:
        return {m.id: m for m in motifs}
    return motifs
def read(self, subpath=None):
    """Returns the UTF-8 content of the specified subpath.

    subpath is expected to already have been normalized.

    Raises ReadmeNotFoundError if a README for the specified subpath
    does not exist. Raises werkzeug.exceptions.NotFound if the
    resulting path would fall out of the root directory.
    """
    binary = self.is_binary(subpath)
    filename = self.readme_for(subpath)
    reader = self._read_binary if binary else self._read_text
    try:
        return reader(filename)
    except (OSError, EnvironmentError) as ex:
        if ex.errno == errno.ENOENT:
            raise ReadmeNotFoundError(filename)
        raise
def restart_listener(self, topics):
    """Restart listener after configuration update."""
    listener = self.listener
    if listener is not None and listener.running:
        self.stop()
    # Re-run __init__ to rebuild the listener with the new topics.
    self.__init__(topics=topics)
def last_component_continued(self):
    """Determine whether the last component of this SL record is a
    continued one or not.

    Parameters:
     None.
    Returns:
     True if the last component of this SL record is continued, False
     otherwise.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')
    components = self.symlink_components
    if not components:
        raise pycdlibexception.PyCdlibInternalError('Trying to get continued on a non-existent component!')
    return components[-1].is_continued()
def AddSourceRestriction(self, cidr):
    """Add and commit a single source IP restriction policy.

    >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0]
              .AddSourceRestriction(cidr="132.200.20.1/32").WaitUntilComplete()
    0
    """
    restriction = SourceRestriction(self, cidr)
    self.source_restrictions.append(restriction)
    return self.Update()
def get(self, key, default=None):
    """Returns the value of `key` if it exists, else `default`."""
    if key in self._hparam_types:
        if default is not None:
            param_type, is_param_list = self._hparam_types[key]
            type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
            fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
                        'default=%s' % (key, type_str, default))
            # The default must match the parameter's list-ness...
            is_default_list = isinstance(default, list)
            if is_param_list != is_default_list:
                raise ValueError(fail_msg)
            # ...and each element (or the scalar) must be castable to
            # the declared type.
            try:
                if is_default_list:
                    for value in default:
                        _cast_to_type_if_compatible(key, param_type, value)
                else:
                    _cast_to_type_if_compatible(key, param_type, default)
            except ValueError as e:
                raise ValueError('%s. %s' % (fail_msg, e))
        return getattr(self, key)
    return default
def opp_two_point_field_goal_percentage(self):
    """Returns a ``float`` of the number of two point field goals made
    divided by the number of two point field goal attempts by opponents.
    Percentage ranges from 0-1.
    """
    try:
        ratio = float(self.opp_two_point_field_goals) / \
            float(self.opp_two_point_field_goal_attempts)
    except ZeroDivisionError:
        return 0.0
    return round(ratio, 3)
def get_template_folder():
    """Get the path to the folder where the HTML templates are."""
    cfg = get_project_configuration()
    if 'templates' not in cfg:
        # First run: record the packaged template path in ~/.hwrtrc.
        cfg['templates'] = pkg_resources.resource_filename('hwrt', 'templates/')
        rcfile = os.path.join(os.path.expanduser("~"), ".hwrtrc")
        with open(rcfile, 'w') as f:
            yaml.dump(cfg, f, default_flow_style=False)
    return cfg['templates']
def snapshot():
    """Return the graph of all currently gc-tracked objects.

    Excludes the returned :class:`~refcycle.object_graph.ObjectGraph`
    and objects owned by it.

    Note that a subsequent call to :func:`~refcycle.creators.snapshot`
    will capture all of the objects owned by this snapshot.  The
    :meth:`~refcycle.object_graph.ObjectGraph.owned_objects` method may
    be helpful when excluding these objects from consideration.
    """
    all_objects = gc.get_objects()
    this_frame = inspect.currentframe()
    selected_objects = []
    for obj in all_objects:
        # Skip the current frame so the snapshot machinery itself is
        # not part of the graph.
        if obj is not this_frame:
            selected_objects.append(obj)
    graph = ObjectGraph(selected_objects)
    # Drop local references so the returned graph does not keep these
    # intermediates alive.
    del this_frame, all_objects, selected_objects, obj
    return graph
def resetMonitors(self):
    """Recalculates screen based on changed monitor setup."""
    for warning in (
            "*** BE AWARE: experimental - might not work ***",
            "Re-evaluation of the monitor setup has been requested",
            "... Current Region/Screen objects might not be valid any longer",
            "... Use existing Region/Screen objects only if you know what you are doing!"):
        Debug.error(warning)
    self.__init__(self._screenId)
    self.showMonitors()
def parse_extension_arg(arg, arg_dict):
    """Converts argument strings in key=value or key.namespace=value
    form to dictionary entries

    Parameters
    ----------
    arg : str
        The argument string to parse, which must be in key=value or
        key.namespace=value form.
    arg_dict : dict
        The dictionary into which the key/value pair will be added
    """
    match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
    if match is None:
        raise ValueError(
            "invalid extension argument '%s', must be in key=value form"
            % arg
        )
    arg_dict[match.group(1)] = match.group(4)
def end_offsets(self, partitions):
    """Get the last offset for the given partitions.

    The last offset of a partition is the offset of the upcoming
    message, i.e. the offset of the last available message + 1. This
    method does not change the current consumer position of the
    partitions.

    Note: This method may block indefinitely if the partition does not
    exist.

    Arguments:
        partitions (list): List of TopicPartition instances to fetch
            offsets for.

    Returns:
        ``{TopicPartition: int}``: The end offsets for the given
        partitions.

    Raises:
        UnsupportedVersionError: If the broker does not support looking
            up the offsets by timestamp.
        KafkaTimeoutError: If fetch failed in request_timeout_ms
    """
    timeout_ms = self.config['request_timeout_ms']
    return self._fetcher.end_offsets(partitions, timeout_ms)
def new_fact(self):
    """Create a new Fact from this template."""
    fact = lib.EnvCreateFact(self._env, self._tpl)
    if fact == ffi.NULL:
        raise CLIPSError(self._env)
    # Wrap the raw CLIPS pointer via the module-level new_fact factory
    # (same name as this method, resolved at module scope).
    return new_fact(self._env, fact)
def get_node_id(edge, node_type):
    """Return the source or target node id of an edge.

    Parameters
    ----------
    edge : object
        Element with an ``attrib`` mapping whose 'source'/'target'
        values look like ``"<prefix>.<int>"``.
    node_type : str
        Either 'source' or 'target'.

    Returns
    -------
    int
        The numeric node id.

    Raises
    ------
    ValueError
        If ``node_type`` is not 'source' or 'target'.
    """
    # Validate explicitly instead of using ``assert``, which is
    # stripped when Python runs with -O.
    if node_type not in ('source', 'target'):
        raise ValueError("node_type must be 'source' or 'target', "
                         "got {!r}".format(node_type))
    _, node_id_str = edge.attrib[node_type].split('.')
    return int(node_id_str)
def get_src_names(gta):
    """Build and return a sorted list of source names.

    Parameters
    ----------
    gta : `fermipy.GTAnalysis`
        The analysis object

    Returns
    -------
    l : list
        Names of the sources
    """
    return sorted(src.name for src in gta.roi.sources)
def fetch(self):
    """Retrieve the content of the current document from the remote
    database and populate the locally cached Document object with that
    content.  A call to fetch will overwrite any dictionary content
    currently in the locally cached Document object.
    """
    url = self.document_url
    if url is None:
        raise CloudantDocumentException(101)
    response = self.r_session.get(url)
    response.raise_for_status()
    self.clear()
    self.update(response_to_json_dict(response, cls=self.decoder))
def get_content_children(self, content_id, expand=None, parent_version=None, callback=None):
    """Returns a map of the direct children of a piece of Content.

    :param content_id (string): id of the content to retrieve children
        for.
    :param expand (string): OPTIONAL: comma separated list of properties
        to expand on the children. Default: None.
    :param parent_version (int): OPTIONAL: version of the content to
        retrieve children for. Default: 0 (Latest).
    :param callback: OPTIONAL: callback to execute on the resulting
        data before the method returns. Default: None (raw data
        returned).
    :return: The JSON data returned from the content/{id}/child
        endpoint, or the results of the callback. Will raise
        requests.HTTPError on bad input, potentially.
    """
    params = {}
    if expand:
        params["expand"] = expand
    if parent_version:
        params["parentVersion"] = parent_version
    endpoint = "rest/api/content/{id}/child".format(id=content_id)
    return self._service_get_request(endpoint, params=params, callback=callback)
def call(subcommand, args):
    """Call a subcommand passing the args."""
    args['<napp>'] = parse_napps(args['<napp>'])
    getattr(NAppsAPI, subcommand)(args)
def is_ancestor(self, child_key_name, ancestor_key_name):
    """Returns True if ancestor lies in the ancestry tree of child."""
    if ancestor_key_name is None:
        # Everything descends from the (absent) root.
        return True
    parent = self.dct[child_key_name]['parent']
    if child_key_name == ancestor_key_name:
        return True
    if parent is None:
        return False
    # Walk one level up and recurse.
    return self.is_ancestor(parent, ancestor_key_name)
def get_data(self, data_x, data_y):
    """Get the data value at the given position.

    Indices are zero-based, as in Numpy.

    Parameters
    ----------
    data_x, data_y : int
        Data indices for X and Y, respectively.

    Returns
    -------
    value
        Data slice.

    Raises
    ------
    ginga.ImageView.ImageViewNoDataError
        Image not found.
    """
    image = self.get_image()
    if image is None:
        raise ImageViewNoDataError("No image found")
    return image.get_data_xy(data_x, data_y)
def update_port_statuses_cfg(self, context, port_ids, status):
    """Update the operational statuses of a list of router ports.

    This is called by the Cisco cfg agent to update the status of a
    list of ports.

    :param context: contains user information
    :param port_ids: list of ids of all the ports for the given status
    :param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
    """
    self._l3plugin.update_router_port_statuses(context, port_ids, status)
def match(self, row):
    """Return True if the field matches the regular expression of this
    simple condition, False otherwise.

    :param dict row: The row.
    :rtype: bool
    """
    return re.search(self._expression, row[self._field]) is not None
def get_client_index_from_id(self, client_id):
    """Return the index of the client whose ``id()`` equals
    ``client_id``, or None if there is no such client."""
    matches = (i for i, c in enumerate(self.clients) if id(c) == client_id)
    return next(matches, None)
def reset(self):
    """Resets the configuration, and overwrites the existing
    configuration file."""
    self._options = {}
    # Force-save so the emptied options replace the file on disk.
    self.save(force=True)
    self._success = True
def humanize_duration(duration):
    """Returns a humanized string representing time difference

    For example: 2 days 1 hour 25 minutes 10 seconds
    """
    secs = duration.seconds
    units = (
        (duration.days, _('day,days')),
        (secs // 3600, _('hour,hours')),
        (secs % 3600 // 60, _('minute,minutes')),
        (secs % 3600 % 60, _('second,seconds')),
    )
    parts = [u'%s %s' % (count, pluralize(count, names))
             for count, names in units if count > 0]
    if not parts:
        return _('< 1 second')
    return ', '.join(parts)
def conv3x3(in_channels, out_channels, stride=1):
    """3x3 convolution with padding.

    Original code has had bias turned off, because Batch Norm would
    remove the bias either way.
    """
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def get_session(db_url):
    """Gets SQLAlchemy session given url. Your tables must inherit from
    Base in hdx.utilities.database.

    Args:
        db_url (str): SQLAlchemy url

    Returns:
        sqlalchemy.orm.session.Session: SQLAlchemy session
    """
    engine = create_engine(db_url, poolclass=NullPool, echo=False)
    Base.metadata.create_all(engine)
    session_factory = sessionmaker(bind=engine)
    return session_factory()
def is_same_filename(filename1, filename2):
    """Check if filename1 and filename2 refer to the same file once
    symlinks and relative segments are resolved."""
    real1 = os.path.realpath(filename1)
    real2 = os.path.realpath(filename2)
    return real1 == real2
def _initialize_providers(self):
    """Read config file and initialize providers.

    Returns a dict mapping provider name to an instantiated provider
    class, as declared by each entry's ``PROVIDER`` dotted path.

    Raises:
        ConfigurationError: if ``DATABASES`` is not a non-empty dict or
            lacks a 'default' entry.
    """
    configured_providers = active_config.DATABASES
    provider_objects = {}
    if not isinstance(configured_providers, dict) or configured_providers == {}:
        raise ConfigurationError(
            "'DATABASES' config must be a dict and at least one "
            "provider must be defined")
    if 'default' not in configured_providers:
        raise ConfigurationError(
            "You must define a 'default' provider")
    for provider_name, conn_info in configured_providers.items():
        provider_full_path = conn_info['PROVIDER']
        # Split "pkg.module.ClassName" into module path and class name.
        provider_module, provider_class = provider_full_path.rsplit('.', maxsplit=1)
        provider_cls = getattr(importlib.import_module(provider_module), provider_class)
        provider_objects[provider_name] = provider_cls(conn_info)
    return provider_objects
def GameActionModeEnum(ctx):
    """Game Action Modes."""
    return Enum(
        ctx,
        diplomacy=0,
        speed=1,
        instant_build=2,
        quick_build=4,
        allied_victory=5,
        cheat=6,
        unk0=9,
        spy=10,
        unk1=11,
        farm_queue=13,
        farm_unqueue=14,
        default=Pass
    )
def get_color(index):
    """Dips the brush in paint.

    Arguments:
        index - an integer between 0 and 7, inclusive. Tells the bot
        which color you want.
    """
    if index in range(0, 8):
        state['turtle'].goto(-WCB_WIDTH / 2, -WCB_HEIGHT / 2)
        _make_cnc_request("tool.color./" + str(index))
        colors = ["black", "red", "orange", "yellow",
                  "green", "blue", "purple", "brown"]
        state['turtle'].color(colors[index])
        state['distance_traveled'] = 0
    else:
        # Fixed: str() the index — concatenating an int to the message
        # raised TypeError in the original.
        print("Color indexes must be between 0 and 7, but you gave me: "
              + str(index))
def plotGene(self):
    """Plot the gene as a scatter of (x, y) points and show the figure."""
    pl.plot(self.x, self.y, '.')
    pl.grid(True)
    pl.show()
def send_packed_virtual_touch_event(xpos, ypos, phase, device_id, finger):
    """Create a new SEND_PACKED_VIRTUAL_TOUCH_EVENT_MESSAGE.

    Packs each field as a 16-bit little-endian integer into the event
    payload, in the order (xpos, ypos, phase, device_id, finger).
    """
    message = create(protobuf.SEND_PACKED_VIRTUAL_TOUCH_EVENT_MESSAGE)
    event = message.inner()
    event.data = b''.join(
        field.to_bytes(2, byteorder='little')
        for field in (xpos, ypos, phase, device_id, finger)
    )
    return message
def purge_collection(keys):
    "Recursive purge of nodes with name and id"
    # NOTE(review): Python 2 only (print statement below).
    for key in keys:
        # Keys look like "<name> (<id>)".
        m = re.match(r'(.*) \((\d+)\)', key)
        name = m.group(1)
        node_id = m.group(2)
        value = render_value_for_node(node_id)
        print 'remove node with name:{0} and id:{1}'.format(name, node_id)
        delete_node(node_id=node_id)
        if isinstance(value, dict):
            # Recurse into child nodes.
            purge_collection(value.keys())
def all(cls, path=''):
    """Return all occurrences of the item."""
    url = urljoin(cls._meta.base_url, path)
    pq_items = cls._get_items(url=url, **cls._meta._pyquery_kwargs)
    return [cls(item=entry) for entry in pq_items.items()]
def douglas_rachford_pd_stepsize(L, tau=None, sigma=None):
    r"""Default step sizes for `douglas_rachford_pd`.

    Parameters
    ----------
    L : sequence of `Operator` or float
        The operators, or their norms, used by `douglas_rachford_pd`.
    tau : positive float, optional
        Use this value for ``tau`` instead of computing it from the
        operator norms.
    sigma : tuple of float, optional
        The ``sigma`` step size parameters for the dual update.

    Returns
    -------
    tau : float
        The ``tau`` step size parameter for the primal update.
    sigma : tuple of float
        The ``sigma`` step size parameters for the dual update.

    Notes
    -----
    Convergence requires
    :math:`\tau \sum_{i=1}^n \sigma_i \|L_i\|^2 < 4`; any parameter not
    supplied is chosen so this holds (with equality to 2 in the sum).
    """
    # Both given: pass through with normalized types.
    if tau is not None and sigma is not None:
        return float(tau), tuple(sigma)

    L_norms = _operator_norms(L)
    if tau is None and sigma is None:
        tau = 1 / sum(L_norms)
        sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2)
                 for Li_norm in L_norms]
        return tau, tuple(sigma)
    if tau is None:
        # Only sigma given: derive tau from it.
        tau = 2 / sum(si * Li_norm ** 2
                      for si, Li_norm in zip(sigma, L_norms))
        return tau, tuple(sigma)
    # Only tau given: derive sigma from it.
    tau = float(tau)
    sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2)
             for Li_norm in L_norms]
    return tau, tuple(sigma)
def _handle_sub_action(self, input_dict, handler):
    """Resolve replacements in the Sub action using ``handler``.

    :param input_dict: Dictionary to be resolved
    :param handler: handler that is specific to each implementation.
    :return: Resolved value of the Sub dictionary
    """
    if not self.can_handle(input_dict):
        return input_dict
    name = self.intrinsic_name
    input_dict[name] = self._handle_sub_value(input_dict[name], handler)
    return input_dict
def call(cmd, timeout=None, signum=signal.SIGKILL, keep_rc=False, encoding="utf-8", env=os.environ):
    """Execute a cmd or list of commands with an optional timeout in
    seconds.

    If `timeout` is supplied and expires, the process is killed with
    SIGKILL (kill -9) and an exception is raised. Otherwise, the
    command output is returned.

    Parameters
    ----------
    cmd: str or [[str]]
        The command(s) to execute
    timeout: int
        Seconds before kill is issued to the process
    signum: int
        The signal number to issue to the process on timeout
    keep_rc: bool
        Whether to return the exit code along with the output
    encoding: str
        unicode decoding scheme to use. Default is "utf-8"
    env: dict
        The environment in which to execute commands. Default is
        os.environ

    Returns
    -------
    str
        Content of stdout of cmd on success.

    Raises
    ------
    CalledProcessError
        Raised when cmd fails
    """
    commands = cmd if isinstance(cmd, list) else [cmd]
    pipeline = Pipeline(*commands, timeout=timeout, signum=signum, env=env)
    result = pipeline(keep_rc=keep_rc)
    if keep_rc:
        rc, raw = result
        return rc, raw.decode(encoding, 'ignore')
    return result.decode(encoding, "ignore")
def get_model(self):
    """Return a lightweight model object wrapping the underlying CNN.

    The returned object exposes a single ``cnn`` attribute holding the
    network used as the function approximator.
    """
    approximator = self.__cnn

    class Model(object):
        def __init__(self, cnn):
            # The wrapped CNN function approximator.
            self.cnn = cnn

    return Model(approximator)
Returns a model `object` usable as a function approximator; it exposes a `cnn` attribute whose type is `pydbm.cnn.pydbm.cnn.convolutional_neural_network.ConvolutionalNeuralNetwork`.
def setItemPolicy(self, item, policy):
    """Set the action policy of the given tree item.

    Updates the stored combobox index for the action column and, if the
    item currently has a combobox widget, syncs its displayed selection.
    """
    new_index = item._combobox_indices[self.ColAction].get(policy, 0)
    self._updateItemComboBoxIndex(item, self.ColAction, new_index)
    widget = self.itemWidget(item, self.ColAction)
    if widget:
        widget.setCurrentIndex(new_index)
Sets the policy of the given item
def diagonalize(operator):
    """Diagonalize a single-site spin Hamiltonian.

    Returns the eigenvalues shifted so the lowest one is zero, together
    with the eigenvectors, via a Hermitian eigensolver.
    """
    eigvals, eigvecs = LA.eigh(operator)
    eigvals = eigvals - np.amin(eigvals)
    return eigvals, eigvecs
diagonalizes single site Spin Hamiltonian
def to_iso_string(self) -> str:
    """Return the full ISO-8601 string for the stored datetime value."""
    assert isinstance(self.value, datetime)
    return self.value.isoformat()
Returns full ISO string for the given date
def authenticate(self, code: str) -> 'Preston':
    """Authenticate using the code from the EVE SSO.

    A new Preston object is returned; this object is not modified.

    Args:
        code: SSO authorization code

    Returns:
        a new, authenticated Preston instance

    Raises:
        Exception: if the token endpoint does not answer with HTTP 200
    """
    headers = self._get_authorization_headers()
    data = {
        'grant_type': 'authorization_code',
        'code': code
    }
    r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
    if r.status_code != 200:
        # Fixed typo in the error message ("repsonse" -> "response").
        raise Exception(f'Could not authenticate, got response code {r.status_code}')
    # Clone the constructor kwargs and splice in the fresh token set.
    new_kwargs = dict(self._kwargs)
    response_data = r.json()
    new_kwargs['access_token'] = response_data['access_token']
    new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
    new_kwargs['refresh_token'] = response_data['refresh_token']
    return Preston(**new_kwargs)
Authenticates using the code from the EVE SSO. A new Preston object is returned; this object is not modified. The intended usage is: auth = preston.authenticate('some_code_here') Args: code: SSO code Returns: new Preston, authenticated
def glob_by_extensions(directory, extensions):
    """Return files in ``directory`` matching any of the given extensions."""
    directorycheck(directory)
    matched = []
    for extension in extensions:
        matched += glob.glob('{0}/*.{1}'.format(directory, extension))
    return matched
Returns files matched by any of the extensions in the extensions list
def b58enc(uid):
    """Encode an integer UID as a string in the base58 url-safe alphabet.

    :raises ValueError: if ``uid`` is not an int
    """
    if not isinstance(uid, int):
        raise ValueError('Invalid integer: {}'.format(uid))
    if uid == 0:
        return BASE58CHARS[0]
    # Collect least-significant digits first, then reverse once at the end.
    digits = []
    while uid:
        uid, rem = divmod(uid, 58)
        digits.append(BASE58CHARS[rem])
    return ''.join(reversed(digits))
Encodes a UID to an 11-character string, using the base58 url-safe alphabet
def mark(self, partition, offset):
    """Advance the high-water mark for ``partition`` to at least offset+1.

    The mark never moves backwards: the new value is the max of the
    existing mark and ``offset + 1``.
    """
    current = self.high_water_mark.get(partition, 0)
    new_mark = max(offset + 1, current)
    self.logger.debug("Setting high-water mark to: %s", {partition: new_mark})
    self.high_water_mark[partition] = new_mark
Set the high-water mark in the current context. In order to know the current partition, it is helpful to initialize the consumer to provide partition info via: .. code:: python consumer.provide_partition_info()
def error(cls, name, message, *args):
    """Log *message* (with %-style args) at ERROR level on the named logger."""
    logger = cls.getLogger(name)
    logger.error(message, *args)
Convenience function to log a message at the ERROR level.

:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are merged into msg using the string formatting operator.

.. note:: The native logger's `kwargs` are not used in this function.
def sanity_check_wirevector(self, w):
    """Check that ``w`` is a WireVector, raising PyrtlError otherwise."""
    from .wire import WireVector
    if isinstance(w, WireVector):
        return
    raise PyrtlError(
        'error attempting to pass an input of type "%s" '
        'instead of WireVector' % type(w))
Check that w is a valid wirevector type.
def columns_used(self):
    """Return the unique columns used across the group's models for
    filtering and in the model expression."""
    sources = (
        util.columns_in_filters(self.fit_filters),
        util.columns_in_filters(self.predict_filters),
        util.columns_in_formula(self.default_model_expr),
        self._group.columns_used(),
        [self.segmentation_col],
    )
    return list(tz.unique(tz.concatv(*sources)))
Returns all the columns used across all models in the group for filtering and in the model expression.
def load_decorate(package):
    """Import the named package and decorate it for acorn logging.

    :param package: name of the package to import and decorate.
    :return: the imported, decorated package module.
    """
    from acorn.logging.decoration import set_decorating, decorating
    # Remember the current decoration state so it can be restored afterwards
    # (decoration may already be in progress for another package).
    origdecor = decorating
    set_decorating(True)
    import sys
    from importlib import import_module
    apack = import_module(package)
    from acorn.logging.decoration import decorate
    decorate(apack)
    # Register the decorated module under the "acorn." namespace so that
    # "import acorn.<package>" resolves to the decorated version.
    sys.modules["acorn.{}".format(package)] = apack
    # Restore the previous decoration flag.
    from acorn.logging.decoration import set_decorating
    set_decorating(origdecor)
    return apack
Imports and decorates the package with the specified name.
def _ParseTypeCheckString(type_check_string, stack_location, self_name):
    """Convert a string version of a type_check into a python instance.

    The string is evaluated with the globals of the caller's frame (found
    via ``inspect.stack()[stack_location]``) so names in the type-check
    string resolve exactly as they would in the caller's code.

    :param str type_check_string: the type-check expression to evaluate.
    :param int stack_location: index into inspect.stack() of the frame in
        which the string was defined; its globals feed eval().
    :param str self_name: optional name of the class currently being
        defined (not yet present in globals). If None, it is guessed from
        the caller's function name on the stack.
    """
    # Frame whose globals provide the evaluation namespace.
    target_frame = inspect.stack()[stack_location][0]
    self_name = self_name or inspect.stack()[stack_location][3]
    eval_globals = target_frame.f_globals
    # Map the class's own name to a Typename placeholder so a class can
    # type-check against itself while still being defined.
    eval_locals = {self_name: Typename[self_name]}
    try:
        # NOTE(review): eval of an arbitrary string -- presumably only ever
        # receives trusted, developer-written type-check strings; confirm.
        return eval(type_check_string, eval_globals, eval_locals)
    except:
        print "Exception while parsing", type_check_string
        raise
Convert string version of a type_check into a python instance. Type checks can be either defined directly in python code or in a string. The syntax is exactly the same since we use eval to parse the string. :param int stack_location: For eval to get the right globals() scope, we require a stack_location to tell us the index in inspect.stack to where the string was defined. :param str self_name: Optional name of the class itself, which can be used to type check for an instance of a class you are currently defining, and thus would not be available in the globals namespace. If None, it will be guessed from the stack.
def vcas2mach(cas, h):
    """Convert calibrated airspeed (CAS) to Mach number at altitude ``h``."""
    return vtas2mach(vcas2tas(cas, h), h)
CAS to Mach conversion
def info_post(node_id):
    """Create an info via POST /info/<node_id>.

    ``contents`` is a required request parameter; ``info_type`` is
    optional and must name a known Info subclass (defaults to
    models.Info).
    """
    # request_parameter returns a Flask error Response on failure, which
    # is forwarded to the client unchanged.
    contents = request_parameter(parameter="contents")
    info_type = request_parameter(
        parameter="info_type", parameter_type="known_class", default=models.Info
    )
    for x in [contents, info_type]:
        if type(x) == Response:
            return x
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")
    exp = Experiment(session)
    try:
        # Create the info, apply any request-supplied properties, then
        # give the experiment a chance to react before committing.
        info = info_type(origin=node, contents=contents)
        assign_properties(info)
        exp.info_post_request(node=node, info=info)
        session.commit()
    except Exception:
        # Any failure during creation/commit is reported as a 403 server
        # error rather than propagating to the client.
        return error_response(
            error_type="/info POST server error",
            status=403,
            participant=node.participant,
        )
    return success_response(info=info.__json__())
Create an info. The node id must be specified in the url. You must pass contents as an argument. info_type is an additional optional argument. If info_type is a custom subclass of Info it must be added to the known_classes of the experiment class.
def value(self):
    """Return the value of the redirect response, falling back to the
    element's ``default`` attribute when no reply is found."""
    agentml = self.trigger.agentml
    user = agentml.request_log.most_recent().user
    groups = agentml.request_log.most_recent().groups
    if len(self._element):
        parsed = agentml.parse_tags(self._element, self.trigger)
        message = ''.join(str(piece) for piece in parsed)
    else:
        message = self._element.text
    default = attribute(self._element, 'default', '')
    reply = agentml.get_reply(user.id, message, groups)
    return reply if reply else default
Return the value of the redirect response
def update_hash(src_file):
    """Recompute the hash for ``src_file`` and persist it next to it.

    The hash is written to ``<src_file>.hash`` and also returned.
    """
    hash_file = local.path(src_file) + ".hash"
    with open(hash_file, 'w') as out:
        new_hash = get_hash_of_dirs(src_file)
        out.write(str(new_hash))
    return new_hash
Update the hash for the given file. Args: src_file: The path of the file whose hash should be recomputed and written to `<src_file>.hash`.
def extract_program_summary(data):
    """Extract the summary text from a program's detail page HTML.

    Returns "No summary" (and logs the program name) when the synopsis
    block cannot be located.
    """
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(data, 'html.parser')
    try:
        synopsis = soup.find('div', {'class': 'episode-synopsis'})
        return synopsis.find_all('div')[-1].text.strip()
    except Exception:
        _LOGGER.info('No summary found for program: %s',
                     soup.find('a', {'class': 'prog_name'}))
        return "No summary"
Extract the summary data from a program's detail page
def boolean_rows(a, b, operation=np.intersect1d):
    """Apply a 1-d numpy set operation row-wise to two (n, d) int arrays.

    Parameters
    ----------
    a : (n, d) int array of row vectors
    b : (m, d) int array of row vectors
    operation : numpy boolean set operation (np.intersect1d, np.setdiff1d)

    Returns
    -------
    (p, d) array of the rows selected by the operation
    """
    a = np.asanyarray(a, dtype=np.int64)
    b = np.asanyarray(b, dtype=np.int64)
    # View each row as one structured element so 1-d set ops apply per row.
    row_dtype = [('', a.dtype)] * a.shape[1]
    a_rows = a.view(row_dtype).ravel()
    b_rows = b.view(row_dtype).ravel()
    selected = operation(a_rows, b_rows)
    return selected.view(a.dtype).reshape(-1, a.shape[1])
Find the rows which occur in both arrays. Parameters --------- a: (n, d) int Array with row vectors b: (m, d) int Array with row vectors operation : function Numpy boolean set operation function: -np.intersect1d -np.setdiff1d Returns -------- shared: (p, d) array containing rows in both a and b
def pts_on_bezier_curve(P=[(0.0, 0.0)], n_seg=0):
    """Return N+1 points representing N line segments on the bezier curve
    defined by control points P."""
    assert isinstance(P, list)
    assert len(P) > 0
    for point in P:
        assert isinstance(point, tuple)
        for coord in point:
            assert len(point) > 1
            assert isinstance(coord, float)
    assert isinstance(n_seg, int)
    assert n_seg >= 0
    samples = [pt_on_bezier_curve(P, float(seg) / n_seg) for seg in range(n_seg)]
    return samples + [P[-1]]
Return list N+1 points representing N line segments on bezier curve defined by control points P.
def get_pore_surface_parameters(surface_area):
    """Build the input parameters node for the pore_surface binary from
    zeo++ output, keeping data provenance."""
    PoreSurfaceParameters = DataFactory('phtools.surface')
    params = {
        'accessible_surface_area': surface_area.get_dict()['ASA_A^2'],
        'target_volume': 40e3,
        'sampling_method': 'random',
    }
    return PoreSurfaceParameters(dict=params)
Get input parameters for pore surface binary. Get input parameters for pore_surface binary from zeo++ output, while keeping data provenance.
def get_disk_usage(path):
    """Return the allocated disk space for ``path`` in bytes.

    :param path: path handed to the ``du`` command; best given as an
        absolute path. The path is shell-quoted so names containing
        spaces or shell metacharacters are handled safely (the original
        interpolated it unquoted, which broke on such paths and allowed
        shell injection for untrusted input).
    """
    import shlex
    cmd = 'du -sh --block-size=1 {0}'.format(shlex.quote(path))
    total = getoutput(cmd).split()[0]
    return int(total)
Returns the allocated disk space for the given path in bytes. :param path: String representing the path as it would be given to the `du` command. Best to give an absolute path here.
def _filter_settings(settings, prefix): ret = {} for skey in settings.keys(): if skey.startswith(prefix): key = skey[len(prefix):] ret[key] = settings[skey] return ret
Filter all settings to only return settings that start with a certain prefix. :param dict settings: A settings dictionary. :param str prefix: A prefix.
def get_screen_size(self, screen_no):
    """Return the size of the given screen number via the X extension."""
    opcode = self.display.get_extension_major(extname)
    return GetScreenSize(
        display=self.display,
        opcode=opcode,
        window=self.id,
        screen=screen_no,
    )
Returns the size of the given screen number
def import_private_key(self, pem_text, password=None):
    """Import a private key from data in PEM format.

    :param pem_text: text with the private key (str or bytes; str is
        UTF-8 encoded before parsing)
    :param password: if not None, the key is decrypted with the given
        password (str or bytes)
    :return: None
    """
    # Idiom fix: isinstance already returns a bool, so comparing the
    # result with "is True" was redundant.
    if isinstance(pem_text, str):
        pem_text = pem_text.encode()
    if password is not None and isinstance(password, str):
        password = password.encode()
    self.__set_private_key(
        serialization.load_pem_private_key(
            pem_text, password=password, backend=default_backend()
        )
    )
Import a private key from data in PEM-format :param pem_text: text with private key :param password: If it is not None, then result will be decrypt with the given password :return: None
def create_node_group(self):
    """Create an on-demand node group on Amazon EKS."""
    params = define_parameters(
        ClusterName=self.cluster_name,
        ClusterControlPlaneSecurityGroup=self.security_groups,
        Subnets=self.subnet_ids,
        VpcId=self.vpc_ids,
        KeyName=self.ssh_key_name,
        NodeAutoScalingGroupMaxSize="1",
        NodeVolumeSize="100",
        NodeImageId="ami-0a54c984b9f908c81",
        NodeGroupName=f"{self.name} OnDemand Nodes"
    )
    self.create_stack(
        self.node_group_name,
        'amazon-eks-nodegroup.yaml',
        capabilities=['CAPABILITY_IAM'],
        parameters=params,
    )
Create on-demand node group on Amazon EKS.
def reverse_url(self, scheme: str, path: str) -> str:
    """Reverse the url using the scheme and path given in parameter.

    Fixes a defect where, when the connection handler defines a base
    path, the *request* path was substituted into the base URL and then
    appended again (duplicated, with no separator between base and path).

    :param scheme: Scheme of the url
    :param path: Path of the url
    :return: the full url string
    """
    path = path.lstrip('/')
    server, port = self.connection_handler.server, self.connection_handler.port
    if self.connection_handler.path:
        # Use the handler's own base path here (the original mistakenly
        # inserted the request path, duplicating it in the result).
        url = '{scheme}://{server}:{port}/{base}/'.format(
            scheme=scheme, server=server, port=port,
            base=self.connection_handler.path)
    else:
        url = '{scheme}://{server}:{port}/'.format(
            scheme=scheme, server=server, port=port)
    return url + path
Reverses the url using scheme and path given in parameter. :param scheme: Scheme of the url :param path: Path of the url :return:
def daOnes(shap, dtype=float):
    """Ones constructor for a numpy distributed array.

    @param shap the shape of the array
    @param dtype the numpy data type. Defaults to the builtin ``float``;
        the previous default ``numpy.float`` was a deprecated alias for
        the builtin and was removed in NumPy 1.24, making the old default
        raise AttributeError at import time.
    """
    res = DistArray(shap, dtype)
    res[:] = 1
    return res
Ones constructor for a numpy distributed array (all elements set to 1) @param shap the shape of the array @param dtype the numpy data type
def get_y(self, var, coords=None):
    """Get the y-coordinate of a variable.

    First looks for a coordinate carrying an ``'axis'`` attribute of 'Y'
    (via get_variable_by_axis); otherwise falls back to the name returned
    by get_yname.

    Parameters
    ----------
    var : xarray.Variable
        The variable to get the y-coordinate for
    coords : dict, optional
        Coordinates to use; falls back to ``self.ds.coords`` when falsy.

    Returns
    -------
    xarray.Coordinate or None
    """
    coords = coords or self.ds.coords
    axis_coord = self.get_variable_by_axis(var, 'y', coords)
    if axis_coord is not None:
        return axis_coord
    return coords.get(self.get_yname(var))
Get the y-coordinate of a variable This method searches for the y-coordinate in the :attr:`ds`. It first checks whether there is one dimension that holds an ``'axis'`` attribute with 'Y', otherwise it looks whether there is an intersection between the :attr:`y` attribute and the variables dimensions, otherwise it returns the coordinate corresponding to the second last dimension of `var` (or the last if the dimension of var is one-dimensional) Possible types -------------- var: xarray.Variable The variable to get the y-coordinate for coords: dict Coordinates to use. If None, the coordinates of the dataset in the :attr:`ds` attribute are used. Returns ------- xarray.Coordinate or None The y-coordinate or None if it could be found
def image_groups_list(self, api_url=None, offset=0, limit=-1, properties=None):
    """Get the list of image group resources from a SCO-API.

    Parameters
    ----------
    api_url : string, optional
        Base Url of the SCO-API; the default API is used when omitted.
    offset : int, optional
        Starting offset for returned list items
    limit : int, optional
        Limit the number of items in the result
    properties : List(string)
        Additional object properties to include for items in the result

    Returns
    -------
    List(scoserv.ResourceHandle)
        One resource handle per image group in the listing
    """
    refs = self.get_api_references(api_url)
    listing_url = refs[sco.REF_IMAGE_GROUPS_LIST]
    return sco.get_resource_listing(listing_url, offset, limit, properties)
Get list of image group resources from a SCO-API. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Uses default API if argument not present. offset : int, optional Starting offset for returned list items limit : int, optional Limit the number of items in the result properties : List(string) List of additional object properties to be included for items in the result Returns ------- List(scoserv.ResourceHandle) List of resource handles (one per image group in the listing)
def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args):
    """Return the member of a service group matching ``s_name`` (and
    ``s_port`` when given), or None.

    Fixes a defect in the port filter: the original set ``ret = None`` on
    a port mismatch but then unconditionally overwrote it with
    ``ret = server``, so a server with the right name but wrong port was
    still returned.
    """
    servers = _servicegroup_get_servers(sg_name, **connection_args)
    if servers is None:
        return None
    ret = None
    for server in servers:
        if server.get_servername() == s_name:
            # Skip members bound on a different port than requested.
            if s_port is not None and s_port != server.get_port():
                continue
            ret = server
    return ret
Returns a member of a service group or None
def _call_handler(self, key, insert_text):
    """Dispatch a parsed key (or tuple of keys) to the key-press callback.

    A BracketedPaste key switches the parser into paste-collection mode
    instead of being forwarded.
    """
    if isinstance(key, tuple):
        for single_key in key:
            self._call_handler(single_key, insert_text)
        return
    if key == Keys.BracketedPaste:
        self._in_bracketed_paste = True
        self._paste_buffer = ''
    else:
        self.feed_key_callback(KeyPress(key, insert_text))
Callback to handler.
def coerce_to_list(items, preprocess=None):
    """Coerce a single instance or a list into a list, optionally mapping
    ``preprocess`` over every element."""
    result = items if isinstance(items, list) else [items]
    if preprocess:
        result = [preprocess(item) for item in result]
    return result
Given an instance or list, coerce to list. With optional preprocessing.
def get_weights(model_hparams, vocab_size, hidden_dim=None):
  """Create or get concatenated embedding or softmax variable.

  Args:
    model_hparams: HParams, model hyperparameters.
    vocab_size: int, vocabulary size.
    hidden_dim: dim of the variable. Defaults to model_hparams.hidden_size.

  Returns:
    A [vocab_size, hidden_dim] Tensor assembled from
    symbol_modality_num_shards shard variables.
  """
  if hidden_dim is None:
    hidden_dim = model_hparams.hidden_size
  num_shards = model_hparams.symbol_modality_num_shards
  shards = []
  for i in range(num_shards):
    # Distribute the remainder: the first (vocab_size % num_shards)
    # shards get one extra row.
    shard_size = (vocab_size // num_shards) + (
        1 if i < vocab_size % num_shards else 0)
    var_name = "weights_%d" % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  # NOTE(review): presumably needed so gradients through the concat stay
  # dense tensors in graph mode -- confirm against common_layers docs.
  if not tf.executing_eagerly():
    ret = common_layers.convert_gradient_to_tensor(ret)
  return ret
Create or get concatenated embedding or softmax variable. Args: model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size Returns: a Tensor of shape [vocab_size, hidden_dim] formed by concatenating the num_shards shard variables.
def broadcast_info(team_id, date=None):
    """Return broadcast information for a given team on a given date.

    :param team_id: team identifier passed to mlbgame.data
    :param date: datetime for the game date. Defaults to the current time
        *at call time*. (The original signature used
        ``date=datetime.now()``, which is evaluated once at import, so
        the default date silently froze at module load.)
    :return: list of schedule rows whose game_date matches the given day
    """
    if date is None:
        date = datetime.now()
    year = date.year
    game_date = date.strftime('%Y-%m-%dT00:00:00')
    data = mlbgame.data.get_broadcast_info(team_id, year)
    schedule = json.loads(data.read().decode('utf-8'))
    schedule = schedule['mlb_broadcast_info']['queryResults']['row']
    return [g for g in schedule if g['game_date'] == game_date]
Returns a dictionary of broadcast information for a given team during a given season