code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def raw(self):
    """Make a request to ``self.url`` and return the raw response object.

    On an HTTP error, re-raises as RequestError with the server-provided
    message when the error body parses as JSON, otherwise as StatbankError.
    """
    try:
        return urlopen(str(self.url))
    except HTTPError as error:
        try:
            parsed = self._parsejson(error)
            # Surface the API's own error message; __cause__ = None keeps
            # the traceback free of chained-exception noise.
            exc = RequestError(parsed['message'])
            exc.__cause__ = None
            raise exc
        except ValueError:
            # Error body was not JSON -- presumably _parsejson raised; fall
            # back to a generic error (TODO confirm _parsejson behavior).
            exc = StatbankError()
            exc.__cause__ = None
            raise exc
Make a request to the instance's URL and return the raw response object. HTTP errors are re-raised as RequestError (with the server's message) or StatbankError.
def _set_raise_on_bulk_item_failure(self, raise_on_bulk_item_failure): self._raise_on_bulk_item_failure = raise_on_bulk_item_failure self.bulker.raise_on_bulk_item_failure = raise_on_bulk_item_failure
Set the raise_on_bulk_item_failure flag on this object and propagate it to the bulker. :param raise_on_bulk_item_failure: a bool giving the desired status of the flag.
def _get_next_trial(self):
    """Replenish the trial queue and pick the next trial to run.

    Blocks while refilling only when every queued trial has finished but
    the search algorithm can still produce more trials.
    """
    trials_done = all(trial.is_finished() for trial in self._trials)
    wait_for_trial = trials_done and not self._search_alg.is_finished()
    self._update_trial_queue(blocking=wait_for_trial)
    with warn_if_slow("choose_trial_to_run"):
        trial = self._scheduler_alg.choose_trial_to_run(self)
    return trial
Replenishes queue. Blocks if all trials queued have finished, but search algorithm is still not finished.
def json_data(self):
    """Return the JSON-serializable dict representation of a transformation."""
    payload = {}
    for attr in ("info_in_id", "info_out_id", "node_id", "network_id"):
        payload[attr] = getattr(self, attr)
    return payload
The json representation of a transformation.
def scale_app(self, app_id, instances=None, delta=None, force=False):
    """Scale an app to `instances`, or by `delta`; `instances` wins if both given.

    :param str app_id: application ID
    :param int instances: [optional] target number of instances
    :param int delta: [optional] change in the number of instances
    :param bool force: apply even if a deployment is in progress
    :returns: deployment dict from update_app, or None on bad args / unknown app
    """
    if instances is None and delta is None:
        marathon.log.error('instances or delta must be passed')
        return
    try:
        app = self.get_app(app_id)
    except NotFoundError:
        marathon.log.error('App "{app}" not found'.format(app=app_id))
        return
    desired = instances if instances is not None else (
        app.instances + delta)
    return self.update_app(app.id, MarathonApp(instances=desired), force=force)
Scale an app. Scale an app to a target number of instances (with `instances`), or scale the number of instances up or down by some delta (`delta`). If the resulting number of instances would be negative, desired instances will be set to zero. If both `instances` and `delta` are passed, use `instances`. :param str app_id: application ID :param int instances: [optional] the number of instances to scale to :param int delta: [optional] the number of instances to scale up or down by :param bool force: apply even if a deployment is in progress :returns: a dict containing the deployment id and version :rtype: dict
def parse_limit(limit_def):
    """Parse a structured flux limit definition as obtained from a YAML file.

    Returns a (reaction, lower, upper) tuple.
    """
    lower, upper = get_limits(limit_def)
    reaction = limit_def.get('reaction')
    return reaction, lower, upper
Parse a structured flux limit definition as obtained from a YAML file Returns a tuple of reaction, lower and upper bound.
def _title(self): return ( 'Overall, proteins from %d pathogen%s were found in %d sample%s.' % (len(self.pathogenNames), '' if len(self.pathogenNames) == 1 else 's', len(self.sampleNames), '' if len(self.sampleNames) == 1 else 's'))
Create a title summarizing the pathogens and samples. @return: A C{str} title.
def delete_scan(self, scan_id):
    """Delete the scan_id scan from the collection.

    @return: 1 if the scan was deleted, 0 otherwise (e.g. still running).
    """
    if self.get_scan_status(scan_id) == ScanStatus.RUNNING:
        return 0
    try:
        del self.scan_processes[scan_id]
    except KeyError:
        # A missing process entry is tolerated; the scan may never have
        # spawned one.
        logger.debug('Scan process for %s not found', scan_id)
    return self.scan_collection.delete_scan(scan_id)
Deletes scan_id scan from collection. @return: 1 if scan deleted, 0 otherwise.
def exit_config_mode(self, exit_config="exit", pattern=""):
    """Exit config mode, defaulting the expect pattern to the base prompt."""
    if not pattern:
        pattern = re.escape(self.base_prompt)
    return super(CiscoWlcSSH, self).exit_config_mode(exit_config, pattern)
Exit config_mode.
def absolutize(self, region_id, relative_address):
    """Convert a relative address in a memory region to an absolute address.

    :param region_id: The memory region ID ('global' passes through as-is).
    :param relative_address: The relative memory offset in that region.
    :return: The absolute address.
    :raises SimRegionMapError: if the region ID does not exist.
    """
    if region_id == 'global':
        # Global addresses are already absolute.
        return relative_address
    if region_id not in self._region_id_to_address:
        raise SimRegionMapError('Non-existent region ID "%s"' % region_id)
    region = self._region_id_to_address[region_id]
    return region.base_address + relative_address
Convert a relative address in some memory region to an absolute address. :param region_id: The memory region ID :param relative_address: The relative memory offset in that memory region :return: An absolute address if converted, or an exception is raised when region id does not exist.
def lookup_host_host(self, mac):
    """Look up a host object by MAC address and return its name, mac and ip.

    @type mac: str
    @rtype: dict
    @raises OmapiErrorAttributeNotFound: if the host object lacks ip, mac
        or name
    """
    res = self.lookup_by_host(mac=mac)
    try:
        return dict(ip=res["ip-address"],
                    mac=res["hardware-address"],
                    name=res["name"].decode('utf-8'))
    except KeyError:
        # One of the required attributes is missing from the host object.
        raise OmapiErrorAttributeNotFound()
Look for a host object with given mac address and return the name, mac, and ip address @type mac: str @rtype: dict or None @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no host object with the given mac address could be found @raises OmapiErrorAttributeNotFound: if lease could be found, but objects lacks ip, mac or name @raises socket.error:
def ip_v4(self, with_port: bool = False) -> str:
    """Generate a random IPv4 address.

    :param with_port: Append a random port to the address.
    :return: Random IPv4 address, e.g. '19.121.223.58'.
    """
    octets = [self.random.randint(0, 255) for _ in range(4)]
    address = '{}.{}.{}.{}'.format(*octets)
    if not with_port:
        return address
    return '{}:{}'.format(address, self.port())
Generate a random IPv4 address. :param with_port: Add port to IP. :return: Random IPv4 address. :Example: 19.121.223.58
def _image_of_size(image_size): return np.random.uniform(0, 256, [image_size, image_size, 3]).astype(np.uint8)
Generate a square RGB test image of the given side length.
def is_authenticated(self):
    """Return the authentication status of the client.

    :return: True when a token lookup succeeds, False otherwise.
    :rtype: bool
    """
    if not self.token:
        return False
    try:
        self.lookup_token()
    except (exceptions.Forbidden,
            exceptions.InvalidPath,
            exceptions.InvalidRequest):
        return False
    return True
Helper method which returns the authentication status of the client :return: :rtype:
def bin1d(x, bins):
    """Place values of a 1-d array into bins and count values per bin.

    Parameters
    ----------
    x : array
        (n, 1), values to bin.
    bins : array
        (k, 1), monotonic upper bounds of each bin.

    Returns
    -------
    binIds : array
        1-d array of integer bin ids.
    counts : array
        Number of elements of x falling in each bin.
    """
    # Lower edges: -inf followed by every upper bound except the last.
    lower_edges = [-float("inf")] + list(bins[0:-1])
    binIds = np.zeros(x.shape, dtype='int')
    for k, (lo, hi) in enumerate(zip(lower_edges, bins)):
        # Bins are disjoint, so the indicator sums assemble the ids.
        binIds += ((x > lo) & (x <= hi)) * k
    counts = np.bincount(binIds, minlength=len(bins))
    return (binIds, counts)
Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25])
def addbr(name):
    """Create a new bridge with the given name and return a Bridge for it."""
    fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
    return Bridge(name)
Create new bridge with the given name
def uid(self):
    """Build a signed JWT carrying user_info and an expiry claim.

    The uid is generated automatically from the claims; intended only for
    cookie generation.
    """
    exp = datetime.datetime.utcnow() + self.expDelay
    claims = {
        'user_info': self.user_info,
        # JWT 'exp' must be a unix timestamp (UTC).
        'exp': calendar.timegm(datetime.datetime.timetuple(exp))}
    return jwt.encode(claims, self.site.session_secret,
                      algorithm=SESSION_SECRET_ALGORITHM)
uid is now generated automatically according to the claims. This should actually only be used for cookie generation
def analyse_topology(self, topology, cutoff=3.5):
    """Analyse a single topology file.

    Determines the residues that should be plotted and finds the ligand
    atoms closest to those residues.
    """
    self.define_residues_for_plotting_topology(cutoff)
    self.find_the_closest_atoms(topology)
In case user wants to analyse only a single topology file, this process will determine the residues that should be plotted and find the ligand atoms closest to these residues.
def _group(self, rdd): return rdd.reduceByKey(lambda x, y: x.append(y))
Group together the values with the same key.
def assert_stmt(self):
    """Return a string showing the whole statement that failed.

    The line starting the expression is marked with a '>' indicator.
    """
    module_globals = vars(sys.modules[self.module])
    line_range, lineno = self._find_assert_stmt(
        self.filename, self.linenumber, module_globals=module_globals)
    source = [linecache.getline(self.filename, x,
                                module_globals=module_globals)
              for x in line_range]
    # Re-indent relative to the statement; the split leaves a trailing
    # empty element which is dropped.
    dedented_lines = textwrap.dedent(''.join(source)).split('\n')[:-1]
    formatted_lines = []
    for i, line in zip(line_range, dedented_lines):
        prefix = '>' if i == lineno else ' '
        formatted_lines.append(' {0} {1:4d} {2}'.format(prefix, i, line))
    return '\n'.join(formatted_lines)
Returns a string displaying the whole statement that failed, with a '>' indicator on the line starting the expression.
def to_routing_header(params):
    """Return a routing header string for the given request parameters.

    Args:
        params (Mapping[str, Any]): request parameters used for routing.

    Returns:
        str: The routing header string.
    """
    if sys.version_info[0] >= 3:
        # Python 3 urlencode supports `safe`; keep '/' characters intact.
        return urlencode(params, safe="/")
    # Python 2 has no `safe` kwarg; undo the '/' escaping afterwards.
    return urlencode(params).replace("%2F", "/")
Returns a routing header string for the given request parameters. Args: params (Mapping[str, Any]): A dictionary containing the request parameters used for routing. Returns: str: The routing header string.
def _set_value(self, new_value):
    """Set the parameter's value, enforcing the allowed [min, max] range.

    Raises SettingOutOfBounds outside the range. Warns (but still proceeds)
    when the parameter is linked or has auxiliary variables.
    """
    if self.min_value is not None and new_value < self.min_value:
        raise SettingOutOfBounds(
            "Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}".format(
                self.name, new_value, self.min_value))
    if self.max_value is not None and new_value > self.max_value:
        raise SettingOutOfBounds(
            "Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}".format(
                self.name, new_value, self.max_value))
    if self.has_auxiliary_variable():
        # Force the warning to be shown every time, not just once.
        with warnings.catch_warnings():
            warnings.simplefilter("always", RuntimeWarning)
            warnings.warn("You are trying to assign to a parameter which is either linked or "
                          "has auxiliary variables. The assignment has no effect.", RuntimeWarning)
    # Store the value in the internal (possibly transformed) space.
    if self._transformation is None:
        new_internal_value = new_value
    else:
        new_internal_value = self._transformation.forward(new_value)
    if new_internal_value != self._internal_value:
        self._internal_value = new_internal_value
        # Notify observers; any failure surfaces as NotCallableOrErrorInCall.
        for callback in self._callbacks:
            try:
                callback(self)
            except:
                raise NotCallableOrErrorInCall(
                    "Could not call callback for parameter %s" % self.name)
Sets the current value of the parameter, ensuring that it is within the allowed range.
def _create_query_string(params): parameters = params or {} for param, value in parameters.items(): param_value = str(value).lower() if isinstance(value, bool) else value parameters[param] = param_value return urlencode(parameters)
Build a URL-encoded query string from the given parameters, lowercasing boolean values ('true'/'false') as required by Elasticsearch 5.X.
def set_sorting(self, flag):
    """Enable (or disable) result sorting after search is complete."""
    self.sorting['status'] = flag
    # Header sections are clickable only while sorting is ON.
    self.header().setSectionsClickable(flag == ON)
Enable result sorting after search is complete.
def get_layout(name, *args, **kwargs):
    """Retrieve a graph layout by name.

    Parameters
    ----------
    name : str
        Layout name; must be one of `AVAILABLE_LAYOUTS`.
    *args, **kwargs
        Passed through to class-based layouts that accept options.

    Returns
    -------
    layout : callable
        The callable generator which will calculate the graph layout.
    """
    if name not in _layout_map:
        raise KeyError("Graph layout '%s' not found. Should be one of %s"
                       % (name, AVAILABLE_LAYOUTS))
    layout = _layout_map[name]
    # Class-based layouts are instantiated with the user's options.
    if inspect.isclass(layout):
        layout = layout(*args, **kwargs)
    return layout
Retrieve a graph layout Some graph layouts accept extra options. Please refer to their documentation for more information. Parameters ---------- name : string The name of the layout. The variable `AVAILABLE_LAYOUTS` contains all available layouts. *args Positional arguments which are passed to the layout. **kwargs Keyword arguments which are passed to the layout. Returns ------- layout : callable The callable generator which will calculate the graph layout
def flatten(list_to_flatten):
    """Flatten an arbitrarily nested list/tuple into a flat list."""
    flattened = []
    for element in list_to_flatten:
        if isinstance(element, (list, tuple)):
            # Recurse into nested sequences.
            flattened.extend(flatten(element))
        else:
            flattened.append(element)
    return flattened
Flatten out a list.
def enable_gpid(self):
    """Enable Group ID on the interface (needed for MAC/GID/VID filters).

    Returns True after configuring NCB ports; GPID cannot be set on NB
    ports, in which case False is returned.
    """
    if self.is_ncb:
        self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb",
                           "-V", "evb", "-c", "evbgpid=yes"])
        return True
    else:
        LOG.error("GPID cannot be set on NB")
        return False
Function to enable Group ID on the interface. This is needed to use the MAC, GID, VID Filter.
def get_catalogs_by_query(self, catalog_query):
    """Get a list of ``Catalogs`` matching the given catalog query.

    arg: catalog_query (osid.cataloging.CatalogQuery): the catalog query
    return: (osid.cataloging.CatalogList) - the returned ``CatalogList``
    raise: OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    if self._catalog_session is not None:
        # Delegate when an underlying catalog session is available.
        return self._catalog_session.get_catalogs_by_query(catalog_query)
    query_terms = dict(catalog_query._query_terms)
    collection = JSONClientValidated('cataloging',
                                     collection='Catalog',
                                     runtime=self._runtime)
    result = collection.find(query_terms).sort('_id', DESCENDING)
    return objects.CatalogList(result, runtime=self._runtime)
Gets a list of ``Catalogs`` matching the given catalog query. arg: catalog_query (osid.cataloging.CatalogQuery): the catalog query return: (osid.cataloging.CatalogList) - the returned ``CatalogList`` raise: NullArgument - ``catalog_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``catalog_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
def set(self, group, **kwargs):
    """Set attributes for each LED in group.

    Example::

        my_leds = Leds()
        my_leds.set_color('LEFT', brightness_pct=0.5, trigger='timer')
    """
    if not self.leds:
        return
    assert group in self.led_groups, \
        "%s is an invalid LED group, valid choices are %s" % \
        (group, ', '.join(self.led_groups.keys()))
    for led in self.led_groups[group]:
        for attribute, value in kwargs.items():
            setattr(led, attribute, value)
Set attributes for each LED in group. Example:: my_leds = Leds() my_leds.set_color('LEFT', brightness_pct=0.5, trigger='timer')
def on_mouse_move(self, event):
    """Pan (left drag) and zoom (right drag) with the mouse."""
    if event.modifiers:
        # Ignore drags while keyboard modifiers are held.
        return
    if event.is_dragging:
        x0, y0 = self._normalize(event.press_event.pos)
        x1, y1 = self._normalize(event.last_event.pos)
        x, y = self._normalize(event.pos)
        dx, dy = x - x1, y - y1
        if event.button == 1:
            self.pan_delta((dx, dy))
        elif event.button == 2:
            # Zoom speed scales with canvas width.
            c = np.sqrt(self.size[0]) * .03
            self.zoom_delta((dx, dy), (x0, y0), c=c)
Pan and zoom with the mouse.
def exclude(self, d, item):
    """Check the object's print metadata to see whether `item` is excluded.

    :param d: object whose ``__metadata__.__print__.excludes`` is consulted
    :param item: a (name, value) pair; only the name is checked
    :return: True when the item name is listed in the excludes, else False
    """
    try:
        md = d.__metadata__
        pmd = getattr(md, '__print__', None)
        if pmd is None:
            return False
        excludes = getattr(pmd, 'excludes', [])
        return item[0] in excludes
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; objects without usable metadata are
        # simply never excluded.
        return False
check metadata for excluded items
def extract_meta(cls, serializer, resource):
    """Gather the values of the serializer's meta_fields from the resource."""
    # List serializers wrap the real serializer in `.child`.
    target = serializer.child if hasattr(serializer, 'child') else serializer
    meta = getattr(target, 'Meta', None)
    data = OrderedDict()
    for field_name in getattr(meta, 'meta_fields', []):
        data[field_name] = resource.get(field_name)
    return data
Gathers the data from serializer fields specified in meta_fields and adds it to the meta object.
def handle_server_filter(self, request, table=None):
    """Update the table server filter information in the session.

    Returns True if the filter has been changed; False when no filter
    info is available.
    """
    if not table:
        table = self.get_table()
    filter_info = self.get_server_filter_info(request, table)
    if filter_info is None:
        return False
    request.session[filter_info['value_param']] = filter_info['value']
    if filter_info['field_param']:
        request.session[filter_info['field_param']] = filter_info['field']
    return filter_info['changed']
Update the table server filter information in the session. Returns True if the filter has been changed.
def get(self, name):
    """Get a device model property.

    Args:
        name (str): The name of the property to get.

    Raises:
        ArgumentError: if the property is unknown.
    """
    name = str(name)
    if name not in self._properties:
        raise ArgumentError("Unknown property in DeviceModel", name=name)
    return self._properties[name]
Get a device model property. Args: name (str): The name of the property to get
def _find_base_tds_url(catalog_url): url_components = urlparse(catalog_url) if url_components.path: return catalog_url.split(url_components.path)[0] else: return catalog_url
Identify the base URL of the THREDDS server from the catalog URL. Will retain URL scheme, host, port and username/password when present.
def manifest_parse(self, path):
    """Parse the manifest at `path` and return the decoded JSON object.

    Fix: the file was previously opened without ever being closed; a
    context manager now guarantees the handle is released.
    """
    print("fw: parsing manifests")
    with open(path) as manifest_file:
        return json.load(manifest_file)
parse manifest at path, return JSON object
def find(self, title):
    """Fetch and return the first spreadsheet with the given title.

    Args:
        title(str): title/name of the spreadsheet to return
    Returns:
        SpreadSheet: new SpreadSheet instance
    Raises:
        KeyError: if no spreadsheet with the given ``title`` is found
    """
    files = backend.iterfiles(self._drive, name=title)
    try:
        return next(self[id] for id, _ in files)
    except StopIteration:
        raise KeyError(title)
Fetch and return the first spreadsheet with the given title. Args: title(str): title/name of the spreadsheet to return Returns: SpreadSheet: new SpreadSheet instance Raises: KeyError: if no spreadsheet with the given ``title`` is found
def get_indexes_from_base(self):
    """Get an array of indexes into the base array, as if this raw data
    were indexed.
    """
    if self.is_indexed:
        # FIXME(review): `i` is referenced before assignment on this path,
        # so this branch raises NameError as written. The offset
        # computation below probably needs to happen first -- verify the
        # intended behavior before fixing.
        return np.copy(self.order[i])
    if self.data.base is None:
        i = 0
    else:
        i = self.get_raw_index(0)
    return np.arange(i, i + len(self), dtype=np.uint32)
Get array of indexes from the base array, as if this raw data were indexed.
def main():
    """Create a client from command-line args and publish an error.

    A console script invokes this function from the command line directly.
    """
    args = _parse_args()
    _init_logging(args.verbose)
    client = _from_args(args)
    client.submit_error(args.description, args.extra,
                        default_message=args.default_message)
Create a new instance and publish an error from command line args. There is a console script for invoking this function from the command line directly.
async def viewers_js(request):
    """Serve a single JS bundle of all installed viewers.

    Serves the cached converted bundle when available; otherwise kicks off
    an async conversion and returns a placeholder script.
    """
    response = singletons.server.response
    viewers_resource = singletons.viewers.get_resource()
    url_string = viewers_resource.url_string
    target_ts = TypeString('min.js')
    target_resource = TypedResource(url_string, target_ts)
    if target_resource.cache_exists():
        # Fast path: the converted bundle is already cached.
        return await response.file(target_resource.cache_path, headers={
            'Content-Type': 'application/javascript',
        })
    if not viewers_resource.cache_exists():
        viewers_resource.save()
    # Enqueue the conversion with the background worker pool.
    await singletons.workers.async_enqueue_sync(
        enqueue_conversion_path,
        url_string,
        str(target_ts),
        singletons.workers.enqueue_convert
    )
    return response.text(NOT_LOADED_JS, headers={
        'Content-Type': 'application/javascript',
    })
Viewers determines the viewers installed based on settings, then uses the conversion infrastructure to convert all these JS files into a single JS bundle, that is then served. As with media, it will simply serve a cached version if necessary.
def group_factory(bridge, number, name, led_type):
    """Make a group.

    :param bridge: Member of this bridge.
    :param number: Group number (1-4).
    :param name: Name of group.
    :param led_type: One of RGBW, WRGB, RGBWW, WHITE, DIMMER or BRIDGE_LED.
    :returns: New group.
    :raises ValueError: on an unrecognized led_type.
    """
    if led_type in [RGBW, BRIDGE_LED]:
        return RgbwGroup(bridge, number, name, led_type)
    elif led_type == RGBWW:
        return RgbwwGroup(bridge, number, name)
    elif led_type == WHITE:
        return WhiteGroup(bridge, number, name)
    elif led_type == DIMMER:
        return DimmerGroup(bridge, number, name)
    elif led_type == WRGB:
        return WrgbGroup(bridge, number, name)
    else:
        # Fix: the type was previously passed as a second ValueError
        # argument (logging style) and never interpolated into the message.
        raise ValueError('Invalid LED type: %s' % led_type)
Make a group. :param bridge: Member of this bridge. :param number: Group number (1-4). :param name: Name of group. :param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`. :returns: New group.
def substitute(self, var_map): if self in var_map: return var_map[self] return self._substitute(var_map)
Substitute sub-expressions Args: var_map (dict): Dictionary with entries of the form ``{expr: substitution}``
def has_object_error(self):
    """Return True if any requested object had a business logic error.

    The result is computed once and cached on the instance.

    Returns:
        boolean
    """
    if self._has_object_error is None:
        self._has_object_error = any(
            o.has_error() for o in self.objects())
    return self._has_object_error
Returns true if any requested object had a business logic error, otherwise returns false Returns: boolean
def _rx_timer_handler(self):
    """Handle an rx_timer timeout: the peer did not send a consecutive
    frame within the expected time window, so reset the RX state.
    """
    with self.rx_mutex:
        if self.rx_state == ISOTP_WAIT_DATA:
            self.rx_state = ISOTP_IDLE
            warning("RX state was reset due to timeout")
Method called every time the rx_timer times out, due to the peer not sending a consecutive frame within the expected time window
def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
        episodes: Tuple of transition quantities with batch and time
            dimensions.
        length: Batch of sequence lengths.
        rows: Episodes to replace, defaults to all.

    Returns:
        Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    # Validate capacity and length before any buffer is touched; the
    # control dependencies chain the checks ahead of the updates.
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less_equal(
            length, self._max_length, message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
        replace_ops = tools.nested.map(
            lambda var, val: tf.scatter_update(var, rows, val),
            self._buffers, episodes, flatten=True)
    with tf.control_dependencies(replace_ops):
        return tf.scatter_update(self._length, rows, length)
Replace full episodes. Args: episodes: Tuple of transition quantities with batch and time dimensions. length: Batch of sequence lengths. rows: Episodes to replace, defaults to all. Returns: Operation.
def dot_v2(vec1, vec2):
    """Return the dot product of two 2-d vectors."""
    x_product = vec1.x * vec2.x
    y_product = vec1.y * vec2.y
    return x_product + y_product
Return the dot product of two vectors
def niggli_reduce(lattice, eps=1e-5):
    """Run Niggli reduction.

    Args:
        lattice: Lattice parameters as row vectors
            [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]]
        eps (float): Tolerance for norm/orthogonality checks; see
            https://atztogo.github.io/niggli/.

    Returns:
        The reduced lattice as a numpy 'double' array on success,
        otherwise None.
    """
    _set_no_error()
    # The C routine works on column vectors, hence transposing in and out.
    niggli_lattice = np.array(np.transpose(lattice),
                              dtype='double', order='C')
    result = spg.niggli_reduce(niggli_lattice, float(eps))
    _set_error_message()
    if result == 0:
        return None
    else:
        return np.array(np.transpose(niggli_lattice),
                        dtype='double', order='C')
Run Niggli reduction Args: lattice: Lattice parameters in the form of [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]] eps: float: Tolerance to check if difference of norms of two basis vectors is close to zero or not and if two basis vectors are orthogonal by the value of dot product being close to zero or not. The detail is shown at https://atztogo.github.io/niggli/. Returns: if the Niggli reduction succeeded: Reduced lattice parameters are given as a numpy 'double' array: [[a_x, a_y, a_z], [b_x, b_y, b_z], [c_x, c_y, c_z]] otherwise None is returned.
def view(self, rec):
    """Write the post record as a JSON response."""
    out_json = {
        'uid': rec.uid,
        'time_update': rec.time_update,
        'title': rec.title,
        # Stored HTML is escaped; unescape before returning it.
        'cnt_html': tornado.escape.xhtml_unescape(rec.cnt_html),
    }
    self.write(json.dumps(out_json))
view the post.
def dest_fpath(self, source_fpath: str) -> str:
    """Calculate the destination json-api file path from a source path."""
    parts = source_fpath.split(os.sep)
    # Drop the first component (the source root) to get the relative path.
    relative_dirpath = os.path.dirname(os.path.join(*parts[1:]))
    base_fname = parts[-1].split('.')[0]
    return os.path.join(self.dest_dir, relative_dirpath,
                        f'{base_fname}.json')
Calculates full path for end json-api file from source file full path.
def _multi_take(self, tup):
    """Create indexers for the passed tuple of keys and execute the take.

    Doing all axes at once, rather than once per dimension, improves
    efficiency.

    Parameters
    ----------
    tup : tuple
        Tuple of indexers, one per axis.

    Returns
    -------
    values: same type as the object being indexed
    """
    o = self.obj
    d = {axis: self._get_listlike_indexer(key, axis)
         for (key, axis) in zip(tup, o._AXIS_ORDERS)}
    return o._reindex_with_indexers(d, copy=True, allow_dups=True)
Create the indexers for the passed tuple of keys, and execute the take operation. This allows the take operation to be executed all at once - rather than once for each dimension - improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis Returns ------- values: same type as the object being indexed
def set_metrics(self, key, name, metrics):
    """Store metric identifiers for the given instance key and Mor name.

    Raises KeyError when `key` is not cached and MorNotFoundError when
    the Mor object is missing.
    """
    with self._mor_lock:
        mor = self._mor[key].get(name)
        if mor is None:
            raise MorNotFoundError(
                "Mor object '{}' is not in the cache.".format(name))
        mor['metrics'] = metrics
Store a list of metric identifiers for the given instance key and Mor object name. If the key is not in the cache, raises a KeyError. If the Mor object is not in the cache, raises a MorNotFoundError
def prepare_module(self):
    """Ready the module to start, running its post_config_hook if present.

    A failing hook terminates the module and records error messages for
    display instead of leaving it half-configured.
    """
    if self.has_post_config_hook:
        try:
            self.module_class.post_config_hook()
        except Exception as e:
            self.terminated = True
            self.error_index = 0
            # First entry is the short display name; second carries the
            # exception text (or its class name when the text is empty).
            self.error_messages = [
                self.module_nice_name,
                u"{}: {}".format(
                    self.module_nice_name, str(e) or e.__class__.__name__
                ),
            ]
            self.error_output(self.error_messages[0])
            msg = "Exception in `%s` post_config_hook()" % self.module_full_name
            self._py3_wrapper.report_exception(msg, notify_user=False)
            self._py3_wrapper.log("terminating module %s" % self.module_full_name)
    self.enabled = True
Ready the module to get it ready to start.
def residueCounts(self, convertCaseTo='upper'):
    """Count residue frequencies at all sequence locations matched by reads.

    @param convertCaseTo: A C{str}, 'upper', 'lower', or 'none'. If 'none',
        case is not converted (both cases of a residue may then appear in
        the result, usually due to low complexity masking).
    @return: A C{dict} mapping C{int} offsets into the title sequence to
        C{Counter}s of residue counts at that location.
    """
    if convertCaseTo == 'none':
        def convert(x):
            return x
    elif convertCaseTo == 'lower':
        convert = str.lower
    elif convertCaseTo == 'upper':
        convert = str.upper
    else:
        raise ValueError(
            "convertCaseTo must be one of 'none', 'lower', or 'upper'")
    counts = defaultdict(Counter)
    for titleAlignment in self:
        read = titleAlignment.read
        for hsp in titleAlignment.hsps:
            for (subjectOffset, residue, inMatch) in read.walkHSP(hsp):
                counts[subjectOffset][convert(residue)] += 1
    return counts
Count residue frequencies at all sequence locations matched by reads. @param convertCaseTo: A C{str}, 'upper', 'lower', or 'none'. If 'none', case will not be converted (both the upper and lower case string of a residue will be present in the result if they are present in the read - usually due to low complexity masking). @return: A C{dict} whose keys are C{int} offsets into the title sequence and whose values are C{Counters} with the residue as keys and the count of that residue at that location as values.
def scene_to_collision(scene):
    """Create collision objects from a trimesh.Scene object.

    Parameters
    ------------
    scene : trimesh.Scene
        Scene to create collision objects for.

    Returns
    ------------
    manager : CollisionManager
        CollisionManager for objects in scene.
    objects : dict
        {node name: CollisionObject} for nodes in scene.
    """
    manager = CollisionManager()
    objects = {}
    for node in scene.graph.nodes_geometry:
        # Each geometry node carries its world transform T.
        T, geometry = scene.graph[node]
        objects[node] = manager.add_object(name=node,
                                           mesh=scene.geometry[geometry],
                                           transform=T)
    return manager, objects
Create collision objects from a trimesh.Scene object. Parameters ------------ scene : trimesh.Scene Scene to create collision objects for Returns ------------ manager : CollisionManager CollisionManager for objects in scene objects: {node name: CollisionObject} Collision objects for nodes in scene
def _is_pid_running_on_windows(pid):
    """Check if a process is running on Windows systems based on the pid.

    Parses `tasklist` output; the startupinfo flags keep the spawned
    console window hidden.
    """
    pid = str(pid)
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    process = subprocess.Popen(r'tasklist /fi "PID eq {0}"'.format(pid),
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               startupinfo=startupinfo)
    stdoutdata, stderrdata = process.communicate()
    stdoutdata = to_text_string(stdoutdata)
    process.kill()
    # NOTE(review): substring matching may false-positive if the pid
    # digits appear elsewhere in the tasklist output -- verify.
    check = pid in stdoutdata
    return check
Check if a process is running on windows systems based on the pid.
def update_object(self, container, obj, metadata, **kwargs):
    """Update object metadata.

    :param container: container name (Container is equivalent to Bucket
        in Amazon terms).
    :param obj: object name (Object is equivalent to Key in Amazon terms).
    :param metadata: dict of additional metadata to include in the request.
    """
    try:
        LOG.debug('update_object() with %s is success.', self.driver)
        return self.driver.update_object(container, obj, metadata, **kwargs)
    except DriverException as e:
        # Fix: the message previously said copy_object() and contained a
        # stray backslash-continuation artifact inside the string.
        LOG.exception('update_object() with %s raised an exception %s.',
                      self.driver, e)
Update object metadata :param container: container name (Container is equivalent to Bucket term in Amazon). :param obj: object name (Object is equivalent to Key term in Amazon). :param metadata(dict): additional metadata to include in the request.
def retry_handler(retries=0, delay=timedelta(), conditions=[]):
    """Create a handler function by partially applying retry_loop.

    Args:
        retries (Integral): The number of times to retry if a failure
            occurs.
        delay (timedelta, optional, 0 seconds): Amount of time to delay
            between retries.
        conditions (list): A list of retry conditions.

    Returns:
        function: The retry_loop function partialed.
    """
    # NOTE(review): the mutable default `conditions=[]` is shared across
    # calls; safe only if retry_loop never mutates it -- verify.
    delay_in_seconds = delay.total_seconds()
    return partial(retry_loop, retries, delay_in_seconds, conditions)
A simple wrapper function that creates a handler function by using on the retry_loop function. Args: retries (Integral): The number of times to retry if a failure occurs. delay (timedelta, optional, 0 seconds): A timedelta representing the amount time to delay between retries. conditions (list): A list of retry conditions. Returns: function: The retry_loop function partialed.
def refresh(self, refresh_token):
    """Renew an oauth token given an appropriate refresh token.

    :param refresh_token: The Refresh Token
    :return: A tuple of (token, expiration time as unix timestamp)
    :raises ServerError: on a non-200 response
    """
    r = requests.post(self.apiurl + "/token",
                      params={"grant_type": "refresh_token",
                              "client_id": self.cid,
                              "client_secret": self.csecret,
                              "refresh_token": refresh_token})
    if r.status_code != 200:
        raise ServerError
    jsd = r.json()
    # Expiry is relative; anchor it to the token's creation time.
    return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at'])
Renew an oauth token given an appropriate refresh token. :param refresh_token: The Refresh Token :return: A tuple of (token, expiration time in unix time stamp)
def on_select(self, item, action):
    """Register the action to run when `item` is selected.

    Only one action per item can be stored this way; `item` may be an
    index or an object held in ``self.items``.
    """
    index = item if isinstance(item, int) else self.items.index(item)
    self._on_select[index] = action
Add an action to make when an object is selected. Only one action can be stored this way.
def is_supported(value, check_all=False, filters=None, iterate=False):
    """Return True if the value is supported, False otherwise.

    When `iterate` is set, containers are judged by their contents; with
    `check_all` False only the first element is examined.
    """
    assert filters is not None
    if value is None:
        return True
    if not is_editable_type(value):
        return False
    elif not isinstance(value, filters):
        return False
    elif iterate:
        if isinstance(value, (list, tuple, set)):
            # One supported element is enough (all of them if check_all).
            valid_count = 0
            for val in value:
                if is_supported(val, filters=filters, iterate=check_all):
                    valid_count += 1
                if not check_all:
                    break
            return valid_count > 0
        elif isinstance(value, dict):
            for key, val in list(value.items()):
                if not is_supported(key, filters=filters, iterate=check_all) \
                        or not is_supported(val, filters=filters,
                                            iterate=check_all):
                    return False
                if not check_all:
                    break
    return True
Return True if the value is supported, False otherwise
def _get_dominant_angle(lines, domination_type=MEDIAN):
    """Pick the dominant angle of a set of lines.

    Args:
        lines: iterable of (x1, y1, x2, y2) tuples that define lines.
        domination_type: either MEDIAN or MEAN.

    Returns:
        Dominant angle value in radians.

    Raises:
        ValueError: on unknown domination_type.
    """
    if domination_type == MEDIAN:
        return _get_median_angle(lines)
    elif domination_type == MEAN:
        return _get_mean_angle(lines)
    else:
        raise ValueError('Unknown domination type provided: %s' % (
            domination_type))
Picks dominant angle of a set of lines. Args: lines: iterable of (x1, y1, x2, y2) tuples that define lines. domination_type: either MEDIAN or MEAN. Returns: Dominant angle value in radians. Raises: ValueError: on unknown domination_type.
def default_chunksize(self):
    """How much data will be processed at once when no chunksize is set.

    Notes
    -----
    Respects the maximum-memory setting in pyemma.config.default_chunksize;
    falls back to a fixed constant when dimension/output type are not yet
    known.
    """
    if self._default_chunksize is None:
        try:
            self.dimension()
            self.output_type()
        except:
            # Dimension or dtype not available yet -- use the fallback.
            self._default_chunksize = Iterable._FALLBACK_CHUNKSIZE
        else:
            self._default_chunksize = Iterable._compute_default_cs(
                self.dimension(), self.output_type().itemsize, self.logger)
    return self._default_chunksize
How much data will be processed at once, in case no chunksize has been provided. Notes ----- This variable respects your setting for maximum memory in pyemma.config.default_chunksize
def from_tuples(cls, tups):
    """Create a new IntervalTree from an iterable of 2- or 3-tuples,
    listing begin, end, and optionally data.
    """
    ivs = [Interval(*t) for t in tups]
    return IntervalTree(ivs)
Create a new IntervalTree from an iterable of 2- or 3-tuples, where the tuple lists begin, end, and optionally data.
def get_intermediate_dirs(fs, dir_path):
    """Get a list of non-existing intermediate directories.

    Arguments:
        fs (FS): A filesystem instance.
        dir_path (str): A path to a new directory on the filesystem.

    Returns:
        list: A list of non-existing paths, shallowest first.

    Raises:
        ~fs.errors.DirectoryExpected: If a path component references a
            file and not a directory.
    """
    intermediates = []
    with fs.lock():
        # Walk from the deepest component upward until something exists.
        for path in recursepath(abspath(dir_path), reverse=True):
            try:
                resource = fs.getinfo(path)
            except ResourceNotFound:
                intermediates.append(abspath(path))
            else:
                if resource.is_dir:
                    break
                raise errors.DirectoryExpected(dir_path)
    # Reverse to shallowest-first and drop dir_path itself.
    return intermediates[::-1][:-1]
Get a list of non-existing intermediate directories. Arguments: fs (FS): A filesystem instance. dir_path (str): A path to a new directory on the filesystem. Returns: list: A list of non-existing paths. Raises: ~fs.errors.DirectoryExpected: If a path component references a file and not a directory.
def schedule(self, cron_schedule, base_datetime=None):
    """Schedule the job to run periodically using Cron syntax.

    Passing None clears the schedule. Raises DagobahError when the job's
    state does not allow schedule changes.
    """
    logger.debug('Scheduling job {0} with cron schedule {1}'.format(
        self.name, cron_schedule))
    if not self.state.allow_change_schedule:
        raise DagobahError(
            "job's schedule cannot be changed in state: %s"
            % self.state.status)
    if cron_schedule is None:
        self.cron_schedule = None
        self.cron_iter = None
        self.next_run = None
    else:
        if base_datetime is None:
            base_datetime = datetime.utcnow()
        self.cron_schedule = cron_schedule
        self.cron_iter = croniter(cron_schedule, base_datetime)
        self.next_run = self.cron_iter.get_next(datetime)
    logger.debug('Determined job {0} next run of {1}'.format(
        self.name, self.next_run))
    self.commit()
Schedules the job to run periodically using Cron syntax.
def get_directory(self, identifier):
    """Return the data directory path for an image object.

    Object directories are named by identifier and grouped under a parent
    directory named by the identifier's first two characters, to avoid a
    single directory with too many sub-folders.
    """
    prefix = identifier[:2]
    return os.path.join(self.directory, prefix, identifier)
Implements the policy for naming directories for image objects. Image object directories are name by their identifier. In addition, these directories are grouped in parent directories named by the first two characters of the identifier. The aim is to avoid having too many sub-folders in a single directory. Parameters ---------- identifier : string Unique object identifier Returns ------- string Path to image objects data directory
def memory_read16(self, addr, num_halfwords, zone=None):
    """Read memory from the target system in units of 16-bits.

    Args:
        addr (int): start address to read from
        num_halfwords (int): number of half words to read
        zone (str): memory zone to read from

    Returns:
        List of halfwords read from the target system.

    Raises:
        JLinkException: if memory could not be read
    """
    return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)
Reads memory from the target system in units of 16-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_halfwords (int): number of half words to read zone (str): memory zone to read from Returns: List of halfwords read from the target system. Raises: JLinkException: if memory could not be read
def gen_binder_url(fpath, binder_conf, gallery_conf):
    """Generate a Binder URL according to the configuration in conf.py.

    Parameters
    ----------
    fpath : str
        Path to the `.py` file for which a Binder badge is generated.
    binder_conf : dict
        Binder configuration; see `gen_binder_rst` for details.
    gallery_conf : dict
        Gallery configuration providing 'src_dir'.

    Returns
    -------
    binder_url : str
        A URL directing the user to the live Binder environment.
    """
    fpath_prefix = binder_conf.get('filepath_prefix')
    link_base = binder_conf.get('notebooks_dir')
    relative_link = os.path.relpath(fpath, gallery_conf['src_dir'])
    path_link = os.path.join(link_base, replace_py_ipynb(relative_link))
    if fpath_prefix is not None:
        path_link = '/'.join([fpath_prefix.strip('/'), path_link])
    # Binder expects forward slashes regardless of the local OS.
    path_link = path_link.replace(os.path.sep, '/')
    # (Removed a dead `binder_url = binder_conf['binderhub_url']`
    # assignment that was immediately overwritten.)
    binder_url = '/'.join([binder_conf['binderhub_url'], 'v2', 'gh',
                           binder_conf['org'], binder_conf['repo'],
                           binder_conf['branch']])
    if binder_conf.get('use_jupyter_lab', False) is True:
        binder_url += '?urlpath=lab/tree/{}'.format(path_link)
    else:
        binder_url += '?filepath={}'.format(path_link)
    return binder_url
Generate a Binder URL according to the configuration in conf.py. Parameters ---------- fpath: str The path to the `.py` file for which a Binder badge will be generated. binder_conf: dict or None The Binder configuration dictionary. See `gen_binder_rst` for details. Returns ------- binder_url : str A URL that can be used to direct the user to the live Binder environment.
def seconds_left(self):
    """Remaining part of the year in seconds (the 29th February is
    generally included).

    >>> from hydpy.core.timetools import TOY
    >>> TOY('12_31_23_58_30').seconds_left
    90
    """
    return int((self._ENDDATE.datetime - Date(self).datetime).total_seconds())
Remaining part of the year in seconds. In the first example, only one minute and thirty seconds of the year remain: >>> from hydpy.core.timetools import TOY >>> TOY('12_31_23_58_30').seconds_left 90 The second example shows that the 29th February is generally included: >>> TOY('2').seconds_left 28944000
def reset(self):
    """Toggle the drone's emergency state.

    Sends the REF command with the emergency bit raised, waits briefly,
    then sends it again with the bit cleared.
    """
    ref_command = ardrone.at.ref
    self.at(ref_command, False, True)
    time.sleep(0.1)
    self.at(ref_command, False, False)
Toggle the drone's emergency state.
def set_xlim(self, xlim):
    """Set new X bounds, forwarding them through the pipe when present.

    Returns ``True`` on success and ``False`` if the pipe write failed
    (in which case the stored bounds are left untouched).
    """
    needs_send = self.xlim_pipe is not None and self.xlim != xlim
    if needs_send:
        try:
            self.xlim_pipe[0].send(xlim)
        except IOError:
            # The other end of the pipe is gone; report failure.
            return False
    self.xlim = xlim
    return True
set new X bounds
def unsigned_big_integer(self, column, auto_increment=False):
    """Create a new unsigned big integer column on the table.

    Delegates to ``big_integer`` with the unsigned flag set.

    :param column: The column
    :type column: str

    :type auto_increment: bool

    :rtype: Fluent
    """
    unsigned = True
    return self.big_integer(column, auto_increment, unsigned)
Create a new unsigned big integer column on the table. :param column: The column :type column: str :type auto_increment: bool :rtype: Fluent
def _nowaveform_loglr(self): for det in self._data: setattr(self._current_stats, 'loglikelihood', -numpy.inf) setattr(self._current_stats, '{}_cplx_loglr'.format(det), -numpy.inf) setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.) return -numpy.inf
Convenience function to set loglr values if no waveform generated.
def _retrieve_device_cache(proxy=None):
    '''
    Loads the network device details if not cached already.

    When running under a NAPALM proxy minion, the already-open connection
    is fetched from the proxy's ``napalm.get_device`` function; on a
    regular minion the device is opened directly from ``__opts__``.
    The result is memoized in the module-level ``DEVICE_CACHE``.
    '''
    global DEVICE_CACHE
    if not DEVICE_CACHE:
        if proxy and salt.utils.napalm.is_proxy(__opts__):
            # NAPALM proxy minion: reuse the proxy module's connection,
            # if the proxy exposes the getter.
            if 'napalm.get_device' in proxy:
                DEVICE_CACHE = proxy['napalm.get_device']()
        elif not proxy and salt.utils.napalm.is_minion(__opts__):
            # Regular minion: open the device directly from the opts.
            DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
    return DEVICE_CACHE
Loads the network device details if not cached already.
def set_amino_acid(self, aa):
    """Set amino acid change and position.

    Normalizes the HGVS string to upper case, strips a leading 'P.'
    prefix if present, then updates mutation status and parses the
    HGVS syntax.
    """
    normalized = aa.upper()
    if normalized.startswith('P.'):
        normalized = normalized[2:]
    self.__set_mutation_status()
    self.__parse_hgvs_syntax(normalized)
Set amino acid change and position.
def get_methods_names(public_properties):
    """Generate the names of the fields where to inject the getter and
    setter methods.

    :param public_properties: If True, returns the names of public property
                              accessors, else of hidden property ones
    :return: A (getter_field_name, setter_field_name) tuple
    """
    if public_properties:
        prefix = ipopo_constants.IPOPO_PROPERTY_PREFIX
    else:
        prefix = ipopo_constants.IPOPO_HIDDEN_PROPERTY_PREFIX
    getter_name = "{0}{1}".format(prefix, ipopo_constants.IPOPO_GETTER_SUFFIX)
    setter_name = "{0}{1}".format(prefix, ipopo_constants.IPOPO_SETTER_SUFFIX)
    return getter_name, setter_name
Generates the names of the fields where to inject the getter and setter methods :param public_properties: If True, returns the names of public property accessors, else of hidden property ones :return: getter and a setter field names
def notify_completed(self, participant):
    """Assign each configured Qualification to the participant's worker.

    Overrecruited participants are skipped (they did not actually complete
    the experiment, so they stay eligible for future runs), as is everyone
    when qualification assignment is inactive.
    """
    if participant.status == "overrecruited":
        return
    if not self.qualification_active:
        return

    worker_id = participant.worker_id
    for name in self.qualifications:
        try:
            self.mturkservice.increment_qualification_score(name, worker_id)
        except QualificationNotFoundException as ex:
            # Best-effort: log and continue with the remaining qualifications.
            logger.exception(ex)
Assign a Qualification to the Participant for the experiment ID, and for the configured group_name, if it's been set. Overrecruited participants don't receive qualifications, since they haven't actually completed the experiment. This allows them to remain eligible for future runs.
def text(self, text, stylename=None):
    """Add text within the current container.

    The text is wrapped in a ``Span``; when ``stylename`` is given it is
    resolved through ``_get_style_name`` and attached to the span.
    """
    assert self._containers
    container = self._containers[-1]
    if stylename is None:
        span = Span(text=text)
    else:
        resolved = self._get_style_name(stylename)
        span = Span(stylename=resolved, text=text)
    container.addElement(span)
Add text within the current container.
def SVG_path(path, transform=None, simplify=False):
    """Construct the vertices and SVG codes for the path

    Parameters
    ----------
    path : matplotlib.Path object
    transform : matplotlib transform (optional)
        if specified, the path will be transformed before computing the
        output.
    simplify : bool, optional
        forwarded to ``Path.iter_segments``.

    Returns
    -------
    vertices : array
        The shape (M, 2) array of vertices of the Path. Note that some
        Path codes require multiple vertices, so the length of these
        vertices may be longer than the list of path codes.
    path_codes : list
        A length N list of single-character path codes, N <= M. Each code
        is a single character, in ['L','M','S','C','Z']. See the standard
        SVG path specification for a description of these.
    """
    if transform is not None:
        path = path.transformed(transform)
    # CLOSEPOLY ('Z') carries no drawable vertices in SVG, so emit an empty
    # vertex list for it; PATH_DICT maps matplotlib path codes to SVG
    # single-character codes.
    vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
                  PATH_DICT[path_code])
                 for (vertices, path_code)
                 in path.iter_segments(simplify=simplify)]
    if not vc_tuples:
        # empty path: return an empty (0, 2) vertex array and no codes
        return np.zeros((0, 2)), []
    else:
        # flatten the per-segment vertex lists into one (M, 2) array
        vertices, codes = zip(*vc_tuples)
        vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
        return vertices, list(codes)
Construct the vertices and SVG codes for the path Parameters ---------- path : matplotlib.Path object transform : matplotlib transform (optional) if specified, the path will be transformed before computing the output. Returns ------- vertices : array The shape (M, 2) array of vertices of the Path. Note that some Path codes require multiple vertices, so the length of these vertices may be longer than the list of path codes. path_codes : list A length N list of single-character path codes, N <= M. Each code is a single character, in ['L','M','S','C','Z']. See the standard SVG path specification for a description of these.
def watch(cams, path=None, delay=10):
    """Get screenshots from all cams at a defined interval.

    Loops forever: snaps each cam in turn, then sleeps ``delay`` seconds
    before the next round.

    :param cams: iterable of cam objects exposing ``snap(path)``
    :param path: optional target passed through to ``snap``
    :param delay: seconds to wait between rounds
    """
    while True:
        for cam in cams:
            cam.snap(path)
        time.sleep(delay)
Get screenshots from all cams at a defined interval, looping forever.
def _requested_name(self, name, action=None, func=None): if name is not None: if name in self._used_names: n = 2 while True: pn = name + '_' + str(n) if pn not in self._used_names: self._used_names.add(pn) return pn n += 1 else: self._used_names.add(name) return name if func is not None: if hasattr(func, '__name__'): name = func.__name__ if name == '<lambda>': name = action + '_lambda' elif hasattr(func, '__class__'): name = func.__class__.__name__ if name is None: if action is not None: name = action else: name = self.name return self._requested_name(name)
Create a unique name for an operator or a stream.
def fit_transform(self, X, y=None, **fit_params):
    """Fit all transformers, transform the data and concatenate results.

    Parameters
    ----------
    X : iterable or array-like, depending on transformers
        Input data to be transformed.
    y : array-like, shape (n_samples, ...), optional
        Targets for supervised learning.

    Returns
    -------
    X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
        hstack of results of transformers when ``self.concatenate`` is
        True; otherwise the tuple of per-transformer outputs.
    """
    self._validate_transformers()
    # Fit/transform each (name, transformer, weight) triple in parallel;
    # transformers with a 'col_pick' get only their column subset of X.
    with Pool(self.n_jobs) as pool:
        result = pool.starmap(
            _fit_transform_one,
            ((trans, weight,
              X[trans['col_pick']] if hasattr(trans, 'col_pick') else X,
              y)
             for name, trans, weight in self._iter()))

    if not result:
        # All transformers are None: produce an empty feature matrix.
        return np.zeros((X.shape[0], 0))

    Xs, transformers = zip(*result)
    # Store the fitted transformers back on self.
    self._update_transformer_list(transformers)

    if self.concatenate:
        # Use a sparse hstack if any output is sparse, else dense.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
    return Xs
Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers.
def verify_convention_version(self, ds):
    """Verify that the version in the Conventions field is correct."""
    try:
        conventions = getattr(ds, "Conventions", '').replace(' ', '')
        for convention in conventions.split(','):
            if convention == 'ACDD-' + self._cc_spec_version:
                return ratable_result((2, 2), None, [])
        messages = [
            "Conventions does not contain 'ACDD-{}'".format(
                self._cc_spec_version)
        ]
        return ratable_result((1, 2), "Global Attributes", messages)
    except AttributeError:
        # Conventions exists but is not string-like (no .replace).
        messages = [
            "No Conventions attribute present; must contain ACDD-{}".format(
                self._cc_spec_version)
        ]
        return ratable_result((0, 2), "Global Attributes", messages)
Verify that the version in the Conventions field is correct
def cmd_cminv(self, ch=None):
    """cminv ch=chname

    Invert the color map in the channel/viewer
    """
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    viewer.invert_cmap()
cminv ch=chname Invert the color map in the channel/viewer
def commit(self):
    """Commit the offset of the message we last processed, if it differs
    from what we believe was last committed to Kafka.

    If called while a commit is already in flight, fails with
    :exc:`OperationInProgress` wrapping a Deferred that fires when the
    outstanding commit completes.

    :returns: A Deferred which resolves with the committed offset when the
        operation has completed; resolves immediately if the current and
        last-committed offsets do not differ.
    """
    if not self.consumer_group:
        # Can't commit without a consumer group to commit under.
        return fail(Failure(InvalidConsumerGroupError(
            "Bad Group_id:{0!r}".format(self.consumer_group))))

    # Nothing processed yet, or nothing new since the last commit:
    # resolve immediately with the already-committed offset.
    if ((self._last_processed_offset is None) or
            (self._last_processed_offset == self._last_committed_offset)):
        return succeed(self._last_committed_offset)

    # A commit request is already outstanding: register a Deferred that
    # fires when it completes and report the conflict to the caller.
    if self._commit_ds:
        d = Deferred()
        self._commit_ds.append(d)
        return fail(OperationInProgress(d))

    # Start a fresh commit request.
    d = Deferred()
    self._commit_ds.append(d)
    self._send_commit_request()

    # We just committed manually: restart the periodic commit timer so it
    # doesn't fire again immediately.
    if self._commit_looper is not None:
        self._commit_looper.reset()
    return d
Commit the offset of the message we last processed if it is different from what we believe is the last offset committed to Kafka. .. note:: It is possible to commit a smaller offset than Kafka has stored. This is by design, so we can reprocess a Kafka message stream if desired. On error, will retry according to :attr:`request_retry_max_attempts` (by default, forever). If called while a commit operation is in progress, and new messages have been processed since the last request was sent then the commit will fail with :exc:`OperationInProgress`. The :exc:`OperationInProgress` exception wraps a :class:`~twisted.internet.defer.Deferred` which fires when the outstanding commit operation completes. :returns: A :class:`~twisted.internet.defer.Deferred` which resolves with the committed offset when the operation has completed. It will resolve immediately if the current offset and the last committed offset do not differ.
def on_quit(self, connection, event):
    """Someone left the channel: drop them from the roster, broadcast the
    departure, and push the updated nicknames list to the WebSocket."""
    nickname = self.get_nickname(event)
    # pop() both retrieves the color and removes the entry.
    nickname_color = self.nicknames.pop(nickname)
    self.namespace.emit("message", nickname, "leaves", nickname_color)
    self.emit_nicknames()
Someone left the channel - send the nicknames list to the WebSocket.
def load_data(train_path='./data/regression.train',
              test_path='./data/regression.test',
              split_frac=0.9):
    """Load the regression train/test files and build LightGBM datasets.

    The label is expected in column 0 of both tab-separated files. The
    training file is split into a train and an evaluation portion.

    Parameters
    ----------
    train_path : str
        Path to the tab-separated training file.
    test_path : str
        Path to the tab-separated test file.
    split_frac : float
        Fraction of the training rows used for training; the remainder
        becomes the evaluation set. Defaults to 0.9 (the previously
        hard-coded value).

    Returns
    -------
    tuple
        ``(lgb_train, lgb_eval, X_test, y_test)``.
    """
    print('Load data...')
    df_train = pd.read_csv(train_path, header=None, sep='\t')
    df_test = pd.read_csv(test_path, header=None, sep='\t')

    # Split the training rows into train/eval portions.
    split_num = int(split_frac * len(df_train))

    y_train = df_train[0].values
    y_test = df_test[0].values
    y_eval = y_train[split_num:]
    y_train = y_train[:split_num]
    X_train = df_train.drop(0, axis=1).values
    X_test = df_test.drop(0, axis=1).values
    X_eval = X_train[split_num:, :]
    X_train = X_train[:split_num, :]

    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_eval, y_eval, reference=lgb_train)
    return lgb_train, lgb_eval, X_test, y_test
Load the tab-separated train/test regression files, split off an evaluation set from the training rows, and build the LightGBM datasets.
def read_buffer(io, print_output=False, print_func=None):
    """Read a file-like buffer into a list of stripped text lines,
    optionally echoing each line as it is read.

    Bytes lines are decoded as UTF-8.

    :param io: iterable of ``str`` or ``bytes`` lines (e.g. a file object)
    :param print_output: when True, echo each stripped line to stdout
    :param print_func: optional callable applied to a line before echoing
    :returns: list of stripped text lines
    """

    def _print(line):
        # Only echo when requested; format first if a formatter was given.
        if print_output:
            if print_func:
                formatted_line = print_func(line)
            else:
                formatted_line = line
            # BUG FIX: this previously called the Python 2-only ``unicode``
            # builtin (a NameError on Python 3) and printed UTF-8 encoded
            # bytes; printing the text directly works on modern Python.
            print(formatted_line)

    out = []
    for line in io:
        if isinstance(line, bytes):
            line = line.decode('utf-8')
        line = line.strip()
        out.append(line)
        _print(line)
    return out
Reads a file-like buffer object into lines and optionally prints the output.
def Extract(high: int, low: int, bv: BitVec) -> BitVec:
    """Create an extract expression over bits ``high``..``low`` of ``bv``.

    :param high: highest bit index (inclusive)
    :param low: lowest bit index (inclusive)
    :param bv: the bit vector to extract from
    :return: the extracted bit vector, preserving annotations
    """
    extracted = z3.Extract(high, low, bv.raw)
    if not isinstance(bv, BitVecFunc):
        return BitVec(extracted, annotations=bv.annotations)
    return BitVecFunc(
        raw=extracted, func_name=None, input_=None, annotations=bv.annotations
    )
Create an extract expression. :param high: :param low: :param bv: :return:
def list_hosting_devices_hosting_routers(self, client, router_id, **_params):
    """Fetches a list of hosting devices hosting a router."""
    url = (client.router_path + L3_ROUTER_DEVICES) % router_id
    return client.get(url, params=_params)
Fetches a list of hosting devices hosting a router.
def _writeImage(dataArray=None, inputHeader=None):
    """Wrap the result of the combination step in a new FITS HDUList.

    Parameters
    ----------
    dataArray : arr
        Array of data placed in the fits.PrimaryHDU object

    inputHeader : obj
        fits.header.Header object used as the basis for the PrimaryHDU
        header

    Returns
    -------
    fits.HDUList containing a single PrimaryHDU
    """
    hdulist = fits.HDUList()
    hdulist.append(fits.PrimaryHDU(data=dataArray, header=inputHeader))
    return hdulist
Writes out the result of the combination step. The header of the first 'outsingle' file in the association parlist is used as the header of the new image. Parameters ---------- dataArray : arr Array of data to be written to a fits.PrimaryHDU object inputHeader : obj fits.header.Header object to use as basis for the PrimaryHDU header
def service(self, *args, **kwargs):
    """Retrieve a single service belonging to this scope.

    See :class:`pykechain.Client.service` for available parameters.

    .. versionadded:: 1.13
    """
    client = self._client
    return client.service(*args, scope=self.id, **kwargs)
Retrieve a single service belonging to this scope. See :class:`pykechain.Client.service` for available parameters. .. versionadded:: 1.13
def _process_response(response): try: data = response.json() except ValueError: _log_and_raise_exception('Invalid response', response.text) if response.status_code == 200: return { 'headers': response.headers, 'data': data } return _raise_error_from_response(data)
Process the HTTP response: return the parsed JSON payload with headers on success, or raise an error for invalid JSON or a non-200 status.
def get_hit_count_from_obj_variable(context, obj_variable, tag_name):
    """Helper function to return a HitCount for a given template object
    variable.

    Raises TemplateSyntaxError if the passed object variable cannot be
    parsed.
    """
    syntax_error = template.TemplateSyntaxError(
        "'%(a)s' requires a valid individual model variable "
        "in the form of '%(a)s for [model_obj]'.\n"
        "Got: %(b)s" % {'a': tag_name, 'b': obj_variable}
    )

    try:
        obj = obj_variable.resolve(context)
    except template.VariableDoesNotExist:
        raise syntax_error

    try:
        ctype = ContentType.objects.get_for_model(obj)
    except AttributeError:
        # The resolved value is not a model instance.
        raise syntax_error

    hit_count, _created = HitCount.objects.get_or_create(
        content_type=ctype, object_pk=obj.pk)
    return hit_count
Helper function to return a HitCount for a given template object variable. Raises TemplateSyntaxError if the passed object variable cannot be parsed.
def params_dict(self):
    """Build the query parameter dict (hl, gl, ceid) for the HTTP request.

    Defaults to 'US'/'en'; when a location/language string is set, it is
    fuzzy-matched against the known values and mapped to its code.
    """
    location_code = 'US'
    language_code = 'en'
    if len(self.location):
        best_location = process.extractOne(self.location, self.locations)[0]
        location_code = locationMap[best_location]
    if len(self.language):
        best_language = process.extractOne(self.language, self.languages)[0]
        language_code = langMap[best_language]
    return {
        'hl': language_code,
        'gl': location_code,
        'ceid': '{}:{}'.format(location_code, language_code)
    }
Build the query parameter dict (hl, gl, ceid) for the HTTP request.
def delete_table_records(self, table, query_column, ids_to_delete):
    """Responsys.deleteTableRecords call

    Accepts:
        InteractObject table
        string query_column
            possible values:
            'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER'
        list ids_to_delete

    Returns a list of DeleteResult instances
    """
    soap_table = table.get_soap_object(self.client)
    result = self.call(
        'deleteTableRecords', soap_table, query_column, ids_to_delete)
    # A single (non-iterable) result is normalized into a one-element list.
    if not hasattr(result, '__iter__'):
        result = [result]
    return [DeleteResult(item) for item in result]
Responsys.deleteTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances
def describe_autocomplete(self, service, operation, param):
    """Describe operation and args needed for server side completion.

    :type service: str
    :param service: The AWS service name.

    :type operation: str
    :param operation: The AWS operation name.

    :type param: str
    :param param: The name of the parameter being completed. This must
        match the casing in the service model (e.g. InstanceIds, not
        --instance-ids).

    :rtype: ServerCompletion
    :return: A ServerCompletion object that describes what API call to
        make in order to complete the response, or None if the parameter
        is not indexed.
    """
    service_index = self._index[service]
    LOG.debug(service_index)
    operations = service_index.get('operations', {})
    if param not in operations.get(operation, {}):
        LOG.debug("param not in index: %s", param)
        return None
    param_entry = service_index['operations'][operation][param]
    resource_index = service_index['resources'][param_entry['resourceName']]
    path = resource_index['resourceIdentifier'][
        param_entry['resourceIdentifier']]
    return ServerCompletion(service=service,
                            operation=resource_index['operation'],
                            params={},
                            path=path)
Describe operation and args needed for server side completion. :type service: str :param service: The AWS service name. :type operation: str :param operation: The AWS operation name. :type param: str :param param: The name of the parameter being completed. This must match the casing in the service model (e.g. InstanceIds, not --instance-ids). :rtype: ServerCompletion :return: A ServerCompletion object that describes what API call to make in order to complete the response.
def all_finite(self, X):
    """Return True if all entries of X are finite, False otherwise.

    Non-float arrays (ints, bools, ...) are always considered finite.
    A single ``sum()`` reduction is tried first as a fast path; only
    when the sum is non-finite is the elementwise ``isfinite`` check run
    (the sum alone can be non-finite merely due to float32 overflow).
    """
    if X.dtype.char not in np.typecodes['AllFloat']:
        return True
    # The original converted X to float32 twice; do it once.
    as_float32 = np.asarray(X, dtype='float32')
    if np.isfinite(as_float32.sum()):
        return True
    return bool(np.isfinite(as_float32).all())
Returns True if all entries of X are finite, False otherwise.
def prepare(self):
    '''
    Run the preparation sequence required to start a salt syndic minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(Syndic, self).prepare()
    try:
        # Verify/create the runtime directories before anything else;
        # a failure here aborts startup via environment_failure().
        if self.config['verify_env']:
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                root_dir=self.config['root_dir'],
                pki_dir=self.config['pki_dir'],
            )
    except OSError as error:
        self.environment_failure(error)

    self.setup_logfile_logger()
    verify_log(self.config)
    self.action_log_info('Setting up "{0}"'.format(self.config['id']))

    # Late import so logging handlers are configured before salt.minion
    # (and everything it drags in) is loaded.
    import salt.minion

    # Daemonize before creating the SyndicManager so its event loop and
    # sockets belong to the daemonized process.
    self.daemonize_if_required()
    self.syndic = salt.minion.SyndicManager(self.config)
    self.set_pidfile()
Run the preparation sequence required to start a salt syndic minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare()
def noise_covariance(fit, dof=2, **kw):
    """Covariance taking into account the 'noise covariance' of the data.

    The smallest eigenvalue, normalized by the residual degrees of
    freedom (``fit.n - dof``), is taken as the measurement noise.

    This is technically more realistic for continuously sampled data.
    From Faber, 1993.
    """
    eigenvalues = fit.eigenvalues
    measurement_noise = eigenvalues[-1] / (fit.n - dof)
    return 4 * eigenvalues * measurement_noise
Covariance taking into account the 'noise covariance' of the data. This is technically more realistic for continuously sampled data. From Faber, 1993