code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def setup_handler(setup_fixtures_fn, setup_fn):
    """Return a handler that runs fixture setup before the given setup.

    The returned callable first invokes ``setup_fixtures_fn`` and then
    ``setup_fn`` on the object it is handed, guaranteeing fixtures exist
    before user setup code runs.
    """
    def handler(obj):
        setup_fixtures_fn(obj)
        setup_fn(obj)
    return handler
Returns a function that adds fixtures handling to the setup method. Makes sure that fixtures are setup before calling the given setup method.
def _update_general_statistics(a_float, dist): if not dist.count: dist.count = 1 dist.maximum = a_float dist.minimum = a_float dist.mean = a_float dist.sumOfSquaredDeviation = 0 else: old_count = dist.count old_mean = dist.mean new_mean = ((old_count * old_mean) + a_float) / (old_count + 1) delta_sum_squares = (a_float - old_mean) * (a_float - new_mean) dist.count += 1 dist.mean = new_mean dist.maximum = max(a_float, dist.maximum) dist.minimum = min(a_float, dist.minimum) dist.sumOfSquaredDeviation += delta_sum_squares
Adds a_float to distribution, updating the statistics fields. Args: a_float (float): a new value dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`): the Distribution being updated
def _process_converter(self, f, filt=None):
    """Take a conversion function and possibly recreate the frame.

    Applies ``f`` to every column passing ``filt``; if any column was
    converted, rebuilds ``self.obj`` as a new DataFrame.
    """
    if filt is None:
        filt = lambda col, c: True
    needs_new_obj = False
    new_obj = dict()
    for i, (col, c) in enumerate(self.obj.iteritems()):
        if filt(col, c):
            new_data, result = f(col, c)
            if result:
                c = new_data
                needs_new_obj = True
        new_obj[i] = c
    if needs_new_obj:
        # Possibly convert to a new frame, preserving index and columns.
        new_obj = DataFrame(new_obj, index=self.obj.index)
        new_obj.columns = self.obj.columns
        self.obj = new_obj
Take a conversion function and possibly recreate the frame.
def get_random_password(self, length=32, chars=None):
    """Generate a random password.

    Fix: the module-level ``random`` generator is a predictable PRNG and
    unsuitable for passwords; use ``random.SystemRandom`` (backed by
    ``os.urandom``) instead.

    :param length: The length of the random password.
    :type length: int
    :param chars: A string with characters to choose from. Defaults to
        all ASCII letters and digits.
    :type chars: str
    """
    if chars is None:
        chars = string.ascii_letters + string.digits
    rng = random.SystemRandom()  # cryptographically secure source
    return ''.join(rng.choice(chars) for _ in range(length))
Helper function that gets a random password. :param length: The length of the random password. :type length: int :param chars: A string with characters to choose from. Defaults to all ASCII letters and digits. :type chars: str
def text_to_url(self, text):
    """Convert a text address into a :class:`QUrl` object.

    A leading slash is stripped and ``.html`` appended to the home URL.
    """
    page = text[1:] if text.startswith('/') else text
    return QUrl(self.home_url.toString() + page + '.html')
Convert text address into QUrl object
def setup_bash_in_container(builddir, _container, outfile, shell):
    """Open an interactive bash inside a container chroot.

    On a clean (zero) exit of the shell, the modified image is packed to
    ``outfile`` and the configuration is stored next to it.

    Fix: the user-facing prompt was missing a space between adjacent
    string literals ("non-zeroexit code").
    """
    with local.cwd(builddir):
        print("Entering bash inside User-Chroot. Prepare your image and "
              "type 'exit' when you are done. If bash exits with a non-zero "
              "exit code, no new container will be stored.")
        store_new_container = True
        try:
            run_in_container(shell, _container)
        except ProcessExecutionError:
            # Non-zero shell exit: do not persist the image.
            store_new_container = False
        if store_new_container:
            print("Packing new container image.")
            pack_container(_container, outfile)
            config_path = str(CFG["config_file"])
            CFG.store(config_path)
            print("Storing config in {0}".format(os.path.abspath(config_path)))
Setup a bash environment inside a container. Creates a new chroot, which the user can use as a bash to run the wanted projects inside the mounted container, that also gets returned afterwards.
def _determine_v1_goals(self, address_mapper, options):
    """Check and populate the requested goals for a given run.

    Warns when a requested goal also parses as a valid target address,
    since the intent is then ambiguous.
    """
    v1_goals, ambiguous_goals, _ = options.goals_by_version
    requested_goals = v1_goals + ambiguous_goals
    spec_parser = CmdLineSpecParser(self._root_dir)
    for goal in requested_goals:
        if address_mapper.is_valid_single_address(spec_parser.parse_spec(goal)):
            logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be "
                           "a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
    return [Goal.by_name(goal) for goal in requested_goals]
Check and populate the requested goals for a given run.
def create_user(self, ):
    """Create a user from the dialog fields and store it in ``self.user``.

    Fixes: replaced the bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) with ``except Exception``, and
    corrected the copy-pasted log message ("assettype" -> "user").

    :returns: None
    :rtype: None
    :raises: None
    """
    name = self.username_le.text()
    if not name:
        self.username_le.setPlaceholderText("Please provide a username.")
        return
    first = self.first_le.text()
    last = self.last_le.text()
    email = self.email_le.text()
    try:
        user = djadapter.models.User(username=name, first_name=first,
                                     last_name=last, email=email)
        user.save()
        for prj in self.projects:
            prj.users.add(user)
        for task in self.tasks:
            task.users.add(user)
        self.user = user
        self.accept()
    except Exception:
        log.exception("Could not create new user")
Create a user and store it in ``self.user`` :returns: None :rtype: None :raises: None
def simple_prot(x, start):
    """Find the first peak to the right of ``start``.

    A peak at index ``i`` means ``x[i] > x[i-1]`` and ``x[i] >= x[i+1]``.

    Fix: the original had ``else: return None`` attached to the ``if``
    inside the loop, so it gave up on the very first non-peak index
    instead of scanning the whole range.

    :param x: sequence of comparable values.
    :param start: index at which scanning begins (must be >= 1).
    :returns: index of the first peak, or ``None`` if there is none.
    """
    for i in range(start, len(x) - 1):
        a, b, c = x[i - 1], x[i], x[i + 1]
        if b - a > 0 and b - c >= 0:
            return i
    return None
Find the first peak to the right of start
def get_code(self):
    """Open the link and return the response's content, caching it.

    The URL is fetched at most once; later calls return the cached body.
    """
    if self.code is None:
        self.code = urlopen(self.url).read()
    return self.code
Opens the link and returns the response's content.
def _build_kernel(self, kernel_source, compile_flags=()):
    """Build the OpenCL kernel for this worker.

    Args:
        kernel_source (str): the kernel source to compile.
        compile_flags: iterable of compiler flag strings.

    Returns:
        cl.Program: the compiled CL kernel.
    """
    flags = ' '.join(compile_flags)
    return cl.Program(self._cl_context, kernel_source).build(flags)
Convenience function for building the kernel for this worker. Args: kernel_source (str): the kernel source to use for building the kernel Returns: cl.Program: a compiled CL kernel
def is_unicode(string):
    """Return True if the object is a text (unicode) string.

    Fix: the original matched the substring 'str' in ``repr(type(...))``,
    which wrongly returned True for any class whose *name* merely
    contained "str" (e.g. ``Restring``). ``isinstance`` is exact.
    """
    return isinstance(string, str)
Validate that the object is some kind of string.
def GetPathInfo(self, timestamp=None):
    """Generate a summary about the path record.

    Args:
        timestamp: A point in time from which the data should be
            retrieved.

    Returns:
        A `rdf_objects.PathInfo` instance.
    """
    pi_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)
    try:
        result = self._path_infos[pi_timestamp].Copy()
    except KeyError:
        # No stored path info at/before the timestamp: build a fresh one.
        result = rdf_objects.PathInfo(
            path_type=self._path_type, components=self._components)

    stat_ts = self._LastEntryTimestamp(self._stat_entries, timestamp)
    result.last_stat_entry_timestamp = stat_ts
    result.stat_entry = self._stat_entries.get(stat_ts)

    hash_ts = self._LastEntryTimestamp(self._hash_entries, timestamp)
    result.last_hash_entry_timestamp = hash_ts
    result.hash_entry = self._hash_entries.get(hash_ts)
    return result
Generates a summary about the path record. Args: timestamp: A point in time from which the data should be retrieved. Returns: A `rdf_objects.PathInfo` instance.
def get_interaction_energy(self, assign_ff=True, ff=None, mol2=False,
                           force_ff_assign=False):
    """Calculate the BUFF interaction energy of the AMPAL object.

    Fix: the multi-part AttributeError message was missing spaces between
    adjacent string literals ("update_ffmethod", "itshould").

    Parameters
    ----------
    assign_ff: bool, optional
        If true the force field will be updated if required.
    ff: BuffForceField, optional
        The force field to be used for scoring.
    mol2: bool, optional
        If true, mol2 style labels will also be used.
    force_ff_assign: bool, optional
        If true, the force field will be completely reassigned, ignoring
        cached parameters.

    Returns
    -------
    buff_score: buff.BUFFScore

    Raises
    ------
    AttributeError
        If a component molecule has no `update_ff` method.
    """
    if not ff:
        ff = global_settings['buff']['force_field']
    if assign_ff:
        for molecule in self._molecules:
            if hasattr(molecule, 'update_ff'):
                molecule.update_ff(
                    ff, mol2=mol2, force_ff_assign=force_ff_assign)
            else:
                raise AttributeError(
                    'The following molecule does not have an update_ff '
                    'method:\n{}\nIf this is a custom molecule type it '
                    'should inherit from BaseAmpal:'.format(molecule))
    interactions = find_inter_ampal(self, ff.distance_cutoff)
    buff_score = score_interactions(interactions, ff)
    return buff_score
Calculates the interaction energy of the AMPAL object. Parameters ---------- assign_ff: bool, optional If true the force field will be updated if required. ff: BuffForceField, optional The force field to be used for scoring. mol2: bool, optional If true, mol2 style labels will also be used. force_ff_assign: bool, optional If true, the force field will be completely reassigned, ignoring the cached parameters. Returns ------- buff_score: buff.BUFFScore A BUFFScore object with information about each of the interactions and the `Atoms` involved. Raises ------ AttributeError Raise if a component molecule does not have an `update_ff` method.
def find_users(session, *usernames):
    """Find multiple users by name in a single request."""
    joined_names = ','.join(usernames)
    return _make_request(session, FIND_USERS_URL, joined_names)
Find multiple users by name.
def _update_limits_from_api(self):
    """Query RDS DescribeAccountAttributes and update ``self.limits``.

    API usage values are only adopted for limits that have no locally
    recorded usage yet.

    Fix: the "unknown limit" log message was missing a space between its
    adjacent string literals ("unknownlimit").
    """
    self.connect()
    logger.info("Querying RDS DescribeAccountAttributes for limits")
    lims = self.conn.describe_account_attributes()['AccountQuotas']
    for lim in lims:
        if lim['AccountQuotaName'] not in self.API_NAME_TO_LIMIT:
            logger.info('RDS DescribeAccountAttributes returned unknown '
                        'limit: %s (max: %s; used: %s)',
                        lim['AccountQuotaName'], lim['Max'], lim['Used'])
            continue
        lname = self.API_NAME_TO_LIMIT[lim['AccountQuotaName']]
        self.limits[lname]._set_api_limit(lim['Max'])
        if len(self.limits[lname].get_current_usage()) < 1:
            self.limits[lname]._add_current_usage(lim['Used'])
    logger.debug('Done setting limits from API.')
Query RDS's DescribeAccountAttributes API action, and update limits with the quotas returned. Updates ``self.limits``. We ignore the usage information from the API for limits that already have usage recorded.
def get_user_orders(self):
    """Return the user's currently open orders for this book.

    :return: User's orders currently open.
    :rtype: [dict]
    """
    self._log('get user orders')
    payload = {'book': self.name}
    return self._rest_client.post(endpoint='/open_orders', payload=payload)
Return user's orders that are currently open. :return: User's orders currently open. :rtype: [dict]
def create_birthday(min_age=18, max_age=80):
    """Create a random birthday for someone aged min_age..max_age.

    NOTE(review): uses 365-day years, so leap days skew results by a few
    days — presumably acceptable for fixture data.
    """
    age = random.randint(min_age, max_age)
    # Randomize the day-of-year, then step back whole (365-day) years.
    anchor = datetime.date.today() - datetime.timedelta(
        days=random.randint(0, 365))
    return anchor - datetime.timedelta(days=age * 365)
Create a random birthday for someone between the ages of min_age and max_age.
def eject_medium(self, attachment):
    """Tell VBoxSVC the guest ejected the medium of this attachment.

    :param attachment: the :class:`IMediumAttachment` where the eject
        happened.
    :returns: a new :class:`IMediumAttachment` reference (the config
        change can create a new instance).
    """
    if not isinstance(attachment, IMediumAttachment):
        raise TypeError("attachment can only be an instance of type IMediumAttachment")
    raw = self._call("ejectMedium", in_p=[attachment])
    return IMediumAttachment(raw)
Tells VBoxSVC that the guest has ejected the medium associated with the medium attachment. in attachment of type :class:`IMediumAttachment` The medium attachment where the eject happened. return new_attachment of type :class:`IMediumAttachment` A new reference to the medium attachment, as the config change can result in the creation of a new instance.
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'.

    If 'symbol' is bound to a constant, return that value; if bound to an
    expression, return 'default'; if absent from ``co_names``, return
    ``None``. Based on the first assignment to 'symbol' via STORE_NAME or
    STORE_GLOBAL.

    Fix: opcode values were hard-coded (90/97/100), which breaks when
    CPython renumbers opcodes (as in 3.13+); compare opnames instead.
    """
    if symbol not in code.co_names:
        return None
    name_idx = list(code.co_names).index(symbol)
    const = default
    for instruction in Bytecode(code):
        opname = instruction.opname
        arg = instruction.arg
        if opname == 'LOAD_CONST':
            const = code.co_consts[arg]
        elif arg == name_idx and opname in ('STORE_NAME', 'STORE_GLOBAL'):
            return const
        else:
            # Any intervening instruction invalidates the pending constant.
            const = default
Extract the constant value of 'symbol' from 'code' If the name 'symbol' is bound to a constant value by the Python code object 'code', return that value. If 'symbol' is bound to an expression, return 'default'. Otherwise, return 'None'. Return value is based on the first assignment to 'symbol'. 'symbol' must be a global, or at least a non-"fast" local in the code block. That is, only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' must be present in 'code.co_names'.
def matches_extension(path, extension):
    """Return True if path has the given extension, or if its last
    component equals the extension. Supports Unix glob matching.

    >>> matches_extension("./www/profile.php", "php")
    True
    >>> matches_extension("./LICENSE", "LICENSE")
    True
    """
    _, ext = os.path.splitext(path)
    if not ext:
        # Extensionless file: match the bare filename (e.g. "LICENSE").
        return os.path.basename(path) == extension
    return fnmatch.fnmatch(ext[1:], extension)
Returns True if path has the given extension, or if the last path component matches the extension. Supports Unix glob matching. >>> matches_extension("./www/profile.php", "php") True >>> matches_extension("./scripts/menu.js", "html") False >>> matches_extension("./LICENSE", "LICENSE") True
def get_file(self, key, file):
    """Write the contents of *key* to *file*.

    If *file* is a string it is treated as a filename to create;
    otherwise data is written via the object's ``write`` method.

    :param key: The key to be read.
    :param file: Output filename or an object with a *write* method.
    :raises exceptions.ValueError: If the key is not valid.
    """
    self._check_valid_key(key)
    if isinstance(file, str):
        return self._get_filename(key, file)
    return self._get_file(key, file)
Write contents of key to file Like :meth:`.KeyValueStore.put_file`, this method allows backends to implement a specialized function if data needs to be written to disk or streamed. If *file* is a string, contents of *key* are written to a newly created file with the filename *file*. Otherwise, the data will be written using the *write* method of *file*. :param key: The key to be read :param file: Output filename or an object with a *write* method. :raises exceptions.ValueError: If the key is not valid. :raises exceptions.IOError: If there was a problem reading or writing data. :raises exceptions.KeyError: If the key was not found.
def zoom(self, zoom, center=(0, 0, 0), mapped=True):
    """Scale the transform about *center*, leaving that point unchanged.

    Parameters
    ----------
    zoom : array-like
        Values to multiply the transform's current scale factors.
    center : array-like
        The center point around which the scaling takes place.
    mapped : bool
        Whether *center* is expressed in mapped (True) or unmapped
        (False) coordinates.
    """
    zoom = as_vec4(zoom, default=(1, 1, 1, 1))
    center = as_vec4(center, default=(0, 0, 0, 0))
    new_scale = self.scale * zoom
    if mapped:
        new_trans = center - (center - self.translate) * zoom
    else:
        new_trans = self.scale * (1 - zoom) * center + self.translate
    self._set_st(scale=new_scale, translate=new_trans)
Update the transform such that its scale factor is changed, but the specified center point is left unchanged. Parameters ---------- zoom : array-like Values to multiply the transform's current scale factors. center : array-like The center point around which the scaling will take place. mapped : bool Whether *center* is expressed in mapped coordinates (True) or unmapped coordinates (False).
def delete_subscription(self, subscription_id):
    """Delete a single subscription by its id."""
    url = self.SUBSCRIPTIONS_ID_URL % subscription_id
    connection = Connection(self.token)
    connection.set_url(self.production, url)
    return connection.delete_request()
Delete single subscription
def batch_norm_relu(inputs, is_training, relu=True):
    """Apply batch normalization, optionally followed by ReLU."""
    normed = mtf.layers.batch_norm(
        inputs,
        is_training,
        BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        init_zero=(not relu))
    return mtf.relu(normed) if relu else normed
Block of batch norm and relu.
def process_array_includes(self, array, json):
    """Create resources for every `includes` item and map them by id.

    :param array: Array resource exposing ``items_mapped``.
    :param json: Raw JSON dictionary.
    """
    includes = json.get('includes') or {}
    for kind in array.items_mapped.keys():
        if kind not in includes:
            continue
        for raw in includes[kind]:
            resource = self.from_json(raw)
            array.items_mapped[kind][resource.sys['id']] = resource
Iterate through all `includes` and create a resource for every item. In addition map the resources under the `items_mapped` by the resource id and type. :param array: Array resource. :param json: Raw JSON dictionary.
def maintenance_mode(self, **kwargs):
    """Configure, query or delete maintenance mode on the device.

    Args:
        rbridge_id (str): rbridge ID of the device (required).
        get (bool): query the current state instead of editing config.
        delete (bool): remove maintenance mode configuration.
        callback (function): executed with the ``ElementTree`` config.

    Returns:
        For ``get``: True/False; otherwise the return of `callback`.

    Raises:
        KeyError: if `rbridge_id` is not specified.
    """
    is_get_config = kwargs.pop('get', False)
    delete = kwargs.pop('delete', False)
    rbridge_id = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    rid_args = dict(rbridge_id=rbridge_id)
    rid = getattr(self._rbridge, 'rbridge_id_system_mode_maintenance')
    config = rid(**rid_args)
    if is_get_config:
        maint_mode = callback(config, handler='get_config')
        root = ET.fromstring(maint_mode.data_xml)
        namespace = 'urn:brocade.com:mgmt:brocade-rbridge'
        # Report the state of the first rbridge-id node found.
        for rbridge_id_node in root.findall('{%s}rbridge-id' % namespace):
            system_mode = rbridge_id_node.find(
                '{%s}system-mode' % namespace)
            if system_mode is not None:
                return True
            else:
                return False
    if delete:
        config.find('.//*maintenance').set('operation', 'delete')
    return callback(config)
Configures maintenance mode on the device Args: rbridge_id (str): The rbridge ID of the device on which Maintenance mode will be configured in a VCS fabric. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `rbridge_id` is not specified. Examples: >>> import pynos.device >>> conn = ('10.24.39.202', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.system.maintenance_mode(rbridge_id='226') ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == True ... output = dev.system.maintenance_mode(rbridge_id='226', ... delete=True) ... output = dev.system.maintenance_mode(rbridge_id='226', ... get=True) ... assert output == False
def _build_wheel_modern(ireq, output_dir, finder, wheel_cache, kwargs):
    """Build a wheel using modern pip internals.

    * ireq: the InstallRequirement to build.
    * output_dir: directory to build the wheel in.
    * finder: pip's internal Finder for resolving the source of ireq.
    * kwargs: keyword arguments for the RequirementPreparer.
    """
    kwargs.update({"progress_bar": "off", "build_isolation": False})
    with pip_shims.RequirementTracker() as req_tracker:
        if req_tracker:
            kwargs["req_tracker"] = req_tracker
        preparer = pip_shims.RequirementPreparer(**kwargs)
        builder = pip_shims.WheelBuilder(finder, preparer, wheel_cache)
        return builder._build_one(ireq, output_dir)
Build a wheel. * ireq: The InstallRequirement object to build * output_dir: The directory to build the wheel in. * finder: pip's internal Finder object to find the source out of ireq. * kwargs: Various keyword arguments from `_prepare_wheel_building_kwargs`.
def split_text(text, separators=re.compile(r'\s'), brackets=None, strip=False):
    """Split text along separators unless they appear within brackets.

    Fix: the regex literals were plain strings ('\\s', '\\{0}'), which
    are invalid escape sequences (DeprecationWarning today, SyntaxError
    in future Pythons); they are now raw strings.

    :param separators: An iterable of single characters or a compiled
        regex pattern.
    :param brackets: `dict` mapping start tokens to end tokens of what
        is to be recognized as brackets.
    :param strip: strip whitespace from each piece if True.

    .. note:: content within brackets is stripped from the result.
    """
    if not isinstance(separators, PATTERN_TYPE):
        separators = re.compile(
            '[{0}]'.format(''.join(r'\{0}'.format(c) for c in separators)))
    return nfilter(
        s.strip() if strip else s
        for s in separators.split(strip_brackets(text, brackets=brackets)))
Split text along the separators unless they appear within brackets. :param separators: An iterable single characters or a compiled regex pattern. :param brackets: `dict` mapping start tokens to end tokens of what is to be \ recognized as brackets. .. note:: This function will also strip content within brackets.
def compile(self, script, bare=False):
    """Compile CoffeeScript source to JavaScript.

    If *bare* is True, the output omits the top-level function safety
    wrapper (like the coffee command's --bare flag).
    """
    if not hasattr(self, '_context'):
        # Compile the CoffeeScript compiler itself once and cache it.
        self._context = self._runtime.compile(self._compiler_script)
    return self._context.call("CoffeeScript.compile", script, {'bare': bare})
compile a CoffeeScript code to a JavaScript code. if bare is True, then compile the JavaScript without the top-level function safety wrapper (like the coffee command).
def rewind(self):
    """Reset the uncompressed stream position to the start of the file.

    Only valid in read mode; raises OSError otherwise.
    """
    if self.mode != READ:
        raise OSError("Can't rewind in write mode")
    self.fileobj.seek(0)
    self._new_member = True
    # Discard any buffered, already-decompressed data.
    self.extrabuf = b""
    self.extrasize = 0
    self.extrastart = 0
    self.offset = 0
Return the uncompressed stream file position indicator to the beginning of the file
def stop(self, stop_context):
    """Stop all providers; clear the cache if configured to do so."""
    for provider in self._providers:
        provider.stop(stop_context)
    if self._clear_stop:
        self.clear_cache()
Perform any logic on solution stop
def local_response_norm(attrs, inputs, proto_obj):
    """Local Response Normalization.

    Renames the attributes ('bias' -> 'knorm', 'size' -> 'nsize') for
    the target framework's LRN operator.
    """
    renamed = translation_utils._fix_attribute_names(
        attrs, {'bias': 'knorm', 'size': 'nsize'})
    return 'LRN', renamed, inputs
Local Response Normalization.
def _g(self, h, xp, s): nphi = sum(self.phi) return (nphi / 2.0) * log(2 * pi) + nphi * \ log(s) + 0.5 * sum((h - xp) ** 2) / (s ** 2)
Density function for blow and hop moves
def _extract_labels(time_series): labels = {"resource_type": time_series.resource.type} labels.update(time_series.resource.labels) labels.update(time_series.metric.labels) return labels
Build the combined resource and metric labels, with resource_type.
def read_release_version():
    """Read the release version from ``_version.py``.

    Returns the version string, or ``None`` when the file is missing or
    contains no ``__version__`` line.

    Fixes: the file handle was never closed (now a ``with`` block) and
    the bare ``except:`` (which also swallowed KeyboardInterrupt) is now
    ``except Exception``.
    """
    import re
    dirname = os.path.abspath(os.path.dirname(__file__))
    try:
        with open(os.path.join(dirname, "_version.py"), "rt") as f:
            for line in f:
                m = re.match("__version__ = '([^']+)'", line)
                if m:
                    return m.group(1)
    except Exception:
        return None
    return None
Read the release version from ``_version.py``.
def run_iperf_client(self, server_host, extra_args=''):
    """Start an iperf3 client on the device.

    Args:
        server_host: Address of the iperf server.
        extra_args: extra iperf client arguments, e.g. '-i 1 -t 30'.

    Returns:
        (status, results): status is True when the client started
        successfully; results are the output lines.
    """
    raw = self.adb.shell('iperf3 -c %s %s' % (server_host, extra_args))
    lines = new_str(raw, 'utf-8').strip().split('\n')
    ok = 'error' not in lines[0].lower()
    return ok, lines
Start iperf client on the device. Returns True if the iperf client started successfully, together with the data-flow information as results. Args: server_host: Address of the iperf server. extra_args: A string representing extra arguments for iperf client, e.g. '-i 1 -t 30'. Returns: status: True if the iperf client started successfully. results: the data-flow information lines.
def daylight_utc(self, date, latitude, longitude, observer_elevation=0):
    """Calculate daylight start and end times in the UTC timezone.

    :param date: Date to calculate for.
    :param latitude: Latitude - Northern latitudes should be positive.
    :param longitude: Longitude - Eastern longitudes should be positive.
    :param observer_elevation: Elevation in metres.
    :return: (sunrise, sunset) tuple of UTC datetimes.
    """
    sunrise = self.sunrise_utc(date, latitude, longitude, observer_elevation)
    sunset = self.sunset_utc(date, latitude, longitude, observer_elevation)
    return sunrise, sunset
Calculate daylight start and end times in the UTC timezone. :param date: Date to calculate for. :type date: :class:`datetime.date` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :param observer_elevation: Elevation in metres to calculate daylight for :type observer_elevation: int :return: A tuple of the UTC date and time at which daylight starts and ends. :rtype: (:class:`~datetime.datetime`, :class:`~datetime.datetime`)
def com_google_fonts_check_metadata_familyname(family_metadata):
    """Check that METADATA.pb family values are all the same."""
    previous = ""
    mismatch = False
    for font in family_metadata.fonts:
        # Compare each font's family name against the previous one seen.
        if previous and font.name != previous:
            mismatch = True
        previous = font.name
    if mismatch:
        yield FAIL, ("METADATA.pb: Family name is not the same"
                     " in all metadata \"fonts\" items.")
    else:
        yield PASS, ("METADATA.pb: Family name is the same"
                     " in all metadata \"fonts\" items.")
Check that METADATA.pb family values are all the same.
def all_pkgs(self):
    """Return a list of all packages, fetching and caching it if empty."""
    if not self.packages:
        self.packages = self.get_pkg_list()
    return self.packages
Return a list of all packages.
def draw_layer(ax, layer):
    """Draw a layer on the given matplotlib axis.

    Args:
        ax (axis): the matplotlib axis to draw on.
        layer (layer): the coordinate sequences to plot.
    """
    ax.set_aspect('equal', 'datalim')
    ax.plot(*layer)
    ax.axis('off')
Draws a layer on the given matplotlib axis. Args: ax (axis): the matplotlib axis to draw on layer (layer): the layers to plot
def part(self, target, reason=None):
    """Leave (PART) a channel, optionally sending a parting reason."""
    if reason:
        target += ' :' + reason
    self.send_line('PART %s' % target)
Leave (PART) a channel, optionally with a parting reason.
def _search_inasafe_layer(self):
    """Search the selected tree nodes for a layer with InaSAFE keywords.

    :returns: A valid layer, or None if no layer carries
        ``inasafe_fields`` keywords.
    :rtype: QgsMapLayer

    .. versionadded:: 4.3
    """
    selected_nodes = self.iface.layerTreeView().selectedNodes()
    for node in selected_nodes:
        layer_children = [child for child in node.children()
                          if isinstance(child, QgsLayerTreeLayer)]
        for tree_layer in layer_children:
            layer = tree_layer.layer()
            keywords = self.keyword_io.read_keywords(layer)
            if keywords.get('inasafe_fields'):
                return layer
Search for an inasafe layer in an active group. :returns: A valid layer. :rtype: QgsMapLayer .. versionadded:: 4.3
def release_readme_verify():
    """Specialize the template to a PyPI release template and compare it
    to ``README.rst.release.template``.

    Raises:
        ValueError: if the current template does not match the expected
            value specialized from the template.
    """
    version = "{version}"
    expected = populate_readme(
        version,
        version,
        pypi="",
        pypi_img="",
        versions="\n\n",
        versions_img="",
        circleci_badge=CIRCLECI_BADGE_RELEASE,
        circleci_path="/{circleci_build}",
        travis_badge=TRAVIS_BADGE_RELEASE,
        travis_path="/builds/{travis_build}",
        appveyor_badge=APPVEYOR_BADGE_RELEASE,
        appveyor_path="/build/{appveyor_build}",
        coveralls_badge=COVERALLS_BADGE_RELEASE,
        coveralls_path="builds/{coveralls_build}",
    )
    with open(RELEASE_README_FILE, "r") as file_obj:
        contents = file_obj.read()
    if contents != expected:
        err_msg = "\n" + get_diff(
            contents,
            expected,
            "README.rst.release.actual",
            "README.rst.release.expected",
        )
        raise ValueError(err_msg)
    else:
        print("README.rst.release.template contents are as expected.")
Specialize the template to a PyPI release template. Once populated, compare to ``README.rst.release.template``. Raises: ValueError: If the current template doesn't agree with the expected value specialized from the template.
def remove_timedim(self, var):
    """Remove a leading 'time' dimension from the dataset (PPS only)."""
    if self.pps and var.dims[0] == 'time':
        sliced = var[0, :, :]
        sliced.attrs = var.attrs
        var = sliced
    return var
Remove time dimension from dataset
def merge_limits(axes, xlim=True, ylim=True):
    """Apply the common min/max limits across a list of axes.

    Fix: list comprehensions were used purely for their side effect
    (``[xlims.append(...) for ...]``); replaced with ``extend``.

    Args
    ----
    axes: iterable
        `matplotlib.pyplot` axis objects whose limits should be merged.
    xlim: bool
        Modify x axis limits when True.
    ylim: bool
        Modify y axis limits when True.
    """
    xlims = []
    ylims = []
    for ax in axes:
        xlims.extend(ax.get_xlim())
        ylims.extend(ax.get_ylim())
    for ax in axes:
        if xlim:
            ax.set_xlim(min(xlims), max(xlims))
        if ylim:
            ax.set_ylim(min(ylims), max(ylims))
    return None
Set maximum and minimum limits from list of axis objects to each axis Args ---- axes: iterable list of `matplotlib.pyplot` axis objects whose limits should be modified xlim: bool Flag to set modification of x axis limits ylim: bool Flag to set modification of y axis limits
def get_current_roles():
    """Determine the roles the current host is assigned to.

    :return: List of roles of the current host; empty when
        ``env.roledefs`` is not set.
    :rtype: list
    """
    current_host = env.host_string
    roledefs = env.get('roledefs')
    if not roledefs:
        return []
    return [role for role, hosts in six.iteritems(roledefs)
            if current_host in hosts]
Determines the list of roles, that the current host is assigned to. If ``env.roledefs`` is not set, an empty list is returned. :return: List of roles of the current host. :rtype: list
def _validate_func_args(func, kwargs): args, varargs, varkw, defaults = inspect.getargspec(func) if set(kwargs.keys()) != set(args[1:]): raise TypeError("decorator kwargs do not match %s()'s kwargs" % func.__name__)
Validate decorator args when used to decorate a function.
def split_pubnote(pubnote_str):
    """Split a pubnote string into journal information fields.

    Only pubnotes with at least three comma-separated parts are parsed;
    ``None`` values are dropped from the result.
    """
    parts = pubnote_str.split(',')
    pubnote = {}
    if len(parts) > 2:
        pubnote['journal_title'] = parts[0]
        pubnote['journal_volume'] = parts[1]
        (pubnote['page_start'],
         pubnote['page_end'],
         pubnote['artid']) = split_page_artid(parts[2])
    return {key: val for key, val in pubnote.items() if val is not None}
Split pubnote into journal information.
def get_domain_report(self, this_domain, timeout=None):
    """Get information about a given domain.

    :param this_domain: a domain name.
    :param timeout: seconds to wait before the request times out.
    :return: JSON response, or a dict with an 'error' key on failure.
    """
    params = {'apikey': self.api_key, 'domain': this_domain}
    try:
        response = requests.get(self.base + 'domain/report',
                                params=params,
                                proxies=self.proxies,
                                timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(response)
Get information about a given domain. :param this_domain: a domain name. :param timeout: The amount of time in seconds the request should wait before timing out. :return: JSON response
def do(self, command, files=None, use_long_polling=False,
       request_timeout=None, **query):
    """Return the request params we would send to the api."""
    url, params = self._prepare_request(command, query)
    return {
        "url": url,
        "params": params,
        "files": files,
        "stream": use_long_polling,
        "verify": True,  # No self signed certificates. Telegram should be trustworthy anyway...
        "timeout": request_timeout,
    }
Return the request params we would send to the api.
def extract(ctx, input, output):
    """Run ChemDataExtractor on a document and write records as JSON."""
    log.info('chemdataextractor.extract')
    log.info('Reading %s' % input.name)
    doc = Document.from_file(input, fname=input.name)
    records = [record.serialize(primitive=True) for record in doc.records]
    jsonstring = json.dumps(records, indent=2, ensure_ascii=False)
    output.write(jsonstring)
Run ChemDataExtractor on a document.
def json(self, **kwargs):
    """Decode the response body as JSON and return a Python object."""
    body = self._decompress(self.encoding)
    return _json.loads(body, **kwargs)
If the response's body is valid json, we load it as a python dict and return it.
def remove(self, member):
    """Remove *member* from the set; raise KeyError when absent."""
    removed = self.client.zrem(self.name, member)
    if not removed:
        raise KeyError(member)
Remove member.
def kruskal_mst(graph):
    """Kruskal's Algorithm for finding minimum spanning trees.

    Assumes a non-empty, connected graph. Returns the accepted edge ids.
    """
    edges_accepted = 0
    ds = DisjointSet()
    pq = PriorityQueue()
    accepted_edges = []
    label_lookup = {}
    nodes = graph.get_all_node_ids()
    num_vertices = len(nodes)
    # Give every node its own disjoint-set label.
    for node_id in nodes:
        label_lookup[node_id] = ds.add_set()
    # Queue all edges, cheapest first.
    for edge in graph.get_all_edge_objects():
        pq.put(edge['id'], edge['cost'])
    # Accept edges that connect previously separate components.
    while edges_accepted < (num_vertices - 1):
        edge_id = pq.get()
        edge = graph.get_edge(edge_id)
        node_a, node_b = edge['vertices']
        a_set = ds.find(label_lookup[node_a])
        b_set = ds.find(label_lookup[node_b])
        if a_set != b_set:
            edges_accepted += 1
            accepted_edges.append(edge_id)
            ds.union(a_set, b_set)
    return accepted_edges
Implements Kruskal's Algorithm for finding minimum spanning trees. Assumes a non-empty, connected graph.
def parse_media_range(range):
    """Parse a media-range into (type, subtype, params).

    'params' is a dict of all parameters of the range; a valid 'q' value
    in [0, 1] is guaranteed, defaulting to '1'.

    :rtype: (str, str, dict)
    """
    (type, subtype, params) = parse_mime_type(range)
    # Normalize an uppercase 'Q' key into 'q'.
    params.setdefault('q', params.pop('Q', None))
    try:
        # Note: the truthiness test runs first, so a None 'q' never
        # reaches float() here.
        if not params['q'] or not 0 <= float(params['q']) <= 1:
            params['q'] = '1'
    except ValueError:
        params['q'] = '1'
    return (type, subtype, params)
Parse a media-range into its component parts. Carves up a media range and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/*;q=0.5' would get parsed into: ('application', '*', {'q': '0.5'}) In addition this function also guarantees that there is a value for 'q' in the params dictionary, filling it in with a proper default if necessary. :rtype: (str,str,dict)
def has_provider_support(provider, media_type):
    """Verify that the API provider supports the requested media type."""
    if provider.lower() not in API_ALL:
        return False
    # Per-media-type provider sets live in module globals, e.g. API_MOVIE.
    supported = globals().get("API_" + media_type.upper(), {})
    return provider in supported
Verifies if API provider has support for requested media type
def _sendto(self, data, addr=None, attempts=10):
    """Send *data* with retries to dodge same-instant send collisions.

    On multi-master setups on one machine, simultaneous sends can raise
    a permission error at socket level; retry after a small random delay.

    :param data: payload to send.
    :param addr: destination address.
    :param attempts: maximum number of send attempts.
    """
    def _rand_delay():
        return 0.5 / random.randint(10, 30)

    tries = 0
    delay = _rand_delay()
    while tries < attempts:
        try:
            self.transport.sendto(data, addr=addr)
            self.log.debug('Sent successfully')
            return
        except AttributeError as ex:
            self.log.debug('Permission error: %s', ex)
            time.sleep(delay)
            tries += 1
            delay += _rand_delay()
On multi-master environments, running on the same machine, transport sending to the destination can be allowed only at once. Since every machine will immediately respond, high chance to get sending fired at the same time, which will result to a PermissionError at socket level. We are attempting to send it in a different time. :param data: :param addr: :return:
def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]':
    """Calculate the comparison of two entities.

    Dispatches on the (left, right) entity kinds; mixed file/directory
    pairs get their dedicated comparison types, and two missing entities
    are an error.

    :param left: The left side or "before" entity.
    :param right: The right side or "after" entity.
    :return: the matching Comparison subtype.
    """
    # Order matters: the mixed-kind cases must be ruled out first.
    if isinstance(left, File) and isinstance(right, Directory):
        return FileDirectoryComparison(left, right)
    if isinstance(left, Directory) and isinstance(right, File):
        return DirectoryFileComparison(left, right)
    if isinstance(left, File) or isinstance(right, File):
        return FileComparison(left, right)
    if isinstance(left, Directory) or isinstance(right, Directory):
        return DirectoryComparison(left, right)
    raise TypeError(f'Cannot compare entities: {left}, {right}')
Calculate the comparison of two entities. | left | right | Return Type | |===========|===========|=========================| | file | file | FileComparison | | file | directory | FileDirectoryComparison | | file | None | FileComparison | | directory | file | DirectoryFileComparison | | directory | directory | DirectoryComparison | | directory | None | DirectoryComparison | | None | file | FileComparison | | None | directory | DirectoryComparison | | None | None | TypeError | :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: See table above.
def get_submissions(student_item_dict, limit=None):
    """Retrieve submissions for a student item, most recent first.

    Args:
        student_item_dict (dict): course/student/item location of the
            problem the submissions are associated with.
        limit (int): optional cap on the number of returned submissions.

    Returns:
        List of serialized submission dicts.

    Raises:
        SubmissionNotFoundError: when the submission query fails.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    try:
        submission_models = Submission.objects.filter(
            student_item=student_item_model)
    except DatabaseError:
        error_message = (
            u"Error getting submission request for student item {}"
            .format(student_item_dict)
        )
        logger.exception(error_message)
        raise SubmissionNotFoundError(error_message)
    if limit:
        submission_models = submission_models[:limit]
    return SubmissionSerializer(submission_models, many=True).data
Retrieves the submissions for the specified student item, ordered by most recent submitted date. Returns the submissions relative to the specified student item. Exception thrown if no submission is found relative to this location. Args: student_item_dict (dict): The location of the problem this submission is associated with, as defined by a course, student, and item. limit (int): Optional parameter for limiting the returned number of submissions associated with this student item. If not specified, all associated submissions are returned. Returns: List dict: A list of dicts for the associated student item. The submission contains five attributes: student_item, attempt_number, submitted_at, created_at, and answer. 'student_item' is the ID of the related student item for the submission. 'attempt_number' is the attempt this submission represents for this question. 'submitted_at' represents the time this submission was submitted, which can be configured, versus the 'created_at' date, which is when the submission is first created. Raises: SubmissionRequestError: Raised when the associated student item fails validation. SubmissionNotFoundError: Raised when a submission cannot be found for the associated student item. Examples: >>> student_item_dict = dict( >>> student_id="Tim", >>> item_id="item_1", >>> course_id="course_1", >>> item_type="type_one" >>> ) >>> get_submissions(student_item_dict, 3) [{ 'student_item': 2, 'attempt_number': 1, 'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>), 'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>), 'answer': u'The answer is 42.' }]
def changeGroupImageRemote(self, image_url, thread_id=None):
    """Change a thread image from a URL.

    :param image_url: URL of an image to upload and set.
    :param thread_id: User/Group ID whose image to change.
    :raises: FBchatException if the request failed.
    """
    uploaded = self._upload(get_files_from_urls([image_url]))
    (image_id, mimetype), = uploaded
    return self._changeGroupImage(image_id, thread_id)
Changes a thread image from a URL :param image_url: URL of an image to upload and change :param thread_id: User/Group ID to change image. See :ref:`intro_threads` :raises: FBchatException if request failed
def _gen_explain_command(coll, spec, projection, skip, limit, batch_size,
                         options, read_concern):
    """Generate an explain command document for a find."""
    cmd = _gen_find_command(
        coll, spec, projection, skip, limit, batch_size, options)
    entries = [('explain', cmd)]
    if read_concern.level:
        entries.append(('readConcern', read_concern.document))
    return SON(entries)
Generate an explain command document.
def add_rule(self, binding_type: str, rule: BindingRule):
    """Register *rule* for *binding_type* with the highest priority."""
    self._rules.setdefault(binding_type, []).insert(0, rule)
Adds new rule
def print_generic(self, msg, prefix=None):
    """Print a message and log it.

    :param msg: the message.
    :type msg: Unicode string
    :param prefix: the (optional) prefix.
    :type prefix: Unicode string
    """
    self._log(msg, Logger.INFO if prefix is None else prefix)
    if self.use_sys:
        # Use a prefix-specific print function when one is registered.
        if (prefix is not None) and (prefix in self.PREFIX_TO_PRINT_FUNCTION):
            self.PREFIX_TO_PRINT_FUNCTION[prefix](msg)
        else:
            gf.safe_print(msg)
Print a message and log it. :param msg: the message :type msg: Unicode string :param prefix: the (optional) prefix :type prefix: Unicode string
def history(self):
    """Return the historical record of this subscriber's trackable actions."""
    url = "/subscribers/%s/history.json" % self.list_id
    response = self._get(url, params={"email": self.email_address})
    return json_to_py(response)
Gets the historical record of this subscriber's trackable actions.
def extract_subsection(im, shape):
    r"""Extract the middle section of an image.

    Parameters
    ----------
    im : ND-array
        Image from which to extract the subsection.
    shape : array_like
        Either the absolute size of the extracted section or, when the
        first element is below 1, the fractional size of the image.

    Returns
    -------
    image : ND-array
        An ND-array of size given by ``shape``, taken from the center of
        the image.

    Examples
    --------
    >>> import numpy as np
    >>> im = np.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])
    >>> extract_subsection(im=im, shape=[2, 2]).tolist()
    [[2, 2], [2, 3]]
    """
    # FIX: the previous version used scipy's NumPy-alias functions
    # (sp.array / sp.amax / sp.amin), which were deprecated and removed in
    # SciPy >= 1.12.  Use numpy directly; local import keeps the fix
    # self-contained.
    import numpy as np
    shape = np.asarray(shape)
    # NOTE: only the first element decides "fractional" mode, matching the
    # original behavior.
    if shape[0] < 1:
        shape = np.asarray(im.shape) * shape
    center = np.asarray(im.shape) / 2
    slices = []
    for dim in range(im.ndim):
        r = shape[dim] / 2
        lower = max(center[dim] - r, 0)
        upper = min(center[dim] + r, im.shape[dim])
        slices.append(slice(int(lower), int(upper)))
    return im[tuple(slices)]
r""" Extracts the middle section of a image Parameters ---------- im : ND-array Image from which to extract the subsection shape : array_like Can either specify the size of the extracted section or the fractional size of the image to extact. Returns ------- image : ND-array An ND-array of size given by the ``shape`` argument, taken from the center of the image. Examples -------- >>> import scipy as sp >>> from porespy.tools import extract_subsection >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]]) >>> print(im) [[1 1 1 1] [1 2 2 2] [1 2 3 3] [1 2 3 4]] >>> im = extract_subsection(im=im, shape=[2, 2]) >>> print(im) [[2 2] [2 3]]
def set_denotation(onnx_model, input_name, denotation, target_opset, dimension_denotation=None):
    """Set type denotation (and optional dimension denotations) on a model input.

    :param onnx_model: ONNX model object
    :param input_name: name of the graph input to annotate
    :param denotation: input type denotation, e.g. ``'IMAGE'``
    :param target_opset: target ONNX opset (denotation requires opset >= 7)
    :param dimension_denotation: optional list with one denotation per tensor
        dimension
    :raises RuntimeError: if the input is missing or the dimension count differs
    """
    if target_opset < 7:
        warnings.warn('Denotation is not supported in targeted opset - %d' % target_opset)
        return
    for graph_input in onnx_model.graph.input:
        if graph_input.name != input_name:
            continue
        graph_input.type.denotation = denotation
        if dimension_denotation:
            dims = graph_input.type.tensor_type.shape.dim
            if len(dimension_denotation) != len(dims):
                raise RuntimeError('Wrong number of dimensions: input "{}" has {} dimensions'.format(input_name, len(dims)))
            for dim, dim_denotation in zip(dims, dimension_denotation):
                dim.denotation = dim_denotation
        return onnx_model
    raise RuntimeError('Input "{}" not found'.format(input_name))
Set input type denotation and dimension denotation. Type denotation is a feature in ONNX 1.2.1 that lets the model specify the content of a tensor (e.g. IMAGE or AUDIO). This information can be used by the backend. One example where it is useful is in images: Whenever data is bound to a tensor with type denotation IMAGE, the backend can process the data (such as transforming the color space and pixel format) based on model metadata properties. :param onnx_model: ONNX model object :param input_name: Name of input tensor to edit (example: `'data0'`) :param denotation: Input type denotation (`documentation <https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition>`_) (example: `'IMAGE'`) :param target_opset: Target ONNX opset :param dimension_denotation: List of dimension type denotations. The length of the list must be the same as the number of dimensions in the tensor (`documentation <https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition>`_) (example: `['DATA_BATCH', 'DATA_CHANNEL', 'DATA_FEATURE', 'DATA_FEATURE']`)
def on_failure(self, metadata):
    """Handle a received FAILURE message.

    Resets the connection, invokes any registered ``on_failure`` and
    ``on_summary`` callbacks, then raises the hydrated ``CypherError``.
    """
    self.connection.reset()
    failure_handler = self.handlers.get("on_failure")
    if callable(failure_handler):
        failure_handler(metadata)
    summary_handler = self.handlers.get("on_summary")
    if callable(summary_handler):
        summary_handler()
    raise CypherError.hydrate(**metadata)
Called when a FAILURE message has been received.
def get_command_signature(self, command):
    """Return the signature portion of the help page for *command*.

    :param command: the command to build the signature for
    :returns: the formatted signature string
    """
    parent = command.full_parent_name
    if command.aliases:
        name_part = '[%s|%s]' % (command.name, '|'.join(command.aliases))
    else:
        name_part = command.name
    if parent:
        name_part = parent + ' ' + name_part
    return '%s%s %s' % (self.clean_prefix, name_part, command.signature)
Retrieves the signature portion of the help page. Parameters ------------ command: :class:`Command` The command to get the signature of. Returns -------- :class:`str` The signature for the command.
def item_lister(command, _connection, page_size, page_number, sort_by,
                sort_order, item_class, result_set, **kwargs):
    """A generator function for listing Video and Playlist objects.

    Repeatedly fetches pages via ``_connection.get_list`` and yields every
    item, advancing the page number until an empty page is returned.  As a
    side effect, ``result_set`` is updated with the running total count and
    current page number.
    """
    page = page_number
    while True:
        item_collection = _connection.get_list(command,
                                               page_size=page_size,
                                               page_number=page,
                                               sort_by=sort_by,
                                               sort_order=sort_order,
                                               item_class=item_class,
                                               **kwargs)
        result_set.total_count = item_collection.total_count
        result_set.page_number = page
        for item in item_collection.items:
            yield item
        # NOTE(review): a negative total_count or zero page_size appears to
        # signal a response that does not support paging -- stop after one
        # page; confirm against the API contract.
        if item_collection.total_count < 0 or item_collection.page_size == 0:
            break
        if len(item_collection.items) > 0:
            page += 1
        else:
            break
A generator function for listing Video and Playlist objects.
def set_params(self, **kwargs):
    """Set attribute values from keyword arguments.

    Each ``name=value`` pair is assigned as ``self.name = value``.
    """
    # No need to materialize the items into a list before iterating
    # (a Python 2 compatibility relic).
    for key, value in kwargs.items():
        setattr(self, key, value)
Set the parameter values
def route(app_or_blueprint, context=default_context, **kwargs):
    """Attach a transmute route to a Flask app or blueprint.

    The decorated function is wrapped with :func:`describe` (using any
    extra keyword arguments), turned into a transmute function, recorded in
    the app/blueprint's swagger spec, and registered as the handler for
    every generated route.
    """
    def decorator(fn):
        fn = describe(**kwargs)(fn)
        transmute_func = TransmuteFunction(fn)
        routes, handler = create_routes_and_handler(transmute_func, context)
        for r in routes:
            # Lazily create the swagger spec container on first use.
            if not hasattr(app_or_blueprint, SWAGGER_ATTR_NAME):
                setattr(app_or_blueprint, SWAGGER_ATTR_NAME, SwaggerSpec())
            swagger_obj = getattr(app_or_blueprint, SWAGGER_ATTR_NAME)
            swagger_obj.add_func(transmute_func, context)
            app_or_blueprint.route(r, methods=transmute_func.methods)(handler)
        return handler
    return decorator
attach a transmute route.
def heightmap_add_voronoi(
    hm: np.ndarray,
    nbPoints: Any,
    nbCoef: int,
    coef: Sequence[float],
    rnd: Optional[tcod.random.Random] = None,
) -> None:
    """Add values from a Voronoi diagram to the heightmap.

    Args:
        hm: A numpy.ndarray formatted for heightmap functions.
        nbPoints: Number of Voronoi sites.
        nbCoef: The diagram value is calculated from the nbCoef closest sites.
        coef: The distance to each site is scaled by the corresponding
            coefficient (closest site: coef[0], second closest: coef[1], ...).
        rnd: A Random instance, or None.
    """
    # NOTE(review): the caller-supplied nbPoints is discarded and replaced
    # with len(coef) -- presumably to keep the C call within the bounds of
    # the ccoef array; confirm this matches the intended API.
    nbPoints = len(coef)
    ccoef = ffi.new("float[]", coef)
    lib.TCOD_heightmap_add_voronoi(
        _heightmap_cdata(hm),
        nbPoints,
        nbCoef,
        ccoef,
        rnd.random_c if rnd else ffi.NULL,
    )
Add values from a Voronoi diagram to the heightmap. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. nbPoints (Any): Number of Voronoi sites. nbCoef (int): The diagram value is calculated from the nbCoef closest sites. coef (Sequence[float]): The distance to each site is scaled by the corresponding coef. Closest site : coef[0], second closest site : coef[1], ... rnd (Optional[Random]): A Random instance, or None.
def get_changes(self, serialized=False, keep=False):
    """Get a journal of changes that have occurred.

    :param serialized: Return changed values in the serialized format
        used by TaskWarrior.
    :param keep: If True, keep the recorded changes after returning them;
        by default the journal is cleared after it is read.
    :returns: dict mapping each changed field name to a 2-item list of
        ``[original_value, final_value]``.
    """
    results = {}
    # Collapse the ordered (key, from, to) change log so each key keeps its
    # first "from" value and its most recent "to" value.
    for k, f, t in self._changes:
        if k not in results:
            results[k] = [f, None]
        results[k][1] = (
            self._serialize(k, t, self._fields)
            if serialized else t
        )
    # Merge in changes recorded by nested Dirtyable values.
    for k, v in six.iteritems(self):
        if isinstance(v, Dirtyable):
            result = v.get_changes(keep=keep)
            if result:
                # NOTE(review): ``result`` is the dict returned by the
                # nested get_changes; indexing it with 0/1 relies on integer
                # keys being present -- confirm this is intended.
                if not k in results:
                    results[k] = [result[0], None]
                results[k][1] = (
                    self._serialize(k, result[1], self._fields)
                    if serialized else result[1]
                )
    # Unless asked to keep it, reset the journal once it has been read.
    if not keep:
        self._changes = []
    return results
Get a journal of changes that have occurred :param `serialized`: Return changes in the serialized format used by TaskWarrior. :param `keep`: By default, the list of changes is reset after running ``.get_changes``; set this to `True` if you would like to keep the changes recorded following running this command. :returns: A dictionary of 2-tuples of changes, where the key is the name of the field that has changed, and the value is a 2-tuple containing the original value and the final value respectively.
def _read_config(self): if not self._file_path: return None elif self._file_path.startswith('s3://'): return self._read_s3_config() elif self._file_path.startswith('http://') or \ self._file_path.startswith('https://'): return self._read_remote_config() elif not path.exists(self._file_path): raise ValueError( 'Configuration file not found: {}'.format(self._file_path)) with open(self._file_path, 'r') as handle: return handle.read()
Read the configuration from the various places it may be read from. :rtype: str :raises: ValueError
def graft(coll, branch, index):
    """Return a copy of ``coll`` with ``branch`` grafted in at ``index``."""
    head, tail = coll[:index], coll[index:]
    return head + branch + tail
Graft list branch into coll at index
def repartition(self, numPartitions):
    """Repartition every RDD in this stream.

    :rtype: DStream
    """
    def repartition_rdd(rdd):
        # An EmptyRDD cannot be repartitioned; pass it through untouched.
        if isinstance(rdd, EmptyRDD):
            return rdd
        return rdd.repartition(numPartitions)
    return self.transform(repartition_rdd)
Repartition every RDD. :rtype: DStream Example: >>> import pysparkling >>> sc = pysparkling.Context() >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1) >>> ( ... ssc ... .queueStream([['hello', 'world']]) ... .repartition(2) ... .foreachRDD(lambda rdd: print(len(rdd.partitions()))) ... ) >>> ssc.start() >>> ssc.awaitTermination(0.25) 2 0
def sm_to_dot(model):
    """Transform the given state machine model to a dot graph string."""
    parts = [HEADER]
    for index, state in enumerate(model.states):
        actions = "\\n".join(action.name for action in state.actions)
        # The first state gets an arrow marker in its label.
        marker = r"-\> " if index == 0 else ""
        parts.append('{}[label="{{{}{}|{}}}"]\n'.format(
            id(state), marker, state.name, actions))
        for transition in state.transitions:
            parts.append('{} -> {} [label="{}"]\n'.format(
                id(state), id(transition.to_state), transition.event.name))
    if model.resetEvents:
        reset_names = "\\n".join(event.name for event in model.resetEvents)
        parts.append('reset_events [label="{{Reset Events|{}}}", style=""]\n'
                     .format(reset_names))
    parts.append('\n}\n')
    return ''.join(parts)
Transforms given state machine model to dot str.
def get_helper(name=None, quiet=True, **kwargs):
    """Return the helper selected by *name*, or by the HELPME_CLIENT default.

    quiet: if True, suppress most output about the client (e.g. speak)
    """
    from helpme.defaults import HELPME_CLIENT
    client = HELPME_CLIENT if name is None else name
    if client == 'github':
        from .github import Helper
    elif client == 'uservoice':
        from .uservoice import Helper
    elif client == 'discourse':
        from .discourse import Helper
    else:
        # Unknown client names fall back to the github helper.
        from .github import Helper
    Helper.name = client
    Helper.quiet = quiet
    return Helper()
get the correct helper depending on the environment variable HELPME_CLIENT quiet: if True, suppress most output about the client (e.g. speak)
def reduce(source, func, initializer=None):
    """Reduce an asynchronous sequence to a single value by cumulatively
    applying ``func``; ``initializer``, if given, seeds the accumulation and
    serves as the result for an empty sequence.
    """
    accumulated = accumulate.raw(source, func, initializer)
    # The final accumulated item is the reduction result.
    return select.item.raw(accumulated, -1)
Apply a function of two arguments cumulatively to the items of an asynchronous sequence, reducing the sequence to a single value. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty.
def react(self, emojiname):
    """React to this message using the web API."""
    body = self._body
    self._client.react_to_message(emojiname=emojiname,
                                  channel=body['channel'],
                                  timestamp=body['ts'])
React to a message using the web api
def post_create_table(self, table):
    """Build Impala table-level CREATE options (PARTITION BY, STORED AS,
    TBLPROPERTIES) from the table's kwargs.
    """
    kwargs = table.kwargs
    table_opts = []
    if 'impala_partition_by' in kwargs:
        table_opts.append('PARTITION BY %s' % kwargs.get('impala_partition_by'))
    if 'impala_stored_as' in kwargs:
        table_opts.append('STORED AS %s' % kwargs.get('impala_stored_as'))
    if 'impala_table_properties' in kwargs:
        properties = kwargs.get('impala_table_properties', {})
        formatted = ["'{0}' = '{1}'".format(name, value)
                     for name, value in properties.items()]
        table_opts.append('TBLPROPERTIES (%s)' % ', '.join(formatted))
    return '\n%s' % '\n'.join(table_opts)
Build table-level CREATE options.
def step_I_create_logrecords_with_table(context):
    """Create one or more log records from a Gherkin table with the columns
    category, level, and message.  The category ``"__ROOT__"`` selects the
    root logger.
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    for row in context.table.rows:
        category = row["category"]
        if category == "__ROOT__":
            category = None
        make_log_record(category,
                        LogLevel.parse_type(row["level"]),
                        row["message"])
Step definition that creates one more log records by using a table. .. code-block: gherkin When I create log records with: | category | level | message | | foo | ERROR | Hello Foo | | foo.bar | WARN | Hello Foo.Bar | Table description ------------------ | Column | Type | Required | Description | | category | string | yes | Category (or logger) to use. | | level | LogLevel | yes | Log level to use. | | message | string | yes | Log message to use. | .. code-block: python import logging from behave.configuration import LogLevel for row in table.rows: logger = logging.getLogger(row.category) level = LogLevel.parse_type(row.level) logger.log(level, row.message)
def and_edge_predicates(edge_predicates: EdgePredicates) -> EdgePredicate:
    """Concatenate multiple edge predicates into one that requires all of
    them to be met.  A single predicate (or a non-iterable argument) is
    returned unchanged.
    """
    if not isinstance(edge_predicates, Iterable):
        return edge_predicates
    predicates = tuple(edge_predicates)
    if len(predicates) == 1:
        return predicates[0]

    def concatenated_edge_predicate(graph: BELGraph, u: BaseEntity, v: BaseEntity, k: str) -> bool:
        # True only if every underlying predicate accepts the edge.
        return all(predicate(graph, u, v, k) for predicate in predicates)

    return concatenated_edge_predicate
Concatenate multiple edge predicates to a new predicate that requires all predicates to be met.
def on_http_error(error):
    """Decorator factory that converts GitlabHttpError into *error*.

    Args:
        error (Exception): The exception type to raise instead -- must
            inherit from GitlabError.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except GitlabHttpError as exc:
                raise error(exc.error_message, exc.response_code,
                            exc.response_body)
        return wrapper
    return decorator
Manage GitlabHttpError exceptions. This decorator function can be used to catch GitlabHttpError exceptions and raise specialized exceptions instead. Args: error(Exception): The exception type to raise -- must inherit from GitlabError
def check_permission(self, request):
    """Return True if every permission class allows *request*."""
    for permission in self.permission_classes:
        if not permission.has_permission(request):
            return False
    return True
Check this field's permissions to determine whether or not it may be shown.
def _recurse_on_row(self, col_dict, nested_value): row_value = None if col_dict['mode'] == 'REPEATED' and isinstance(nested_value, list): row_value = [self._transform_row(record['v'], col_dict['fields']) for record in nested_value] else: row_value = self._transform_row(nested_value, col_dict['fields']) return row_value
Apply the schema specified by the given dict to the nested value by recursing on it. Parameters ---------- col_dict : dict The schema to apply to the nested value. nested_value : A value nested in a BigQuery row. Returns ------- Union[dict, list] ``dict`` or ``list`` of ``dict`` objects from applied schema.
def _parseSegments(self, data, elfHeader):
    """Parse the ELF program headers and return a list of PhdrData segments.

    :param data: mutable buffer holding the whole file image
    :param elfHeader: parsed ELF header providing e_phoff / e_phnum /
        e_phentsize
    :returns: list of PhdrData entries, one per program header
    """
    offset = elfHeader.header.e_phoff
    segments = []
    for i in range(elfHeader.header.e_phnum):
        phdr = self.__classes.PHDR.from_buffer(data, offset)
        # Zero-copy ctypes view of the segment's file contents; the
        # ``bytes`` field is an independent mutable copy.
        segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
        phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
        segments.append(phdrData)
        # Advance to the next program header entry.
        offset += elfHeader.header.e_phentsize
    return segments
Return a list of segments
def get_metadata_for_ids(pmid_list, get_issns_from_nlm=False,
                         get_abstracts=False, prepend_title=False):
    """Get article metadata for up to 200 PMIDs from the Pubmed database.

    :param pmid_list: list of PMID strings (1-200 entries)
    :param get_issns_from_nlm: look up the journal's full ISSN list via NLM
        (slower; defaults to False)
    :param get_abstracts: include the Pubmed abstract in the results
    :param prepend_title: prepend the article title to the abstract text
    :returns: dict of metadata dicts indexed by PMID, or None on failure
    :raises ValueError: if more than 200 PMIDs are given
    """
    if len(pmid_list) > 200:
        raise ValueError("Metadata query is limited to 200 PMIDs at a time.")
    query_params = {'db': 'pubmed', 'retmode': 'xml', 'id': pmid_list}
    tree = send_request(pubmed_fetch, query_params)
    if tree is None:
        return None
    return get_metadata_from_xml_tree(tree, get_issns_from_nlm,
                                      get_abstracts, prepend_title)
Get article metadata for up to 200 PMIDs from the Pubmed database. Parameters ---------- pmid_list : list of PMIDs as strings Can contain 1-200 PMIDs. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
def js_extractor(response):
    """Extract JS file paths from the response body into ``bad_scripts``."""
    for found in rscript.findall(response):
        # The third capture group holds the path; strip surrounding quotes.
        script = found[2].replace('\'', '').replace('"', '')
        verb('JS file', script)
        bad_scripts.add(script)
Extract js files from the response body
def _ctypes_out(parameter):
    """Return a ``(declaration, True)`` tuple declaring a C_PTR output
    argument for *parameter*.

    Only applies to array-valued out-parameters that are allocatable or
    pointers; returns None implicitly for everything else.
    """
    # Only array parameters (":" in the dimension spec) flowing outward and
    # backed by allocatable/pointer storage need a C_PTR declaration.
    if (parameter.dimension is not None and ":" in parameter.dimension
        and "out" in parameter.direction
        and ("allocatable" in parameter.modifiers or "pointer" in parameter.modifiers)):
        # NOTE(review): the "_o" vs "_c" suffix convention is assumed from
        # the generated-code style elsewhere -- confirm against callers.
        if parameter.direction == "(inout)":
            return ("type(C_PTR), intent(inout) :: {}_o".format(parameter.name), True)
        else:
            return ("type(C_PTR), intent(inout) :: {}_c".format(parameter.name), True)
Returns a parameter variable declaration for an output variable for the specified parameter.
def is_row_empty(self, row):
    """Return True if every cell in *row* is empty."""
    return all(self.is_cell_empty(cell) for cell in row)
Returns True if every cell in the row is empty.
def _grid_in_property(field_name, docstring, read_only=False,
                      closed_only=False):
    """Create a GridIn property backed by ``self._file[field_name]``.

    :param field_name: key in the backing ``_file`` document
    :param docstring: base docstring for the generated property
    :param read_only: if True, the property has no setter
    :param closed_only: if True, the value may only be read after the file
        has been closed
    """
    def getter(self):
        if closed_only and not self._closed:
            raise AttributeError("can only get %r on a closed file" % field_name)
        # 'length' defaults to 0 so files that wrote nothing report size 0.
        if field_name == 'length':
            return self._file.get(field_name, 0)
        return self._file.get(field_name, None)

    def setter(self, value):
        # Once the file is closed, writes must also be persisted to the
        # backing collection, not just the in-memory document.
        if self._closed:
            self._coll.files.update_one({"_id": self._file["_id"]},
                                        {"$set": {field_name: value}})
        self._file[field_name] = value

    if read_only:
        docstring += "\n\nThis attribute is read-only."
    elif closed_only:
        docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
                                  "can only be read after :meth:`close` "
                                  "has been called.")

    if not read_only and not closed_only:
        return property(getter, setter, doc=docstring)
    # Read-only variants expose only the getter.
    return property(getter, doc=docstring)
Create a GridIn property.
def stdrepr_iterable(self, obj, *, cls=None, before=None, after=None):
    """Helper to represent an iterable as a titled horizontal box.

    Arguments:
        obj (iterable): The iterable to represent.
        cls (optional): Class name for the representation; defaults to
            ``'hrepr-' + obj.__class__.__name__``.
        before (optional): A string or Tag to prepend to the elements.
        after (optional): A string or Tag to append to the elements.
    """
    class_name = cls if cls is not None else f'hrepr-{obj.__class__.__name__}'
    children = [self(element) for element in obj]
    return self.titled_box((before, after), children, 'h', 'h')[class_name]
Helper function to represent iterables. StdHRepr calls this on lists, tuples, sets and frozensets, but NOT on iterables in general. This method may be called to produce custom representations. Arguments: obj (iterable): The iterable to represent. cls (optional): The class name for the representation. If None, stdrepr will use ``'hrepr-' + obj.__class__.___name__`` before (optional): A string or a Tag to prepend to the elements. after (optional): A string or a Tag to append to the elements.
def from_data(data):
    """Construct a PrettyTable from a list of row dicts.

    :param data: list of dicts; the first dict's keys become the field
        names and each dict's values become one table row
    :returns: a populated PrettyTable, or None if *data* is empty
    """
    if not data:
        return None
    ptable = PrettyTable()
    ptable.field_names = data[0].keys()
    for row in data:
        # BUG FIX: iterating a dict yields its *keys*, so add_row(row)
        # filled the table with key names; add the values instead so the
        # row contents line up with the field names.
        ptable.add_row(list(row.values()))
    return ptable
Construct a Prettytable from list of rows.
def seek(self, offset, whence=0):
    """Set the file's current position. See `file.seek` for details."""
    self.flush()
    if whence == self.SEEK_SET:
        new_pos = offset
    elif whence == self.SEEK_CUR:
        new_pos = self._pos + offset
    else:
        new_pos = self._get_size() + offset
    self._pos = new_pos
    self._realpos = new_pos
    # Invalidate the read buffer after any reposition.
    self._rbuffer = bytes()
Set the file's current position. See `file.seek` for details.
def _connect(dbfile: 'PathLike') -> apsw.Connection:
    """Connect to an SQLite database file with foreign keys enforced.

    :param dbfile: path to the database file
    :returns: an open apsw connection with ``foreign_keys`` enabled
    """
    conn = apsw.Connection(os.fspath(dbfile))
    # Enforce referential integrity on this connection.
    _set_foreign_keys(conn, 1)
    # NOTE(review): assert is stripped under ``python -O``; consider raising
    # explicitly if this sanity check must always run.
    assert _get_foreign_keys(conn) == 1
    return conn
Connect to SQLite database file.
def regularpage(foldername=None, pagename=None):
    """Catch-all route: render a static template by page (and optional
    folder) name.

    :raises ExperimentError: with ``page_not_found`` when no page name is
        available to render
    """
    # BUG FIX: previously a folder name without a page name fell through to
    # ``foldername + "/" + None`` and raised a TypeError; treat it as a
    # clean page-not-found error instead.
    if pagename is None:
        raise ExperimentError('page_not_found')
    if foldername is None:
        return render_template(pagename)
    return render_template(foldername + "/" + pagename)
Route not found by the other routes above. May point to a static template.
def astimezone_and_leap_second(self, tz):
    """Convert to a Python ``datetime`` in timezone *tz*, plus a leap
    second value.

    If *tz* provides a pytz-style ``normalize`` method it is applied after
    conversion.  For time arrays, an array of datetimes and an array of
    leap second integers is returned instead of single values.
    """
    dt, leap_second = self.utc_datetime_and_leap_second()
    normalize = getattr(tz, 'normalize', None)
    if self.shape:
        if normalize is not None:
            converted = [normalize(d.astimezone(tz)) for d in dt]
        else:
            converted = [d.astimezone(tz) for d in dt]
        dt = array(converted)
    else:
        dt = dt.astimezone(tz)
        if normalize is not None:
            dt = normalize(dt)
    return dt, leap_second
Convert to a Python ``datetime`` and leap second in a timezone. Convert this time to a Python ``datetime`` and a leap second:: dt, leap_second = t.astimezone_and_leap_second(tz) The argument ``tz`` should be a timezone from the third-party ``pytz`` package, which must be installed separately. The date and time returned will be for that time zone. The leap second value is provided because a Python ``datetime`` can only number seconds ``0`` through ``59``, but leap seconds have a designation of at least ``60``. The leap second return value will normally be ``0``, but will instead be ``1`` if the date and time are a UTC leap second. Add the leap second value to the ``second`` field of the ``datetime`` to learn the real name of the second. If this time is an array, then an array of ``datetime`` objects and an array of leap second integers is returned, instead of a single value each.
def get_import_code(tlobject):
    """Return the ``from ... import ...`` statement for a TLObject."""
    module = 'functions' if tlobject.is_function else 'types'
    namespace = '.' + tlobject.namespace if tlobject.namespace else ''
    return 'from telethon.tl.{}{} import {}'.format(
        module, namespace, tlobject.class_name)
``TLObject -> from ... import ...``.