code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def mpirun(self):
    """Return the ``mpirun`` command line as a list of strings.

    Falls back to a bare ``['mpirun']`` when the configured command is
    non-empty but does not start with 'mpirun'.
    """
    command = self.attributes['mpirun']
    if command and command[0] != 'mpirun':
        command = ['mpirun']
    return [str(part) for part in command]
Additional options passed as a list to the ``mpirun`` command
def connect(
        self, login, password, authz_id=b"", starttls=False, authmech=None):
    """Establish a connection with the server.

    Reads the server capabilities, then optionally negotiates STARTTLS
    before authenticating.

    :param login: username
    :param password: clear-text password
    :param authz_id: authorization identity (optional)
    :param starttls: whether to switch to a TLS connection first
    :param authmech: preferred authentication mechanism
    :rtype: boolean
    """
    try:
        self.sock = socket.create_connection((self.srvaddr, self.srvport))
        self.sock.settimeout(Client.read_timeout)
    except socket.error as msg:
        raise Error("Connection to server failed: %s" % str(msg))
    # Capabilities must be read before any other command can be issued.
    if not self.__get_capabilities():
        raise Error("Failed to read capabilities from server")
    if starttls and not self.__starttls():
        return False
    return bool(self.__authenticate(login, password, authz_id, authmech))
Establish a connection with the server. This function must be used first: it reads the server capabilities and wraps calls to the STARTTLS and AUTHENTICATE commands. :param login: username :param password: clear password :param starttls: use a TLS connection or not :param authmech: preferred authentication mechanism :rtype: boolean
def _compute_intra_event_std(self, C, vs30, pga1100, sigma_pga): sig_lnyb = np.sqrt(C['s_lny'] ** 2. - C['s_lnAF'] ** 2.) sig_lnab = np.sqrt(sigma_pga ** 2. - C['s_lnAF'] ** 2.) alpha = self._compute_intra_event_alpha(C, vs30, pga1100) return np.sqrt( (sig_lnyb ** 2.) + (C['s_lnAF'] ** 2.) + ((alpha ** 2.) * (sig_lnab ** 2.)) + (2.0 * alpha * C['rho'] * sig_lnyb * sig_lnab))
Returns the intra-event standard deviation at the site, as defined in equation 15, page 147
def pad_to_power2(data, axis=None, mode="constant"):
    """Pad ``data`` so the lengths of the given axes are powers of 2.

    If ``axis`` is None, every axis is padded. Returns ``data`` unchanged
    when all the selected axes already have power-of-2 lengths.
    """
    if axis is None:
        axis = list(range(data.ndim))
    shape = data.shape
    if np.all([_is_power2(length)
               for ax, length in enumerate(shape) if ax in axis]):
        return data
    target = [_next_power_of_2(length) if ax in axis else length
              for ax, length in enumerate(shape)]
    return pad_to_shape(data, target, mode)
Pad data to a shape of power 2. If axis is None, all axes are padded.
def campaign(self, name, owner=None, **kwargs):
    """Create a Campaign TI object.

    Args:
        name: The name of the Campaign.
        owner: The owner of the Campaign (optional).
        **kwargs: Additional Campaign keyword arguments.

    Returns:
        The created Campaign TI object.
    """
    campaign_obj = Campaign(self.tcex, name, owner=owner, **kwargs)
    return campaign_obj
Create the Campaign TI object. Args: name: The name of the Campaign. owner: The name of the owner. **kwargs: Additional keyword arguments. Return: A Campaign TI object.
def heater_level(self, value):
    """Set the heater level after validating it.

    Only valid when the freshroastsr700 object was initialized with
    ext_sw_heater_drive=True; raises RoasterValueError otherwise, or when
    ``value`` is outside 0..heater_bangbang_segments.
    """
    if not self._ext_sw_heater_drive:
        raise exceptions.RoasterValueError
    if value not in range(0, self._heater_bangbang_segments + 1):
        raise exceptions.RoasterValueError
    self._heater_level.value = value
Verifies that the heater_level is between 0 and heater_segments. Can only be called when freshroastsr700 object is initialized with ext_sw_heater_drive=True. Will throw RoasterValueError otherwise.
def generate_entities_doc(ctx, out_path, package):
    """Create entity documentation from Canari Python class files."""
    from canari.commands.generate_entities_doc import \
        generate_entities_doc as _generate
    _generate(ctx.project, out_path, package)
Create entities documentation from Canari python classes file.
def input_dir(dirname):
    """Check all Python source files in this directory and all
    subdirectories.

    :return: total number of errors found.
    """
    dirname = dirname.rstrip('/')
    if excluded(dirname):
        return 0
    errors = 0
    for root, dirs, files in os.walk(dirname):
        if options.verbose:
            message('directory ' + root)
        options.counters['directories'] = \
            options.counters.get('directories', 0) + 1
        # Prune excluded subdirectories in place so os.walk skips them.
        # The original called dirs.remove() while iterating the same list,
        # which skips the element following each removed one and can leave
        # excluded directories in the walk.
        dirs[:] = sorted(d for d in dirs if not excluded(d))
        files.sort()
        for filename in files:
            errors += input_file(os.path.join(root, filename))
    return errors
Check all Python source files in this directory and all subdirectories.
def execute_route(self, meta_data, request_pdu):
    """Execute the configured route for a request and build a response.

    :param meta_data: A dict with meta data; must contain key 'unit_id'.
    :param request_pdu: A bytearray containing the request PDU.
    :return: A bytearray containing the response PDU.
    """
    try:
        function = create_function_from_request_pdu(request_pdu)
        results = function.execute(meta_data['unit_id'], self.route_map)
        try:
            return function.create_response_pdu(results)
        except TypeError:
            # Some functions build their response without results.
            return function.create_response_pdu()
    except ModbusError as e:
        function_code = get_function_code_from_request_pdu(request_pdu)
        return pack_exception_pdu(function_code, e.error_code)
    except Exception as e:
        log.exception('Could not handle request: {0}.'.format(e))
        function_code = get_function_code_from_request_pdu(request_pdu)
        return pack_exception_pdu(function_code,
                                  ServerDeviceFailureError.error_code)
Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearray containing response PDU.
def jbcorrelation(sites_or_distances, imt, vs30_clustering=False):
    """Jayaram-Baker correlation model.

    :param sites_or_distances: SiteCollection instance or distance matrix
    :param imt: Intensity Measure Type (PGA or SA)
    :param vs30_clustering: flag, default False
    """
    if hasattr(sites_or_distances, 'mesh'):
        distances = sites_or_distances.mesh.get_distance_matrix()
    else:
        distances = sites_or_distances
    # The correlation range `b` depends on the spectral period and on
    # whether Vs30 values are clustered.
    period = imt.period
    if period < 1:
        b = 40.7 - 15.0 * period if vs30_clustering else 8.5 + 17.2 * period
    else:
        b = 22.0 + 3.7 * period
    return numpy.exp((-3.0 / b) * distances)
Returns the Jayaram-Baker correlation model. :param sites_or_distances: SiteCollection instance or distance matrix :param imt: Intensity Measure Type (PGA or SA) :param vs30_clustering: flag, default False
def remove_exclude_regions(orig_bed, base_file, items,
                           remove_entire_feature=False):
    """Remove excluded regions (centromeres, short ends) from an existing
    BED file of regions to target.

    Returns the filtered BED path, or the original file when no filtered
    output exists.
    """
    from bcbio.structural import shared as sshared
    base_noext = utils.splitext_plus(base_file)[0]
    out_bed = os.path.join("%s-noexclude.bed" % base_noext)
    if not utils.file_uptodate(out_bed, orig_bed):
        exclude_bed = sshared.prepare_exclude_file(items, base_file)
        with file_transaction(items[0], out_bed) as tx_out_bed:
            pybedtools.BedTool(orig_bed).subtract(
                pybedtools.BedTool(exclude_bed),
                A=remove_entire_feature,
                nonamecheck=True).saveas(tx_out_bed)
    return out_bed if utils.file_exists(out_bed) else orig_bed
Remove centromere and short end regions from an existing BED file of regions to target.
def shift(self, timezone):
    """Shift this Delorean object into the given timezone, mutating it
    and returning the modified object.

    :raises DeloreanInvalidTimezone: if ``timezone`` is not a valid name.
    """
    try:
        tz = pytz.timezone(timezone)
    except pytz.UnknownTimeZoneError:
        raise DeloreanInvalidTimezone('Provide a valid timezone')
    self._tzinfo = tz
    self._dt = tz.normalize(self._dt.astimezone(tz))
    self._tzinfo = self._dt.tzinfo
    return self
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object, modifying the Delorean object and returning the modified object. .. testsetup:: from datetime import datetime from delorean import Delorean .. doctest:: >>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific') >>> d.shift('UTC') Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
def _get_result_paths(self, data):
    """Return a dict with the path to the resulting tree file."""
    tree_path = splitext(self._input_filename)[0] + '.tree'
    return {'Tree': ResultPath(Path=tree_path)}
Get the resulting tree
# Add the issues in ``issue_keys`` to ``sprint_id``, using the Agile REST
# API when configured, otherwise the legacy GreenHopper endpoint.
# NOTE(review): the Agile path does not return the POST response while the
# GreenHopper path returns the PUT response — confirm callers tolerate the
# asymmetric return values.
def add_issues_to_sprint(self, sprint_id, issue_keys): if self._options['agile_rest_path'] == GreenHopperResource.AGILE_BASE_REST_PATH: url = self._get_url('sprint/%s/issue' % sprint_id, base=self.AGILE_BASE_URL) payload = {'issues': issue_keys} try: self._session.post(url, data=json.dumps(payload)) except JIRAError as e: if e.status_code == 404: warnings.warn('Status code 404 may mean, that too old JIRA Agile version is installed.' ' At least version 6.7.10 is required.') raise elif self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH: sprint_field_id = self._get_sprint_field_id() data = {'idOrKeys': issue_keys, 'customFieldId': sprint_field_id, 'sprintId': sprint_id, 'addToBacklog': False} url = self._get_url('sprint/rank', base=self.AGILE_BASE_URL) return self._session.put(url, data=json.dumps(data)) else: raise NotImplementedError('No API for adding issues to sprint for agile_rest_path="%s"' % self._options['agile_rest_path'])
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must be started but not completed. If a sprint was completed, then have to also edit the history of the issue so that it was added to the sprint before it was completed, preferably before it started. A completed sprint's issues also all have a resolution set before the completion date. If a sprint was not started, then have to edit the marker and copy the rank of each issue too. :param sprint_id: the sprint to add issues to :type sprint_id: int :param issue_keys: the issues to add to the sprint :type issue_keys: List[str] :rtype: Response
def bootstrap_histogram_1D(
        values, intervals, uncertainties=None, normalisation=False,
        number_bootstraps=None, boundaries=None):
    """Bootstrap-sample a 1-D histogram of data with uncertainties.

    :param numpy.ndarray values: the data values
    :param numpy.ndarray intervals: the bin edges
    :param numpy.ndarray uncertainties:
        standard deviations of each observation (optional)
    :param bool normalisation:
        if True, return the histogram as a density function
    :param int number_bootstraps: number of bootstrap samples
    :param tuple boundaries: (lower, upper) bounds on the data
    :returns: 1-D histogram of the data
    """
    # Skip bootstrapping when it was not requested, when no uncertainties
    # are given, or when all uncertainties are effectively zero.
    # BUGFIX: the original tested np.all(np.fabs(uncertainties < PRECISION)),
    # taking the absolute value of the *boolean* comparison rather than of
    # the uncertainties; the parenthesis is corrected here. A None
    # `uncertainties` with bootstrapping requested also used to crash.
    if (not number_bootstraps or uncertainties is None or
            np.all(np.fabs(uncertainties) < PRECISION)):
        output = hmtk_histogram_1D(values, intervals)
        if normalisation:
            output = output / float(np.sum(output))
        return output
    temp_hist = np.zeros([len(intervals) - 1, number_bootstraps],
                         dtype=float)
    for iloc in range(number_bootstraps):
        sample = sample_truncated_gaussian_vector(values, uncertainties,
                                                  boundaries)
        temp_hist[:, iloc] = hmtk_histogram_1D(sample, intervals)
    output = np.sum(temp_hist, axis=1)
    if normalisation:
        # Normalise to a density over all bootstraps.
        output = output / float(np.sum(output))
    else:
        # Average the bootstrap counts.
        output = output / float(number_bootstraps)
    return output
Bootstrap samples a set of vectors :param numpy.ndarray values: The data values :param numpy.ndarray intervals: The bin edges :param numpy.ndarray uncertainties: The standard deviations of each observation :param bool normalisation: If True then returns the histogram as a density function :param int number_bootstraps: Number of bootstraps :param tuple boundaries: (Lower, Upper) bounds on the data :param returns: 1-D histogram of data
def _is_in_keep_going(self, x, y): x1, y1, x2, y2 = self._keep_going return self.won == 1 and x1 <= x < x2 and y1 <= y < y2
Checks if the mouse is in the keep going button, and if the won overlay is shown.
def im_watermark(im, inputtext, font=None, color=None, opacity=.6,
                 margin=(30, 30)):
    """Imprint a PIL image with ``inputtext`` in the lower-right corner."""
    if im.mode != "RGBA":
        im = im.convert("RGBA")
    # Draw the text on a fully transparent layer, then composite it.
    overlay = Image.new("RGBA", im.size, (0, 0, 0, 0))
    draw = ImageDraw.Draw(overlay)
    text_size = draw.textsize(inputtext, font=font)
    position = [im.size[i] - text_size[i] - margin[i] for i in (0, 1)]
    draw.text(position, inputtext, font=font, fill=color)
    if opacity != 1:
        overlay = reduce_opacity(overlay, opacity)
    return Image.composite(overlay, im, overlay)
imprints a PIL image with the indicated text in lower-right corner
def hook(klass):
    """Monkey-patch a pdb.Pdb class.

    Adds a 'vim' (and 'v') command that switches to debugging with vimpdb.
    Does nothing when the class already provides ``do_vim``.
    """
    if hasattr(klass, 'do_vim'):
        return
    setupMethod(klass, trace_dispatch)
    klass.__bases__ += (SwitcherToVimpdb, )
monkey-patch pdb.Pdb class adds a 'vim' (and 'v') command: it switches to debugging with vimpdb
def rpc(self, name, handler, value, request=None):
    """Perform an RPC operation on a PV.

    :param name: A single name string or list of name strings
    :param callable handler: Completion notification. Called with a
        Value, RemoteError, or Cancelled
    :param value: The argument Value; None means an empty structure
    :param request: A :py:class:`p4p.Value` or string to qualify this
        request, or None to use a default.
    :returns: An object with a method cancel() which may be used to
        abort the operation.
    """
    chan = self._channel(name)
    if value is None:
        value = Value(Type([]))
    return _p4p.ClientOperation(chan,
                                handler=unwrapHandler(handler, self._nt),
                                value=value,
                                pvRequest=wrapRequest(request),
                                rpc=True)
Perform RPC operation on PV :param name: A single name string or list of name strings :param callable handler: Completion notification. Called with a Value, RemoteError, or Cancelled :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :returns: A object with a method cancel() which may be used to abort the operation.
def nodes(): for name, provider in env.providers.items(): print name provider.nodes() print
List running nodes on all enabled cloud providers. Automatically flushes caches
def _create_lock_object(self, key):
    """Return a redis lock object for ``key``; split out for testing."""
    expiration = self.settings['REDIS_LOCK_EXPIRATION']
    return redis_lock.Lock(self.redis_conn, key,
                           expire=expiration,
                           auto_renewal=True)
Returns a lock object, split for testing
def init_logger(log_requests=False):
    """Initialize the package logger with colored stderr output.

    :param log_requests: also log urllib3/requests HTTP traffic
    """
    logger = logging.getLogger(__name__.split(".")[0])
    # BUGFIX: iterate over a copy — removing handlers from the live
    # logger.handlers list while iterating it skips every other handler.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    formatter = coloredlogs.ColoredFormatter(fmt="%(asctime)s: %(message)s")
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if log_requests:
        requests.packages.urllib3.add_stderr_logger()
Initialize the logger
def index(request, template_name="index.html"):
    """The index view: displays a button and increments a counter.

    The counter is only incremented on Intercooler (ic-request) requests.
    """
    # Both branches of the original duplicated the get_or_create call;
    # hoist it and keep only the branch-specific behavior.
    counter, _created = Counter.objects.get_or_create(pk=1)
    if request.GET.get('ic-request'):
        counter.value += 1
        counter.save()
    else:
        # NOTE(review): debug print retained from the original non-ic
        # branch; consider replacing with logging.
        print(counter.value)
    context = dict(
        value=counter.value,
    )
    return render(request, template_name, context=context)
The index view, which basically just displays a button and increments a counter.
def set_address(network, address):
    """Set the given subnet on the network XML element.

    The libvirt bridge will get an address and a DHCP range derived from
    the subnet prefix length.

    :raises RuntimeError: if the XML already contains an <ip> element.
    """
    if network.find('.//ip') is not None:
        raise RuntimeError("Address already specified in XML configuration.")
    # Bridge address is the first host; DHCP spans the remaining hosts.
    ip_element = etree.SubElement(
        network, 'ip',
        address=str(address[1]), netmask=str(address.netmask))
    dhcp_element = etree.SubElement(ip_element, 'dhcp')
    etree.SubElement(dhcp_element, 'range',
                     start=str(address[2]), end=str(address[-2]))
Sets the given address to the network XML element. Libvirt bridge will have address and DHCP server configured according to the subnet prefix length.
# Write a local file header for the next archive member and reset the
# per-file state (sizes, CRC, compressor object).
# NOTE(review): the closed-archive check appears twice in the original;
# the second occurrence is redundant but preserved byte-for-byte here.
def WriteFileHeader(self, arcname=None, compress_type=None, st=None): if not self._stream: raise ArchiveAlreadyClosedError( "Attempting to write to a ZIP archive that was already closed.") self.cur_zinfo = self._GenerateZipInfo( arcname=arcname, compress_type=compress_type, st=st) self.cur_file_size = 0 self.cur_compress_size = 0 if self.cur_zinfo.compress_type == zipfile.ZIP_DEFLATED: self.cur_cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15) else: self.cur_cmpr = None self.cur_crc = 0 if not self._stream: raise ArchiveAlreadyClosedError( "Attempting to write to a ZIP archive that was already closed.") self.cur_zinfo.header_offset = self._stream.tell() self._zip_fd._writecheck(self.cur_zinfo) self._zip_fd._didModify = True self._stream.write(self.cur_zinfo.FileHeader()) return self._stream.GetValueAndReset()
Writes a file header.
def _copyValues(self, to, extra=None):
    """Copy param values shared with another instance onto it.

    :param to: the target instance
    :param extra: extra params to be copied
    :return: the target instance with param values copied
    """
    merged = self._paramMap.copy()
    if extra is not None:
        merged.update(extra)
    for param in self.params:
        shared = to.hasParam(param.name)
        if shared and param in self._defaultParamMap:
            to._defaultParamMap[to.getParam(param.name)] = \
                self._defaultParamMap[param]
        if shared and param in merged:
            to._set(**{param.name: merged[param]})
    return to
Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied
def nub(it):
    """Deduplicate an iterable, preserving first-seen order.

    Uses memory proportional to the number of unique items in ``it``;
    items must be hashable.

    BUGFIX: the original tracked only hash(v), so two *distinct* values
    with colliding hashes would wrongly be treated as duplicates;
    membership is now tested on the values themselves.
    """
    seen = set()
    for v in it:
        if v in seen:
            continue
        seen.add(v)
        yield v
Dedups an iterable in arbitrary order. Uses memory proportional to the number of unique items in ``it``.
def number_of_bytes_to_modify(buf_len, fuzz_factor):
    """Calculate how many bytes of a buffer to fuzz.

    :param buf_len: length of the data buffer to fuzz.
    :param fuzz_factor: degree of fuzzing (higher means fewer bytes).
    :return: number of bytes to change, at least 1.
    """
    upper = math.ceil(float(buf_len) / fuzz_factor)
    return 1 + random.randrange(upper)
Calculate number of bytes to modify. :param buf_len: len of data buffer to fuzz. :param fuzz_factor: degree of fuzzing. :return: number of bytes to change.
def handleError(self, test, err, capt=None):
    """Handle "errors" that encode statuses (blocked/deprecated/skipped)
    so they are reported as skips rather than errors in base when the
    database plugin is not present.
    """
    if not hasattr(test.test, "testcase_guid"):
        # The original had three identical branches, each followed by an
        # unreachable ``return True`` after the raise; collapsed here.
        if err[0] in (errors.BlockedTest, errors.DeprecatedTest,
                      errors.SkipTest):
            raise SkipTest(err[1])
If the database plugin is not present, we have to handle capturing "errors" that shouldn't be reported as such in base.
def get_delta(D, k):
    """Calculate the k-th order trend filtering matrix from the oriented
    edge incidence matrix.

    :param D: the oriented edge incidence matrix.
    :param k: the (non-negative) order.
    :raises ValueError: if ``k`` is negative (the original raised a bare
        Exception; ValueError is more precise and remains backward
        compatible for callers catching Exception).
    """
    if k < 0:
        raise ValueError('k must be at least 0th order.')
    result = D
    for i in range(k):
        # Alternate D^T . result and D . result to build the k-th order
        # product.
        result = D.T.dot(result) if i % 2 == 0 else D.dot(result)
    return result
Calculate the k-th order trend filtering matrix given the oriented edge incidence matrix and the value of k.
def normalize(self):
    """Rescale the potential in place to the range 0 to 1."""
    shifted = self.__v - np.amin(self.__v)
    self.__v = shifted / np.amax(shifted)
Sets the potential range 0 to 1.
def assign_nested_vars(variables, tensors, indices=None):
    """Assign tensors to a matching nested tuple of variables.

    Args:
        variables: Nested tuple or list of variables to update.
        tensors: Nested tuple or list of tensors to assign.
        indices: Batch indices to assign to; defaults to all.

    Returns:
        Operation.
    """
    if isinstance(variables, (tuple, list)):
        ops = [assign_nested_vars(var, tensor)
               for var, tensor in zip(variables, tensors)]
        return tf.group(*ops)
    if indices is None:
        return variables.assign(tensors)
    return tf.scatter_update(variables, indices, tensors)
Assign tensors to matching nested tuple of variables. Args: variables: Nested tuple or list of variables to update. tensors: Nested tuple or list of tensors to assign. indices: Batch indices to assign to; default to all. Returns: Operation.
def check_output(self, cmd):
    """Run a command through SSH and return its output.

    :raises RemoteCommandFailure: if the command exits non-zero.
    """
    ret, output = self._call(cmd, True)
    if ret != 0:
        raise RemoteCommandFailure(command=cmd, ret=ret)
    logger.debug("Output: %r", output)
    return output
Calls a command through SSH and returns its output.
def com_google_fonts_check_fontv(ttFont):
    """Check for font-v versioning."""
    from fontv.libfv import FontVersion
    fv = FontVersion(ttFont)
    ok = fv.version and (fv.is_development or fv.is_release)
    if ok:
        yield PASS, "Font version string looks GREAT!"
    else:
        yield INFO, ("Version string is: \"{}\"\n"
                     "The version string must ideally include a git commit hash"
                     " and either a 'dev' or a 'release' suffix such as in the"
                     " example below:\n"
                     "\"Version 1.3; git-0d08353-release\""
                     "").format(fv.get_name_id5_version_string())
Check for font-v versioning
def _match_space_at_line(line):
    """Return a re.match object if ``line`` is exactly an empty comment."""
    pattern = re.compile(r"^{0}$".format(_MDL_COMMENT))
    return pattern.match(line)
Return a re.match object if an empty comment was found on line.
def projection_name(self, **kwargs: Dict[str, Any]) -> str:
    """Define the projection name for this projector.

    Note: This function is just a basic placeholder and likely should be
    overridden.

    Args:
        kwargs: Projection information dict combined with additional
            arguments passed to the projection function.

    Returns:
        ``projection_name_format`` formatted with the passed options.
    """
    fmt = self.projection_name_format
    return fmt.format(**kwargs)
Define the projection name for this projector. Note: This function is just a basic placeholder and likely should be overridden. Args: kwargs: Projection information dict combined with additional arguments passed to the projection function. Returns: Projection name string formatted with the passed options. By default, it returns ``projection_name_format`` formatted with the arguments to this function.
def reserved_quota(self, reserved_quota):
    """Set the reserved_quota of this ServicePackageMetadata.

    Sum of all open reservations for this account.

    :param reserved_quota: non-negative int
    :raises ValueError: if ``reserved_quota`` is None or negative.
    """
    if reserved_quota is None:
        raise ValueError("Invalid value for `reserved_quota`, must not be `None`")
    # The original repeated an `is not None` test here; it is redundant
    # because the None case has already raised above.
    if reserved_quota < 0:
        raise ValueError("Invalid value for `reserved_quota`, must be a value greater than or equal to `0`")
    self._reserved_quota = reserved_quota
Sets the reserved_quota of this ServicePackageMetadata. Sum of all open reservations for this account. :param reserved_quota: The reserved_quota of this ServicePackageMetadata. :type: int
def _parse_tables(cls, parsed_content):
    """Parse the information tables contained in a character's page.

    Parameters
    ----------
    parsed_content: :class:`bs4.BeautifulSoup`
        A :class:`BeautifulSoup` object containing all the content.

    Returns
    -------
    :class:`OrderedDict`
        Maps each table's header text to its rows (header row excluded).
    """
    output = OrderedDict()
    for table in parsed_content.find_all('table', attrs={"width": "100%"}):
        header = table.find("td").text
        output[header] = table.find_all("tr")[1:]
    return output
Parses the information tables contained in a character's page. Parameters ---------- parsed_content: :class:`bs4.BeautifulSoup` A :class:`BeautifulSoup` object containing all the content. Returns ------- :class:`OrderedDict`[str, :class:`list`of :class:`bs4.Tag`] A dictionary containing all the table rows, with the table headers as keys.
def partial_derivative_mu(mu, sigma, low, high, data):
    """The partial derivative with respect to the mean.

    Args:
        mu (float): the mean of the truncated normal
        sigma (float): the std of the truncated normal
        low (float): the lower truncation bound
        high (float): the upper truncation bound
        data (ndarray): one-dimensional array of data points

    Returns:
        float: the (negated) partial derivative at the given point
    """
    data_term = np.sum(data - mu) / sigma ** 2
    truncation_term = len(data) * (
        (norm.pdf(low, mu, sigma) - norm.pdf(high, mu, sigma)) /
        (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
    return -(data_term - truncation_term)
The partial derivative with respect to the mean. Args: mu (float): the mean of the truncated normal sigma (float): the std of the truncated normal low (float): the lower truncation bound high (float): the upper truncation bound data (ndarray): the one dimension list of data points for which we want to calculate the likelihood Returns: float: the partial derivative evaluated at the given point
def kldiv_model(prediction, fm):
    """Wrap kldiv functionality for model evaluation.

    Input:
        prediction: 2D matrix, the model salience map.
        fm: fixmat; should be filtered for the image corresponding to the
            prediction.
    """
    (_, r_x) = calc_resize_factor(prediction, fm.image_size)
    # Normalize the prediction into a probability distribution.
    q = np.array(prediction, copy=True)
    q = q - np.min(q.flatten())
    q = q / np.sum(q.flatten())
    return kldiv(None, q, distp=fm, scale_factor=r_x)
wraps kldiv functionality for model evaluation input: prediction: 2D matrix the model salience map fm : fixmat Should be filtered for the image corresponding to the prediction
def download_images(query, path, size=1024):
    """Download images in ``query`` to ``path``.

    Returns a list of [filename, "lat,lon"] pairs for the downloaded
    images. Available sizes: 320, 640, 1024 (default), or 2048.
    """
    image_suffix = "thumb-{0}.jpg".format(size)
    downloaded = []
    for entry in query:
        key = entry['properties']['key']
        url = MAPILLARY_API_IM_RETRIEVE_URL + key + '/' + image_suffix
        filename = key + ".jpg"
        try:
            opener = urllib.URLopener()
            opener.retrieve(url, path + filename)
            coords = ",".join(map(str, entry['geometry']['coordinates']))
            downloaded.append([filename, coords])
            print("Successfully downloaded: {0}".format(filename))
        except KeyboardInterrupt:
            break
        except Exception as e:
            print("Failed to download: {} due to {}".format(filename, e))
    return downloaded
Download images in query result to path. Return list of downloaded images with lat,lon. There are four sizes available: 320, 640, 1024 (default), or 2048.
def get_shard_id2num_examples(num_shards, total_num_examples):
    """Return the mapping shard_id => num_examples, assuming round-robin
    distribution of examples over shards.
    """
    base, remainder = divmod(total_num_examples, num_shards)
    return [base + 1 if shard_id < remainder else base
            for shard_id in range(num_shards)]
Return the mapping shard_id=>num_examples, assuming round-robin.
def _get_biodata(base_file, args):
    """Retrieve biodata genome targets customized by install parameters."""
    with open(base_file) as in_handle:
        config = yaml.safe_load(in_handle)
    config["install_liftover"] = False
    config["genome_indexes"] = args.aligners
    ann_groups = config.pop("annotation_groups", {})
    config["genomes"] = [
        _setup_genome_annotations(g, args, ann_groups)
        for g in config["genomes"] if g["dbkey"] in args.genomes]
    return config
Retrieve biodata genome targets customized by install parameters.
def dumps(obj):
    """Output JSON with formatting edits plus custom object handling."""
    return json.dumps(obj, cls=CustomEncoder, sort_keys=True, indent=4)
Outputs json with formatting edits + object handling.
def once(self):
    """Return a function that executes the wrapped callable at most once,
    no matter how often it is called, caching and returning the first
    result. Useful for lazy initialization.
    """
    ns = self.Namespace()
    ns.memo = None
    ns.run = False

    def run_once(*args, **kwargs):
        if not ns.run:
            ns.memo = self.obj(*args, **kwargs)
            ns.run = True
        return ns.memo

    return self._wrap(run_once)
Returns a function that will be executed at most one time, no matter how often you call it. Useful for lazy initialization.
def complete_experiment(self, status):
    """Record worker completion status to the experiment server.

    Performs a GET to the /worker_complete or /worker_failed endpoint,
    retrying (with random sleeps) until the request succeeds.
    """
    self.log("Bot player completing experiment. Status: {}".format(status))
    url = "{host}/{status}?participant_id={participant_id}".format(
        host=self.host, participant_id=self.participant_id, status=status
    )
    while True:
        try:
            result = requests.get(url)
            result.raise_for_status()
        except RequestException:
            self.stochastic_sleep()
        else:
            return result
Record worker completion status to the experiment server. This is done using a GET request to the /worker_complete or /worker_failed endpoints.
def _SGraphFromJsonTree(json_str):
    """Convert a JSON tree string to an SGraph."""
    tree = json.loads(json_str)
    vertices = [
        _Vertex(v['id'],
                dict((str(k), val) for k, val in _six.iteritems(v)
                     if k != 'id'))
        for v in tree['vertices']]
    edges = [
        _Edge(e['src'], e['dst'],
              dict((str(k), val) for k, val in _six.iteritems(e)
                   if k != 'src' and k != 'dst'))
        for e in tree['edges']]
    graph = _SGraph().add_vertices(vertices)
    if len(edges) > 0:
        graph = graph.add_edges(edges)
    return graph
Convert the Json Tree to SGraph
def list_profile(hostname, username, password, profile_type, name=None):
    """Connect to a bigip device and list an existing profile.

    If no name is provided, all profiles of the specified type are listed.

    hostname: the host/address of the bigip device
    username: the iControl REST username
    password: the iControl REST password
    profile_type: the type of profile(s) to list
    name: the name of the profile to list

    CLI Example::

        salt '*' bigip.list_profile bigip admin admin http my-http-profile
    """
    bigip_session = _build_session(username, password)
    base = BIG_IP_URL_BASE.format(host=hostname)
    if name:
        url = base + '/ltm/profile/{type}/{name}?expandSubcollections=true'.format(
            type=profile_type, name=name)
    else:
        url = base + '/ltm/profile/{type}'.format(type=profile_type)
    try:
        response = bigip_session.get(url)
    except requests.exceptions.ConnectionError as e:
        return _load_connection_error(hostname, e)
    return _load_response(response)
A function to connect to a bigip device and list an existing profile. If no name is provided, then all profiles of the specified type will be listed. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password profile_type The type of profile(s) to list name The name of the profile to list CLI Example:: salt '*' bigip.list_profile bigip admin admin http my-http-profile
def reward_goal(self):
    """Add the end-goal reward when the goal condition is met."""
    if 'goal' not in self.mode:
        return
    goal = self.mode['goal']
    if not (goal and goal['reward'] and self.__test_cond(goal)):
        return
    if goal['reward'] > 0:
        self.logger.info("Escaped!!")
    self.player.stats['reward'] += goal['reward']
    self.player.stats['score'] += goal['reward']
    self.player.game_over = self.player.game_over or goal['terminal']
Add an end goal reward
# Resample `data` via the precompute/compute pair. When the source is a
# SwathDefinition (or mask_area is explicitly True), an invalid-data mask
# is built (_FillValue sentinel for integer dtypes, isnull otherwise),
# reduced over the non-geolocation dims, and passed to precompute. The
# precompute return value is forwarded to compute as cache_id.
# NOTE(review): integer mask compares against attrs['_FillValue'] falling
# back to the dtype's max — confirm that default matches the data sources.
def resample(self, data, cache_dir=None, mask_area=None, **kwargs): if mask_area is None and isinstance( self.source_geo_def, SwathDefinition): mask_area = True if mask_area: if isinstance(self.source_geo_def, SwathDefinition): geo_dims = self.source_geo_def.lons.dims else: geo_dims = ('y', 'x') flat_dims = [dim for dim in data.dims if dim not in geo_dims] if np.issubdtype(data.dtype, np.integer): kwargs['mask'] = data == data.attrs.get('_FillValue', np.iinfo(data.dtype.type).max) else: kwargs['mask'] = data.isnull() kwargs['mask'] = kwargs['mask'].all(dim=flat_dims) cache_id = self.precompute(cache_dir=cache_dir, **kwargs) return self.compute(data, cache_id=cache_id, **kwargs)
Resample `data` by calling `precompute` and `compute` methods. Only certain resampling classes may use `cache_dir` and the `mask` provided when `mask_area` is True. The return value of calling the `precompute` method is passed as the `cache_id` keyword argument of the `compute` method, but may not be used directly for caching. It is up to the individual resampler subclasses to determine how this is used. Args: data (xarray.DataArray): Data to be resampled cache_dir (str): directory to cache precomputed results (default False, optional) mask_area (bool): Mask geolocation data where data values are invalid. This should be used when data values may affect what neighbors are considered valid. Returns (xarray.DataArray): Data resampled to the target area
def add_str(self, seq, name=None, description=""):
    """Add a sequence given as a plain string to this fasta."""
    record = SeqRecord(Seq(seq), id=name, description=description)
    self.add_seq(record)
Use this method to add a sequence as a string to this fasta.
def register(self, model_or_iterable, moderation_class):
    """Register a model or a list of models for comment moderation, using
    a particular moderation class.

    Raises ``AlreadyModerated`` if any of the models are already
    registered.
    """
    models = ([model_or_iterable]
              if isinstance(model_or_iterable, ModelBase)
              else model_or_iterable)
    for model in models:
        if model in self._registry:
            raise AlreadyModerated(
                "The model '%s' is already being moderated" % model._meta.verbose_name
            )
        self._registry[model] = moderation_class(model)
Register a model or a list of models for comment moderation, using a particular moderation class. Raise ``AlreadyModerated`` if any of the models are already registered.
def apply_markup(value, arg=None):
    """Apply text-to-HTML conversion.

    Takes an optional argument to specify the name of a filter to use.
    """
    if arg is None:
        return formatter(value)
    return formatter(value, filter_name=arg)
Applies text-to-HTML conversion. Takes an optional argument to specify the name of a filter to use.
def decrypt(self, key):
    """Decrypt a JWE token.

    :param key: A (:class:`jwcrypto.jwk.JWK`) decryption key or a
        password string.

    :raises InvalidJWEOperation: if there is no ciphertext available.
    :raises InvalidJWEData: if the ciphertext can't be decrypted or the
        object is otherwise malformed.
    """
    if 'ciphertext' not in self.objects:
        raise InvalidJWEOperation("No available ciphertext")
    self.decryptlog = list()
    # Try each recipient (or the whole object when there is no recipient
    # list); failures are collected in decryptlog for diagnostics.
    targets = self.objects.get('recipients')
    if targets is None:
        targets = [self.objects]
    for rec in targets:
        try:
            self._decrypt(key, rec)
        except Exception as e:
            self.decryptlog.append('Failed: [%s]' % repr(e))
    if not self.plaintext:
        raise InvalidJWEData('No recipient matched the provided '
                             'key' + repr(self.decryptlog))
Decrypt a JWE token. :param key: The (:class:`jwcrypto.jwk.JWK`) decryption key. :param key: A (:class:`jwcrypto.jwk.JWK`) decryption key or a password string (optional). :raises InvalidJWEOperation: if the key is not a JWK object. :raises InvalidJWEData: if the ciphertext can't be decrypted or the object is otherwise malformed.
def call(self, method, *args):
    """Make a call to a `Responder` and return (result, error)."""
    payload = self.build_payload(method, args)
    logging.debug('* Client will send payload: {}'.format(payload))
    self.send(payload)
    response = self.receive()
    # The third payload element is the request reference; the response
    # must echo it back.
    assert payload[2] == response['ref']
    return response['result'], response['error']
Make a call to a `Responder` and return the result
def parse_section_entry_points(self, section_options):
    """Parse the `entry_points` configuration file section.

    :param dict section_options:
    """
    self['entry_points'] = self._parse_section_to_dict(
        section_options, self._parse_list)
Parses `entry_points` configuration file section. :param dict section_options:
def get_private_name(self, f):
    """Get the private (class-mangled) name of an attribute.

    :param str f: name of the private attribute to be accessed.
    """
    # dict.get replaces the original `f in d.keys()` membership test.
    f = self.__swagger_rename__.get(f, f)
    return '_' + self.__class__.__name__ + '__' + f
get private protected name of an attribute :param str f: name of the private attribute to be accessed.
def readn(self, n):
    """Keep receiving data until exactly ``n`` bytes have been read.

    :raises socket.error: if the connection yields no data.
    """
    data = ''
    remaining = n
    while remaining > 0:
        received = self.sock.recv(remaining)
        if not len(received):
            raise socket.error('no data read from socket')
        data += received
        remaining = n - len(data)
    return data
Keep receiving data until exactly `n` bytes have been read.
def from_storage(source, source_format='csv', csv_options=None,
                 ignore_unknown_values=False, max_bad_records=0,
                 compressed=False, schema=None):
    """Create an external (federated) table for GCS object(s).

    Args:
        source: the URL of the source object(s); can include a wildcard
            '*' at the end of the item name. Can be a single source or a
            list.
        source_format: the format of the data, 'csv' or 'json'
            (default 'csv').
        csv_options: for CSV files, options such as quote character and
            delimiter.
        ignore_unknown_values: if True, accept rows with values that do
            not match the schema; unknown values are ignored
            (default False).
        max_bad_records: maximum number of bad records allowed (and
            ignored) before the Job result is 'invalid' (default 0).
        compressed: whether the data is GZ compressed (default False).
            Compressed data can back a federated table but cannot be
            loaded into a BQ Table.
        schema: the schema of the data; required for federated use or
            for loading via a schema-less Table object (default None).

    Raises:
        Exception: for an unknown source format, or CSV options given
            with a JSON source.
    """
    result = FederatedTable()
    if source_format == 'csv':
        result._bq_source_format = 'CSV'
        if csv_options is None:
            csv_options = _csv_options.CSVOptions()
    elif source_format == 'json':
        if csv_options:
            # Fixed grammar in the original message ("are not support").
            raise Exception('CSV options are not supported for JSON tables')
        result._bq_source_format = 'NEWLINE_DELIMITED_JSON'
    else:
        raise Exception("Invalid source format %s" % source_format)
    result._source = source if isinstance(source, list) else [source]
    result._source_format = source_format
    result._csv_options = csv_options
    result._ignore_unknown_values = ignore_unknown_values
    result._max_bad_records = max_bad_records
    result._compressed = compressed
    result._schema = schema
    return result
Create an external table for a GCS object. Args: source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item name. Can be a single source or a list. source_format: the format of the data, 'csv' or 'json'; default 'csv'. csv_options: For CSV files, the options such as quote character and delimiter. ignore_unknown_values: If True, accept rows that contain values that do not match the schema; the unknown values are ignored (default False). max_bad_records: The maximum number of bad records that are allowed (and ignored) before returning an 'invalid' error in the Job result (default 0). compressed: whether the data is GZ compressed or not (default False). Note that compressed data can be used as a federated table but cannot be loaded into a BQ Table. schema: the schema of the data. This is required for this table to be used as a federated table or to be loaded using a Table object that itself has no schema (default None).
def _get_free_display_port(self):
    """Search for a free X display port, starting at 100."""
    display = 100
    x11_dir = "/tmp/.X11-unix/"
    if not os.path.exists(x11_dir):
        return display
    while os.path.exists("{}X{}".format(x11_dir, display)):
        display += 1
    return display
Search a free display port
# Re-export all symbols from the original tf.summary into this module's
# globals, trying several candidate TF API packages because import order
# during TF API construction is not guaranteed. The dynamic wildcard
# import honours __all__ when present, otherwise every non-underscore
# name. Restyling is avoided here: the globals() update must occur at
# this module's own scope, and the package-probing order is load-bearing.
def reexport_tf_summary(): import sys packages = [ 'tensorflow', 'tensorflow.compat.v2', 'tensorflow._api.v2', 'tensorflow._api.v2.compat.v2', 'tensorflow._api.v1.compat.v2', ] if not getattr(tf, '__version__', '').startswith('2.'): packages.remove('tensorflow') def dynamic_wildcard_import(module): symbols = getattr(module, '__all__', None) if symbols is None: symbols = [k for k in module.__dict__.keys() if not k.startswith('_')] globals().update({symbol: getattr(module, symbol) for symbol in symbols}) notfound = object() for package_name in packages: package = sys.modules.get(package_name, notfound) if package is notfound: continue module = getattr(package, 'summary', None) if module is None: continue dynamic_wildcard_import(module) return
Re-export all symbols from the original tf.summary. This function finds the original tf.summary V2 API and re-exports all the symbols from it within this module as well, so that when this module is patched into the TF API namespace as the new tf.summary, the effect is an overlay that just adds TensorBoard-provided symbols to the module. Finding the original tf.summary V2 API module reliably is a challenge, since this code runs *during* the overall TF API import process and depending on the order of imports (which is subject to change), different parts of the API may or may not be defined at the point in time we attempt to access them. This code also may be inserted into two places in the API (tf and tf.compat.v2) and may be re-executed multiple times even for the same place in the API (due to the TF module import system not populating sys.modules properly), so it needs to be robust to many different scenarios. The one constraint we can count on is that everywhere this module is loaded (via the component_api_helper mechanism in TF), it's going to be the 'summary' submodule of a larger API package that already has a 'summary' attribute that contains the TF-only summary API symbols we need to re-export. This may either be the original TF-only summary module (the first time we load this module) or a pre-existing copy of this module (if we're re-loading this module again). We don't actually need to differentiate those two cases, because it's okay if we re-import our own TensorBoard-provided symbols; they will just be overwritten later on in this file. So given that guarantee, the approach we take is to first attempt to locate a TF V2 API package that already has a 'summary' attribute (most likely this is the parent package into which we're being imported, but not necessarily), and then do the dynamic version of "from tf_api_package.summary import *". Lastly, this logic is encapsulated in a function to avoid symbol leakage.
def return_fv_by_seeds(fv, seeds=None, unique_cls=None):
    """Return the full feature vector, or the subset selected by seeds.

    :param fv: linearized feature array of shape MxN (M pixels, N features)
    :param seeds: ndarray with seed labels, or None for no selection
    :param unique_cls: seed classes to keep (e.g. [1, 2]); required
        whenever ``seeds`` is given
    :return: selection from fv and seeds when seeds are given, otherwise
        ``fv`` unchanged
    :raises AssertionError: if ``seeds`` is given without ``unique_cls``
    """
    if seeds is None:
        return fv
    if unique_cls is None:
        raise AssertionError(
            "Input unique_cls has to be not None if seeds is not None.")
    return select_from_fv_by_seeds(fv, seeds, unique_cls)
Return features selected by seeds and unique_cls or selection from features and corresponding seed classes. :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number of features :param seeds: ndarray with seeds. Does not to be linear. :param unique_cls: number of used seeds clases. Like [1, 2] :return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image
def map_concepts_to_indicators(
    self, n: int = 1, min_temporal_res: Optional[str] = None
):
    """Attach up to ``n`` indicators to each concept node of the graph.

    For every node, matching indicator names are looked up in the
    concept_to_indicator_mapping table and stored on the node as
    ``node[1]['indicators']`` (a dict of name -> Indicator).

    :param n: number of indicator matches to keep per concept
    :param min_temporal_res: if given, only keep indicators with data at
        this resolution; currently only 'month' is supported
    """
    for node in self.nodes(data=True):
        # NOTE(review): node[0] is interpolated directly into the SQL
        # string — acceptable for trusted concept names only; not
        # injection-safe for external input.
        query_parts = [
            "select Indicator from concept_to_indicator_mapping",
            f"where `Concept` like '{node[0]}'",
        ]
        query = " ".join(query_parts)
        results = engine.execute(query)
        if min_temporal_res is not None:
            if min_temporal_res not in ["month"]:
                raise ValueError("min_temporal_res must be 'month'")
            # Indicator variables that have data at the required
            # resolution (non-null column, e.g. `Month`).
            vars_with_required_temporal_resolution = [
                r[0]
                for r in engine.execute(
                    "select distinct `Variable` from indicator where "
                    f"`{min_temporal_res.capitalize()}` is not null"
                )
            ]
            results = [
                r
                for r in results
                if r[0] in vars_with_required_temporal_resolution
            ]
        # Keep at most n matches; "MITRE12" tags the indicator source.
        node[1]["indicators"] = {
            x: Indicator(x, "MITRE12")
            for x in [r[0] for r in take(n, results)]
        }
Map each concept node in the AnalysisGraph instance to one or more tangible quantities, known as 'indicators'. Args: n: Number of matches to keep min_temporal_res: Minimum temporal resolution that the indicators must have data for.
def _stringify_number(v): if isinstance(v, (float, Decimal)): if math.isinf(v) and v > 0: v = 'Infinity' elif math.isinf(v) and v < 0: v = '-Infinity' else: v = '{:f}'.format(v) elif isinstance(v, BinarySize): v = '{:d}'.format(int(v)) elif isinstance(v, int): v = '{:d}'.format(v) else: v = str(v) return v
Stringify a number, preventing unwanted scientific notations.
def sql_program_name_func(command):
    """Extract the program name from a shell ``command`` string.

    Leading VAR=value assignments are skipped; the first word without an
    '=' is the program. Falls back to the first word when every word
    contains '='.

    >>> sql_program_name_func('git status')
    'git'
    >>> sql_program_name_func('EMACS=emacs make')
    'make'

    :type command: str
    """
    words = command.split(' ')
    return next((word for word in words if '=' not in word), words[0])
Extract program name from `command`. >>> sql_program_name_func('ls') 'ls' >>> sql_program_name_func('git status') 'git' >>> sql_program_name_func('EMACS=emacs make') 'make' :type command: str
def create_project(self, name, client_id, budget = None, budget_by = 'none', notes = None, billable = True):
    """Create a Project with the given information.

    POSTs the project payload and wraps the server response.

    :return: a ``Project`` instance, or None when the request yields no
        response
    """
    payload = {
        'project': {
            'name': name,
            'client_id': client_id,
            'budget_by': budget_by,
            'budget': budget,
            'notes': notes,
            'billable': billable,
        }
    }
    response = self.post_request('projects/', payload, follow = True)
    if response:
        return Project(self, response['project'])
Creates a Project with the given information.
def get_config(self):
    """Load the user configuration, or a default when not found.

    The result is cached on ``self._config`` after the first call.

    :rtype: :class:`Configuration`
    """
    if not self._config:
        namespace = {}
        if os.path.exists(self.config_path):
            # NOTE(review): `execfile` is Python 2 only; it executes the
            # config file as arbitrary code into `namespace`.
            execfile(self.config_path, namespace)
        # Fall back to defaults when the file defines no `config` object.
        self._config = namespace.get('config') or Configuration()
    return self._config
Load user configuration or return default when not found. :rtype: :class:`Configuration`
def _determine_case(was_upper, words, string): case_type = 'unknown' if was_upper: case_type = 'upper' elif string.islower(): case_type = 'lower' elif len(words) > 0: camel_case = words[0].islower() pascal_case = words[0].istitle() or words[0].isupper() if camel_case or pascal_case: for word in words[1:]: c = word.istitle() or word.isupper() camel_case &= c pascal_case &= c if not c: break if camel_case: case_type = 'camel' elif pascal_case: case_type = 'pascal' else: case_type = 'mixed' return case_type
Determine the case type of a string.

Arguments:
    was_upper {bool} -- whether the original string was all upper-case
    words {list} -- the words extracted from the string
    string {str} -- the string being classified

Returns:
    - upper: All words are upper-case.
    - lower: All words are lower-case.
    - pascal: All words are title-case or upper-case. Note that the
      string may still have separators.
    - camel: First word is lower-case, the rest are title-case or
      upper-case. The string may still have separators.
    - mixed: Any other mixing of word casing. Never occurs if there
      are no separators.
    - unknown: The string contains no words.
def plot_vxz(self, colorbar=True, cb_orientation='vertical',
             cb_label=None, ax=None, show=True, fname=None, **kwargs):
    """Plot the Vxz component of the tensor.

    When ``ax`` is None a new figure is created via ``self.vxz.plot``
    (optionally shown and/or saved to ``fname``) and ``(fig, axes)`` is
    returned; otherwise the plot is drawn on the given axes and nothing
    is returned.
    """
    if cb_label is None:
        # Default colorbar label for this component (e.g. '$V_{xz}$').
        cb_label = self._vxz_label

    if ax is None:
        fig, axes = self.vxz.plot(colorbar=colorbar,
                                  cb_orientation=cb_orientation,
                                  cb_label=cb_label, show=False, **kwargs)
        if show:
            fig.show()

        if fname is not None:
            fig.savefig(fname)
        return fig, axes

    else:
        self.vxz.plot(colorbar=colorbar, cb_orientation=cb_orientation,
                      cb_label=cb_label, ax=ax, **kwargs)
Plot the Vxz component of the tensor. Usage ----- x.plot_vxz([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{xz}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods.
def skipDryRun(logger, dryRun, level=logging.DEBUG):
    """Return a logging function bound to ``level``.

    In dry-run mode the returned callable logs and indicates the action
    should be skipped (returns True); otherwise it is a plain
    ``logger.log`` partial.
    """
    if not isinstance(level, int):
        # Accept level names such as 'DEBUG' and map them to int values.
        level = logging.getLevelName(level)
    if dryRun:
        return functools.partial(_logDryRun, logger, level)
    return functools.partial(logger.log, level)
Return logging function. When logging function called, will return True if action should be skipped. Log will indicate if skipped because of dry run.
def get_harddisk_sleep():
    """Return the idle time until the hard disk sleeps (macOS).

    Runs ``systemsetup -getharddisksleep`` and parses its output.

    :return: the hard disk sleep setting
    :rtype: str
    """
    ret = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getharddisksleep')
    return salt.utils.mac_utils.parse_return(ret)
Display the amount of idle time until the hard disk sleeps.

:return: A string representing the sleep settings for the hard disk
:rtype: str

CLI Example:

.. code-block:: bash

    salt '*' power.get_harddisk_sleep
def _stream_search(self, *args, **kwargs):
    """Yield documents from an Elasticsearch scrolling search.

    Wraps the ``scan`` helper with a 10-minute scroll window; each
    yielded ``_source`` dict has the document ``_id`` injected into it.
    """
    for hit in scan(
        self.elastic, query=kwargs.pop("body", None), scroll="10m", **kwargs
    ):
        hit["_source"]["_id"] = hit["_id"]
        yield hit["_source"]
Helper method for iterating over ES search results.
def change_numbering(self, rename_dict, inplace=False):
    """Return (or apply in place) a reindexed version of the Cartesian.

    :param rename_dict: mapping of old index labels to new ones; labels
        not in the mapping are kept unchanged
    :param inplace: when True, modify ``self`` and return None
    :return: the renamed copy unless ``inplace`` is True
    """
    target = self if inplace else self.copy()
    target.index = [rename_dict.get(label, label) for label in self.index]
    if not inplace:
        return target
Return the reindexed version of Cartesian. Args: rename_dict (dict): A dictionary mapping integers on integers. Returns: Cartesian: A renamed copy according to the dictionary passed.
def put_tagging(Bucket, region=None, key=None, keyid=None, profile=None, **kwargs):
    """Replace the tag set of an S3 bucket.

    Every non-dunder keyword argument becomes one Key/Value tag.

    :return: ``{'updated': True, 'name': Bucket}`` on success, or
        ``{'updated': False, 'error': ...}`` on a botocore ClientError
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        tagslist = []
        for k, v in six.iteritems(kwargs):
            # Skip salt-internal kwargs such as __pub_* entries.
            if six.text_type(k).startswith('__'):
                continue
            tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
        conn.put_bucket_tagging(Bucket=Bucket, Tagging={
            'TagSet': tagslist,
        })
        return {'updated': True, 'name': Bucket}
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
Given a valid config, update the tags for a bucket. Returns {updated: true} if tags were updated and returns {updated: False} if tags were not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_tagging my_bucket my_role [...]
def setup_logging(level, console_stream=None, log_dir=None, scope=None,
                  log_name=None, native=None):
    """Configure logging for the given scope (root logger by default).

    :param level: logging level to enable
    :param console_stream: stream for console logging; None disables it
    :param log_dir: optional directory for the log file; created if missing
    :param scope: hierarchical logger name to configure
    :param log_name: base name of the log file (defaults to 'pants.log')
    :param native: Native FFI instance used to register rust logging
    :returns: LoggingSetupResult with the log file path and file handler
        (both None when file logging is not configured)
    """
    log_filename = None
    file_handler = None

    # Attach a `trace` method to Logger for the custom TRACE level.
    def trace(self, message, *args, **kwargs):
        if self.isEnabledFor(TRACE):
            self._log(TRACE, message, args, **kwargs)

    logging.Logger.trace = trace

    logger = logging.getLogger(scope)
    # Remove handlers installed by any previous configuration call.
    for handler in logger.handlers:
        logger.removeHandler(handler)

    if console_stream:
        native_handler = create_native_stderr_log_handler(
            level, native, stream=console_stream)
        logger.addHandler(native_handler)

    if log_dir:
        safe_mkdir(log_dir)
        log_filename = os.path.join(log_dir, log_name or 'pants.log')
        native_handler = create_native_pantsd_file_log_handler(
            level, native, log_filename)
        file_handler = native_handler
        logger.addHandler(native_handler)

    logger.setLevel(level)

    # Route Python warnings through the logging system.
    logging.captureWarnings(True)

    _maybe_configure_extended_logging(logger)

    return LoggingSetupResult(log_filename, file_handler)
Configures logging for a given scope, by default the global scope. :param str level: The logging level to enable, must be one of the level names listed here: https://docs.python.org/2/library/logging.html#levels :param file console_stream: The stream to use for default (console) logging. If None (default), this will disable console logging. :param str log_dir: An optional directory to emit logs files in. If unspecified, no disk logging will occur. If supplied, the directory will be created if it does not already exist and all logs will be tee'd to a rolling set of log files in that directory. :param str scope: A logging scope to configure. The scopes are hierarchichal logger names, with The '.' separator providing the scope hierarchy. By default the root logger is configured. :param str log_name: The base name of the log file (defaults to 'pants.log'). :param Native native: An instance of the Native FFI lib, to register rust logging. :returns: The full path to the main log file if file logging is configured or else `None`. :rtype: str
def reset(self):
    """Reset all the information held by this widget and disable it."""
    self.filename = None
    self.groups = []
    self.tabs.clear()
    self.setEnabled(False)
    # Disable every group-editing button until data is loaded again.
    for button in (self.button_color, self.button_del, self.button_apply):
        button.setEnabled(False)
    for name in ('load_channels', 'save_channels'):
        self.action[name].setEnabled(False)
Reset all the information of this widget.
def amg_video_search(self, entitiy_type, query, **kwargs):
    """Search the Movies and TV (AMG video) database.

    :param entitiy_type: comma separated list of 'movie', 'tvseries'
        and/or 'credit' (parameter name keeps the existing typo for
        backward compatibility with keyword callers)
    :param query: search terms
    """
    # NOTE(review): kwargs is passed positionally as a dict rather than
    # expanded with ** — verify against make_request's signature.
    return self.make_request('amgvideo', entitiy_type, query, kwargs)
Search the Movies and TV database Where ``entitiy_type`` is a comma separated list of: ``movie`` Movies ``tvseries`` TV series ``credit`` people working in TV or movies
def to_pb(self):
    """Render a protobuf message.

    Returns:
        google.iam.policy_pb2.Policy: a message to be passed to the
        ``set_iam_policy`` gRPC API. Members are sorted per role for
        deterministic output.
    """
    return policy_pb2.Policy(
        etag=self.etag,
        # Protobuf needs an int; fall back to 0 when version is unset.
        version=self.version or 0,
        bindings=[
            policy_pb2.Binding(role=role, members=sorted(self[role]))
            for role in self
        ],
    )
Render a protobuf message. Returns: google.iam.policy_pb2.Policy: a message to be passed to the ``set_iam_policy`` gRPC API.
def read_config_info(ini_file):
    """Read an INI file into a nested dict.

    Option-name case is preserved (optionxform is the identity).

    Args:
        ini_file - path to the file
    Returns:
        {section: {option: value}} dictionary of the INI contents
    Exits:
        1 - if any problem is encountered (after logging it)
    """
    try:
        config = RawConfigParser()
        config.optionxform = lambda option: option  # keep option case
        config.read(ini_file)
        return {
            section: {
                option: config.get(section, option)
                for option in config.options(section)
            }
            for section in config.sections()
        }
    except Exception as wtf:
        logging.error('Exception caught in read_config_info(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return sys.exit(1)
Read the INI file Args: ini_file - path to the file Returns: A dictionary of stuff from the INI file Exits: 1 - if problems are encountered
def _read(self): self.json_file.seek(0) try: data = zlib.decompress(self.json_file.read()) self.backup_dict = json.loads(data.decode('utf-8')) except (EOFError, zlib.error): self.backup_dict = {}
Reads backup file from json_file property and sets backup_dict property with data decompressed and deserialized from that file. If no usable data is found backup_dict is set to the empty dict.
def pull_cfg_from_parameters_out(parameters_out, namelist_to_read="nml_allcfgs"):
    """Pull a single config set out of a ``parameters_out`` namelist.

    Builds an f90nml Namelist reproducing the run represented by
    ``parameters_out``, stripping string values of whitespace padding
    and embedded NUL bytes.

    :param parameters_out: the parameters to read from
    :param namelist_to_read: the namelist to extract
    :return: :obj:`f90nml.Namelist` with the cleaned config
    """
    single_cfg = Namelist({namelist_to_read: {}})
    for key, value in parameters_out[namelist_to_read].items():
        if "file_tuning" in key:
            # Tuning-file paths are machine-specific; blank them out.
            single_cfg[namelist_to_read][key] = ""
        else:
            try:
                if isinstance(value, str):
                    single_cfg[namelist_to_read][key] = value.strip(" \t\n\r").replace(
                        "\x00", ""
                    )
                elif isinstance(value, list):
                    clean_list = [v.strip(" \t\n\r").replace("\x00", "") for v in value]
                    # Drop entries that were only padding.
                    single_cfg[namelist_to_read][key] = [v for v in clean_list if v]
                else:
                    assert isinstance(value, Number)
                    single_cfg[namelist_to_read][key] = value
            except AttributeError:
                # Lists of numbers have no .strip; keep them verbatim.
                if isinstance(value, list):
                    assert all([isinstance(v, Number) for v in value])
                    single_cfg[namelist_to_read][key] = value
                else:
                    raise AssertionError(
                        "Unexpected cause in out parameters conversion"
                    )
    return single_cfg
Pull out a single config set from a parameters_out namelist. This function returns a single file with the config that needs to be passed to MAGICC in order to do the same run as is represented by the values in ``parameters_out``. Parameters ---------- parameters_out : dict, f90nml.Namelist The parameters to dump namelist_to_read : str The namelist to read from the file. Returns ------- :obj:`f90nml.Namelist` An f90nml object with the cleaned, read out config. Examples -------- >>> cfg = pull_cfg_from_parameters_out(magicc.metadata["parameters"]) >>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
def extract_args(cls, *args):
    """Split positional args into ``[model, crudbuilder]``, in any order.

    An argument that is a Django model subclass becomes the model; any
    other argument is taken as the crudbuilder. Missing ones stay None.
    """
    model = None
    crudbuilder = None
    for candidate in args:
        if issubclass(candidate, models.Model):
            model = candidate
        else:
            crudbuilder = candidate
    return [model, crudbuilder]
Takes any arguments like a model and crud, or just one of those, in any order, and return a model and crud.
async def load_varint(reader):
    """Binary load of a variable-size integer serialized by dump_varint.

    Reads the first byte, whose masked low bits encode the total width,
    then accumulates the remaining bytes little-endian and finally drops
    the 2-bit size mark.

    :param reader: async reader exposing ``areadinto``
    :return: the decoded integer
    """
    buffer = _UINT_BUFFER

    await reader.areadinto(buffer)
    # Width in bytes is encoded in the size-mark bits of the first byte.
    width = int_mark_to_size(buffer[0] & PortableRawSizeMark.MASK)
    result = buffer[0]

    shift = 8
    for _ in range(width-1):
        await reader.areadinto(buffer)
        result += buffer[0] << shift
        shift += 8
    # Discard the 2-bit size mark from the low end of the value.
    return result >> 2
Binary load of variable size integer serialized by dump_varint :param reader: :return:
def process_modules(modules):
    """Write doc files for the 'client' and 'api' module groups.

    :param modules: dict with 'client' and 'api' lists of modules; the
        index file is generated from the same dict afterwards
    """
    def write_group(group, subdir, package):
        # Create the output directory lazily, only when there is a module.
        directory = '%s/%s' % (HERE, subdir)
        for mod in modules[group]:
            if not exists(directory):
                makedirs(directory)
            write_module_file(mod, directory, package)

    write_group('client', 'client_modules', 'pyeapi')
    write_group('api', 'api_modules', 'pyeapi.api')
    create_index(modules)
Accepts dictionary of 'client' and 'api' modules and creates the corresponding files.
def filter_not_empty_values(value):
    """Return a list of the truthy items of ``value``, or None.

    None is returned when ``value`` itself is falsy or when it contains
    no truthy items.
    """
    if not value:
        return None
    kept = [item for item in value if item]
    return kept or None
Returns a list of non empty values or None
def get(self, *args, **kwargs) -> "QuerySet":
    """Fetch exactly one object matching the parameters.

    The limit of 2 lets the executor detect (and reject) multiple
    matches while fetching at most two rows.
    """
    qs = self.filter(*args, **kwargs)
    qs._limit = 2
    qs._get = True
    return qs
Fetch exactly one object matching the parameters.
def plot_welch_perdiogram(x, fs, nperseg):
    """Plot a Welch periodogram (PSD and linear spectrum) of signal ``x``.

    Args
    ----
    x: ndarray
        Signal array
    fs: float
        Sampling frequency
    nperseg: float
        Length of each data segment in the PSD

    Returns None; figures are shown interactively via ``plt.show()``.
    """
    import scipy.signal
    import numpy

    N = len(x)
    time = numpy.arange(N) / fs  # NOTE(review): computed but never used

    f, Pxx_den = scipy.signal.welch(x, fs, nperseg=nperseg)
    plt.semilogy(f, Pxx_den)
    plt.ylim([0.5e-3, 1])
    plt.xlabel('frequency [Hz]')
    plt.ylabel('PSD [V**2/Hz]')
    plt.show()

    # NOTE(review): mean over bins 256+ is computed and discarded.
    numpy.mean(Pxx_den[256:])

    f, Pxx_spec = scipy.signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
    plt.figure()
    plt.semilogy(f, numpy.sqrt(Pxx_spec))
    plt.xlabel('frequency [Hz]')
    plt.ylabel('Linear spectrum [V RMS]')
    plt.show()

    return None
Plot a Welch periodogram of the signal.

Args
----
x: ndarray
    Signal array
fs: float
    Sampling frequency
nperseg: float
    Length of each data segment in the PSD
def add_simple_link(self, issue, object):
    """Add a simple remote link from an issue to a web resource.

    ``object`` should be a dict containing at least ``url`` (the linked
    external URL) and ``title`` (display text inside JIRA).

    :param issue: the issue to add the remote link to
    :param object: the dictionary used to create the remotelink data
    :return: the created RemoteLink resource
    """
    data = {"object": object}
    url = self._get_url('issue/' + str(issue) + '/remotelink')
    r = self._session.post(
        url, data=json.dumps(data))

    simple_link = RemoteLink(
        self._options, self._session, raw=json_loads(r))
    return simple_link
Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data. ``object`` should be a dict containing at least ``url`` to the linked external URL and ``title`` to display for the link inside JIRA. For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links. :param issue: the issue to add the remote link to :param object: the dictionary used to create remotelink data
def hashable(cls):
    """Class decorator asserting the class is hashable.

    Requires ``__hash__`` (non-None) and ``__eq__`` defined directly on
    the class, and derives ``__ne__`` from ``__eq__``.
    """
    assert "__hash__" in cls.__dict__
    assert cls.__dict__["__hash__"] is not None
    assert "__eq__" in cls.__dict__

    def ne(self, other):
        return not self.__eq__(other)

    cls.__ne__ = ne
    return cls
Makes sure the class is hashable. Needs a working __eq__ and __hash__ and will add a __ne__.
def command_str(self):
    """Return the command as a single, properly shell-escaped string.

    :return: ``self.command`` unchanged when it is already a string,
        otherwise its parts quoted and joined with spaces
    """
    command = self.command
    if isinstance(command, six.string_types):
        return command
    quoted = [six.moves.shlex_quote(part) for part in command]
    return ' '.join(quoted)
get command to execute as string properly escaped :return: string
def run(self):
    """Run the writer subprocess, hiding the terminal cursor while active.

    Overrides WriterProcess.run to handle KeyboardInterrupt cleanly; the
    cursor is always restored on exit. This is called by
    ``multiprocessing`` in a subprocess — use ``self.start`` instead.
    """
    try:
        Control().cursor_hide().write(file=self.file)
        super().run()
    except KeyboardInterrupt:
        self.stop()
    finally:
        Control().cursor_show().write(file=self.file)
Overrides WriterProcess.run, to handle KeyboardInterrupts better. This should not be called by any user. `multiprocessing` calls this in a subprocess. Use `self.start` to start this instance.
def get_cancel_url(self):
    """Return the cancel URL, falling back to the model's list view."""
    if self.cancel_url:
        return self.cancel_url

    meta = self.get_model_class()._meta
    return reverse('trionyx:model-list', kwargs={
        'app': meta.app_label,
        'model': meta.model_name,
    })
Get cancel url
def create_for_rectangle(self, x, y, width, height):
    """Create a new surface that is a rectangle within this surface.

    Operations drawn to the returned sub-surface are clipped and
    translated onto the target surface; nothing outside its bounds is
    drawn onto the target.

    :param x: x-origin of the sub-surface (device-space units)
    :param y: y-origin of the sub-surface (device-space units)
    :param width: width of the sub-surface (device-space units)
    :param height: height of the sub-surface (device-space units)
    :returns: a new :class:`Surface` object
    """
    return Surface._from_pointer(
        cairo.cairo_surface_create_for_rectangle(
            self._pointer, x, y, width, height),
        incref=False)
Create a new surface that is a rectangle within this surface. All operations drawn to this surface are then clipped and translated onto the target surface. Nothing drawn via this sub-surface outside of its bounds is drawn onto the target surface, making this a useful method for passing constrained child surfaces to library routines that draw directly onto the parent surface, i.e. with no further backend allocations, double buffering or copies. .. note:: As of cairo 1.12, the semantics of subsurfaces have not been finalized yet unless the rectangle is in full device units, is contained within the extents of the target surface, and the target or subsurface's device transforms are not changed. :param x: The x-origin of the sub-surface from the top-left of the target surface (in device-space units) :param y: The y-origin of the sub-surface from the top-left of the target surface (in device-space units) :param width: Width of the sub-surface (in device-space units) :param height: Height of the sub-surface (in device-space units) :type x: float :type y: float :type width: float :type height: float :returns: A new :class:`Surface` object. *New in cairo 1.10.*
def _reschedule(self, node):
    """Maybe schedule new work units on ``node``.

    Shuts the node down when the global work queue is empty; otherwise
    assigns one more unit unless the node already has enough pending.
    """
    if node.shutting_down:
        return

    if not self.workqueue:
        node.shutdown()
        return

    self.log("Number of units waiting for node:", len(self.workqueue))

    # Leave the node alone while it still has a backlog of pending units.
    if self._pending_of(self.assigned_work[node]) > 2:
        return

    self._assign_work_unit(node)
Maybe schedule new items on the node. If there are any globally pending work units left then this will check if the given node should be given any more tests.
def try_mongodb_opts(self, host="localhost", database_name='INGInious'):
    """Try the MongoDB configuration.

    Verifies in turn: connection to the host, access to the database,
    and GridFS availability; a warning is displayed and None returned on
    the first failure, otherwise the database handle is returned.
    """
    try:
        mongo_client = MongoClient(host=host)
    except Exception as e:
        self._display_warning("Cannot connect to MongoDB on host %s: %s" % (host, str(e)))
        return None

    try:
        database = mongo_client[database_name]
    except Exception as e:
        self._display_warning("Cannot access database %s: %s" % (database_name, str(e)))
        return None

    try:
        GridFS(database)
    except Exception as e:
        self._display_warning("Cannot access gridfs %s: %s" % (database_name, str(e)))
        return None

    return database
Try MongoDB configuration
def numpyview(arr, datatype, shape, raw=False):
    """Wrap a multiprocessing shared array as a numpy array of ``shape``.

    :param arr: shared array; when ``raw`` is False it must expose
        ``get_obj()`` (i.e. a synchronized mp.Array)
    :param datatype: numpy dtype (or dtype string) of the elements
    :param shape: target shape for the returned view
    :param raw: read the buffer directly instead of via ``get_obj()``
    """
    dt = n.dtype(datatype)
    buf = arr if raw else arr.get_obj()
    return n.frombuffer(buf, dtype=dt).view(dt).reshape(shape)
Takes mp shared array and returns numpy array with given shape.
def _getbug(self, objid, **kwargs):
    """Fetch a single bug by id.

    Thin wrapper around ``_getbugs`` with ``permissive=False`` so the
    server raises an explicit error when the bug cannot be fetched.
    """
    results = self._getbugs([objid], permissive=False, **kwargs)
    return results[0]
Thin wrapper around _getbugs to handle the slight argument tweaks for fetching a single bug. The main bit is permissive=False, which will tell bugzilla to raise an explicit error if we can't fetch that bug. This logic is called from Bug() too
def unicode_urlencode(obj, charset='utf-8', for_qs=False):
    """URL-escape a single bytestring or unicode string.

    Non-strings are converted to their unicode representation first,
    then encoded with ``charset`` before quoting.

    :param for_qs: quote for a query string — also escape '/' and encode
        spaces as '+'
    """
    if not isinstance(obj, string_types):
        obj = text_type(obj)
    if isinstance(obj, text_type):
        obj = obj.encode(charset)
    # and/or idiom: keep '/' unescaped in path context only
    # (for_qs False -> b'/', for_qs True -> b'').
    safe = not for_qs and b'/' or b''
    rv = text_type(url_quote(obj, safe))
    if for_qs:
        rv = rv.replace('%20', '+')
    return rv
URL escapes a single bytestring or unicode string with the given charset if applicable to URL safe quoting under all rules that need to be considered under all supported Python versions. If non strings are provided they are converted to their unicode representation first.
def stations(self, station, limit=10):
    """Find stations matching the given query.

    Args:
        station (str): search query
        limit (int): maximum number of results
    """
    params = {
        'start': 1,
        'S': station + '?',
        'REQ0JourneyStopsB': limit
    }
    rsp = requests.get(
        'http://reiseauskunft.bahn.de/bin/ajax-getstop.exe/dn', params=params)
    return parse_stations(rsp.text)
Find stations for given queries Args: station (str): search query limit (int): limit number of results
def get_broadcast(self, broadcast_guid, **kwargs):
    """Get a specific broadcast by its guid."""
    raw = self._call(
        'broadcasts/%s' % broadcast_guid,
        params=kwargs,
        content_type='application/json',
    )
    return Broadcast(raw)
Get a specific broadcast by guid