code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def check_video(video, languages=None, age=None, undefined=False):
    """Run the optional checks on *video*; return ``False`` on the first failing one.

    Checks (each skipped when its argument is unset): desired languages
    already present, video older than *age*, and an 'und' language tag present.
    """
    # All wanted languages already exist -> nothing left to do.
    if languages and not (languages - video.subtitle_languages):
        logger.debug('All languages %r exist', languages)
        return False
    # Video exceeds the maximum allowed age.
    elif age and video.age > age:
        logger.debug('Video is older than %r', age)
        return False
    # An 'undefined' language subtitle is already attached.
    elif undefined and Language('und') in video.subtitle_languages:
        logger.debug('Undefined language found')
        return False
    return True
Perform some checks on the `video`. All the checks are optional. Return `False` if any of these checks fails: * `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`. * `video` is older than `age`. * `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`. :param video: video to check. :type video: :class:`~subliminal.video.Video` :param languages: desired languages. :type languages: set of :class:`~babelfish.language.Language` :param datetime.timedelta age: maximum age of the video. :param bool undefined: fail on existing undefined language. :return: `True` if the video passes the checks, `False` otherwise. :rtype: bool
def getTotalPrice(self):
    """Return the price including VAT.

    A missing price or VAT (``None``/falsy) is treated as 0.
    """
    # `x or 0` replaces the old `x and x or 0` anti-idiom (same result).
    price = float(self.getPrice() or 0)
    vat = float(self.getVAT() or 0)
    # total = price + price * vat%
    return price + price * vat / 100
Compute total price including VAT
def _check_delay(self): if self._previous_request_at: dif = round(time.time() - self._previous_request_at, 2) * 1000 if dif < self.requests_delay: time.sleep( (self.requests_delay - dif) / 1000) self._previous_request_at = time.time()
Checks if a delay is needed between requests and sleeps if True
def get_selinux_status():
    """Get SELinux status of the host.

    :return: string, one of "Enforcing", "Permissive", "Disabled"
    """
    # Fails early if the getenforce binary is not available.
    getenforce_command_exists()
    o = run_cmd(["getenforce"], return_output=True).strip()
    logger.debug("SELinux is %r", o)
    return o
get SELinux status of host :return: string, one of Enforcing, Permissive, Disabled
def finish(self, value):
    """Give the future its value and trigger any associated callbacks.

    :param value: the new value for the future
    :raises: AlreadyComplete if the future already has a value
    """
    if self._done.is_set():
        raise errors.AlreadyComplete()
    self._value = value
    # Schedule every registered callback on the backend with the value.
    for cb in self._cbacks:
        backend.schedule(cb, args=(value,))
    self._cbacks = None
    # Wake anything blocked waiting on this future.
    for wait in list(self._waits):
        wait.finish(self)
    self._waits = None
    # Propagate to dependent futures; children are called like weakrefs
    # (presumably weakref objects — confirm), dead ones are skipped.
    for child in self._children:
        child = child()
        if child is None:
            continue
        child._incoming(self, value)
    self._children = None
    self._done.set()
Give the future its value and trigger any associated callbacks :param value: the new value for the future :raises: :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if already complete
def find_safe_starting_point(self):
    """Pick a random (y, x) grid cell kept well clear of every edge.

    Avoids starting in the middle of a blockage near the borders.
    """
    row = random.randint(2, self.grid_height - 4)
    col = random.randint(2, self.grid_width - 4)
    return row, col
finds a place on the grid which is clear on all sides to avoid starting in the middle of a blockage
def get_principal_credit_string_metadata(self):
    """Gets the metadata for the principal credit string.

    return: (osid.Metadata) - metadata for the credit string
    *compliance: mandatory -- This method must be implemented.*
    """
    # Start from the template metadata and overlay the currently-stored value.
    metadata = dict(self._mdata['principal_credit_string'])
    metadata.update({'existing_string_values': self._my_map['principalCreditString']})
    return Metadata(**metadata)
Gets the metadata for the principal credit string. return: (osid.Metadata) - metadata for the credit string *compliance: mandatory -- This method must be implemented.*
def cluster(list_of_texts, num_clusters=3):
    """Cluster a list of texts into a predefined number of clusters.

    :param list_of_texts: a list of untokenized texts
    :param num_clusters: the predefined number of clusters
    :return: a list with the cluster id for each text, e.g. [0, 1, 0, 0, 2, 2, 1]
    """
    pipeline = Pipeline([
        ("vect", CountVectorizer()),
        ("tfidf", TfidfTransformer()),
        ("clust", KMeans(n_clusters=num_clusters))
    ])
    try:
        clusters = pipeline.fit_predict(list_of_texts)
    except ValueError:
        # Vectorization/KMeans can fail on degenerate input; fall back
        # to assigning each text its own cluster id.
        clusters = list(range(len(list_of_texts)))
    return clusters
Cluster a list of texts into a predefined number of clusters. :param list_of_texts: a list of untokenized texts :param num_clusters: the predefined number of clusters :return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
def read(self, fileobj):
    """Read this atom's payload from *fileobj*.

    Returns a ``(complete, data)`` tuple where *complete* says whether
    the full ``datalength`` bytes could be read.
    """
    fileobj.seek(self._dataoffset, 0)
    payload = fileobj.read(self.datalength)
    complete = len(payload) == self.datalength
    return complete, payload
Return if all data could be read and the atom payload
def _parseupload(self, node):
    """Parse CLAM Upload XML Responses. For internal use.

    Accepts a parsed element or raw XML text; raises UploadError or
    ParameterError on server-reported problems, returns True otherwise.
    """
    # Accept raw XML strings as well as already-parsed elements.
    if not isinstance(node, ElementTree._Element):
        try:
            node = clam.common.data.parsexmlstring(node)
        except:
            raise Exception(node)
    if node.tag != 'clamupload':
        raise Exception("Not a valid CLAM upload response")
    for node2 in node:
        if node2.tag == 'upload':
            for subnode in node2:
                if subnode.tag == 'error':
                    raise clam.common.data.UploadError(subnode.text)
                if subnode.tag == 'parameters':
                    if 'errors' in subnode.attrib and subnode.attrib['errors'] == 'yes':
                        errormsg = "The submitted metadata did not validate properly"
                        # Prefer the first parameter-specific error, if any.
                        for parameternode in subnode:
                            if 'error' in parameternode.attrib:
                                errormsg = parameternode.attrib['error']
                                raise clam.common.data.ParameterError(errormsg + " (parameter="+parameternode.attrib['id']+")")
                        raise clam.common.data.ParameterError(errormsg)
    return True
Parse CLAM Upload XML Responses. For internal use
def _mem(self):
    """Record current memory usage (percent) as both a metric and a gauge."""
    value = int(psutil.virtual_memory().percent)
    set_metric("memory", value, category=self.category)
    gauge("memory", value)
Record Memory usage.
def last_available_business_date(self, asset_manager_id, asset_ids, page_no=None, page_size=None):
    """Return the last available business date for the given assets.

    Used to determine the starting date for new data which needs to be
    downloaded from data providers.  This method can only be invoked by
    a system user.

    :raises: requests.HTTPError when the service responds with an error
    """
    self.logger.info('Retrieving last available business dates for assets')
    url = '%s/last-available-business-date' % self.endpoint
    params = {'asset_manager_ids': [asset_manager_id], 'asset_ids': ','.join(asset_ids)}
    if page_no:
        params['page_no'] = page_no
    if page_size:
        params['page_size'] = page_size
    response = self.session.get(url, params=params)
    if response.ok:
        self.logger.info("Received %s assets' last available business date", len(response.json()))
        return response.json()
    else:
        self.logger.error(response.text)
        response.raise_for_status()
Returns the last available business date for the assets so we know the starting date for new data which needs to be downloaded from data providers. This method can only be invoked by system user
def append(self, items):
    """Add some items to this ItemList and save the changes to the server.

    :param items: the items to add (Item objects, item URLs, an ItemList,
        or a single item/URL)
    :returns: the server success message
    :raises: APIError if the API request is not successful
    """
    resp = self.client.add_to_item_list(items, self.url())
    # Re-fetch so local state reflects the server-side addition.
    self.refresh()
    return resp
Add some items to this ItemList and save the changes to the server :param items: the items to add, either as a List of Item objects, an ItemList, a List of item URLs as Strings, a single item URL as a String, or a single Item object :rtype: String :returns: the server success message :raises: APIError if the API request is not successful
def addClass(self, cn):
    """Add the specific class names to the class set and return ``self``.

    Accepts a whitespace-separated string or any iterable of such
    strings; each name is slugified before being stored.
    """
    if cn:
        if isinstance(cn, (tuple, list, set, frozenset)):
            # Recurse for each member of an iterable argument.
            add = self.addClass
            for c in cn:
                add(c)
        else:
            classes = self._classes
            if classes is None:
                # NOTE(review): only self._extra['classes'] is assigned here,
                # never self._classes directly — presumably _classes reads
                # through _extra; confirm.
                self._extra['classes'] = classes = set()
            add = classes.add
            for cn in cn.split():
                add(slugify(cn))
    return self
Add the specific class names to the class set and return ``self``.
def compare(self, dn, attr, value):
    """Return True when entry *dn*'s *attr* equals *value*.

    Convenience boolean wrapper for the ldap library's ``compare_s``,
    which reports 1 (match) or 0 (no match).
    """
    result = self.connection.compare_s(dn, attr, value)
    return result == 1
Compare the ``attr`` of the entry ``dn`` with given ``value``. This is a convenience wrapper for the ldap library's ``compare`` function that returns a boolean value instead of 1 or 0.
def destroy(self, folder=None):
    """Destroy the environment and the subprocesses; return saved info."""
    # Placeholder means (three zero-triples) for the final save.
    ameans = [(0, 0, 0) for _ in range(3)]
    ret = [self.save_info(folder, ameans)]
    # Stop the slave processes before tearing the worker pool down.
    aiomas.run(until=self.stop_slaves(folder))
    self._pool.close()
    self._pool.terminate()
    self._pool.join()
    self._env.shutdown()
    return ret
Destroy the environment and the subprocesses.
def check_secret(self, secret):
    """Return True when *secret* matches the known secret string.

    Some mechanisms override this to control how the comparison is made.
    Uses a constant-time comparison when available.
    """
    try:
        # Timing-safe comparison.
        return hmac.compare_digest(secret, self.secret)
    except AttributeError:
        # compare_digest unavailable (very old Python): plain equality.
        return secret == self.secret
Checks if the secret string used in the authentication attempt matches the "known" secret string. Some mechanisms will override this method to control how this comparison is made. Args: secret: The secret string to compare against what was used in the authentication attempt. Returns: True if the given secret matches the authentication attempt.
def usable_cpu_count():
    """Get number of CPUs usable by the current process.

    Takes cpuset restrictions into consideration where possible.

    Returns
    -------
    int
    """
    # Best source: sched_getaffinity honours cpusets (Linux only).
    try:
        return len(os.sched_getaffinity(0))
    except AttributeError:
        pass
    # Next best: psutil affinity, where the platform supports it.
    try:
        return len(psutil.Process().cpu_affinity())
    except AttributeError:
        # Last resort: raw CPU count, ignoring affinity restrictions.
        return os.cpu_count()
Get number of CPUs usable by the current process. Takes into consideration cpusets restrictions. Returns ------- int
def scroll_down(self):
    """Scroll display down one line.

    Rows scroll_row_start..scroll_row_end are 1-based; each row in the
    region is shifted into the row below it (deep-copied so the moved
    rows stay independent of the originals).
    """
    first = self.scroll_row_start - 1
    last = self.scroll_row_end - 1
    self.w[first + 1:last + 1] = copy.deepcopy(self.w[first:last])
Scroll display down one line.
def loads(self, src):
    """Compile css from an scss string and return the result.

    :param src: scss source, text or bytes
    """
    assert isinstance(src, (unicode_, bytes_))
    nodes = self.scan(src.strip())
    # parse() mutates the scanned nodes in place.
    self.parse(nodes)
    return ''.join(map(str, nodes))
Compile css from scss string.
def waitForAllConnectionsToClose(self):
    """Wait for all currently-open connections to enter the 'CLOSED' state.

    Currently this is only usable from test fixtures.
    """
    # Nothing open: stop immediately.
    if not self._connections:
        return self._stop()
    # Otherwise stop once the all-closed deferred fires (success or failure).
    return self._allConnectionsClosed.deferred().addBoth(self._stop)
Wait for all currently-open connections to enter the 'CLOSED' state. Currently this is only usable from test fixtures.
def statistical_distances(samples1, samples2, earth_mover_dist=True, energy_dist=True):
    """Compute measures of the statistical distance between two 1d samples.

    Always includes the Kolmogorov-Smirnov p-value and statistic, then
    optionally the Earth mover's (Wasserstein) distance and the energy
    distance.

    Returns a 1d numpy array of the selected measures, in that order.
    """
    ks_result = scipy.stats.ks_2samp(samples1, samples2)
    distances = [ks_result.pvalue, ks_result.statistic]
    if earth_mover_dist:
        distances.append(scipy.stats.wasserstein_distance(samples1, samples2))
    if energy_dist:
        distances.append(scipy.stats.energy_distance(samples1, samples2))
    return np.asarray(distances)
Compute measures of the statistical distance between samples. Parameters ---------- samples1: 1d array samples2: 1d array earth_mover_dist: bool, optional Whether or not to compute the Earth mover's distance between the samples. energy_dist: bool, optional Whether or not to compute the energy distance between the samples. Returns ------- 1d array
def check(self):
    """Type check this object.

    Interpolates first; returns a failing TypeCheck when interpolation
    is impossible, otherwise delegates to the configured checker.
    """
    try:
        si, uninterp = self.interpolate()
    except (Object.CoercionError, MustacheParser.Uninterpolatable) as e:
        return TypeCheck(False, "Unable to interpolate: %s" % e)
    return self.checker(si)
Type check this object.
def init_region_config(self, region):
    """Initialize the configuration object for a region.

    :param region: Name of the region
    """
    config = self.region_config_class(
        region_name=region,
        resource_types=self.resource_types,
    )
    self.regions[region] = config
Initialize the region's configuration :param region: Name of the region
def _forward(self, x_dot_parameters):
    """Helper to calculate the forward weights for this lattice."""
    n_states = self.state_machine.n_states
    return forward(self._lattice, x_dot_parameters, n_states)
Helper to calculate the forward weights.
def should_display_warnings_for(to_type):
    """Central policy deciding whether conversion warnings are shown for *to_type*."""
    # Types without a module are unknown: warn by default.
    if not hasattr(to_type, '__module__'):
        return True
    module = to_type.__module__
    # Silence builtins, parsyfiles' own types, and DataFrame.
    if module in {'builtins'} or module.startswith('parsyfiles') \
            or to_type.__name__ in {'DataFrame'}:
        return False
    # Primitive-like types are silenced as well.
    if issubclass(to_type, (int, str, float, bool)):
        return False
    return True
Central method where we control whether warnings should be displayed
def max_length_discard(records, max_length):
    """Discard any records that are longer than *max_length*.

    :param records: iterable of sequence records (each exposing ``id``)
    :param max_length: maximum allowed record length
    :yields: records whose length is <= ``max_length``
    """
    # BUG FIX: the original log line lost its value placeholder
    # ('discarding records longer than .'); include the threshold.
    logging.info('Applying _max_length_discard generator: '
                 'discarding records longer than %d.', max_length)
    for record in records:
        if len(record) > max_length:
            logging.debug('Discarding long sequence: %s, length=%d',
                          record.id, len(record))
        else:
            yield record
Discard any records that are longer than max_length.
def put(self, ndef_message, timeout=1.0):
    """Send an NDEF message to the server.

    Temporarily connects to the default SNEP server if the client is
    not yet connected.

    .. deprecated:: 0.13
       Use :meth:`put_records` or :meth:`put_octets`.
    """
    if not self.socket:
        try:
            self.connect('urn:nfc:sn:snep')
        except nfc.llcp.ConnectRefused:
            return False
        else:
            # We opened the connection, so we must close it afterwards.
            self.release_connection = True
    else:
        self.release_connection = False
    try:
        # SNEP PUT request: version/opcode b'\x10\x02', 32-bit big-endian
        # payload length, then the message octets.
        # NOTE(review): concatenates bytes with str(ndef_message) —
        # Python 2 era code; would fail on Python 3. Confirm target version.
        ndef_msgsize = struct.pack('>L', len(str(ndef_message)))
        snep_request = b'\x10\x02' + ndef_msgsize + str(ndef_message)
        if send_request(self.socket, snep_request, self.send_miu):
            response = recv_response(self.socket, 0, timeout)
            if response is not None:
                # 0x81 is the SNEP "Success" response code.
                if response[1] != 0x81:
                    raise SnepError(response[1])
                return True
        return False
    finally:
        if self.release_connection:
            self.close()
Send an NDEF message to the server. Temporarily connects to the default SNEP server if the client is not yet connected. .. deprecated:: 0.13 Use :meth:`put_records` or :meth:`put_octets`.
def check_for_lane_permission(self):
    """Check the current user's permission and relations for the active lane.

    Called on lane changes.  Raises HTTPError(403) when the user lacks
    the lane permission or does not satisfy the lane relations.
    """
    if self.current.lane_permission:
        log.debug("HAS LANE PERM: %s" % self.current.lane_permission)
        perm = self.current.lane_permission
        if not self.current.has_permission(perm):
            raise HTTPError(403, "You don't have required lane permission: %s" % perm)
    if self.current.lane_relations:
        context = self.get_pool_context()
        log.debug("HAS LANE RELS: %s" % self.current.lane_relations)
        try:
            # NOTE(review): lane_relations is eval'ed against the pool
            # context — it must only come from trusted workflow definitions.
            cond_result = eval(self.current.lane_relations, context)
        except:
            log.exception("CONDITION EVAL ERROR : %s || %s" % (
                self.current.lane_relations, context))
            raise
        if not cond_result:
            log.debug("LANE RELATION ERR: %s %s" % (self.current.lane_relations, context))
            raise HTTPError(403, "You aren't qualified for this lane: %s" % self.current.lane_relations)
One or more permissions can be associated with a lane of a workflow. In a similar way, a lane can be restricted with relation to other lanes of the workflow. This method is called on lane changes and checks that the user has the required permissions and relations. Raises: HTTPError: with status 403 if the current user hasn't got the required permissions and proper relations
def _operator(self, op, close_group=False):
    """Add a boolean operator between terms.

    A term must have been added before calling this; the operator
    helpers usually call this for you.

    Arguments:
        op (str): The operator to add. Must be in the OP_LIST.
        close_group (bool): If ``True``, end the current parenthetical
            group and start a new one; otherwise stay in the current group.

    Returns:
        SearchHelper: Self
    """
    op = op.upper().strip()
    if op not in OP_LIST:
        raise ValueError("Error: '{}' is not a valid operator.".format(op))
    # ") OP (" splits groups; " OP " continues the current one.
    joiner = ") {} (".format(op) if close_group else " {} ".format(op)
    self.__query["q"] += joiner
    return self
Add an operator between terms. There must be a term added before using this method. All operators have helpers, so this method is usually not necessary to directly invoke. Arguments: op (str): The operator to add. Must be in the OP_LIST. close_group (bool): If ``True``, will end the current parenthetical group and start a new one. If ``False``, will continue current group. Example:: "(foo AND bar)" is one group. "(foo) AND (bar)" is two groups. Returns: SearchHelper: Self
def update_subnet(self, subnet, body=None):
    """Update a subnet via a PUT on its resource path."""
    path = self.subnet_path % subnet
    return self.put(path, body=body)
Updates a subnet.
def price(self, from_=None, **kwargs):
    """Check pricing for a new outbound message.

    A synonym for the "message" command with dummy parameters set; all
    other message keyword arguments are forwarded as query parameters.

    :param str from_: Allowed Sender ID (phone number or alphanumeric).
    """
    if from_:
        # The public API parameter is named "from", a Python keyword.
        kwargs["from"] = from_
    uri = "{}/{}".format(self.uri, "price")
    response, instance = self.request("GET", uri, params=kwargs)
    return instance
Check pricing for a new outbound message. An useful synonym for "message" command with "dummy" parameters set to true. :Example: message = client.messages.price(from_="447624800500", phones="999000001", text="Hello!", lists="1909100") :param str from: One of allowed Sender ID (phone number or alphanumeric sender ID). :param str text: Message text. Required if templateId is not set. :param str templateId: Template used instead of message text. Required if text is not set. :param str sendingTime: Message sending time in unix timestamp format. Default is now. Optional (required with rrule set). :param str contacts: Contacts ids, separated by comma, message will be sent to. :param str lists: Lists ids, separated by comma, message will be sent to. :param str phones: Phone numbers, separated by comma, message will be sent to. :param int cutExtra: Should sending method cut extra characters which not fit supplied partsCount or return 400 Bad request response instead. Default is false. :param int partsCount: Maximum message parts count (TextMagic allows sending 1 to 6 message parts). Default is 6. :param str referenceId: Custom message reference id which can be used in your application infrastructure. :param str rrule: iCal RRULE parameter to create recurrent scheduled messages. When used, sendingTime is mandatory as start point of sending. :param int dummy: If 1, just return message pricing. Message will not send.
def intersection(self, other):
    """Return a new DiscreteSet with elements present in both sets.

    :param DiscreteSet other: Set to intersect with
    :rtype: DiscreteSet
    """
    # "everything" acts as the universal set: intersecting with it
    # yields the other operand's elements unchanged.
    if self.everything and other.everything:
        return DiscreteSet()
    if self.everything:
        return DiscreteSet(other.elements)
    if other.everything:
        return DiscreteSet(self.elements)
    return DiscreteSet(self.elements.intersection(other.elements))
Return a new DiscreteSet with the intersection of the two sets, i.e. all elements that are in both self and other. :param DiscreteSet other: Set to intersect with :rtype: DiscreteSet
def write_nochr_reads(in_file, out_file, config):
    """Write a BAM file of reads that are not mapped on a reference chromosome.

    This is useful for maintaining non-mapped reads in parallel
    processes that split processing by chromosome.
    """
    if not file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            samtools = config_utils.get_program("samtools", config)
            # -f 4: keep only reads with the SAM "unmapped" flag set.
            cmd = "{samtools} view -b -f 4 {in_file} > {tx_out_file}"
            do.run(cmd.format(**locals()), "Select unmapped reads")
    return out_file
Write a BAM file of reads that are not mapped on a reference chromosome. This is useful for maintaining non-mapped reads in parallel processes that split processing by chromosome.
def load_images(url, format='auto', with_path=True, recursive=True, ignore_failure=True, random_order=False):
    """Load images (JPEG/PNG) from a directory into an SFrame.

    Thin wrapper over the native extension implementation; see the
    package documentation for the full parameter description.
    """
    from ... import extensions as _extensions
    from ...util import _make_internal_url
    return _extensions.load_images(url, format, with_path, recursive, ignore_failure, random_order)
Loads images from a directory. JPEG and PNG images are supported. Parameters ---------- url : str The string of the path where all the images are stored. format : {'PNG' | 'JPG' | 'auto'}, optional The format of the images in the directory. The default 'auto' parameter value tries to infer the image type from the file extension. If a format is specified, all images must be of that format. with_path : bool, optional Indicates whether a path column is added to the SFrame. If 'with_path' is set to True, the returned SFrame contains a 'path' column, which holds a path string for each Image object. recursive : bool, optional Indicates whether 'load_images' should do recursive directory traversal, or a flat directory traversal. ignore_failure : bool, optional If true, prints warning for failed images and keep loading the rest of the images. random_order : bool, optional Load images in random order. Returns ------- out : SFrame Returns an SFrame with either an 'image' column or both an 'image' and a 'path' column. The 'image' column is a column of Image objects. If with_path is True, there is also a 'path' column which contains the image path for each of each corresponding Image object. Examples -------- >>> url ='https://static.turi.com/datasets/images/nested' >>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False, ... recursive=True)
def Header(self):
    """Fetch the header name of this Value, forcing each option's value first."""
    # Evaluate every option for its side effect; the results are unused.
    for option in self.options:
        option.OnGetValue()
    return self.name
Fetch the header name of this Value.
def publish(self, topic, *args, **kwargs):
    """Publish an event to *topic* via the underlying async session."""
    session = self._async_session
    return session.publish(topic, *args, **kwargs)
Publish an event to a topic. Replace :meth:`autobahn.wamp.interface.IApplicationSession.publish`
def convert_tensor(input_, device=None, non_blocking=False):
    """Move tensors found in *input_* to *device* (no-op when device is None)."""
    def _move(tensor):
        if device:
            return tensor.to(device=device, non_blocking=non_blocking)
        return tensor
    return apply_to_tensor(input_, _move)
Move tensors to relevant device.
def make_empty(self, axes=None):
    """Return an empty BlockManager with the items axis of len 0."""
    if axes is None:
        # Keep the non-items axes, emptying only the first axis.
        axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
    # A single-axis (Series-like) manager stores a flat ndarray of blocks.
    if self.ndim == 1:
        blocks = np.array([], dtype=self.array_dtype)
    else:
        blocks = []
    return self.__class__(blocks, axes)
return an empty BlockManager with the items axis of len 0
def _match(filtered, matcher):
    """Old method to find matches in a set of filtered identities.

    Greedily groups identities: each identity absorbs every existing
    group it matches (by uuid or via *matcher*), otherwise it starts a
    group of its own.  Returns a list of groups (lists of identities).
    """
    def match_filtered_identities(x, ids, matcher):
        # True when x matches any identity in ids, by uuid or matcher.
        for y in ids:
            if x.uuid == y.uuid:
                return True
            if matcher.match_filtered_identities(x, y):
                return True
        return False
    matched = []
    while filtered:
        candidates = []
        no_match = []
        x = filtered.pop(0)
        # Merge every group that matches x into x's candidate group.
        while matched:
            ids = matched.pop(0)
            if match_filtered_identities(x, ids, matcher):
                candidates += ids
            else:
                no_match.append(ids)
        candidates.append(x)
        matched = [candidates] + no_match
    return matched
Old method to find matches in a set of filtered identities.
def _kick(self):
    """Slightly adjust all coordinates in a Compound.

    Displaces each particle by a uniform random offset in
    [-0.005, 0.005) per axis to kick it out of local energy minima,
    then updates port locations relative to the original coordinates.
    """
    xyz_init = self.xyz
    for particle in self.particles():
        particle.pos += (np.random.rand(3,) - 0.5) / 100
    self._update_port_locations(xyz_init)
Slightly adjust all coordinates in a Compound Provides a slight adjustment to coordinates to kick them out of local energy minima.
def _cal_color(self, value, color_index):
    """Blend between two adjacent palette colors based on *value*."""
    lower = self._domain[color_index]
    span = self._domain[color_index + 1] - lower
    try:
        factor = (value - lower) / span
    except ZeroDivisionError:
        # Degenerate (zero-width) domain segment: use the lower color.
        factor = 0
    start = self.colors[color_index]
    end = self.colors[color_index + 1]
    red = round(factor * (end.r - start.r) + start.r)
    green = round(factor * (end.g - start.g) + start.g)
    blue = round(factor * (end.b - start.b) + start.b)
    return Color(red, green, blue)
Blend between two colors based on input value.
def add_node(self, binary_descriptor):
    """Add a node to the sensor_graph using a binary node descriptor.

    Args:
        binary_descriptor (bytes): An encoded binary node descriptor.

    Returns:
        int: A packed error code (Error.NO_ERROR on success).
    """
    try:
        node_string = parse_binary_descriptor(binary_descriptor)
    except:
        self._logger.exception("Error parsing binary node descriptor: %s", binary_descriptor)
        return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM)
    # Map each graph-level failure onto its packed error code.
    try:
        self.graph.add_node(node_string)
    except NodeConnectionError:
        return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE)
    except ProcessingFunctionError:
        return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION)
    except ResourceUsageError:
        return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE)
    return Error.NO_ERROR
Add a node to the sensor_graph using a binary node descriptor. Args: binary_descriptor (bytes): An encoded binary node descriptor. Returns: int: A packed error code.
def main():
    """Upgrade the firmware of the J-Links connected to a Windows device.

    Returns:
        None.

    Raises:
        OSError: if no J-Link library can be found.
    """
    windows_libraries = list(pylink.Library.find_library_windows())
    latest_library = None
    for lib in windows_libraries:
        # A library under a 'JLinkARM' directory is the canonical install.
        if os.path.dirname(lib).endswith('JLinkARM'):
            latest_library = lib
            break
        elif latest_library is None:
            latest_library = lib
        elif os.path.dirname(lib) > os.path.dirname(latest_library):
            # Otherwise prefer the lexicographically-latest versioned path.
            latest_library = lib
    if latest_library is None:
        raise OSError('No J-Link library found.')
    library = pylink.Library(latest_library)
    jlink = pylink.JLink(lib=library)
    print('Found version: %s' % jlink.version)
    for emu in jlink.connected_emulators():
        jlink.disable_dialog_boxes()
        jlink.open(serial_no=emu.SerialNumber)
        jlink.sync_firmware()
        print('Updated emulator with serial number %s' % emu.SerialNumber)
    return None
Upgrades the firmware of the J-Links connected to a Windows device. Returns: None. Raises: OSError: if there are no J-Link software packages.
def _get_graphics(dom): out = {'autoport': 'None', 'keymap': 'None', 'listen': 'None', 'port': 'None', 'type': 'None'} doc = ElementTree.fromstring(dom.XMLDesc(0)) for g_node in doc.findall('devices/graphics'): for key, value in six.iteritems(g_node.attrib): out[key] = value return out
Get domain graphics from a libvirt domain object.
def init_with_context(self, context):
    """Initialize the menu items.

    Adds the dashboard link, bookmarks, one entry per enabled
    application group, and a return-to-site item.
    """
    site_name = get_admin_site_name(context)
    self.children += [
        items.MenuItem(_('Dashboard'), reverse('{0}:index'.format(site_name))),
        items.Bookmarks(),
    ]
    for title, kwargs in get_application_groups():
        # Groups default to enabled unless explicitly disabled.
        if kwargs.get('enabled', True):
            self.children.append(CmsModelList(title, **kwargs))
    self.children += [
        ReturnToSiteItem()
    ]
Initialize the menu items.
def make_random_gaussians_table(n_sources, param_ranges, random_state=None):
    """Make a Table of randomly generated 2D Gaussian source parameters.

    Parameters are drawn uniformly from the (lower, upper) ranges in
    ``param_ranges`` (a dict keyed by Gaussian2D parameter name or
    'flux').  If 'flux' is given but 'amplitude' is not, amplitudes are
    derived from the fluxes.  See also make_gaussian_sources_image.
    """
    sources = make_random_models_table(n_sources, param_ranges, random_state=random_state)
    # Convert flux to amplitude when amplitude was not sampled directly.
    if 'flux' in param_ranges and 'amplitude' not in param_ranges:
        model = Gaussian2D(x_stddev=1, y_stddev=1)
        if 'x_stddev' in sources.colnames:
            xstd = sources['x_stddev']
        else:
            xstd = model.x_stddev.value
        if 'y_stddev' in sources.colnames:
            ystd = sources['y_stddev']
        else:
            ystd = model.y_stddev.value
        sources = sources.copy()
        # Total flux of a 2D Gaussian = 2*pi*amplitude*x_stddev*y_stddev.
        sources['amplitude'] = sources['flux'] / (2. * np.pi * xstd * ystd)
    return sources
Make a `~astropy.table.Table` containing randomly generated parameters for 2D Gaussian sources. Each row of the table corresponds to a Gaussian source whose parameters are defined by the column names. The parameters are drawn from a uniform distribution over the specified input ranges. The output table can be input into :func:`make_gaussian_sources_image` to create an image containing the 2D Gaussian sources. Parameters ---------- n_sources : float The number of random Gaussian sources to generate. param_ranges : dict The lower and upper boundaries for each of the `~astropy.modeling.functional_models.Gaussian2D` parameters as a `dict` mapping the parameter name to its ``(lower, upper)`` bounds. The dictionary keys must be valid `~astropy.modeling.functional_models.Gaussian2D` parameter names or ``'flux'``. If ``'flux'`` is specified, but not ``'amplitude'`` then the 2D Gaussian amplitudes will be calculated and placed in the output table. If both ``'flux'`` and ``'amplitude'`` are specified, then ``'flux'`` will be ignored. Model parameters not defined in ``param_ranges`` will be set to the default value. random_state : int or `~numpy.random.RandomState`, optional Pseudo-random number generator state used for random sampling. Returns ------- table : `~astropy.table.Table` A table of parameters for the randomly generated Gaussian sources. Each row of the table corresponds to a Gaussian source whose parameters are defined by the column names. See Also -------- make_random_models_table, make_gaussian_sources_image Notes ----- To generate identical parameter values from separate function calls, ``param_ranges`` must be input as an `~collections.OrderedDict` with the same parameter ranges and ``random_state`` must be the same. Examples -------- >>> from collections import OrderedDict >>> from photutils.datasets import make_random_gaussians_table >>> n_sources = 5 >>> param_ranges = [('amplitude', [500, 1000]), ... ('x_mean', [0, 500]), ... ('y_mean', [0, 300]), ... 
('x_stddev', [1, 5]), ... ('y_stddev', [1, 5]), ... ('theta', [0, np.pi])] >>> param_ranges = OrderedDict(param_ranges) >>> sources = make_random_gaussians_table(n_sources, param_ranges, ... random_state=12345) >>> for col in sources.colnames: ... sources[col].info.format = '%.8g' # for consistent table output >>> print(sources) amplitude x_mean y_mean x_stddev y_stddev theta --------- --------- --------- --------- --------- ---------- 964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859 658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889 591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615 602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422 783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298 To specifying the flux range instead of the amplitude range: >>> param_ranges = [('flux', [500, 1000]), ... ('x_mean', [0, 500]), ... ('y_mean', [0, 300]), ... ('x_stddev', [1, 5]), ... ('y_stddev', [1, 5]), ... ('theta', [0, np.pi])] >>> param_ranges = OrderedDict(param_ranges) >>> sources = make_random_gaussians_table(n_sources, param_ranges, ... random_state=12345) >>> for col in sources.colnames: ... sources[col].info.format = '%.8g' # for consistent table output >>> print(sources) flux x_mean y_mean x_stddev y_stddev theta amplitude --------- --------- --------- --------- --------- ---------- --------- 964.80805 297.77235 224.31444 3.6256447 3.5699013 2.2923859 11.863685 658.18778 482.25726 288.39202 4.2392502 3.8698145 3.1227889 6.3854388 591.95941 326.58855 2.5164894 4.4887037 2.870396 2.1264615 7.3122209 602.28014 374.45332 31.933313 4.8585904 2.3023387 2.4844422 8.5691781 783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298 11.611707 Note that in this case the output table contains both a flux and amplitude column. The flux column will be ignored when generating an image of the models using :func:`make_gaussian_sources_image`.
def verify_url_path(url_path, query_args, secret_key, salt_arg='_', max_expiry=None, digest=None):
    """Verify a URL path is correctly signed.

    :param url_path: URL path
    :param query_args: Arguments that make up the query string (mutated:
        'signature' and 'expires' are popped)
    :param secret_key: Signing key
    :param salt_arg: Argument required for salt (set to None to disable)
    :param max_expiry: Maximum allowed expiry window, seconds (None disables)
    :param digest: Digest function to use; default is sha256 from hashlib
    :rtype: bool
    :raises: SigningError on any verification failure
    """
    try:
        supplied_signature = query_args.pop('signature')
    except KeyError:
        raise SigningError("Signature missing.")
    if salt_arg is not None and salt_arg not in query_args:
        raise SigningError("No salt used.")
    if max_expiry is not None and 'expires' not in query_args:
        raise SigningError("Expiry time is required.")
    signature = _generate_signature(url_path, secret_key, query_args, digest)
    # Constant-time compare to avoid timing side channels.
    if not hmac.compare_digest(signature, supplied_signature):
        raise SigningError('Signature not valid.')
    try:
        expiry_time = int(query_args.pop('expires'))
    except KeyError:
        # No expiry supplied (and none required at this point).
        pass
    except ValueError:
        raise SigningError("Invalid expiry value.")
    else:
        expiry_delta = expiry_time - time()
        if expiry_delta < 0:
            raise SigningError("Signature has expired.")
        if max_expiry and expiry_delta > max_expiry:
            raise SigningError("Expiry time out of range.")
    return True
Verify a URL path is correctly signed. :param url_path: URL path :param secret_key: Signing key :param query_args: Arguments that make up the query string :param salt_arg: Argument required for salt (set to None to disable) :param max_expiry: Maximum length of time an expiry value can be for (set to None to disable) :param digest: Specify the digest function to use; default is sha256 from hashlib :rtype: bool :raises: SigningError
def query_records_no_auth(self, name, query=''):
    """Query *name* records without authorization; return the raw response."""
    url = self.api_server + '/api/' + name + "/" + query
    return requests.get(url)
Query records without authorization
def reset_all(self, suppress_logging=False):
    """Reset every established connection by disconnecting and reconnecting."""
    # Snapshot the names first: reset() may mutate self.pools.
    for name in list(self.pools):
        self.reset(name, suppress_logging)
iterates thru the list of established connections and resets them by disconnecting and reconnecting
def debug(self):
    """Retrieve the debug information from the charmstore as parsed JSON."""
    status_url = '{}/debug/status'.format(self.url)
    return self._get(status_url).json()
Retrieve the debug information from the charmstore.
def _to_dict(objects):
    """Potentially interpret a string as JSON for usage with mongo."""
    if isinstance(objects, six.string_types):
        try:
            objects = salt.utils.json.loads(objects)
        except ValueError as err:
            log.error("Could not parse objects: %s", err)
            raise err
    return objects
Potentially interprets a string as JSON for usage with mongo
def _set_id_field(new_class): if new_class.meta_.declared_fields: try: new_class.meta_.id_field = next( field for _, field in new_class.meta_.declared_fields.items() if field.identifier) except StopIteration: new_class._create_id_field()
Lookup the id field for this entity and assign
def _cmpFormatRanges(a, b): if a.format == b.format and \ a.start == b.start and \ a.length == b.length: return 0 else: return cmp(id(a), id(b))
PyQt does not define proper comparison for QTextLayout.FormatRange Define it to check correctly, if formats has changed. It is important for the performance
def _check_compatible_with(
    self,
    other: Union[Period, Timestamp, Timedelta, NaTType],
) -> None:
    """Verify that `self` and `other` are compatible.

    * DatetimeArray verifies that the timezones (if any) match
    * PeriodArray verifies that the freq matches
    * Timedelta has no verification

    In each case, NaT is considered compatible.

    Parameters
    ----------
    other

    Raises
    ------
    Exception
    """
    # Abstract: subclasses must implement their own compatibility check.
    raise AbstractMethodError(self)
Verify that `self` and `other` are compatible. * DatetimeArray verifies that the timezones (if any) match * PeriodArray verifies that the freq matches * Timedelta has no verification In each case, NaT is considered compatible. Parameters ---------- other Raises ------ Exception
def _ParseTimestamp(self, parser_mediator, row):
    """Provide a timestamp for the given row.

    Prefers the POSIX 'timestamp' column (UTC, second precision) when
    present; otherwise falls back onto the local-timezone 'date'/'time'
    columns.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        row (dict[str, str]): fields of a single row, as specified in COLUMNS.

    Returns:
        dfdatetime.interface.DateTimeValue: date and time value, or None
        when the fallback date/time strings cannot be parsed.
    """
    timestamp = row.get('timestamp', None)
    if timestamp is not None:
        try:
            timestamp = int(timestamp, 10)
        except (ValueError, TypeError):
            parser_mediator.ProduceExtractionWarning(
                'Unable to parse timestamp value: {0!s}'.format(timestamp))
        # NOTE(review): on conversion failure the unparsed value still
        # reaches PosixTime — confirm this is intended.
        return dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    try:
        return self._ConvertToTimestamp(row['date'], row['time'])
    except ValueError as exception:
        parser_mediator.ProduceExtractionWarning((
            'Unable to parse time string: "{0:s} {1:s}" with error: '
            '{2!s}').format(repr(row['date']), repr(row['time']), exception))
Provides a timestamp for the given row. If the Trend Micro log comes from a version that provides a POSIX timestamp, use that directly; it provides the advantages of UTC and of second precision. Otherwise fall back onto the local-timezone date and time. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: dfdatetime.interface.DateTimeValue: date and time value.
def range(self, value):
    """Estimate an appropriate sensitivity range for *value*."""
    self._buffer.append(abs(value))
    mean = sum(self._buffer) / len(self._buffer)
    # First configured range whose scaled bound exceeds the running mean,
    # saturating at the largest range.
    estimate = next(
        (r for r in self.ranges if mean < self.scale * r),
        self.ranges[-1],
    )
    return self._mapping[estimate] if self._mapping else estimate
Estimates an appropriate sensitivity range.
def wildcards_overlap(name1, name2):
    """Return true if two wildcard patterns can match the same string."""
    # Both patterns exhausted together: they matched the same string.
    if not name1 and not name2:
        return True
    # Only one exhausted: no common match on this branch.
    if not name1 or not name2:
        return False
    # Try every way the heads can match and recurse on the tails.
    return any(
        wildcards_overlap(name1[n1:], name2[n2:])
        for n1, n2 in _character_matches(name1, name2)
    )
Return true if two wildcard patterns can match the same string.
def get_fetch_response(self, res):
    """Make a requests Response look more like an endpoints Response.

    res -- requests Response -- mutated in place: gains .code, wrapped
    Headers, ._body (parsed payload), and .body (decoded text), then
    returned.
    """
    res.code = res.status_code
    res.headers = Headers(res.headers)
    res._body = None
    res.body = ''
    body = res.content
    if body:
        # JSON payloads are parsed; everything else kept as raw bytes.
        if self.is_json(res.headers):
            res._body = res.json()
        else:
            res._body = body
        res.body = String(body, res.encoding)
    return res
the goal of this method is to make the requests object more endpoints like res -- requests Response -- the native requests response instance, we manipulate it a bit to make it look a bit more like the internal endpoints.Response object
def get_custom_fields(self):
    """Return the queryset of custom fields attached to this model."""
    model_type = ContentType.objects.get_for_model(self)
    return CustomField.objects.filter(content_type=model_type)
Return a queryset of the custom fields defined for this model
def evpn_prefix_del(self, route_type, route_dist, esi=0,
                    ethernet_tag_id=None, mac_addr=None, ip_addr=None,
                    ip_prefix=None):
    """Delete an advertised EVPN route.

    ``route_type`` specifies one of the EVPN route type names.
    ``route_dist`` specifies a route distinguisher value.
    ``esi`` specifies the Ethernet Segment Identifier.
    ``ethernet_tag_id`` specifies the Ethernet Tag ID.
    ``mac_addr`` specifies a MAC address to advertise.
    ``ip_addr`` specifies an IPv4 or IPv6 address to advertise.
    ``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise.
    """
    # Extra keyword arguments required by each route type.
    extra_args = {
        EVPN_ETH_AUTO_DISCOVERY: {
            EVPN_ESI: esi,
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
        },
        EVPN_MAC_IP_ADV_ROUTE: {
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            MAC_ADDR: mac_addr,
            IP_ADDR: ip_addr,
        },
        EVPN_MULTICAST_ETAG_ROUTE: {
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            IP_ADDR: ip_addr,
        },
        EVPN_ETH_SEGMENT: {
            EVPN_ESI: esi,
            IP_ADDR: ip_addr,
        },
        EVPN_IP_PREFIX_ROUTE: {
            EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            IP_PREFIX: ip_prefix,
        },
    }
    if route_type not in extra_args:
        raise ValueError('Unsupported EVPN route type: %s' % route_type)

    kwargs = {EVPN_ROUTE_TYPE: route_type, ROUTE_DISTINGUISHER: route_dist}
    kwargs.update(extra_args[route_type])
    call('evpn_prefix.delete_local', **kwargs)
This method deletes an advertised EVPN route.

``route_type`` specifies one of the EVPN route type names.

``route_dist`` specifies a route distinguisher value.

``esi`` is a value that specifies the Ethernet Segment Identifier.

``ethernet_tag_id`` specifies the Ethernet Tag ID.

``mac_addr`` specifies a MAC address to advertise.

``ip_addr`` specifies an IPv4 or IPv6 address to advertise.

``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise.
def hashable(val):
    """Return `val` unchanged if hashable, else its string representation.

    Parameters
    ----------
    val: object
        Any (possibly unhashable) python object.

    Returns
    -------
    val or string
        The given `val` if it is hashable (or None), otherwise
        ``repr(val)``.
    """
    if val is None:
        return None
    try:
        hash(val)
        return val
    except TypeError:
        return repr(val)
Test if `val` is hashable and if not, get it's string representation Parameters ---------- val: object Any (possibly not hashable) python object Returns ------- val or string The given `val` if it is hashable or it's string representation
def _render_resource(self, resource):
    """Render a resource's top-level members per the JSON API spec.

    Top-level members are: 'id', 'type', 'attributes', 'relationships'.

    :param resource: an instance of ``self.model``; falsy values yield
        None.
    :raises TypeError: when the resource is not of the serializer's
        model type.
    :raises AttributeError: when the resource lacks the configured
        primary key attribute (propagates naturally; the previous
        ``try/except AttributeError: raise`` was a no-op and was removed).
    """
    if not resource:
        return None
    if not isinstance(resource, self.model):
        raise TypeError(
            'Resource(s) type must be the same as the serializer model type.')

    return {
        'id': str(getattr(resource, self.primary_key)),
        'type': resource.__tablename__,
        'attributes': self._render_attributes(resource),
        'relationships': self._render_relationships(resource),
    }
Renders a resource's top level members based on json-api spec. Top level members include: 'id', 'type', 'attributes', 'relationships'
def matching_line(freq, data, tref, bin_size=1):
    """Find the parameters of the line with frequency `freq` in the data.

    Builds a template line at `freq`, measures its amplitude and phase
    against `data` with an averaged inner product, then returns the line
    model rebuilt with those measured parameters.

    Parameters
    ----------
    freq: float
        Frequency of the line to find in the data.
    data: pycbc.types.TimeSeries
        Data from which the line is to be measured.
    tref: float
        Reference time for the frequency line.
    bin_size: {1, float}, optional
        Duration of the averaging bins.

    Returns
    -------
    line_model: pycbc.types.TimeSeries
        The frequency line with amplitude and phase measured from data.
    """
    template = line_model(freq, data, tref=tref)
    _, amplitude, phase = avg_inner_product(data, template,
                                            bin_size=bin_size)
    return line_model(freq, data, tref=tref, amp=amplitude, phi=phase)
Find the parameter of the line with frequency 'freq' in the data. Parameters ---------- freq: float Frequency of the line to find in the data. data: pycbc.types.TimeSeries Data from which the line wants to be measured. tref: float Reference time for the frequency line. bin_size: {1, float}, optional Duration of the bins the data will be divided into for averaging. Returns ------- line_model: pycbc.types.TimeSeries A timeseries containing the frequency line with the amplitude and phase measured from the data.
def safe_mkdir(folder_name, force_perm=None):
    """Create the specified folder, including any missing parents.

    If the folder already exists, nothing is done.

    Parameters
    ----------
    folder_name : str
        Name of the folder to create.
    force_perm : int
        Mode (as for os.chmod) applied to every folder actually created.

    Notes
    -----
    Fixes two defects of the previous version: relative multi-level
    paths failed because the first path component was never created
    (the loop started at index 1), and a computed ``force_perm_path``
    was dead code.
    """
    if os.path.exists(folder_name):
        return
    parts = folder_name.split(os.path.sep)
    if parts[-1] == "":
        # Drop the empty component left by a trailing separator.
        parts = parts[:-1]
    for i in range(len(parts)):
        partial = os.path.sep.join(parts[:i + 1])
        # An absolute path yields an empty first component; skip it.
        if not partial or os.path.exists(partial):
            continue
        os.mkdir(partial)
        if force_perm:
            os.chmod(partial, force_perm)
Create the specified folder. If the parent folders do not exist, they are also created. If the folder already exists, nothing is done. Parameters ---------- folder_name : str Name of the folder to create. force_perm : str Mode to use for folder creation.
def apply(self, model):
    """Set the defined medium on the given model.

    Builds an exchange -> uptake mapping from self.data and assigns it
    to ``model.medium``.
    """
    medium = {}
    for entry in self.data.itertuples(index=False):
        medium[entry.exchange] = entry.uptake
    model.medium = medium
Set the defined medium on the given model.
def _pair_exp_cov(X, Y, span=180):
    """Calculate the exponential covariance between two return series.

    :param X: first time series of returns
    :type X: pd.Series
    :param Y: second time series of returns
    :type Y: pd.Series
    :param span: span of the exponential weighting function, defaults
        to 180
    :type span: int, optional
    :return: the exponential covariance between X and Y
    :rtype: float
    """
    covariation = (X - X.mean()) * (Y - Y.mean())
    if span < 10:
        warnings.warn("it is recommended to use a higher span, e.g 30 days")
    # Fix: use .iloc[-1] instead of [-1]. Positional indexing with []
    # on a Series is deprecated and removed in pandas 2.x, where [-1]
    # is treated as a label lookup and raises KeyError.
    return covariation.ewm(span=span).mean().iloc[-1]
Calculate the exponential covariance between two timeseries of returns. :param X: first time series of returns :type X: pd.Series :param Y: second time series of returns :type Y: pd.Series :param span: the span of the exponential weighting function, defaults to 180 :type span: int, optional :return: the exponential covariance between X and Y :rtype: float
async def execute_command(
    self, *args: bytes, timeout: DefaultNumType = _default
) -> SMTPResponse:
    """Check that we're connected, then pass the command to the protocol.

    The instance timeout is used unless an explicit one is given. Our
    side of the connection is closed when the server disconnects
    mid-command, or when it answers with a "domain unavailable" status
    (server shutting down).

    :raises SMTPServerDisconnected: connection lost
    """
    if timeout is _default:
        timeout = self.timeout

    self._raise_error_if_disconnected()

    try:
        response = await self.protocol.execute_command(
            *args, timeout=timeout
        )
    except SMTPServerDisconnected:
        # Drop our transport before propagating the error so the
        # client is left in a consistent disconnected state.
        self.close()
        raise

    if response.code == SMTPStatus.domain_unavailable:
        # Server announced it is closing; close proactively.
        self.close()

    return response
Check that we're connected, if we got a timeout value, and then pass the command to the protocol. :raises SMTPServerDisconnected: connection lost
def checkout_task(current_target):
    """Update or create a local checkout.

    Arguments:
    current_target - The target to operate on.

    A missing SCM tool is reported as a warning rather than an error.
    """
    try:
        config = current_target.config
        scm = _make_scm(current_target)
        repo_dir = config.get("dp.src_dir")
        scm.checkout(repo_dir=repo_dir,
                     shared_dir=config.get("dp.src_dir_shared"))
        scm.update(repo_dir=repo_dir)
    except devpipeline_core.toolsupport.MissingToolKey as mtk:
        current_target.executor.warning(mtk)
Update or create a local checkout.

Arguments:
target - The target to operate on.
def initialize_fields(self):
    """Convert all model fields to validator fields.

    Skips the primary key, converts every other declared model field,
    adds many-to-many fields found on the model class, then calls the
    parent implementation so manually defined fields can overwrite the
    generated ones.

    :return: None
    """
    for name, field in self.instance._meta.fields.items():
        # The primary key is database-managed; it is not validated.
        if getattr(field, 'primary_key', False):
            continue
        self._meta.fields[name] = self.convert_field(name, field)
    # Many-to-many relations live on the model class, not in
    # _meta.fields, so they are discovered separately.
    for name in dir(type(self.instance)):
        field = getattr(type(self.instance), name, None)
        if isinstance(field, ManyToManyField):
            self._meta.fields[name] = self.convert_field(name, field)
    super().initialize_fields()
Convert all model fields to validator fields. Then call the parent so that overwrites can happen if necessary for manually defined fields. :return: None
def plot_iso(axis, step, var):
    """Plot isocontours of a scalar field.

    Args:
        axis (:class:`matplotlib.axes.Axes`): axis handler of an existing
            matplotlib figure where the isocontours should be plotted.
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): the scalar field name.
    """
    xmesh, ymesh, field = get_meshes_fld(step, var)
    shift = conf.field.shift
    if shift:
        # Roll the field along the first axis per the configured shift.
        field = np.roll(field, shift, axis=0)
    axis.contour(xmesh, ymesh, field, linewidths=1)
Plot isocontours of scalar field. Args: axis (:class:`matplotlib.axes.Axes`): the axis handler of an existing matplotlib figure where the isocontours should be plotted. step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. var (str): the scalar field name.
def _ensure_env(self, env: Union[jinja2.Environment, None]):
    """Make sure the jinja environment is minimally configured.

    Creates an environment when none is given, installs a loader backed
    by this object's template cache, and registers the 'faker',
    'random_model' and 'random_models' globals when absent.

    :param env: an existing environment to complete, or None.
    :return: the configured environment.
    """
    if not env:
        env = jinja2.Environment()
    if not env.loader:
        # Serve templates straight from the in-memory cache by filename.
        env.loader = jinja2.FunctionLoader(lambda filename: self._cache[filename])
    if 'faker' not in env.globals:
        faker = Faker()
        # Fixed seed so generated fake data is reproducible.
        # NOTE(review): newer Faker releases expose seed() as a
        # classmethod -- confirm the pinned version supports this call.
        faker.seed(1234)
        env.globals['faker'] = faker
    if 'random_model' not in env.globals:
        env.globals['random_model'] = jinja2.contextfunction(random_model)
    if 'random_models' not in env.globals:
        env.globals['random_models'] = jinja2.contextfunction(random_models)
    return env
Make sure the jinja environment is minimally configured.
def blocks_to_mark_complete_on_view(self, blocks):
    """Return the blocks that should be marked complete on view and are
    not complete yet.

    Filters out blocks that cannot be completed on view, then drops any
    whose recorded completion is already at 1.0.
    """
    eligible = {
        block for block in blocks
        if self.can_mark_block_complete_on_view(block)
    }
    completions = self.get_completions(
        {block.location for block in eligible})
    return {
        block for block in eligible
        if completions.get(block.location, 0) < 1.0
    }
Returns a set of blocks which should be marked complete on view and haven't been yet.
def exponential_terms(order, variables, data):
    """Compute exponential (power) expansions of variables.

    Parameters
    ----------
    order: range or list(int)
        Exponential terms to include; 1 must be included to retain the
        original terms.
    variables: list(str)
        Variables for which exponential terms should be computed.
    data: pandas DataFrame object
        Table of observations of all variables.

    Returns
    -------
    variables_exp: list
        Names of the expanded columns.
    data_exp: pandas DataFrame object
        Values of the original and power-expanded columns.
    """
    names_by_order = OrderedDict()
    frames_by_order = OrderedDict()
    if 1 in order:
        # Keep the untransformed columns first.
        names_by_order[1] = variables
        frames_by_order[1] = data[variables]
    for o in set(order) - {1}:
        names_by_order[o] = ['{}_power{}'.format(v, o) for v in variables]
        frames_by_order[o] = data[variables] ** o

    all_names = reduce(lambda left, right: left + right,
                       names_by_order.values())
    combined = pd.DataFrame(
        columns=all_names,
        data=np.concatenate(list(frames_by_order.values()), axis=1))
    return (all_names, combined)
Compute exponential expansions. Parameters ---------- order: range or list(int) A list of exponential terms to include. For instance, [1, 2] indicates that the first and second exponential terms should be added. To retain the original terms, 1 *must* be included in the list. variables: list(str) List of variables for which exponential terms should be computed. data: pandas DataFrame object Table of values of all observations of all variables. Returns ------- variables_exp: list A list of variables to include in the final data frame after adding the specified exponential terms. data_exp: pandas DataFrame object Table of values of all observations of all variables, including any specified exponential terms.
def locations(self):
    """Available locations to be used when creating a new machine.

    :returns: A list of available locations.
    """
    url = '{}/clouds/{}/locations'.format(self.mist_client.uri, self.id)
    return self.request(url).get().json()
Available locations to be used when creating a new machine. :returns: A list of available locations.
def do_session(self, args):
    """Print current session information."""
    session_file = self.__session.filename
    if session_file is None:
        session_file = 'Not specified'
    print('{0: <30}: {1}'.format('Filename', session_file))
Print current session information
def encode_jwt_token(
    self,
    user,
    override_access_lifespan=None,
    override_refresh_lifespan=None,
    **custom_claims
):
    """Encode user data into a jwt token that can be used for
    authorization at protected endpoints.

    :param user: user to encode; must expose ``identity`` and
        ``rolenames`` and pass ``self._check_user``.
    :param override_access_lifespan: custom duration after which the new
        token's accessibility expires; may not exceed the refresh
        lifespan (it is capped at the refresh expiration below).
    :param override_refresh_lifespan: custom duration after which the
        new token's refreshability expires.
    :param custom_claims: additional JSON-compatible claims to pack into
        the payload; must not collide with reserved claims.
    """
    # Refuse custom claims that would overwrite required/reserved ones.
    ClaimCollisionError.require_condition(
        set(custom_claims.keys()).isdisjoint(RESERVED_CLAIMS),
        "The custom claims collide with required claims",
    )
    self._check_user(user)

    moment = pendulum.now('UTC')

    if override_refresh_lifespan is None:
        refresh_lifespan = self.refresh_lifespan
    else:
        refresh_lifespan = override_refresh_lifespan
    refresh_expiration = (moment + refresh_lifespan).int_timestamp

    if override_access_lifespan is None:
        access_lifespan = self.access_lifespan
    else:
        access_lifespan = override_access_lifespan
    # An access token can never outlive its refresh window.
    access_expiration = min(
        (moment + access_lifespan).int_timestamp,
        refresh_expiration,
    )

    payload_parts = dict(
        iat=moment.int_timestamp,
        exp=access_expiration,
        rf_exp=refresh_expiration,
        jti=str(uuid.uuid4()),  # unique id for this token
        id=user.identity,
        rls=','.join(user.rolenames),
        **custom_claims
    )
    return jwt.encode(
        payload_parts,
        self.encode_key,
        self.encode_algorithm,
    ).decode('utf-8')
Encodes user data into a jwt token that can be used for authorization at protected endpoints :param: override_access_lifespan: Override's the instance's access lifespan to set a custom duration after which the new token's accessability will expire. May not exceed the refresh_lifespan :param: override_refresh_lifespan: Override's the instance's refresh lifespan to set a custom duration after which the new token's refreshability will expire. :param: custom_claims: Additional claims that should be packed in the payload. Note that any claims supplied here must be JSON compatible types
def remove_callback(instance, prop, callback):
    """Remove a callback function from a property in an instance.

    Parameters
    ----------
    instance
        The instance to detach the callback from.
    prop : str
        Name of the callback property in `instance`.
    callback : func
        The callback function to remove.
    """
    descriptor = getattr(type(instance), prop)
    if not isinstance(descriptor, CallbackProperty):
        raise TypeError("%s is not a CallbackProperty" % prop)
    descriptor.remove_callback(instance, callback)
Remove a callback function from a property in an instance Parameters ---------- instance The instance to detach the callback from prop : str Name of callback property in `instance` callback : func The callback function to remove
def bounter(size_mb=None, need_iteration=True, need_counts=True,
            log_counting=None):
    """Factory for a bounter implementation.

    Args:
        size_mb (int): desired memory footprint of the counter.
        need_iteration (Bool): True returns a HashTable (iterable over
            key/value pairs); False returns a CountMinSketch (better in
            limited memory, no iteration).
        need_counts (Bool): False ignores the other parameters and
            returns a minimal hyperloglog cardinality counter.
        log_counting (int): log counting mode for CountMinSketch; raises
            ValueError when combined with need_iteration=True.
    """
    if not need_counts:
        return CardinalityEstimator()
    if size_mb is None:
        raise ValueError("Max size in MB must be provided.")
    if not need_iteration:
        return CountMinSketch(size_mb=size_mb, log_counting=log_counting)
    if log_counting:
        raise ValueError("Log counting is only supported with CMS implementation (need_iteration=False).")
    return HashTable(size_mb=size_mb)
Factory method for bounter implementation. Args: size_mb (int): Desired memory footprint of the counter. need_iteration (Bool): With `True`, create a `HashTable` implementation which can iterate over inserted key/value pairs. With `False`, create a `CountMinSketch` implementation which performs better in limited-memory scenarios, but does not support iteration over elements. need_counts (Bool): With `True`, construct the structure normally. With `False`, ignore all remaining parameters and create a minimalistic cardinality counter based on hyperloglog which only takes 64KB memory. log_counting (int): Counting to use with `CountMinSketch` implementation. Accepted values are `None` (default counting with 32-bit integers), 1024 (16-bit), 8 (8-bit). See `CountMinSketch` documentation for details. Raise ValueError if not `None `and `need_iteration` is `True`.
def change_in_longitude(lat, miles):
    """Given a latitude and a distance west, return the change in
    longitude (degrees).
    """
    # Radius of the circle of latitude at `lat`.
    parallel_radius = earth_radius * math.cos(lat * degrees_to_radians)
    return miles / parallel_radius * radians_to_degrees
Given a latitude and a distance west, return the change in longitude.
def process_tokens(self, tokens):
    """Process the token stream, forwarding every STRING token.

    Required override of the parent class' implementation.

    Args:
        tokens: the tokens from the token stream to process.
    """
    for tok_type, token, start, _end, _line in tokens:
        if tok_type != tokenize.STRING:
            continue
        row, col = start
        self._process_string_token(token, row, col)
Process the token stream. This is required to override the parent class' implementation. Args: tokens: the tokens from the token stream to process.
def set_location(self, time, latitude, longitude):
    """Set the location for the query.

    Parameters
    ----------
    time: datetime or DatetimeIndex
        Time range of the query; its timezone (tzinfo for a datetime,
        tz for a DatetimeIndex) is propagated to the Location.
    """
    tzinfo = time.tzinfo if isinstance(time, datetime.datetime) else time.tz
    if tzinfo is None:
        self.location = Location(latitude, longitude)
    else:
        self.location = Location(latitude, longitude, tz=tzinfo)
Sets the location for the query. Parameters ---------- time: datetime or DatetimeIndex Time range of the query.
def get_properties(self):
    """Build the property strings for each variable in the BIF model.

    Returns
    -------
    dict: dict of type {variable: list of "prop = value" strings}
    """
    property_tag = {}
    for variable in sorted(self.model.nodes()):
        node_props = collections.OrderedDict(
            sorted(self.model.node[variable].items()))
        property_tag[variable] = [
            str(prop) + " = " + str(val)
            for prop, val in node_props.items()
        ]
    return property_tag
Add property to variables in BIF Returns ------- dict: dict of type {variable: list of properties } Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.get_properties() {'bowel-problem': ['position = (335, 99)'], 'dog-out': ['position = (300, 195)'], 'family-out': ['position = (257, 99)'], 'hear-bark': ['position = (296, 268)'], 'light-on': ['position = (218, 195)']}
def set_model(self, m):
    """Set the model for the level.

    :param m: the model that the level should use
    :type m: QtCore.QAbstractItemModel
    :returns: None
    :rtype: None
    :raises: None
    """
    self._model = m
    # Point listeners at an invalid (root) index of the new model before
    # emitting the model-changed notification.
    self.new_root.emit(QtCore.QModelIndex())
    self.model_changed(m)
Set the model for the level :param m: the model that the level should use :type m: QtCore.QAbstractItemModel :returns: None :rtype: None :raises: None
def join_pretty_tensors(tensors, output, join_function=None, name='join'):
    """Join the list of pretty_tensors and set head of output.

    Args:
        tensors: A sequence of Layers or SequentialLayerBuilders to join.
        output: A pretty_tensor to set the head with the result.
        join_function: A function to join the tensors; defaults to concat
            on the last dimension.
        name: A name used for the name_scope.

    Returns:
        The result of calling with_tensor on output.

    Raises:
        ValueError: if tensors is None or empty.
    """
    if not tensors:
        raise ValueError('pretty_tensors must be a non-empty sequence.')
    with output.g.name_scope(name):
        if join_function is not None:
            return output.with_tensor(join_function(tensors))
        last_dim = len(tensors[0].shape) - 1
        return output.with_tensor(tf.concat(tensors, last_dim))
Joins the list of pretty_tensors and sets head of output_pretty_tensor. Args: tensors: A sequence of Layers or SequentialLayerBuilders to join. output: A pretty_tensor to set the head with the result. join_function: A function to join the tensors, defaults to concat on the last dimension. name: A name that is used for the name_scope Returns: The result of calling with_tensor on output Raises: ValueError: if pretty_tensors is None or empty.
def getSymbols(self):
    """Return every symbol appearing in the productions, followed by the
    terminal symbols (first-seen order is preserved; terminal symbols
    are appended even if already present).
    """
    seen = []
    for rule in self.productions:
        for symbol in rule.leftside + rule.rightside:
            if symbol not in seen:
                seen.append(symbol)
    return seen + self.terminal_symbols
Returns every symbol
def reset(self):
    """Command the PCAN driver to reset the bus after an error.

    Returns True when the driver reports PCAN_ERROR_OK.
    """
    return self.m_objPCANBasic.Reset(self.m_PcanHandle) == PCAN_ERROR_OK
Command the PCAN driver to reset the bus after an error.
def selected_canvas_hazlayer(self):
    """Obtain the canvas layer selected by user.

    :returns: The currently selected map layer in the list, or None when
        nothing is selected or the stored layer id cannot be read.
    :rtype: QgsMapLayer or None
    """
    if self.lstCanvasHazLayers.selectedItems():
        item = self.lstCanvasHazLayers.currentItem()
    else:
        return None
    try:
        # The layer id was stored in the item's UserRole when the list
        # was populated.
        layer_id = item.data(Qt.UserRole)
    except (AttributeError, NameError):
        layer_id = None
    # NOTE(review): mapLayer presumably returns None for an unknown or
    # None id, so a failed lookup falls through safely -- confirm.
    layer = QgsProject.instance().mapLayer(layer_id)
    return layer
Obtain the canvas layer selected by user. :returns: The currently selected map layer in the list. :rtype: QgsMapLayer
def runtime_error(self, msg, method):
    """Show the error in the bar.

    In testing mode the exception is reported and the module stopped via
    KeyboardInterrupt. With error_hide enabled the error is suppressed.
    Otherwise only the first line of the message is used, and
    error_index selects between the short (module name) and detailed
    ("name: message") form.

    :param msg: error text; only its first line is displayed.
    :param method: the module method that failed, forwarded to
        error_output.
    """
    if self.testing:
        self._py3_wrapper.report_exception(msg)
        raise KeyboardInterrupt
    if self.error_hide:
        self.hide_errors()
        return
    msg = msg.splitlines()[0]
    errors = [
        self.module_nice_name,
        u"{}: {}".format(self.module_nice_name, msg),
    ]
    if self.error_messages != errors:
        # A new error resets the short/detailed rotation.
        # NOTE(review): error_index appears to be advanced elsewhere --
        # confirm.
        self.error_messages = errors
        self.error_index = 0
    self.error_output(self.error_messages[self.error_index], method)
Show the error in the bar
def _get_serializer(self, _type):
    """Get a serializer for a particular type.

    Primitives come from the module-level serializers; arrays and
    objects are built via the dedicated _get_*_serializer methods.

    :raises ValueError: for an unknown type.
    """
    try:
        return _serializers[_type]
    except KeyError:
        pass
    if _type == 'array':
        return self._get_array_serializer()
    if _type == 'object':
        return self._get_object_serializer()
    raise ValueError('Unknown type: {}'.format(_type))
Gets a serializer for a particular type. For primitives, returns the serializer from the module-level serializers. For arrays and objects, uses the special _get_T_serializer methods to build the encoders and decoders.
def login(username, password, development_mode=False):
    """Return the user if authentication succeeds, None otherwise.

    In development mode the password check is skipped. A database
    OperationalError is treated as a failed login rather than an error.
    """
    try:
        user = User.fetch_by(username=username)
        if user and (development_mode or user.verify_password(password)):
            return user
    except OperationalError:
        pass
    return None
Return the user if successful, None otherwise
async def migrate_redis1_to_redis2(storage1: RedisStorage, storage2: RedisStorage2):
    """Helper for migrating from RedisStorage to RedisStorage2.

    Copies state, data and bucket for every (chat, user) pair.

    :param storage1: instance of RedisStorage
    :param storage2: instance of RedisStorage2
    :return:
    """
    if not isinstance(storage1, RedisStorage):
        raise TypeError(f"{type(storage1)} is not RedisStorage instance.")
    # Fix: the original checked storage2 against RedisStorage, so a
    # valid RedisStorage2 target was rejected (its error message was
    # also wrong).
    if not isinstance(storage2, RedisStorage2):
        raise TypeError(f"{type(storage2)} is not RedisStorage2 instance.")

    log = logging.getLogger('aiogram.RedisStorage')

    for chat, user in await storage1.get_states_list():
        state = await storage1.get_state(chat=chat, user=user)
        await storage2.set_state(chat=chat, user=user, state=state)

        data = await storage1.get_data(chat=chat, user=user)
        await storage2.set_data(chat=chat, user=user, data=data)

        bucket = await storage1.get_bucket(chat=chat, user=user)
        await storage2.set_bucket(chat=chat, user=user, bucket=bucket)

        log.info(f"Migrated user {user} in chat {chat}")
Helper for migrating from RedisStorage to RedisStorage2 :param storage1: instance of RedisStorage :param storage2: instance of RedisStorage2 :return:
def load_object(target, namespace=None):
    """Load an object identified by a dotted/colon notation string.

    For example::

        # Load class Foo from example.objects
        load_object('example.objects:Foo')

    If a plugin namespace is provided, simple names are resolved as
    entry points in that namespace::

        # Load the plugin named 'routing' from 'web.dispatch'
        load_object('routing', 'web.dispatch')

    Providing a namespace does not prevent full dot-colon lookup from
    working.
    """
    if namespace and ':' not in target:
        allowable = dict(
            (ep.name, ep)
            for ep in pkg_resources.iter_entry_points(namespace))
        if target not in allowable:
            raise ValueError('Unknown plugin "' + target + '"; found: ' +
                             ', '.join(allowable))
        return allowable[target].load()

    if ':' in target:
        module_path, attr = target.split(':')
    else:
        module_path, attr = target, None

    # __import__ returns the top-level package; walk down to the target.
    obj = __import__(module_path)
    remainder = module_path.split('.')[1:]
    if attr:
        remainder.append(attr)
    for piece in remainder:
        obj = getattr(obj, piece)
    return obj
This helper function loads an object identified by a dotted-notation string. For example: # Load class Foo from example.objects load_object('example.objects:Foo') If a plugin namespace is provided simple name references are allowed. For example: # Load the plugin named 'routing' from the 'web.dispatch' namespace load_object('routing', 'web.dispatch') Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
def update_metadata(self):
    """Update the metadata available for packages in repo.continuum.io.

    Returns a download worker.
    """
    if self._data_directory is None:
        raise Exception('Need to call `api.set_data_directory` first.')
    metadata_url = 'https://repo.continuum.io/pkgs/metadata.json'
    filepath = os.sep.join([self._data_directory, 'metadata.json'])
    return self.download_requests(metadata_url, filepath)
Update the metadata available for packages in repo.continuum.io. Returns a download worker.
def create(self, request, *args, **kwargs):
    """Create a single model instance or many in bulk.

    When the request payload contains a bulk list (as detected by
    ``_get_bulk_payload``), delegate to ``_create_many``, which relies on
    the serializer's many=True support (Django REST >= 2.2.5). The data
    may be keyed by the serializer name (single or plural form), be a
    plain dict, or be a list. Otherwise fall back to the standard DRF
    single-instance create.
    """
    bulk_payload = self._get_bulk_payload(request)
    if bulk_payload:
        return self._create_many(bulk_payload)
    return super(DynamicModelViewSet, self).create(
        request, *args, **kwargs)
Either create a single or many model instances in bulk using the Serializer's many=True ability from Django REST >= 2.2.5. The data can be represented by the serializer name (single or plural forms), dict or list. Examples: POST /dogs/ { "name": "Fido", "age": 2 } POST /dogs/ { "dog": { "name": "Lucky", "age": 3 } } POST /dogs/ { "dogs": [ {"name": "Fido", "age": 2}, {"name": "Lucky", "age": 3} ] } POST /dogs/ [ {"name": "Fido", "age": 2}, {"name": "Lucky", "age": 3} ]
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
            group_keys=True, squeeze=False):
    """Return a GroupBy over the schema RDD (mirrors the pandas groupby
    signature).

    Note that grouping by a column name will be faster than most other
    options due to the implementation.
    """
    # Imported here rather than at module level -- presumably to avoid
    # a circular import; confirm before hoisting.
    from sparklingpandas.groupby import GroupBy
    return GroupBy(self, by=by, axis=axis, level=level, as_index=as_index,
                   sort=sort, group_keys=group_keys, squeeze=squeeze)
Returns a groupby on the schema rdd. This returns a GroupBy object. Note that grouping by a column name will be faster than most other options due to implementation.
def _rem(self, command, *args, **kwargs):
    """Shortcut for commands that only remove values from the field.

    Removed values are deindexed first, so the index never references
    values that are about to disappear.
    """
    if self.indexable:
        self.deindex(args)
    return self._traverse_command(command, *args, **kwargs)
Shortcut for commands that only remove values from the field. Removed values will be deindexed.
def get_filter_item(name: str, operation: bytes, value: bytes) -> bytes:
    """Build an LDAP filter fragment for a single search term.

    :param name: attribute name to filter on.
    :param operation: search operation; None means exact match,
        "contains" wraps the value in wildcards.
        NOTE(review): annotated as bytes but compared against the str
        "contains" below, so callers appear to pass str -- confirm and
        fix the annotation.
    :param value: raw attribute value as bytes.
    :raises ValueError: for an unknown search operation.
    """
    assert isinstance(name, str)
    assert isinstance(value, bytes)
    if operation is None:
        return filter_format(b"(%s=%s)", [name, value])
    elif operation == "contains":
        # Fix: compare against empty *bytes*; the previous `value != ""`
        # was always true because bytes never equal str.
        assert value != b""
        return filter_format(b"(%s=*%s*)", [name, value])
    else:
        raise ValueError("Unknown search operation %s" % operation)
A field could be found for this term, try to get filter string for it.
def make_mesh(coor, ngroups, conns, mesh_in):
    """Create a mesh reusing mat_ids and descs of mesh_in."""
    # One constant-valued mat_id array per connectivity block, filled
    # with the first mat_id of the corresponding block of mesh_in.
    mat_ids = [
        nm.full((conn.shape[0],), mesh_in.mat_ids[ig][0], dtype=nm.int32)
        for ig, conn in enumerate(conns)
    ]
    return Mesh.from_data('merged mesh', coor, ngroups, conns,
                          mat_ids, mesh_in.descs)
Create a mesh reusing mat_ids and descs of mesh_in.
def label_subplot(ax=None, x=0.5, y=-0.25, text="(a)", **kwargs):
    """Create a subplot label.

    Draws `text` at axes-relative coordinates (x, y), centered
    horizontally and anchored to the top; defaults to the current axes.
    """
    target = plt.gca() if ax is None else ax
    target.text(x=x, y=y, s=text, transform=target.transAxes,
                horizontalalignment="center", verticalalignment="top",
                **kwargs)
Create a subplot label.