Columns: code (string, 20 to 4.93k chars) · docstring (string, 33 to 1.27k chars) · source (3 classes)
def list(self, accountID, **kwargs):
    request = Request('GET', '/v3/accounts/{accountID}/orders')
    request.set_path_param('accountID', accountID)
    request.set_param('ids', kwargs.get('ids'))
    request.set_param('state', kwargs.get('state'))
    request.set_param('instrument', kwargs.get('instrument'))
    request.set_param('count', kwargs.get('count'))
    request.set_param('beforeID', kwargs.get('beforeID'))
    response = self.ctx.request(request)
    if response.content_type is None:
        return response
    if not response.content_type.startswith('application/json'):
        return response
    jbody = json.loads(response.raw_body)
    parsed_body = {}
    if str(response.status) == '200':
        if jbody.get('orders') is not None:
            parsed_body['orders'] = [
                self.ctx.order.Order.from_dict(d, self.ctx)
                for d in jbody.get('orders')
            ]
        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')
    elif str(response.status) == '400':
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    elif str(response.status) == '404':
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    elif str(response.status) == '405':
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    else:
        parsed_body = jbody
    response.body = parsed_body
    return response
Get a list of Orders for an Account Args: accountID: Account Identifier ids: List of Order IDs to retrieve state: The state to filter the requested Orders by instrument: The instrument to filter the requested orders by count: The maximum number of Orders to return beforeID: The maximum Order ID to return. If not provided the most recent Orders in the Account are returned Returns: v20.response.Response containing the results from submitting the request
codesearchnet
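A hypothetical usage sketch for the order-listing endpoint above. The hostname, token, and account id are placeholders, and the Context constructor follows the v20 library's documented shape rather than anything in this record.

import v20

ctx = v20.Context('api-fxpractice.oanda.com', token='<API_TOKEN>')
response = ctx.order.list('<ACCOUNT_ID>', state='PENDING', count=50)
if str(response.status) == '200':
    for order in response.body.get('orders', []):
        print(order.id)
    print('last transaction:', response.body.get('lastTransactionID'))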
def create(self, name, domain_name):
    name = self.wrap(self.resource.create(dict(name=name, domain_name=domain_name)))
    self.add(name)
    return name
Register a url (e.g. wallet.gem.co) for this wallet. Args: name (str): human-readable wallet name (e.g. wallet) domain_name (str): the domain name to create the subdomain on (e.g. gem.co); this domain must already be registered with Gem Returns: The new round.NetkiName
juraj-google-style
def console_print_rect_ex(con: tcod.console.Console, x: int, y: int, w: int, h: int,
                          flag: int, alignment: int, fmt: str) -> int:
    return int(lib.TCOD_console_printf_rect_ex(_console(con), x, y, w, h,
                                               flag, alignment, _fmt(fmt)))
Print a string constrained to a rectangle with blend and alignment. Returns: int: The number of lines of text once word-wrapped. .. deprecated:: 8.5 Use :any:`Console.print_rect` instead.
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    encoding = self._ENCODING or parser_mediator.codepage
    text_file_object = text_file.TextFile(file_object, encoding=encoding)
    if not self._ParseAndValidateRecord(parser_mediator, text_file_object):
        raise errors.UnableToParseFile('Unable to parse as Opera global_history.dat.')
    while self._ParseRecord(parser_mediator, text_file_object):
        pass
Parses an Opera global history file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def convert_strain_to_deformation(strain, shape="upper"):
    strain = SquareTensor(strain)
    ftdotf = 2 * strain + np.eye(3)
    if shape == "upper":
        result = scipy.linalg.cholesky(ftdotf)
    elif shape == "symmetric":
        result = scipy.linalg.sqrtm(ftdotf)
    else:
        raise ValueError('shape must be "upper" or "symmetric"')
    return Deformation(result)
This function converts a strain to a deformation gradient that will produce that strain. Supports two methods: Args: strain (3x3 array-like): strain matrix shape (string): method for determining the deformation; "upper" produces an upper triangular deformation gradient, "symmetric" produces a symmetric deformation gradient
juraj-google-style
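A minimal round-trip sketch for the converter above, assuming pymatgen's elasticity module (which this record appears to come from); the module path and class names are that library's, not part of this record.

import numpy as np
from pymatgen.analysis.elasticity.strain import Strain, convert_strain_to_deformation

strain = Strain.from_voigt([0.01, 0.0, 0.0, 0.0, 0.0, 0.0])  # 1% uniaxial strain
deformation = convert_strain_to_deformation(strain, shape="upper")
# The Green-Lagrange strain of the result should recover the input:
print(np.allclose(deformation.green_lagrange_strain, strain))  # True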
def GetFileSystemReferenceCount(self, path_spec):
    identifier = self._GetFileSystemCacheIdentifier(path_spec)
    cache_value = self._file_system_cache.GetCacheValue(identifier)
    if not cache_value:
        return None
    return cache_value.reference_count
Retrieves the reference count of a cached file system object. Args: path_spec (PathSpec): path specification. Returns: int: reference count or None if there is no file system object for the corresponding path specification cached.
codesearchnet
def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples):
    self.assertTrue(low < high)
    self.assertTrue(radius >= 0)
    num_anchors = len(anchors)
    self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low))
    anchors = np.reshape(anchors, num_anchors)
    samples = []
    while len(samples) < num_samples:
        sample = np.random.uniform(low, high)
        if np.all(np.fabs(sample - anchors) > radius):
            samples.append(sample)
    return samples
Generate samples that are far enough from a set of anchor points. We generate uniform samples in [low, high], then reject those that are less than radius away from any point in anchors. We stop after we have accepted num_samples samples. Args: low: The lower end of the interval. high: The upper end of the interval. anchors: A list of anchor points to avoid. radius: Distance threshold for the samples from the anchors. num_samples: How many samples to produce. Returns: samples: A list of length num_samples with the accepted samples.
github-repos
def nic_v1(msg, NICs):
    if typecode(msg) < 5 or typecode(msg) > 22:
        raise RuntimeError(
            "%s: Not a surface position message (5<TC<8), "
            "airborne position message (8<TC<19), "
            "or airborne position with GNSS height (20<TC<22)" % msg
        )
    tc = typecode(msg)
    NIC = uncertainty.TC_NICv1_lookup[tc]
    if isinstance(NIC, dict):
        NIC = NIC[NICs]
    try:
        Rc = uncertainty.NICv1[NIC][NICs]['Rc']
        VPL = uncertainty.NICv1[NIC][NICs]['VPL']
    except KeyError:
        Rc, VPL = uncertainty.NA, uncertainty.NA
    return Rc, VPL
Calculate NIC, navigation integrity category, for ADS-B version 1 Args: msg (string): 28-character hexadecimal message string NICs (int or string): NIC supplement Returns: int or string: Horizontal Radius of Containment int or string: Vertical Protection Limit
juraj-google-style
def run_server(cls, args=None, **kwargs):
    if args is None:
        args = sys.argv[1:]
    args = [cls.__name__] + list(args)
    green_mode = getattr(cls, 'green_mode', None)
    kwargs.setdefault('green_mode', green_mode)
    return run((cls,), args, **kwargs)
Run the class as a device server. It is based on the tango.server.run method. The difference is that the device class and server name are automatically given. Args: args (iterable): args as given in the tango.server.run method without the server name. If None, the sys.argv list is used kwargs: the other keyword arguments are as given in the tango.server.run method.
codesearchnet
def is_legal_subject(self, c: OntologyClass) -> bool:
    domains = self.included_domains()
    # bool() ensures the annotated return type even when the last operand is a set.
    return bool(c and (not domains or c in domains or c.super_classes_closure() & domains))
is_legal_subject(c) = true if - c in included_domains(self) or - super_classes_closure(c) intersection included_domains(self) is not empty. There is no need to check included_domains(super_properties_closure(self)), because included_domains(super_properties_closure(self)) is a subset of super_classes_closure(included_domains(self)). Args: c: the OntologyClass to test as a potential subject. Returns: True if c is a legal subject of this property, False otherwise.
juraj-google-style
def interm_fluent_ordering(self) -> List[str]:
    interm_fluents = self.intermediate_fluents.values()
    key = lambda pvar: (pvar.level, pvar.name)
    return [str(pvar) for pvar in sorted(interm_fluents, key=key)]
The list of intermediate-fluent names in canonical order. Returns: List[str]: A list of fluent names.
codesearchnet
async def download_file(context, url, abs_filename, session=None, chunk_size=128):
    session = session or context.session
    loggable_url = get_loggable_url(url)
    log.info("Downloading %s", loggable_url)
    parent_dir = os.path.dirname(abs_filename)
    async with session.get(url) as resp:
        if resp.status == 404:
            await _log_download_error(resp, "404 downloading %(url)s: %(status)s; body=%(body)s")
            raise Download404("{} status {}!".format(loggable_url, resp.status))
        elif resp.status != 200:
            await _log_download_error(resp, "Failed to download %(url)s: %(status)s; body=%(body)s")
            raise DownloadError("{} status {} is not 200!".format(loggable_url, resp.status))
        makedirs(parent_dir)
        with open(abs_filename, "wb") as fd:
            while True:
                chunk = await resp.content.read(chunk_size)
                if not chunk:
                    break
                fd.write(chunk)
    log.info("Done")
Download a file, async. Args: context (scriptworker.context.Context): the scriptworker context. url (str): the url to download abs_filename (str): the path to download to session (aiohttp.ClientSession, optional): the session to use. If None, use context.session. Defaults to None. chunk_size (int, optional): the chunk size to read from the response at a time. Default is 128.
juraj-google-style
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None, projections=False):
    if os.path.exists(os.path.join(dir_name, 'branch_0')):
        branch_dir_names = [os.path.abspath(d)
                            for d in glob.glob('{i}/branch_*'.format(i=dir_name))
                            if os.path.isdir(d)]
        sort_by = lambda x: int(x.split('_')[-1])
        sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)
        branches = []
        for dir_name in sorted_branch_dir_names:
            xml_file = os.path.join(dir_name, 'vasprun.xml')
            if os.path.exists(xml_file):
                run = Vasprun(xml_file, parse_projected_eigen=projections)
                branches.append(run.get_band_structure(efermi=efermi))
            else:
                # Named placeholders fixed to match the keyword arguments passed to format().
                warnings.warn('Skipping {d}. Unable to find {f}'.format(d=dir_name, f=xml_file))
        return get_reconstructed_band_structure(branches, efermi)
    else:
        xml_file = os.path.join(dir_name, 'vasprun.xml')
        if os.path.exists(xml_file):
            return Vasprun(xml_file, parse_projected_eigen=projections).get_band_structure(
                kpoints_filename=None, efermi=efermi)
        return None
This method is used to get band structure info from a VASP directory. It takes into account that the run can be divided in several branches named "branch_x". If the run has not been divided in branches the method will turn to parsing vasprun.xml directly. The method returns None if there's a parsing error. Args: dir_name: Directory containing all bandstructure runs. efermi: Efermi for bandstructure. projections: True if you want to get the data on site projections if any. Note that this is sometimes very large Returns: A BandStructure Object
codesearchnet
def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int],
                                device: torch.device, has_query: bool = False) -> torch.Tensor:
    if attention_mask.dim() == 3:
        extended_attention_mask = attention_mask[:, None, :, :]
    elif attention_mask.dim() == 2:
        extended_attention_mask = attention_mask[:, None, None, :]
    else:
        raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(
            input_shape, attention_mask.shape))
    extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
    return extended_attention_mask
Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
github-repos
def end_at(self, document_fields):
    query = query_mod.Query(self)
    return query.end_at(document_fields)
End query at a cursor with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.end_at` for more information on this method. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor.
codesearchnet
def get_base_branch():
    base_branch = git.guess_base_branch()
    if base_branch is None:
        log.info("Can't guess the base branch, you have to pick one yourself:")
        base_branch = choose_branch()
    return base_branch
Return the base branch for the current branch. This function will first try to guess the base branch and if it can't it will let the user choose the branch from the list of all local branches. Returns: str: The name of the branch the current branch is based on.
codesearchnet
def __init__(self, samplerate, nframes, wavpath):
    imp = minidom.getDOMImplementation()
    dt = imp.createDocumentType('sonic-visualiser', None, None)
    self.doc = doc = imp.createDocument(None, 'sv', dt)
    root = doc.documentElement
    self.__dname = dict()
    self.data = root.appendChild(doc.createElement('data'))
    self.display = root.appendChild(doc.createElement('display'))
    window = self.display.appendChild(doc.createElement('window'))
    self.defwidth = 900
    window.setAttribute('width', str(self.defwidth))
    window.setAttribute('height', str(856))
    self.selections = root.appendChild(doc.createElement('selections'))
    self.nbdata = 0
    self.samplerate = samplerate
    self.nframes = nframes
    self.__setMainWaveModel(wavpath)
Init a sonic visualiser environment structure based on the attributes of the main audio file Args: samplerate(int): media sample rate (Hz) nframes(int): number of samples wavpath(str): Full path to the wav file used in the current environment
juraj-google-style
def _page_to_text(page):
    start_pos = page.find(u"<text")
    assert start_pos != -1
    end_tag_pos = page.find(u">", start_pos)
    assert end_tag_pos != -1
    end_tag_pos += len(u">")
    end_pos = page.find(u"</text>")
    if end_pos == -1:
        return u""
    return page[end_tag_pos:end_pos]
Extract the text from a page. Args: page: a unicode string Returns: a unicode string
juraj-google-style
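A minimal check of the <text> extractor above; the wiki-style page snippet is made up for illustration.

page = u'<page><title>Foo</title><text xml:space="preserve">Hello, world.</text></page>'
print(_page_to_text(page))  # -> Hello, world.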
def _CreateProcessingConfiguration(self, knowledge_base):
    configuration = configurations.ProcessingConfiguration()
    configuration.artifact_filters = self._artifact_filters
    configuration.credentials = self._credential_configurations
    configuration.debug_output = self._debug_mode
    configuration.event_extraction.text_prepend = self._text_prepend
    configuration.extraction.hasher_file_size_limit = self._hasher_file_size_limit
    configuration.extraction.hasher_names_string = self._hasher_names_string
    configuration.extraction.process_archives = self._process_archives
    configuration.extraction.process_compressed_streams = self._process_compressed_streams
    configuration.extraction.yara_rules_string = self._yara_rules_string
    configuration.filter_file = self._filter_file
    configuration.input_source.mount_path = self._mount_path
    configuration.log_filename = self._log_file
    configuration.parser_filter_expression = self._parser_filter_expression
    configuration.preferred_year = self._preferred_year
    configuration.profiling.directory = self._profiling_directory
    configuration.profiling.sample_rate = self._profiling_sample_rate
    configuration.profiling.profilers = self._profilers
    configuration.temporary_directory = self._temporary_directory
    if not configuration.parser_filter_expression:
        operating_system = knowledge_base.GetValue('operating_system')
        operating_system_product = knowledge_base.GetValue('operating_system_product')
        operating_system_version = knowledge_base.GetValue('operating_system_version')
        preset_definitions = parsers_manager.ParsersManager.GetPresetsForOperatingSystem(
            operating_system, operating_system_product, operating_system_version)
        if preset_definitions:
            preset_names = [preset_definition.name for preset_definition in preset_definitions]
            filter_expression = ','.join(preset_names)
            logger.info('Parser filter expression set to: {0:s}'.format(filter_expression))
            configuration.parser_filter_expression = filter_expression
    return configuration
Creates a processing configuration. Args: knowledge_base (KnowledgeBase): contains information from the source data needed for parsing. Returns: ProcessingConfiguration: processing configuration. Raises: BadConfigOption: if more than 1 parser and parser plugins preset was found for the detected operating system.
codesearchnet
def plot_timestream(array, kidid, xtick='time', scantypes=None, ax=None, **kwargs):
    if ax is None:
        ax = plt.gca()
    index = np.where(array.kidid == kidid)[0]
    if len(index) == 0:
        raise KeyError('Such a kidid does not exist.')
    index = int(index)
    if scantypes is None:
        if xtick == 'time':
            ax.plot(array.time, array[:, index], label='ALL', **kwargs)
        elif xtick == 'index':
            ax.plot(np.ogrid[:len(array.time)], array[:, index], label='ALL', **kwargs)
    else:
        for scantype in scantypes:
            if xtick == 'time':
                ax.plot(array.time[array.scantype == scantype],
                        array[:, index][array.scantype == scantype],
                        label=scantype, **kwargs)
            elif xtick == 'index':
                ax.plot(np.ogrid[:len(array.time[array.scantype == scantype])],
                        array[:, index][array.scantype == scantype],
                        label=scantype, **kwargs)
    ax.set_xlabel('{}'.format(xtick))
    ax.set_ylabel(str(array.datatype.values))
    ax.legend()
    kidtpdict = {0: 'wideband', 1: 'filter', 2: 'blind'}
    try:
        kidtp = kidtpdict[int(array.kidtp[index])]
    except KeyError:
        kidtp = 'filter'
    # NOTE: the original title string literal was truncated in this record;
    # the format below is a reconstruction from the values computed above.
    ax.set_title('ch #{} ({})'.format(kidid, kidtp))
    logger.info('timestream data (ch={}) has been plotted.'.format(kidid))
Plot timestream data. Args: array (xarray.DataArray): Array which the timestream data are included. kidid (int): Kidid. xtick (str): Type of x axis. 'time': Time. 'index': Time index. scantypes (list): Scantypes. If None, all scantypes are used. ax (matplotlib.axes): Axis you want to plot on. kwargs (optional): Plot options passed to ax.plot().
codesearchnet
def sendline(self, text):
    logger.debug("Sending input '{0}' to '{1}'".format(text, self.name))
    try:
        return self._spawn.sendline(text)
    except pexpect.exceptions.EOF as e:
        logger.debug("Raising termination exception.")
        raise TerminationException(instance=self, real_exception=e, output=self.get_output())
    except pexpect.exceptions.TIMEOUT as e:
        logger.debug("Raising timeout exception.")
        raise TimeoutException(instance=self, real_exception=e, output=self.get_output())
    except Exception as e:
        logger.debug("Sending input failed: " + str(e))
        raise NestedException(instance=self, real_exception=e, output=self.get_output())
Sends an input line to the running program, including os.linesep. Args: text (str): The input text to be send. Raises: TerminationException: The program terminated before / while / after sending the input. NestedException: An internal problem occurred while waiting for the output.
juraj-google-style
def _simplify_non_context_field_binary_composition(expression):
    if any((isinstance(expression.left, ContextField),
            isinstance(expression.right, ContextField))):
        raise AssertionError(u'Received a BinaryComposition {} with a ContextField operand. '
                             u'This should never happen.'.format(expression))
    if expression.operator == u'||':
        if expression.left == TrueLiteral or expression.right == TrueLiteral:
            return TrueLiteral
        return expression
    elif expression.operator == u'&&':
        if expression.left == TrueLiteral:
            return expression.right
        if expression.right == TrueLiteral:
            return expression.left
        return expression
    return expression
Return a simplified BinaryComposition if either operand is a TrueLiteral. Args: expression: BinaryComposition without any ContextField operand(s) Returns: simplified expression if the given expression is a disjunction/conjunction and one of it's operands is a TrueLiteral, and the original expression otherwise
codesearchnet
def __init__(self, cell):
    self._cell = cell
Creates a new CounterCell. Args: cell: A c pointer of TFE_MonitoringCounterCell.
github-repos
def generate_key_pair(secret=None):
    if secret:
        keypair_raw = ed25519_generate_key_pair_from_secret(secret)
        return CryptoKeypair(*(k.decode() for k in keypair_raw))
    else:
        return generate_keypair()
Generates a cryptographic key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: :class:`~bigchaindb.common.crypto.CryptoKeypair`: A :obj:`collections.namedtuple` with named fields :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
juraj-google-style
def from_json(cls, data):
    assert 'header' in data, 'Required keyword "header" is missing!'
    assert 'values' in data, 'Required keyword "values" is missing!'
    assert 'datetimes' in data, 'Required keyword "datetimes" is missing!'
    coll = cls(Header.from_json(data['header']), data['values'], data['datetimes'])
    if 'validated_a_period' in data:
        coll._validated_a_period = data['validated_a_period']
    return coll
Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, "datetimes": An array of datetimes, "validated_a_period": Boolean for whether header analysis_period is valid }
juraj-google-style
def set_time(self, value: float):
    if value < 0:
        value = 0
    self.controller.row = self.rps * value
Set the current time jumping in the timeline. Args: value (float): The new time
juraj-google-style
def all_to_all(x, concat_dimension, split_dimension, split_count,
               group_assignment=None, name=None):
    if group_assignment is None:
        group_assignment = _create_default_group_assignment()
    return gen_tpu_ops.all_to_all(x, group_assignment,
                                  concat_dimension=concat_dimension,
                                  split_dimension=split_dimension,
                                  split_count=split_count, name=name)
Exchange data across TPU replicas. Args: x: The local tensor. concat_dimension: The dimension number to concatenate. split_dimension: The dimension number to split. split_count: The number of splits, this number must equal the sub-group size(group_assignment.get_shape()[1]) group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup. name: Optional op name. Returns: A `Tensor` which is concatenated by data from different replicas.
github-repos
def query_source_file_line(self, file_path, lineno):
    if not self._source_files:
        raise ValueError('This debug server has not received any source file contents yet.')
    for source_files in self._source_files:
        for source_file_proto in source_files.source_files:
            if source_file_proto.file_path == file_path:
                return source_file_proto.lines[lineno - 1]
    # %-interpolation fixed: the path was previously passed as a second
    # exception argument and never substituted into the message.
    raise ValueError('Source file at path %s has not been received by the debug server'
                     % file_path)
Query the content of a given line in a source file. Args: file_path: Path to the source file. lineno: Line number as an `int`. Returns: Content of the line as a string. Raises: ValueError: If no source file is found at the given file_path.
github-repos
def __init__(self, stream):
    super(BinaryReader, self).__init__()
    self.stream = stream
Create an instance. Args: stream (BytesIO): a stream to operate on. i.e. a neo.IO.MemoryStream or raw BytesIO.
juraj-google-style
def inspect_swarm(self):
    url = self._url('/swarm')
    return self._result(self._get(url), True)
Retrieve low-level information about the current swarm. Returns: A dictionary containing data about the swarm. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def _DropCommonSuffixes(filename):
    for suffix in itertools.chain(
            ('%s.%s' % (test_suffix.lstrip('_'), ext)
             for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
            ('%s.%s' % (suffix, ext)
             for suffix, ext in itertools.product(['inl', 'imp', 'internal'],
                                                  GetHeaderExtensions()))):
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
codesearchnet
def GetValueByName(self, name):
    if not self._registry_key and self._registry:
        self._GetKeyFromRegistry()
    if not self._registry_key:
        return None
    return self._registry_key.GetValueByName(name)
Retrieves a value by name. Args: name (str): name of the value or an empty string for the default value. Returns: WinRegistryValue: Windows Registry value or None if not found.
juraj-google-style
def find_one(self, collection, query):
    obj = getattr(self.db, collection)
    result = obj.find_one(query)
    return result
Search a collection for the query provided and return one result. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: The matching document as a dict, or None if no document matches.
codesearchnet
def sequence_equal(self, second_iterable, equality_comparer=operator.eq):
    # Error messages fixed to name this method rather than to_tuple()/aggregate().
    if self.closed():
        raise ValueError('Attempt to call sequence_equal() on a closed Queryable.')
    if not is_iterable(second_iterable):
        raise TypeError('Cannot compute sequence_equal() with second_iterable of '
                        'non-iterable {type}'.format(type=str(type(second_iterable))[7:-1]))
    if not is_callable(equality_comparer):
        raise TypeError('sequence_equal() parameter equality_comparer={equality_comparer} '
                        'is not callable'.format(equality_comparer=repr(equality_comparer)))
    try:
        if len(self._iterable) != len(second_iterable):
            return False
    except TypeError:
        pass
    sentinel = object()
    for first, second in izip_longest(self, second_iterable, fillvalue=sentinel):
        if first is sentinel or second is sentinel:
            return False
        if not equality_comparer(first, second):
            return False
    return True
Determine whether two sequences are equal by elementwise comparison. Sequence equality is defined as the two sequences being equal length and corresponding elements being equal as determined by the equality comparer. Note: This method uses immediate execution. Args: second_iterable: The sequence which will be compared with the source sequence. equality_comparer: An optional binary predicate function which is used to compare corresponding elements. Should return True if the elements are equal, otherwise False. The default equality comparer is operator.eq which calls __eq__ on elements of the source sequence with the corresponding element of the second sequence as a parameter. Returns: True if the sequences are equal, otherwise False. Raises: ValueError: If the Queryable is closed. TypeError: If second_iterable is not in fact iterable. TypeError: If equality_comparer is not callable.
codesearchnet
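A hedged usage sketch, assuming the asq-style query API this method appears to belong to; the initiator import follows that library's documentation.

from asq.initiators import query

print(query([1, 2, 3]).sequence_equal([1, 2, 3]))  # True
print(query([1, 2, 3]).sequence_equal([1, 2, 4]))  # False
# A custom comparer makes the comparison case-insensitive:
print(query(['A', 'b']).sequence_equal(['a', 'B'],
                                       lambda x, y: x.lower() == y.lower()))  # True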
def codemirror_instance(config_name, varname, element_id, assets=True):
    output = io.StringIO()
    manifesto = CodemirrorAssetTagRender()
    manifesto.register(config_name)
    if assets:
        output.write(manifesto.css_html())
        output.write(manifesto.js_html())
    html = manifesto.codemirror_html(config_name, varname, element_id)
    output.write(html)
    content = output.getvalue()
    output.close()
    return mark_safe(content)
Return HTML to init a CodeMirror instance for an element. This will output the whole HTML needed to initialize a CodeMirror instance with needed assets loading. Assets can be omitted with the ``assets`` option. Example: :: {% load djangocodemirror_tags %} {% codemirror_instance 'a-config-name' 'foo_codemirror' 'foo' %} Arguments: config_name (string): A registred config name. varname (string): A Javascript variable name. element_id (string): An HTML element identifier (without leading ``#``) to attach to a CodeMirror instance. Keyword Arguments: assets (Bool): Adds needed assets before the HTML if ``True``, else only CodeMirror instance will be outputed. Default value is ``True``. Returns: string: HTML.
codesearchnet
def _remove_string_from_commastring(self, field, string):
    commastring = self.data.get(field, '')
    if string in commastring:
        self.data[field] = commastring.replace(string, '')
        return True
    return False
Remove a string from a comma separated list of strings Args: field (str): Field containing comma separated list string (str): String to remove Returns: bool: True if string removed or False if not
codesearchnet
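A small behavioral sketch for the comma-string helper above; `obj` stands in for any object exposing the `data` dict this method expects. Note it does plain substring replacement, so the delimiter is left behind.

obj.data = {'tags': 'health,education,water'}
print(obj._remove_string_from_commastring('tags', 'education'))  # True
print(obj.data['tags'])  # 'health,,water'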
def warning_handler(self, handler):
    # Per the docstring, this is a no-op when the DLL is already open,
    # so everything runs under the guard.
    if not self.opened():
        handler = handler or util.noop
        self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)
        self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)
Setter for the warning handler function. If the DLL is open, this function is a no-op, so it should be called prior to calling ``open()``. Args: self (JLink): the ``JLink`` instance handler (function): function to call on warning messages Returns: ``None``
juraj-google-style
def selected(self):
    query_results = self.map(lambda el: el.is_selected(), 'selected').results
    if query_results:
        return all(query_results)
    return False
Check whether all the matched elements are selected. Returns: bool
codesearchnet
def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group,
                                  vmss_name, link=None):
    if link is None:
        endpoint = ''.join([get_rm_endpoint(),
                            '/subscriptions/', subscription_id,
                            '/resourceGroups/', resource_group,
                            '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
                            '/virtualMachines?$expand=instanceView&$select=instanceView',
                            '&api-version=', COMP_API])
    else:
        endpoint = link
    return do_get(endpoint, access_token)
Gets one page of a paginated list of scale set VM instance views. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. link (str): Optional link to URI to get list (as part of a paginated API query). Returns: HTTP response. JSON body of list of VM instance views.
codesearchnet
def submit(self, func, *args, executors='all', fn_hash=None, cache=False, **kwargs):
    if self.cleanup_called:
        raise ValueError('Cannot submit to a DFK that has been cleaned up')
    task_id = self.task_count
    self.task_count += 1
    if isinstance(executors, str) and executors.lower() == 'all':
        choices = list(e for e in self.executors if e != 'data_manager')
    elif isinstance(executors, list):
        choices = executors
    executor = random.choice(choices)
    args, kwargs = self._add_input_deps(executor, args, kwargs)
    task_def = {'depends': None,
                'executor': executor,
                'func': func,
                'func_name': func.__name__,
                'args': args,
                'kwargs': kwargs,
                'fn_hash': fn_hash,
                'memoize': cache,
                'callback': None,
                'exec_fu': None,
                'checkpoint': None,
                'fail_count': 0,
                'fail_history': [],
                'env': None,
                'status': States.unsched,
                'id': task_id,
                'time_submitted': None,
                'time_returned': None,
                'app_fu': None}
    if task_id in self.tasks:
        raise DuplicateTaskError(
            'internal consistency error: Task {0} already exists in task list'.format(task_id))
    else:
        self.tasks[task_id] = task_def
    dep_cnt, depends = self._gather_all_deps(args, kwargs)
    self.tasks[task_id]['depends'] = depends
    task_stdout = kwargs.get('stdout')
    task_stderr = kwargs.get('stderr')
    logger.info('Task {} submitted for App {}, waiting on tasks {}'.format(
        task_id, task_def['func_name'], [fu.tid for fu in depends]))
    self.tasks[task_id]['task_launch_lock'] = threading.Lock()
    app_fu = AppFuture(tid=task_id, stdout=task_stdout, stderr=task_stderr)
    self.tasks[task_id]['app_fu'] = app_fu
    app_fu.add_done_callback(partial(self.handle_app_update, task_id))
    self.tasks[task_id]['status'] = States.pending
    logger.debug('Task {} set to pending state with AppFuture: {}'.format(
        task_id, task_def['app_fu']))
    for d in depends:
        def callback_adapter(dep_fut):
            self.launch_if_ready(task_id)
        try:
            d.add_done_callback(callback_adapter)
        except Exception as e:
            logger.error('add_done_callback got an exception {} which will be ignored'.format(e))
    self.launch_if_ready(task_id)
    return task_def['app_fu']
Add task to the dataflow system. If the app task has the executors attributes not set (default=='all') the task will be launched on a randomly selected executor from the list of executors. If the app task specifies a particular set of executors, it will be targeted at the specified executors. >>> IF all deps are met: >>> send to the runnable queue and launch the task >>> ELSE: >>> post the task in the pending queue Args: - func : A function object - *args : Args to the function KWargs : - executors (list or string) : List of executors this call could go to. Default='all' - fn_hash (Str) : Hash of the function and inputs Default=None - cache (Bool) : To enable memoization or not - kwargs (dict) : Rest of the kwargs to the fn passed as dict. Returns: (AppFuture) [DataFutures,]
codesearchnet
def install_package(tar_url, folder, md5_url='{tar_url}.md5',
                    on_download=lambda: None, on_complete=lambda: None):
    data_file = join(folder, basename(tar_url))
    md5_url = md5_url.format(tar_url=tar_url)
    try:
        remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]
    except (UnicodeDecodeError, URLError):
        raise ValueError('Invalid MD5 url: ' + md5_url)
    if remote_md5 != calc_md5(data_file):
        on_download()
        if isfile(data_file):
            try:
                with tarfile.open(data_file) as tar:
                    for i in reversed(list(tar)):
                        try:
                            os.remove(join(folder, i.path))
                        except OSError:
                            pass
            except (OSError, EOFError):
                pass
        download_extract_tar(tar_url, folder, data_file)
        on_complete()
        if remote_md5 != calc_md5(data_file):
            raise ValueError('MD5 url does not match tar: ' + md5_url)
        return True
    return False
Install or update a tar package that has an md5 Args: tar_url (str): URL of package to download folder (str): Location to extract tar. Will be created if doesn't exist md5_url (str): URL of md5 to use to check for updates on_download (Callable): Function that gets called when downloading a new update on_complete (Callable): Function that gets called when a new download is complete Returns: bool: Whether the package was updated
juraj-google-style
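A hedged usage sketch for the updater above; the URL, folder, and callbacks are illustrative placeholders, not a real package feed.

updated = install_package(
    'https://example.com/models/en-us.tar.gz',
    '/opt/models/en-us',
    on_download=lambda: print('fetching update...'),
    on_complete=lambda: print('update installed.'),
)
print('updated' if updated else 'already up to date')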
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
    hostname = None
    try:
        address = dns.reversename.from_address(ip_address)
        hostname = query_dns(address, "PTR", cache=cache,
                             nameservers=nameservers, timeout=timeout)[0]
    except dns.exception.DNSException:
        pass
    return hostname
Resolves an IP address to a hostname using a reverse DNS query Args: ip_address (str): The IP address to resolve cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS query timeout in seconds Returns: str: The reverse DNS hostname (if any)
juraj-google-style
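A hedged usage sketch; resolution depends on live DNS, so the outputs shown are only what these well-known example addresses typically return.

print(get_reverse_dns('1.1.1.1'))    # e.g. 'one.one.one.one'
print(get_reverse_dns('192.0.2.1'))  # None (documentation range, no PTR record)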
def delete_object(self, ref, delete_arguments=None):
    opts = self._get_request_options()
    if not isinstance(delete_arguments, dict):
        delete_arguments = {}
    url = self._construct_url(ref, query_params=delete_arguments)
    self._log_request('delete', url, opts)
    r = self.session.delete(url, **opts)
    self._validate_authorized(r)
    if r.status_code != requests.codes.ok:
        self._check_service_availability('delete', r, ref)
        raise ib_ex.InfobloxCannotDeleteObject(
            response=jsonutils.loads(r.content),
            ref=ref, content=r.content, code=r.status_code)
    return self._parse_reply(r)
Remove an Infoblox object Args: ref (str): Object reference delete_arguments (dict): Extra delete arguments Returns: The object reference of the removed object Raises: InfobloxException
juraj-google-style
def pcolls_from_streaming_cache(
        user_pipeline: beam.Pipeline,
        query_pipeline: beam.Pipeline,
        name_to_pcoll: Dict[str, beam.PCollection]) -> Dict[str, beam.PCollection]:

    def exception_handler(e):
        _LOGGER.error(str(e))
        return True

    cache_manager = ie.current_env().get_cache_manager(user_pipeline, create_if_absent=True)
    test_stream_service = ie.current_env().get_test_stream_service_controller(user_pipeline)
    if not test_stream_service:
        test_stream_service = TestStreamServiceController(
            cache_manager, exception_handler=exception_handler)
        test_stream_service.start()
        ie.current_env().set_test_stream_service_controller(user_pipeline, test_stream_service)
    tag_to_name = {}
    for name, pcoll in name_to_pcoll.items():
        key = CacheKey.from_pcoll(name, pcoll).to_str()
        tag_to_name[key] = name
    output_pcolls = query_pipeline | test_stream.TestStream(
        output_tags=set(tag_to_name.keys()),
        coder=cache_manager._default_pcoder,
        endpoint=test_stream_service.endpoint)
    sql_source = {}
    for tag, output in output_pcolls.items():
        name = tag_to_name[tag]
        output.element_type = name_to_pcoll[name].element_type
        sql_source[name] = output
    return sql_source
Reads PCollection cache through the TestStream. Args: user_pipeline: The beam.Pipeline object defined by the user in the notebook. query_pipeline: The beam.Pipeline object built by the magic to execute the SQL query. name_to_pcoll: PCollections with variable names used in the SQL query. Returns: A Dict[str, beam.PCollection], where each PCollection is tagged with their PCollection variable names, read from the cache. When the user_pipeline has unbounded sources, we force all cache reads to go through the TestStream even if they are bounded sources.
github-repos
def _validate_bool(value):
    if isinstance(value, six.text_type):
        if value.strip().lower() == 'true':
            value = True
        elif value.strip().lower() == 'false':
            value = False
        else:
            raise ValueError('"{}" must be a boolean ("True" or "False")'.format(value))
    if not isinstance(value, bool):
        raise ValueError('"{}" is not a boolean value.'.format(value))
    return value
Validate a setting is a bool. Returns: bool: The value as a boolean. Raises: ValueError: If the value can't be parsed as a bool string or isn't already bool.
codesearchnet
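Behavioral spot checks for the bool validator above.

print(_validate_bool(' true '))  # -> True (whitespace and case are ignored)
print(_validate_bool('False'))   # -> False
print(_validate_bool(True))      # -> True (an actual bool passes through)
_validate_bool('yes')            # raises ValueError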
def load_schema(schema_name, resolved=False):
    schema_data = ''
    with open(get_schema_path(schema_name, resolved)) as schema_fd:
        schema_data = json.loads(schema_fd.read())
    return schema_data
Load the given schema from wherever it's installed. Args: schema_name(str): Name of the schema to load, for example 'authors'. resolved(bool): If True will return the resolved schema, that is with all the $refs replaced by their targets. Returns: dict: the schema with the given name.
codesearchnet
def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):
    try:
        notify_info2('Updating: ', oldobj)
        if oldobj is newobj:
            return
        if type(oldobj) is not type(newobj):
            notify_error('Type of: %s changed... Skipping.' % (oldobj,))
            return
        if isinstance(newobj, types.FunctionType):
            self._update_function(oldobj, newobj)
            return
        if isinstance(newobj, types.MethodType):
            self._update_method(oldobj, newobj)
            return
        if isinstance(newobj, classmethod):
            self._update_classmethod(oldobj, newobj)
            return
        if isinstance(newobj, staticmethod):
            self._update_staticmethod(oldobj, newobj)
            return
        if hasattr(types, 'ClassType'):
            classtype = (types.ClassType, type)
        else:
            classtype = type
        if isinstance(newobj, classtype):
            self._update_class(oldobj, newobj)
            return
        if (hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and
                newobj.__metaclass__ == newobj.__class__):
            self._update_class(oldobj, newobj)
            return
        if namespace is not None:
            if (oldobj != newobj and str(oldobj) != str(newobj) and
                    repr(oldobj) != repr(newobj)):
                xreload_old_new = None
                if is_class_namespace:
                    xreload_old_new = getattr(namespace, '__xreload_old_new__', None)
                    if xreload_old_new is not None:
                        self.found_change = True
                        xreload_old_new(name, oldobj, newobj)
                elif '__xreload_old_new__' in namespace:
                    xreload_old_new = namespace['__xreload_old_new__']
                    xreload_old_new(namespace, name, oldobj, newobj)
                    self.found_change = True
    except:
        notify_error('Exception found when updating %s. Proceeding for other items.' % (name,))
        pydev_log.exception()
Update oldobj, if possible in place, with newobj. If oldobj is immutable, this simply returns newobj. Args: oldobj: the object to be updated newobj: the object used as the source for the update
codesearchnet
def CheckPrintf(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
    if match and match.group(2) != '0':
        error(filename, linenum, 'runtime/printf', 3,
              'If you can, use sizeof(%s) instead of %s as the 2nd arg to snprintf.'
              % (match.group(1), match.group(2)))
    if Search(r'\bsprintf\s*\(', line):
        error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.')
    match = Search(r'\b(strcpy|strcat)\s*\(', line)
    if match:
        error(filename, linenum, 'runtime/printf', 4,
              'Almost always, snprintf is better than %s' % match.group(1))
Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def create_bq_dataset(project, dataset_base_name):
    client = bigquery.Client(project=project)
    unique_dataset_name = '%s%d%s' % (dataset_base_name, int(time.time()), secrets.token_hex(3))
    dataset_ref = client.dataset(unique_dataset_name, project=project)
    dataset = bigquery.Dataset(dataset_ref)
    client.create_dataset(dataset)
    return dataset_ref
Creates an empty BigQuery dataset. Args: project: Project to work in. dataset_base_name: Prefix for dataset id. Returns: A ``google.cloud.bigquery.dataset.DatasetReference`` object pointing to the new dataset.
github-repos
def _update_triplestore(self, es_result, action_list, **kwargs):
    idx_time = XsdDatetime(datetime.datetime.utcnow())
    uri_keys = {}
    bnode_keys = {}
    for item in action_list:
        try:
            uri_keys[item['_id']] = item['_source']["uri"]
        except KeyError:
            bnode_keys[item['_id']] = item['_id']
    error_dict = {}
    error_bnodes = {}
    if es_result[1]:
        for result in es_result[1]:
            err_item = list(result.values())[0]
            try:
                error_dict[uri_keys.pop(err_item['_id'])] = \
                    XsdString(err_item['error']['reason'])
            except KeyError:
                error_bnodes[bnode_keys.pop(err_item['_id'])] = \
                    XsdString(err_item['error']['reason'])
    if uri_keys:
        # NOTE: the SPARQL template string literals were lost when this record
        # was extracted; "..." stands in for the original queries.
        sparql_good = "...".format(
            idx_time=idx_time.sparql,
            subj_list="<%s>" % ">\n<".join(uri_keys.values()))
        self.tstore_conn.update_query(sparql_good)
    if not error_dict:
        return
    sparql_error = "...".format(
        subj_list="<%s>" % ">\n<".join(error_dict.keys()))
    self.tstore_conn.update_query(sparql_error)
    del sparql_error
    sparql_update = "...".format(
        idx_time=idx_time.sparql,
        error_list="\n".join(["(<%s> %s)" % (key, val.sparql)
                              for key, val in error_dict.items()]))
    self.tstore_conn.update_query(sparql_update)
    del sparql_update
updates the triplestore with successes of saves and failures of indexing Args: ----- es_result: the elasticsearch result list action_list: list of elasticsearch action items that were indexed
juraj-google-style
def full_name(decl, with_defaults=True):
    if None is decl:
        raise RuntimeError('Unable to generate full name for None object!')
    if with_defaults:
        if not decl.cache.full_name:
            path = declaration_path(decl)
            if path == ['']:
                decl.cache.full_name = ''
            else:
                decl.cache.full_name = full_name_from_declaration_path(path)
        return decl.cache.full_name
    else:
        if not decl.cache.full_partial_name:
            path = partial_declaration_path(decl)
            if path == ['']:
                decl.cache.full_partial_name = ''
            else:
                decl.cache.full_partial_name = full_name_from_declaration_path(path)
        return decl.cache.full_partial_name
Returns declaration full qualified name. If `decl` belongs to an anonymous namespace or class, the function will return a C++-illegal qualified name. Args: decl (declaration_t): declaration for which the full qualified name should be calculated. Returns: str: full name of the declaration.
codesearchnet
def account_states(self, **kwargs):
    path = self._get_id_path('account_states')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
This method lets users get the status of whether or not the movie has been rated or added to their favourite or watch lists. A valid session id is required. Args: session_id: see Authentication. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def __field_to_parameter_type_and_format(self, field):
    variant = field.variant
    if variant == messages.Variant.MESSAGE:
        raise TypeError('A message variant can\'t be used in a parameter.')
    custom_variant_map = {
        messages.Variant.DOUBLE: ('number', 'double'),
        messages.Variant.FLOAT: ('number', 'float'),
        messages.Variant.INT64: ('string', 'int64'),
        messages.Variant.SINT64: ('string', 'int64'),
        messages.Variant.UINT64: ('string', 'uint64'),
        messages.Variant.INT32: ('integer', 'int32'),
        messages.Variant.SINT32: ('integer', 'int32'),
        messages.Variant.UINT32: ('integer', 'uint32'),
        messages.Variant.BOOL: ('boolean', None),
        messages.Variant.STRING: ('string', None),
        messages.Variant.BYTES: ('string', 'byte'),
        messages.Variant.ENUM: ('string', None),
    }
    return custom_variant_map.get(variant) or (variant.name.lower(), None)
Converts the field variant type into a tuple describing the parameter. Args: field: An instance of a subclass of messages.Field. Returns: A tuple with the type and format of the field, respectively. Raises: TypeError: if the field variant is a message variant.
juraj-google-style
def parse_global_args(argv):
    parser = create_parser()
    args = parser.parse_args(argv)
    should_log = args.include or args.exclude or (args.verbose > 0)
    verbosity = args.verbose
    root = logging.getLogger()
    if should_log:
        formatter = logging.Formatter(
            '%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',
            '%y-%m-%d %H:%M:%S')
        if args.logfile:
            handler = logging.FileHandler(args.logfile)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        if args.include and args.exclude:
            print('You cannot combine whitelisted (-i) and blacklisted (-e) loggers, '
                  'you must use one or the other.')
            sys.exit(1)
        loglevels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
        if verbosity >= len(loglevels):
            verbosity = len(loglevels) - 1
        level = loglevels[verbosity]
        if args.include:
            for name in args.include:
                logger = logging.getLogger(name)
                logger.setLevel(level)
                logger.addHandler(handler)
            root.addHandler(logging.NullHandler())
        else:
            for name in args.exclude:
                logger = logging.getLogger(name)
                logger.disabled = True
            root.setLevel(level)
            root.addHandler(handler)
    else:
        root.addHandler(logging.NullHandler())
    return args
Parse all global iotile tool arguments. Any flag based argument at the start of the command line is considered as a global flag and parsed. The first non flag argument starts the commands that are passed to the underlying hierarchical shell. Args: argv (list): The command line for this command Returns: Namespace: The parsed arguments, with all of the commands that should be executed in an iotile shell as the attribute 'commands'
codesearchnet
def extract_formats(config_handle):
    configurations = dict(config_handle)
    formats = dict(configurations.get('formats', {}))
    return formats
Get application formats. See :class:`gogoutils.Formats` for available options. Args: config_handle (configparser.ConfigParser): Instance of configurations. Returns: dict: Formats in ``{$format_type: $format_pattern}``.
codesearchnet
def base_path(self):
    path = self.request.path
    base_path = path[:path.rfind('/')]
    if not base_path.endswith('/command'):
        raise BadRequestPathError('Json handlers should have /command path prefix')
    return base_path[:base_path.rfind('/')]
Base path for all mapreduce-related urls. JSON handlers are mapped to /base_path/command/command_name thus they require special treatment. Raises: BadRequestPathError: if the path does not end with "/command". Returns: The base path.
codesearchnet
def get_current_and_head_revision(
        database_url: str,
        alembic_config_filename: str,
        alembic_base_dir: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]:
    head_revision = get_head_revision_from_alembic(
        alembic_config_filename=alembic_config_filename,
        alembic_base_dir=alembic_base_dir,
        version_table=version_table
    )
    log.info("Intended database version: {}", head_revision)
    current_revision = get_current_revision(
        database_url=database_url,
        version_table=version_table
    )
    log.info("Current database version: {}", current_revision)
    return current_revision, head_revision
Returns a tuple of ``(current_revision, head_revision)``; see :func:`get_current_revision` and :func:`get_head_revision_from_alembic`. Arguments: database_url: SQLAlchemy URL for the database alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions
juraj-google-style
def getVariable(self, name):
    return lock_and_call(
        lambda: Variable(self._impl.getVariable(name)),
        self._lock
    )
Get the variable with the corresponding name. Args: name: Name of the variable to be found. Raises: TypeError: if the specified variable does not exist.
juraj-google-style
def plot_spectrum(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):
    if self.header[b'nbits'] <= 2:
        logged = False
        t = 'all'
    ax = plt.gca()
    plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
    if self.header[b'foff'] < 0:
        plot_data = plot_data[..., ::-1]
        plot_f = plot_f[::-1]
    if isinstance(t, int):
        print("extracting integration %i..." % t)
        plot_data = plot_data[t]
    elif t == 'all':
        print("averaging along time axis...")
        if len(plot_data.shape) > 1:
            plot_data = plot_data.mean(axis=0)
        else:
            plot_data = plot_data.mean()
    else:
        raise RuntimeError("Unknown integration %s" % t)
    dec_fac_x = 1
    if plot_data.shape[0] > MAX_PLT_POINTS:
        dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)
    plot_data = rebin(plot_data, dec_fac_x, 1)
    plot_f = rebin(plot_f, dec_fac_x, 1)
    if not c:
        # NOTE: the default color literal was truncated in this record;
        # the value below is a reconstruction.
        kwargs['c'] = '#333333'
    if logged:
        plt.plot(plot_f, db(plot_data), label='Stokes I', **kwargs)
        plt.ylabel("Power [dB]")
    else:
        plt.plot(plot_f, plot_data, label='Stokes I', **kwargs)
        plt.ylabel("Power [counts]")
    plt.xlabel("Frequency [MHz]")
    plt.legend()
    try:
        plt.title(self.header[b'source_name'])
    except KeyError:
        plt.title(self.filename)
    plt.xlim(plot_f[0], plot_f[-1])
Plot frequency spectrum of a given file Args: t (int): integration number to plot (0 -> len(data)) logged (bool): Plot in linear (False) or dB units (True) if_id (int): IF identification (if multiple IF signals in file) c: color for line kwargs: keyword args to be passed to matplotlib plot()
juraj-google-style
def _validate_clientsecrets(clientsecrets_dict):
    # NOTE: the URL in this message was truncated in the source record.
    _INVALID_FILE_FORMAT_MSG = 'Invalid file format. See https:'
    if clientsecrets_dict is None:
        raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)
    try:
        (client_type, client_info), = clientsecrets_dict.items()
    except (ValueError, AttributeError):
        raise InvalidClientSecretsError(
            _INVALID_FILE_FORMAT_MSG +
            ' Expected a JSON object with a single property for a "web" or '
            '"installed" application')
    if client_type not in VALID_CLIENT:
        raise InvalidClientSecretsError('Unknown client type: {0}.'.format(client_type))
    for prop_name in VALID_CLIENT[client_type]['required']:
        if prop_name not in client_info:
            raise InvalidClientSecretsError(
                'Missing property "{0}" in a client type of "{1}".'.format(
                    prop_name, client_type))
    for prop_name in VALID_CLIENT[client_type]['string']:
        if client_info[prop_name].startswith('[['):
            raise InvalidClientSecretsError('Property "{0}" is not configured.'.format(prop_name))
    return client_type, client_info
Validate parsed client secrets from a file. Args: clientsecrets_dict: dict, a dictionary holding the client secrets. Returns: tuple, a string of the client type and the information parsed from the file.
codesearchnet
def __init__(self, controller,
             device_initializer=lambda sc, idcode: JTAGDevice(sc, idcode),
             ignore_jtag_enabled=False, debug=False,
             collect_compiler_artifacts=False,
             collect_compiler_merge_artifacts=False,
             print_statistics=False):
    self._debug = debug
    self._collect_compiler_artifacts = collect_compiler_artifacts
    self._collect_compiler_merge_artifacts = collect_compiler_merge_artifacts
    self._print_statistics = print_statistics
    self._fitted_lv1_prim_cache = {}
    self._devices = []
    self._hasinit = False
    self._sm = JTAGStateMachine()
    self._ignore_jtag_enabled = ignore_jtag_enabled
    self._desired_speed = None
    self.initialize_device_from_id = device_initializer
    self.get_descriptor_for_idcode = jtagDeviceDescription.get_descriptor_for_idcode
    if isinstance(controller, InaccessibleController):
        raise DevicePermissionDeniedError()
    self._controller = controller
    self._controller._scanchain = self
    self._command_queue = CommandQueue(self)
    default_prims = {RunInstruction, TransitionTAP, RWReg, RWDR, RWIR,
                     Sleep, RWDevDR, RWDevIR}
    self._chain_primitives = {}
    self._device_primitives = {}
    self._lv1_chain_primitives = []
    for prim in default_prims:
        assert issubclass(prim, Primitive)
        if issubclass(prim, DeviceTarget):
            self._device_primitives[prim._function_name] = prim
        else:
            self._chain_primitives[prim._function_name] = prim
    for prim in self._controller._primitives:
        if not issubclass(prim, Primitive):
            raise Exception("Registered Controller Prim has unknown type. (%s)" % prim)
        if issubclass(prim, DeviceTarget):
            self._device_primitives[prim._function_name] = prim
        else:
            self._chain_primitives[prim._function_name] = prim
        if issubclass(prim, Level1Primitive):
            self._lv1_chain_primitives.append(prim)
    for func_name, prim in self._chain_primitives.items():
        if not self._gen_prim_adder(prim):
            raise Exception("Failed adding primitive %s, primitive with name %s "
                            "already exists on scanchain" % (prim, prim._function_name))
Create a new JTAGScanChain to track and control a real chain. Args: controller: The CableDriver that this ScanChain will control. device_initializer: A callable that can map a (JTAGScanChain, Bitarray) to an instance of a JTAGDevice (Allows custom classes to be used). ignore_jtag_enabled: A boolean on if errors should be ignored when JTAG is already enabled on the controller. debug: A boolean to enable extra debug printing.
juraj-google-style
def __init__(self, address="0.0.0.0/32", netmask=None):
    if '/' in address:
        address, netmask = address.split('/')
    else:
        netmask = 32 if netmask is None else netmask
    super().__init__(address)
    self.netmask = int(netmask)
Create an IPAddress with the parameters below. Args: address (str): IP Address using ipv4. Defaults to '0.0.0.0/32' netmask (str or int, optional): network mask as a CIDR prefix length; ignored when address already carries a '/netmask' suffix. Defaults to 32
juraj-google-style
def replace_urls(status):
    text = status.text
    if not has_url(status):
        return text
    urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']]
    # Replace from the end of the text backwards so earlier indices stay valid.
    urls.sort(key=lambda x: x[0][0], reverse=True)
    for (start, end), url in urls:
        text = text[:start] + url + text[end:]
    return text
Replace shorturls in a status with expanded urls. Args: status (tweepy.status): A tweepy status object Returns: str
codesearchnet
def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):
    diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)
    return np.maximum(diff - min_diff, 0).astype(dtype)
Point-wise, hinge loss-like, difference between arrays. Args: arr1: integer array to compare. arr2: integer array to compare. min_diff: minimal difference taken into consideration. dtype: dtype of returned array. Returns: array
juraj-google-style
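A quick numeric illustration of the hinge-style difference above (note the function uses np.int, so it assumes a NumPy version where that alias still exists).

import numpy as np

a = np.array([100, 120, 130], dtype=np.uint8)
b = np.array([100, 105, 160], dtype=np.uint8)
# Absolute differences are 0, 15, 30; subtracting min_diff=10 and clamping
# at zero yields:
print(absolute_hinge_difference(a, b, min_diff=10))  # -> [ 0  5 20]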
def mask_to_rgb(self, image: np.ndarray, palette: Optional[List[Tuple[int, int]]] = None,
                data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
    return mask_to_rgb(image, palette=palette, data_format=data_format)
Converts a segmentation map to RGB format. Args: image (`np.ndarray`): Segmentation map with dimensions (height, width) where pixel values represent the class index. palette (`List[Tuple[int, int]]`, *optional*, defaults to `None`): Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel dimension. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The mask in RGB format.
github-repos
def has_sample(self, md5):
    sample = self.get_sample(md5)
    return True if sample else False
Checks if data store has this sample. Args: md5: The md5 digest of the required sample. Returns: True if sample with this md5 is present, else False.
codesearchnet
def number(digit):
    spoken = str(digit)
    if spoken.startswith('8') or spoken[:len(spoken) % 3] == '11':
        article = 'an '
    else:
        article = 'a '
    if spoken.endswith('1') and spoken != '11':
        suffix = 'st'
    elif spoken.endswith('2') and spoken != '12':
        suffix = 'nd'
    elif spoken.endswith('3') and spoken != '13':
        suffix = 'rd'
    else:
        suffix = 'th'
    if digit > 999:
        # 'or 3' keeps the leading group non-empty when the digit count is a
        # multiple of three, avoiding a stray leading comma (e.g. ',123,456').
        prefix = len(spoken) % 3 or 3
        separated = spoken[:prefix]
        for n in range(prefix, len(spoken), 3):
            separated += ',' + spoken[n:n + 3]
        spoken = separated
    return article + spoken + suffix
Gets a spoken-word representation for a number. Arguments: digit (int): An integer to convert into spoken-word. Returns: A spoken-word representation for a digit, including an article ('a' or 'an') and a suffix, e.g. 1 -> 'a 1st', 11 -> "an 11th". Additionally, digits are grouped in threes with commas for values > 999.
codesearchnet
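A few illustrative calls (these follow directly from the rules in the function body):

number(1)        # -> 'a 1st'
number(11)       # -> 'an 11th'
number(8)        # -> 'an 8th'  ('an' before the '8' sound)
number(1234567)  # -> 'a 1,234,567th'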
def validate_definition(self, definition_name, dict_to_test, definition=None): if ((definition_name not in self.specification['definitions'].keys()) and (definition is None)): return False spec_def = (definition or self.specification['definitions'][definition_name]) all_required_keys_present = all(((req in dict_to_test.keys()) for req in spec_def.get('required', {}))) if (('required' in spec_def) and (not all_required_keys_present)): return False properties_dict = spec_def.get('properties', {}) for (key, value) in dict_to_test.items(): if (value is not None): if (key not in properties_dict): return False elif (not self._validate_type(properties_dict[key], value)): return False return True
Validate the given dict according to the given definition. Args: definition_name: name of the definition to look up in the specification. dict_to_test: dict to test. definition (dict, optional): definition to validate against directly, bypassing the lookup by ``definition_name``. Returns: True if the given dict matches the definition, False otherwise.
codesearchnet
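A sketch of how this might be exercised, assuming ``validator.specification`` holds a minimal spec and ``_validate_type`` performs the usual type check (both are assumptions about the surrounding class):

spec = {'definitions': {'Pet': {'required': ['name'],
                                'properties': {'name': {'type': 'string'},
                                               'age': {'type': 'integer'}}}}}
validator.specification = spec  # hypothetical instance set-up
validator.validate_definition('Pet', {'name': 'Rex', 'age': 3})  # -> True
validator.validate_definition('Pet', {'age': 3})                 # -> False (missing required 'name')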
def int64_user_distribution(namespace, name, metric, ptransform=None) -> metrics_pb2.MonitoringInfo: labels = create_labels(ptransform=ptransform, namespace=namespace, name=name) payload = _encode_distribution(coders.VarIntCoder(), metric.count, metric.sum, metric.min, metric.max) return create_monitoring_info(USER_DISTRIBUTION_URN, DISTRIBUTION_INT64_TYPE, payload, labels)
Return the distribution monitoring info for the namespace, name, metric and labels. Args: namespace: User-defined namespace of the metric. name: Name of the metric. metric: The DistributionData for the metric. ptransform: The ptransform id used as a label.
github-repos
def get_relative_import_files(module_file: Union[str, os.PathLike]) -> list[str]: no_change = False files_to_check = [module_file] all_relative_imports = [] while not no_change: new_imports = [] for f in files_to_check: new_imports.extend(get_relative_imports(f)) module_path = Path(module_file).parent new_import_files = [str(module_path / m) for m in new_imports] new_import_files = [f for f in new_import_files if f not in all_relative_imports] files_to_check = [f'{f}.py' for f in new_import_files] no_change = len(new_import_files) == 0 all_relative_imports.extend(files_to_check) return all_relative_imports
Get the list of all files that are needed for a given module. Note that this function recurses through the relative imports (if a imports b and b imports c, it will return module files for b and c). Args: module_file (`str` or `os.PathLike`): The module file to inspect. Returns: `list[str]`: The list of all relative imports a given module needs (recursively), which will give us the list of module files a given module needs.
github-repos
def to_tensor_7(self) -> torch.Tensor: tensor = self._trans.new_zeros((*self.shape, 7)) tensor[..., :4] = self._rots.get_quats() tensor[..., 4:] = self._trans return tensor
Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the translation. Returns: A [*, 7] tensor representation of the transformation
github-repos
def loads(self, config_str, as_defaults=False): self._rw.load_config_from_string(self._config, config_str, as_defaults=as_defaults)
Load configuration values from the specified source string. Args: config_str (str): string containing the configuration to parse. as_defaults (bool): if ``True``, contents of ``config_str`` will be treated as schema of configuration items.
juraj-google-style
class RunAggregationStrategy(beam.PTransform[beam.PCollection[NestedKeyedOutputT], beam.PCollection[NestedKeyedOutputT]]): def __init__(self, aggregation_strategy: Optional[AggregationFn], agg_model_id: str): self._aggregation_fn = aggregation_strategy self._agg_model_id = agg_model_id def expand(self, input: beam.PCollection[NestedKeyedOutputT]) -> beam.PCollection[NestedKeyedOutputT]: post_gbk = input | beam.MapTuple(lambda k, v: ((k, v[0]), v[1])) | beam.GroupByKey() if self._aggregation_fn is None: ret = post_gbk | beam.MapTuple(lambda k, v: (k[0], (k[1], AnomalyResult(example=v[0].example, predictions=[prediction for result in v for prediction in result.predictions])))) return ret aggregation_fn_spec = self._aggregation_fn.to_spec() aggregation_fn_spec.config['_run_init'] = True aggregation_fn = Specifiable.from_spec(aggregation_fn_spec) if isinstance(aggregation_fn, aggregations._AggModelIdMixin): aggregation_fn._set_agg_model_id_if_unset(self._agg_model_id) ret = post_gbk | beam.MapTuple(lambda k, v, agg=aggregation_fn: (k[0], (k[1], AnomalyResult(example=v[0].example, predictions=[agg.apply([prediction for result in v for prediction in result.predictions])])))) return ret
Applies an aggregation strategy to grouped anomaly detection results. This PTransform aggregates anomaly predictions from multiple models or data points using an `AggregationFn`. It handles both custom and simple aggregation strategies. Args: aggregation_strategy: The `AggregationFn` to use. agg_model_id: The model ID for aggregation.
github-repos
def _update_size(self, size, future):
    with self._size_lock:
        # future.done is a method on concurrent.futures.Future; it must be
        # called, otherwise the bound method object is always truthy.
        if size > self._size and future.done():
            self._size = size
Keep track of the file size during writing. If specified size value is greater than the current size, update the current size using specified value. Used as callback in default "_flush" implementation for files supporting random write access. Args: size (int): Size value. future (concurrent.futures._base.Future): future.
codesearchnet
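A hedged sketch of the callback contract (note the fixed future.done() call): the tracked size only advances once the corresponding flush future has completed.

from concurrent.futures import Future
f = Future()
f.set_result(None)            # flush finished
writer._update_size(1024, f)  # hypothetical writer instance; _size becomes 1024 if it was smaller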
async def on_message(message): server = message.server author = message.author channel = message.channel content = message.content data = datatools.get_data() if server is not None and author != channel.server.me: prefix = data["discord"]["servers"][server.id]["prefix"] if channel.server.me in message.mentions: await client.send_typing(channel) response = "The current server prefix is `{0}`. Type `{0}help` for help.".format(prefix) await client.send_message(channel, response) if content.startswith(prefix): package = content.split(" ") command = package[0][len(prefix):] args = package[1:] arg = ' '.join(args) if command not in ["prefix", "activate", "deactivate", "warnmax", "warn", "ban"]: return is_admin = author == server.owner for role in message.author.roles: if role.permissions.administrator: is_admin = True if not is_admin: await client.send_typing(channel) reason = "You must have a role that has the permission 'Administrator'" embed = ui_embed.error(channel, "Insufficient Permissions", reason) await embed.send() return if command == "prefix" and args: new_prefix = arg.replace(" ", "").strip() data["discord"]["servers"][server.id]["prefix"] = new_prefix datatools.write_data(data) await client.send_typing(channel) embed = ui_embed.modify_prefix(channel, new_prefix) await embed.send() if command == "warnmax" and args: try: warn_max = int(arg) if warn_max > 0: data["discord"]["servers"][server.id][_data.modulename]["warnings_max"] = warn_max datatools.write_data(data) await client.send_typing(channel) embed = ui_embed.warning_max_changed(channel, warn_max) await embed.send() else: reason = "Maximum warnings must be greater than 0" embed = ui_embed.error(channel, "Error", reason) await embed.send() except (ValueError, TypeError): reason = "Warning maximum must be a number" embed = ui_embed.error(channel, "Error", reason) await embed.send() except Exception as e: logger.exception(e) if command == "warn" and args: for user in message.mentions: await api_manager.warn_user(channel, user) if command == "ban" and args: for user in message.mentions: await api_manager.ban_user(channel, user) if command == "activate" and args: await api_manager.activate_module(channel, arg, True) elif command == "deactivate" and args: await api_manager.activate_module(channel, arg, False)
The on_message event handler for this module Args: message (discord.Message): Input message
juraj-google-style
def __init__(self, key, iv): self.key = key.RawBytes() self.iv = iv.RawBytes()
Init. Args: key: The key, a rdf_crypto.EncryptionKey instance. iv: The iv, a rdf_crypto.EncryptionKey instance.
juraj-google-style
def list_datasets(self, get_global_public):
    appending = ''
    if get_global_public:
        appending = 'public'
    url = self.url() + '/resource/{}dataset/'.format(appending)
    req = self.remote_utils.get_url(url)
    # Compare status codes by value; 'is not' checks identity, not equality.
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    return req.json()
Lists datasets in resources. Setting 'get_global_public' to 'True' will retrieve all public datasets in cloud. 'False' will get user's public datasets. Arguments: get_global_public (bool): True if user wants all public datasets in cloud. False if user wants only their public datasets. Returns: dict: Returns datasets in JSON format
codesearchnet
def create_chebyshev_samples(order, dim=1): x_data = .5*numpy.cos(numpy.arange(order, 0, -1)*numpy.pi/(order+1)) + .5 x_data = chaospy.quad.combine([x_data]*dim) return x_data.T
Chebyshev sampling function. Args: order (int): The number of samples to create along each axis. dim (int): The number of dimensions to create samples for. Returns: samples following Chebyshev sampling scheme mapped to the ``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.
juraj-google-style
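For order=3 in one dimension the nodes are 0.5*cos(k*pi/4) + 0.5 for k = 3, 2, 1 (a sketch; the exact output shape depends on chaospy.quad.combine):

create_chebyshev_samples(3)
# array([[0.14644661, 0.5       , 0.85355339]])   shape (1, 3) == (dim, order)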
def get_key_by_job_id(cls, mapreduce_id): return db.Key.from_path(cls.kind(), ('%s:%s' % (mapreduce_id, cls._KEY_NAME)))
Retrieves the Key for a mapreduce ID. Args: mapreduce_id: The job to fetch. Returns: Datastore Key for the command for the given job ID.
codesearchnet
def verify_signature(public_key, signature, hash, hash_algo): hash_algo = _hash_algorithms[hash_algo] try: return (get_publickey(public_key).verify(signature, hash, padding.PKCS1v15(), utils.Prehashed(hash_algo)) is None) except InvalidSignature: return False
Verify the given signature is correct for the given hash and public key. Args: public_key (str): PEM encoded public key signature (bytes): signature to verify hash (bytes): hash of data hash_algo (str): hash algorithm used Returns: True if the signature is valid, False otherwise
codesearchnet
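A sketch of the expected calling convention: the caller pre-hashes the data and names the algorithm. This assumes 'sha256' is a key in the module's _hash_algorithms table and that pem_public_key and signature were produced elsewhere.

import hashlib
digest = hashlib.sha256(b'payload').digest()
verify_signature(pem_public_key, signature, digest, 'sha256')  # -> True if the signature matches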
def csv_to_numpy(string_like, dtype=None): stream = StringIO(string_like) return np.genfromtxt(stream, dtype=dtype, delimiter=',')
Convert a CSV object to a numpy array. Args: string_like (str): CSV string. dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. This argument can only be used to 'upcast' the array. For downcasting, use the .astype(t) method. Returns: (np.array): numpy array
codesearchnet
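For example:

csv_to_numpy('1,2,3\n4,5,6', dtype=int)
# array([[1, 2, 3],
#        [4, 5, 6]])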
def get_converter(in_type, out_type, *args, **kwargs): convs = pliers.converters.__all__ out_type = listify(out_type)[::(- 1)] default_convs = config.get_option('default_converters') for ot in out_type: conv_str = ('%s->%s' % (in_type.__name__, ot.__name__)) if (conv_str in default_convs): convs = (list(default_convs[conv_str]) + convs) for name in convs: cls = getattr(pliers.converters, name) if (not issubclass(cls, Converter)): continue available = (cls.available if issubclass(cls, EnvironmentKeyMixin) else True) if ((cls._input_type == in_type) and (cls._output_type in out_type) and available): conv = cls(*args, **kwargs) return conv return None
Scans the list of available Converters and returns an instantiation of the first one whose input and output types match those passed in. Args: in_type (type): The type of input the converter must have. out_type (type): The type of output the converter must have. args, kwargs: Optional positional and keyword arguments to pass onto matching Converter's initializer. Returns: An instance of the first matching Converter, or None if no available Converter matches.
codesearchnet
def convert_to_row_table(self, add_units=True):
    rtable = []
    if add_units:
        relavent_units = self.get_relavent_units()
    for row_index in range(self.start[0], self.end[0]):
        for column_index in range(self.start[1], self.end[1]):
            cell = self.table[row_index][column_index]
            # 'long' does not exist in Python 3; int covers arbitrary precision.
            if cell is not None and isinstance(cell, (int, float)):
                titles = self._find_titles(row_index, column_index)
                titles.append(cell)
                if add_units:
                    titles.append(relavent_units.get((row_index, column_index)))
                rtable.append(titles)
    if not rtable:
        for row_index in range(self.start[0], self.end[0]):
            row = []
            rtable.append(row)
            for column_index in range(self.start[1], self.end[1]):
                row.append(self.table[row_index][column_index])
                if add_units:
                    row.append(relavent_units.get((row_index, column_index)))
    return rtable
Converts the block into row titled elements. These elements are copied into the return table, which can be much longer than the original block. Args: add_units: Indicates if units should be appended to each row item. Returns: A row-titled table representing the data in the block.
codesearchnet
def lowres_tensor(shape, underlying_shape, offset=None, sd=None): sd = (sd or 0.01) init_val = (sd * np.random.randn(*underlying_shape).astype('float32')) underlying_t = tf.Variable(init_val) t = resize_bilinear_nd(underlying_t, shape) if (offset is not None): if (not isinstance(offset, list)): offset = (len(shape) * [offset]) for n in range(len(offset)): if (offset[n] is True): offset[n] = ((shape[n] / underlying_shape[n]) / 2) if (offset[n] is False): offset[n] = 0 offset[n] = int(offset[n]) padding = [(pad, 0) for pad in offset] t = tf.pad(t, padding, 'SYMMETRIC') begin = (len(shape) * [0]) t = tf.slice(t, begin, shape) return t
Produces a tensor parameterized by an interpolated lower resolution tensor. This is like what is done in a laplacian pyramid, but a bit more general. It can be a powerful way to describe images. Args: shape: desired shape of resulting tensor underlying_shape: shape of the tensor being resized into final tensor offset: Describes how to offset the interpolated vector (like phase in a Fourier transform). If None, apply no offset. If a scalar, apply the same offset to each dimension; if a list use each entry for each dimension. If an int, offset by that much. If False, do not offset. If True, offset by half the ratio between shape and underlying shape (analogous to 90 degrees). sd: Standard deviation of initial tensor variable. Returns: A tensor parameterized by a lower resolution tensorflow variable.
codesearchnet
def _parse_control_fields(self, fields, tag_id='tag'): for field in fields: params = field.params if (tag_id not in params): continue self.controlfields[params[tag_id]] = field.getContent().strip()
Parse control fields. Args: fields (list): list of HTMLElements tag_id (str): parameter name which holds the field name; this is normally "tag", but "id" in case of oai_marc.
codesearchnet
def get_public_api(api_mapping_files: Sequence[str], file_prefixes_to_strip: Sequence[str], packages_to_ignore: Sequence[str], output_package: str, module_prefix: str) -> PublicAPI: ea = exported_api.ExportedApi() for f in api_mapping_files: ea.read(f) v1_entrypoints_by_module = collections.defaultdict(set) v2_entrypoints_by_module = collections.defaultdict(set) def add_exported_symbols(api_names: list[str], s: exported_api.ExportedSymbol, entrypoints_by_module: Mapping[str, set[_Entrypoint]]): for api_name in api_names: index_of_last_dot = api_name.rfind('.') index_of_first_dot = api_name.find('.') module = output_package if index_of_first_dot + 1 < index_of_last_dot: module += f'.{api_name[index_of_first_dot + 1:index_of_last_dot]}' name = api_name[index_of_last_dot + 1:] entrypoints_by_module[module].add(_Entrypoint(module, name, s)) for s in ea.symbols: if _should_skip_file(s.file_name, file_prefixes_to_strip, packages_to_ignore, module_prefix): continue add_exported_symbols(s.v1_apis, s, v1_entrypoints_by_module) add_exported_symbols(s.v2_apis, s, v2_entrypoints_by_module) v1_generated_imports_by_module = collections.defaultdict(set) v2_generated_imports_by_module = collections.defaultdict(set) def add_generated_imports(entrypoints_by_module: Mapping[str, set[_Entrypoint]], generated_imports_by_module: Mapping[str, set[str]]): for module in entrypoints_by_module: i = module.rfind('.') if i == -1: continue while i != -1: parent = module[:i] generated_imports_by_module[parent].add(module) module = parent i = module.rfind('.') add_generated_imports(v1_entrypoints_by_module, v1_generated_imports_by_module) add_generated_imports(v2_entrypoints_by_module, v2_generated_imports_by_module) docs_by_module = {} for d in ea.docs: for m in d.modules: if m in docs_by_module: raise DocExportedTwiceError(f'Docstring at {d.file_name}:{d.line_no} is registered for {m}, which already has a registered docstring.') docs_by_module[m] = d.docstring return PublicAPI(v1_entrypoints_by_module=v1_entrypoints_by_module, v2_entrypoints_by_module=v2_entrypoints_by_module, v1_generated_imports_by_module=v1_generated_imports_by_module, v2_generated_imports_by_module=v2_generated_imports_by_module, docs_by_module=docs_by_module)
Generates the structure of the public API from the given files. Args: api_mapping_files: List of files containing the exported API mappings and docstrings. file_prefixes_to_strip: A list of prefixes to strip from files when determining the packages to ignore. packages_to_ignore: A list of python packages that should be ignored when searching for tf_exports. output_package: The package to use for the imports. module_prefix: A prefix to add to the non-generated imports. Raises: DocExportedTwiceError: Two docstrings are registered for the same module. Returns: The public API structure.
github-repos
def valid_config_exists(config_path=CONFIG_PATH): if os.path.isfile(config_path): try: config = read_config(config_path) check_config(config) except (ConfigurationError, IOError): return False else: return False return True
Verify that a valid config file exists. Args: config_path (str): Path to the config file. Returns: boolean: True if there is a valid config file, false if not.
juraj-google-style
def default_pass_manager_simulator(basis_gates): pass_manager = PassManager() pass_manager.append(Unroller(basis_gates)) pass_manager.append([RemoveResetInZeroState(), Depth(), FixedPoint('depth')], do_while=(lambda property_set: (not property_set['depth_fixed_point']))) return pass_manager
The default pass manager without a coupling map. Args: basis_gates (list[str]): list of basis gate names to unroll to. Returns: PassManager: A passmanager that just unrolls, without any optimization.
codesearchnet
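A usage sketch against the Qiskit API of this era (circuit stands for any QuantumCircuit; treated here as an assumption):

pm = default_pass_manager_simulator(['u1', 'u2', 'u3', 'cx'])
unrolled = pm.run(circuit)  # unrolls to the basis, strips resets in |0>, iterates to a depth fixed point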
def decode(self, tx): if (not isinstance(self._service, BitcoinBlockrService)): raise NotImplementedError('Currently only supported for "blockr.io"') return self._service.decode(tx)
Decodes the given transaction. Args: tx: hex of transaction Returns: decoded transaction .. note:: Only supported for blockr.io at the moment.
codesearchnet
def are_same(self, path_1, path_2): if (path_1 == path_2): return True repo_1 = self.get_repository(path_1) repo_2 = self.get_repository(path_2) return (repo_1.uid == repo_2.uid)
Test that `path_1` and `path_2` refer to the same repository. This is more reliable than testing that the strings match, since slightly different strings might refer to the same repository (consider small differences in a filesystem path for example, eg '//svr/foo', '/svr/foo'). Returns: True if the paths refer to the same repository, False otherwise.
codesearchnet
def potential_purviews(self, direction, mechanism): all_purviews = utils.powerset(self._node_indices) return irreducible_purviews(self.cm, direction, mechanism, all_purviews)
All purviews which are not clearly reducible for mechanism. Args: direction (Direction): |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism which all purviews are checked for reducibility over. Returns: list[tuple[int]]: All purviews which are irreducible over ``mechanism``.
codesearchnet
def _create_per_worker_resources(self, fn, args=None, kwargs=None): results = [] for w in self._cluster.workers: results.append(w.create_resource(fn, args=args, kwargs=kwargs)) return PerWorkerValues(tuple(results))
Synchronously create resources on the workers. The resources are represented by `tf.distribute.experimental.coordinator.RemoteValue`s. Args: fn: The function to be dispatched to all workers for execution asynchronously. args: Positional arguments for `fn`. kwargs: Keyword arguments for `fn`. Returns: A `tf.distribute.experimental.coordinator.PerWorkerValues` object, which wraps a tuple of `tf.distribute.experimental.coordinator.RemoteValue` objects.
github-repos
def repository_compare(self, from_, to, **kwargs): path = '/projects/%s/repository/compare' % self.get_id() query_data = {'from': from_, 'to': to} return self.manager.gitlab.http_get(path, query_data=query_data, **kwargs)
Return a diff between two branches/commits. Args: from_ (str): Source branch/SHA to (str): Destination branch/SHA **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server failed to perform the request Returns: str: The diff
juraj-google-style
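A sketch with python-gitlab; the response shape below follows GitLab's compare endpoint and is an assumption here:

diff = project.repository_compare('main', 'feature')
for change in diff.get('diffs', []):
    print(change['new_path'])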
def create_column_token_type_ids_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]: table_column_ids = list(zip(*table_values))[1] if table_values else [] return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)
Creates the column token type IDs according to the query token IDs and a list of table values. Args: query_ids (`List[int]`): list of token IDs corresponding to the query. table_values (`List[TableValue]`): list of table values, which are named tuples containing the token value, the column ID and the row ID of said token. Returns: `List[int]`: List of ints containing the column token type IDs values.
github-repos
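A small worked example; tokenizer is assumed to be an instance of the class defining this method, and TableValue fields are assumed to be (token, column_id, row_id) as the docstring implies:

query_ids = [5, 6]
table_values = [TableValue('a', 1, 1), TableValue('b', 2, 1)]
tokenizer.create_column_token_type_ids_from_sequences(query_ids, table_values)
# -> [0, 0, 0, 0, 1, 2]   (zeros for [CLS] + query + [SEP], then the column ids)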
def log_deprecated(name="", text="", eos=""):
    assert name or text
    if eos:
        eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
    if name:
        if eos:
            warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
        else:
            warn_msg = "%s was deprecated. %s" % (name, text)
    else:
        warn_msg = text
        if eos:
            warn_msg += " Legacy period ends %s" % eos
    # logging.Logger.warn is a deprecated alias of warning.
    logger.warning("[Deprecated] " + warn_msg)
Log deprecation warning. Args: name (str): name of the deprecated item. text (str, optional): information about the deprecation. eos (str, optional): end of service date such as "YYYY-MM-DD".
juraj-google-style
def compile_sgf(in_path, optimize=True, model=None): if model is None: model = DeviceModel() parser = SensorGraphFileParser() parser.parse_file(in_path) parser.compile(model) if optimize: opt = SensorGraphOptimizer() opt.optimize(parser.sensor_graph, model=model) return parser.sensor_graph
Compile and optionally optimize an SGF file. Args: in_path (str): The input path to the sgf file to compile. optimize (bool): Whether to optimize the compiled result, defaults to True if not passed. model (DeviceModel): Optional device model if we are compiling for a nonstandard device. Normally you should leave this blank. Returns: SensorGraph: The compiled sensorgraph object
juraj-google-style
def cast_naive_datetime_to_tz(dt, tz=UTC()): if has_tz(dt): return dt return dt.replace(tzinfo=tz)
If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it unmodified. Args: dt : datetime tz-naive or tz-aware datetime. tz : datetime.tzinfo The timezone to which to adjust tz-naive datetime. Returns: datetime tz-aware datetime. Warning: This will change the actual moment in time that is represented if the datetime is naive and represents a date and time not in ``tz``. See Also: ``normalize_datetime_to_utc()``
codesearchnet
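For example (using the stdlib timezone as a stand-in for the module's UTC helper; has_tz is assumed to report whether tzinfo is set):

from datetime import datetime, timezone
cast_naive_datetime_to_tz(datetime(2020, 1, 1))                        # naive -> tz attached (UTC by default)
cast_naive_datetime_to_tz(datetime(2020, 1, 1, tzinfo=timezone.utc))   # aware -> returned unmodified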
def __init__(self, test=None, t_node=None, f_node=None): self.test = test self.t_node = t_node self.f_node = f_node
Construct a BoolTree object Args: test (bool): test for whether to traverse the true-node or the false-node (`BoolTree.t_node` or `BoolTree.f_node`) t_node (BoolTree/Int): node to follow if test is `True` f_node (BoolTree/Int): node to follow if test is `False`
juraj-google-style
def _make_lcdproc(lcd_host, lcd_port, retry_config, charset=DEFAULT_LCDPROC_CHARSET, lcdd_debug=False): class ServerSpawner(utils.AutoRetryCandidate): 'Spawn the server, using auto-retry.' @utils.auto_retry def connect(self): return lcdrunner.LcdProcServer(lcd_host, lcd_port, charset=charset, debug=lcdd_debug) spawner = ServerSpawner(retry_config=retry_config, logger=logger) try: return spawner.connect() except socket.error as e: logger.error('Unable to connect to lcdproc %s:%s : %r', lcd_host, lcd_port, e) raise SystemExit(1)
Create and connect to the LCDd server. Args: lcd_host (str): the hostname to connect to lcd_port (int): the port to connect to retry_config: the retry policy (attempts, wait, backoff) used when connecting charset (str): the charset to use when sending messages to lcdproc lcdd_debug (bool): whether to enable full LCDd debug Returns: lcdproc.server.Server
codesearchnet
def get_group(self, name, user_name=None): self.project_service.set_auth(self._token_project) return self.project_service.get_group(name, user_name)
Get information on the given group or whether or not a user is a member of the group. Args: name (string): Name of group to query. user_name (optional[string]): Supply None if not interested in determining if user is a member of the given group. Returns: (mixed): Dictionary if getting group information or bool if a user name is supplied. Raises: requests.HTTPError on failure.
juraj-google-style