code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def exclude(self, **filters): exclude = {'-%s' % key: value for key, value in filters.items()} return self.filter(**exclude)
Applies query filters for excluding matching records from result set. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Examples: >>> Person.objects.exclude(age=None) >>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)
juraj-google-style
def to_file(self, destination, format='csv', csv_delimiter=',', csv_header=True): f = codecs.open(destination, 'w', 'utf-8') fieldnames = [] for column in self.schema: fieldnames.append(column.name) if sys.version_info[0] == 2: csv_delimiter = csv_delimiter.encode('unicode_escape') ...
Save the results to a local file in CSV format. Args: destination: path on the local filesystem for the saved results. format: the format to use for the exported data; currently only 'csv' is supported. csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ',' csv_header: for CSV exports, whether to ...
juraj-google-style
def _parse_args(): parser = argparse.ArgumentParser(description='preprocess_coco_minival: Preprocess COCO minival dataset') parser.add_argument('--images_folder', type=str, help='Full path of the validation images folder.', required=True) parser.add_argument('--instances_file', type=str, help='Full path of ...
Creates a parser that parse the command line arguments. Returns: A namespace parsed from command line arguments.
github-repos
def sample(self, num_rows=1): self.check_fit() res = {} means = np.zeros(self.covariance.shape[0]) size = (num_rows,) clean_cov = np.nan_to_num(self.covariance) samples = np.random.multivariate_normal(means, clean_cov, size=size) for (i, (label, distrib)) in enumerate(self.distribs.items()):...
Creates sintentic values stadistically similar to the original dataset. Args: num_rows: `int` amount of samples to generate. Returns: np.ndarray: Sampled data.
codesearchnet
def max(x, axis=None, keepdims=False, initial=None): if any_symbolic_tensors((x,)): return Max(axis=axis, keepdims=keepdims, initial=initial).symbolic_call(x) return backend.numpy.max(x, axis=axis, keepdims=keepdims, initial=initial)
Return the maximum of a tensor or maximum along an axis. Args: x: Input tensor. axis: Axis or axes along which to operate. By default, flattened input is used. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Defaults to `False`. initial: The minimum va...
github-repos
def fix_variable(self, v, value): adj = self.adj linear = self.linear if (value not in self.vartype.value): raise ValueError('expected value to be in {}, received {} instead'.format(self.vartype.value, value)) removed_interactions = [] for u in adj[v]: self.add_variable(u, (value * a...
Fix the value of a variable and remove it from a binary quadratic model. Args: v (variable): Variable in the binary quadratic model to be fixed. value (int): Value assigned to the variable. Values must match the :class:`.Vartype` of the binary quadratic model. Examples: This example creates a binary quadratic model...
codesearchnet
def session_manager(self): return self._session_manager
Return the SessionManager used by the Supervisor. Returns: A SessionManager object.
github-repos
def normalize(x, axis=-1, order=2): from keras.src import ops if isinstance(x, np.ndarray): norm = np.atleast_1d(np.linalg.norm(x, order, axis)) norm[norm == 0] = 1 axis = axis or -1 return x / np.expand_dims(norm, axis) return ops.nn.normalize(x, axis=axis, order=order)
Normalizes an array. If the input is a NumPy array, a NumPy array will be returned. If it's a backend tensor, a backend tensor will be returned. Args: x: Array to normalize. axis: axis along which to normalize. order: Normalization order (e.g. `order=2` for L2 norm). Returns: A normalized copy of the array.
github-repos
def development_verify(): with open(DEVELOPMENT_TEMPLATE, 'r') as file_obj: template = file_obj.read() expected = template.format(revision=REVISION, rtd_version=RTD_VERSION) with open(DEVELOPMENT_FILE, 'r') as file_obj: contents = file_obj.read() if (contents != expected): err_ms...
Populate template and compare to ``DEVELOPMENT.rst`` Raises: ValueError: If the current ``DEVELOPMENT.rst`` doesn't agree with the expected value computed from the template.
codesearchnet
def Launch(self, request, global_params=None): config = self.GetMethodConfig('Launch') return self._RunMethod(config, request, global_params=global_params)
Launch a job with a FlexTemplate. Args: request: (DataflowProjectsLocationsFlexTemplatesLaunchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (LaunchFlexTemplateResponse) The response message.
github-repos
def ParseOptions(cls, options, output_module): if not isinstance(output_module, dynamic.DynamicOutputModule): raise errors.BadConfigObject( 'Output module is not an instance of DynamicOutputModule') default_fields = ','.join(cls._DEFAULT_FIELDS) fields = cls._ParseStringOption( ...
Parses and validates options. Args: options (argparse.Namespace): parser options. output_module (OutputModule): output module to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when the output filename was not provided.
juraj-google-style
async def getProvStack(self, iden: str): return self.cell.provstor.getProvStack(s_common.uhex(iden))
Return the providence stack associated with the given iden. Args: iden (str): the iden from splice Note: the iden appears on each splice entry as the 'prov' property
juraj-google-style
def in_cache(self, objpath, metahash): try: self.path_in_cache(objpath, metahash) return True except CacheMiss: return False
Returns true if object is cached. Args: objpath: Filename relative to buildroot. metahash: hash object
juraj-google-style
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format...
github-repos
def list_knowledge_bases(project_id): import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) print('Knowledge Bases for: {}'.format(project_id)) for knowledge_base in client.list_knowledge_bases(project_path): p...
Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.
juraj-google-style
def price(self, valuation_date, market, model=None): del model, valuation_date reference_curve = market.reference_curve fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction) return 100.0 * self._contract_notional * (1.0 - fwd_rate)
Returns the price of the contract on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Retur...
github-repos
def populate_readme(version, circleci_build, appveyor_build, coveralls_build, travis_build): with open(RELEASE_README_FILE, 'r') as file_obj: template = file_obj.read() contents = template.format(version=version, circleci_build=circleci_build, appveyor_build=appveyor_build, coveralls_build=coveralls_bui...
Populates ``README.rst`` with release-specific data. This is because ``README.rst`` is used on PyPI. Args: version (str): The current version. circleci_build (Union[str, int]): The CircleCI build ID corresponding to the release. appveyor_build (str): The AppVeyor build ID corresponding to the release. coveralls_build...
codesearchnet
def dequantize_flow(dx, dy, max_val=0.02, denorm=True): assert (dx.shape == dy.shape) assert ((dx.ndim == 2) or ((dx.ndim == 3) and (dx.shape[(- 1)] == 1))) (dx, dy) = [dequantize(d, (- max_val), max_val, 255) for d in [dx, dy]] if denorm: dx *= dx.shape[1] dy *= dx.shape[0] flow = n...
Recover from quantized flow. Args: dx (ndarray): Quantized dx. dy (ndarray): Quantized dy. max_val (float): Maximum value used when quantizing. denorm (bool): Whether to multiply flow values with width/height. Returns: ndarray: Dequantized flow.
codesearchnet
def to_hgnc(self, hgnc_alias, build='37'): result = self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build) if result: for gene in result: return gene['hgnc_symbol'] else: return None
Check if a hgnc symbol is an alias Return the correct hgnc symbol, if not existing return None Args: hgnc_alias(str) Returns: hgnc_symbol(str)
codesearchnet
def get_reserved_vlan_range(self, id_or_uri): uri = (self._client.build_uri(id_or_uri) + '/reserved-vlan-range') return self._client.get(uri)
Gets the reserved vlan ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool
codesearchnet
def get_changes_since(self, timestamp: str) -> Dict[(str, List)]: rg = [] cg = [] ra = [] ca = [] layers = [] if (self.last_modified() > timestamp): if (self.row_graphs.last_modified() > timestamp): for name in self.row_graphs.keys(): if (self.row_graphs.last_...
Get a summary of the parts of the file that changed since the given time Args: timestamp: ISO8601 timestamp Return: dict: Dictionary like ``{"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}`` listing the names of objects that were modified since the given time
codesearchnet
def __setitem__(self, key, value): if not isinstance(key, basestring): raise Exception("LRU cache can only be indexed by strings") if key in self._cache: entry = self._cache[key] elif len(self._cache) < self._cache_size: self._cache[key] = entry = {} else: ent...
Put an item in the cache. Args: key: a string key for retrieving the item. value: the item to cache. Raises: Exception if the key is not a string.
juraj-google-style
def from_file(cls, filename, constant_lattice=True, **kwargs): fname = os.path.basename(filename) if fnmatch(fname, "*XDATCAR*"): structures = Xdatcar(filename).structures elif fnmatch(fname, "vasprun*.xml*"): structures = Vasprun(filename).structures ...
Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file Args: filename (str): The filename to read from. constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD simulation. True results in Returns: (Trajectory)
juraj-google-style
def RunMetadata(self, run, tag): accumulator = self.GetAccumulator(run) return accumulator.RunMetadata(tag)
Get the session.run() metadata associated with a TensorFlow run and tag. Args: run: A string name of a TensorFlow run. tag: A string name of the tag associated with a particular session.run(). Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: The metadata in the form ...
juraj-google-style
def _CreateDynamicDisplayAdSettings(media_service, opener): image = _CreateImage(media_service, opener, 'https: logo = {'type': 'IMAGE', 'mediaId': image['mediaId'], 'xsi_type': 'Image'} dynamic_settings = {'landscapeLogoImage': logo, 'pricePrefix': 'as low as', 'promoText': 'Free shipping!', 'xsi_type': 'D...
Creates settings for dynamic display ad. Args: media_service: a SudsServiceProxy instance for AdWords's MediaService. opener: an OpenerDirector instance. Returns: The dynamic display ad settings.
codesearchnet
def add_graph_building_optimization_tests(cls: _TC) -> _TC: if flags.config().graph_building_optimization.value(): return cls for name, value in cls.__dict__.copy().items(): if callable(value) and (name.startswith(unittest.TestLoader.testMethodPrefix) or name.startswith('benchmark')): ...
Adds methods with graph_building_optimization enabled to the test suite. Example: @test_util.add_graph_building_optimization_tests class FooTest(test.TestCase): def testBar(self): ... Generated class: class FooTest(test.TestCase): def testBar(self): ... def testBarWithGraphBuildingOptimization(self): // Enable gr...
github-repos
def percentile(self, percent): if (percent >= 100): percent = 100 target = (len(self) - (len(self) * (percent / 100))) for k in reversed(sorted(self._data.keys())): target -= self._data[k] if (target < 0): return k return 10
Return the value that is the Nth precentile in the histogram. Args: percent (Union[int, float]): The precentile being sought. The default consumer implementations use consistently use ``99``. Returns: int: The value corresponding to the requested percentile.
codesearchnet
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm': return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))
Concatenate the given flatterms to a single flatterm. Args: *flatterms: The flatterms which are concatenated. Returns: The concatenated flatterms.
codesearchnet
def to_cache_timer(datetime_func): if (datetime_func is None): datetime_func = datetime.utcnow def _timer(): 'Return the timestamp since the epoch.' return (datetime_func() - datetime(1970, 1, 1)).total_seconds() return _timer
Converts a datetime_func to a timestamp_func. Args: datetime_func (callable[[datatime]]): a func that returns the current time Returns: time_func (callable[[timestamp]): a func that returns the timestamp from the epoch
codesearchnet
def __init__(self, validate_args=False, name="normal"): self._graph_parents = [] self._name = name self._validate_args = validate_args super(NormalCDF, self).__init__( validate_args=validate_args, forward_min_event_ndims=0, name=name)
Instantiates the `NormalCDF` bijector. Args: validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object.
juraj-google-style
def restore_state(self, state): state_name = state.get('state_name') state_version = state.get('state_version') if state_name != self.STATE_NAME or state_version != self.STATE_VERSION: raise ArgumentError("Invalid emulated device state name or version", found=(state_name, ...
Restore the current state of this emulated device. Note that restore_state happens synchronously in the emulation thread to avoid any race conditions with accessing data members and ensure a consistent atomic restoration process. This method will block while the background restore happens. Args: state (dict): A prev...
juraj-google-style
def _verify_params(self): reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params) if reserved_in_use: raise ValueError('Using a reserved parameter', reserved_in_use)
Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used.
codesearchnet
def manage_all_configs(save_results, filename): all_configs = get_all_configs() print_all_configs(all_configs[0], all_configs[1], all_configs[2]) if save_results: save_to_file(all_configs[3], filename)
Manages configuration detection and retrieval based on user input. Args: save_results: Boolean indicating whether to save the results to a file. filename: String that is the name of the output JSON file.
github-repos
def energy_at_conditions(self, pH, V): return ((self.energy + ((self.npH * PREFAC) * pH)) + (self.nPhi * V))
Get free energy for a given pH and V Args: pH (float): pH at which to evaluate free energy V (float): voltage at which to evaluate free energy Returns: free energy at conditions
codesearchnet
def _get_split_key(client_keys, num_splits): if not client_keys or len(client_keys) < num_splits - 1: return client_keys num_keys_per_split = max(1.0, float(len(client_keys)) / (num_splits - 1)) split_client_keys = [] for i in range(1, num_splits): split_index = int(round(i * num_keys_pe...
Given a list of keys and a number of splits find the keys to split on. Args: client_keys: the list of keys. num_splits: the number of splits. Returns: A list of keys to split on.
github-repos
def process_multientry(entry_list, prod_comp, coeff_threshold=0.0001): dummy_oh = [Composition('H'), Composition('O')] try: entry_comps = [e.composition for e in entry_list] rxn = Reaction((entry_comps + dummy_oh), [prod_comp]) coeffs = (- np.array([rxn.get_coeff(comp) for comp in entry_...
Static method for finding a multientry based on a list of entries and a product composition. Essentially checks to see if a valid aqueous reaction exists between the entries and the product composition and returns a MultiEntry with weights according to the coefficients if so. Args: entry_list ([Entry]): list of entrie...
codesearchnet
def __init__(self, certificate=None, private_key=None): self.private_key = private_key self.certificate = certificate self._ClearServerCipherCache() self.encrypted_cipher_cache = utils.FastStore(max_size=50000)
Creates a communicator. Args: certificate: Our own certificate. private_key: Our own private key.
juraj-google-style
def case(self, case_id=None): if case_id: for case in self.case_objs: if case.case_id == case_id: return case else: if self.cases: return list(self.case_objs)[0] return Case(case_id='unknown')
Return a Case object If no case_id is given return one case Args: case_id (str): A case id Returns: A Case object
juraj-google-style
def add_permissions(self, grp_name, resource, permissions): self.service.add_permissions( grp_name, resource, permissions, self.url_prefix, self.auth, self.session, self.session_send_opts)
Add additional permissions for the group associated with the given resource. Args: grp_name (string): Name of group. resource (intern.resource.boss.BossResource): Identifies which data model object to operate on. permissions (list): List of permissions to add to the given resource. Raises: requests.HTTPError on failu...
juraj-google-style
def get_javascript_error(self, return_type='string'): if BROME_CONFIG['proxy_driver']['intercept_javascript_error']: js_errors = self._driver.execute_script( 'return window.jsErrors; window.jsErrors = [];' ) if not js_errors: js_erro...
Return the gathered javascript error Args: return_type: 'string' | 'list'; default: 'string'
juraj-google-style
def sub(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, -x2).outputs[0] with tf.name_scope(name, default_name="sub"): x1, x2 = binary_arguments_to_tensors(x1, x2) return add(x1, negative(x2), output_sh...
Binary subtraction with broadcsting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
juraj-google-style
def pyside_load_ui(uifile, base_instance=None): form_class, base_class = load_ui_type(uifile) if not base_instance: typeName = form_class.__name__ finalType = type(typeName, (form_class, base_class), {}) base_instance = finalType() ...
Provide PyQt4.uic.loadUi functionality to PySide Args: uifile (str): Absolute path to .ui file base_instance (QWidget): The widget into which UI widgets are loaded Note: pysideuic is required for this to work with PySide. This seems to work correctly in Maya as well as outside of it as opposed to other implementati...
juraj-google-style
def nhapDaiHan(self, cucSo, gioiTinh): for cung in self.thapNhiCung: khoangCach = khoangCachCung(cung.cungSo, self.cungMenh, gioiTinh) cung.daiHan(cucSo + khoangCach * 10) return self
Nhap dai han Args: cucSo (TYPE): Description gioiTinh (TYPE): Description Returns: TYPE: Description
juraj-google-style
def output_csv(filehandle: TextIO, values: Iterable[str]) -> None: line = ",".join(values) filehandle.write(line + "\n")
Write a line of CSV. POOR; does not escape things properly. DEPRECATED. Args: filehandle: file to write to values: values
juraj-google-style
def BuildChecks(self, request): result = [] if request.HasField("start_time") or request.HasField("end_time"): def FilterTimestamp(file_stat, request=request): return file_stat.HasField("st_mtime") and ( file_stat.st_mtime < request.start_time or file_stat.st_mtime > ...
Parses request and returns a list of filter callables. Each callable will be called with the StatEntry and returns True if the entry should be suppressed. Args: request: A FindSpec that describes the search. Returns: a list of callables which return True if the file is to be suppressed.
juraj-google-style
def get_rows_fieldnames_from_raw_sql( session: Union[Session, Engine, Connection], sql: str) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]: result = session.execute(sql) fieldnames = result.keys() rows = result.fetchall() return rows, fieldnames
Returns results and column names from a query. Args: session: SQLAlchemy :class:`Session`, :class:`Engine`, or :class:`Connection` object sql: raw SQL to execure Returns: ``(rows, fieldnames)`` where ``rows`` is the usual set of results and ``fieldnames`` are the name of the result columns/fields.
juraj-google-style
def _get_computer_object(): with salt.utils.winapi.Com(): nt = win32com.client.Dispatch('AdsNameSpaces') return nt.GetObject('', 'WinNT:
A helper function to get the object for the local machine Returns: object: Returns the computer object for the local machine
codesearchnet
def hasValue(self) -> 'Builder': return self._to_builder(_evaluation.HasValueFunction(self.node.context, self.node, []))
The FHIRPath hasValue() function. Returns: An expression that evaluates to True if the parent has a single value that is a primitive.
github-repos
def are_equivalent_xml(a_xml, b_xml, ignore_timestamps=False): 'Normalizes then compares SystemMetadata XML docs for equivalency.\n ``a_xml`` and ``b_xml`` should be utf-8 encoded DataONE System Metadata XML\n documents.\n ' return are_equivalent_pyxb(d1_common.xml.deserialize(a_xml), d1_common.xml.deseriali...
Determine if two SystemMetadata XML docs are semantically equivalent. Normalize then compare SystemMetadata XML docs for equivalency. Args: a_xml, b_xml: bytes UTF-8 encoded SystemMetadata XML docs to compare ignore_timestamps: bool ``True``: Timestamps in the SystemMetadata are ignored so that objects that are comp...
codesearchnet
def sca_xsect(scatterer, h_pol=True): if (scatterer.psd_integrator is not None): return scatterer.psd_integrator.get_angular_integrated(scatterer.psd, scatterer.get_geometry(), 'sca_xsect') old_geom = scatterer.get_geometry() def d_xsect(thet, phi): (scatterer.phi, scatterer.thet) = ((phi *...
Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section.
codesearchnet
def _pack3(obj, fp, **options): global compatibility ext_handlers = options.get('ext_handlers') if (obj is None): _pack_nil(obj, fp, options) elif (ext_handlers and (obj.__class__ in ext_handlers)): _pack_ext(ext_handlers[obj.__class__](obj), fp, options) elif isinstance(obj, bool): ...
Serialize a Python object into MessagePack bytes. Args: obj: a Python object fp: a .write()-supporting file-like object Kwargs: ext_handlers (dict): dictionary of Ext handlers, mapping a custom type to a callable that packs an instance of the type into an Ext object force_float_precision (str): "single" to force pack...
codesearchnet
def set(self, key, value, *, section=DataStoreDocumentSection.Data): key_notation = '.'.join([section, key]) try: self._delete_gridfs_data(self._data_from_dotnotation(key_notation, default=None)) except KeyError: logger.info('Adding new field {} to the data store'.format(key_notation)) r...
Store a value under the specified key in the given section of the document. This method stores a value into the specified section of the workflow data store document. Any existing value is overridden. Before storing a value, any linked GridFS document under the specified key is deleted. Args: key (str): The key point...
codesearchnet
def open_window(self, private=False): handles_before = self.selenium.window_handles self.switch_to() with self.selenium.context(self.selenium.CONTEXT_CHROME): self.selenium.find_element(*self._file_menu_button_locator).click() if private: self.selenium.find_element(*self._file_me...
Open a new browser window. Args: private (bool): Optional parameter to open a private browsing window. Defaults to False. Returns: :py:class:`BrowserWindow`: Opened window.
codesearchnet
def __init__(self, function_meta, functions_mapping=None, check_variables_set=None): self.functions_mapping = functions_mapping or {} self.check_variables_set = check_variables_set or set() self.cache_key = None self.__parse(function_meta)
init LazyFunction object with function_meta Args: function_meta (dict): function name, args and kwargs. { "func_name": "func", "args": [1, 2] "kwargs": {"a": 3, "b": 4} }
juraj-google-style
def read_table(fstream): pos = fstream.tell() line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 4: return None partition["table"] = fragments[0] partition["group"] = ...
Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs.
juraj-google-style
def run_numerical_categorical_analysis(args, schema_list): header = [column['name'] for column in schema_list] input_files = file_io.get_matching_files(args.input_file_pattern) for col_schema in schema_list: col_type = col_schema['type'].lower() if ((col_type != 'string') and (col_type != 'i...
Makes the numerical and categorical analysis files. Args: args: the command line args schema_list: python object of the schema json file. Raises: ValueError: if schema contains unknown column types.
codesearchnet
def gather_gpu_devices(): try: dev_info = _gather_gpu_devices_proc() if not dev_info: raise ValueError('No devices found') return dev_info except (IOError, ValueError, errors.OpError): pass try: return _gather_gpu_devices_cudart() except (OSError, Valu...
Gather gpu device info. Returns: A list of test_log_pb2.GPUInfo messages.
github-repos
def alloc_data(self, value): if isinstance(value, six.binary_type): return self._alloc_data(value) elif isinstance(value, six.text_type): return self._alloc_data(value.encode('utf-8') + b'\0') else: raise TypeError('No idea how to encode %s' % repr(v...
Allocate a piece of data that will be included in the shellcode body. Arguments: value(...): The value to add to the shellcode. Can be bytes or string type. Returns: ~pwnypack.types.Offset: The offset used to address the data.
juraj-google-style
def __init__(self, use_zeromq=True): super(PsortMultiProcessEngine, self).__init__() self._analysis_plugins = {} self._completed_analysis_processes = set() self._data_location = None self._event_filter_expression = None self._event_queues = {} self._event_tag_index = event_tag_index.Eve...
Initializes an engine object. Args: use_zeromq (Optional[bool]): True if ZeroMQ should be used for queuing instead of Python's multiprocessing queue.
juraj-google-style
def get_attribute(json, attr): res = [json[entry][attr] for entry, _ in enumerate(json)] logger.debug('{0}s (from JSON):\n{1}'.format(attr, res)) return res
Gets the values of an attribute from JSON Args: json: JSON data as a list of dict dates, where the keys are the raw market statistics. attr: String of attribute in JSON file to collect. Returns: List of values of specified attribute from JSON
juraj-google-style
def console_map_ascii_code_to_font(asciiCode: int, fontCharX: int, fontCharY: int) -> None: lib.TCOD_console_map_ascii_code_to_font(_int(asciiCode), fontCharX, fontCharY)
Set a character code to new coordinates on the tile-set. `asciiCode` must be within the bounds created during the initialization of the loaded tile-set. For example, you can't use 255 here unless you have a 256 tile tile-set loaded. This applies to all functions in this group. Args: asciiCode (int): The character c...
codesearchnet
def noninteractive_changeset_update(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs): logger.debug('Using noninterative changeset provider mode for %s.', fqn) (_changes, change_set_id) = create_change_set(self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=...
Update a Cloudformation stack using a change set. This is required for stacks with a defined Transform (i.e. SAM), as the default update_stack API cannot be used with them. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to ...
codesearchnet
def classification_signature_def(examples, classes, scores): if examples is None: raise ValueError('Classification `examples` cannot be None.') if not isinstance(examples, tensor_lib.Tensor): raise ValueError(f'Classification `examples` must be a string Tensor. Found `examples` of type {type(exa...
Creates classification signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Classify API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A s...
github-repos
def dumps(o, encoder=None): retval = "" if encoder is None: encoder = TomlEncoder(o.__class__) addtoretval, sections = encoder.dump_sections(o, "") retval += addtoretval outer_objs = [id(o)] while sections: section_ids = [id(section) for section in sections] for out...
Stringifies input dict as toml Args: o: Object to dump into toml preserve: Boolean parameter. If true, preserve inline tables. Returns: String containing the toml corresponding to dict
juraj-google-style
def handle_incoming_message(self, msg): if msg.type == MessageType.START_JOB: job = msg.message['job'] self.schedule_job(job) elif msg.type == MessageType.CANCEL_JOB: job_id = msg.message['job_id'] self.cancel(job_id)
Start or cancel a job, based on the msg. If msg.type == MessageType.START_JOB, then start the job given by msg.job. If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id. Args: msg (barbequeue.messaging.classes.Message): Returns: None
juraj-google-style
def _check_parameter_range(s_min, s_max): if (s_min == DEFAULT_S_MIN): return (0.0, 1.0) if (s_max == DEFAULT_S_MAX): return (s_min, s_min) return (s_min, s_max)
r"""Performs a final check on a clipped parameter range. .. note:: This is a helper for :func:`clip_range`. If both values are unchanged from the "unset" default, this returns the whole interval :math:`\left[0.0, 1.0\right]`. If only one of the values is set to some parameter :math:`s`, this returns the "degenerate...
codesearchnet
def add_handler(self, handler: Handler, group: int=0): if isinstance(handler, DisconnectHandler): self.disconnect_handler = handler.callback else: self.dispatcher.add_handler(handler, group) return (handler, group)
Use this method to register an update handler. You can register multiple handlers, but at most one handler within a group will be used for a single update. To handle the same update more than once, register your handler using a different group id (lower group id == higher priority). Args: handler (``Handler``): The h...
codesearchnet
def authentication_required(req, resp, resource, uri_kwargs): if 'user' not in req.context: args = ["Unauthorized", "This resource requires authentication"] if FALCON_VERSION >= (1, 0, 0): args.append(req.context.get('challenges', [])) raise HTTPUnauthorized(*args...
Ensure that user is authenticated otherwise return ``401 Unauthorized``. If request fails to authenticate this authorization hook will also include list of ``WWW-Athenticate`` challenges. Args: req (falcon.Request): the request object. resp (falcon.Response): the response object. resource (object): the resource objec...
juraj-google-style
def oem_name(self, value):
    """Set the oem_name property.

    Setting the value back to the stored default removes any explicit
    entry from the underlying values map (so defaults are not persisted
    redundantly); any other value is stored explicitly.

    Args:
        value (string): The property value.
    """
    key = 'ai.device.oemName'
    if value == self._defaults[key] and key in self._values:
        # Value matches the default and an explicit entry exists: drop it.
        del self._values[key]
    else:
        self._values[key] = value
The oem_name property. Args: value (string). the property value.
juraj-google-style
def create_method_arguments(self, node, method, use_defaults=False): args = [] num_posargs = method.argcount(node) num_posargs_no_default = num_posargs - len(method.defaults) for i in range(num_posargs): default_idx = i - num_posargs_no_default if use_defaults and default_idx >= 0: ...
Create arguments for the given method. Creates Unknown objects as arguments for the given method. Note that we don't need to take parameter annotations into account as InterpreterFunction.call() will take care of that. Args: node: The current node. method: An abstract.InterpreterFunction. use_defaults: Whether to use...
github-repos
def _method_url(self, method_name): return '{base_url}/api/{api}/{method}'.format(base_url=self._base_url(), api=self.api_version, method=method_name)
Generate the URL for the requested method Args: method_name (str): Name of the method Returns: A string containing the URL of the method
codesearchnet
def search(nats_api, search_model, algo, dataset='cifar10', reporting_epoch=12, max_train_hours=20000.0): nats_api.reset_time() times, best_valids, best_tests = ([0.0], [0.0], [0.0]) valid_models = 0 time_spent = 0 start_time = time.time() last_report_time = start_time for model, feedback in...
Define the search procedure. Args: nats_api: the NATS-Bench object. search_model: which is a `model` object annotated with `one_of`. algo: algorithm for search. dataset: the target dataset reporting_epoch: Use test set results for models trained for this many epochs. max_train_hours: max time budget to train the model...
github-repos
def hook(self, function, dependencies=None): if (not isinstance(dependencies, (Iterable, type(None), str))): raise TypeError('Invalid list of dependencies provided!') if (not hasattr(function, '__deps__')): function.__deps__ = dependencies if self.isloaded(function.__deps__): self.ap...
Tries to load a hook Args: function (func): Function that will be called when the event is called Kwargs: dependencies (str): String or Iterable with modules whose hooks should be called before this one Raises: :class:TypeError Note that the dependencies are module-wide, that means that if `parent.foo` and `parent....
codesearchnet
def _apply_threshold_to_predictions(self, result: AnomalyResult) -> AnomalyResult:
    """Update the prediction labels in an AnomalyResult using the ThresholdFn.

    Args:
        result (AnomalyResult): The input ``AnomalyResult`` containing
            anomaly scores.

    Returns:
        AnomalyResult: A new ``AnomalyResult`` whose predictions carry the
        label produced by the threshold function and the threshold value
        that was applied.
    """
    threshold_fn = self._threshold_fn
    updated = []
    for pred in result.predictions:
        updated.append(
            dataclasses.replace(
                pred,
                label=threshold_fn.apply(pred.score),
                threshold=threshold_fn.threshold,
            )
        )
    return dataclasses.replace(result, predictions=updated)
Updates the prediction labels in an AnomalyResult using the ThresholdFn. Args: result (AnomalyResult): The input `AnomalyResult` containing anomaly scores. Returns: AnomalyResult: A new `AnomalyResult` with updated prediction labels and threshold values.
github-repos
def serialize_streamnet(streamnet_file, output_reach_file): FileClass.copy_files(streamnet_file, output_reach_file) ds_reach = ogr_Open(output_reach_file, update=True) layer_reach = ds_reach.GetLayer(0) layer_def = layer_reach.GetLayerDefn() i_link = layer_def.GetFieldIn...
Eliminate reach with zero length and return the reach ID map. Args: streamnet_file: original stream net ESRI shapefile output_reach_file: serialized stream net, ESRI shapefile Returns: id pairs {origin: newly assigned}
juraj-google-style
def get_tensor_size(self, tensor_name, partial_layout=None, mesh_dimension_to_size=None):
    """The size of a tensor in bytes.

    If ``partial_layout`` is specified, then ``mesh_dimension_to_size``
    must also be. In this case, the size on a single device is returned.

    Args:
        tensor_name: a string, name of a tensor in the graph.
        partial_layout: an optional {string: string}, from MTF dimension
            name to mesh dimension name.
        mesh_dimension_to_size: an optional {string: int}, from mesh
            dimension name to size.

    Returns:
        The tensor size in bytes (dtype size times entry count).
    """
    bytes_per_entry = self.get_tensor_dtype(tensor_name).size
    num_entries = self.get_tensor_num_entries(
        tensor_name, partial_layout, mesh_dimension_to_size)
    return bytes_per_entry * num_entries
The size of a tensor in bytes. If partial_layout is specified, then mesh_dimension_to_size must also be. In this case, the size on a single device is returned. Args: tensor_name: a string, name of a tensor in the graph. partial_layout: an optional {string: string}, from MTF dimension name to mesh dimension name. mesh...
codesearchnet
def get_template(template): from cloud_inquisitor.database import db tmpl = db.Template.find_one(template_name=template) if (not tmpl): raise InquisitorError('No such template found: {}'.format(template)) tmplenv = Environment(loader=BaseLoader, autoescape=True) tmplenv.filters['json_loads']...
Return a Jinja2 template by filename Args: template (str): Name of the template to return Returns: A Jinja2 Template object
codesearchnet
def call_next(self, *args, **kwargs) -> t.List[run.RunInfo]: all_results = [] for ext in self.next_extensions: LOG.debug(' %s ', ext) results = ext(*args, **kwargs) LOG.debug(' %s => %s', ext, results) if (results is None): LOG.warning('No result from: %s', ext) ...
Call all child extensions with the given arguments. This calls all child extensions and collects the results for our own parent. Use this to control the execution of your nested extensions from your own extension. Returns: :obj:`list` of :obj:`RunInfo`: A list of collected results of our child extensions.
codesearchnet
def __init__(self, files=None, misspelling_file=None): if misspelling_file: self._misspelling_dict = defaultdict(list) with open(misspelling_file, 'r') as f: for line in f: bad_word, correction = line.strip().split(' ', 1) self._misspelling_dict[bad_word].append(correcti...
Initialises a Misspellings instance. Args: files: List of files to check. More can be added with add(). misspelling_file: Filename with a list of misspelled words and their alternatives. Raises: IOError: Raised if misspelling_file can't be found. ValueError: Raised if misspelling_file isn't correctly formatted.
juraj-google-style
def plot_loss_history(history, figsize=(15, 8)): plt.figure(figsize=figsize) plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.xlabel(" plt.ylabel("Loss") plt.legend(["Training", "Validation"]) plt.title("Loss over time") plt.show()
Plots the learning history for a Keras model, assuming the validation data was provided to the 'fit' function. Args: history: The return value from the 'fit' function. figsize: The size of the plot.
juraj-google-style
def parseEquation(self, inp): inp = MathService._preprocess(inp) split = inp.split(' ') for i, w in enumerate(split): if w in self.__unaryOperators__: op = self.__unaryOperators__[w] eq1 = ' '.join(split[:i]) ...
Solves the equation specified by the input string. Args: inp (str): An equation, specified in words, containing some combination of numbers, binary, and unary operations. Returns: The floating-point result of carrying out the computation.
juraj-google-style
def _fits_surface(self, width, height): assert(width > 0 and height > 0) if self.rot and (width > self.width or height > self.height): width, height = height, width if width > self.width or height > self.height: return False else: return True
Test surface is big enough to place a rectangle Arguments: width (int, float): Rectangle width height (int, float): Rectangle height Returns: boolean: True if it could be placed, False otherwise
juraj-google-style
async def remove_participant(self, p: Participant):
    """Remove a participant from the tournament.

    |methcoro|

    Issues a DELETE for the participant, then drops it from the local
    ``participants`` list if present.

    Args:
        p: the participant to remove

    Raises:
        APIException
    """
    url = 'tournaments/{}/participants/{}'.format(self._id, p._id)
    await self.connection('DELETE', url)
    if p in self.participants:
        self.participants.remove(p)
remove a participant from the tournament |methcoro| Args: p: the participant to remove Raises: APIException
juraj-google-style
def CompleteTask(self, task): with self._lock: if task.identifier not in self._tasks_merging: raise KeyError('Task {0:s} was not merging.'.format(task.identifier)) self.SampleTaskStatus(task, 'completed') del self._tasks_merging[task.identifier] logger.debug('Completed task {...
Completes a task. The task is complete and can be removed from the task manager. Args: task (Task): task. Raises: KeyError: if the task was not merging.
juraj-google-style
def set_max_steps_per_epoch(max_steps_per_epoch):
    """Limit the maximum number of steps for any call to fit/evaluate/predict.

    Stores the cap in a module-level global that the training/evaluation/
    prediction loops consult; intended purely as a debugging aid.

    Args:
        max_steps_per_epoch: The step cap to store (presumably an int, or
            None for no cap — confirm against the loop code that reads
            ``_MAX_STEPS_PER_EPOCH``).
    """
    # Rebind the module-level setting read elsewhere in this module.
    global _MAX_STEPS_PER_EPOCH
    _MAX_STEPS_PER_EPOCH = max_steps_per_epoch
Limit the maximum number of steps for any call to fit/evaluate/predict. This will cap the number of steps for single epoch of a call to `fit()`, `evaluate()`, or `predict()`. This is purely for debugging, and can also be set via the `KERAS_MAX_STEPS_PER_EPOCH` environment variable to quickly run a scrip without modify...
github-repos
def __init__(self, data=None, _KEY=None, _ATTRS=None):
    """Initialise a concrete MapEntry subclass instance.

    This is an abstract class: instantiating MapEntry directly is an
    error. When ``data`` is given, each key/value pair is copied onto the
    instance as an attribute and a logger is attached; when ``data`` is
    None the constructor returns immediately (no logger is attached —
    NOTE(review): looks intentional but worth confirming).

    Args:
        data: An optional dict of attribute, value pairs to populate with.

    Raises:
        TypeError: Bad argument, or attempt to instantiate abstract class.
    """
    if self.__class__ is MapEntry:
        raise TypeError('MapEntry is an abstract class.')
    if data is None:
        return
    for attr_name, attr_value in data.items():
        setattr(self, attr_name, attr_value)
    self.log = logging.getLogger(__name__)
This is an abstract class. Args: data: An optional dict of attribute, value pairs to populate with. Raises: TypeError: Bad argument, or attempt to instantiate abstract class.
github-repos
def load(self, source, as_defaults=False): if isinstance(source, six.string_types): source = os.path.expanduser(source) with open(source, encoding='utf-8') as f: self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults) elif isinstance(sou...
Load configuration values from the specified source. Args: source: as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
juraj-google-style
def get_commit(profile, sha):
    """Fetch a commit.

    Args:
        profile: A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        sha: The SHA of the commit to fetch.

    Returns:
        A dict with data about the commit.
    """
    response = api.get_request(profile, '/commits/' + sha)
    return prepare(response)
Fetch a commit. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the commit to fetch. Returns: A dict with data about the commit.
codesearchnet
def _set_bearer_user_vars(allowed_client_ids, scopes): (all_scopes, sufficient_scopes) = _process_scopes(scopes) try: authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes)) except oauth.Error: _logger.debug('Unable to get authorized scopes.', exc_info=True) return if...
Validate the oauth bearer token and set endpoints auth user variables. If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This provides enough information that our endpoints.get_current_user() function can get the user. Args: allowed_client_ids: List of client IDs that are acceptable. scopes: List of...
codesearchnet
def get_ax3d_fig_plt(ax=None, **kwargs):
    """Helper used in plot functions supporting an optional Axes3D argument.

    If ``ax`` is None, a new matplotlib figure and Axes3D are created;
    otherwise the currently active figure is reused.

    Args:
        kwargs: keyword arguments are passed to ``plt.figure`` when a new
            figure is created (i.e. when ``ax`` is None).

    Returns:
        Tuple ``(ax, figure, plt)``.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d

    if ax is not None:
        return ax, plt.gcf(), plt
    fig = plt.figure(**kwargs)
    return axes3d.Axes3D(fig), fig, plt
Helper function used in plot functions supporting an optional Axes3D argument. If ax is None, we build the `matplotlib` figure and create the Axes3D else we return the current active figure. Args: kwargs: keyword arguments are passed to plt.figure if ax is not None. Returns: ax: :class:`Axes` object figure: matplotli...
juraj-google-style
def AddTransaction(self, tx): if (BC.Default() is None): return False if (tx.Hash.ToBytes() in self.MemPool.keys()): return False if BC.Default().ContainsTransaction(tx.Hash): return False if (not tx.Verify(self.MemPool.values())): logger.error('Verifying tx result... fai...
Add a transaction to the memory pool. Args: tx (neo.Core.TX.Transaction): instance. Returns: bool: True if successfully added. False otherwise.
codesearchnet
def create_from_snapshot(self, data, timeout=-1):
    """Create a new volume on the storage system from a snapshot of a volume.

    A volume template must also be specified when creating a volume from
    a snapshot.

    Args:
        data (dict): Request body describing the volume to create.
        timeout: Timeout for the operation; defaults to -1.

    Returns:
        The created resource as returned by the REST client.
    """
    target_uri = self.URI + '/from-snapshot'
    return self._client.create(data, uri=target_uri, timeout=timeout)
Creates a new volume on the storage system from a snapshot of a volume. A volume template must also be specified when creating a volume from a snapshot. The global setting "StorageVolumeTemplateRequired" controls whether or not root volume templates can be used to provision volumes. The value of this setting defaults ...
codesearchnet
def summarize_variables(var_list=None, tag=None): if (var_list is None): var_list = tf.trainable_variables() if (tag is None): tag = 'training_variables/' name_to_var = {v.name: v for v in var_list} for v_name in list(name_to_var): v = name_to_var[v_name] tf.summary.histo...
Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/.
codesearchnet
def Run(self):
    """Execute this state transition.

    Runs each event in order and stops at the first one whose Run()
    returns exactly ``False`` (identity check — a None result does not
    stop execution, mirroring the original short-circuit).

    Returns:
        Whether or not all event functions returned True.
    """
    return all(event.Run() is not False for event in self.events)
Execute this state transition. Returns: Whether or not all event functions returned True.
github-repos
def poll():
    """Poll for currently pending events.

    A fresh SDL_Event struct is allocated for each yielded event so that
    callers may keep references to previously yielded events.

    Returns:
        Iterable[Event]: Events from the event queue.
    """
    while True:
        event_ptr = ffi.new('SDL_Event *')
        if not lib.SDL_PollEvent(event_ptr):
            return
        yield Event._from_ptr(event_ptr)
Polls for currently pending events. Returns: Iterable[Event]: Events from the event queue.
codesearchnet
def add_bboxes_to_image(image, bboxes, color='red', width=1): def expanded_bbox(bbox, n): l = min(bbox[0][0], bbox[1][0]) r = max(bbox[0][0], bbox[1][0]) t = min(bbox[0][1], bbox[1][1]) b = max(bbox[0][1], bbox[1][1]) return ((l - n, t - n), (r + n, b + n)) ...
Draw rectangles on the image for the bounding boxes Returns a PIL.Image Arguments: image -- input image bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format Keyword arguments: color -- color to draw the rectangles width -- line width of the rectangles Example: image = Image.open(filename) add_bboxes_to_image(...
juraj-google-style
def __init__(self, url): self.url = url self.domain = urlparse(url).netloc self.index = None self.creation_ts = time.time() self.downloaded_ts = None self.processing_started_ts = None self.processing_ended_ts = None for key in worker_mapping().k...
Constructor. Args: url (str): URL to which this request is related.
juraj-google-style
def ws_db996(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `ws_db996`'.format(value)) self._ws_db996...
Corresponds to IDD Field `ws_db996` Mean wind speed coincident with 99.6% dry-bulb temperature Args: value (float): value for IDD Field `ws_db996` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def _resolve_grad_inputs(cond_graph, grad_graph): new_inputs = [] for t in grad_graph.external_captures: if t.graph != grad_graph.outer_graph: assert t.graph == cond_graph for i, output in enumerate(t.graph.outputs): if output is t: t = t.graph...
Returns the tensors to pass as inputs to `grad_graph`. The `grad_graph` may have external references to 1. Its outer graph containing the input gradients. These references are kept as is. 2. Tensors in the forward pass graph. These tensors may not be "live" when the gradient is being computed. We replace such referenc...
github-repos
def fetch(self, addon_id, data=None, **kwargs):
    """Fetch addon for given Id.

    Args:
        addon_id: Id for which addon object has to be retrieved.
        data: Optional dict of request parameters; defaults to an empty
            dict. (Fixed: the original used a mutable ``data={}`` default,
            which Python shares across all calls.)
        **kwargs: Passed through to the base class ``fetch``.

    Returns:
        Addon dict for the given Id.
    """
    if data is None:
        data = {}
    return super(Addon, self).fetch(addon_id, data, **kwargs)
Fetch addon for given Id Args: addon_id : Id for which addon object has to be retrieved Returns: addon dict for given subscription Id
codesearchnet