Columns: code (string, 20-4.93k chars), docstring (string, 33-1.27k chars), source (string, 3 classes)
def Deserialize(self, reader):
    self.__hash = None
    self.DeserializeUnsigned(reader)
    byt = reader.ReadByte()
    if int(byt) != 1:
        raise Exception('Incorrect format')
    witness = Witness()
    witness.Deserialize(reader)
    self.Script = witness
Deserialize full object. Args: reader (neo.IO.BinaryReader): binary reader to deserialize from.
juraj-google-style
def from_json(cls, data):
    assert 'name' in data, 'Required keyword "name" is missing!'
    assert 'data_type' in data, 'Required keyword "data_type" is missing!'
    if cls._type_enumeration is None:
        cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
    if data['data_type'] == 'GenericType':
        assert 'base_unit' in data, \
            'Keyword "base_unit" is missing and is required for GenericType.'
        return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
    elif data['data_type'] in cls._type_enumeration._TYPES:
        clss = cls._type_enumeration._TYPES[data['data_type']]
        if data['data_type'] == data['name'].title().replace(' ', ''):
            return clss()
        else:
            instance = clss()
            instance._name = data['name']
            return instance
    else:
        raise ValueError('Data Type {} could not be recognized'.format(data['data_type']))
Create a data type from a dictionary. Args: data: Data as a dictionary. { "name": name of the data type as a string "data_type": the class name of the data type as a string "base_unit": the base unit of the data type }
codesearchnet
def json_dict(json_data):
    if isinstance(json_data, dict):
        return json_data
    elif isinstance(json_data, basestring):
        return json.loads(json_data, object_hook=OrderedDict)
    else:
        raise TypeError(
            "'json_data' must be a dictionary or valid JSON string; "
            "received: {!r}".format(json_data))
Given a dictionary or JSON string, return a dictionary. Args: json_data(dict, str): Input JSON object. Returns: A Python dictionary with the contents of the JSON object. Raises: TypeError: If the input object is not a dictionary or string.
juraj-google-style
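Usage sketch for json_dict, run in the same module (the snippet is Python 2 style; `basestring` needs a `str` fallback on Python 3):

import json
from collections import OrderedDict

try:
    basestring
except NameError:  # Python 3 has no basestring
    basestring = str

assert json_dict({'a': 1}) == {'a': 1}        # a dict passes through unchanged
parsed = json_dict('{"b": 2, "a": 1}')        # a JSON string is parsed
assert list(parsed.keys()) == ['b', 'a']      # OrderedDict preserves key order
try:
    json_dict(42)                             # anything else raises
except TypeError as exc:
    print(exc)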
def _get_individual_image(self, run, tag, index, sample):
    if self._db_connection_provider:
        db = self._db_connection_provider()
        cursor = db.execute(
            ...,  # SQL query string missing from this snippet
            {'run': run, 'tag': tag, 'sample': sample, 'index': index,
             'dtype': tf.string.as_datatype_enum})
        (data,) = cursor.fetchone()
        return six.binary_type(data)
    events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
    images = events[index].tensor_proto.string_val[2:]
    return images[sample]
Returns the actual image bytes for a given image. Args: run: The name of the run the image belongs to. tag: The name of the tag the image belongs to. index: The index of the image in the current reservoir. sample: The zero-indexed sample of the image to retrieve (for example, setting `sample` to `2` will fetch the third image sample at `step`). Returns: A bytestring of the raw image bytes.
juraj-google-style
def submit(cls, job_config, in_xg_transaction=False):
    cls.__validate_job_config(job_config)
    mapper_spec = job_config._get_mapper_spec()
    mapreduce_params = job_config._get_mr_params()
    mapreduce_spec = model.MapreduceSpec(
        job_config.job_name,
        job_config.job_id,
        mapper_spec.to_json(),
        mapreduce_params,
        util._obj_to_path(job_config._hooks_cls))

    if in_xg_transaction:
        propagation = db.MANDATORY
    else:
        propagation = db.INDEPENDENT

    state = None

    @db.transactional(propagation=propagation)
    def _txn():
        state = cls.__create_and_save_state(job_config, mapreduce_spec)
        cls.__add_kickoff_task(job_config, mapreduce_spec)
        return state

    state = _txn()
    return cls(state)
Submit the job to run. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this MR job. If True, there has to be an already opened cross-group transaction scope. MR will use one entity group from it. If False, MR will create an independent transaction to start the job regardless of any existing transaction scopes. Returns: a Job instance representing the submitted job.
codesearchnet
def filter_by_analysis_period(self, analysis_period):
    self._check_analysis_period(analysis_period)
    analysis_period = self._get_analysis_period_subset(analysis_period)
    if analysis_period.st_hour == 0 and analysis_period.end_hour == 23:
        t_s = 60 / analysis_period.timestep
        st_ind = int((analysis_period.st_time.moy / t_s) -
                     (self.header.analysis_period.st_time.moy / t_s))
        end_ind = int((analysis_period.end_time.moy / t_s) -
                      (analysis_period.st_time.moy / t_s) + st_ind + 1)
        if end_ind > st_ind:
            _filt_values = self._values[st_ind:end_ind]
        else:
            _filt_values = self._values[st_ind:] + self._values[:end_ind]
        _filt_header = self.header.duplicate()
        _filt_header._analysis_period = analysis_period
        return HourlyContinuousCollection(_filt_header, _filt_values)
    else:
        _filtered_data = self.filter_by_moys(analysis_period.moys)
        _filtered_data.header._analysis_period = analysis_period
        return _filtered_data
Filter the Data Collection based on an analysis period. Args: analysis_period: A Ladybug AnalysisPeriod. Returns: A new Data Collection with the filtered data.
juraj-google-style
def _make_concatenated_type(
    self, type1: _base.BaseValue, type2: _base.BaseValue | None
) -> '_typing.Concatenate | None':
    if isinstance(type2, _abstract.ParamSpec):
        new_args = [type1, type2]
    elif isinstance(type2, _abstract.Concatenate):
        type2 = cast(Any, type2)
        new_args = [type1] + type2.args + [type2.paramspec]
    else:
        return None
    return _abstract.Concatenate(new_args, type1.ctx)
Concatenates type1 and type2 if possible. If type2 is a ParamSpec or Concatenate object, creates a new Concatenate object by adding type1 to the front. Args: type1: An abstract value. type2: An abstract value or None. Returns: A new Concatenate object, or None if type1 cannot be concatenated to type2.
github-repos
def list_vmss_skus(access_token, subscription_id, resource_group, vmss_name):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Compute/virtualMachineScaleSets/',
                        vmss_name,
                        '/skus',
                        '?api-version=', COMP_API])
    return do_get_next(endpoint, access_token)
List the VM skus available for a VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of VM skus.
juraj-google-style
def update(self, value: Union[RawValue, Value], raw: bool = False) -> "InstanceNode":
    newval = self.schema_node.from_raw(value, self.json_pointer()) if raw else value
    return self._copy(newval)
Update the receiver's value. Args: value: New value. raw: Flag to be set if `value` is raw. Returns: Copy of the receiver with the updated value.
juraj-google-style
def from_json_file(cls, json_file: Union[str, os.PathLike]):
    with open(json_file, 'r', encoding='utf-8') as reader:
        text = reader.read()
    video_processor_dict = json.loads(text)
    return cls(**video_processor_dict)
Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters. Returns: A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object instantiated from that JSON file.
github-repos
def indicators_from_tag(self, indicator, tag_name, filters=None, params=None):
    params = params or {}
    for t in self.pivot_from_tag(indicator, tag_name, filters=filters, params=params):
        yield t
Yield indicators associated with a tag. Args: indicator: The indicator resource to pivot from. tag_name: The name of the tag. filters: Optional filters for the request. params: Optional query parameters. Returns: A generator of results pivoted from the tag.
juraj-google-style
def fts_match(self, fts_mask, segment):
    fts_mask = set(fts_mask)
    fts_seg = self.fts(segment)
    if fts_seg:
        return fts_seg <= fts_mask
    else:
        return None
Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment`
juraj-google-style
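The core of fts_match is a plain subset test over (value, feature) tuples; with hypothetical feature sets (the real ones come from self.fts):

fts_mask = {('+', 'voi'), ('-', 'cont'), ('+', 'cons')}
fts_seg = {('+', 'voi'), ('+', 'cons')}

assert fts_seg <= fts_mask                             # segment features all appear in the mask -> match
assert not {('-', 'voi'), ('+', 'cons')} <= fts_mask   # ('-', 'voi') absent from the mask -> no match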
def pxbounds(self, geom, clip=False):
    try:
        if isinstance(geom, dict):
            if 'geometry' in geom:
                geom = shape(geom['geometry'])
            else:
                geom = shape(geom)
        elif isinstance(geom, BaseGeometry):
            geom = shape(geom)
        else:
            geom = wkt.loads(geom)
    except:
        raise TypeError("Invalid geometry object")

    if geom.disjoint(shape(self)):
        raise ValueError("Geometry outside of image bounds")

    (xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds
    _nbands, ysize, xsize = self.shape

    if clip:
        xmin = max(xmin, 0)
        ymin = max(ymin, 0)
        xmax = min(xmax, xsize)
        ymax = min(ymax, ysize)

    return (xmin, ymin, xmax, ymax)
Returns the bounds of a geometry object in pixel coordinates Args: geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string clip (bool): Clip the bounds to the min/max extent of the image Returns: list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds
juraj-google-style
def erfinv(x, name="erfinv"):
    with tf.name_scope(name):
        x = tf.convert_to_tensor(value=x, name="x")
        if dtype_util.as_numpy_dtype(x.dtype) not in [np.float32, np.float64]:
            raise TypeError("x.dtype={} is not handled, see docstring for supported "
                            "types.".format(dtype_util.name(x.dtype)))
        return ndtri((x + 1.) / 2.) / np.sqrt(2.)
The inverse function for erf, the error function. Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="erfinv"). Returns: x: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x` is not floating-type.
juraj-google-style
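A quick numerical check of the identity the function computes, erfinv(x) = ndtri((x + 1) / 2) / sqrt(2), using SciPy in place of the TF ops (a sketch; assumes SciPy is installed):

import numpy as np
from scipy.special import erfinv as sp_erfinv, ndtri

x = np.array([-0.5, 0.0, 0.9])
np.testing.assert_allclose(ndtri((x + 1.) / 2.) / np.sqrt(2.), sp_erfinv(x))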
def depth_march_average_ground_temperature(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_march_average_ground_temperature`'.format(value))
    self._depth_march_average_ground_temperature = value
Corresponds to IDD Field `depth_march_average_ground_temperature` Args: value (float): value for IDD Field `depth_march_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def warning_max_changed(channel, max_warnings):
    gui = ui_embed.UI(
        channel,
        "Maximum Warnings Changed",
        "Users must now have {} warnings to be banned "
        "(this won't ban existing users with warnings)".format(max_warnings),
        modulename=modulename
    )
    return gui
Creates an embed UI containing a maximum-warnings-changed message Args: channel (discord.Channel): The Discord channel to bind the embed to max_warnings (int): The new maximum warnings Returns: ui (ui_embed.UI): The embed UI object
juraj-google-style
def _construct_full_hostname(self, hostname):
    if hostname.startswith(('http://', 'https://')):
        return hostname
    if '://' in hostname:
        protocol, host = hostname.split('://')
        raise ValueError('Protocol %s is not supported.' % protocol)
    # No scheme given: default to plain HTTP.
    return '://'.join(('http', hostname))
Create a full (scheme included) hostname from the argument given. Only HTTP and HTTP+SSL protocols are allowed. Args: hostname: The hostname to use. Returns: The full hostname. Raises: ValueError: A not supported protocol is used.
juraj-google-style
def benchmarks_main(true_main, argv=None):
    if argv is None:
        argv = sys.argv
    found_arg = [arg for arg in argv
                 if arg.startswith('--benchmark_filter=')
                 or arg.startswith('-benchmark_filter=')]
    if found_arg:
        argv.remove(found_arg[0])
        regex = found_arg[0].split('=')[1]
        app.run(lambda _: _run_benchmarks(regex), argv=argv)
    else:
        true_main()
Run benchmarks as declared in argv. Args: true_main: True main function to run if benchmarks are not requested. argv: the command line arguments (if None, uses sys.argv).
github-repos
def getAsWkt(self, session):
    # Query reconstructed from the docstring: wrap the geometry column in ST_AsText().
    statement = '''
                SELECT ST_AsText({0}) AS wkt
                FROM {1}
                WHERE id = {2};
                '''.format(self.geometryColumnName, self.tableName, self.id)
    result = session.execute(statement)
    for row in result:
        return row.wkt
Retrieve the geometry in Well Known Text format. This method is a veneer for an SQL query that calls the ``ST_AsText()`` function on the geometry column. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: str: Well Known Text string representation of geometry.
juraj-google-style
def write(name, value):
    if value is not None:
        environ[name] = builtins.str(value)
    elif environ.get(name):
        del environ[name]
Write a raw env value. A ``None`` value clears the environment variable. Args: name: The environment variable name value: The value to write
juraj-google-style
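Behavior sketch (assumes the module's `environ` is os.environ and `builtins.str` is the built-in str):

import os

write('MY_FLAG', 1)                    # stored as the string '1'
assert os.environ['MY_FLAG'] == '1'

write('MY_FLAG', None)                 # None clears the variable
assert 'MY_FLAG' not in os.environ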
def as_date(dat):
    LOGGER.debug('as_date(%s)', dat)
    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
        calendar.timegm(dat.timetuple()))
Return the RFC3339 UTC string representation of the given date and time. Args: dat (:py:class:`datetime.date`): the object/type to be serialized. Raises: TypeError: when ``dat`` is not an instance of ``datetime.date``. Returns: (str) JSON serializable type for the given object.
codesearchnet
def colless(self, normalize='leaves'):
    t_res = copy(self)
    t_res.resolve_polytomies()
    leaves_below = dict()
    n = 0
    I = 0
    for node in t_res.traverse_postorder():
        if node.is_leaf():
            leaves_below[node] = 1
            n += 1
        else:
            cl, cr = node.children
            nl = leaves_below[cl]
            nr = leaves_below[cr]
            leaves_below[node] = nl + nr
            I += abs(nl - nr)
    if normalize is None or normalize is False:
        return I
    elif not isinstance(normalize, str):
        raise TypeError('normalize must be None or a string')
    normalize = normalize.lower()
    if normalize == 'leaves':
        return (2.0 * I) / ((n - 1) * (n - 2))
    elif normalize == 'yule':
        return (I - (n * log(n)) - (n * ((EULER_GAMMA - 1) - log(2)))) / n
    elif normalize == 'pda':
        return I / (n ** 1.5)
    else:
        raise RuntimeError("normalize must be None, 'leaves', 'yule', or 'pda'")
Compute the Colless balance index of this ``Tree``. If the tree has polytomies, they will be randomly resolved Args: ``normalize`` (``str``): How to normalize the Colless index (if at all) * ``None`` to not normalize * ``"leaves"`` to normalize by the number of leaves * ``"yule"`` to normalize to the Yule model * ``"pda"`` to normalize to the Proportional to Distinguishable Arrangements model Returns: ``float``: Colless index (either normalized or not)
codesearchnet
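The unnormalized index is the sum over internal nodes of |left leaves - right leaves|; a standalone sketch on nested 2-tuples (a hypothetical representation, not the library's Tree class):

def colless_index(tree):
    # Return (num_leaves, colless_sum) for a binary tree of nested 2-tuples.
    if not isinstance(tree, tuple):  # leaf
        return 1, 0
    nl, il = colless_index(tree[0])
    nr, ir = colless_index(tree[1])
    return nl + nr, il + ir + abs(nl - nr)

# Caterpillar tree on 4 leaves: imbalances 0 + 1 + 2 = 3.
assert colless_index(((('a', 'b'), 'c'), 'd')) == (4, 3)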
def _tzinfome(tzinfo):
    if not isinstance(tzinfo, datetime.tzinfo):
        try:
            tzinfo = pytz.timezone(tzinfo)
            assert tzinfo.zone in pytz.all_timezones
        except AttributeError:
            raise pytz.UnknownTimeZoneError('Unknown timezone! %s' % tzinfo)
    return tzinfo
Gets a tzinfo object from a string. Args: tzinfo: A string (or string like) object, or a datetime.tzinfo object. Returns: A datetime.tzinfo object. Raises: UnknownTimeZoneError: If the timezone given can't be decoded.
codesearchnet
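Usage sketch (assumes pytz is installed):

import datetime
import pytz

tz = _tzinfome('Europe/Paris')      # string -> tzinfo
assert isinstance(tz, datetime.tzinfo)
assert _tzinfome(tz) is tz          # tzinfo objects pass straight through

try:
    _tzinfome('Not/AZone')
except pytz.UnknownTimeZoneError as exc:
    print(exc)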
def get_cot_artifacts(context):
    artifacts = {}
    filepaths = filepaths_in_dir(context.config['artifact_dir'])
    hash_alg = context.config['chain_of_trust_hash_algorithm']
    for filepath in sorted(filepaths):
        path = os.path.join(context.config['artifact_dir'], filepath)
        sha = get_hash(path, hash_alg=hash_alg)
        artifacts[filepath] = {hash_alg: sha}
    return artifacts
Generate the artifact relative paths and shas for the chain of trust. Args: context (scriptworker.context.Context): the scriptworker context. Returns: dict: a dictionary of {"path/to/artifact": {"hash_alg": "..."}, ...}
juraj-google-style
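A self-contained sketch of the same output shape, with hashlib standing in for scriptworker's get_hash/filepaths_in_dir helpers:

import hashlib
import os

def cot_artifacts(artifact_dir, hash_alg='sha256'):
    artifacts = {}
    for root, _, files in os.walk(artifact_dir):
        for name in sorted(files):
            path = os.path.join(root, name)
            rel = os.path.relpath(path, artifact_dir)
            digest = hashlib.new(hash_alg)
            with open(path, 'rb') as f:
                for chunk in iter(lambda: f.read(8192), b''):
                    digest.update(chunk)
            artifacts[rel] = {hash_alg: digest.hexdigest()}
    return artifacts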
def _message_to_entity(msg, modelclass):
    ent = modelclass()
    for prop_name, prop in modelclass._properties.iteritems():
        if prop._code_name == 'blob_':
            continue
        value = getattr(msg, prop_name)
        if value is not None and isinstance(prop, model.StructuredProperty):
            if prop._repeated:
                value = [_message_to_entity(v, prop._modelclass) for v in value]
            else:
                value = _message_to_entity(value, prop._modelclass)
        setattr(ent, prop_name, value)
    return ent
Recursive helper for _to_base_type() to convert a message to an entity. Args: msg: A Message instance. modelclass: A Model subclass. Returns: An instance of modelclass.
juraj-google-style
def app(self):
    app = self._app or current_app
    if not in_app_context(app):
        raise RuntimeError("This component hasn't been initialized yet "
                           "and an app context doesn't exist.")
    if hasattr(app, '_get_current_object'):
        app = app._get_current_object()
    return app
Internal method that will supply the app to use internally. Returns: flask.Flask: The app to use within the component. Raises: RuntimeError: This is raised if no app was provided to the component and the method is being called outside of an application context.
codesearchnet
def ExpandWindowsUserEnvironmentVariables(data_string, knowledge_base,
                                          sid=None, username=None):
    win_environ_regex = re.compile(r"%([^%]+?)%")
    components = []
    offset = 0
    for match in win_environ_regex.finditer(data_string):
        components.append(data_string[offset:match.start()])
        kb_user = knowledge_base.GetUser(sid=sid, username=username)
        kb_value = None
        if kb_user:
            kb_value = getattr(kb_user, match.group(1).lower(), None)
        if isinstance(kb_value, string_types) and kb_value:
            components.append(kb_value)
        else:
            components.append("%%%s%%" % match.group(1))
        offset = match.end()
    components.append(data_string[offset:])
    return "".join(components)
r"""Take a string and expand windows user environment variables based. Args: data_string: A string, e.g. "%TEMP%\\LogFiles" knowledge_base: A knowledgebase object. sid: A Windows SID for a user to expand for. username: A Windows user name to expand for. Returns: A string with available environment variables expanded.
juraj-google-style
def create(self, project_id=None):
    if not self.exists():
        if project_id is None:
            project_id = self._api.project_id
        try:
            self._info = self._api.buckets_insert(self._name, project_id=project_id)
        except Exception as e:
            raise e
    return self
Creates the bucket. Args: project_id: the project in which to create the bucket. Returns: The bucket. Raises: Exception if there was an error creating the bucket.
codesearchnet
def resize_bytes(fobj, old_size, new_size, offset):
    if new_size < old_size:
        delete_size = old_size - new_size
        delete_at = offset + new_size
        delete_bytes(fobj, delete_size, delete_at)
    elif new_size > old_size:
        insert_size = new_size - old_size
        insert_at = offset + old_size
        insert_bytes(fobj, insert_size, insert_at)
Resize an area in a file adding and deleting at the end of it. Does nothing if no resizing is needed. Args: fobj (fileobj) old_size (int): The area starting at offset new_size (int): The new size of the area offset (int): The start of the area Raises: IOError
codesearchnet
def digest_content(self, rule):
    data = OrderedDict()
    current_key = None
    for token in rule.content:
        if token.type == 'ident':
            name = token.value
            if name.startswith('-'):
                name = name[1:]
            current_key = name
            data[current_key] = None
        if token.type == 'string':
            data[current_key] = token.value
    return data
Walk on rule content tokens to return a dict of properties. This is pretty naive and will choke/fail on everything that is more evolved than simple ``ident(string):value(string)`` Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by tinycss2. Returns: dict: Dictionary of retrieved variables and properties.
codesearchnet
def __init__(self, project: str = None, retry: Retry = None, timeout: float = 120,
             metadata: Sequence[Tuple[str, str]] = (),
             catalog_name: str = 'default_catalog',
             event_store: str = 'default_event_store',
             placement_id: str = None):
    self.project = project
    self.retry = retry
    self.timeout = timeout
    self.metadata = metadata
    self.placement_id = placement_id
    self.catalog_name = catalog_name
    self.event_store = event_store
    if placement_id is None:
        raise ValueError('placement_id must be specified')
    else:
        self.placement_id = placement_id
Initializes a :class:`PredictUserEvent` transform. Args: project (str): Optional. GCP project name in which the catalog data will be imported. retry: Optional. Designation of what errors, if any, should be retried. timeout (float): Optional. The amount of time, in seconds, to wait for the request to complete. metadata: Optional. Strings which should be sent along with the request as metadata. catalog_name (str): Optional. Name of the catalog. Default: 'default_catalog' event_store (str): Optional. Name of the event store. Default: 'default_event_store' placement_id (str): Required. ID of the recommendation engine placement. This id is used to identify the set of models that will be used to make the prediction.
github-repos
def get_nodes(cluster):
    gk = get_api_client()
    site = get_cluster_site(cluster)
    return gk.sites[site].clusters[cluster].nodes.list()
Get all the nodes of a given cluster. Args: cluster(string): uid of the cluster (e.g 'rennes')
juraj-google-style
def _reduce_output(self, outputs, seq_lengths):
    batch_size = outputs.shape[0]
    reduced = []
    for i in range(batch_size):
        if self.lstm_reduction == "mean":
            reduced.append(outputs[i, : seq_lengths[i], :].mean(dim=0))
        elif self.lstm_reduction == "max":
            reduced.append(outputs[i, : seq_lengths[i], :].max(dim=0)[0])
        elif self.lstm_reduction == "last":
            reduced.append(outputs[i, seq_lengths[i] - 1, :])
        elif self.lstm_reduction == "attention":
            reduced.append(self._attention(outputs[i, : seq_lengths[i], :]))
        else:
            msg = (
                f"Did not recognize lstm kwarg 'lstm_reduction' == "
                f"{self.lstm_reduction}"
            )
            raise ValueError(msg)
    return torch.stack(reduced, dim=0)
Reduces the output of an LSTM step Args: outputs: (torch.FloatTensor) the hidden state outputs from the lstm, with shape [batch_size, max_seq_length, hidden_size] seq_lengths: the unpadded length of each sequence in the batch, used to exclude padding before reducing
juraj-google-style
def constants_from_enum(cls, module=None):
    if not issubclass(cls, enum.Enum):
        raise TypeError("Class '{}' is not subclass of enum.".format(cls.__name__))
    if module is None:
        module = cls.__module__
    for value in cls:
        constant('{}.{}'.format(module, str(value)), value)
    return cls
Decorator for an enum class that generates Gin constants from values. Generated constants have format `module.ClassName.ENUM_VALUE`. The module name is optional when using the constant. Args: cls: Class type. module: The module to associate with the constants, to help handle naming collisions. If `None`, `cls.__module__` will be used. Returns: Class type (identity function). Raises: TypeError: When applied to a non-enum class.
codesearchnet
def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
    return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)
Returns ONNX encoder config for `VisionEncoderDecoder` model. Args: encoder_config (`PretrainedConfig`): The encoder model's configuration to use when exporting to ONNX. Returns: [`VisionEncoderDecoderEncoderOnnxConfig`]: An instance of the ONNX configuration object
github-repos
def recipe_kv_uploader(config, recipe_name):
    drive(config, {
        'auth': 'user',
        'hour': [],
        'copy': {
            'source': 'https://...',  # template-sheet URL truncated in this snippet
            'destination': recipe_name,  # hypothetical: the remainder of the recipe is missing
        },
    })
A tool for bulk editing key value pairs for CM placements. Args: config (Configuration) - script credentials and configuration. recipe_name (string) - Name of document to deploy to.
github-repos
def DisplayWidth(self, buf):
    if not isinstance(buf, str):
        return len(buf)
    cached = self._display_width_cache.get(buf, None)
    if cached is not None:
        return cached
    width = 0
    max_width = 0
    i = 0
    while i < len(buf):
        if self._csi and buf[i:].startswith(self._csi):
            i += self.GetControlSequenceLen(buf[i:])
        elif buf[i] == '\n':
            max_width = max(width, max_width)
            width = 0
            i += 1
        else:
            width += GetCharacterDisplayWidth(buf[i])
            i += 1
    max_width = max(width, max_width)
    self._display_width_cache[buf] = max_width
    return max_width
Returns the display width of buf, handling unicode and ANSI controls. Args: buf: The string to count from. Returns: The display width of buf, handling unicode and ANSI controls.
github-repos
def _GenerateSshKey(self, key_type, key_dest):
    with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp:
        temp_key = temp.name
    command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q']
    try:
        self.logger.info('Generating SSH key %s.', key_dest)
        subprocess.check_call(command)
    except subprocess.CalledProcessError:
        self.logger.warning('Could not create SSH key %s.', key_dest)
        return
    shutil.move(temp_key, key_dest)
    shutil.move('%s.pub' % temp_key, '%s.pub' % key_dest)
    file_utils.SetPermissions(key_dest, mode=0o600)
    file_utils.SetPermissions('%s.pub' % key_dest, mode=0o644)
Generate a new SSH key. Args: key_type: string, the type of the SSH key. key_dest: string, a file location to store the SSH key.
juraj-google-style
def parse(self, argument):
    if not isinstance(argument, six.string_types):
        raise TypeError('flag value must be a string, found "{}"'.format(
            type(argument)))
    return argument
Parses the string argument and returns the native value. By default it returns its argument unmodified. Args: argument: string argument passed in the commandline. Raises: ValueError: Raised when it fails to parse the argument. TypeError: Raised when the argument has the wrong type. Returns: The parsed value in native type.
juraj-google-style
class MambaOutput(ModelOutput):
    last_hidden_state: Optional[torch.FloatTensor] = None
    cache_params: Optional[MambaCache] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
Class for the MAMBA model outputs. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. cache_params (`MambaCache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
github-repos
def _default_global_step_tensor(self):
    try:
        gs = ops.get_default_graph().get_tensor_by_name('global_step:0')
        if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
            return gs
        else:
            logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
            return None
    except KeyError:
        return None
Returns the global_step from the default graph. Returns: The global step `Tensor` or `None`.
github-repos
def next_weekday(date):
    n_days = 7 - date.weekday()
    if n_days > 3:
        n_days = 1
    return date + datetime.timedelta(days=n_days)
Return the first weekday after date Args: date (datetime or datetime.date) Returns: (datetime or datetime.date) Raises: -
codesearchnet
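A quick check of the arithmetic: weekday() is 0 for Monday, so `7 - weekday() > 3` holds Monday through Thursday (advance one day), while Friday, Saturday and Sunday land on the following Monday:

import datetime

assert next_weekday(datetime.date(2024, 5, 1)) == datetime.date(2024, 5, 2)  # Wed -> Thu
assert next_weekday(datetime.date(2024, 5, 3)) == datetime.date(2024, 5, 6)  # Fri -> Mon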
def unpack_archive(*components, **kwargs) -> str:
    path = fs.abspath(*components)
    compression = kwargs.get("compression", "bz2")
    dir = kwargs.get("dir", fs.dirname(path))
    fs.cd(dir)
    tar = tarfile.open(path, "r:" + compression)
    tar.extractall()
    tar.close()
    fs.cdpop()
    return dir
Unpack a compressed archive. Arguments: *components (str[]): Absolute path. **kwargs (dict, optional): Set "compression" to compression type. Default: bz2. Set "dir" to destination directory. Defaults to the directory of the archive. Returns: str: Path to directory.
juraj-google-style
def destroy_s3(app='', env='dev', **_):
    session = boto3.Session(profile_name=env)
    client = session.resource('s3')
    generated = get_details(app=app, env=env)
    archaius = generated.archaius()
    bucket = client.Bucket(archaius['bucket'])
    for item in bucket.objects.filter(Prefix=archaius['path']):
        item.Object().delete()
        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)
    return True
Destroy S3 Resources for _app_ in _env_. Args: app (str): Application name env (str): Deployment environment/account name Returns: boolean: True if destroyed successfully
codesearchnet
def from_json_and_lambdas(cls, file: str, lambdas):
    with open(file, "r") as f:
        data = json.load(f)
    return cls.from_dict(data, lambdas)
Builds a GrFN from a JSON object. Args: cls: The class variable for object creation. file: Filename of a GrFN JSON file. lambdas: Module of lambda functions referenced by the GrFN. Returns: type: A GroundedFunctionNetwork object.
juraj-google-style
def delete(self, filename):
    folder = "Packages" if is_package(filename) else "Scripts"
    path = os.path.join(self.connection["mount_point"], folder, filename)
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)
Delete a file from the repository. This method will not delete a script from a migrated JSS. Please remove migrated scripts with jss.Script.delete. Args: filename: String filename only (i.e. no path) of file to delete. Will handle deleting scripts vs. packages automatically.
juraj-google-style
def write(self, file_prefix, session=None, options=None):
    return self._write(file_prefix, session, options=options)
Writes a training checkpoint. The checkpoint includes variables created by this object and any trackable objects it depends on at the time `Checkpoint.write()` is called. `write` does not number checkpoints, increment `save_counter`, or update the metadata used by `tf.train.latest_checkpoint`. It is primarily intended for use by higher level checkpoint management utilities. `save` provides a very basic implementation of these features. Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional `tf.train.CheckpointOptions` object. Returns: The full path to the checkpoint (i.e. `file_prefix`).
github-repos
def _map_free_gates(layout, gates, coupling_map):
    blocked_qubits = set()
    mapped_gates = []
    remaining_gates = []
    for gate in gates:
        if not gate['partition']:
            qubits = [n for n in gate['graph'].nodes() if n.type == 'op'][0].qargs
            if not qubits:
                continue
            if blocked_qubits.intersection(qubits):
                blocked_qubits.update(qubits)
                remaining_gates.append(gate)
            else:
                mapped_gate = _transform_gate_for_layout(gate, layout)
                mapped_gates.append(mapped_gate)
            continue
        qubits = gate['partition'][0]
        if blocked_qubits.intersection(qubits):
            blocked_qubits.update(qubits)
            remaining_gates.append(gate)
        elif len(qubits) == 1:
            mapped_gate = _transform_gate_for_layout(gate, layout)
            mapped_gates.append(mapped_gate)
        elif coupling_map.distance(*[layout[q] for q in qubits]) == 1:
            mapped_gate = _transform_gate_for_layout(gate, layout)
            mapped_gates.append(mapped_gate)
        else:
            blocked_qubits.update(qubits)
            remaining_gates.append(gate)
    return mapped_gates, remaining_gates
Map all gates that can be executed with the current layout. Args: layout (Layout): Map from virtual qubit index to physical qubit index. gates (list): Gates to be mapped. coupling_map (CouplingMap): CouplingMap for target device topology. Returns: tuple: mapped_gates (list): ops for gates that can be executed, mapped onto layout. remaining_gates (list): gates that cannot be executed on the layout.
codesearchnet
def symbolic_master_equation(self, rho=None):
    L, H = self.L, self.H
    if rho is None:
        rho = OperatorSymbol('rho', hs=self.space)
    return (-I * (H * rho - rho * H) +
            sum(Lk * rho * adjoint(Lk) -
                (adjoint(Lk) * Lk * rho + rho * adjoint(Lk) * Lk) / 2
                for Lk in L.matrix.ravel()))
Compute the symbolic Liouvillian acting on a state rho If no rho is given, an OperatorSymbol is created in its place. This corresponds to the RHS of the master equation in which an average is taken over the external noise degrees of freedom. Args: rho (Operator): A symbolic density matrix operator Returns: Operator: The RHS of the master equation.
juraj-google-style
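For reference, the returned expression is the standard Lindblad form of the master equation:

\dot{\rho} = -i\,[H, \rho] + \sum_k \Big( L_k \rho L_k^\dagger - \tfrac{1}{2}\big( L_k^\dagger L_k \rho + \rho\, L_k^\dagger L_k \big) \Big)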
def ParseApplicationResourceUsage(self, parser_mediator, cache=None,
                                  database=None, table=None, **unused_kwargs):
    self._ParseGUIDTable(
        parser_mediator, cache, database, table,
        self._APPLICATION_RESOURCE_USAGE_VALUES_MAP,
        SRUMApplicationResourceUsageEventData)
Parses the application resource usage table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache, which contains information about the identifiers stored in the SruDbIdMapTable table. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table.
codesearchnet
def get_library_progress(self):
    kbp_dict = self._get_api_call('get_library_progress')
    return {asin: KindleCloudReaderAPI._kbp_to_progress(kbp)
            for (asin, kbp) in kbp_dict.iteritems()}
Returns the reading progress for all books in the kindle library. Returns: A mapping of ASINs to `ReadingProgress` instances corresponding to the books in the current user's library.
codesearchnet
def _scale_gradient_op(dtype):
    def scale_gradient_backward(op, grad):
        scale = op.inputs[1]
        scaled_grad = grad * scale
        return scaled_grad, None

    def scale_gradient_forward(x, scale):
        del scale
        return x

    func_name = 'ScaleGradient_{}'.format(dtype.name)
    return function.Defun(
        dtype, dtype,
        python_grad_func=scale_gradient_backward,
        func_name=func_name)(scale_gradient_forward)
Create an op that scales gradients using a Defun. The tensorflow Defun decorator creates an op and tensorflow caches these ops automatically according to `func_name`. Using a Defun decorator twice with the same `func_name` does not create a new op, instead the cached op is used. This method produces a new op the first time it is called with a given `dtype` argument, and then uses the cached op each time it is called after that with the same `dtype`. The scale value is given as an argument for the forward pass method so that it can be used in the backwards pass. Args: dtype: the dtype of the net whose gradient is being scaled. Returns: The op that scales gradients.
codesearchnet
def append(self, event, help=''):
    if isinstance(event, str):
        self._events[event] = HookList(is_waterfall=self.is_waterfall)
        self._help[event] = (help, getframeinfo(stack()[1][0]))
        if not help:
            logger.warning("Great, don't say anything about your hooks and "
                           "wait for plugin creators to figure it out.")
    elif isinstance(event, Iterable):
        for name in event:
            self.append(name)
    else:
        raise TypeError('Invalid event name!')
Creates a new event. `event` may be iterable or string Args: event (str): Name of event to declare Kwargs: help (str): Help string for the event Raises: TypeError **Please** describe the event and its calling arguments in the help string.
codesearchnet
def get_all_anonymous_mappings(self, struct1, struct2, niggli=True,
                               include_dist=False):
    struct1, struct2 = self._process_species([struct1, struct2])
    struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2, niggli)

    matches = self._anonymous_match(struct1, struct2, fu, s1_supercell,
                                    break_on_match=not include_dist)
    if matches:
        if include_dist:
            return [(m[0], m[1][0]) for m in matches]
        else:
            return [m[0] for m in matches]
Performs an anonymous fitting, which allows distinct species in one structure to map to another. Returns a dictionary of species substitutions that are within tolerance Args: struct1 (Structure): 1st structure struct2 (Structure): 2nd structure niggli (bool): Find niggli cell in preprocessing include_dist (bool): Return the maximin distance with each mapping Returns: list of species mappings that map struct1 to struct2.
codesearchnet
class MajorityVote(LabelAggregation):
    def __init__(self, tie_breaker=DEFAULT_NORMAL_LABEL, **kwargs):
        self._tie_breaker = tie_breaker

        def inner(predictions: Iterable[int]) -> int:
            counters = collections.Counter(predictions)
            if counters[self._normal_label] < counters[self._outlier_label]:
                vote = self._outlier_label
            elif counters[self._normal_label] > counters[self._outlier_label]:
                vote = self._normal_label
            else:
                vote = self._tie_breaker
            return vote

        super().__init__(agg_func=inner, **kwargs)
Aggregates anomaly labels using majority voting. This `AggregationFn` implements a majority voting strategy to combine anomaly labels from multiple `AnomalyPrediction` objects. It counts the occurrences of normal and outlier labels and selects the label with the higher count as the aggregated label. In case of a tie, a tie-breaker label is used. Example: If input labels are [normal, outlier, outlier, normal, outlier], and normal_label=0, outlier_label=1, then the aggregated label will be outlier (1) because outliers have a majority (3 vs 2). Args: normal_label (int): The integer label for normal predictions. Defaults to 0. outlier_label (int): The integer label for outlier predictions. Defaults to 1. tie_breaker (int): The label to return if there is a tie in votes. Defaults to 0 (normal_label). **kwargs: Additional keyword arguments to pass to the base `LabelAggregation` class.
github-repos
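The voting rule in isolation, as a standalone function (hypothetical defaults normal=0 and outlier=1, matching the docstring):

import collections

def majority_vote(preds, normal=0, outlier=1, tie_breaker=0):
    counts = collections.Counter(preds)
    if counts[normal] < counts[outlier]:
        return outlier
    if counts[normal] > counts[outlier]:
        return normal
    return tie_breaker

assert majority_vote([0, 1, 1, 0, 1]) == 1  # the docstring's example: 3 outliers vs 2 normals
assert majority_vote([0, 1]) == 0           # tie -> tie_breaker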
def _validate_input_state(quantum_state):
    rho = np.asarray(quantum_state)
    if rho.ndim == 1:
        rho = np.outer(rho, np.conj(rho))
    shape = np.shape(rho)
    if len(shape) != 2 or shape[0] != shape[1]:
        raise VisualizationError('Input is not a valid quantum state.')
    num = int(np.log2(rho.shape[0]))
    if 2 ** num != rho.shape[0]:
        raise VisualizationError('Input is not a multi-qubit quantum state.')
    return rho
Validates the input to state visualization functions. Args: quantum_state (ndarray): Input state / density matrix. Returns: rho: A 2d numpy array for the density matrix. Raises: VisualizationError: Invalid input.
codesearchnet
def get(name):
    for matcher in matchers:
        if matcher.__name__ == name or getattr(matcher, 'name', None) == name:
            return matcher
Returns a matcher instance by class or alias name. Arguments: name (str): matcher class name or alias. Returns: matcher: found matcher instance, otherwise ``None``.
juraj-google-style
def gumbel_softmax(x, z_size, mode, softmax_k=0,
                   temperature_warmup_steps=150000, summary=True, name=None):
    with tf.variable_scope(name, default_name="gumbel_softmax"):
        m = tf.layers.dense(x, 2**z_size, name="mask")
        if softmax_k > 0:
            m, kl = top_k_softmax(m, softmax_k)
            return m, m, 1.0 - tf.reduce_mean(kl)
        logsm = tf.nn.log_softmax(m)
        gumbel_samples = gumbel_sample(common_layers.shape_list(m))
        steps = temperature_warmup_steps
        gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
        temperature = 1.2 - common_layers.inverse_lin_decay(steps)
        temperature = tf.cond(
            tf.less(tf.random_uniform([]), 0.9),
            lambda: temperature,
            lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
        s = tf.nn.softmax((logsm + gumbel_samples) / temperature)
        m = tf.nn.softmax(m)
        kl = -tf.reduce_max(logsm, axis=-1)
        if summary:
            tf.summary.histogram("max-log", tf.reshape(kl, [-1]))
        maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])
        maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))
        distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot
        d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)
        d_variance = tf.reduce_mean(
            tf.squared_difference(distrib, d_mean), axis=[0])
        d_dev = -tf.reduce_mean(d_variance)
        ret = s
        if mode != tf.estimator.ModeKeys.TRAIN:
            ret = tf.reshape(maxvhot, common_layers.shape_list(s))
        return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
Gumbel softmax discretization bottleneck. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. mode: tf.estimator.ModeKeys. softmax_k: If > 0 then do top-k softmax. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. summary: Whether to write summaries. name: Name for the bottleneck scope. Returns: Embedding function, discrete code, and loss.
juraj-google-style
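The gumbel_sample helper is not shown; the standard trick is sampling -log(-log(U)) for uniform U. A sketch of what it plausibly does, in the same TF1-style API as the snippet:

import tensorflow as tf

def gumbel_sample(shape, eps=1e-20):
    # Standard Gumbel noise via the inverse-CDF trick.
    u = tf.random_uniform(shape, minval=0.0, maxval=1.0)
    return -tf.log(-tf.log(u + eps) + eps)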
def register_entity(self, entity_value, entity_type, alias_of=None):
    if alias_of:
        self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))
    else:
        self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
        self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept'))
Register an entity to be tagged in potential parse results Args: entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory") entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
codesearchnet
def zero_d_graph_to_molecule_graph(bonded_structure, graph):
    import networkx as nx

    seen_indices = []
    sites = []
    start_index = list(graph.nodes())[0]
    queue = [(start_index, (0, 0, 0), bonded_structure.structure[start_index])]
    while len(queue) > 0:
        comp_i, image_i, site_i = queue.pop(0)
        if comp_i in [x[0] for x in seen_indices]:
            raise ValueError('Graph component is not 0D')
        seen_indices.append((comp_i, image_i))
        sites.append(site_i)
        for site_j in bonded_structure.get_connected_sites(comp_i, jimage=image_i):
            if ((site_j.index, site_j.jimage) not in seen_indices and
                    (site_j.index, site_j.jimage, site_j.site) not in queue):
                queue.append((site_j.index, site_j.jimage, site_j.site))
    indices_ordering = np.argsort([x[0] for x in seen_indices])
    sorted_sites = np.array(sites, dtype=object)[indices_ordering]
    sorted_graph = nx.convert_node_labels_to_integers(graph, ordering='sorted')
    mol = Molecule([s.specie for s in sorted_sites],
                   [s.coords for s in sorted_sites])
    mol_graph = MoleculeGraph.with_edges(mol, nx.Graph(sorted_graph).edges())
    return mol_graph
Converts a zero-dimensional networkx Graph object into a MoleculeGraph. Implements a similar breadth-first search to that in calculate_dimensionality_of_site(). Args: bonded_structure (StructureGraph): A structure with bonds, represented as a pymatgen structure graph. For example, generated using the CrystalNN.get_bonded_structure() method. graph (nx.Graph): A networkx `Graph` object for the component of interest. Returns: (MoleculeGraph): A MoleculeGraph object of the component.
codesearchnet
def __best_intent(self, parse_result, context=[]):
    best_intent = None
    best_tags = None
    context_as_entities = [{'entities': [c]} for c in context]
    for intent in self.intent_parsers:
        i, tags = intent.validate_with_tags(
            parse_result.get('tags') + context_as_entities,
            parse_result.get('confidence'))
        if not best_intent or (i and i.get('confidence') > best_intent.get('confidence')):
            best_intent = i
            best_tags = tags
    return best_intent, best_tags
Decide the best intent Args: parse_result(list): results used to match the best intent. context(list): a list of context entities to include when tagging. Returns: best_intent, best_tags: best_intent : The best intent for given results best_tags : The Tags for result
codesearchnet
class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        if eos_token_id < 0:
            raise ValueError(f'The forced eos token id must be a non-negative integer, got {eos_token_id}')
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
        if cur_len == self.max_length - 1:
            batch_size, num_tokens = scores.shape
            scores = tf.zeros((batch_size, 1))
            if self.eos_token_id > 0:
                scores = tf.concat(
                    (tf.broadcast_to(-float('inf'), (batch_size, self.eos_token_id)), scores),
                    axis=-1)
            if self.eos_token_id < num_tokens - 1:
                scores = tf.concat(
                    (scores,
                     tf.broadcast_to(-float('inf'), (batch_size, num_tokens - 1 - self.eos_token_id))),
                    axis=-1)
        return scores
[`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. Args: max_length (`int`): The maximum length of the sequence to be generated. eos_token_id (`int`): The id of the token to force as the last generated token when `max_length` is reached.
github-repos
def retrieve_model_classes(model_type: str,
                           frameworks: Optional[List[str]] = None) -> Dict[str, List[str]]:
    if frameworks is None:
        frameworks = get_default_frameworks()
    modules = {
        'pt': auto_module.modeling_auto if is_torch_available() else None,
        'tf': auto_module.modeling_tf_auto if is_tf_available() else None,
        'flax': auto_module.modeling_flax_auto if is_flax_available() else None,
    }
    model_classes = {}
    for framework in frameworks:
        new_model_classes = []
        if modules[framework] is None:
            raise ValueError(f'You selected {framework} in the frameworks, but it is not installed.')
        model_mappings = [attr for attr in dir(modules[framework])
                          if _re_model_mapping.search(attr) is not None]
        for model_mapping_name in model_mappings:
            model_mapping = getattr(modules[framework], model_mapping_name)
            if model_type in model_mapping:
                new_model_classes.append(model_mapping[model_type])
        if len(new_model_classes) > 0:
            model_classes[framework] = list(set(new_model_classes))
    return model_classes
Retrieve the model classes associated to a given model. Args: model_type (`str`): A valid model type (like "bert" or "gpt2") frameworks (`List[str]`, *optional*): The frameworks to look for. Will default to `["pt", "tf", "flax"]`, passing a smaller list will restrict the classes returned. Returns: `Dict[str, List[str]]`: A dictionary with one key per framework and the list of model classes associated to that framework as values.
github-repos
def _ConvertAttributeValueToDict(cls, attribute_value):
    if isinstance(attribute_value, py2to3.BYTES_TYPE):
        encoded_value = binascii.b2a_qp(attribute_value)
        encoded_value = codecs.decode(encoded_value, 'ascii')
        attribute_value = {
            '__type__': 'bytes',
            'stream': '{0:s}'.format(encoded_value)
        }
    elif isinstance(attribute_value, (list, tuple)):
        json_list = []
        for list_element in attribute_value:
            json_dict = cls._ConvertAttributeValueToDict(list_element)
            json_list.append(json_dict)
        if isinstance(attribute_value, list):
            attribute_value = json_list
        else:
            attribute_value = {
                '__type__': 'tuple',
                'values': json_list
            }
    elif isinstance(attribute_value, collections.Counter):
        attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value)
    elif isinstance(attribute_value, dfvfs_path_spec.PathSpec):
        attribute_value = cls._ConvertPathSpecToDict(attribute_value)
    elif isinstance(attribute_value, containers_interface.AttributeContainer):
        attribute_value = cls._ConvertAttributeContainerToDict(attribute_value)
    return attribute_value
Converts an attribute value into a JSON dictionary. Args: attribute_value (object): an attribute value. Returns: dict|list: The JSON serialized object which can be a dictionary or a list.
juraj-google-style
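The bytes branch round-trips through quoted-printable encoding; in isolation:

import binascii
import codecs

raw = b'\x00\xffplaso'
encoded = codecs.decode(binascii.b2a_qp(raw), 'ascii')  # e.g. '=00=FFplaso'
assert binascii.a2b_qp(encoded) == raw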
def assert_integer_v2(x, message=None, name=None):
    assert_integer(x=x, message=message, name=name)
Assert that `x` is of integer dtype. If `x` has a non-integer type, `message`, as well as the dtype of `x` are printed, and `InvalidArgumentError` is raised. This can always be checked statically, so this method returns nothing. Args: x: A `Tensor`. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_integer". Raises: TypeError: If `x.dtype` is not a non-quantized integer type.
github-repos
def depth_february_average_ground_temperature(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_february_average_ground_temperature`'.format(value))
    self._depth_february_average_ground_temperature = value
Corresponds to IDD Field `depth_february_average_ground_temperature` Args: value (float): value for IDD Field `depth_february_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def _eval(self, tensor):
    name = tensor if isinstance(tensor, str) else tensor.name
    index = '0'
    if ':' in name:
        name, index = name.split(':')
    if resource_variables_toggle.resource_variables_enabled():
        name = name + '/Read/ReadVariableOp'
    return self.evaluate(name + ':' + index)
Evaluate a tensor. Takes care of the variations between graphs produced with and without resource variables when determining the name of the operation to run. Args: tensor: The tensor to evaluate, or a string with the tensor name. Returns: The evaluated tensor as a numpy array.
github-repos
def slice_naive(self, key):
    cls = self.__class__
    key = check_key(self, key)
    enum = pd.Series(range(len(self)))
    enum.index = self.index
    values = self.field_values[enum[key].values]
    data = self.loc[key]
    return cls(data, field_values=values)
Naively (on index) slice the field data and values. Args: key: Int, slice, or iterable to select data and values Returns: field: Sliced field object
juraj-google-style
def execute(self, triple_map, output, **kwargs):
    sparql = PREFIX + triple_map.logicalSource.query.format(**kwargs)
    bindings = self.__get_bindings__(sparql)
    iterator = str(triple_map.logicalSource.iterator)
    for binding in bindings:
        entity_dict = binding.get(iterator)
        if isinstance(entity_dict, rdflib.term.Node):
            entity = entity_dict
        elif isinstance(entity_dict, dict):
            raw_value = entity_dict.get('value')
            if entity_dict.get('type').startswith('bnode'):
                entity = rdflib.BNode(raw_value)
            else:
                entity = rdflib.URIRef(raw_value)
        if triple_map.subjectMap.class_ is not None:
            output.add((entity, rdflib.RDF.type, triple_map.subjectMap.class_))
        sparql_query = self.__construct_compound_query__(triple_map).format(**kwargs)
        properties = self.__get_bindings__(sparql_query)
        for pred_obj_map in triple_map.predicateObjectMap:
            predicate = pred_obj_map.predicate
            if pred_obj_map.constant is not None:
                output.add((entity, predicate, pred_obj_map.constant))
                continue
            # Use the URI fragment as the key if present, else the last path segment.
            if "#" in str(predicate):
                key = str(predicate).split("#")[-1]
            else:
                key = str(predicate).split("/")[-1]
            for property_ in properties:
                if key in property_.keys():
                    info = {"about": property_.get(key)}
                    object_ = __get_object__(info)
                    output.add((entity, predicate, object_))
Method iterates through triple map's predicate object maps and processes query. Args: triple_map(SimpleNamespace): Triple Map output(rdflib.Graph): Graph that generated triples are added to
juraj-google-style
def stage_tc_create_tag(self, tag, resource):
    tag_resource = resource.tags(self.tcex.safetag(tag))
    tag_resource.http_method = 'POST'
    t_response = tag_resource.request()
    if t_response.get('status') != 'Success':
        self.log.warning(
            '[tcex] Failed adding tag "{}" ({}).'.format(
                tag, t_response.get('response').text))
Add a tag to a resource. Args: tag (str): The tag to be added to the resource. resource (obj): An instance of tcex resource class.
juraj-google-style
def parse_xhtml_reaction_notes(entry):
    properties = {}
    if entry.xml_notes is not None:
        cobra_notes = dict(parse_xhtml_notes(entry))

        if 'subsystem' in cobra_notes:
            properties['subsystem'] = cobra_notes['subsystem']
        if 'gene_association' in cobra_notes:
            properties['genes'] = cobra_notes['gene_association']
        if 'ec_number' in cobra_notes:
            properties['ec'] = cobra_notes['ec_number']
        if 'authors' in cobra_notes:
            properties['authors'] = [
                a.strip() for a in cobra_notes['authors'].split(';')]
        if 'confidence' in cobra_notes:
            try:
                value = int(cobra_notes['confidence'])
            except ValueError:
                logger.warning(
                    'Unable to parse confidence level for {} as an integer: '
                    '{}'.format(entry.id, cobra_notes['confidence']))
                value = cobra_notes['confidence']
            properties['confidence'] = value

    return properties
Return reaction properties defined in the XHTML notes. Older SBML models often define additional properties in the XHTML notes section because structured methods for defining properties had not been developed. This will try to parse the following properties: ``SUBSYSTEM``, ``GENE ASSOCIATION``, ``EC NUMBER``, ``AUTHORS``, ``CONFIDENCE``. Args: entry: :class:`SBMLReactionEntry`.
codesearchnet
def _FormatExpression(self, frame, expression):
    rc, value = _EvaluateExpression(frame, expression)
    if not rc:
        message = _FormatMessage(value['description']['format'],
                                 value['description'].get('parameters'))
        return '<' + message + '>'
    return self._FormatValue(value)
Evaluates a single watched expression and formats it into a string form. If expression evaluation fails, returns error message string. Args: frame: Python stack frame in which the expression is evaluated. expression: string expression to evaluate. Returns: Formatted expression value that can be used in the log message.
juraj-google-style
def _DiscoverElementTypeFromLocalname(self, type_localname):
    elem_type = None
    last_exception = None
    for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values():
        try:
            elem_type = self.zeep_client.get_type(
                '{%s}%s' % (ns_prefix, type_localname))
        except zeep.exceptions.LookupError as e:
            last_exception = e
            continue
        break
    if not elem_type:
        raise last_exception
    return elem_type
Searches all namespaces for a type by name. Args: type_localname: The name of the type. Returns: A fully qualified SOAP type with the specified name. Raises: A zeep.exceptions.LookupError if the type cannot be found in any namespace.
juraj-google-style
def app_trim_memory(self, pid: int or str, level: str = 'RUNNING_LOW') -> None:
    _, error = self._execute('-s', self.device_sn, 'shell', 'am',
                             'send-trim-memory', str(pid), level)
    if error and error.startswith('Error'):
        raise ApplicationsException(error.split(':', 1)[-1].strip())
Trim memory. Args: pid: Process ID of the app. level: HIDDEN | RUNNING_MODERATE | BACKGROUND | RUNNING_LOW | MODERATE | RUNNING_CRITICAL | COMPLETE
juraj-google-style
def IsComposite(self):
    return bool(self.condition) or (
        self.member_data_type_definition and
        self.member_data_type_definition.IsComposite())
Determines if the data type is composite. A composite data type consists of other data types. Returns: bool: True if the data type is composite, False otherwise.
codesearchnet
def _finish(self, update_ops, name_scope):
    return control_flow_ops.group(*update_ops, name=name_scope)
Do what is needed to finish the update. This is called with the `name_scope` using the "name" that users have chosen for the application of gradients. Args: update_ops: List of `Operation` objects to update variables. This list contains the values returned by the `_apply_dense()` and `_apply_sparse()` calls. name_scope: String. Name to use for the returned operation. Returns: The operation to apply updates.
github-repos
def _make_rebatch_fn(self, dataset, num_workers, num_replicas_in_sync):
    if num_replicas_in_sync % num_workers:
        raise ValueError(
            'tf.distribute expects every worker to have the same number of '
            'replicas. However, encountered `num_replicas_in_sync` ({}) that '
            'cannot be divided by `num_workers` ({})'.format(
                num_replicas_in_sync, num_workers))
    num_replicas_per_worker = num_replicas_in_sync // num_workers

    with ops.colocate_with(dataset._variant_tensor):
        batch_size = distribute.compute_batch_size(dataset)

    def rebatch_fn(dataset, worker_index):
        try:
            def apply_rebatch():
                batch_sizes = distribute.batch_sizes_for_worker(
                    batch_size, num_workers, num_replicas_per_worker, worker_index)
                return dataset.rebatch(batch_sizes).prefetch(num_replicas_per_worker)

            def apply_legacy_rebatch():
                return distribute._LegacyRebatchDataset(
                    dataset, num_replicas_in_sync).prefetch(num_replicas_per_worker)

            with ops.colocate_with(dataset._variant_tensor):
                return tf_cond.cond(
                    math_ops.not_equal(batch_size, -1),
                    true_fn=apply_rebatch,
                    false_fn=apply_legacy_rebatch)
        except errors.InvalidArgumentError as e:
            if 'without encountering a batch' in str(e):
                six.reraise(
                    ValueError,
                    ValueError(
                        'Call the `batch` method on the input Dataset in order to be '
                        'able to split your input across {} replicas.\n Please see '
                        'the tf.distribute.Strategy guide. {}'.format(
                            num_replicas_in_sync, e)),
                    sys.exc_info()[2])
            else:
                raise

    return rebatch_fn
Returns a callable that rebatches the input dataset. Args: dataset: A `tf.data.Dataset` representing the dataset to be distributed. num_workers: An integer representing the number of workers to distribute `dataset` among. num_replicas_in_sync: An integer representing the number of replicas in sync across all workers.
github-repos
def add_event_handler(self, callback, event=None):
    builders = events._get_handlers(callback)
    if builders is not None:
        for event in builders:
            self._event_builders.append((event, callback))
        return
    if isinstance(event, type):
        event = event()
    elif not event:
        event = events.Raw()
    self._event_builders.append((event, callback))
Registers the given callback to be called on the specified event. Args: callback (`callable`): The callable function accepting one parameter to be used. Note that if you have used `telethon.events.register` in the callback, ``event`` will be ignored, and instead the events you previously registered will be used. event (`_EventBuilder` | `type`, optional): The event builder class or instance to be used, for instance ``events.NewMessage``. If left unspecified, `telethon.events.raw.Raw` (the :tl:`Update` objects with no further processing) will be passed instead.
codesearchnet
def _delete_batch(self, container, blobs):
    container_client = self.client.get_container_client(container)
    results = {}
    for blob in blobs:
        try:
            response = container_client.delete_blob(blob)
            results[container, blob] = response
        except ResourceNotFoundError as e:
            results[container, blob] = e.status_code
    return results
A helper method. Azure Blob Storage Python Client allows batch deletions for blobs within the same container. Args: container: container name. blobs: list of blobs to be deleted. Returns: Dictionary of the form {(container, blob): error}, where error is None if the operation succeeded.
github-repos
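For context, a standalone sketch of the same per-blob loop using `azure-storage-blob` directly; the connection string and names are placeholders:

from azure.storage.blob import BlobServiceClient
from azure.core.exceptions import ResourceNotFoundError

service = BlobServiceClient.from_connection_string('<connection-string>')  # placeholder
container_client = service.get_container_client('my-container')

results = {}
for blob in ['a.txt', 'missing.txt']:
    try:
        container_client.delete_blob(blob)
        results['my-container', blob] = None  # delete_blob returns None on success
    except ResourceNotFoundError as e:
        results['my-container', blob] = e.status_code  # e.g. 404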
def finish(queue_name, task_id, owner, error=False):
    task = _get_task_with_policy(queue_name, task_id, owner)
    if (not (task.status == WorkQueue.LIVE)):
        logging.warning('Finishing already dead task. queue=%r, task_id=%r, owner=%r, status=%r', task.queue_name, task_id, owner, task.status)
        return False
    if (not error):
        task.status = WorkQueue.DONE
    else:
        task.status = WorkQueue.ERROR
    task.finished = datetime.datetime.utcnow()
    db.session.add(task)
    signals.task_updated.send(app, task=task)
    return True

Marks a work item on a queue as finished.

Args:
    queue_name: Name of the queue the work item is on.
    task_id: ID of the task that is finished.
    owner: Who or what has the current lease on the task.
    error: Defaults to false. True if this task's final state is an error.

Returns:
    True if the task has been finished for the first time; False if the task was already finished.

Raises:
    TaskDoesNotExistError if the task does not exist.
    LeaseExpiredError if the lease is no longer active.
    NotOwnerError if the specified owner no longer owns the task.
codesearchnet
def create_reverse_dependency_map() -> Dict[str, List[str]]:
    cache = {}
    example_deps, examples = init_test_examples_dependencies()
    all_modules = list(PATH_TO_TRANFORMERS.glob('**/*.py')) + examples
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}
    direct_deps.update(example_deps)
    something_changed = True
    while something_changed:
        something_changed = False
        for m in all_modules:
            for d in direct_deps[m]:
                if d.endswith('__init__.py'):
                    continue
                if d not in direct_deps:
                    raise ValueError(f'KeyError:{d}. From {m}')
                new_deps = set(direct_deps[d]) - set(direct_deps[m])
                if len(new_deps) > 0:
                    direct_deps[m].extend(list(new_deps))
                    something_changed = True
    reverse_map = collections.defaultdict(list)
    for m in all_modules:
        for d in direct_deps[m]:
            reverse_map[d].append(m)
    for m in [f for f in all_modules if f.endswith('__init__.py')]:
        direct_deps = get_module_dependencies(m, cache=cache)
        deps = sum([reverse_map[d] for d in direct_deps if not d.endswith('__init__.py')], direct_deps)
        reverse_map[m] = list(set(deps) - {m})
    return reverse_map

Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively.

Returns:
    `Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames depending on it recursively. This way the tests impacted by a change in file A are the test files in the list corresponding to key A in this result.
github-repos
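The inversion step at the heart of the function is easy to see on a toy direct-dependency table (this shows only direct reverse edges; the real function first computes the transitive closure):

import collections

# Hypothetical file names, for illustration.
direct_deps = {'a.py': ['b.py'], 'b.py': ['c.py'], 'c.py': []}

reverse_map = collections.defaultdict(list)
for module, deps in direct_deps.items():
    for dep in deps:
        reverse_map[dep].append(module)

print(dict(reverse_map))  # {'b.py': ['a.py'], 'c.py': ['b.py']}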
def verify_fileobj(fileobj, writable=False):
    try:
        data = fileobj.read(0)
    except Exception:
        if not hasattr(fileobj, "read"):
            raise ValueError("%r not a valid file object" % fileobj)
        raise ValueError("Can't read from file object %r" % fileobj)
    if not isinstance(data, bytes):
        raise ValueError("file object %r not opened in binary mode" % fileobj)
    if writable:
        try:
            fileobj.write(b"")
        except Exception:
            if not hasattr(fileobj, "write"):
                raise ValueError("%r not a valid file object" % fileobj)
            raise ValueError("Can't write to file object %r" % fileobj)

Verifies that the passed fileobj is a file like object which we can use.

Args:
    writable (bool): verify that the file object is writable as well

Raises:
    ValueError: In case the object is not a file object that is readable (or writable if required) or is not opened in bytes mode.
juraj-google-style
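A usage sketch (file names are hypothetical): only readable, binary-mode objects pass.

# Passes: opened for reading in bytes mode.
with open('song.mp3', 'rb') as f:
    verify_fileobj(f)

# Raises ValueError: text mode yields str, not bytes, from read().
with open('notes.txt', 'r') as f:
    verify_fileobj(f)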
def log(x):
    if any_symbolic_tensors((x,)):
        return Log().symbolic_call(x)
    return backend.numpy.log(x)

Natural logarithm, element-wise.

Args:
    x: Input tensor.

Returns:
    Output tensor, element-wise natural logarithm of `x`.
github-repos
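Assuming this op is exposed through the public `keras.ops` namespace, a quick numeric check:

import math
from keras import ops  # assumed public entry point for this op

x = ops.convert_to_tensor([1.0, math.e, math.e ** 2])
print(ops.log(x))  # approximately [0., 1., 2.]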
def ParseZeitgeistEventRow(self, parser_mediator, query, row, **unused_kwargs):
    query_hash = hash(query)
    event_data = ZeitgeistActivityEventData()
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')
    timestamp = self._GetRowValue(query_hash, row, 'timestamp')
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
    parser_mediator.ProduceEventWithEventData(event, event_data)

Parses a zeitgeist event row.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row.
juraj-google-style
def _integrate_parameter(self, x, x_is_constant, t0, t1, name=None):
    return x * (t1 - t0) if x_is_constant else x.integrate(t0, t1, name)

Returns the integral of x(t).dt over the interval [t0, t1].

Args:
    x: Scalar real `Tensor` of shape [`batch_shape`] or an instance of a left-continuous `PiecewiseConstantFunc`. The function to be integrated.
    x_is_constant: 'bool' which is True if x is a Scalar real `Tensor`.
    t0: A `Tensor` which is broadcastable to [`batch_shape`, `k`], where `k` is the number of intervals to evaluate the integral over. The start times of the `k` intervals.
    t1: A `Tensor` which is broadcastable to [`batch_shape`, `k`], where `k` is the number of intervals to evaluate the integral over. The end times of the `k` intervals.
    name: Str. The name to give this op.

Returns:
    A `Tensor` of shape [`batch_shape`, `k`] with the integrals of x over the intervals [`t0`, `t1`].
github-repos
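A worked example of the constant branch: for x(t) = 0.5 on [t0, t1] = [1.0, 3.0], the integral is x * (t1 - t0) = 0.5 * 2.0 = 1.0.

# Constant-x branch, plain Python for illustration.
x, t0, t1 = 0.5, 1.0, 3.0
print(x * (t1 - t0))  # 1.0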
def generate_timing_breakdown_plot(timing_stats, scaling_var, title, description, plot_file):
    cmap_data = colormaps._viridis_data
    n_subplots = len(six.viewkeys(timing_stats))
    fig, ax = plt.subplots(1, n_subplots+1, figsize=(3*(n_subplots+2), 5))
    for plot_num, p_count in enumerate(
            sorted(six.iterkeys(timing_stats), key=functions.sort_processor_counts)):
        case_data = timing_stats[p_count]
        all_timers = set(six.iterkeys(case_data['model'])) | set(six.iterkeys(case_data['bench']))
        all_timers = sorted(list(all_timers), reverse=True)
        cmap_stride = int(len(cmap_data)/(len(all_timers)+1))
        colors = {all_timers[i]: cmap_data[i*cmap_stride] for i in range(len(all_timers))}
        sub_ax = plt.subplot(1, n_subplots+1, plot_num+1)
        sub_ax.set_title(p_count)
        sub_ax.set_ylabel('Runtime (s)')
        for case, var_data in case_data.items():
            if case == 'bench':
                bar_num = 2
            else:
                bar_num = 1
            offset = 0
            if var_data != {}:
                for var in sorted(six.iterkeys(var_data), reverse=True):
                    if var != scaling_var:
                        plt.bar(bar_num, var_data[var]['mean'], 0.8, bottom=offset,
                                color=colors[var], label=(var if bar_num == 1 else '_none'))
                        offset += var_data[var]['mean']
                plt.bar(bar_num, var_data[scaling_var]['mean']-offset, 0.8, bottom=offset,
                        color=colors[scaling_var], label=(scaling_var if bar_num == 1 else '_none'))
        sub_ax.set_xticks([1.4, 2.4])
        sub_ax.set_xticklabels(('test', 'bench'))
    plt.legend(loc=6, bbox_to_anchor=(1.05, 0.5))
    plt.tight_layout()
    sub_ax = plt.subplot(1, n_subplots+1, n_subplots+1)
    hid_bar = plt.bar(1, 100)
    for group in hid_bar:
        group.set_visible(False)
    sub_ax.set_visible(False)
    if livvkit.publish:
        plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
    plt.savefig(plot_file)
    plt.close()
    return elements.image(title, description, os.path.basename(plot_file))

Generates a stacked-bar plot breaking down the runtime of each timed component for the model and benchmark runs at each processor count.

Args:
    timing_stats: a dictionary of the form {proc_count : {model||bench : { var : { stat : val }}}}
    scaling_var: the variable that accounts for the total runtime
    title: the title of the plot
    description: the description of the plot
    plot_file: the file to write the plot out to

Returns:
    an image element containing the plot file and metadata
juraj-google-style
def save_forensic_reports_to_splunk(self, forensic_reports):
    logger.debug("Saving forensic reports to Splunk")
    if type(forensic_reports) == dict:
        forensic_reports = [forensic_reports]
    if len(forensic_reports) < 1:
        return
    json_str = ""
    for report in forensic_reports:
        data = self._common_data.copy()
        data["sourcetype"] = "dmarc:forensic"
        timestamp = human_timestamp_to_timestamp(report["arrival_date_utc"])
        data["time"] = timestamp
        data["event"] = report.copy()
        json_str += "{0}\n".format(json.dumps(data))
    if not self.session.verify:
        logger.debug("Skipping certificate verification for Splunk HEC")
    try:
        response = self.session.post(self.url, data=json_str, timeout=self.timeout)
        response = response.json()
    except Exception as e:
        raise SplunkError(e.__str__())
    if response["code"] != 0:
        raise SplunkError(response["text"])

Saves forensic DMARC reports to Splunk

Args:
    forensic_reports (list): A list of forensic report dictionaries to save in Splunk
juraj-google-style
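The newline-delimited HEC payload the loop builds looks roughly like this (field values are illustrative; `self._common_data` is assumed to carry host/index metadata):

import json

event = {
    "sourcetype": "dmarc:forensic",
    "time": 1580515200,  # epoch seconds derived from arrival_date_utc
    "event": {"arrival_date_utc": "2020-02-01 00:00:00"},  # the report dict
}
json_str = "{0}\n".format(json.dumps(event))  # one JSON object per line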
def union(df, other, index=False, keep='first'):
    validate_set_ops(df, other)
    stacked = df.append(other)
    if index:
        stacked_reset_indexes = stacked.reset_index()
        index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
        index_name = df.index.names
        return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
        return_df.index.names = index_name
        return return_df
    else:
        return stacked.drop_duplicates(keep=keep)

Returns rows that appear in either DataFrame.

Args:
    df (pandas.DataFrame): data passed in through the pipe.
    other (pandas.DataFrame): other DataFrame to use for set operation with the first.

Kwargs:
    index (bool): Boolean indicating whether to consider the pandas index as part of the set operation (default `False`).
    keep (str): Indicates which duplicate should be kept. Options are `'first'` and `'last'`.
juraj-google-style
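A usage sketch; note that `DataFrame.append` was removed in pandas 2.0, so this assumes an older pandas (or swapping in `pd.concat`):

import pandas as pd

df = pd.DataFrame({'x': [1, 2]})
other = pd.DataFrame({'x': [2, 3]})
print(union(df, other))  # rows 1, 2, 3; the duplicate 2 is dropped (keep='first')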
def copy(self):
    fs = self.__class__.__new__(self.__class__)
    fs.__dict__ = self.__dict__.copy()
    fs._frameSet = None
    if (self._frameSet is not None):
        fs._frameSet = self._frameSet.copy()
    return fs

Create a deep copy of this sequence

Returns:
    :obj:`.FileSequence`:
codesearchnet
def Analyze(self, hashes):
    logger.debug('Opening connection to {0:s}:{1:d}'.format(self._host, self._port))
    nsrl_socket = self._GetSocket()
    if (not nsrl_socket):
        self.SignalAbort()
        return []
    hash_analyses = []
    for digest in hashes:
        response = self._QueryHash(nsrl_socket, digest)
        if (response is None):
            continue
        hash_analysis = interface.HashAnalysis(digest, response)
        hash_analyses.append(hash_analysis)
    nsrl_socket.close()
    logger.debug('Closed connection to {0:s}:{1:d}'.format(self._host, self._port))
    return hash_analyses

Looks up hashes in nsrlsvr.

Args:
    hashes (list[str]): hash values to look up.

Returns:
    list[HashAnalysis]: analysis results, or an empty list on error.
codesearchnet
def avg_dicts(dictin1, dictin2, dropmissing=True):
    dictout = dict()
    for key in dictin1:
        if (key in dictin2):
            dictout[key] = ((dictin1[key] + dictin2[key]) / 2)
        elif (not dropmissing):
            dictout[key] = dictin1[key]
    if (not dropmissing):
        for key in dictin2:
            if (key not in dictin1):
                dictout[key] = dictin2[key]
    return dictout

Create a new dictionary from two dictionaries by averaging values

Args:
    dictin1 (DictUpperBound): First input dictionary
    dictin2 (DictUpperBound): Second input dictionary
    dropmissing (bool): Whether to drop keys missing in one dictionary. Defaults to True.

Returns:
    Dict: Dictionary with values being average of 2 input dictionaries
codesearchnet
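A worked example showing both `dropmissing` modes:

d1 = {'a': 1, 'b': 2}
d2 = {'a': 3, 'c': 10}
print(avg_dicts(d1, d2))                     # {'a': 2.0} - only shared keys
print(avg_dicts(d1, d2, dropmissing=False))  # {'a': 2.0, 'b': 2, 'c': 10}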
def Analyze(self, data):
    if (not self._rules):
        return
    try:
        self._matches = self._rules.match(data=data, timeout=self._MATCH_TIMEOUT)
    except yara.YaraTimeoutError:
        logger.error('Could not process file within timeout: {0:d}'.format(self._MATCH_TIMEOUT))
    except yara.YaraError as exception:
        logger.error('Error processing file with Yara: {0!s}.'.format(exception))

Analyzes a block of data, attempting to match Yara rules to it.

Args:
    data(bytes): a block of data.
codesearchnet
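A hedged sketch of the surrounding setup with `yara-python`, assuming `self._rules` was produced by `yara.compile`:

import yara

# Compile a trivial rule and match it against raw bytes with a timeout,
# mirroring the call made in Analyze().
rules = yara.compile(source='rule demo { strings: $a = "evil" condition: $a }')
matches = rules.match(data=b'some evil bytes', timeout=60)
print([m.rule for m in matches])  # ['demo']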
def __init__(self, resolver_context):
    super(APFSFile, self).__init__(resolver_context)
    self._file_system = None
    self._fsapfs_file_entry = None

Initializes a file-like object.

Args:
    resolver_context (Context): resolver context.
juraj-google-style
def create_contentkey_authorization_policy(access_token, content):
    path = '/ContentKeyAuthorizationPolicies'
    endpoint = ''.join([ams_rest_endpoint, path])
    body = content
    return do_ams_post(endpoint, path, body, access_token)

Create Media Service Content Key Authorization Policy.

Args:
    access_token (str): A valid Azure authentication token.
    content (str): Content Payload.

Returns:
    HTTP response. JSON body.
codesearchnet
def summarize(self, document, Abstractor, similarity_filter=None):
    if isinstance(document, str) is False:
        raise TypeError("The type of document must be str.")
    if isinstance(Abstractor, AbstractableDoc) is False:
        raise TypeError("The type of Abstractor must be AbstractableDoc.")
    if isinstance(similarity_filter, SimilarityFilter) is False and similarity_filter is not None:
        raise TypeError("The type of similarity_filter must be SimilarityFilter.")
    normalized_sentences = self.listup_sentence(document)
    if similarity_filter is not None:
        normalized_sentences = similarity_filter.similar_filter_r(normalized_sentences)
    self.tokenize(document)
    words = self.token
    fdist = nltk.FreqDist(words)
    top_n_words = [w[0] for w in fdist.items()][:self.target_n]
    scored_list = self.__closely_associated_score(normalized_sentences, top_n_words)
    filtered_list = Abstractor.filter(scored_list)
    result_list = [normalized_sentences[idx] for (idx, score) in filtered_list]
    result_dict = {
        "summarize_result": result_list,
        "scoring_data": filtered_list
    }
    return result_dict

Execute summarization.

Args:
    document: The target document.
    Abstractor: The object of AbstractableDoc.
    similarity_filter: The object of SimilarityFilter.

Returns:
    dict data.
    - "summarize_result": The list of summarized sentences.
    - "scoring_data": The list of scores.
juraj-google-style
def remove_network(self, net_id):
    url = self._url("/networks/{0}", net_id)
    res = self._delete(url)
    self._raise_for_status(res)

Remove a network. Similar to the ``docker network rm`` command.

Args:
    net_id (str): The network's id
juraj-google-style
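A usage sketch with docker-py's low-level `APIClient`, which this method belongs to:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
net = client.create_network('testnet')  # returns a dict with the new network's 'Id'
client.remove_network(net['Id'])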
def get_info_dict(info_line):
    variant_info = {}
    for raw_info in info_line.split(';'):
        splitted_info = raw_info.split('=')
        if len(splitted_info) == 2:
            variant_info[splitted_info[0]] = splitted_info[1]
        else:
            variant_info[splitted_info[0]] = True
    return variant_info

Parse the INFO field of a variant.

Make a dictionary from the INFO field of a vcf variant. Keys are the info keys and values are the raw strings from the vcf. If a field only has a key (no value), its value in the dictionary is True.

Args:
    info_line (str): The info field of a vcf variant

Returns:
    info_dict (dict): An INFO dictionary
juraj-google-style
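A worked example on a typical VCF INFO field:

info_line = "MQ=40;AF=0.5;DB"
print(get_info_dict(info_line))
# {'MQ': '40', 'AF': '0.5', 'DB': True}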
def CopyFromStringISO8601(self, time_string):
    date_time_values = self._CopyDateTimeFromStringISO8601(time_string)
    self._CopyFromDateTimeValues(date_time_values)

Copies time elements from an ISO 8601 date and time string.

Currently not supported:
* Duration notation: "P..."
* Week notation "2016-W33"
* Date with week number notation "2016-W33-3"
* Date without year notation "--08-17"
* Ordinal date notation "2016-230"

Args:
    time_string (str): date and time value formatted as: YYYY-MM-DDThh:mm:ss.######[+-]##:##

        Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.

Raises:
    ValueError: if the time string is invalid or not supported.
codesearchnet
def download_archive(self, name, file_path):
    uri = ((self.URI + '/archive/') + name)
    return self._client.download(uri, file_path)

Download archived logs of the OS Volume.

Args:
    name: Name of the OS Volume.
    file_path (str): Destination file path.

Returns:
    bool: Indicates if the resource was successfully downloaded.
codesearchnet
def _get_flag_int_value(self, wanted_flag_name, default_value):
    flag_int_value = default_value
    found, flag_value = self.get_flag_value(wanted_flag_name)
    if found:
        try:
            flag_int_value = int(flag_value)
        except ValueError:
            # Log the raw flag value that failed to convert, not the default.
            logging.warning('Cannot convert %s to int for flag %s' % (flag_value, wanted_flag_name))
    return flag_int_value

Returns the int value of a TensorTracer flag.

Args:
    wanted_flag_name: the name of the flag we are looking for.
    default_value: the default value for the flag, if not provided.

Returns:
    the value of the flag.
github-repos