code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def pack_container(in_container, out_file):
    """Pack a container image into a .tar.bz2 archive.

    Args:
        in_container (str): Path string to the container image.
        out_file (str): Output file name.
    """
    container_filename = local.path(out_file).basename
    out_container = (local.cwd / 'container-out') / container_filename
    out_dir = out_container.dirname

    with local.cwd(in_container):
        tar('cjf', out_container, '.')
    c_hash = download.update_hash(out_container)

    # BUG FIX: create the output directory when it does NOT exist yet;
    # the original ran mkdir only when it already existed.
    if not out_dir.exists():
        mkdir('-p', out_dir)

    mv(out_container, out_file)
    mv(out_container + '.hash', out_file + '.hash')

    new_container = {'path': out_file, 'hash': str(c_hash)}
    # BUG FIX: append the record; ``list += dict`` extends the list with
    # the dict's *keys* ('path', 'hash'), not the record itself.
    CFG['container']['known'].append(new_container)
Pack a container image into a .tar.bz2 archive. Args: in_container (str): Path string to the container image. out_file (str): Output file name.
codesearchnet
def entitlements(self, request, pk=None):
    """Retrieve the list of entitlements available to this learner.

    Only those entitlements are returned that satisfy the enterprise
    customer's data sharing setting.

    Args:
        request (HttpRequest): Reference to the in-progress request instance.
        pk (Int): Primary key value of the selected enterprise learner.

    Returns:
        (HttpResponse): Response containing the learner's entitlements.
    """
    learner = self.get_object()
    payload = {'entitlements': learner.entitlements}
    serializer = serializers.EnterpriseCustomerUserEntitlementSerializer(
        payload, context={'request': request})
    return Response(serializer.data)
Retrieve the list of entitlements available to this learner. Only those entitlements are returned that satisfy enterprise customer's data sharing setting. Arguments: request (HttpRequest): Reference to in-progress request instance. pk (Int): Primary key value of the selected enterprise learner. Returns: (HttpResponse): Response object containing a list of learner's entitlements.
juraj-google-style
def cos(x):
    """Computes cos of x element-wise.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    """
    return math_ops.cos(x)
Computes cos of x element-wise. Args: x: Tensor or variable. Returns: A tensor.
github-repos
def main(argv: Optional[Sequence[str]]=None) -> None: args = parse_arguments(argv=argv) if args.logging: logging.basicConfig(level=logging.DEBUG) handle_skip() action = args.action request = parse_request() LOGGER.debug('Received action %s with request:\n%s', action, request) try: mapping = parse_mapping(args.mapping) except Exception as error: LOGGER.critical('Unable to parse mapping file', exc_info=True) print('Unable to parse mapping file: {error}'.format(error=error), file=sys.stderr) sys.exit(1) if (action == 'get'): get_password(request, mapping) else: LOGGER.info('Action %s is currently not supported', action) sys.exit(1)
Start the pass-git-helper script. Args: argv: If not ``None``, use the provided command line arguments for parsing. Otherwise, extract them automatically.
codesearchnet
def add_done_callback(self, fn): if self._result_set: _helpers.safe_invoke_callback(fn, self) return self._done_callbacks.append(fn) if self._polling_thread is None: self._polling_thread = _helpers.start_daemon_thread( target=self._blocking_poll )
Add a callback to be executed when the operation is complete. If the operation is not already complete, this will start a helper thread to poll for the status of the operation in the background. Args: fn (Callable[Future]): The callback to execute when the operation is complete.
juraj-google-style
def decode(self, images, save=None, round=4, names=None, **kwargs): if isinstance(images, string_types): images = [images] if isinstance(images, list): imgs_to_decode = imageutils.load_imgs(images, self.masker) else: imgs_to_decode = images methods = {'pearson': self._pearson_correlation, 'dot': self._dot_product, 'roi': self._roi_association} result = np.around(methods[self.method](imgs_to_decode, **kwargs), round) if (names is None): if (type(images).__module__ == np.__name__): names = [('image_%d' % i) for i in range(images.shape[1])] elif (self.method == 'roi'): names = [('cluster_%d' % i) for i in range(result.shape[1])] else: names = images result = pd.DataFrame(result, columns=names, index=self.feature_names) if (save is not None): result.to_csv(save, index_label='Feature') return result
Decodes a set of images. Args: images: The images to decode. Can be: - A single String specifying the filename of the image to decode - A list of filenames - A single NumPy array containing the image data save: Optional filename to save results to. If None (default), returns all results as an array. round: Optional integer indicating number of decimals to round result to. Defaults to 4. names: Optional list of names corresponding to the images in filenames. If passed, must be of same length and in same order as filenames. By default, the columns in the output will be named using the image filenames. Returns: A pandas DataFrame with one row per feature and one column per image, indexed by feature names. The meaning of the values depends on the decoding method used.
codesearchnet
def _CalculateDigestHash(self, file_entry, data_stream_name): file_object = file_entry.GetFileObject(data_stream_name=data_stream_name) if (not file_object): return None try: file_object.seek(0, os.SEEK_SET) hasher_object = hashers_manager.HashersManager.GetHasher('sha256') data = file_object.read(self._READ_BUFFER_SIZE) while data: hasher_object.Update(data) data = file_object.read(self._READ_BUFFER_SIZE) finally: file_object.close() return hasher_object.GetStringDigest()
Calculates a SHA-256 digest of the contents of the file entry. Args: file_entry (dfvfs.FileEntry): file entry whose content will be hashed. data_stream_name (str): name of the data stream whose content is to be hashed. Returns: str: hexadecimal representation of the SHA-256 hash or None if the digest cannot be determined.
codesearchnet
def post_cutout(self, token, channel, x_start, y_start, z_start, data, resolution=0): return self.data.post_cutout(token, channel, x_start, y_start, z_start, data, resolution)
Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload.
juraj-google-style
def _get_populate_from_value(instance, field_name: Union[(str, Tuple[str])], language: str): if callable(field_name): return field_name(instance) def get_field_value(name): value = resolve_object_property(instance, name) with translation.override(language): return str(value) if (isinstance(field_name, tuple) or isinstance(field_name, list)): value = '-'.join([value for value in [get_field_value(name) for name in field_name] if value]) return value return get_field_value(field_name)
Gets the value to create a slug from in the specified language. Arguments: instance: The model that the field resides on. field_name: The name of the field to generate a slug for. language: The language to generate the slug for. Returns: The text to generate a slug for.
codesearchnet
def compute_v(self, memory_antecedent): if self.shared_kv: raise ValueError("compute_v cannot be called with shared_kv") ret = mtf.einsum( [memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim]) if self.combine_dims: ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims) return ret
Compute value Tensor v. Args: memory_antecedent: a Tensor with dimensions {memory_input_dim} + other_dims Returns: a Tensor with dimensions memory_heads_dims + {value_dim} + other_dims
juraj-google-style
def log_power_spectrum(frames, fft_points=512, normalize=True):
    """Compute the log power spectrum of each frame in ``frames``.

    Args:
        frames (array): The frame array in which each row is a frame.
        fft_points (int): The length of FFT; frames shorter than this are
            zero-padded by the underlying FFT.
        normalize (bool): When True, shift the result so its maximum is 0 dB.

    Returns:
        array: num_frames x fft_length log power spectrum.
    """
    spec = power_spectrum(frames, fft_points)
    # Clamp tiny values to avoid log10(0); 1e-20 corresponds to -200 dB.
    np.clip(spec, 1e-20, None, out=spec)
    log_spec = 10 * np.log10(spec)
    if not normalize:
        return log_spec
    return log_spec - np.max(log_spec)
Log power spectrum of each frame in frames. Args: frames (array): The frame array in which each row is a frame. fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. normalize (bool): If normalize=True, the log power spectrum will be normalized. Returns: array: The power spectrum - If frames is an num_frames x sample_per_frame matrix, output will be num_frames x fft_length.
codesearchnet
def read_video_pyav(video_path: str, sample_indices_fn: Callable, **kwargs): requires_backends(read_video_pyav, ['av']) import av container = av.open(video_path) total_num_frames = container.streams.video[0].frames video_fps = container.streams.video[0].average_rate duration = total_num_frames / video_fps if video_fps else 0 metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='pyav') indices = sample_indices_fn(metadata=metadata, **kwargs) frames = [] container.seek(0) end_index = indices[-1] for i, frame in enumerate(container.decode(video=0)): if i > end_index: break if i >= 0 and i in indices: frames.append(frame) video = np.stack([x.to_ndarray(format='rgb24') for x in frames]) metadata.frames_indices = indices return (video, metadata)
Decode the video with PyAV decoder. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: Tuple[`np.array`, `VideoMetadata`]: A tuple containing: - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object.
github-repos
async def set_notification_level(self, level): (await self._client.set_conversation_notification_level(hangouts_pb2.SetConversationNotificationLevelRequest(request_header=self._client.get_request_header(), conversation_id=hangouts_pb2.ConversationId(id=self.id_), level=level)))
Set the notification level of this conversation. Args: level: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or ``NOTIFICATION_LEVEL_RING`` to enable them. Raises: .NetworkError: If the request fails.
codesearchnet
def put(self, values, name=None): with ops.name_scope(name, '%s_put' % self._name, self._scope_vals(values)) as scope: if not isinstance(values, (list, tuple, dict)): values = [values] indices = list(range(len(values))) vals, _ = self._check_put_dtypes(values, indices) with ops.colocate_with(self._coloc_op): op = gen_data_flow_ops.stage(values=vals, shared_name=self._name, name=scope, capacity=self._capacity, memory_limit=self._memory_limit) return op
Create an op that places a value into the staging area. This operation will block if the `StagingArea` has reached its capacity. Args: values: A single tensor, a list or tuple of tensors, or a dictionary with tensor values. The number of elements must match the length of the list provided to the dtypes argument when creating the StagingArea. name: A name for the operation (optional). Returns: The created op. Raises: ValueError: If the number or type of inputs don't match the staging area.
github-repos
def get(self, id_or_url, default=None):
    """Fetch and return the spreadsheet with the given id or url.

    Args:
        id_or_url (str): unique alphanumeric id or URL of the spreadsheet.
        default: value returned when no spreadsheet is found.

    Returns:
        New SpreadSheet instance, or *default* if none is found.

    Raises:
        ValueError: if a URL is given from which no id could be extracted.
    """
    if '/' in id_or_url:
        sheet_id = urls.SheetUrl.from_string(id_or_url).id
    else:
        sheet_id = id_or_url
    try:
        return self[sheet_id]
    except KeyError:
        return default
Fetch and return the spreadsheet with the given id or url. Args: id_or_url (str): unique alphanumeric id or URL of the spreadsheet Returns: New SpreadSheet instance or given default if none is found Raises: ValueError: if an URL is given from which no id could be extracted
juraj-google-style
def encode(self, input_features: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None, **kwargs): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_features, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_features, **kwargs) return self.module.apply({'params': params or self.params}, input_features=jnp.array(input_features, dtype='f4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)
Returns: Example: ```python >>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np") >>> input_features = inputs.input_features >>> encoder_outputs = model.encode(input_features=input_features) ```
github-repos
def init(library: typing.Union[(str, types.ModuleType)]) -> None: if isinstance(library, types.ModuleType): library = library.__name__ if (library not in manager._handlers): raise ValueError('Possible values are <{}>, not <{}>'.format(manager._handlers.keys(), library)) manager.init(library, asynclib) asynclib.lib_name = library asynclib._init = True
Must be called at some point after import and before your event loop is run. Populates the asynclib instance of _AsyncLib with methods relevant to the async library you are using. The supported libraries at the moment are: - curio - trio Args: library (str or module): Either the module name as a string or the imported module itself. E.g. ``multio.init(curio)``.
codesearchnet
def start(self, on_done): genesis_file = os.path.join(self._data_dir, 'genesis.batch') try: with open(genesis_file, 'rb') as batch_file: genesis_data = genesis_pb2.GenesisData() genesis_data.ParseFromString(batch_file.read()) LOGGER.info('Producing genesis block from %s', genesis_file) except IOError: raise InvalidGenesisStateError( "Genesis File {} specified, but unreadable".format( genesis_file)) initial_state_root = self._context_manager.get_first_root() genesis_batches = [batch for batch in genesis_data.batches] if genesis_batches: scheduler = SerialScheduler( self._context_manager.get_squash_handler(), initial_state_root, always_persist=True) LOGGER.debug('Adding %s batches', len(genesis_data.batches)) for batch in genesis_data.batches: scheduler.add_batch(batch) self._transaction_executor.execute(scheduler) scheduler.finalize() scheduler.complete(block=True) txn_receipts = [] state_hash = initial_state_root for batch in genesis_batches: result = scheduler.get_batch_execution_result( batch.header_signature) if result is None or not result.is_valid: raise InvalidGenesisStateError( 'Unable to create genesis block, due to batch {}' .format(batch.header_signature)) if result.state_hash is not None: state_hash = result.state_hash txn_results = scheduler.get_transaction_execution_results( batch.header_signature) txn_receipts += self._make_receipts(txn_results) settings_view = SettingsView( self._state_view_factory.create_view(state_hash)) name = settings_view.get_setting('sawtooth.consensus.algorithm.name') version = settings_view.get_setting( 'sawtooth.consensus.algorithm.version') if name is None or version is None: raise LocalConfigurationError( 'Unable to start validator; sawtooth.consensus.algorithm.name ' 'and sawtooth.consensus.algorithm.version must be set in the ' 'genesis block.') LOGGER.debug('Produced state hash %s for genesis block.', state_hash) block_builder = self._generate_genesis_block() block_builder.add_batches(genesis_batches) 
block_builder.set_state_hash(state_hash) block_publisher = self._get_block_publisher(initial_state_root) if not block_publisher.initialize_block(block_builder.block_header): LOGGER.error('Consensus refused to initialize consensus block.') raise InvalidGenesisConsensusError( 'Consensus refused to initialize genesis block.') if not block_publisher.finalize_block(block_builder.block_header): LOGGER.error('Consensus refused to finalize genesis block.') raise InvalidGenesisConsensusError( 'Consensus refused to finalize genesis block.') self._sign_block(block_builder) block = block_builder.build_block() blkw = BlockWrapper(block=block) LOGGER.info('Genesis block created: %s', blkw) self._block_manager.put([blkw.block]) self._block_manager.persist(blkw.identifier, "commit_store") self._txn_receipt_store.chain_update(block, txn_receipts) self._chain_id_manager.save_block_chain_id(block.header_signature) LOGGER.debug('Deleting genesis data.') os.remove(genesis_file) if on_done is not None: on_done()
Starts the genesis block creation process. Will call the given `on_done` callback on successful completion. Args: on_done (function): a function called on completion Raises: InvalidGenesisStateError: raises this error if a genesis block is unable to be produced, or the resulting block-chain-id saved.
juraj-google-style
def insert_and_get(self, **fields): if not self.conflict_target and not self.conflict_action: return super().create(**fields) compiler = self._build_insert_compiler([fields]) rows = compiler.execute_sql(return_id=False) columns = rows[0] model_columns = {} for field in self.model._meta.local_concrete_fields: model_columns[field.column] = field.attname model_init_fields = {} for column_name, column_value in columns.items(): try: model_init_fields[model_columns[column_name]] = column_value except KeyError: pass return self.model(**model_init_fields)
Creates a new record in the database and then gets the entire row. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: fields: The fields of the row to create. Returns: The model instance representing the row that was created.
juraj-google-style
def append_item(self, item): if isinstance(item, SubmenuItem): raise TypeError("SubmenuItems cannot be added to a MultiSelectMenu") super(MultiSelectMenu, self).append_item(item)
Add an item to the end of the menu before the exit item. Note that Multi-Select Menus will not allow a SubmenuItem to be added, as multi-select menus are expected to be used only for executing multiple actions. Args: item (:obj:`MenuItem`): The item to be added Raises: TypeError: If the specified MenuIem is a SubmenuItem.
juraj-google-style
def build_from_queue(cls, input_queue, replay_size, batch_size): return cls((lambda : input_queue.dequeue_many(batch_size)), replay_size, batch_size=batch_size)
Builds a `ReplayableQueue` that draws from a regular `input_queue`. Args: input_queue: The queue to draw from. replay_size: The size of the replay buffer. batch_size: The size of each batch. Returns: A ReplayableQueue.
codesearchnet
def pair_wise_sigmoid_focal_loss(inputs: Tensor, labels: Tensor, alpha: float=0.25, gamma: float=2.0) -> Tensor: if alpha < 0: raise ValueError('alpha must be positive') height_and_width = inputs.shape[1] criterion = nn.BCEWithLogitsLoss(reduction='none') prob = inputs.sigmoid() cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs)) focal_pos = (1 - prob) ** gamma * cross_entropy_loss_pos focal_pos *= alpha cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs)) focal_neg = prob ** gamma * cross_entropy_loss_neg focal_neg *= 1 - alpha loss = torch.matmul(focal_pos, labels.T) + torch.matmul(focal_neg, (1 - labels).T) return loss / height_and_width
A pair wise version of the focal loss, see `sigmoid_focal_loss` for usage. Args: inputs (`torch.Tensor`): A tensor representing a mask. labels (`torch.Tensor`): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class). alpha (float, *optional*, defaults to 0.25): Weighting factor in range (0,1) to balance positive vs negative examples. gamma (float, *optional*, defaults to 2.0): Exponent of the modulating factor \\(1 - p_t\\) to balance easy vs hard examples. Returns: `torch.Tensor`: The computed loss between each pairs.
github-repos
def __init__(self, shape, dtype): self._dtype = dtype self._sum = tf.Variable(lambda: tf.zeros(shape, dtype), False) self._count = tf.Variable(lambda: 0, trainable=False)
Specify the shape and dtype of the mean to be estimated. Note that a float mean to zero submitted elements is NaN, while computing the integer mean of zero elements raises a division by zero error. Args: shape: Shape of the mean to compute. dtype: Data type of the mean to compute.
juraj-google-style
def batch_slice(linop, params_overrides, slices): if not isinstance(slices, collections.abc.Sequence): slices = (slices,) if len(slices) == 1 and slices[0] is Ellipsis: override_dict = {} else: batch_shape = linop.batch_shape_tensor() override_dict = {} for param_name, param_ndims_to_matrix_ndims in linop._experimental_parameter_ndims_to_matrix_ndims.items(): param = getattr(linop, param_name) if param is not None: override_dict[param_name] = nest.map_structure_up_to(param, functools.partial(_slice_single_param, slices=slices, batch_shape=batch_shape), param, param_ndims_to_matrix_ndims) override_dict.update(params_overrides) parameters = dict(linop.parameters, **override_dict) return type(linop)(**parameters)
Slices `linop` along its batch dimensions. Args: linop: A `LinearOperator` instance. params_overrides: A `dict` of parameter overrides. slices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple` thereof. (e.g. the argument of a `__getitem__` method). Returns: new_linop: A batch-sliced `LinearOperator`.
github-repos
def __gt__(self, other): if not isinstance(other, DateTimeValues): raise ValueError('Other not an instance of DateTimeValues') normalized_timestamp = self._GetNormalizedTimestamp() other_normalized_timestamp = other._GetNormalizedTimestamp() if normalized_timestamp is None: return False if other_normalized_timestamp is None: return True return normalized_timestamp > other_normalized_timestamp
Determines if the date time values are greater than other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are greater than other. Raises: ValueError: if other is not an instance of DateTimeValues.
juraj-google-style
def device(self): hdevice = self._libinput.libinput_event_get_device(self._hevent) return Device(hdevice, self._libinput)
The device associated with this event. For device added/removed events this is the device added or removed. For all other device events, this is the device that generated the event. Returns: ~libinput.define.Device: Device object.
codesearchnet
def __init__(self, default_model=None, id_resolver=None): try: super(ModelAdapter, self).__init__(id_resolver) except: pass self.default_model = default_model self.want_pbs = 0
Constructor. Args: default_model: If an implementation for the kind cannot be found, use this model class. If none is specified, an exception will be thrown (default). id_resolver: A datastore_pbs.IdResolver that can resolve application ids. This is only necessary when running on the Cloud Datastore v1 API.
juraj-google-style
def UploadAccount(self, hash_algorithm, hash_key, accounts): param = { 'hashAlgorithm': hash_algorithm, 'signerKey': hash_key, 'users': accounts } return self._InvokeGitkitApi('uploadAccount', param)
Uploads multiple accounts to Gitkit server. Args: hash_algorithm: string, algorithm to hash password. hash_key: string, base64-encoded key of the algorithm. accounts: array of accounts to be uploaded. Returns: Response of the API.
juraj-google-style
def lint(self, targets):
    """Run linters in parallel and sort all results.

    Args:
        targets (list): List of files and folders to lint.

    Returns:
        Tuple of (sorted stdout result list, stderr results iterator).
    """
    LinterRunner.targets = targets
    linters = self._config.get_linter_classes()
    with Pool() as pool:
        out_err_none = pool.map(LinterRunner.run, linters)
    out_err = [item for item in out_err_none if item is not None]
    # BUG FIX: ``zip(*[])`` yields nothing, so the original raised
    # "not enough values to unpack" when every linter returned None
    # (or no linters were configured). Return empty results instead.
    if not out_err:
        return [], chain.from_iterable(())
    stdout, stderr = zip(*out_err)
    return sorted(chain.from_iterable(stdout)), chain.from_iterable(stderr)
Run linters in parallel and sort all results. Args: targets (list): List of files and folders to lint.
juraj-google-style
def acquire(self): if os.path.exists(self.path): try: pid = None with open(self.path, 'r') as f: line = f.readline().strip() pid = int(line) if (not psutil.pid_exists(pid)): os.remove(self.path) except ValueError as e: os.remove(self.path) except IOError as e: pass try: self.fd = os.open(self.path, ((os.O_CREAT | os.O_EXCL) | os.O_RDWR)) to_write = ('%s%s' % (os.getpid(), os.linesep)) os.write(self.fd, to_write.encode()) except OSError as e: if (not os.path.exists(self.path)): raise return False self.acquired = True return True
Attempts to acquire a lock for the J-Link lockfile. If the lockfile exists but does not correspond to an active process, the lockfile is first removed, before an attempt is made to acquire it. Args: self (Jlock): the ``JLock`` instance Returns: ``True`` if the lock was acquired, otherwise ``False``. Raises: OSError: on file errors.
codesearchnet
def delete_branch(profile, name):
    """Delete a branch.

    Args:
        profile: A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        name: The name of the branch to delete.

    Returns:
        The response of the DELETE request.
    """
    return refs.delete_ref(profile, 'heads/' + name)
Delete a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch to delete. Returns: The response of the DELETE request.
juraj-google-style
def _continue_search(self, state): i = state[_StateKeys.CUR_INDEX] alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS] finished_scores = state[_StateKeys.FINISHED_SCORES] finished_flags = state[_StateKeys.FINISHED_FLAGS] not_at_max_decode_length = tf.less(i, self.max_decode_length) max_length_norm = _length_normalization(self.alpha, self.max_decode_length) best_alive_scores = alive_log_probs[:, 0] / max_length_norm finished_scores *= tf.to_float(finished_flags) lowest_finished_scores = tf.reduce_min(finished_scores, axis=1) finished_batches = tf.reduce_any(finished_flags, 1) lowest_finished_scores += (1. - tf.to_float(finished_batches)) * -INF worst_finished_score_better_than_best_alive_score = tf.reduce_all( tf.greater(lowest_finished_scores, best_alive_scores) ) return tf.logical_and( not_at_max_decode_length, tf.logical_not(worst_finished_score_better_than_best_alive_score) )
Return whether to continue the search loop. The loops should terminate when 1) when decode length has been reached, or 2) when the worst score in the finished sequences is better than the best score in the alive sequences (i.e. the finished sequences are provably unchanging) Args: state: A dictionary with the current loop state. Returns: Bool tensor with value True if loop should continue, False if loop should terminate.
juraj-google-style
def _record_op_seen_by_control_dependencies(self, op) -> None: for controller in self._control_dependencies_stack: controller.add_op(op)
Record that the given op depends on all registered control dependencies. Args: op: An Operation.
github-repos
def user_exists(self, username):
    """Determine whether the user exists in the Crowd application.

    Args:
        username: The user name to look up.

    Returns:
        True if the user exists; None when the lookup request fails.
    """
    response = self._get(self.rest_url + '/user',
                         params={'username': username})
    if response.ok:
        return True
    return None
Determines if the user exists. Args: username: The user name. Returns: bool: True if the user exists in the Crowd application.
codesearchnet
def find_files(directory, pattern, recursively=True):
    """Yield (base_directory, filename) pairs for files matching *pattern*.

    Args:
        directory: base directory to start the search.
        pattern: fnmatch-style pattern for filenames.
        recursively: when False, only the top-level directory is searched.

    Yields:
        Tuples of (base_directory, filename).
    """
    for base, _subdirs, filenames in os.walk(directory):
        for filename in filenames:
            if fnmatch.fnmatch(filename, pattern):
                yield base, filename
        if not recursively:
            break
Yield files together with their base directories, recursively or not. Args: directory: base directory to start the search. pattern: fnmatch pattern for filenames. recursively: whether to recurse into subdirectories. Yields: Tuples of (base_directory, filename).
juraj-google-style
def readinto(self, b): if not self._readable: raise UnsupportedOperation('read') size = len(b) with self._seek_lock: start = self._seek end = start + size self._seek = end with handle_os_exceptions(): read_data = self._read_range(start, end) read_size = len(read_data) if read_size: memoryview(b)[:read_size] = read_data if read_size != size: with self._seek_lock: self._seek = start + read_size return read_size
Read bytes into a pre-allocated, writable bytes-like object b, and return the number of bytes read. Args: b (bytes-like object): buffer. Returns: int: number of bytes read
juraj-google-style
def fit(self, X): training_signal = X self.window_design(self.window_length, self.beta) if self.method == 'std_dev': self.fit_freq_std_dev(training_signal) elif self.method == 'min_max': self.fit_freq_min_max(training_signal) else: raise ValueError('Unknown method: {}'.format(self.method))
Defines a spectral mask based on training data Args: X: Training data
juraj-google-style
def list_members(self, name, type='USER', recurse=True, max_results=1000): results = self.client.service.getListMembership(name, type, recurse, max_results, self.proxy_id) return [item['member'] for item in results]
Look up all the members of a list. Args: name (str): The name of the list type (str): The type of results to return. "USER" to get users, "LIST" to get lists. recurse (bool): Presumably, whether to recurse into member lists when retrieving users. max_results (int): Maximum number of results to return. Returns: list of strings: names of the members of the list
codesearchnet
async def is_change_done(self, zone, change_id): zone_id = self.get_managed_zone(zone) url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}' resp = await self.get_json(url) return resp['status'] == self.DNS_CHANGES_DONE
Check if a DNS change has completed. Args: zone (str): DNS zone of the change. change_id (str): Identifier of the change. Returns: Boolean
juraj-google-style
def unsubscribe(self, peer_jid): self.roster.unsubscribe(aioxmpp.JID.fromstr(peer_jid).bare())
Asks for unsubscription Args: peer_jid (str): the JID you ask for unsubscriptiion
juraj-google-style
async def apply(self, sender: str, recipient: str, mailbox: str, append_msg: AppendMessage) -> Tuple[(Optional[str], AppendMessage)]: ...
Run the filter and return the mailbox where it should be appended, or None to discard, and the message to be appended, which is usually the same as ``append_msg``. Args: sender: The envelope sender of the message. recipient: The envelope recipient of the message. mailbox: The intended mailbox to append the message. append_msg: The message to be appended. raises: :exc:`~pymap.exceptions.AppendFailure`
codesearchnet
def GetUpdateTimestamp(self): if self.update_time is None: self.update_time = self._ReadTimestamp(self.update_file) return self.update_time
Return the timestamp of the last cache update. Returns: An int with the number of seconds since epoch, or None if the timestamp file doesn't exist or has errors.
github-repos
def one_step(self, current_state, previous_kernel_results): @tfp.mcmc.internal.util.make_innermost_setter def set_num_leapfrog_steps(kernel_results, num_leapfrog_steps): return kernel_results._replace(accepted_results=kernel_results.accepted_results._replace(num_leapfrog_steps=num_leapfrog_steps)) step_size = previous_kernel_results.new_step_size previous_kernel_results = set_num_leapfrog_steps(previous_kernel_results, self._num_leapfrog_steps(step_size)) (new_state, kernel_results) = self._kernel.one_step(self._flatten_state(current_state), previous_kernel_results) return (self._unflatten_state(new_state), kernel_results)
Runs one iteration of NeuTra. Args: current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`. previous_kernel_results: `collections.namedtuple` containing `Tensor`s representing values from previous calls to this function (or from the `bootstrap_results` function.) Returns: next_state: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) after taking exactly one step. Has same type and shape as `current_state`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain.
codesearchnet
def get_worksheet_keys(data_dict, result_info_key):
    """Return the sorted keys of ``data_dict``, excluding bookkeeping keys.

    Args:
        data_dict: dict to pull keys from.
        result_info_key: key to exclude (must be present in ``data_dict``).

    Returns:
        Sorted list of the remaining keys; 'meta' is also excluded when
        present.

    Raises:
        KeyError: if ``result_info_key`` is not in ``data_dict``.
    """
    keys = set(data_dict)
    keys.remove(result_info_key)
    # discard() is a no-op when 'meta' is absent, unlike remove().
    keys.discard('meta')
    return sorted(keys)
Gets sorted keys from the dict, ignoring result_info_key and 'meta' key Args: data_dict: dict to pull keys from Returns: list of keys in the dict other than the result_info_key
juraj-google-style
def config_str2dict(option_value):
    """Parse a config option value of the form ``Bar:1,Baz,Flub:0.75``.

    Entries are comma-separated; an entry may carry a float weight after a
    colon and defaults to 0 when no weight is given.

    Args:
        option_value: The config string to parse.

    Returns:
        dict mapping each entry name to its float weight (or 0).
    """
    result = {}
    for entry in option_value.split(','):
        if ':' in entry:
            # Bug fix: the original called `pair.split(':')`, but `pair` was
            # never defined (NameError on any weighted entry).
            key, value = entry.split(':')
            value = float(value)
        else:
            key, value = entry, 0
        result[key] = value
    return result
Parse the value of a config option and convert it to a dictionary. The configuration allows lines formatted like: foo = Bar:1,Baz,Flub:0.75 This gets converted to a dictionary: foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 } Args: option_value -- The config string to parse.
codesearchnet
def __init__(self, name, code, return_type, params=None, language='js', imports=None):
    """Initializes a UDF object from its pieces.

    Args:
      name: the name of the javascript function.
      code: function body implementing the logic.
      return_type: BigQuery data type of the function return.
      params: list of parameter tuples: (name, type).
      language: the UDF language; imports are only allowed for 'js'.
      imports: a list of GCS string paths containing further support code.

    Raises:
      TypeError: if return_type is not a string, or params/imports are not
        lists.
      Exception: if imports are supplied for a non-javascript UDF.
    """
    # NOTE(review): `basestring` exists only in Python 2 — this module
    # presumably targets Python 2; confirm before running under Python 3.
    if not isinstance(return_type, basestring):
        raise TypeError('Argument return_type should be a string. Instead got: ', type(return_type))
    if params and not isinstance(params, list):
        raise TypeError('Argument params should be a list of parameter names and types')
    if imports and not isinstance(imports, list):
        raise TypeError('Argument imports should be a list of GCS string paths')
    if imports and language != 'js':
        raise Exception('Imports are available for Javascript UDFs only')
    self._name = name
    self._code = code
    self._return_type = return_type
    self._params = params or []
    self._language = language
    self._imports = imports or []
    # Lazily-built SQL representation; populated elsewhere.
    self._sql = None
Initializes a UDF object from its pieces. Args: name: the name of the javascript function code: function body implementing the logic. return_type: BigQuery data type of the function return. See supported data types in the BigQuery docs params: list of parameter tuples: (name, type) language: see list of supported languages in the BigQuery docs imports: a list of GCS paths containing further support code.
juraj-google-style
def merge(metric_kind, prior, latest):
    """Merges `prior` and `latest`.

    Args:
      metric_kind (:class:`MetricKind`): indicates the kind of metrics being
        merged.
      prior (:class:`MetricValue`): an earlier instance of the metric.
      latest (:class:`MetricValue`): the latest instance of the metric.

    Returns:
      The merged metric value, as produced by the kind-specific helper.

    Raises:
      ValueError: if the two values have different or unknown value types.
    """
    prior_type, _ = _detect_value(prior)
    latest_type, _ = _detect_value(latest)
    # Both values must carry the same (known) value type to be mergeable.
    if prior_type != latest_type:
        _logger.warn(u'Metric values are not compatible: %s, %s', prior, latest)
        raise ValueError(u'Incompatible delta metric values')
    if prior_type is None:
        _logger.warn(u'Bad metric values, types not known for : %s, %s', prior, latest)
        raise ValueError(u'Unsupported delta metric types')
    if metric_kind == MetricKind.DELTA:
        return _merge_delta_metric(prior, latest)
    else:
        return _merge_cumulative_or_gauge_metrics(prior, latest)
Merges `prior` and `latest` Args: metric_kind (:class:`MetricKind`): indicates the kind of metrics being merged prior (:class:`MetricValue`): an prior instance of the metric latest (:class:`MetricValue`: the latest instance of the metric
juraj-google-style
def colourise(__text: str, *args, **kwargs) -> str:
    """Colourise text using click's style function.

    Text is returned untouched when ``stdout`` is not a tty.
    See :func:`click.style` for parameters.

    Args:
        __text: Text to colourise.

    Returns:
        Colourised text, when possible.
    """
    if not sys.stdout.isatty():
        return __text
    return style(__text, *args, **kwargs)
Colourise text using click’s style function. Returns text untouched if colour output is not enabled, or ``stdout`` is not a tty. See :func:`click.style` for parameters Args: __text: Text to colourise Returns: Colourised text, when possible
juraj-google-style
def main():
    """Upgrades the firmware of the J-Links connected to a Windows device.

    Returns:
        None.

    Raises:
        OSError: if there are no J-Link software packages.
    """
    windows_libraries = list(pylink.Library.find_library_windows())
    latest_library = None
    for lib in windows_libraries:
        # A library under a 'JLinkARM' directory is the default install and
        # wins outright; otherwise keep the lexicographically newest path.
        if os.path.dirname(lib).endswith('JLinkARM'):
            latest_library = lib
            break
        elif (latest_library is None):
            latest_library = lib
        elif (os.path.dirname(lib) > os.path.dirname(latest_library)):
            latest_library = lib
    if (latest_library is None):
        raise OSError('No J-Link library found.')
    library = pylink.Library(latest_library)
    jlink = pylink.JLink(lib=library)
    print(('Found version: %s' % jlink.version))
    # Update every connected emulator in turn.
    for emu in jlink.connected_emulators():
        jlink.disable_dialog_boxes()
        jlink.open(serial_no=emu.SerialNumber)
        jlink.sync_firmware()
        print(('Updated emulator with serial number %s' % emu.SerialNumber))
    return None
Upgrades the firmware of the J-Links connected to a Windows device. Returns: None. Raises: OSError: if there are no J-Link software packages.
codesearchnet
def restore_state(self, state):
    """Restore the current state of this emulated object.

    Args:
        state (dict): A previously dumped state produced by dump_state.
    """
    for raw_name, raw_value in state.get('config_variables', {}).items():
        var_id = int(raw_name)
        decoded = base64.b64decode(raw_value)
        # Silently skip config variables this object does not declare.
        if var_id in self._config_variables:
            self._config_variables[var_id].current_value = decoded
Restore the current state of this emulated object. Args: state (dict): A previously dumped state produced by dump_state.
juraj-google-style
def economic_qs_linear(G):
    r"""Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``.

    It is theoretically equivalent to ``economic_qs(dot(G, G.T))``.
    Refer to :func:`numpy_sugar.economic_qs` for further information.

    Args:
        G (array_like): Matrix.

    Returns:
        tuple: ``((Q0, Q1), S0)``.
    """
    # Bug fix: the flattened source carried a stray bare `r` expression — the
    # collapsed prefix of this raw docstring — which raised NameError at call
    # time. Restored as the r-docstring.
    import dask.array as da
    if not isinstance(G, da.Array):
        G = asarray(G, float)
    if G.shape[0] > G.shape[1]:
        # Tall matrix: an SVD of G directly yields the eigen decomposition of
        # G @ G.T without forming the (larger) product.
        (Q, Ssq, _) = svd(G, full_matrices=True)
        S0 = Ssq ** 2
        rank = len(S0)
        Q0, Q1 = Q[:, :rank], Q[:, rank:]
        return ((Q0, Q1), S0)
    return economic_qs(G.dot(G.T))
r"""Economic eigen decomposition for symmetric matrices ``dot(G, G.T)``. It is theoretically equivalent to ``economic_qs(dot(G, G.T))``. Refer to :func:`numpy_sugar.economic_qs` for further information. Args: G (array_like): Matrix. Returns: tuple: ``((Q0, Q1), S0)``.
juraj-google-style
def _dataset_load_from_hdx(self, id_or_name): if not self._load_from_hdx('dataset', id_or_name): return False self._dataset_create_resources() return True
Loads the dataset given by either id or name from HDX Args: id_or_name (str): Either id or name of dataset Returns: bool: True if loaded, False if not
juraj-google-style
def thread(self, value: str):
    """Set thread id of the message.

    Args:
        value (str): the thread id.

    Raises:
        TypeError: if ``value`` is neither a string nor None.
    """
    if isinstance(value, str) or value is None:
        self._thread = value
        return
    raise TypeError("'thread' MUST be a string")
Set thread id of the message Args: value (str): the thread id
codesearchnet
def add_argument(self, parser, bootstrap=False):
    """Add list-style item as an argument to the given parser.

    Generally this works mostly like the normal append action, but boolean
    children are registered twice — once with default True (the --flag
    form) and once with default False (the --no-flag form) — after which
    the child's original default is restored.

    Args:
        parser (argparse.ArgumentParser): The parser to add this item to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            this item as required or not.
    """
    if self.cli_expose:
        if isinstance(self.child, YapconfBoolItem):
            # Temporarily flip the child's default to generate each of the
            # two boolean flag spellings.
            original_default = self.child.default
            self.child.default = True
            args = self.child._get_argparse_names(parser.prefix_chars)
            kwargs = self._get_argparse_kwargs(bootstrap)
            parser.add_argument(*args, **kwargs)
            self.child.default = False
            args = self.child._get_argparse_names(parser.prefix_chars)
            kwargs = self._get_argparse_kwargs(bootstrap)
            parser.add_argument(*args, **kwargs)
            # Restore the child so later use sees the configured default.
            self.child.default = original_default
        else:
            super(YapconfListItem, self).add_argument(parser, bootstrap)
Add list-style item as an argument to the given parser. Generally speaking, this works mostly like the normal append action, but there are special rules for boolean cases. See the AppendReplace action for more details. Examples: A non-nested list value with the name 'values' and a child name of 'value' will result in a command-line argument that will correctly handle arguments like the following: ['--value', 'VALUE1', '--value', 'VALUE2'] Args: parser (argparse.ArgumentParser): The parser to add this item to. bootstrap (bool): Flag to indicate whether you only want to mark this item as required or not.
codesearchnet
def update_object(self, ref, payload, return_fields=None):
    """Update an Infoblox object.

    Args:
        ref (str): Infoblox object reference.
        payload (dict): Payload with data to send.
        return_fields: Optional fields to request back in the reply.

    Returns:
        The parsed reply for the updated object.

    Raises:
        InfobloxCannotUpdateObject: if the server rejects the update.
    """
    query_params = self._build_query_params(return_fields=return_fields)
    opts = self._get_request_options(data=payload)
    url = self._construct_url(ref, query_params)
    self._log_request('put', url, opts)
    r = self.session.put(url, **opts)
    self._validate_authorized(r)
    if r.status_code != requests.codes.ok:
        # Distinguish a service outage from a genuine update failure before
        # raising the update-specific error.
        self._check_service_availability('update', r, ref)
        raise ib_ex.InfobloxCannotUpdateObject(
            response=jsonutils.loads(r.content),
            ref=ref,
            content=r.content,
            code=r.status_code)
    return self._parse_reply(r)
Update an Infoblox object Args: ref (str): Infoblox object reference payload (dict): Payload with data to send Returns: The object reference of the updated object Raises: InfobloxException
juraj-google-style
def intersection(L1, L2):
    """Intersect two lines given by their (a, b, c) coefficients.

    Uses Cramer's rule on the system ``a*x + b*y = c``.

    Args:
        L1: coefficients (a, b, c) of the first line.
        L2: coefficients (a, b, c) of the second line.

    Returns:
        (x, y) of the intersection point, or False when the lines are
        parallel (zero determinant).
    """
    det = L1[0] * L2[1] - L1[1] * L2[0]
    det_x = L1[2] * L2[1] - L1[1] * L2[2]
    det_y = L1[0] * L2[2] - L1[2] * L2[0]
    if det == 0:
        return False
    return det_x / det, det_y / det
Intersects two lines given in general form a*x + b*y = c

Args:
    L1 ([float, float, float]): coefficients (a, b, c) of the first line
    L2 ([float, float, float]): coefficients (a, b, c) of the second line

Returns:
    (float, float): x and y of the intersection point, when the lines
    intersect in a single point
    bool: False when the lines are parallel (zero determinant)
juraj-google-style
def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):
    """Static constructor for a band-structure path in k-space.

    Args:
        ndivsm: Number of divisions for the smallest segment.
        structure: :class:`Structure` object; used to derive the
            high-symmetry path when ``kpath_bounds`` is not given.
        kpath_bounds: List with the reduced coordinates of the k-points
            defining the path.
        comment: Comment string.

    Returns:
        :class:`KSampling` object.
    """
    if kpath_bounds is None:
        # Derive the high-symmetry path from the structure's symmetry.
        from pymatgen.symmetry.bandstructure import HighSymmKpath
        sp = HighSymmKpath(structure)
        kpath_labels = []
        for labels in sp.kpath["path"]:
            kpath_labels.extend(labels)
        kpath_bounds = []
        for label in kpath_labels:
            red_coord = sp.kpath["kpoints"][label]
            kpath_bounds.append(red_coord)
    return cls(mode=KSamplingModes.path, num_kpts=ndivsm, kpts=kpath_bounds, comment=comment if comment else "K-Path scheme")
Static constructor for path in k-space. Args: structure: :class:`Structure` object. kpath_bounds: List with the reduced coordinates of the k-points defining the path. ndivsm: Number of division for the smallest segment. comment: Comment string. Returns: :class:`KSampling` object.
juraj-google-style
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
    """Configures the switchport trunk group value.

    Args:
        intf (str): The interface identifier to configure.
        value (str): The set of values to configure the trunk group.
        default (bool): Configures the trunk group default value.
        disable (bool): Negates all trunk group settings.

    Returns:
        True if the config operation succeeds otherwise False.
    """
    if default:
        return self.configure_interface(intf, 'default switchport trunk group')
    if disable:
        return self.configure_interface(intf, 'no switchport trunk group')
    current = self.get(intf)['trunk_groups']
    desired = set(make_iterable(value))
    ok = True
    # Add the groups that are wanted but missing, then drop the extras;
    # every add/remove is attempted even after a failure.
    for name in desired.difference(current):
        ok = self.add_trunk_group(intf, name) and ok
    for name in set(current).difference(desired):
        ok = self.remove_trunk_group(intf, name) and ok
    return ok
Configures the switchport trunk group value Args: intf (str): The interface identifier to configure. value (str): The set of values to configure the trunk group default (bool): Configures the trunk group default value disable (bool): Negates all trunk group settings Returns: True if the config operation succeeds otherwise False
juraj-google-style
def time_stats(self, **kwargs):
    """Get time stats for the object.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabTimeTrackingError: If the time tracking update cannot be done.
    """
    # Prefer the copy already embedded in the object's attributes.
    if 'time_stats' in self.attributes:
        return self.attributes['time_stats']
    path = '{}/{}/time_stats'.format(self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(path, **kwargs)
Get time stats for the object. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTimeTrackingError: If the time tracking update cannot be done
juraj-google-style
def __register_class(self, parsed_config):
    """Register the class implementing this config, so we only add it once.

    Args:
      parsed_config: The JSON object with the API configuration being added.

    Raises:
      ApiConfigurationError: If the class has already been registered.
    """
    methods = parsed_config.get('methods')
    if (not methods):
        return
    # Collect the service class names from 'rosyMethod' values, which look
    # like 'ClassName.method'.
    service_classes = set()
    for method in methods.itervalues():  # NOTE(review): Python 2 dict API.
        rosy_method = method.get('rosyMethod')
        if (rosy_method and ('.' in rosy_method)):
            method_class = rosy_method.split('.', 1)[0]
            service_classes.add(method_class)
    for service_class in service_classes:
        if (service_class in self.__registered_classes):
            raise api_exceptions.ApiConfigurationError(('API class %s has already been registered.' % service_class))
        self.__registered_classes.add(service_class)
Register the class implementing this config, so we only add it once. Args: parsed_config: The JSON object with the API configuration being added. Raises: ApiConfigurationError: If the class has already been registered.
codesearchnet
def Relay(self, inventory):
    """Relay the inventory to the remote client.

    Args:
        inventory (neo.Network.Inventory): the inventory to relay.

    Returns:
        bool: True if relayed successfully. False otherwise.
    """
    # Miner transactions are never relayed.
    if type(inventory) is MinerTransaction:
        return False
    # Skip inventories we've already seen.
    if inventory.Hash.ToBytes() in self.KnownHashes:
        return False
    self.KnownHashes.append(inventory.Hash.ToBytes())
    if type(inventory) is Block:
        pass
    elif type(inventory) is Transaction or issubclass(type(inventory), Transaction):
        if not self.AddTransaction(inventory):
            # The transaction was rejected; forget its hash so a later relay
            # attempt is not suppressed by the known-hashes check.
            try:
                self.KnownHashes.remove(inventory.Hash.ToBytes())
            except ValueError:
                pass
            return False
    else:
        pass
    relayed = self.RelayDirectly(inventory)
    return relayed
Relay the inventory to the remote client. Args: inventory (neo.Network.Inventory): Returns: bool: True if relayed successfully. False otherwise.
juraj-google-style
def toTFExample(dtypes):
    """mapPartition function to convert a Spark RDD of Row into an RDD of
    serialized tf.train.Example bytestrings.

    Note that tf.train.Example is a fairly flat structure with limited
    datatypes, e.g. tf.train.FloatList, tf.train.Int64List, and
    tf.train.BytesList, so most DataFrame types will be coerced into one of
    these types.

    Args:
      :dtypes: the DataFrame.dtypes of the source DataFrame.

    Returns:
      A mapPartition function which converts the source DataFrame into
      tf.train.Example bytestrings.
    """
    def _toTFExample(iter):
        # Spark SQL type names grouped by the tf.train feature list each
        # one is coerced into.
        float_dtypes = ['float', 'double']
        int64_dtypes = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'long']
        bytes_dtypes = ['binary', 'string']
        float_list_dtypes = ['array<float>', 'array<double>']
        int64_list_dtypes = ['array<boolean>', 'array<tinyint>', 'array<smallint>', 'array<int>', 'array<bigint>', 'array<long>']

        def _toTFFeature(name, dtype, row):
            # Convert one column of one row into a (name, tf.train.Feature).
            feature = None
            if (dtype in float_dtypes):
                feature = (name, tf.train.Feature(float_list=tf.train.FloatList(value=[row[name]])))
            elif (dtype in int64_dtypes):
                feature = (name, tf.train.Feature(int64_list=tf.train.Int64List(value=[row[name]])))
            elif (dtype in bytes_dtypes):
                if (dtype == 'binary'):
                    feature = (name, tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(row[name])])))
                else:
                    # Non-binary scalars are stringified then UTF-8 encoded.
                    feature = (name, tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(row[name]).encode('utf-8')])))
            elif (dtype in float_list_dtypes):
                feature = (name, tf.train.Feature(float_list=tf.train.FloatList(value=row[name])))
            elif (dtype in int64_list_dtypes):
                feature = (name, tf.train.Feature(int64_list=tf.train.Int64List(value=row[name])))
            else:
                raise Exception('Unsupported dtype: {0}'.format(dtype))
            return feature
        results = []
        for row in iter:
            features = dict([_toTFFeature(name, dtype, row) for (name, dtype) in dtypes])
            example = tf.train.Example(features=tf.train.Features(feature=features))
            # Emit (serialized_example, None) key/value pairs.
            results.append((bytearray(example.SerializeToString()), None))
        return results
    return _toTFExample
mapPartition function to convert a Spark RDD of Row into an RDD of serialized tf.train.Example bytestring. Note that tf.train.Example is a fairly flat structure with limited datatypes, e.g. tf.train.FloatList, tf.train.Int64List, and tf.train.BytesList, so most DataFrame types will be coerced into one of these types. Args: :dtypes: the DataFrame.dtypes of the source DataFrame. Returns: A mapPartition function which converts the source DataFrame into tf.train.Example bytestrings.
codesearchnet
def ProcessAllReadyRequests(self):
    """Processes all requests that are due to run.

    Returns:
      The number of processed requests.
    """
    request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(self.rdf_flow.client_id, self.rdf_flow.flow_id, next_needed_request=self.rdf_flow.next_request_to_process)
    if (not request_dict):
        return 0
    processed = 0
    # Requests must be handled strictly in order; stop at the first gap in
    # the sequence of ready request ids.
    while (self.rdf_flow.next_request_to_process in request_dict):
        (request, responses) = request_dict[self.rdf_flow.next_request_to_process]
        self.RunStateMethod(request.next_state, request, responses)
        self.rdf_flow.next_request_to_process += 1
        processed += 1
        self.completed_requests.append(request)
    # With nothing outstanding, finish the flow via its 'End' state.
    if (processed and self.IsRunning() and (not self.outstanding_requests)):
        self.RunStateMethod('End')
    if ((self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING) and (not self.outstanding_requests)):
        self.MarkDone()
    self.PersistState()
    if (not self.IsRunning()):
        self._ClearAllRequestsAndResponses()
    return processed
Processes all requests that are due to run. Returns: The number of processed requests.
codesearchnet
def __init__(self, dom, path):
    """Initialize the repository from a parsed specification.

    You would usually use the :func:`TemplateRepository.from_url` method
    instead of calling this directly.

    Args:
        dom (dict): Specification of the template repository (not to be
            confused with an XML DOM).
        path: NOTE(review): presumably the location the specification was
            loaded from -- confirm against callers.
    """
    self._dom = dom
    # Build one provider per declared source, keyed by source name.
    self._providers = {
        name: self._get_provider(spec)
        for name, spec in self._dom.get('sources', {}).items()
    }
    self._path = path
You would usually use the :func:`TemplateRepository.from_url` method instead of calling this constructor directly

Args:
    dom (dict): Specification of the template repository (not to be
        confused with an XML DOM)
    path: Path associated with this repository specification
juraj-google-style
def _parse_section(name, source):
    """Extract and de-wrap the lines of a named usage section.

    A logical command line can span several physical lines; a line is
    treated as a continuation unless it starts with a lowercase letter.

    Args:
        name: The name of the section to extract (e.g. "Usage").
        source: The usage string to parse.

    Returns:
        A list containing each line, de-wrapped by whitespace from the
        source.
    """
    # NOTE(review): [7:] presumably strips a fixed-width section header
    # prefix -- confirm against what _get_section returns.
    section = textwrap.dedent(_get_section(name, source)[7:])
    commands = []
    for line in section.splitlines():
        if ((not commands) or (line[:1].isalpha() and line[:1].islower())):
            commands.append(line)
        else:
            # Continuation of the previous command: re-join with one space.
            commands[(- 1)] = '{} {}'.format(commands[(- 1)].strip(), line.strip())
    return commands
Yield each section line. Note: Depending on how it is wrapped, a section line can take up more than one physical line. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A list containing each line, de-wrapped by whitespace from the source code. If the section is defined multiple times in the source code, all lines from all sections with that name will be returned.
codesearchnet
def sg_min(tensor, opt):
    r"""Computes the minimum of elements across axis of a tensor.

    See `tf.reduce_min()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    # Bug fix: the flattened source carried a stray bare `r` expression — the
    # collapsed prefix of this raw docstring — which raised NameError at call
    # time. Restored as the r-docstring.
    return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r"""Computes the minimum of elements across axis of a tensor. See `tf.reduce_min()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
juraj-google-style
def train(self, resume_from_checkpoint: Optional[Union[str, bool]]=None, trial: Union['optuna.Trial', dict[str, Any], None]=None, ignore_keys_for_eval: Optional[list[str]]=None, **kwargs):
    """Main training entry point.

    Args:
        resume_from_checkpoint (`str` or `bool`, *optional*):
            If a `str`, local path to a saved checkpoint as saved by a
            previous instance of [`Trainer`]. If a `bool` and equals `True`,
            load the last checkpoint in *args.output_dir* as saved by a
            previous instance of [`Trainer`]. If present, training will
            resume from the model/optimizer/scheduler states loaded here.
        trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
            The trial run or the hyperparameter dictionary for
            hyperparameter search.
        ignore_keys_for_eval (`List[str]`, *optional*):
            A list of keys in the output of your model (if it is a
            dictionary) that should be ignored when gathering predictions
            for evaluation during the training.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional keyword arguments used to hide deprecated arguments.
    """
    if resume_from_checkpoint is False:
        resume_from_checkpoint = None
    # memory metrics - must set up as early as possible
    self._memory_tracker.start()
    args = self.args
    self.is_in_train = True
    if self.neftune_noise_alpha is not None:
        self.model = self._activate_neftune(self.model)
    # Move the model to device early when doing full eval without training.
    if (args.fp16_full_eval or args.bf16_full_eval) and (not args.do_train) and (not self.is_model_parallel) and (self.model_init is None):
        self._move_model_to_device(self.model, args.device)
    if 'model_path' in kwargs:
        resume_from_checkpoint = kwargs.pop('model_path')
        warnings.warn('`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` instead.', FutureWarning)
    if len(kwargs) > 0:
        # Bug fix: restore the outer double quotes of this f-string; the
        # flattened source collapsed them into the inner single quotes,
        # which is a syntax error.
        raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
    # This might change the seed so needs to run first.
    self._hp_search_setup(trial)
    self._train_batch_size = self.args.train_batch_size
    # Model re-init
    model_reloaded = False
    if self.model_init is not None:
        # Seed must be set before instantiating the model when using model_init.
        enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
        self.model = self.call_model_init(trial)
        model_reloaded = True
        # Reinitializes optimizer and scheduler
        self.optimizer, self.lr_scheduler = (None, None)
    if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
        resume_from_checkpoint = get_last_checkpoint(args.output_dir)
        if resume_from_checkpoint is None:
            raise ValueError(f'No valid checkpoint found in output directory ({args.output_dir})')
    if resume_from_checkpoint is not None:
        if not is_sagemaker_mp_enabled() and (not self.is_deepspeed_enabled) and (not self.is_fsdp_enabled):
            self._load_from_checkpoint(resume_from_checkpoint)
        # In case of repeating the find_executable_batch_size, set
        # `self._train_batch_size` properly.
        state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
        if state.train_batch_size is not None:
            self._train_batch_size = state.train_batch_size
    # If model was re-initialized, put it on the right device and update self.model_wrapped
    if model_reloaded:
        if self.place_model_on_device:
            self._move_model_to_device(self.model, args.device)
        self.model_wrapped = self.model
    inner_training_loop = find_executable_batch_size(self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size)
    if args.push_to_hub:
        try:
            # Disable progress bars when uploading models during checkpoints
            # to avoid polluting stdout.
            hf_hub_utils.disable_progress_bars()
            return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval)
        finally:
            hf_hub_utils.enable_progress_bars()
    else:
        return inner_training_loop(args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval)
Main training entry point. Args: resume_from_checkpoint (`str` or `bool`, *optional*): If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here. trial (`optuna.Trial` or `Dict[str, Any]`, *optional*): The trial run or the hyperparameter dictionary for hyperparameter search. ignore_keys_for_eval (`List[str]`, *optional*) A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions for evaluation during the training. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments used to hide deprecated arguments
github-repos
def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor: upper_bound1 = abs(up[0]) * abs(reg_scale) upper_bound2 = abs(up[0]) * abs(reg_scale) * 2 step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2)) left_values = [-step ** i + 1 for i in range(max_num_bins right_values = [step ** i - 1 for i in range(1, max_num_bins values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2] values = [v if v.dim() > 0 else v.unsqueeze(0) for v in values] values = torch.cat(values, 0) return values
Generates the non-uniform Weighting Function W(n) for bounding box regression. Args: max_num_bins (int): Max number of the discrete bins. up (Tensor): Controls upper bounds of the sequence, where maximum offset is ±up * H / W. reg_scale (float): Controls the curvature of the Weighting Function. Larger values result in flatter weights near the central axis W(max_num_bins/2)=0 and steeper weights at both ends. Returns: Tensor: Sequence of Weighting Function.
github-repos
def _ParseBooleanValue(self, byte_stream): if byte_stream == b'\x00': return False if byte_stream == b'\x01': return True raise errors.ParseError('Unsupported boolean value.')
Parses a boolean value. Args: byte_stream (bytes): byte stream. Returns: bool: boolean value. Raises: ParseError: when the boolean value cannot be parsed.
juraj-google-style
def get_entry(self, pathname_name):
    """Retrieves the specified child file or directory entry.

    Args:
        pathname_name: The basename of the child object to retrieve.

    Returns:
        The fake file or directory object.

    Raises:
        KeyError: if no child exists by the specified name.
    """
    normalized = self._normalized_entryname(pathname_name)
    return self.contents[normalized]
Retrieves the specified child file or directory entry. Args: pathname_name: The basename of the child object to retrieve. Returns: The fake file or directory object. Raises: KeyError: if no child exists by the specified name.
juraj-google-style
def predict_next_action(self, state_key, next_action_list):
    """Predict next action by Q-Learning.

    Args:
        state_key: The key of state in `self.t+1`.
        next_action_list: The possible action in `self.t+1`.

    Returns:
        The key of action.
    """
    if (self.q_df is not None):
        # Restrict the Q table to this state and the permitted actions.
        next_action_q_df = self.q_df[(self.q_df.state_key == state_key)]
        next_action_q_df = next_action_q_df[next_action_q_df.action_key.isin(next_action_list)]
        if (next_action_q_df.shape[0] == 0):
            # No learned values yet: explore uniformly.
            return random.choice(next_action_list)
        else:
            if (next_action_q_df.shape[0] == 1):
                max_q_action = next_action_q_df['action_key'].values[0]
            else:
                # Exploit: pick the action with the highest Q value.
                next_action_q_df = next_action_q_df.sort_values(by=['q_value'], ascending=False)
                max_q_action = next_action_q_df.iloc[(0, :)]['action_key']
            return max_q_action
    else:
        # No Q table at all: explore uniformly.
        return random.choice(next_action_list)
Predict next action by Q-Learning. Args: state_key: The key of state in `self.t+1`. next_action_list: The possible action in `self.t+1`. Returns: The key of action.
codesearchnet
def from_array(array):
    """Export a numpy array to a blosc array.

    Arguments:
        array: The numpy array to compress to blosc array.

    Returns:
        Bytes/String. A blosc compressed array.

    Raises:
        ValueError: if the array cannot be compressed.
    """
    try:
        return blosc.pack_array(array)
    except Exception as exc:
        raise ValueError('Could not compress data from array. {}'.format(exc))
Export a numpy array to a blosc array. Arguments: array: The numpy array to compress to blosc array Returns: Bytes/String. A blosc compressed array
codesearchnet
def replace_with_spqr_linear(model, quantization_config=None, modules_to_not_convert=None, current_key_name=None, has_been_replaced=False):
    """Public method that recursively replaces the Linear layers of the given
    model with SpQR quantized layers. `accelerate` is needed to use this
    method. Returns the converted model and a boolean that indicates if the
    conversion has been successful or not.

    Args:
        model (`torch.nn.Module`):
            The model to convert, can be any `torch.nn.Module` instance.
        quantization_config (`SpQRConfig`):
            The quantization config object that contains the quantization
            parameters.
        modules_to_not_convert (`list[str]`, *optional*):
            A list of nn.Linear weights to not convert. If a parameter path
            is in the list (e.g. `lm_head.weight`), the corresponding module
            will not be converted.
        current_key_name (`list`, *optional*):
            A list that contains the current key name. This is used for
            recursion and should not be passed by the user.
        has_been_replaced (`bool`, *optional*):
            A boolean that indicates if the conversion has been successful
            or not. This is used for recursion and should not be passed by
            the user.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    if is_accelerate_available():
        from accelerate import init_empty_weights
    if is_spqr_available():
        from spqr_quant import QuantizedLinear
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear):
            if '.'.join(current_key_name) + '.weight' not in modules_to_not_convert:
                with init_empty_weights():
                    tensor_name = '.'.join(current_key_name)
                    shapes = quantization_config.shapes
                    shapes_keys = shapes.keys()
                    # All four shape entries must be present for this tensor.
                    shapes_valid = f'{tensor_name}.dense_weights.shape' in shapes_keys and f'{tensor_name}.row_offsets.shape' in shapes_keys and (f'{tensor_name}.col_vals.shape' in shapes_keys) and (f'{tensor_name}.in_perm.shape' in shapes_keys)
                    if not shapes_valid:
                        # Bug fix: this message was one literal split across
                        # two physical lines (an unterminated f-string);
                        # rejoined into a single literal.
                        raise ValueError(f'The SpQR quantization config does not contain the shape configuration for {tensor_name}. This indicates that the configuration is either invalid or corrupted.')
                    dense_weights_shape = shapes[f'{tensor_name}.dense_weights.shape']
                    row_offsets_shape = shapes[f'{tensor_name}.row_offsets.shape']
                    col_vals_shape = shapes[f'{tensor_name}.col_vals.shape']
                    in_perm_shape = shapes[f'{tensor_name}.in_perm.shape']
                    in_features = module.in_features
                    out_features = module.out_features
                    model._modules[name] = QuantizedLinear.create_placehodler(rows=out_features, cols=in_features, bits=quantization_config.bits, beta1=quantization_config.beta1, beta2=quantization_config.beta2, dense_weights_shape=dense_weights_shape, row_offsets_shape=row_offsets_shape, col_vals_shape=col_vals_shape, in_perm_shape=in_perm_shape)
                    has_been_replaced = True
                    # Store the module class in case we need to transpose the
                    # weight later, and freeze the replacement.
                    model._modules[name].source_cls = type(module)
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = replace_with_spqr_linear(module, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, has_been_replaced=has_been_replaced)
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return (model, has_been_replaced)
Public method that recursively replaces the Linear layers of the given model with SpQR quantized layers. `accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the conversion has been successful or not. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`SpQRConfig`): The quantization config object that contains the quantization parameters. modules_to_not_convert (`list[str]`, *optional*): A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be converted. current_key_name (`list`, *optional*): A list that contains the current key name. This is used for recursion and should not be passed by the user. has_been_replaced (`bool`, *optional*): A boolean that indicates if the conversion has been successful or not. This is used for recursion and should not be passed by the user.
github-repos
def unprotect(self, **kwargs):
    """Unprotect the branch.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabProtectError: If the branch could not be unprotected.
    """
    # Branch names may contain '/', which must be URL-encoded in the path.
    branch_id = self.get_id().replace('/', '%2F')
    path = '{}/{}/unprotect'.format(self.manager.path, branch_id)
    self.manager.gitlab.http_put(path, **kwargs)
    self._attrs['protected'] = False
Unprotect the branch. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabProtectError: If the branch could not be unprotected
juraj-google-style
def _parse_local_interface(self, config): match = re.search('local-interface (\\w+)', config) value = (match.group(1) if match else None) return dict(local_interface=value)
Scans the config block and parses the local-interface value Args: config (str): The config block to scan Returns: dict: A dict object that is intended to be merged into the resource dict
codesearchnet
def read_excel(
    filename,
    dataset_class=dataset.pandas_dataset.PandasDataset,
    expectations_config=None,
    autoinspect_func=None,
    *args, **kwargs
):
    """Read a file using Pandas read_excel and return a great_expectations
    dataset.

    Args:
        filename (string): path to file to read
        dataset_class (Dataset class): class to which to convert resulting
            Pandas df
        expectations_config (string): path to great_expectations config file

    Returns:
        great_expectations dataset or ordered dict of great_expectations
        datasets, if multiple worksheets are imported
    """
    df = pd.read_excel(filename, *args, **kwargs)
    if isinstance(df, dict):
        # Multiple worksheets: pandas returns a dict of DataFrames, so wrap
        # each one individually.
        for key in df:
            df[key] = _convert_to_dataset_class(
                df[key], dataset_class, expectations_config, autoinspect_func)
    else:
        df = _convert_to_dataset_class(
            df, dataset_class, expectations_config, autoinspect_func)
    return df
Read a file using Pandas read_excel and return a great_expectations dataset. Args: filename (string): path to file to read dataset_class (Dataset class): class to which to convert resulting Pandas df expectations_config (string): path to great_expectations config file Returns: great_expectations dataset or ordered dict of great_expectations datasets, if multiple worksheets are imported
juraj-google-style
def reduced_shape(input_shape, axes):
    """Helper function for reduction ops.

    Args:
      input_shape: 1-D Tensor, the shape of the Tensor being reduced.
      axes: 1-D Tensor, the reduction axes.

    Returns:
      A 1-D Tensor, the output shape as if keepdims were set to True.
    """
    # Fast path: when both shape and axes are statically known, compute the
    # reduced shape eagerly as a constant.
    constant_input_shape = tensor_util.constant_value(input_shape)
    if constant_input_shape is not None:
        constant_axes = tensor_util.constant_value(axes)
        if constant_axes is not None:
            constant_axes = np.array(constant_axes, dtype=np.int32)
            constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
            constant_input_shape[constant_axes] = 1
            return constant_input_shape
    # Dynamic path: normalize negative axes, then scatter 1s into the
    # reduced positions via dynamic_stitch.
    axes = ops.convert_to_tensor(axes)
    input_rank = array_ops.size(input_shape, out_type=axes.dtype)
    axes = (axes + input_rank) % input_rank
    axes_shape = array_ops.shape(axes)
    return gen_data_flow_ops.dynamic_stitch([range(input_rank), axes], [input_shape, array_ops.ones(axes_shape, dtype=input_shape.dtype)])
Helper function for reduction ops. Args: input_shape: 1-D Tensor, the shape of the Tensor being reduced. axes: 1-D Tensor, the reduction axes. Returns: A 1-D Tensor, the output shape as if keepdims were set to True.
github-repos
def __init__(self, key_value_pairs=None):
    """Initializes a source type.

    Args:
      key_value_pairs (Optional[list[dict[str, str]]]): dicts with exactly
        the entries 'key' and 'value', where key paths are relative to the
        root of the Windows Registry.

    Raises:
      FormatError: when key value pairs is not set or malformed.
    """
    if not key_value_pairs:
        raise errors.FormatError('Missing key value pairs value.')
    if not isinstance(key_value_pairs, list):
        raise errors.FormatError('key_value_pairs must be a list')
    for pair in key_value_pairs:
        if not isinstance(pair, dict):
            raise errors.FormatError('key_value_pair must be a dict')
        if set(pair.keys()) != set(['key', 'value']):
            # Bug fix: the original joined over the outer list of dicts with
            # tuple unpacking, which yields dict *keys* (not values) and
            # raises ValueError for pairs without exactly two entries.
            # Describe the offending pair itself instead; !s tolerates
            # non-string values where the original :s would raise.
            pair_description = ', '.join([
                '{0!s}: {1!s}'.format(key, value)
                for key, value in sorted(pair.items())])
            error_message = (
                'key_value_pair missing "key" and "value" keys, got: '
                '{0:s}').format(pair_description)
            raise errors.FormatError(error_message)
        WindowsRegistryKeySourceType.ValidateKey(pair['key'])
    super(WindowsRegistryValueSourceType, self).__init__()
    self.key_value_pairs = key_value_pairs
Initializes a source type. Args: key_value_pairs (Optional[list[tuple[str, str]]]): key path and value name pairs, where key paths are relative to the root of the Windows Registry. Raises: FormatError: when key value pairs is not set.
juraj-google-style
def is17(msg):
    """Check if a message is likely to be BDS code 1,7.

    Args:
        msg (String): 28 bytes hexadecimal message string.

    Returns:
        bool: True or False.
    """
    if allzeros(msg):
        return False
    payload = hex2bin(data(msg))
    # Bits 29-56 of the data field must all be zero for BDS 1,7.
    if bin2int(payload[28:56]) != 0:
        return False
    # The advertised capabilities must at least include BDS 2,0.
    return 'BDS20' in cap17(msg)
Check if a message is likely to be BDS code 1,7 Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
codesearchnet
def fetch(self, plan_id, data={}, **kwargs):
    """Fetch Plan for given Id.

    Args:
        plan_id: Id for which Plan object has to be retrieved.

    Returns:
        Plan dict for given plan Id.
    """
    # NOTE(review): mutable default `data={}` is shared across calls; safe
    # only if the parent fetch never mutates it -- confirm.
    return super(Plan, self).fetch(plan_id, data, **kwargs)
Fetch Plan for given Id Args: plan_id : Id for which Plan object has to be retrieved Returns: Plan dict for given subscription Id
juraj-google-style
def get_committed_signatures(vcs):
    """Get the list of committed signatures.

    Args:
        vcs (easyci.vcs.base.Vcs): the repository to read history for.

    Returns:
        list(basestring) - list of signatures; empty when no history file
        exists.
    """
    committed_path = _get_committed_history_path(vcs)
    if not os.path.exists(committed_path):
        return []
    with open(committed_path, 'r') as history_file:
        return history_file.read().split()
Get the list of committed signatures Args: vcs (easyci.vcs.base.Vcs) Returns: list(basestring) - list of signatures
codesearchnet
def find_contacts(self, geoms_1, geoms_2):
    """Finds contact between two geom groups.

    Args:
        geoms_1: a list of geom names (string).
        geoms_2: another list of geom names (string).

    Returns:
        iterator of all contacts between @geoms_1 and @geoms_2.
    """
    # Only the first ncon entries of the contact buffer are active.
    for contact in self.sim.data.contact[0:self.sim.data.ncon]:
        # A contact matches when its two geoms fall one in each group,
        # in either order.
        c1_in_g1 = (self.sim.model.geom_id2name(contact.geom1) in geoms_1)
        c2_in_g2 = (self.sim.model.geom_id2name(contact.geom2) in geoms_2)
        c2_in_g1 = (self.sim.model.geom_id2name(contact.geom2) in geoms_1)
        c1_in_g2 = (self.sim.model.geom_id2name(contact.geom1) in geoms_2)
        if ((c1_in_g1 and c2_in_g2) or (c1_in_g2 and c2_in_g1)):
            (yield contact)
Finds contact between two geom groups. Args: geoms_1: a list of geom names (string) geoms_2: another list of geom names (string) Returns: iterator of all contacts between @geoms_1 and @geoms_2
codesearchnet
def construct_lanczos_params(self):
    """Builds TF ops that estimate minimum eigenvalues via Lanczos iteration.

    Creates placeholder-backed ops for both the matrix M (dimension
    ``self.matrix_m_dimension``) and the matrix H (dimension one less),
    storing the resulting min-eigenvalue/eigenvector tensors and the
    initial-vector placeholders on ``self``.
    """
    # Convert the Lanczos smallest-eigenvalue routine into graph mode so it
    # can be called as a TF op below.
    self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval)

    def _m_vector_prod_fn(x):
        # Matrix-vector product with M, computed in the Lanczos dtype.
        return self.get_psd_product(x, dtype=self.lanczos_dtype)

    def _h_vector_prod_fn(x):
        # Matrix-vector product with H, computed in the Lanczos dtype.
        return self.get_h_product(x, dtype=self.lanczos_dtype)

    # --- Matrix M: initial vector estimate, placeholder and min-eigen ops ---
    self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64)
    zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64)
    # Placeholder defaults to zeros so the graph also runs without a feed.
    self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m, shape=(self.matrix_m_dimension, 1), name='m_min_vec_ph')
    (self.m_min_eig, self.m_min_vec) = self.min_eigen_vec(_m_vector_prod_fn, self.matrix_m_dimension, self.m_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype)
    # Cast results back to the network dtype for downstream ops.
    self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype)
    self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype)

    # --- Matrix H (dimension M - 1): same construction as above ---
    self.h_min_vec_estimate = np.zeros(shape=((self.matrix_m_dimension - 1), 1), dtype=np.float64)
    zeros_h = tf.zeros(shape=((self.matrix_m_dimension - 1), 1), dtype=tf.float64)
    self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h, shape=((self.matrix_m_dimension - 1), 1), name='h_min_vec_ph')
    (self.h_min_eig, self.h_min_vec) = self.min_eigen_vec(_h_vector_prod_fn, (self.matrix_m_dimension - 1), self.h_min_vec_ph, self.lzs_params['max_iter'], dtype=self.lanczos_dtype)
    self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype)
    self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)
Builds TensorFlow ops that estimate minimum eigenvalues via the Lanczos
algorithm.

Constructs placeholders and min-eigenvalue/eigenvector ops for the
matrices M and H, with the iteration count taken from
`self.lzs_params['max_iter']`; the results are stored on `self`.
codesearchnet
def trace_set_format(self, fmt):
    """Sets the format for the trace buffer to use.

    Args:
        self (JLink): the ``JLink`` instance.
        fmt (int): format for the trace buffer; this is one of the
            attributes of ``JLinkTraceFormat``.

    Returns:
        ``None``

    Raises:
        JLinkException: when the DLL reports the format could not be set.
    """
    command = enums.JLinkTraceCommand.SET_FORMAT
    value = ctypes.c_uint32(fmt)
    status = self._dll.JLINKARM_TRACE_Control(command, ctypes.byref(value))
    # The DLL signals failure for this control command with a return of 1.
    if status == 1:
        raise errors.JLinkException('Failed to set trace format.')
    return None
Sets the format for the trace buffer to use. Args: self (JLink): the ``JLink`` instance. fmt (int): format for the trace buffer; this is one of the attributes of ``JLinkTraceFormat``. Returns: ``None``
juraj-google-style
def campaign(self, name, **kwargs):
    """Add Campaign data to the Batch object.

    Args:
        name (str): The name for this Group.
        **kwargs: Forwarded verbatim to the ``Campaign`` constructor
            (e.g. date_added, first_seen, xid — confirm against Campaign).

    Returns:
        obj: An instance of Campaign, as registered via ``self._group``.
    """
    group_obj = Campaign(name, **kwargs)
    return self._group(group_obj)
Add Campaign data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. first_seen (str, kwargs): The first seen datetime expression for this Group. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Campaign.
codesearchnet
def GetFormattedSources(self, event):
    """Retrieves the formatted sources related to the event.

    Args:
        event (EventObject): event.

    Returns:
        tuple: containing:

            str: full source string or None if no event formatter was found.
            str: short source string or None if no event formatter was found.
    """
    formatter = self.GetEventFormatter(event)
    if formatter:
        return formatter.GetSources(event)
    # No formatter is registered for this event's data type.
    return None, None
Retrieves the formatted sources related to the event. Args: event (EventObject): event. Returns: tuple: containing: str: full source string or None if no event formatter was found. str: short source string or None if no event formatter was found.
juraj-google-style
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
    """Variable initializer that remaps rows/columns from a checkpoint.

    Closes over the surrounding remapping parameters (ckpt_path,
    old_tensor_name, vocab files/sizes, OOV bucket counts, initializer).

    Args:
      shape: Shape of `Tensor` to return. Should include OOV on both axes.
      dtype: Must be float32.
      partition_info: variable_scope._PartitionInfo.

    Returns:
      `Tensor` of shape `shape`.

    Raises:
      TypeError: If `dtype` is anything other than float32.
      ValueError: For shape mismatch upon invocation.
    """
    # Only float32 remapping is implemented.
    if dtype != dtypes.float32:
        raise TypeError('Currently, only float32 is supported. Received dtype: {}'.format(dtype))
    if len(shape) != 2:
        raise ValueError('Expected 2-dim shape, but received: {}'.format(shape))
    if shape[0] <= 0:
        raise ValueError('Expected 1st dim of shape to be > 0, but received shape: {}'.format(shape))
    # The column dimension must account for the new vocab plus column OOV buckets.
    if shape[1] != new_col_vocab_size + num_col_oov_buckets:
        raise ValueError('Expected 2nd dim of shape to be new_col_vocab_size ({}) + num_col_oov_buckets ({}) = {}, but received shape: {}'.format(new_col_vocab_size, num_col_oov_buckets, new_col_vocab_size + num_col_oov_buckets, shape))

    # For partitioned variables, this initializer only sees a slice; the
    # offset locates that slice within the full (row) dimension.
    offset = 0
    if partition_info is not None:
        offset = partition_info.single_offset(shape)

    if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets:
        raise ValueError('Trying to initialize {} additional rows after {} rows have already been initialized, which would exceed expected total row count of new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.'.format(shape[0], offset, new_row_vocab_size, num_row_oov_buckets, new_row_vocab_size + num_row_oov_buckets))

    # Rows past new_row_vocab_size are OOV rows: they are filled by the
    # plain initializer rather than loaded from the checkpoint.
    row_oov_buckets_to_use = min(shape[0], max(0, offset + shape[0] - new_row_vocab_size))
    num_rows_to_load = shape[0] - row_oov_buckets_to_use

    # A partition that lies entirely in OOV territory is initialized
    # without touching the checkpoint at all.
    if offset > new_row_vocab_size:
        if shape[0] != row_oov_buckets_to_use:
            raise ValueError('Partitioned variable offset is greater than new vocab size and not operating on OOV-only partition.')
        return initializer(shape)

    return _load_and_remap_matrix(ckpt_path=ckpt_path, old_tensor_name=old_tensor_name, new_row_vocab_offset=offset, num_rows_to_load=num_rows_to_load, new_col_vocab_size=new_col_vocab_size, initializer=initializer, old_row_vocab_size=old_row_vocab_size, old_row_vocab_file=old_row_vocab_file, new_row_vocab_file=new_row_vocab_file, old_col_vocab_file=old_col_vocab_file, new_col_vocab_file=new_col_vocab_file, num_row_oov_buckets=row_oov_buckets_to_use, num_col_oov_buckets=num_col_oov_buckets, max_rows_in_memory=max_rows_in_memory)
Variable initializer. Args: shape: Shape of `Tensor` to return. Should include OOV on both axes. dtype: Must be float32. partition_info: variable_scope._PartitionInfo. Returns: `Tensor` of shape `shape`. Raises: TypeError: If `dtype` is anything other than float32. ValueError: For shape mismatch upon invocation.
github-repos
def rotate_capture_handler_log(self, name):
    """Force a rotation of a handler's log file.

    Scans every registered stream capturer and rotates the log of each
    capture handler whose name matches.

    Args:
        name: The name of the handler whose log file should be rotated.
    """
    # Use items() rather than the Python 2-only iteritems() so this also
    # works under Python 3 (items() is equally valid on Python 2).
    for _, stream_capturer in self._stream_capturers.items():
        capturer = stream_capturer[0]
        for handler in capturer.capture_handlers:
            if handler['name'] == name:
                capturer._rotate_log(handler)
Force a rotation of a handler's log file Args: name: The name of the handler who's log file should be rotated.
juraj-google-style
def has_mixture(val: Any) -> bool:
    """Returns whether the value has a mixture representation.

    Returns:
        If `val` has a `_has_mixture_` method and its result is not
        NotImplemented, that result is returned. Otherwise, if the value
        has a `_mixture_` method return True if that has a non-default
        value. Returns False if neither function exists.
    """
    method = getattr(val, '_has_mixture_', None)
    if method is not None:
        outcome = method()
        if outcome is not NotImplemented:
            return outcome
    # Fall back to probing for an actual mixture representation.
    return mixture(val, None) is not None
Returns whether the value has a mixture representation. Returns: If `val` has a `_has_mixture_` method and its result is not NotImplemented, that result is returned. Otherwise, if the value has a `_mixture_` method return True if that has a non-default value. Returns False if neither function exists.
codesearchnet
def run_step(self, context):
    """Run a single pipeline step.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg
                 will mutate.
    """
    logger.debug("starting")
    self.set_step_input_context(context)

    # A while decorator wraps the foreach/conditional execution in a loop;
    # without one, execute it exactly once.
    if not self.while_decorator:
        self.run_foreach_or_conditional(context)
    else:
        self.while_decorator.while_loop(
            context, self.run_foreach_or_conditional)

    logger.debug("done")
Run a single pipeline step. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
juraj-google-style
class CounterMetric(Metric):
    """The Counter Metric in ready-to-publish format.

    Args:
        counter_metric (object): counter metric object from MetricResult
        submit_timestamp (float): date-time of saving metric to database
        metric_id (uuid): unique id to identify test run
    """

    def __init__(self, counter_metric, submit_timestamp, metric_id):
        # A counter publishes its raw accumulated result as the metric value.
        counter_value = counter_metric.result
        super().__init__(
            submit_timestamp, metric_id, counter_value, counter_metric)
The Counter Metric in ready-to-publish format. Args: counter_metric (object): counter metric object from MetricResult submit_timestamp (float): date-time of saving metric to database metric_id (uuid): unique id to identify test run
github-repos
def _WritesString(self, content):
    """Writes a string to the sample file as UTF-8.

    Args:
        content (str): content to write to the sample file.
    """
    # str.encode is the idiomatic (and equivalent) form of
    # codecs.encode(content, 'utf-8').
    self._sample_file.write(content.encode('utf-8'))
Writes a string to the sample file. Args: content (str): content to write to the sample file.
codesearchnet
def _parse_authors(details):
    """Parse authors of the book.

    Args:
        details (obj): HTMLElement containing slice of the page with details.

    Returns:
        list: List of :class:`structures.Author` objects. Blank if no
            author found.
    """
    # Locate the table row that holds the author links.
    authors = details.find(
        "tr", {"id": "ctl00_ContentPlaceHolder1_tblRowAutor"}
    )

    if not authors:
        return []

    author_list = []
    # Each <a> inside the first matching row is one author; the href, when
    # present, becomes the author's URL.
    for author in authors[0].find("a"):
        author_obj = Author(author.getContent())

        if "href" in author.params:
            author_obj.URL = author.params["href"]

        author_list.append(author_obj)

    return author_list
Parse authors of the book.

Args:
    details (obj): HTMLElement containing slice of the page with details.

Returns:
    list: List of :class:`structures.Author` objects. Blank if no author
    found.
juraj-google-style
def remove_long_seq(maxlen, seq, label):
    """Removes sequences whose length is not strictly below `maxlen`.

    Args:
        maxlen: Int, exclusive upper bound on sequence length.
        seq: List of lists, where each sublist is a sequence.
        label: List where each element is an integer.

    Returns:
        new_seq, new_label: shortened lists for `seq` and `label`.
    """
    # Filter sequence/label pairs together, then split back into two lists.
    kept = [(s, y) for s, y in zip(seq, label) if len(s) < maxlen]
    new_seq = [s for s, _ in kept]
    new_label = [y for _, y in kept]
    return new_seq, new_label
Removes sequences whose length is greater than or equal to `maxlen`.

Args:
    maxlen: Int, exclusive upper bound on output sequence length.
    seq: List of lists, where each sublist is a sequence.
    label: List where each element is an integer.

Returns:
    new_seq, new_label: shortened lists for `seq` and `label`.
github-repos
def _single_request(self, method, *args, **kwargs):
    """Make a single request to the fleet API endpoint.

    Args:
        method (str): A dot delimited string indicating the method to call.
            Example: 'Machines.List'
        *args: Passed directly to the method being called.
        **kwargs: Passed directly to the method being called.

    Returns:
        dict: The response from the method called.

    Raises:
        fleet.v1.errors.APIError: Fleet returned a response code >= 400
    """
    _method = self._service
    # Walk the dotted path: intermediate segments are invoked bare,
    # the final segment receives the caller's arguments.
    # NOTE(review): the final segment is detected with endswith(), which
    # would also match an earlier identical segment — confirm method paths
    # never repeat their last segment.
    for item in method.split('.'):
        if method.endswith(item):
            _method = getattr(_method, item)(*args, **kwargs)
        else:
            _method = getattr(_method, item)()

    # The discovery document uses a $ENDPOINT placeholder; substitute the
    # configured endpoint before executing.
    _method.uri = _method.uri.replace('$ENDPOINT', self._endpoint)

    try:
        return _method.execute(http=self._http)
    except googleapiclient.errors.HttpError as exc:
        # Translate the googleapiclient error payload into our APIError.
        response = json.loads(exc.content.decode('utf-8'))['error']
        raise APIError(code=response['code'], message=response['message'], http_error=exc)
Make a single request to the fleet API endpoint Args: method (str): A dot delimited string indicating the method to call. Example: 'Machines.List' *args: Passed directly to the method being called. **kwargs: Passed directly to the method being called. Returns: dict: The response from the method called. Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
codesearchnet
def stage(self, pipeline_name, stage_name, pipeline_counter=None):
    """Returns an instance of :class:`Stage` bound to this server instance.

    Args:
        pipeline_name (str): Name of the pipeline the stage belongs to.
        stage_name (str): Name of the stage to act on.
        pipeline_counter (int): The pipeline instance the stage is for.

    Returns:
        Stage: an instantiated :class:`Stage`.
    """
    return Stage(self, pipeline_name, stage_name, pipeline_counter=pipeline_counter)
Returns an instance of :class:`Stage` Args: pipeline_name (str): Name of the pipeline the stage belongs to stage_name (str): Name of the stage to act on pipeline_counter (int): The pipeline instance the stage is for. Returns: Stage: an instantiated :class:`Stage`.
juraj-google-style
def _CalculateNTFSTimeHash(self, file_entry):
    """Calculates an MD5 from the date and time values of a NTFS file entry.

    Args:
        file_entry (dfvfs.FileEntry): file entry.

    Returns:
        str: hexadecimal representation of the MD5 hash value of the date
            and time values of the file entry.
    """
    date_time_values = []
    # The four previously copy-pasted attribute blocks collapsed into one
    # loop; the (attribute, label) order is preserved so hashes are
    # unchanged: atime, crtime, mtime, ctime.
    for attribute_name, prefix in (
        ('access_time', 'atime'),
        ('creation_time', 'crtime'),
        ('modification_time', 'mtime'),
        ('change_time', 'ctime')):
        date_time = getattr(file_entry, attribute_name, None)
        if date_time:
            date_time_values.append('{0:s}:{1:s}'.format(
                prefix, date_time.CopyToDateTimeString()))

    date_time_values = ''.join(date_time_values)
    date_time_values = date_time_values.encode('ascii')

    hash_value = hashlib.md5()
    hash_value.update(date_time_values)
    return hash_value.hexdigest()
Calculates an MD5 from the date and time value of a NTFS file entry. Args: file_entry (dfvfs.FileEntry): file entry. Returns: str: hexadecimal representation of the MD5 hash value of the date and time values of the file entry.
codesearchnet
def max_sequence_length(self, dataset_split):
    """Determine the maximum sequence length given a dataset_split.

    Args:
        dataset_split: A problem.DatasetSplit.

    Returns:
        int: the maximum length a sequence can have for this split
            (64 for TRAIN, 128 for EVAL and TEST).
    """
    return {problem.DatasetSplit.TRAIN: 64, problem.DatasetSplit.EVAL: 128, problem.DatasetSplit.TEST: 128}[dataset_split]
Determine the maximum sequence length given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The maximum length that a sequence can be for this dataset_split.
codesearchnet
def resize(x, mode, factor=4):
    """Resize an NCHW tensor spatially by an integer factor.

    Args:
        x (tf.Tensor): tensor NCHW
        mode (str): interpolation mode, 'bilinear' or 'nearest'
        factor (int, optional): resize factor for H, W

    Returns:
        tf.Tensor: resized tensor NCHW
    """
    assert mode in ['bilinear', 'nearest'], mode
    # Target spatial size = input (H, W) scaled by factor.
    shp = tf.shape(x)[2:] * factor
    # tf.image.resize_* expects NHWC; transpose in and back out.
    x = tf.transpose(x, [0, 2, 3, 1])
    # NOTE(review): align_corners differs between modes on purpose here —
    # per the original author, this choice strongly affects prediction
    # quality; confirm before changing.
    if mode == 'bilinear':
        x = tf.image.resize_bilinear(x, shp, align_corners=True)
    else:
        x = tf.image.resize_nearest_neighbor(x, shp, align_corners=False)
    return tf.transpose(x, [0, 3, 1, 2])
Resize input tensor with unknown input-shape by a factor

Args:
    x (tf.Tensor): tensor NCHW
    mode (str): interpolation mode, one of 'bilinear' or 'nearest'
    factor (int, optional): resize factor for H, W

Note:
    Differences here against Caffe have huge impacts on the quality
    of the predictions.

Returns:
    tf.Tensor: resized tensor NCHW
juraj-google-style
def download_and_extract(self, url_or_urls):
    """Download and extract given url_or_urls.

    Is roughly equivalent to:

    ```
    extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
    ```

    Args:
        url_or_urls: url or `list`/`dict` of urls to download and extract.

    Returns:
        extracted_path(s): `str`, extracted paths of given URL(s).
    """
    # Nested tqdm contexts give separate progress bars for the download
    # and extraction phases while _map_promise walks the url structure.
    with self._downloader.tqdm():
        with self._extractor.tqdm():
            return _map_promise(self._download_extract, url_or_urls)
Download and extract given url_or_urls. Is roughly equivalent to: ``` extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls)) ``` Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url can be a `str` or `tfds.download.Resource`. If not explicitly specified in `Resource`, the extraction method will automatically be deduced from downloaded file name. Returns: extracted_path(s): `str`, extracted paths of given URL(s).
codesearchnet