Dataset schema: code (string, 20 to 4.93k chars), docstring (string, 33 to 1.27k chars), source (string, 3 classes).
def simplify(self, assignments):
    if self.right in assignments:
        return self
    else:
        return self if self.right in assignments[self.left] else FALSE
Simplify this equality.

This will try to look up the values, and return FALSE if they're no longer
possible. Also, when comparing two variables, it will compute the
intersection, and return a disjunction of variable=value equalities instead.

Args:
    assignments: Variable assignments (dict mapping strings to sets of
        strings). Used to determine whether this equality is still possible,
        and to compute intersections between two variables.

Returns:
    A new BooleanTerm.
github-repos
def input_elements(self, instruction_id, expected_inputs, abort_callback=None):
    received = self._receiving_queue(instruction_id)
    if received is None:
        raise RuntimeError('Instruction cleaned up already %s' % instruction_id)
    done_inputs = set()
    abort_callback = abort_callback or (lambda: False)
    log_interval_sec = 5 * 60
    try:
        start_time = time.time()
        next_waiting_log_time = start_time + log_interval_sec
        while len(done_inputs) < len(expected_inputs):
            try:
                element = received.get(timeout=1)
            except queue.Empty:
                if self._closed:
                    raise RuntimeError('Channel closed prematurely.')
                if abort_callback():
                    return
                if self._exception:
                    raise self._exception from None
                current_time = time.time()
                if next_waiting_log_time <= current_time:
                    _LOGGER.info(
                        'Detected input queue delay longer than %s seconds. '
                        'Waiting to receive elements in input queue for '
                        'instruction: %s for %.2f seconds.',
                        log_interval_sec, instruction_id,
                        current_time - start_time)
                    next_waiting_log_time = current_time + log_interval_sec
            else:
                start_time = time.time()
                next_waiting_log_time = start_time + log_interval_sec
                if isinstance(element, beam_fn_api_pb2.Elements.Timers):
                    if element.is_last:
                        done_inputs.add((element.transform_id, element.timer_family_id))
                    else:
                        yield element
                elif isinstance(element, beam_fn_api_pb2.Elements.Data):
                    if element.is_last:
                        done_inputs.add(element.transform_id)
                    else:
                        assert element.transform_id not in done_inputs
                        yield element
                else:
                    raise ValueError('Unexpected input element type %s' % type(element))
    finally:
        self._clean_receiving_queue(instruction_id)
Generator to retrieve elements for an instruction_id.

input_elements should be called only once per instruction_id.

Args:
    instruction_id (str): instruction_id for which data is read.
    expected_inputs (collection): expected inputs, including both data and
        timers.
github-repos
def get_all_organization_names(configuration=None, **kwargs):
    organization = Organization(configuration=configuration)
    organization['id'] = 'all organizations'
    return organization._write_to_hdx('list', kwargs, 'id')
Get all organization names in HDX.

Args:
    configuration (Optional[Configuration]): HDX configuration. Defaults to
        global configuration.
    **kwargs: See below.
    sort (str): Sort the search results according to field name and
        sort-order. Allowed fields are 'name', 'package_count' and 'title'.
        Defaults to 'name asc'.
    organizations (List[str]): List of names of the groups to return.
    all_fields (bool): Return group dictionaries instead of just names. Only
        core fields are returned - get some more using the include_* options.
        Defaults to False.
    include_extras (bool): If all_fields, include the group extra fields.
        Defaults to False.
    include_tags (bool): If all_fields, include the group tags. Defaults to
        False.
    include_groups: If all_fields, include the groups the groups are in.
        Defaults to False.

Returns:
    List[str]: List of all organization names in HDX.
codesearchnet
def device_coordinates(self):
    return self._device_coordinates
Describes the mapping from TPU devices to topology coordinates.

Returns:
    A rank 3 int32 array with shape `[tasks, devices, axis]`. `tasks` is
    the number of tasks in the TPU cluster, `devices` is the number of TPU
    devices per task, and `axis` is the number of axes in the TPU cluster
    topology. Each entry gives the `axis`-th coordinate in the topology of
    a task/device pair. TPU topologies are 4-dimensional, with dimensions
    `(x, y, z, core number)`.
github-repos
def _psd_mask(x):
    eigenvalues, _ = tf.linalg.eigh(x)
    return tf.cast(
        tf.reduce_min(input_tensor=eigenvalues, axis=-1) >= 0, dtype=x.dtype)
Computes whether each square matrix in the input is positive semi-definite.

Args:
    x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

Returns:
    mask: A floating-point `Tensor` of shape `[B1, ..., Bn]`. Each scalar is
        1 if the corresponding matrix was PSD, otherwise 0.
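A quick spot check on a batch of one PSD and one indefinite matrix (a minimal sketch; assumes TensorFlow 2.x and that _psd_mask is in scope):

import tensorflow as tf

# diag(2, 1) is PSD; diag(1, -1) has a negative eigenvalue.
x = tf.constant([[[2., 0.], [0., 1.]],
                 [[1., 0.], [0., -1.]]])  # shape [2, 2, 2]: a batch of two matrices
print(_psd_mask(x))  # expected: tf.Tensor([1. 0.], shape=(2,), dtype=float32)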
juraj-google-style
def wrap_layer_objects(layer, serialization_cache):
    all_losses = layer._callable_losses[:]
    for child_layer in utils.list_all_layers(layer):
        all_losses.extend(child_layer._callable_losses)
    keras_loss_cache = serialization_cache.setdefault('keras_losses', {})
    wrapped_loss_functions = []
    for loss_fn in all_losses:
        if loss_fn in keras_loss_cache:
            wrapped_loss_functions.append(keras_loss_cache[loss_fn])
        else:
            wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))
            keras_loss_cache[loss_fn] = wrapped_loss
            wrapped_loss_functions.append(wrapped_loss)
    wrapped_layer_losses = [keras_loss_cache[fn] for fn in layer._callable_losses[:]]
    layer_metrics = data_structures.wrap_or_unwrap({m.name: m for m in layer._metrics})
    return dict(
        variables=data_structures.wrap_or_unwrap(layer.variables),
        trainable_variables=data_structures.wrap_or_unwrap(layer.trainable_variables),
        non_trainable_variables=data_structures.wrap_or_unwrap(layer.non_trainable_variables),
        layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)),
        metrics=data_structures.wrap_or_unwrap(layer.metrics),
        regularization_losses=data_structures.wrap_or_unwrap(wrapped_loss_functions),
        layer_regularization_losses=data_structures.wrap_or_unwrap(wrapped_layer_losses),
        layer_metrics=layer_metrics)
Returns extra trackable objects to attach to the serialized layer.

Args:
    layer: Keras Layer object.
    serialization_cache: Dictionary shared between all objects during
        serialization.

Returns:
    A dictionary containing all checkpointable objects from a
    SerializedAttributes object. See LayerAttributes and ModelAttributes for
    the entire list of objects.
github-repos
def invite_by_email(self, email, user, organization, **kwargs):
    try:
        invitee = self.user_model.objects.get(email__iexact=email)
    except self.user_model.DoesNotExist:
        invitee = None
    user_invitation = self.invitation_model.objects.create(
        invitee=invitee,
        invitee_identifier=email.lower(),
        invited_by=user,
        organization=organization)
    self.send_invitation(user_invitation)
    return user_invitation
Primary interface method by which one user invites another to join.

Args:
    email: Email address of the invitee.
    user: The inviting user.
    organization: The organization the invitee is invited to join.
    **kwargs: Additional arguments.

Returns:
    An invitation instance.

Raises:
    MultipleObjectsReturned: If multiple matching users are found.
codesearchnet
def parse_args(args=None):
    parser = argparse.ArgumentParser(description="Main script to run LIVVkit.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     fromfile_prefix_chars='@')

    parser.add_argument('-o', '--out-dir',
                        default=os.path.join(os.getcwd(), "vv_" + time.strftime("%Y-%m-%d")),
                        help='Location to output the LIVVkit webpages.')

    parser.add_argument('-v', '--verify', nargs=2, default=None,
                        help='Specify the locations of the test and bench bundle to '
                             'compare (respectively).')

    parser.add_argument('-V', '--validate', action='store', nargs='+', default=None,
                        help='Specify the location of the configuration files for '
                             'validation tests.')

    parser.add_argument('-e', '--extension', action='store', nargs='+', default=None,
                        dest='validate', metavar='EXTENSION',
                        help='Specify the location of the configuration files for '
                             'LIVVkit extensions.')

    parser.add_argument('-p', '--publish', action='store_true',
                        help='Also produce a publication quality copy of the figure in '
                             'the output directory (eps, 600 dpi).')

    parser.add_argument('-s', '--serve', nargs='?', type=int, const=8000,
                        help='Start a simple HTTP server for the output website specified '
                             'by OUT_DIR on port SERVE.')

    parser.add_argument('--version', action='version',
                        version='LIVVkit {}'.format(livvkit.__version__),
                        help="Show LIVVkit's version number and exit")

    return init(parser.parse_args(args))
Handles the parsing of options for LIVVkit's command line interface.

Args:
    args: The list of arguments, typically sys.argv[1:].
juraj-google-style
def call(self,
         input_ids: TFModelInputType | None = None,
         attention_mask: np.ndarray | tf.Tensor | None = None,
         decoder_input_ids: np.ndarray | tf.Tensor | None = None,
         decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
         decoder_position_ids: np.ndarray | tf.Tensor | None = None,
         head_mask: np.ndarray | tf.Tensor | None = None,
         decoder_head_mask: np.ndarray | tf.Tensor | None = None,
         cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
         encoder_outputs: Optional[TFBaseModelOutput] = None,
         past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
         inputs_embeds: np.ndarray | tf.Tensor | None = None,
         decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
         use_cache: Optional[bool] = None,
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
         labels: tf.Tensor | None = None,
         training: Optional[bool] = False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
    if labels is not None:
        labels = tf.where(labels == self.config.pad_token_id,
                          tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
                          labels)
        use_cache = False
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id,
                                                   self.config.decoder_start_token_id)
    outputs = self.model(input_ids,
                         attention_mask=attention_mask,
                         decoder_input_ids=decoder_input_ids,
                         encoder_outputs=encoder_outputs,
                         decoder_attention_mask=decoder_attention_mask,
                         decoder_position_ids=decoder_position_ids,
                         head_mask=head_mask,
                         decoder_head_mask=decoder_head_mask,
                         cross_attn_head_mask=cross_attn_head_mask,
                         past_key_values=past_key_values,
                         inputs_embeds=inputs_embeds,
                         decoder_inputs_embeds=decoder_inputs_embeds,
                         use_cache=use_cache,
                         output_attentions=output_attentions,
                         output_hidden_states=output_hidden_states,
                         return_dict=return_dict,
                         training=training)
    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
    lm_logits = self.bias_layer(lm_logits)
    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
    if not return_dict:
        output = (lm_logits,) + outputs[1:]
        return (masked_lm_loss,) + output if masked_lm_loss is not None else output
    return TFSeq2SeqLMOutput(loss=masked_lm_loss,
                             logits=lm_logits,
                             past_key_values=outputs.past_key_values,
                             decoder_hidden_states=outputs.decoder_hidden_states,
                             decoder_attentions=outputs.decoder_attentions,
                             cross_attentions=outputs.cross_attentions,
                             encoder_last_hidden_state=outputs.encoder_last_hidden_state,
                             encoder_hidden_states=outputs.encoder_hidden_states,
                             encoder_attentions=outputs.encoder_attentions)
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
    Labels for computing the masked language modeling loss. Indices should
    either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids`
    docstring). Tokens with indices set to `-100` are ignored (masked); the
    loss is only computed for the tokens with labels in
    `[0, ..., config.vocab_size]`.

Returns:
github-repos
def _build_processor(cls, session: AppSession):
    web_processor = cls._build_web_processor(session)
    ftp_processor = cls._build_ftp_processor(session)
    delegate_processor = session.factory.new('Processor')
    delegate_processor.register('http', web_processor)
    delegate_processor.register('https', web_processor)
    delegate_processor.register('ftp', ftp_processor)
    return delegate_processor  # the docstring promises a Processor instance
Create the Processor.

Returns:
    Processor: An instance of :class:`.processor.BaseProcessor`.
codesearchnet
def delete_existing_cname(env, zone_id, dns_name):
    client = boto3.Session(profile_name=env).client('route53')
    newrecord_name = dns_name
    startrecord = find_existing_record(env, zone_id, newrecord_name,
                                       check_key='Type', check_value='CNAME')
    if startrecord:
        LOG.info('Deleting old record: %s', newrecord_name)
        _response = client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={'Changes': [{'Action': 'DELETE',
                                      'ResourceRecordSet': startrecord}]})
        LOG.debug('Response from deleting %s: %s', dns_name, _response)
Delete an existing CNAME record.

This is used when updating to multi-region for deleting old records. The
record cannot just be upserted since it changes types.

Args:
    env (str): Deployment environment.
    zone_id (str): Route53 zone id.
    dns_name (str): FQDN of application's dns entry to add/update.
codesearchnet
def _experiments_to_circuits(qobj):
    if qobj.experiments:
        circuits = []
        for x in qobj.experiments:
            quantum_registers = [QuantumRegister(i[1], name=i[0])
                                 for i in x.header.qreg_sizes]
            classical_registers = [ClassicalRegister(i[1], name=i[0])
                                   for i in x.header.creg_sizes]
            circuit = QuantumCircuit(*quantum_registers, *classical_registers,
                                     name=x.header.name)
            qreg_dict = {}
            creg_dict = {}
            for reg in quantum_registers:
                qreg_dict[reg.name] = reg
            for reg in classical_registers:
                creg_dict[reg.name] = reg
            for i in x.instructions:
                instr_method = getattr(circuit, i.name)
                qubits = []
                try:
                    for qubit in i.qubits:
                        qubit_label = x.header.qubit_labels[qubit]
                        qubits.append(qreg_dict[qubit_label[0]][qubit_label[1]])
                except Exception:
                    pass
                clbits = []
                try:
                    for clbit in i.memory:
                        clbit_label = x.header.clbit_labels[clbit]
                        clbits.append(creg_dict[clbit_label[0]][clbit_label[1]])
                except Exception:
                    pass
                params = []
                try:
                    params = i.params
                except Exception:
                    pass
                if i.name in ['snapshot']:
                    instr_method(i.label, snapshot_type=i.snapshot_type,
                                 qubits=qubits, params=params)
                elif i.name == 'initialize':
                    instr_method(params, qubits)
                else:
                    instr_method(*params, *qubits, *clbits)
            circuits.append(circuit)
        return circuits
    return None
Return a list of QuantumCircuit object(s) from a qobj.

Args:
    qobj (Qobj): The Qobj object to convert to QuantumCircuits.

Returns:
    list: A list of QuantumCircuit objects from the qobj.
juraj-google-style
def print_schema_results(results, level=0):
    for error in results.errors:
        print_level(logger.error, _RED + '[X] %s', level, error)
Print JSON Schema validation errors to stdout.

Args:
    results: An instance of ObjectValidationResults.
    level: The level at which to print the results.
codesearchnet
def _get_pmap_impl(f, devices, has_tpu):
    if has_tpu:
        output_is_list = [False]

        def recorder(args, kwargs, res):
            del args, kwargs
            output_is_list[0] = isinstance(res, list)
            return res
        f = _record_result_type(recorder, f)

    def tf_f(*tf_args):
        np_args = _tf_to_np(tf_args)
        np_out = f(*np_args)
        return np_out

    if has_tpu:
        @polymorphic_function.function(autograph=False)
        def fn(inputs):
            res = tpu.replicate(tf_f, inputs)
            if res and isinstance(res[0], list) and (len(res[0]) == 1) and (not output_is_list[0]):
                res = [x[0] for x in res]
            return res
        return fn
    else:
        jit_tf_f = polymorphic_function.function(tf_f, autograph=False)

        @polymorphic_function.function(autograph=False)
        def fn(all_per_device_args):
            results = []
            for per_device_args, device in zip(all_per_device_args, devices):
                with ops.device(device):
                    results.append(jit_tf_f(*per_device_args))
            return results
        return fn
This is a helper function to return the pmap impl.

Args:
    f: a function that takes ndarrays and returns ndarrays.
    devices: a list of strings; the device list.
    has_tpu: boolean; whether `devices` contains TPU devices.

Returns:
    A function that takes tensors and returns tensors.
github-repos
def length_of_overlap(first_start, first_end, second_start, second_end):
    if first_end <= second_start or first_start >= second_end:
        return 0.0
    if first_start < second_start:
        if first_end < second_end:
            return abs(first_end - second_start)
        return abs(second_end - second_start)
    # first_start >= second_start (an equal start previously fell through
    # and returned None; folding it into this branch gives the correct overlap).
    if first_end > second_end:
        return abs(second_end - first_start)
    return abs(first_end - first_start)
Find the length of the overlapping part of two segments.

Args:
    first_start (float): Start of the first segment.
    first_end (float): End of the first segment.
    second_start (float): Start of the second segment.
    second_end (float): End of the second segment.

Return:
    float: The amount of overlap or 0 if they don't overlap at all.
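A few spot checks (plain arithmetic, no extra dependencies needed):

print(length_of_overlap(0.0, 5.0, 3.0, 8.0))  # 2.0: the segments share [3, 5]
print(length_of_overlap(0.0, 2.0, 2.0, 4.0))  # 0.0: touching endpoints do not overlap
print(length_of_overlap(1.0, 4.0, 1.0, 3.0))  # 2.0: equal starts, shared [1, 3]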
codesearchnet
def from_string(cls, public_key):
    public_key_data = _helpers.to_bytes(public_key)
    if _CERTIFICATE_MARKER in public_key_data:
        cert = cryptography.x509.load_pem_x509_certificate(public_key_data, _BACKEND)
        pubkey = cert.public_key()
    else:
        pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
    return cls(pubkey)
Construct a Verifier instance from a public key or public certificate
string.

Args:
    public_key (Union[str, bytes]): The public key in PEM format or the
        x509 public key certificate.

Returns:
    Verifier: The constructed verifier.

Raises:
    ValueError: If the public key can't be parsed.
codesearchnet
def prepare_adiabatic_limit(slh, k=None):
    if k is None:
        k = symbols('k', positive=True)
    Ld = slh.L.dag()
    LdL = (Ld * slh.L)[0, 0]
    K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
    N = slh.S.dag()
    B, A, Y = K.series_expand(k, 0, 2)
    G, F = Ld.series_expand(k, 0, 1)
    return Y, A, B, F, G, N
Prepare the adiabatic elimination on an SLH object.

Args:
    slh: The SLH object to take the limit for.
    k: The scaling parameter $k \rightarrow \infty$. The default is a
        positive symbol 'k'.

Returns:
    tuple: The objects ``Y, A, B, F, G, N`` necessary to compute the
        limiting system.
codesearchnet
def BuildDefaultValue(self, value_cls):
    try:
        return value_cls()
    except Exception as e:
        logging.exception(e)
        raise DefaultValueError(
            "Can't create default for value %s: %s" % (value_cls.__name__, e))
Renders default value of a given class.

Args:
    value_cls: Default value of this class will be rendered. This class has
        to be (or to be a subclass of) a self.value_class (i.e. a class that
        this renderer is capable of rendering).

Returns:
    An initialized default value.

Raises:
    DefaultValueError: if something goes wrong.
juraj-google-style
def GetMap(self, cache_info, data):
    entries = collections.defaultdict(dict)
    for line in json.loads(cache_info.read()):
        key = line.get('Key', '').split('/')
        value = line.get('Value', '')
        if not value or not key:
            continue
        value = base64.b64decode(value)
        name = str(key[-2])
        entry_piece = key[-1]
        entries[name][entry_piece] = value
    for name, entry in list(entries.items()):
        map_entry = self._ReadEntry(name, entry)
        if map_entry is None:
            self.log.warning('Could not create entry from line %r in cache, skipping',
                             entry)
            continue
        if not data.Add(map_entry):
            self.log.warning('Could not add entry %r read from line %r in cache',
                             map_entry, entry)
    return data
Returns a map from a cache.

Args:
    cache_info: file-like object containing the cache.
    data: a Map to populate.

Returns:
    A child of Map containing the cache data.
github-repos
def _anonymous_match(self, struct1, struct2, fu, s1_supercell=True,
                     use_rms=False, break_on_match=False, single_match=False):
    if not isinstance(self._comparator, SpeciesComparator):
        raise ValueError('Anonymous fitting currently requires SpeciesComparator')

    sp1 = struct1.composition.elements
    sp2 = struct2.composition.elements
    if len(sp1) != len(sp2):
        return None

    ratio = fu if s1_supercell else 1 / fu
    swapped = len(struct1) * ratio < len(struct2)

    s1_comp = struct1.composition
    s2_comp = struct2.composition
    matches = []
    for perm in itertools.permutations(sp2):
        sp_mapping = dict(zip(sp1, perm))
        mapped_comp = Composition({sp_mapping[k]: v for k, v in s1_comp.items()})
        if (not self._subset) and (
                self._comparator.get_hash(mapped_comp) !=
                self._comparator.get_hash(s2_comp)):
            continue
        mapped_struct = struct1.copy()
        mapped_struct.replace_species(sp_mapping)
        if swapped:
            m = self._strict_match(struct2, mapped_struct, fu, (not s1_supercell),
                                   use_rms, break_on_match)
        else:
            m = self._strict_match(mapped_struct, struct2, fu, s1_supercell,
                                   use_rms, break_on_match)
        if m:
            matches.append((sp_mapping, m))
            if single_match:
                break
    return matches
Tries all permutations of matching struct1 to struct2.

Args:
    struct1, struct2 (Structure): Preprocessed input structures.

Returns:
    List of (mapping, match) tuples.
juraj-google-style
def release(self, connection: Connection, reuse: bool=True):
    yield from self._condition.acquire()
    self.busy.remove(connection)
    if reuse:
        self.ready.add(connection)
    self._condition.notify()
    self._condition.release()
Unregister a connection.

Args:
    connection: Connection instance returned from :meth:`acquire`.
    reuse: If True, the connection is made available for reuse.

Coroutine.
codesearchnet
def __init__(self, descriptor_db=None):
    self._internal_db = descriptor_database.DescriptorDatabase()
    self._descriptor_db = descriptor_db
    self._descriptors = {}
    self._enum_descriptors = {}
    self._service_descriptors = {}
    self._file_descriptors = {}
    self._toplevel_extensions = {}
    self._file_desc_by_toplevel_extension = {}
    self._extensions_by_name = collections.defaultdict(dict)
    self._extensions_by_number = collections.defaultdict(dict)
Initializes a Pool of proto buffs.

The descriptor_db argument to the constructor is provided to allow
specialized file descriptor proto lookup code to be triggered on demand. An
example would be an implementation which will read and compile a file
specified in a call to FindFileByName() and not require the call to Add()
at all. Results from this database will be cached internally here as well.

Args:
    descriptor_db: A secondary source of file descriptors.
juraj-google-style
def resolve(self, method, path):
    if method in self._literal and path in self._literal[method]:
        return (self._literal[method][path], [], {})
    else:
        return self._resolve_non_literal_route(method, path)
Resolve a request to a route handler.

Arguments:
    method (str): HTTP method, e.g. GET, POST, etc.
    path (str): Request path.

Returns:
    tuple or None: A tuple of three items:

    1. Route handler (callable)
    2. Positional arguments (list)
    3. Keyword arguments (dict)

    ``None`` if no route matches the request.
codesearchnet
def impad(img, shape, pad_val=0):
    if not isinstance(pad_val, (int, float)):
        assert len(pad_val) == img.shape[-1]
    if len(shape) < len(img.shape):
        shape = shape + (img.shape[-1], )
    assert len(shape) == len(img.shape)
    for i in range(len(shape) - 1):
        assert shape[i] >= img.shape[i]
    pad = np.empty(shape, dtype=img.dtype)
    pad[...] = pad_val
    pad[:img.shape[0], :img.shape[1], ...] = img
    return pad
Pad an image to a certain shape.

Args:
    img (ndarray): Image to be padded.
    shape (tuple): Expected padding shape.
    pad_val (number or sequence): Values to be filled in padding areas.

Returns:
    ndarray: The padded image.
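A minimal usage sketch with NumPy (assumes impad is in scope); the original image stays in the top-left corner and the rest is filled with pad_val:

import numpy as np

img = np.ones((2, 3), dtype=np.uint8)      # a 2x3 grayscale image
padded = impad(img, (4, 5), pad_val=255)   # pad to 4x5
print(padded.shape)   # (4, 5)
print(padded[1, 2])   # 1   -- original pixels preserved
print(padded[3, 4])   # 255 -- padded area carries pad_val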
juraj-google-style
def allsame(iterable, eq=operator.eq):
    iter_ = iter(iterable)
    try:
        first = next(iter_)
    except StopIteration:
        return True
    return all(eq(first, item) for item in iter_)
Determine if all items in a sequence are the same.

Args:
    iterable (Iterable): items to determine if they are all the same.
    eq (Callable, optional): function to determine equality
        (default: operator.eq).

Example:
    >>> allsame([1, 1, 1, 1])
    True
    >>> allsame([])
    True
    >>> allsame([0, 1])
    False
    >>> iterable = iter([0, 1, 1, 1])
    >>> next(iterable)
    0
    >>> allsame(iterable)
    True
    >>> allsame(range(10))
    False
    >>> allsame(range(10), lambda a, b: True)
    True
codesearchnet
def _get_target_dtype(self, from_dtype: Optional[_NpDType]) -> Optional[_NpDType]:
    ...  # body not present in the source record
Validate and normalize the numpy dtype.

Args:
    from_dtype: DType of the array to cast.

Returns:
    to_dtype: DType of the array after casting.
github-repos
def wait_all(jobs, timeout=None):
    return Job._wait(jobs, timeout, concurrent.futures.ALL_COMPLETED)
Return when all of the specified jobs have completed or the timeout expires.

Args:
    jobs: a Job or list of Jobs to wait on.
    timeout: a timeout in seconds to wait for. None (the default) means no
        timeout.

Returns:
    A list of the jobs that have now completed or None if there were no jobs.
juraj-google-style
def __call__(self,
             text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
             text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
             boxes: Optional[Union[List[List[int]], List[List[List[int]]]]] = None,
             word_labels: Optional[Union[List[int], List[List[int]]]] = None,
             add_special_tokens: bool = True,
             padding: Union[bool, str, PaddingStrategy] = False,
             truncation: Union[bool, str, TruncationStrategy] = None,
             max_length: Optional[int] = None,
             stride: int = 0,
             pad_to_multiple_of: Optional[int] = None,
             padding_side: Optional[str] = None,
             return_tensors: Optional[Union[str, TensorType]] = None,
             return_token_type_ids: Optional[bool] = None,
             return_attention_mask: Optional[bool] = None,
             return_overflowing_tokens: bool = False,
             return_special_tokens_mask: bool = False,
             return_offsets_mapping: bool = False,
             return_length: bool = False,
             verbose: bool = True,
             **kwargs) -> BatchEncoding:

    def _is_valid_text_input(t):
        if isinstance(t, str):
            return True
        elif isinstance(t, (list, tuple)):
            if len(t) == 0:
                return True
            elif isinstance(t[0], str):
                return True
            elif isinstance(t[0], (list, tuple)):
                return len(t[0]) == 0 or isinstance(t[0][0], str)
            else:
                return False
        else:
            return False

    if text_pair is not None:
        if not _is_valid_text_input(text):
            raise ValueError('text input must be of type `str` (single example) or '
                             '`List[str]` (batch of examples).')
        if not isinstance(text_pair, (list, tuple)):
            raise ValueError('Words must be of type `List[str]` (single pretokenized '
                             'example), or `List[List[str]]` (batch of pretokenized examples).')
    elif not isinstance(text, (list, tuple)):
        raise ValueError('Words must be of type `List[str]` (single pretokenized '
                         'example), or `List[List[str]]` (batch of pretokenized examples).')

    if text_pair is not None:
        is_batched = isinstance(text, (list, tuple))
    else:
        is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

    words = text if text_pair is None else text_pair
    if boxes is None:
        raise ValueError('You must provide corresponding bounding boxes')
    if is_batched:
        if len(words) != len(boxes):
            raise ValueError('You must provide words and boxes for an equal amount of examples')
        for words_example, boxes_example in zip(words, boxes):
            if len(words_example) != len(boxes_example):
                raise ValueError('You must provide as many words as there are bounding boxes')
    elif len(words) != len(boxes):
        raise ValueError('You must provide as many words as there are bounding boxes')

    if is_batched:
        if text_pair is not None and len(text) != len(text_pair):
            raise ValueError(f'batch length of `text`: {len(text)} does not match batch '
                             f'length of `text_pair`: {len(text_pair)}.')
        batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
        is_pair = bool(text_pair is not None)
        return self.batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair,
            boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens,
            padding=padding, truncation=truncation, max_length=max_length, stride=stride,
            pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side,
            return_tensors=return_tensors, return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, **kwargs)
    else:
        return self.encode_plus(
            text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels,
            add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side, return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, **kwargs)
Main method to tokenize and prepare for the model one or several sequence(s)
or one or several pair(s) of sequences with word-level normalized bounding
boxes and optional labels.

Args:
    text (`str`, `List[str]`, `List[List[str]]`):
        The sequence or batch of sequences to be encoded. Each sequence can
        be a string, a list of strings (words of a single example or
        questions of a batch of examples) or a list of list of strings
        (batch of words).
    text_pair (`List[str]`, `List[List[str]]`):
        The sequence or batch of sequences to be encoded. Each sequence
        should be a list of strings (pretokenized string).
    boxes (`List[List[int]]`, `List[List[List[int]]]`):
        Word-level bounding boxes. Each bounding box should be normalized to
        be on a 0-1000 scale.
    word_labels (`List[int]`, `List[List[int]]`, *optional*):
        Word-level integer labels (for token classification tasks such as
        FUNSD, CORD).
github-repos
def explore_package(module_name):
    packages = []
    loader = pkgutil.get_loader(module_name)
    for sub_module in pkgutil.walk_packages([os.path.dirname(loader.get_filename())],
                                            prefix=module_name + '.'):
        _, sub_module_name, _ = sub_module
        packages.append(sub_module_name)
    return packages
Returns all the packages in the module.

Args:
    module_name: name of module.

Returns:
    A list of the fully qualified names of the packages and submodules
    found under the module.
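For instance, walking the standard-library json package on a typical CPython install (exact results vary by interpreter version):

print(explore_package('json'))
# e.g. ['json.decoder', 'json.encoder', 'json.scanner', 'json.tool']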
codesearchnet
def run(self, timeout=-1):
    def target():
        self.process = subprocess.Popen(self.cmd, stdout=self.stdout_dest,
                                        stderr=self.stderr_dest, shell=self.shell)
        stdout, stderr = self.process.communicate()
        if self.decode_out:
            if stdout:
                self.stdout = stdout.decode('utf-8')
            if stderr:
                self.stderr = stderr.decode('utf-8')

    thread = threading.Thread(target=target)
    thread.start()
    if timeout > 0:
        thread.join(timeout)
        if thread.is_alive():
            self.process.terminate()
            thread.join()
            raise SubprocessError('Reached timeout after {t} seconds'.format(t=timeout))
    else:
        thread.join()
    return self.process.returncode, self.stdout, self.stderr
Run the subprocess.

Arguments:
    timeout (optional): If a positive real value, then time out after the
        given number of seconds.

Raises:
    SubprocessError: If the subprocess has not completed after "timeout"
        seconds.
codesearchnet
def search(self, tags=None):
    if isinstance(tags, str):
        tags = [tags]
    return self.workbench.generate_sample_set(tags)
Wrapper for the Workbench search method.

Args:
    tags: a single tag 'pcap' or a list of tags to search for
        ['bad','aptz13'].

Returns:
    A sample_set that contains the md5s for all matching samples.
juraj-google-style
def AsRegEx(self):
    parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
    result = u''.join(self._ReplaceRegExPart(p) for p in parts)
    return rdf_standard.RegularExpression(u'(?i)\\A%s\\Z' % result)
Return the current glob as a simple regex.

Note: No interpolation is performed.

Returns:
    A RegularExpression() object.
codesearchnet
def handle_or_else(self, orelse, test):
    if isinstance(orelse[0], ast.If):
        control_flow_node = self.visit(orelse[0])
        control_flow_node.test.label = 'el' + control_flow_node.test.label
        test.connect(control_flow_node.test)
        return control_flow_node.last_nodes
    else:
        else_connect_statements = self.stmt_star_handler(
            orelse, prev_node_to_avoid=self.nodes[-1])
        test.connect(else_connect_statements.first_statement)
        return else_connect_statements.last_statements
Handle the orelse part of an if or try node.

Args:
    orelse (list[Node])
    test (Node)

Returns:
    The last nodes of the orelse branch.
codesearchnet
def egress(self, envelope, http_headers, operation, binding_options):
    custom_headers = self._header_handler.GetHTTPHeaders()
    http_headers.update(custom_headers)
    return envelope, http_headers
Overriding the egress function to set our headers.

Args:
    envelope: An Element with the SOAP request data.
    http_headers: A dict of the current http headers.
    operation: The SoapOperation instance.
    binding_options: An options dict for the SOAP binding.

Returns:
    A tuple of the envelope and headers.
juraj-google-style
def update_asset(self, asset, asset_id, asset_name, asset_type):
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    if asset == 'PHONE':
        return self.tc_requests.update_victim_phone_asset(self.unique_id, asset_id,
                                                          asset_name)
    if asset == 'EMAIL':
        return self.tc_requests.update_victim_email_asset(self.unique_id, asset_id,
                                                          asset_name, asset_type)
    if asset == 'NETWORK':
        return self.tc_requests.update_victim_network_asset(self.unique_id, asset_id,
                                                            asset_name, asset_type)
    if asset == 'SOCIAL':
        return self.tc_requests.update_victim_social_asset(self.unique_id, asset_id,
                                                           asset_name, asset_type)
    if asset == 'WEB':
        return self.tc_requests.update_victim_web_asset(self.unique_id, asset_id,
                                                        asset_name)
    self._tcex.handle_error(925, ['asset_type', 'update_asset', 'asset_type',
                                  'asset_type', asset_type])
    return None
Update an asset of a Victim.

Valid asset_type values:
    + PHONE
    + EMAIL
    + NETWORK
    + SOCIAL
    + WEB

Args:
    asset: The asset category to update.
    asset_id: The id of the asset.
    asset_name: The new name for the asset.
    asset_type: PHONE, EMAIL, NETWORK, SOCIAL, or WEB.

Returns:
    The API response, or None for an unrecognized asset.
codesearchnet
def _calculate_minimum_silent_period(baudrate):
    _checkNumerical(baudrate, minvalue=1, description='baudrate')
    BITTIMES_PER_CHARACTERTIME = 11
    MINIMUM_SILENT_CHARACTERTIMES = 3.5
    bittime = 1 / float(baudrate)
    return bittime * BITTIMES_PER_CHARACTERTIME * MINIMUM_SILENT_CHARACTERTIMES
Calculate the silent period length to comply with the 3.5 character
silence between messages.

Args:
    baudrate (numerical): The baudrate for the serial port.

Returns:
    The number of seconds (float) that should pass between each message on
    the bus.

Raises:
    ValueError, TypeError.
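Working through the numbers for a common baudrate: at 19200 baud one bit takes 1/19200 s (about 52.1 microseconds), one character is 11 bit times (about 0.573 ms), so 3.5 character times come to roughly 2.0 ms of required silence:

print(_calculate_minimum_silent_period(19200))  # ~0.002005 s, i.e. about 2.0 ms
print(_calculate_minimum_silent_period(2400))   # ~0.016 s; slower buses need longer gaps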
codesearchnet
def wait_for(self, pattern, timeout=None):
    should_continue = True
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)

    def stop(signum, frame):
        nonlocal should_continue
        if should_continue:
            raise TimeoutError()

    if timeout:
        signal.signal(signal.SIGALRM, stop)
        signal.alarm(timeout)
    while should_continue:
        output = self.poll_output() + self.poll_error()
        filtered = [line for line in output if re.match(pattern, line)]
        if filtered:
            should_continue = False
Block until a pattern has been found in stdout and stderr.

Args:
    pattern (:class:`~re.Pattern`): The pattern to search.
    timeout (int): Maximum number of seconds to wait. If None, wait
        infinitely.

Raises:
    TimeoutError: When the timeout is reached.
juraj-google-style
def try_to_create_directory(directory_path):
    logger = logging.getLogger("ray")
    directory_path = os.path.expanduser(directory_path)
    if not os.path.exists(directory_path):
        try:
            os.makedirs(directory_path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise e
            logger.warning(
                "Attempted to create '{}', but the directory already "
                "exists.".format(directory_path))
    try:
        os.chmod(directory_path, 0o0777)
    except OSError as e:
        if e.errno in [errno.EACCES, errno.EPERM]:
            pass
        else:
            raise
Attempt to create a directory that is globally readable/writable.

Args:
    directory_path: The path of the directory to create.
juraj-google-style
def get_coordination_service_leader(self):
    return '/job:' + self.get_job_name() + '/task:0'
Returns the location for coordination service.

The coordination service should be located on TPU worker0.

Returns:
    A string indicating the location path.
github-repos
def set_global(cls, user_agent=None, user_agent_config_yaml=None,
               user_agent_lookup=None, **kwargs):
    cls.user_agent = cls._create(user_agent, user_agent_config_yaml,
                                 user_agent_lookup, **kwargs)
Set global user agent string.

Args:
    user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X-
        is prefixed.
    user_agent_config_yaml (Optional[str]): Path to YAML user agent
        configuration. Ignored if user_agent supplied. Defaults to
        ~/.useragent.yml.
    user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if
        user_agent supplied.

Returns:
    None
juraj-google-style
def GetVShadowStoreByPathSpec(self, path_spec):
    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)
    if store_index is None:
        return None
    return self._vshadow_volume.get_store(store_index)
Retrieves a VSS store for a path specification.

Args:
    path_spec (PathSpec): path specification.

Returns:
    pyvshadow.store: a VSS store or None if not available.
juraj-google-style
def connect_from(self, vertex, weight=1):
    for edge in self.edges_in:
        if vertex == edge.vertex_out:
            return edge
    return Edge(vertex, self, weight)
Connect another vertex to this one.

Args:
    vertex (Vertex): vertex to connect from.
    weight (int): weight of the edge.

Returns:
    Edge: the newly created edge.
juraj-google-style
def find_furious_yaml(config_file=__file__):
    checked = set()
    result = _find_furious_yaml(os.path.dirname(config_file), checked)
    if not result:
        result = _find_furious_yaml(os.getcwd(), checked)
    return result
Traverse directory trees to find a furious.yaml file.

Begins with the location of this file, then checks the working directory
if not found.

Args:
    config_file: location of this file, override for testing.

Returns:
    the path of furious.yaml, or None if not found.
juraj-google-style
def create_feature_map(features, feature_indices, output_dir):
    feature_map = []
    for name, info in feature_indices:
        transform_name = features[name]['transform']
        source_column = features[name]['source_column']
        if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
            feature_map.append((info['index_start'], name))
        elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
            vocab, _ = read_vocab_file(
                os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
            for i, word in enumerate(vocab):
                if transform_name == ONE_HOT_TRANSFORM:
                    feature_map.append((info['index_start'] + i,
                                        '%s=%s' % (source_column, word)))
                elif transform_name == MULTI_HOT_TRANSFORM:
                    feature_map.append((info['index_start'] + i,
                                        '%s has "%s"' % (source_column, word)))
        elif transform_name == IMAGE_TRANSFORM:
            for i in range(info['size']):
                feature_map.append((info['index_start'] + i,
                                    '%s image feature %d' % (source_column, i)))
    return feature_map
Returns feature_map about the transformed features.

feature_map includes information such as:
    1, cat1=0
    2, cat1=1
    3, numeric1
    ...

Returns:
    List in the form [(index, feature_description)].
codesearchnet
def correlate(x1, x2, mode='valid'):
    if any_symbolic_tensors((x1, x2)):
        return Correlate(mode=mode).symbolic_call(x1, x2)
    return backend.numpy.correlate(x1, x2, mode=mode)
Compute the cross-correlation of two 1-dimensional tensors.

Args:
    x1: First 1-dimensional input tensor of length M.
    x2: Second 1-dimensional input tensor of length N.
    mode: Either `valid`, `same` or `full`. By default the mode is set to
        `valid`, which returns an output of length max(M, N) - min(M, N) + 1.
        `same` returns an output of length max(M, N). `full` mode returns
        the convolution at each point of overlap, with an output length of
        N+M-1.

Returns:
    Output tensor, cross-correlation of `x1` and `x2`.
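The three modes mirror numpy.correlate, so the output lengths can be spot-checked with NumPy directly (values follow NumPy's documented example):

import numpy as np

x1 = [1, 2, 3]
x2 = [0, 1, 0.5]
print(np.correlate(x1, x2, 'valid'))  # [3.5]                 length max-min+1 = 1
print(np.correlate(x1, x2, 'same'))   # [2.  3.5 3. ]         length max(M, N) = 3
print(np.correlate(x1, x2, 'full'))   # [0.5 2.  3.5 3.  0.]  length M+N-1 = 5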
github-repos
def is_value_type_valid_for_exact_conditions(self, value):
    if isinstance(value, string_types) or isinstance(value, (numbers.Integral, float)):
        return True
    return False
Method to validate if the value is valid for exact match type evaluation.

Args:
    value: Value to validate.

Returns:
    Boolean: True if value is a string, boolean, or number. Otherwise False.
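Booleans satisfy the check because bool is a subclass of int (and hence of numbers.Integral). A small sketch of the same predicate, using str in place of the Py2/Py3 string_types shim:

import numbers

for value in ['browser', 10, 10.5, True, None, [1, 2]]:
    ok = isinstance(value, str) or isinstance(value, (numbers.Integral, float))
    print(value, ok)  # strings, numbers and booleans pass; None and lists do not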
juraj-google-style
def merge(inputs, name=None):
    if any(inp is None for inp in inputs):
        raise ValueError('At least one of the merge inputs is None: %s' % inputs)
    with ops.name_scope(name, 'Merge', inputs) as name:
        inputs = [ops.internal_convert_to_tensor_or_composite(inp, as_ref=True)
                  for inp in inputs]
        if all(isinstance(v, tensor_lib.Tensor) for v in inputs):
            if all(v.dtype._is_ref_dtype for v in inputs):
                return gen_control_flow_ops.ref_merge(inputs, name)
            else:
                return gen_control_flow_ops.merge(inputs, name)
        else:
            if all(isinstance(v, (indexed_slices.IndexedSlices, tensor_lib.Tensor))
                   for v in inputs):
                inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
            for v in inputs:
                if not isinstance(v, composite_tensor.CompositeTensor):
                    raise TypeError('Type %s not supported' % type(v))
            for v in inputs[1:]:
                nest.assert_same_structure(inputs[0], v, expand_composites=True)
            flat_inputs = [nest.flatten(v, expand_composites=True) for v in inputs]
            merged_results = [gen_control_flow_ops.merge(component)
                              for component in zip(*flat_inputs)]
            flat_merged = [tensor for tensor, _ in merged_results]
            chosen_index = merged_results[0][1]
            merged_inputs = nest.pack_sequence_as(inputs[0], flat_merged,
                                                  expand_composites=True)
            return (merged_inputs, chosen_index)
Returns the value of an available element of `inputs`.

This op tests each of the tensors in `inputs` in turn to determine if any
of them is available. If it finds an available tensor, it returns it and
its index in `inputs`. It is an error if more than one tensor in `inputs`
is available. If no tensor in `inputs` is available, the returned tensor
and index are not set.

This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of
`Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
before merging.

Args:
    inputs: The input tensors, at most one of which is available.
    name: A name for this operation (optional).

Returns:
    A tuple containing the chosen input tensor and its index in `inputs`.

Raises:
    ValueError: If any of the inputs is None, or inputs are IndexedSlices
        and some but not all have a dense_shape property.
github-repos
def hkl_transformation(transf, miller_index):
    # The product of two integers is always a common multiple, which is all
    # that is needed here to clear the fractions in the transformation matrix.
    lcm = lambda a, b: a * b
    reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf)
                                  if i != 0]) * transf
    reduced_transf = reduced_transf.astype(int)
    t_hkl = np.dot(reduced_transf, miller_index)
    d = abs(reduce(gcd, t_hkl))
    t_hkl = np.array([int(i / d) for i in t_hkl])
    # Prefer the sign convention with at most one negative index.
    if len([i for i in t_hkl if i < 0]) > 1:
        t_hkl *= -1
    return tuple(t_hkl)
Returns the Miller index from setting A to B using a transformation matrix.

Args:
    transf (3x3 array): The transformation matrix that transforms a lattice
        of A to B.
    miller_index ([h, k, l]): Miller index to transform to setting B.
juraj-google-style
def MeshViewers(shape=(1, 1), titlebar='Mesh Viewers', keepalive=False,
                window_width=1280, window_height=960):
    if not test_for_opengl():
        return Dummy()
    mv = MeshViewerLocal(shape=shape, titlebar=titlebar, uid=None,
                         keepalive=keepalive, window_width=window_width,
                         window_height=window_height)
    return mv.get_subwindows()
Allows subplot-style inspection of primitives in multiple subwindows.

Args:
    shape: a tuple indicating the number of vertical and horizontal windows
        requested.

Returns:
    a list of lists of MeshViewer objects: one per window requested.
codesearchnet
def get_propagator(name):
    from .sgp4 import Sgp4
    from .sgp4beta import Sgp4Beta

    scope = locals().copy()
    scope.update(globals())
    if name not in scope:
        raise UnknownPropagatorError(name)
    return scope[name]
Retrieve a named propagator.

Args:
    name (str): Name of the desired propagator.

Return:
    Propagator class.
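Lookup is by class name: the two imports at the top of the body put Sgp4 and Sgp4Beta into locals(), so both resolve (a sketch; UnknownPropagatorError is assumed to come from the same module):

propagator_cls = get_propagator('Sgp4')       # found via locals() after the import
beta_cls = get_propagator('Sgp4Beta')         # likewise
get_propagator('NoSuchPropagator')            # raises UnknownPropagatorError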
codesearchnet
def set_examples(self, examples):
    self.store('examples', examples)
    if len(examples) > 0:
        self.store('are_sequence_examples',
                   isinstance(examples[0], tf.train.SequenceExample))
    return self
Sets the examples to be displayed in WIT.

Args:
    examples: List of example protos.

Returns:
    self, to enable method chaining.
codesearchnet
def load_words(self, words):
    self._dictionary.update([word.lower() for word in words])
    self._update_dictionary()
Load a list of words from which to generate a word frequency list.

Args:
    words (list): The list of words to be loaded.
codesearchnet
def init_pool_generator(gens, random_seed=None, id_queue=None):
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = gens

    worker_proc = multiprocessing.current_process()
    worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)

    if random_seed is not None:
        np.random.seed(random_seed + worker_proc.ident)

    if id_queue is not None:
        id_queue.put(worker_proc.ident, block=True, timeout=0.1)
Initializer function for pool workers.

Args:
    gens: State which should be made available to worker processes.
    random_seed: An optional value with which to seed child processes.
    id_queue: A multiprocessing Queue of worker ids. This is used to
        indicate that a worker process was created by Keras and can be
        terminated using the cleanup_all_keras_forkpools utility.
github-repos
def halted(self):
    result = int(self._dll.JLINKARM_IsHalted())
    if result < 0:
        raise errors.JLinkException(result)
    return result > 0
Returns whether the CPU core was halted.

Args:
    self (JLink): the ``JLink`` instance.

Returns:
    ``True`` if the CPU core is halted, otherwise ``False``.

Raises:
    JLinkException: on device errors.
codesearchnet
def cli_cmd_to_string(args):
    if isinstance(args, basestring):
        return args
    return ' '.join([pipes.quote(arg) for arg in args])
Converts a cmd arg list to string.

Args:
    args: list of strings, the arguments of a command.

Returns:
    String representation of the command.
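Arguments containing shell metacharacters come back safely quoted, while a plain string is returned unchanged:

print(cli_cmd_to_string(['ls', '-l', 'my file']))  # ls -l 'my file'
print(cli_cmd_to_string('already a string'))       # already a string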
codesearchnet
def list_offers(access_token, subscription_id, location, publisher):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Compute/',
                        'locations/', location,
                        '/publishers/', publisher,
                        '/artifacttypes/vmimage/offers?api-version=', COMP_API])
    return do_get(endpoint, access_token)
List available VM image offers from a publisher.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.
    location (str): Azure data center location. E.g. westus.
    publisher (str): Publisher name, e.g. Canonical.

Returns:
    HTTP response with JSON list of image offers.
juraj-google-style
def write_index(self, overwrite: bool = False, mock: bool = False) -> None:
    write_if_allowed(self.index_filename, self.index_content(),
                     overwrite=overwrite, mock=mock)
Writes the index file, if permitted.

Args:
    overwrite: allow existing files to be overwritten?
    mock: pretend to write, but don't
juraj-google-style
def parse_radl(data):
    if data is None:
        return None
    elif os.path.isfile(data):
        f = open(data)
        data = ''.join(f.readlines())
        f.close()
    elif data.strip() == '':
        return RADL()
    data = data + '\n'
    parser = RADLParser(lextab='radl')
    return parser.parse(data)
Parse a RADL document.

Args:
    data (str): filepath to a RADL content or a string with content.

Return:
    RADL object.
codesearchnet
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
    inputs = tf.convert_to_tensor(feature_map["inputs"])
    input_is_image = False if len(inputs.get_shape()) < 3 else True

    x = inputs
    if input_is_image:
        x = tf.image.resize_images(x, [299, 299])
        x = tf.reshape(x, [1, 299, 299, -1])
        x = tf.to_int32(x)
    else:
        num_samples = x[0]
        length = x[2]
        x = tf.slice(x, [3], tf.to_int32([length]))
        x = tf.reshape(x, [1, -1, 1, 1])
        x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))

    p_hparams = hparams.problem_hparams
    input_space_id = tf.constant(p_hparams.input_space_id)
    target_space_id = tf.constant(p_hparams.target_space_id)

    features = {}
    features["input_space_id"] = input_space_id
    features["target_space_id"] = target_space_id
    features["decode_length"] = (IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
    features["inputs"] = x
    return features
Convert the interactive input format (see above) to a dictionary.

Args:
    feature_map: dict with inputs.
    hparams: model hyperparameters.

Returns:
    a features dictionary, as expected by the decoder.
juraj-google-style
def __init__(self,
             name: str,
             func: Callable[..., T],
             args: Iterable[Expression],
             proxy: Optional[T] = None,
             _id: Optional[str] = None,
             requires_partition_by: partitionings.Partitioning = partitionings.Index(),
             preserves_partition_by: partitionings.Partitioning = partitionings.Singleton()):
    if not _get_allow_non_parallel() and isinstance(requires_partition_by,
                                                    partitionings.Singleton):
        reason = requires_partition_by.reason or (
            f'Encountered non-parallelizable form of {name!r}.')
        raise NonParallelOperation(
            f"{reason}\nConsider using an allow_non_parallel_operations block "
            "if you're sure you want to do this. See https:")  # URL truncated in the source record
    args = tuple(args)
    if proxy is None:
        proxy = func(*(arg.proxy() for arg in args))
    super().__init__(name, proxy, _id)
    self._func = func
    self._args = args
    self._requires_partition_by = requires_partition_by
    self._preserves_partition_by = preserves_partition_by
Initialize a computed expression.

Args:
    name: The name of this expression.
    func: The function that will be used to compute the value of this
        expression. Should accept arguments of the types returned when
        evaluating the `args` expressions.
    args: The list of expressions that will be used to produce inputs to
        `func`.
    proxy: (Optional) a proxy object with same type as the objects that this
        ComputedExpression will produce at execution time. If not provided, a
        proxy will be generated using `func` and the proxies of `args`.
    _id: (Optional) a string to uniquely identify this expression.
    requires_partition_by: The required (common) partitioning of the args.
    preserves_partition_by: The level of partitioning preserved.
github-repos
def __init__(self, matrix: np.ndarray) -> None:
    if matrix.shape != (2, 2) or not linalg.is_unitary(matrix):
        raise ValueError('Not a 2x2 unitary matrix: {}'.format(matrix))
    self._matrix = matrix
Initializes the single-qubit matrix gate (the constructor validates a 2x2
unitary matrix).

Args:
    matrix: The matrix that defines the gate.
juraj-google-style
def update(self, jump):
    atom = jump.initial_site.atom
    dr = jump.dr(self.cell_lengths)
    jump.final_site.occupation = atom.number
    jump.final_site.atom = atom
    jump.final_site.is_occupied = True
    jump.initial_site.occupation = 0
    jump.initial_site.atom = None
    jump.initial_site.is_occupied = False
    atom.site = jump.final_site
    atom.number_of_hops += 1
    atom.dr += dr
    atom.summed_dr2 += np.dot(dr, dr)
Update the lattice state by accepting a specific jump.

Args:
    jump (Jump): The jump that has been accepted.

Returns:
    None.
juraj-google-style
def replace_output(self, output, tag=None):
    if isinstance(output, pvalue.DoOutputsTuple):
        self.replace_output(output[output._main_tag])
    elif isinstance(output, pvalue.PValue):
        self.outputs[tag] = output
    elif isinstance(output, dict):
        for output_tag, out in output.items():
            self.outputs[output_tag] = out
    else:
        raise TypeError('Unexpected output type: %s' % output)
    from apache_beam.transforms import external
    if isinstance(self.transform, external.ExternalTransform):
        self.transform.replace_named_outputs(self.named_outputs())
Replaces the output defined by the given tag with the given output.

Args:
    output: replacement output.
    tag: tag of the output to be replaced.
github-repos
def _parse_trunk_groups(self, config):
    values = TRUNK_GROUP_RE.findall(config)
    return dict(trunk_groups=values)
_parse_trunk_groups scans the provided configuration block and extracts all
the vlan trunk groups. If no trunk groups are configured, an empty list is
returned as the value. The return dict is intended to be merged into the
response dict.

Args:
    config (str): The vlan configuration block from the node's running
        configuration.

Returns:
    dict: resource dict attribute.
codesearchnet
def on_merge(self, to_be_merged, merge_result, context):
    pass
Called when multiple windows are merged.

Args:
    to_be_merged: the set of windows to be merged.
    merge_result: the window into which the windows are being merged.
    context: a context (e.g. a TriggerContext instance) for managing state
        and setting timers.
github-repos
def get_snapshot_by(self, volume_id_or_uri, field, value):
    uri = self.__build_volume_snapshot_uri(volume_id_or_uri)
    return self._client.get_by(field, value, uri=uri)
Gets all snapshots that match the filter.

The search is case-insensitive.

Args:
    volume_id_or_uri: Can be either the volume id or the volume uri.
    field: Field name to filter.
    value: Value to filter.

Returns:
    list: Snapshots.
juraj-google-style
def cumprod(vari, axis=None):
    if isinstance(vari, Poly):
        if np.prod(vari.shape) == 1:
            return vari.copy()
        if axis is None:
            vari = chaospy.poly.shaping.flatten(vari)
            axis = 0
        vari = chaospy.poly.shaping.rollaxis(vari, axis)
        out = [vari[0]]
        for poly in vari[1:]:
            out.append(out[-1] * poly)
        return Poly(out, vari.dim, vari.shape, vari.dtype)
    return np.cumprod(vari, axis)
Perform the cumulative product of a shapeable quantity over a given axis.

Args:
    vari (chaospy.poly.base.Poly, numpy.ndarray): Input data.
    axis (int): Axis over which the product is taken. By default ``axis``
        is None, and all elements are multiplied.

Returns:
    (chaospy.poly.base.Poly): An array shaped as ``vari`` but with the
        specified axis removed.

Examples:
    >>> vari = cp.prange(4)
    >>> print(vari)
    [1, q0, q0^2, q0^3]
    >>> print(cp.cumprod(vari))
    [1, q0, q0^3, q0^6]
codesearchnet
def log(x):
    return math_ops.log(x)
Element-wise log.

Args:
    x: Tensor or variable.

Returns:
    A tensor.
github-repos
def _layer_stack(mp, inputs, self_attention_bias, layers, hparams,
                 encoder_output=None, encoder_decoder_attention_bias=None):
    layers = layers.strip(",").split(",")

    # scaled_dot_product_attention_simple takes 3-d bias tensors.
    self_attention_bias_3d = mp(tf.squeeze, self_attention_bias, 1)
    if encoder_decoder_attention_bias is not None:
        encoder_decoder_attention_bias_3d = mp(
            tf.squeeze, encoder_decoder_attention_bias, 1)
    relu_dropout_broadcast_dims = (
        common_layers.comma_separated_string_to_integer_list(
            getattr(hparams, "relu_dropout_broadcast_dims", "")))
    mix_size = int(hparams.mix_fraction * hparams.hidden_size)
    accumulator = inputs
    x = inputs
    for layer_num, layer_type in enumerate(layers):
        with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
            tf.logging.info("%s_%d" % (layer_type, layer_num))
            if layer_type == "a":  # accumulate
                accumulator = mp(tf.add, x, accumulator)
                x = accumulator
            elif layer_type == "n":  # normalize
                x = mp(common_layers.apply_norm, x, hparams.norm_type,
                       hparams.hidden_size, hparams.norm_epsilon)
            elif layer_type == "d":  # dropout
                x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
            elif layer_type == "m":  # mix a fraction of channels across shards
                if mix_size > 0:
                    def _split(t):
                        return tuple(tf.split(
                            t, [mix_size, hparams.hidden_size - mix_size], 2))
                    to_mix, to_keep = mp(_split, x)
                    mixed = expert_utils.all_reduce_ring(to_mix, mp)
                    mixed = mp(tf.multiply, mixed, mp.n ** -0.5)
                    x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep)
            elif layer_type == "att":  # single-head self-attention
                q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
                       name="q_transform")
                x = mp(common_attention.scaled_dot_product_attention_simple,
                       q, x, x, self_attention_bias_3d)
                x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
                       name="o_transform")
            elif layer_type == "enc-att":  # single-head encoder-decoder attention
                q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
                       name="q_transform")
                assert encoder_output is not None
                x = mp(common_attention.scaled_dot_product_attention_simple,
                       q, encoder_output, encoder_output,
                       encoder_decoder_attention_bias_3d)
                x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
                       name="o_transform")
            elif layer_type == "multihead-att":
                x = mp(common_attention.multihead_attention, x, None,
                       self_attention_bias,
                       hparams.multihead_attention_key_channels or hparams.hidden_size,
                       hparams.multihead_attention_value_channels or hparams.hidden_size,
                       hparams.hidden_size,
                       hparams.multihead_attention_num_heads,
                       hparams.attention_dropout)
            elif layer_type == "enc-multihead-att":
                x = mp(common_attention.multihead_attention, x, encoder_output,
                       encoder_decoder_attention_bias,
                       hparams.multihead_attention_key_channels or hparams.hidden_size,
                       hparams.multihead_attention_value_channels or hparams.hidden_size,
                       hparams.hidden_size,
                       hparams.multihead_attention_num_heads,
                       hparams.attention_dropout)
            elif layer_type == "ffn":
                x = mp(common_layers.dense_relu_dense, x,
                       hparams.filter_size, hparams.hidden_size,
                       dropout=hparams.relu_dropout,
                       dropout_broadcast_dims=[relu_dropout_broadcast_dims] * mp.n)
            else:
                assert False, "unknown sublayer %s" % layer_type
    return x
A stack of layers.

Args:
    mp: a Parallelism object.
    inputs: a list of Tensors.
    self_attention_bias: list of bias Tensor for self-attention (see
        common_attention.attention_bias()).
    layers: a string.
    hparams: hyperparameters for model.
    encoder_output: optional list of tensors.
    encoder_decoder_attention_bias: optional list of tensors.

Returns:
    y: a list of Tensors.
juraj-google-style
def get_ams_access_token(accountname, accountkey):
    accountkey_encoded = urllib.parse.quote(accountkey, safe='')
    body = "grant_type=client_credentials&client_id=" + accountname + \
           "&client_secret=" + accountkey_encoded + \
           " &scope=urn%3aWindowsAzureMediaServices"
    return do_ams_auth(ams_auth_endpoint, body)
Get Media Services Authentication Token.

Args:
    accountname (str): Azure Media Services account name.
    accountkey (str): Azure Media Services key.

Returns:
    HTTP response. JSON body.
juraj-google-style
def _ParseOriginalFilename(self, file_object, format_version):
    file_offset = file_object.tell()

    if format_version == 1:
        data_type_map = self._GetDataTypeMap('recycle_bin_metadata_utf16le_string')
    else:
        data_type_map = self._GetDataTypeMap(
            'recycle_bin_metadata_utf16le_string_with_size')

    try:
        original_filename, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse original filename with error: {0!s}'.format(exception))

    if format_version == 1:
        return original_filename.rstrip('\x00')
    return original_filename.string.rstrip('\x00')
Parses the original filename. Args: file_object (FileIO): file-like object. format_version (int): format version. Returns: str: filename or None on error. Raises: ParseError: if the original filename cannot be read.
juraj-google-style
def discard_event(event: events.Event, bot_id: str = None) -> bool: if event["type"] in SKIP_EVENTS: return True elif bot_id and isinstance(event, events.Message): if event.get("bot_id") == bot_id: LOG.debug("Ignoring event: %s", event) return True elif "message" in event and event["message"].get("bot_id") == bot_id: LOG.debug("Ignoring event: %s", event) return True return False
Check if the incoming event needs to be discarded Args: event: Incoming :class:`slack.events.Event` bot_id: Id of connected bot Returns: boolean
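A hedged usage sketch; it assumes the slack-sansio style `events.Message` can be built from a raw payload dict, and uses a placeholder bot id.

```python
# Hypothetical event payload; the field names match the checks above.
incoming = events.Message({"type": "message", "bot_id": "B012345", "text": "hi"})
if discard_event(incoming, bot_id="B012345"):
    LOG.debug("dropped our own bot message")
```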
juraj-google-style
def joint_distribution(dataframe, rownames, colnames): cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True) total_observations = cont_table['All']['All'] return cont_table/total_observations
Joint Distribution Table - the contingency table normalized by the total
number of observations.

Args:
    dataframe: the pandas DataFrame containing the observations
    rownames: the column name or list of column names that make the keys of the rows
    colnames: the column name or list of column names that make the keys of the columns
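A usage sketch; it assumes the module-level `contingency_table` helper (which the function depends on) wraps `pd.crosstab` with margins enabled.

```python
import pandas as pd

df = pd.DataFrame({"sex": ["M", "F", "M", "F", "F"],
                   "smoker": ["yes", "no", "no", "no", "yes"]})
jd = joint_distribution(df, rownames="sex", colnames="smoker")
print(jd)  # every cell (including margins) is count / total observations
```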
juraj-google-style
def from_service_account_file(cls, filename, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file(filename) kwargs['credentials'] = credentials return cls(*args, **kwargs)
Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: dialogflow_v2.SessionEntityTypesClient: The constructed client.
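A hedged usage sketch, assuming the generated `dialogflow_v2.SessionEntityTypesClient` is importable; the path is a placeholder.

```python
# The classmethod wraps service_account.Credentials.from_service_account_file.
client = SessionEntityTypesClient.from_service_account_file(
    "/path/to/service-account.json")
```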
codesearchnet
def SelectArtifacts(cls, os_name=None, cpe=None, labels=None, restrict_checks=None): results = set() for condition in cls.Conditions(None, os_name, cpe, labels): trigger = condition[1:] for chk in itervalues(cls.checks): if restrict_checks and chk.check_id not in restrict_checks: continue results.update(chk.triggers.Artifacts(*trigger)) return results
Takes targeting info, identifies artifacts to fetch. Args: os_name: 0+ OS names. cpe: 0+ CPE identifiers. labels: 0+ GRR labels. restrict_checks: A list of check ids whose artifacts should be fetched. Returns: the artifacts that should be collected.
juraj-google-style
def _plot(self, axes_list, data=None):
    plot_type = self.settings['plot_style']
    if data is None:
        data = self.data
    # The original `data is not {}` compared identity, not equality, and was
    # always True; an emptiness check is what was intended.
    if data:
        if plot_type in ('main', 'two'):
            if data['random data'] is not None:
                axes_list[0].plot(data['random data'])
                axes_list[0].hold(False)
        if plot_type in ('aux', 'two', '2D'):
            if data['random data'] is not None:
                axes_list[1].plot(data['random data'])
                axes_list[1].hold(False)
        if plot_type == '2D':
            if 'image data' in data and data['image data'] is not None:
                fig = axes_list[0].get_figure()
                implot = axes_list[0].imshow(data['image data'], cmap='pink',
                                             interpolation="nearest",
                                             extent=[-1, 1, 1, -1])
                fig.colorbar(implot, label='kcounts/sec')
Plots the data on the axes objects provided in axes_list.

Args:
    axes_list: a list of axes objects; this should be implemented in each subscript
    data: data to be plotted; if None, self.data is used

Returns:
    None
juraj-google-style
def save(self, fname, mode=None, validate=True, encoding='utf-8', wd=False, inline=False, relative=False, pack=False): self._closed() if (mode is None): mode = 'abs' if pack: mode = 'pack' elif wd: mode = 'wd' elif relative: mode = 'rel' msg = "Using deprecated save method. Please save the workflow with: wf.save('{}', mode='{}'). Redirecting to new save method.".format(fname, mode) warnings.warn(msg, DeprecationWarning) modes = ('rel', 'abs', 'wd', 'inline', 'pack') if (mode not in modes): msg = 'Illegal mode "{}". Choose one of ({}).'.format(mode, ','.join(modes)) raise ValueError(msg) if validate: self.validate() dirname = os.path.dirname(os.path.abspath(fname)) if (not os.path.exists(dirname)): os.makedirs(dirname) if (mode == 'inline'): msg = "Inline saving is deprecated. Please save the workflow using mode='pack'. Setting mode to pack." warnings.warn(msg, DeprecationWarning) mode = 'pack' if (mode == 'rel'): relpath = dirname save_yaml(fname=fname, wf=self, pack=False, relpath=relpath, wd=False) if (mode == 'abs'): save_yaml(fname=fname, wf=self, pack=False, relpath=None, wd=False) if (mode == 'pack'): self._pack(fname, encoding) if (mode == 'wd'): if (self.get_working_dir() is None): raise ValueError('Working directory not set.') else: bn = os.path.basename(fname) wd_file = os.path.join(self.working_dir, bn) save_yaml(fname=wd_file, wf=self, pack=False, relpath=None, wd=True) try: shutil.copy2(wd_file, fname) except shutil.Error: pass
Save the workflow to file.

Save the workflow to a CWL file that can be run with a CWL runner.

Args:
    fname (str): file to save the workflow to.
    mode (str): one of (rel, abs, wd, inline, pack); inline is deprecated
        and redirects to pack.
    validate (bool): whether to validate the workflow before saving
        (default: True).
    encoding (str): file encoding to use (default: ``utf-8``).
    wd, inline, relative, pack (bool): deprecated boolean flags; use
        ``mode`` instead.
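A sketch of the preferred, non-deprecated calls, assuming `wf` is a workflow instance from this module; the output paths are placeholders.

```python
wf.save("out/workflow.cwl", mode="pack")  # one self-contained file
wf.save("out/workflow.cwl", mode="rel")   # step paths relative to the file
```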
codesearchnet
def correct_tables(self, generation: str) -> str: for l in generation.split('\n'): if l.count('\\begin{tabular}') > 15 or l.count('\\multicolumn') > 60 or l.count('&') > 400: generation = generation.replace(l, '') generation = generation.replace('\\begin{table} \\begin{tabular}', '\\begin{table}\n\\begin{tabular}') generation = generation.replace('\\end{tabular} \\end{table}', '\\end{tabular}\n\\end{table}') generation = generation.replace('\\end{table} Tab', '\\end{table}\nTab') generation = re.sub('(^.+)\\\\begin{tab', '\\1\\n\\\\begin{tab', generation, flags=re.M) generation = generation.replace('\\begin{tabular}{l l} & \\\\ \\end{tabular}', '') generation = generation.replace('\\begin{tabular}{}\n\n\\end{tabular}', '') return generation
Takes a generated string and fixes tables/tabulars to make them match the markdown format needed. Args: generation (str): The generated text to be postprocessed. Returns: str: The postprocessed text. Example: ```python correct_tables("\begin{table} \begin{tabular}{l l} & \ \end{tabular} \end{table}") "\begin{table} \begin{tabular}{l l} & \ \end{tabular} \end{table}" ```
github-repos
def safejoin(base, *elements): base = os.path.abspath(base) path = os.path.join(base, *elements) path = os.path.normpath(path) if not path_is_inside(path, base): raise ValueError('target path is outside of the base path') return path
Safely joins paths together. The result will always be a subdirectory under `base`, otherwise ValueError is raised. Args: base (str): base path elements (list of strings): path elements to join to base Returns: elements joined to base
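A usage sketch, assuming the module-level `path_is_inside` helper is available alongside the function.

```python
print(safejoin("/srv/data", "user", "notes.txt"))  # -> /srv/data/user/notes.txt
try:
    safejoin("/srv/data", "..", "etc", "passwd")    # normalizes outside the base
except ValueError as exc:
    print(exc)  # "target path is outside of the base path"
```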
juraj-google-style
def get_channel(self, **kwargs):
    if self.compatibility_mode:
        # `elif` prevents a successful get_channel result from being
        # clobbered (or reset to None) by the create_channel branch.
        if hasattr(self.chef_module, 'get_channel'):
            config.LOGGER.info("Calling get_channel... ")
            channel = self.chef_module.get_channel(**kwargs)
        elif hasattr(self.chef_module, 'create_channel'):
            config.LOGGER.info("Calling create_channel... ")
            channel = self.chef_module.create_channel(**kwargs)
        else:
            channel = None
        return channel
    elif hasattr(self, 'channel_info'):
        channel = ChannelNode(
            source_domain=self.channel_info['CHANNEL_SOURCE_DOMAIN'],
            source_id=self.channel_info['CHANNEL_SOURCE_ID'],
            title=self.channel_info['CHANNEL_TITLE'],
            thumbnail=self.channel_info.get('CHANNEL_THUMBNAIL'),
            language=self.channel_info.get('CHANNEL_LANGUAGE'),
            description=self.channel_info.get('CHANNEL_DESCRIPTION'),
        )
        return channel
    else:
        raise NotImplementedError('BaseChef must override the get_channel method')
Call chef script's get_channel method in compatibility mode ...or... Create a `ChannelNode` from the Chef's `channel_info` class attribute. Args: kwargs (dict): additional keyword arguments that `uploadchannel` received Returns: channel created from get_channel method or None
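A hedged sketch of the `channel_info` path, using the `BaseChef` name from the error message above; all metadata values are placeholders and subclass construction details are elided.

```python
class MyChef(BaseChef):  # hypothetical subclass
    channel_info = {
        "CHANNEL_SOURCE_DOMAIN": "example.org",
        "CHANNEL_SOURCE_ID": "my-channel",
        "CHANNEL_TITLE": "My Channel",
        "CHANNEL_LANGUAGE": "en",
    }

channel = MyChef().get_channel()  # builds a ChannelNode from channel_info
```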
juraj-google-style
def assert_no_current_path(self, path, **kwargs): query = CurrentPathQuery(path, **kwargs) @self.document.synchronize def assert_no_current_path(): if query.resolves_for(self): raise ExpectationNotMet(query.negative_failure_message) assert_no_current_path() return True
Asserts that the page doesn't have the given path. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
juraj-google-style
def setup_client(client_id: int, test_name: str, env: Mapping[str, str], num_local_devices: int): redirect_output(f'test-{test_name}-process-{client_id}.log') for var, val in env.items(): os.environ[var] = val setup_local_devices(num_local_devices) accelerator_util.initialize_accelerator_system()
Set up a DTensor client for use in multi-client tests.

Args:
    client_id: the index of the client.
    test_name: the name of the test under which this client is running, used
        to identify the log file artifact containing the test output.
    env: a dictionary of environment variables to update.
    num_local_devices: number of local devices to set up.
github-repos
def make_pose(translation, rotation):
    pose = np.zeros((4, 4))
    # Slice assignment: tuples of slices like `pose[(:3, :3)]` are not valid
    # Python syntax; index with the slices directly.
    pose[:3, :3] = rotation
    pose[:3, 3] = translation
    pose[3, 3] = 1.0
    return pose
Makes a homogeneous pose matrix from a translation vector and a rotation matrix.

Args:
    translation: a 3-dim iterable
    rotation: a 3x3 matrix

Returns:
    pose: a 4x4 homogeneous matrix
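A short runnable example with the identity rotation:

```python
import numpy as np

T = make_pose(np.array([1.0, 2.0, 3.0]), np.eye(3))
print(T)
# [[1. 0. 0. 1.]
#  [0. 1. 0. 2.]
#  [0. 0. 1. 3.]
#  [0. 0. 0. 1.]]
```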
codesearchnet
def set_nodes_vlan(site, nodes, interface, vlan_id): def _to_network_address(host): splitted = host.split('.') splitted[0] = splitted[0] + "-" + interface return ".".join(splitted) gk = get_api_client() network_addresses = [_to_network_address(n) for n in nodes] gk.sites[site].vlans[str(vlan_id)].submit({"nodes": network_addresses})
Set the interface of the nodes in a specific vlan. It is assumed that the same interface name is available on the node. Args: site(str): site to consider nodes(list): nodes to consider interface(str): the network interface to put in the vlan vlan_id(str): the id of the vlan
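A sketch only (the call requires Grid'5000 API credentials); the hostname is a placeholder illustrating the address rewrite performed by the inner helper.

```python
# With interface "eth1", "_to_network_address" rewrites
# "paranoia-4.rennes.grid5000.fr" to "paranoia-4-eth1.rennes.grid5000.fr"
# before submitting the vlan change.
set_nodes_vlan("rennes", ["paranoia-4.rennes.grid5000.fr"], "eth1", "4")
```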
juraj-google-style
def _ReadParserPresetsFromFile(self): self._presets_file = os.path.join(self._data_location, self._PRESETS_FILE_NAME) if (not os.path.isfile(self._presets_file)): raise errors.BadConfigOption('No such parser presets file: {0:s}.'.format(self._presets_file)) try: parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file) except errors.MalformedPresetError as exception: raise errors.BadConfigOption('Unable to read presets from file with error: {0!s}'.format(exception))
Reads the parser presets from the presets.yaml file. Raises: BadConfigOption: if the parser presets file cannot be read.
codesearchnet
def add_untagged(self, *responses: 'Response') -> None: for resp in responses: try: merge_key = resp.merge_key except TypeError: self._untagged.append(resp) else: key = (type(resp), merge_key) try: untagged_idx = self._mergeable[key] except KeyError: untagged_idx = len(self._untagged) self._mergeable[key] = untagged_idx self._untagged.append(resp) else: merged = self._untagged[untagged_idx].merge(resp) self._untagged[untagged_idx] = merged self._raw = None
Add an untagged response. These responses are shown before the parent response. Args: responses: The untagged responses to add.
codesearchnet
def _TestGetItem(self, struct, slice_spec, expected): tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True) tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False) value1 = struct.__getitem__(slice_spec) value2 = struct.__getitem__(tensor_slice_spec1) value3 = struct.__getitem__(tensor_slice_spec2) self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,)) self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,)) self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
Helper function for testing StructuredTensor.__getitem__. Checks that calling `struct.__getitem__(slice_spec) returns the expected value. Checks three different configurations for each slice spec: * Call __getitem__ with the slice spec as-is (with int values) * Call __getitem__ with int values in the slice spec wrapped in `tf.constant()`. * Call __getitem__ with int values in the slice spec wrapped in `tf.compat.v1.placeholder()` (so value is not known at graph construction time). Args: struct: The StructuredTensor to test. slice_spec: The slice spec. expected: The expected value of struct.__getitem__(slice_spec), as a python list.
github-repos
def convert_sqla_type_for_dialect(coltype: TypeEngine, dialect: Dialect, strip_collation: bool=True, convert_mssql_timestamp: bool=True, expand_for_scrubbing: bool=False) -> TypeEngine: assert (coltype is not None) to_mysql = (dialect.name == SqlaDialectName.MYSQL) to_mssql = (dialect.name == SqlaDialectName.MSSQL) typeclass = type(coltype) if isinstance(coltype, sqltypes.Enum): return sqltypes.String(length=coltype.length) if isinstance(coltype, sqltypes.UnicodeText): return sqltypes.UnicodeText() if isinstance(coltype, sqltypes.Text): return sqltypes.Text() if isinstance(coltype, sqltypes.Unicode): if (((coltype.length is None) and to_mysql) or expand_for_scrubbing): return sqltypes.UnicodeText() if isinstance(coltype, sqltypes.String): if (((coltype.length is None) and to_mysql) or expand_for_scrubbing): return sqltypes.Text() if strip_collation: return remove_collation(coltype) return coltype if ((typeclass == mssql.base.BIT) and to_mysql): return mysql.base.BIT() is_mssql_timestamp = isinstance(coltype, MSSQL_TIMESTAMP) if (is_mssql_timestamp and to_mssql and convert_mssql_timestamp): return mssql.base.BINARY(8) return coltype
Converts an SQLAlchemy column type from one SQL dialect to another. Args: coltype: SQLAlchemy column type in the source dialect dialect: destination :class:`Dialect` strip_collation: remove any ``COLLATION`` information? convert_mssql_timestamp: since you cannot write to a SQL Server ``TIMESTAMP`` field, setting this option to ``True`` (the default) converts such types to something equivalent but writable. expand_for_scrubbing: The purpose of expand_for_scrubbing is that, for example, a ``VARCHAR(200)`` field containing one or more instances of ``Jones``, where ``Jones`` is to be replaced with ``[XXXXXX]``, will get longer (by an unpredictable amount). So, better to expand to unlimited length. Returns: an SQLAlchemy column type instance, in the destination dialect
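A hedged example of the MySQL-specific branch: an unbounded `Unicode` column has no length, which MySQL `VARCHAR` requires, so it is widened to `UnicodeText`.

```python
from sqlalchemy import Unicode
from sqlalchemy.dialects import mysql

newtype = convert_sqla_type_for_dialect(Unicode(), mysql.dialect())
print(newtype)  # a TEXT-like UnicodeText instance
```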
codesearchnet
def EscapeWildcards(string): precondition.AssertType(string, Text) return string.replace("%", r"\%").replace("_", r"\_")
Escapes wildcard characters for strings intended to be used with `LIKE`. Databases don't automatically escape wildcard characters ('%', '_'), so any non-literal string that is passed to `LIKE` and is expected to match literally has to be manually escaped. Args: string: A string to escape. Returns: An escaped string.
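A short usage example within the module:

```python
pattern = EscapeWildcards("100%_done") + "%"
# -> r"100\%\_done%": the literal text is escaped; the appended "%" is a
# genuine wildcard for a LIKE prefix match.
```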
juraj-google-style
def to_json(value: Any, **kwargs) -> Any: if isinstance(value, Symbolic): return value.sym_jsonify(**kwargs) return utils.to_json(value, **kwargs)
Serializes a (maybe) symbolic value into a plain Python object. Example:: @pg.members([ ('x', pg.typing.Any()) ]) class A(pg.Object): pass a1 = A(1) json = a1.to_json() a2 = pg.from_json(json) assert pg.eq(a1, a2) Args: value: value to serialize. Applicable value types are: * Builtin python types: None, bool, int, float, string; * JSONConvertible types; * List types; * Tuple types; * Dict types. **kwargs: Keyword arguments to pass to value.to_json if value is JSONConvertible. Returns: JSON value.
github-repos
def build_masters(filename, master_dir, designspace_instance_dir=None, designspace_path=None, family_name=None, propagate_anchors=True, minimize_glyphs_diffs=False, normalize_ufos=False, create_background_layers=False, generate_GDEF=True, store_editor_state=True): font = GSFont(filename) if (not os.path.isdir(master_dir)): os.mkdir(master_dir) if (designspace_instance_dir is None): instance_dir = None else: instance_dir = os.path.relpath(designspace_instance_dir, master_dir) designspace = to_designspace(font, family_name=family_name, propagate_anchors=propagate_anchors, instance_dir=instance_dir, minimize_glyphs_diffs=minimize_glyphs_diffs, generate_GDEF=generate_GDEF, store_editor_state=store_editor_state) ufos = {} for source in designspace.sources: if (source.filename in ufos): assert (source.font is ufos[source.filename]) continue if create_background_layers: ufo_create_background_layer_for_all_glyphs(source.font) ufo_path = os.path.join(master_dir, source.filename) clean_ufo(ufo_path) source.font.save(ufo_path) if normalize_ufos: import ufonormalizer ufonormalizer.normalizeUFO(ufo_path, writeModTimes=False) ufos[source.filename] = source.font if (not designspace_path): designspace_path = os.path.join(master_dir, designspace.filename) designspace.write(designspace_path) return Masters(ufos, designspace_path)
Write and return UFOs from the masters and the designspace defined in a .glyphs file.

Args:
    filename: Path to the .glyphs source file.
    master_dir: Directory where masters are written.
    designspace_instance_dir: If provided, a designspace document will be
        written alongside the master UFOs though no instances will be built.
    designspace_path: If provided, the designspace document is written to
        this path instead of into master_dir.
    family_name: If provided, the master UFOs will be given this name and
        only instances with this name will be included in the designspace.
    propagate_anchors: Whether to copy anchors from base glyphs to composites.
    minimize_glyphs_diffs: Whether to store extra Glyphs data in the UFOs to
        minimize diffs on round-trips.
    normalize_ufos: Whether to run ufonormalizer on the written UFOs.
    create_background_layers: Whether to add a background layer for every glyph.
    generate_GDEF: Whether to write a GDEF table definition into the features.
    store_editor_state: Whether to preserve Glyphs UI state in the UFOs.

Returns:
    A named tuple of master UFOs (`ufos`) and the path to the designspace
    file (`designspace_path`).
codesearchnet
def append(self, item): self._items.append(item)
Append an item to the Menu. Args: item: (MenuItem) the item to be appended.
github-repos
def uni_to_beta(text): u = _UNICODE_MAP transform = [] for ch in text: try: conv = u[ch] except KeyError: conv = ch transform.append(conv) converted = ''.join(transform) return converted
Convert unicode text to a betacode equivalent. This method can handle tónos or oxeîa characters in the input. Args: text: The text to convert to betacode. This text does not have to all be Greek polytonic text, and only Greek characters will be converted. Note that in this case, you cannot convert to beta and then back to unicode. Returns: The betacode equivalent of the inputted text where applicable.
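An illustrative call; the expected output assumes the module-level `_UNICODE_MAP` covers polytonic Greek (final sigma folded to plain "s"), while non-Greek characters pass through unchanged.

```python
print(uni_to_beta("λόγος"))      # -> "lo/gos"
print(uni_to_beta("abc λόγος"))  # -> "abc lo/gos"
```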
juraj-google-style
def __init__(self, cell): self._cell = cell
Creates a new BoolGaugeCell. Args: cell: A c pointer of TFE_MonitoringBoolGaugeCell.
github-repos
def _open_script_interface(self, conn_id, callback): try: handle = self._find_handle(conn_id) services = self._connections[handle]['services'] except (ValueError, KeyError): callback(conn_id, self.id, False, 'Connection closed unexpectedly before we could open the script interface') return success = TileBusHighSpeedCharacteristic in services[TileBusService]['characteristics'] reason = None if not success: reason = 'Could not find high speed streaming characteristic' callback(conn_id, self.id, success, reason)
Enable script streaming interface for this IOTile device Args: conn_id (int): the unique identifier for the connection callback (callback): Callback to be called when this command finishes callback(conn_id, adapter_id, success, failure_reason)
juraj-google-style
def conv2d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
    # `strides`, `dilations`, `padding`, `bias_fn`, `has_batch_norm` and
    # `activation_fn` are taken from the enclosing (parameterized test) scope.
    scale = [1.0] * self.out_channel_size
    offset = [0.5] * self.out_channel_size
    mean, variance = (scale, offset)
    out = nn_ops.conv2d(input_tensor, self.filters, strides=strides, dilations=dilations, padding=padding, data_format='NHWC', name='sample/conv')
    if bias_fn is not None:
        out = nn_ops.bias_add(out, self.bias)
    if has_batch_norm:
        out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False)
    if activation_fn is not None:
        out = activation_fn(out)
    return {'output': out}
Performs a 2D convolution operation. Args: input_tensor: Input tensor to perform convolution on. Returns: A map of: output key -> output result.
github-repos
def find_indices(lst, element): result = [] offset = -1 while True: try: offset = lst.index(element, offset+1) except ValueError: return result result.append(offset)
Returns the indices of all occurrences of `element` in `lst`.

Args:
    lst (list): List to search.
    element: Element to find.

Returns:
    list: List of indices (empty if the element is not found).
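Two quick examples:

```python
find_indices([1, 2, 1, 3, 1], 1)   # -> [0, 2, 4]
find_indices(["a", "b"], "z")      # -> []
```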
juraj-google-style
def CaseGroups(unicode_dir=_UNICODE_DIR):
    togroup = {}

    def DoLine(codes, fields):
        """Process single CaseFolding.txt line, updating togroup."""
        (_, foldtype, lower, _) = fields
        if foldtype not in ('C', 'S'):
            return
        lower = _UInt(lower)
        togroup.setdefault(lower, [lower]).extend(codes)

    ReadUnicodeTable(unicode_dir + '/CaseFolding.txt', 4, DoLine)
    # list() is required under Python 3, where dict.values() is a view
    # that cannot be sorted in place.
    groups = list(togroup.values())
    for g in groups:
        g.sort()
    groups.sort()
    return (togroup, groups)
Returns list of Unicode code groups equivalent under case folding. Each group is a sorted list of code points, and the list of groups is sorted by first code point in the group. Args: unicode_dir: Unicode data directory Returns: list of Unicode code groups
codesearchnet
def slope(self, other): X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y Y3 = Y1 - Y2 X3 = X1 - X2 return (Y3 * self.inverse(X3)) % self.P
Determines the slope between this point and another point. Args: other (AffinePoint): The second point. Returns: int: Slope between self and other.
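A worked example of the chord-slope arithmetic; `self.inverse` is assumed to compute a modular inverse, so Fermat's little theorem stands in here (valid because P is prime).

```python
X1, Y1, X2, Y2, P = 5, 1, 6, 3, 17
inv = pow(X1 - X2, P - 2, P)   # modular inverse via Fermat's little theorem
m = ((Y1 - Y2) * inv) % P
print(m)  # 2
```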
juraj-google-style
def _sia(cache_key, subsystem): log.info('Calculating big-phi data for %s...', subsystem) if not subsystem: log.info('Subsystem %s is empty; returning null SIA ' 'immediately.', subsystem) return _null_sia(subsystem) if not connectivity.is_strong(subsystem.cm, subsystem.node_indices): log.info('%s is not strongly connected; returning null SIA ' 'immediately.', subsystem) return _null_sia(subsystem) if len(subsystem.cut_indices) == 1: if not subsystem.cm[subsystem.node_indices][subsystem.node_indices]: log.info('Single micro nodes %s without selfloops cannot have ' 'phi; returning null SIA immediately.', subsystem) return _null_sia(subsystem) elif not config.SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI: log.info('Single micro nodes %s with selfloops cannot have ' 'phi; returning null SIA immediately.', subsystem) return _null_sia(subsystem) log.debug('Finding unpartitioned CauseEffectStructure...') unpartitioned_ces = _ces(subsystem) if not unpartitioned_ces: log.info('Empty unpartitioned CauseEffectStructure; returning null ' 'SIA immediately.') return _null_sia(subsystem) log.debug('Found unpartitioned CauseEffectStructure.') if len(subsystem.cut_indices) == 1: cuts = [Cut(subsystem.cut_indices, subsystem.cut_indices, subsystem.cut_node_labels)] else: cuts = sia_bipartitions(subsystem.cut_indices, subsystem.cut_node_labels) engine = ComputeSystemIrreducibility( cuts, subsystem, unpartitioned_ces) result = engine.run(config.PARALLEL_CUT_EVALUATION) if config.CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA: log.debug('Clearing subsystem caches.') subsystem.clear_caches() log.info('Finished calculating big-phi data for %s.', subsystem) return result
Return the minimal information partition of a subsystem. Args: subsystem (Subsystem): The candidate set of nodes. Returns: SystemIrreducibilityAnalysis: A nested structure containing all the data from the intermediate calculations. The top level contains the basic irreducibility information for the given subsystem.
juraj-google-style