code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __ge__(self, other):
    """Check if `self` >= `other` by comparing contents.

    `other` is coerced to a :class:`FrameSet` first; if that fails,
    ``NotImplemented`` is returned so Python can try the reflected op.
    """
    converted = self._cast_to_frameset(other)
    if converted is NotImplemented:
        return NotImplemented
    return self.items >= converted.items
Check if `self` >= `other` via a comparison of the contents. If `other` is not a :class:`FrameSet`, but is a set, frozenset, or is iterable, it will be cast to a :class:`FrameSet`. Args: other (:class:`FrameSet`): Also accepts an object that can be cast to a :class:`FrameSet` Returns: bool: :class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
juraj-google-style
def unsqueeze(self, dim: int) -> Rigid:
    """Analogous to ``torch.unsqueeze``.

    The dimension is relative to the shared (batch) dimensions of the
    rotation/translation.

    Args:
        dim: A positive or negative dimension index.

    Returns:
        The unsqueezed transformation.
    """
    if dim >= len(self.shape):
        raise ValueError('Invalid dimension')
    rots = self._rots.unsqueeze(dim)
    # The translation tensor carries one extra trailing coordinate dim, so a
    # negative index must be shifted by one to address the same batch dim.
    trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)
    return Rigid(rots, trans)
Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation. Args: dim: A positive or negative dimension index. Returns: The unsqueezed transformation.
github-repos
def _BuildFindSpecsFromRegistrySourceKey(self, key_path):
    """Builds find specifications from a Windows Registry source type.

    Args:
        key_path (str): Windows Registry key path defined by the source.

    Returns:
        list[dfwinreg.FindSpec]: find specifications for the Windows Registry
            source type.
    """
    find_specs = []
    for key_path_glob in path_helper.PathHelper.ExpandRecursiveGlobs(key_path, '\\'):
        logger.debug('building find spec from key path glob: {0:s}'.format(key_path_glob))
        key_path_glob_upper = key_path_glob.upper()
        # Rewrite per-user SID paths to HKEY_CURRENT_USER; the slice length
        # (26) matches len('HKEY_USERS\\%%USERS.SID%%').
        if key_path_glob_upper.startswith('HKEY_USERS\\%%USERS.SID%%'):
            key_path_glob = 'HKEY_CURRENT_USER{0:s}'.format(key_path_glob[26:])
        find_spec = registry_searcher.FindSpec(key_path_glob=key_path_glob)
        find_specs.append(find_spec)
    return find_specs
Build find specifications from a Windows Registry source type. Args: key_path (str): Windows Registry key path defined by the source. Returns: list[dfwinreg.FindSpec]: find specifications for the Windows Registry source type.
codesearchnet
def write_test_cases(fp, model_name, examples):
    """Write a text-format (protocol-buffer-like) representation of examples.

    Args:
        fp: File-like object to write to.
        model_name: Filename where the model was written, relative to `fp`.
        examples: List of example dicts with "inputs" and "outputs" keys.

    Raises:
        RuntimeError: An example dictionary has empty input/output names.
    """
    writer = TextFormatWriter(fp)
    writer.write_field('load_model', os.path.basename(model_name))
    for example in examples:
        # Collect non-empty tensor names to validate the example.
        inputs = []
        for name in example['inputs'].keys():
            if name:
                inputs.append(name)
        outputs = []
        for name in example['outputs'].keys():
            if name:
                outputs.append(name)
        if not (inputs and outputs):
            raise RuntimeError('Empty input / output names.')
        # Emit a 'reshape' message describing each input's shape.
        with writer.sub_message('reshape') as reshape:
            for name, value in example['inputs'].items():
                with reshape.sub_message('input') as input_msg:
                    input_msg.write_field('key', name)
                    input_msg.write_field('value', ','.join(map(str, value.shape)))
        # Emit an 'invoke' message with input values, output values and shapes.
        with writer.sub_message('invoke') as invoke:
            for name, value in example['inputs'].items():
                with invoke.sub_message('input') as input_msg:
                    input_msg.write_field('key', name)
                    input_msg.write_field('value', format_result(value))
            for name, value in example['outputs'].items():
                with invoke.sub_message('output') as output_msg:
                    output_msg.write_field('key', name)
                    output_msg.write_field('value', format_result(value))
                with invoke.sub_message('output_shape') as output_shape:
                    output_shape.write_field('key', name)
                    output_shape.write_field('value', ','.join([str(dim) for dim in value.shape]))
Given a dictionary of `examples`, write a text format representation. The file format is protocol-buffer-like, even though we don't use proto due to the needs of the Android team. Args: fp: File-like object to write to. model_name: Filename where the model was written to, relative to filename. examples: Example dictionary consisting of keys "inputs" and "outputs" Raises: RuntimeError: Example dictionary does not have input / output names.
github-repos
def GetDefinitionByName(self, name):
    """Retrieves a specific data type definition by name.

    Lookup is case-insensitive; unknown names fall back to the alias table.

    Args:
        name (str): name of the data type definition.

    Returns:
        DataTypeDefinition: data type definition or None if not available.
    """
    key = name.lower()
    if key not in self._definitions:
        # Aliases are keyed on the original (unlowered) name.
        key = self._aliases.get(name, None)
    return self._definitions.get(key, None)
Retrieves a specific data type definition by name. Args: name (str): name of the data type definition. Returns: DataTypeDefinition: data type definition or None if not available.
codesearchnet
class DabDetrEncoder(DabDetrPreTrainedModel):
    """Transformer encoder of *config.encoder_layers* [`DabDetrEncoderLayer`]s.

    The encoder updates the flattened feature map through multiple
    self-attention layers. DAB-DETR tweak: object_queries are rescaled per
    layer and added during the forward pass.

    Args:
        config: DabDetrConfig
    """

    def __init__(self, config: DabDetrConfig):
        super().__init__(config)
        self.dropout = config.dropout
        # MLP producing per-position scale factors for the object queries.
        self.query_scale = DabDetrMLP(config.hidden_size, config.hidden_size, config.hidden_size, 2)
        self.layers = nn.ModuleList([DabDetrEncoderLayer(config) for _ in range(config.encoder_layers)])
        # Final LayerNorm only when using pre-norm ("normalize_before") layout.
        self.norm = nn.LayerNorm(config.hidden_size) if config.normalize_before else None
        self.gradient_checkpointing = False
        self.post_init()

    def forward(self, inputs_embeds, attention_mask, object_queries, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
        """Run the encoder stack over flattened image features.

        Args:
            inputs_embeds: flattened feature map embeddings.
            attention_mask: optional padding mask over the sequence.
            object_queries: positional (sine) embeddings added per layer.
            output_attentions / output_hidden_states / return_dict: per-call
                overrides of the config defaults.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        hidden_states = inputs_embeds
        if attention_mask is not None:
            # Expand [batch, seq] padding mask to the 4D additive mask format.
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # Rescale object queries from the current hidden states (DAB-DETR).
            pos_scales = self.query_scale(hidden_states)
            scaled_object_queries = object_queries * pos_scales
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, scaled_object_queries, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, object_queries=scaled_object_queries, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if self.norm:
            hidden_states = self.norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`DabDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for DAB-DETR: - object_queries are added to the forward pass. Args: config: DabDetrConfig
github-repos
def write_dict_to_new_file(file_name, localization_key_to_comment):
    """Writes a dictionary of localization keys and comments to a file.

    Entries are written sorted by comment text.

    Args:
        file_name (str): The path of the file to write to.
        localization_key_to_comment (dict): A mapping between localization
            keys and comments.
    """
    output_file_descriptor = open_strings_file(file_name, "w")
    try:
        # BUG FIX: dict.iteritems() is Python 2 only; items() works everywhere.
        for entry_key, entry_comment in sorted(localization_key_to_comment.items(), key=operator.itemgetter(1)):
            write_entry_to_file(output_file_descriptor, entry_comment, entry_key)
            output_file_descriptor.write(u'\n')
    finally:
        # Close the file even if a write fails.
        output_file_descriptor.close()
Writes dictionary of localization keys and comments to a file. Args: localization_key_to_comment (dict): A mapping between localization keys and comments. file_name (str): The path of the file to append to.
juraj-google-style
def _ParseRegisteredDLLs(self, parser_mediator, registry_key):
    """Parses the registered DLLs that receive event notifications.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    notify_key = registry_key.GetSubkeyByName('Notify')
    if not notify_key:
        return
    for subkey in notify_key.GetSubkeys():
        for trigger in self._TRIGGERS:
            # Only subkeys that define a handler for this trigger are relevant.
            handler_value = subkey.GetValueByName(trigger)
            if not handler_value:
                continue
            values_dict = {
                'Application': subkey.name,
                'Handler': handler_value.GetDataAsObject(),
                'Trigger': trigger}
            command_value = subkey.GetValueByName('DllName')
            if command_value:
                values_dict['Command'] = command_value.GetDataAsObject()
            event_data = windows_events.WindowsRegistryEventData()
            event_data.key_path = subkey.path
            event_data.offset = subkey.offset
            event_data.regvalue = values_dict
            event_data.source_append = ': Winlogon'
            # Timestamp the event with the subkey's last-written time.
            event = time_events.DateTimeValuesEvent(
                subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the registered DLLs that receive event notifications. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def add(self, value):
    """Add a value to this histogram.

    Args:
        value (int): The value. Values outside ``10 <= x <= 600`` are
            clamped to 10 or 600 respectively.
    """
    clamped = min(600, max(10, int(value)))
    self._data.setdefault(clamped, 0)
    self._data[clamped] += 1
    self._len += 1
Add the value to this histogram. Args: value (int): The value. Values outside of ``10 <= x <= 600`` will be raised to ``10`` or reduced to ``600``.
codesearchnet
def add_buffer(self, buf_header, buf_payload):
    """Associate a buffer header and payload with this message.

    Args:
        buf_header (``JSON``): a buffer header
        buf_payload (``JSON`` or bytes): a buffer payload

    Returns:
        None
    """
    self._header['num_buffers'] = self._header.get('num_buffers', 0) + 1
    # The header changed, so any cached JSON serialization is stale.
    self._header_json = None
    self._buffers.append((buf_header, buf_payload))
Associate a buffer header and payload with this message. Args: buf_header (``JSON``) : a buffer header buf_payload (``JSON`` or bytes) : a buffer payload Returns: None Raises: MessageError
juraj-google-style
def validate_full_name(self, full_name, timeout=-1):
    """Verifies whether a fullName is already in use.

    Args:
        full_name: The fullName to be verified.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView, it only
            stops waiting for completion.

    Returns:
        True if the full name is in use, False otherwise.
    """
    endpoint = self.URI + '/validateUserName/' + full_name
    return self._client.create_with_zero_body(uri=endpoint, timeout=timeout)
Verifies if a fullName is already in use. Args: full_name: The fullName to be verified. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: True if full name is in use, False if it is not.
codesearchnet
def __init__(self, host: str, port: int, username: Optional[str], password: Optional[str], batch_size: int=100):
    """Initialize the OpenSearch connection settings.

    Args:
        host (str): The opensearch host.
        port (int): The opensearch port.
        username (Optional[str]): username; falls back to the
            OPENSEARCH_USERNAME environment variable.
        password (Optional[str]): password; falls back to the
            OPENSEARCH_PASSWORD environment variable.
        batch_size (int): Number of key/value pairs to write at once.

    Raises:
        ValueError: If neither the arguments nor the environment provide
            both username and password.
    """
    self.host = host
    self.port = port
    # BUG FIX: `|` is not defined between str/None operands and raised
    # TypeError at runtime; `or` performs the intended env-var fallback.
    self.username = username or os.getenv('OPENSEARCH_USERNAME')
    self.password = password or os.getenv('OPENSEARCH_PASSWORD')
    self._batch_size = batch_size
    if not self.username or not self.password:
        raise ValueError('Username and password are needed for connecting to Opensearch cluster.')
Args: host (str): The opensearch host port (int): The opensearch port username (str): username of OpenSearch DB password (str): password of OpenSearch DB batch_size(int): Number of key, values pairs to write at once Returns: :class:`~apache_beam.transforms.ptransform.PTransform`
github-repos
def mul(left, right):
    """Distribution multiplication.

    Args:
        left (Dist, numpy.ndarray): left hand side.
        right (Dist, numpy.ndarray): right hand side.
    """
    from .mv_mul import MvMul
    # NOTE(review): max(left, right) compares the operand objects themselves,
    # not their dimensionality. This looks like it was meant to be something
    # like max(len(left), len(right)) to pick the scalar Mul path for 1-D
    # operands -- verify against Dist.__len__ semantics upstream.
    length = max(left, right)
    if (length == 1):
        return Mul(left, right)
    return MvMul(left, right)
Distribution multiplication. Args: left (Dist, numpy.ndarray) : left hand side. right (Dist, numpy.ndarray) : right hand side.
codesearchnet
def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None): newhdxobjects = list() for hdxobject in hdxobjects: newhdxobjectdata = copy.deepcopy(hdxobject.data) newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration) if attribute_to_copy: value = getattr(hdxobject, attribute_to_copy) setattr(newhdxobject, attribute_to_copy, value) newhdxobjects.append(newhdxobject) return newhdxobjects
Helper function to make a deep copy of a supplied list of HDX objects Args: hdxobjects (List[T <= HDXObject]): list of HDX objects to copy hdxobjectclass (type): Type of the HDX Objects to be copied attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None. Returns: List[T <= HDXObject]: Deep copy of list of HDX objects
codesearchnet
def __init__(self, timeout_s):
    """Construct a PolledTimeout object.

    Args:
        timeout_s: Either a number or None. If a number, this object is
            considered expired that many seconds after construction. If
            None, it never expires.
    """
    self.timeout_s = timeout_s
    # Record creation time so expiry can be computed relative to now.
    self.start = time.time()
Construct a PolledTimeout object. Args: timeout_s: This may either be a number or None. If a number, this object will consider to be expired after number seconds after construction. If None, this object never expires.
juraj-google-style
def add_comment(self, app_id, record_id, field_id, message):
    """Directly add a comment to a record without fetching app or record.

    Warnings:
        Does not perform any app, record, or field ID validation.

    Args:
        app_id (str): Full App ID string.
        record_id (str): Full parent Record ID string.
        field_id (str): Full field ID of the comments field on the record.
        message (str): New comment message body.
    """
    endpoint = 'app/{0}/record/{1}/{2}/comment'.format(app_id, record_id, field_id)
    payload = {
        'message': message,
        'createdDate': pendulum.now().to_rfc3339_string()
    }
    self._swimlane.request('post', endpoint, json=payload)
Directly add a comment to a record without retrieving the app or record first Warnings: Does not perform any app, record, or field ID validation Args: app_id (str): Full App ID string record_id (str): Full parent Record ID string field_id (str): Full field ID to target reference field on parent Record string message (str): New comment message body
juraj-google-style
def AddLogFileOptions(self, argument_group):
    """Adds the log file option to the argument group.

    Args:
        argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    # Three spellings are accepted for compatibility; they all store into
    # the single 'log_file' destination.
    argument_group.add_argument('--logfile', '--log_file', '--log-file', action='store', metavar='FILENAME', dest='log_file', type=str, default='', help='Path of the file in which to store log messages, by default this file will be named: "{0:s}-YYYYMMDDThhmmss.log.gz". Note that the file will be gzip compressed if the extension is ".gz".'.format(self.NAME))
Adds the log file option to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
codesearchnet
def write(self, writer: WriteStream) -> None:
    """Write the object to the stream.

    Emits every untagged response first, then the tagged status line.

    Args:
        writer: The output stream.
    """
    for untagged in self._untagged:
        untagged.write(writer)
    # Tagged completion line terminated with CRLF per the IMAP wire format.
    writer.write(b'%b %b\r\n' % (self.tag, self.text))
Write the object to the stream, with one or more calls to :meth:`~asyncio.WriteStream.write`. Args: writer: The output stream.
juraj-google-style
def parse(self, input_str, reference_date=""):
    """Parse datetime information out of a string via SUTime.

    Invokes the SUTimeWrapper.annotate() function in Java.

    Args:
        input_str: The input string to be parsed.
        reference_date: Optional reference date for SUTime.

    Returns:
        A list of dicts with the result from SUTimeWrapper.annotate().
    """
    # The calling thread must be attached to the JVM before any Java call.
    if not jpype.isThreadAttachedToJVM():
        jpype.attachThreadToJVM()
    if reference_date:
        annotated = self._sutime.annotate(input_str, reference_date)
    else:
        annotated = self._sutime.annotate(input_str)
    return json.loads(annotated)
Parses datetime information out of string input. It invokes the SUTimeWrapper.annotate() function in Java. Args: input_str: The input as string that has to be parsed. reference_date: Optional reference data for SUTime. Returns: A list of dicts with the result from the SUTimeWrapper.annotate() call.
juraj-google-style
def _broadcast_arg(U, arg, argtype, name): if arg is None or isinstance(arg, argtype): return [arg for _ in range(U.ndim)] elif np.iterable(arg): if len(arg) != U.ndim: raise ValueError('Parameter {} was specified as a sequence of ' 'incorrect length. The length must match the ' 'number of tensor dimensions ' '(U.ndim={})'.format(name, U.ndim)) elif not all([isinstance(a, argtype) for a in arg]): raise TypeError('Parameter {} specified as a sequence of ' 'incorrect type. ' 'Expected {}.'.format(name, argtype)) else: return arg else: raise TypeError('Parameter {} specified as a {}.' ' Expected {}.'.format(name, type(arg), argtype))
Broadcasts plotting option `arg` to all factors. Args: U : KTensor arg : argument provided by the user argtype : expected type for arg name : name of the variable, used for error handling Returns: iterable version of arg of length U.ndim
juraj-google-style
def resolve_pname(self, pname: PrefName, mid: ModuleId) -> Tuple[(YangIdentifier, ModuleId)]:
    """Return the name and the module identifier in which it is defined.

    Args:
        pname: Name with an optional prefix.
        mid: Identifier of the module in which `pname` appears.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If the prefix in `pname` is not declared.
    """
    (p, s, loc) = pname.partition(':')
    try:
        mdata = self.modules[mid]
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    try:
        # With a prefix (separator found), map it through the module's prefix
        # map; otherwise the bare name resolves to the module's main module.
        return ((loc, mdata.prefix_map[p]) if s else (p, mdata.main_module))
    except KeyError:
        raise UnknownPrefix(p, mid) from None
Return the name and module identifier in which the name is defined. Args: pname: Name with an optional prefix. mid: Identifier of the module in which `pname` appears. Raises: ModuleNotRegistered: If `mid` is not registered in the data model. UnknownPrefix: If the prefix specified in `pname` is not declared.
codesearchnet
def reset(self, entries_to_reset):
    """Reset the given entries in the memory.

    Args:
        entries_to_reset: a 1D tensor of entry indices.

    Returns:
        The reset op (a grouped op zeroing values and logits).
    """
    num_updates = tf.size(entries_to_reset)
    # Zero out the stored value slots for the selected entries.
    update_vals = tf.scatter_update(self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims(tf.fill([self.memory_size, self.val_depth], 0.0), 0), [num_updates, 1, 1]))
    # Zero out the corresponding mean logits.
    update_logits = tf.scatter_update(self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims(tf.fill([self.memory_size], 0.0), 0), [num_updates, 1]))
    reset_op = tf.group([update_vals, update_logits])
    return reset_op
Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op.
codesearchnet
def __init__(self, cls, required=False, default=Empty):
    """Create an instance of a type signature.

    Args:
        cls (Class): the "type" of the object this signature represents.
        required (bool): whether the field is mandatory.
        default (object): an instance of the type used as a default value.
            Either an instance of cls or something coercible to cls.
    """
    assert isclass(cls)
    assert issubclass(cls, Object)
    # Keep Empty or already-correct instances as-is; coerce anything else.
    if default is Empty or isinstance(default, cls):
        self._default = default
    else:
        self._default = cls(default)
    self._cls = cls
    self._required = required
Create an instance of a type signature. Args: cls (Class): the "type" of the object this signature represents. required (bool): default(object): an instance of the type for a default value. This should be either an instance of cls or something coercable to cls.
juraj-google-style
def __init__(self, project, query, data):
    """Initialize a BigQueryMatcher object.

    Args:
        project: The name (string) of the project.
        query: The query (string) to perform.
        data: List of tuples with the expected data.
    """
    super().__init__(project, query, 'unused_checksum')
    # Actual rows are populated during matching; only expectations are known.
    self.actual_data = None
    self.expected_data = data
Initialize BigQueryMatcher object. Args: project: The name (string) of the project. query: The query (string) to perform. data: List of tuples with the expected data.
github-repos
def send_handshake_request(self, uid=UNKNOWN_UID, cmd=ConnectionHandshakeCommand.INIT):
    """Sends a handshake request to the server.

    Checks whether the server is ready for communication. If ready, sets
    `self.uid` to the server session id; otherwise sets it to `UNKNOWN_UID`.

    Args:
        uid: int, the uid of the server session to continue. Ignored if `cmd`
            asks the server to create a new session.
        cmd: ConnectionHandshakeCommand, whether the server should create a
            new session or use the current one.

    Raises:
        errors.ProtocolError: the server did not respond to the handshake.
    """
    request = json.dumps({'cmd': cmd.value, 'uid': uid})
    self.log.debug('Sending handshake request %s.', request)
    self._client_send(request)
    response = self._client_receive()
    # No bytes back means the server never answered the handshake.
    if not response:
        raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
    response = self._decode_socket_response_bytes(response)
    result = json.loads(response)
    # A truthy status means the server accepted/created the session.
    if result['status']:
        self.uid = result['uid']
    else:
        self.uid = UNKNOWN_UID
Sends a handshake request to the server to prepare for the communication. Through the handshake response, this function checks whether the server is ready for the communication. If ready, it sets `self.uid` to the server session id. Otherwise, it sets `self.uid` to `UNKNOWN_UID`. Args: uid: int, the uid of the server session to continue. It will be ignored if the `cmd` requires the server to create a new session. cmd: ConnectionHandshakeCommand, the handshake command Enum for the server, which requires the server to create a new session or use the current session. Raises: errors.ProtocolError: something went wrong when sending the handshake request.
github-repos
def _skip_remaining_tests(self, exception):
    """Mark every requested-but-unexecuted test in this class as skipped.

    Useful for handling the abort-class signal.

    Args:
        exception: The exception object that triggered the skip.
    """
    for test_name in self.results.requested:
        # Already-executed tests keep their real results.
        if self.results.is_test_executed(test_name):
            continue
        record = records.TestResultRecord(test_name, self.TAG)
        record.test_skip(exception)
        self.results.add_record(record)
        self.summary_writer.dump(record.to_dict(), records.TestSummaryEntryType.RECORD)
Marks any requested test that has not been executed in a class as skipped. This is useful for handling abort class signal. Args: exception: The exception object that was thrown to trigger the skip.
github-repos
def __init__(self, features, targets, **kwargs):
    """Initialize the classifier and fit it on the training split.

    Args:
        features: feature set with a ``train`` split.
        targets: target set with a ``train`` split.
        **kwargs: passed through to the parent classifier.
    """
    super().__init__(**kwargs)
    self.targets = targets
    self.features = features
    # Train immediately on the provided training data.
    self.fit(features.train, targets.train)
Inits a Random Forest Classifier and immediately fits it on the training split. Args: features: Feature set exposing a ``train`` split. targets: Target set exposing a ``train`` split. **kwargs: Scikit Learn's RandomForestClassifier kwargs
juraj-google-style
def get_average_record(self, n):
    """Return rolling averages over the last n data points.

    Args:
        n: Number of data points to average over.

    Returns:
        A list of average current values, one per data point, each averaged
        over the (up to) n most recent points.
    """
    # deque(maxlen=n) drops the oldest element automatically once full.
    window = collections.deque(maxlen=n)
    averages = []
    for point in self.data_points:
        window.append(point)
        averages.append(round(sum(window) / len(window), self.lr))
    return averages
Returns a list of average current numbers, each representing the average over the last n data points. Args: n: Number of data points to average over. Returns: A list of average current values.
juraj-google-style
def json_to_bulk(tc_data, value_fields, resource_type, resource_type_parent):
    """Convert a ThreatConnect JSON response to bulk format.

    .. Attention:: This method is subject to frequent changes.

    Args:
        tc_data (dict or list): Data returned from the TC API call.
        value_fields (list): Field names containing the "value" data.
        resource_type (str): The resource type of the tc_data provided.
        resource_type_parent (str): The parent resource type.

    Returns:
        list: A list of dicts representing a TCEntityArray.
    """
    if not isinstance(tc_data, list):
        tc_data = [tc_data]
    bulk_array = []
    for item in tc_data:
        # Extract the value fields and fold them into a single label.
        values = []
        for field in value_fields:
            if item.get(field) is not None:
                values.append(item.get(field))
                del item[field]
        label = ' : '.join(values)
        if resource_type_parent in ['Group', 'Task', 'Victim']:
            item['name'] = label
        elif resource_type_parent in ['Indicator']:
            item['summary'] = label
        # Flatten the nested owner structure into ownerName.
        if 'owner' in item:
            item['ownerName'] = item['owner']['name']
            del item['owner']
        if item.get('type') is None:
            item['type'] = resource_type
        bulk_array.append(item)
    return bulk_array
Convert ThreatConnect JSON response to a Bulk Format. .. Attention:: This method is subject to frequent changes Args: tc_data (dictionary): Array of data returned from TC API call. value_fields (list): Field names that contain the "value" data. resource_type (string): The resource type of the tc_data provided. resource_type_parent (string): The resource parent type of the tc_data provided. Returns: (list): A dictionary representing a TCEntityArray
codesearchnet
def delete(filename, retry_params=None, _account_id=None):
    """Delete a Google Cloud Storage file.

    Args:
        filename: A Google Cloud Storage filename of form '/bucket/filename'.
        retry_params: An api_utils.RetryParams for this call to GCS. If None,
            the default one is used.
        _account_id: Internal-use only.

    Raises:
        errors.NotFoundError: if the file doesn't exist prior to deletion.
    """
    api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id)
    common.validate_file_path(filename)
    quoted = api_utils._quote_filename(filename)
    status, resp_headers, content = api.delete_object(quoted)
    # 204 No Content is the only acceptable status for a successful delete.
    errors.check_status(status, [204], quoted, resp_headers=resp_headers, body=content)
Delete a Google Cloud Storage file. Args: filename: A Google Cloud Storage filename of form '/bucket/filename'. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Raises: errors.NotFoundError: if the file doesn't exist prior to deletion.
juraj-google-style
def _testDrawBoundingBoxColorCycling(self, img, dtype=dtypes.float32, colors=None):
    """Tests if color cycling works appropriately.

    Args:
        img: 3-D numpy image on which to draw.
        dtype: image dtype (float, half).
        colors: optional color table; defaults to the op's built-in palette.
    """
    color_table = colors
    if colors is None:
        # Default RGBA palette used by draw_bounding_boxes when no colors
        # are supplied.
        color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1], [0.5, 0, 0, 1], [0, 0, 0.5, 1], [0, 1, 1, 1], [1, 0, 1, 1]])
    assert len(img.shape) == 3
    depth = img.shape[2]
    assert depth <= color_table.shape[1]
    assert depth == 1 or depth == 3 or depth == 4
    if depth == 1:
        # Grayscale images are drawn at full intensity.
        color_table[:, 0] = 1
    num_colors = color_table.shape[0]
    # Draw 1..num_colors+1 boxes so the palette wraps around at least once.
    for num_boxes in range(1, num_colors + 2):
        image = np.copy(img)
        color = color_table[(num_boxes - 1) % num_colors, 0:depth]
        # Expected image: the border filled with the last cycled color.
        test_drawn_image = self._fillBorder(image, color)
        bboxes = np.asarray([0, 0, 1, 1])
        bboxes = np.vstack([bboxes for _ in range(num_boxes)])
        bboxes = math_ops.cast(bboxes, dtypes.float32)
        bboxes = array_ops.expand_dims(bboxes, 0)
        image = ops.convert_to_tensor(image)
        image = image_ops_impl.convert_image_dtype(image, dtype)
        image = array_ops.expand_dims(image, 0)
        image = image_ops.draw_bounding_boxes(image, bboxes, colors=colors)
        with self.cached_session(use_gpu=False) as sess:
            op_drawn_image = np.squeeze(sess.run(image), 0)
            self.assertAllEqual(test_drawn_image, op_drawn_image)
Tests if cycling works appropriately. Args: img: 3-D numpy image on which to draw. dtype: image dtype (float, half). colors: color table.
github-repos
def get_text_features(self, input_ids, attention_mask=None, position_ids=None, token_type_ids=None, params: Optional[dict]=None, dropout_rng: jax.random.PRNGKey=None, train=False):
    """Compute text embeddings from the text model's pooled output.

    Args:
        input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
        attention_mask: optional mask; defaults to all ones.
        position_ids: optional positions; default is 0..seq_len-1 broadcast.
        token_type_ids: optional segment ids; defaults to zeros.
        params: optional parameter dict overriding `self.params`.
        dropout_rng: optional PRNG key for dropout.
        train: whether dropout is active (deterministic = not train).

    Returns:
        text_features (`jnp.ndarray` of shape `(batch_size, output_dim)`):
            Pooled text-model output passed through the projection layer.
    """
    if position_ids is None:
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
    if token_type_ids is None:
        token_type_ids = jnp.zeros_like(input_ids)
    if attention_mask is None:
        attention_mask = jnp.ones_like(input_ids)
    # Collect any PRNGs needed by stochastic modules.
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng

    def _get_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic):
        # Run only the text tower and project its pooled representation.
        text_outputs = module.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, deterministic=deterministic)
        pooled_output = text_outputs[1]
        text_features = module.text_projection(pooled_output)
        return text_features
    return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), jnp.array(token_type_ids, dtype='i4'), not train, method=_get_features, rngs=rngs)
Args: input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Returns: text_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of text model.
github-repos
def union(self, other):
    """Make a striplog of all unions.

    Args:
        other (Striplog): The striplog instance to union with.

    Returns:
        Striplog: The result of the union.

    Raises:
        StriplogError: if `other` is not a striplog.
    """
    if not isinstance(other, self.__class__):
        m = "You can only union striplogs with each other."
        raise StriplogError(m)
    merged = []
    # Work on a deep copy so the original intervals are never mutated.
    for interval in deepcopy(self):
        for candidate in other:
            if interval.any_overlaps(candidate):
                interval = interval.union(candidate)
        merged.append(interval)
    return Striplog(merged)
Makes a striplog of all unions. Args: Striplog. The striplog instance to union with. Returns: Striplog. The result of the union.
juraj-google-style
def write_unitth(suites, out_dir):
    """Write UnitTH-style test reports, one XML file per test suite.

    Args:
        suites (:obj:`dict`): dictionary mapping class name to a list of
            test case dicts.
        out_dir (:obj:`str`): path to save UnitTH-style test reports.
    """
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    for classname, cases in suites.items():
        doc_xml = minidom.Document()
        # Suite-level summary attributes (counts and total time).
        suite_xml = doc_xml.createElement('testsuite')
        suite_xml.setAttribute('name', classname)
        suite_xml.setAttribute('tests', str(len(cases)))
        suite_xml.setAttribute('errors', str(sum('error' in case for case in cases)))
        suite_xml.setAttribute('failures', str(sum('failure' in case for case in cases)))
        suite_xml.setAttribute('skipped', str(sum('skipped' in case for case in cases)))
        suite_xml.setAttribute('time', '{:.3f}'.format(sum(case['time'] for case in cases)))
        doc_xml.appendChild(suite_xml)
        for case in cases:
            case_xml = doc_xml.createElement('testcase')
            case_xml.setAttribute('classname', classname)
            case_xml.setAttribute('name', case['name'])
            case_xml.setAttribute('time', '{:.3f}'.format(case['time']))
            suite_xml.appendChild(case_xml)
            # Optional skipped/failure/error children each carry type,
            # message and a CDATA body with the captured text.
            if 'skipped' in case:
                skipped_xml = doc_xml.createElement('skipped')
                skipped_xml.setAttribute('type', case['skipped']['type'])
                skipped_xml.setAttribute('message', case['skipped']['message'])
                case_xml.appendChild(skipped_xml)
                skipped_text_xml = doc_xml.createCDATASection(case['skipped']['text'])
                skipped_xml.appendChild(skipped_text_xml)
            if 'failure' in case:
                failure_xml = doc_xml.createElement('failure')
                failure_xml.setAttribute('type', case['failure']['type'])
                failure_xml.setAttribute('message', case['failure']['message'])
                case_xml.appendChild(failure_xml)
                failure_text_xml = doc_xml.createCDATASection(case['failure']['text'])
                failure_xml.appendChild(failure_text_xml)
            if 'error' in case:
                error_xml = doc_xml.createElement('error')
                error_xml.setAttribute('type', case['error']['type'])
                error_xml.setAttribute('message', case['error']['message'])
                case_xml.appendChild(error_xml)
                error_text_xml = doc_xml.createCDATASection(case['error']['text'])
                error_xml.appendChild(error_text_xml)
        with open(os.path.join(out_dir, '{}.xml'.format(classname)), 'w') as output:
            doc_xml.writexml(output, encoding='utf-8', addindent='', newl="")
        doc_xml.unlink()
Write UnitTH-style test reports Args: suites (:obj:`dict`): dictionary of test suites out_dir (:obj:`str`): path to save UnitTH-style test reports
juraj-google-style
def get_pipeline_stage(self, pipeline_key, stage_key = None, sort_by = None):
    """Gets one/all stage objects in a pipeline via a single GET.

    Args:
        pipeline_key: key for the pipeline.
        stage_key: key for a single stage (default: None, i.e. ALL).
        sort_by: descending sort by 'creationTimestamp' or
            'lastUpdatedTimestamp'; may or may not be supported.

    Returns:
        (status code for the GET request, list/dict of stages). For a single
        stage the response dict's values are flattened into a list.
    """
    if not pipeline_key:
        return requests.codes.bad_request, None
    uri = '/'.join([
        self.api_uri,
        self.pipelines_suffix,
        pipeline_key,
        self.stages_suffix
        ])
    if stage_key:
        uri = '/'.join([
            uri,
            stage_key
            ])
    if sort_by:
        # Only the two documented sort fields are accepted.
        if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
            uri += self.sort_by_postfix + sort_by
        else:
            return requests.codes.bad_request, {'success' : 'False', 'error': 'sortBy needs to be \'creationTimestamp\', or \'lastUpdatedTimestamp\''}
    code, data = self._req('get', uri)
    if stage_key:
        # A single stage comes back as a dict keyed by id; return its values.
        data = list(data.values())
    return code, data
Gets a list of one/all stage objects in a pipeline. Performs a single GET. Args: pipeline_key key for pipeline stage_key key for stage (default: None i.e. ALL) sort_by in desc order by 'creationTimestamp' or 'lastUpdatedTimestamp' may or may not be supported returns (status code for the GET request, dict of stages) It is not a list hence the .values() before return
juraj-google-style
def manual_invoice(cls, user, due_delta, description_price_pairs):
    """Generate an invoice for arbitrary items not held in a user's cart.

    Args:
        user (User): The user the invoice is being generated for.
        due_delta (datetime.timedelta): The length until the invoice is due.
        description_price_pairs ([(str, long or Decimal), ...]): Pairs of
            line-item description and price; prices are cast to Decimal.

    Returns:
        An Invoice.
    """
    line_items = [
        commerce.LineItem(description=description, quantity=1, price=Decimal(price), product=None)
        for (description, price) in description_price_pairs
    ]
    # Due time is measured from now.
    min_due_time = timezone.now() + due_delta
    return cls._generate(user, None, min_due_time, line_items)
Generates an invoice for arbitrary items, not held in a user's cart. Arguments: user (User): The user the invoice is being generated for. due_delta (datetime.timedelta): The length until the invoice is due. description_price_pairs ([(str, long or Decimal), ...]): A list of pairs. Each pair consists of the description for each line item and the price for that line item. The price will be cast to Decimal. Returns: an Invoice.
codesearchnet
def generate_entry_label(entry):
    """Generates a label for the pourbaix plotter.

    Args:
        entry (PourbaixEntry or MultiEntry): entry to get a label for.
    """
    if isinstance(entry, MultiEntry):
        # Multi-entries are rendered as a sum of their member labels.
        parts = [latexify_ion(e.name) for e in entry.entry_list]
        return " + ".join(parts)
    return latexify_ion(latexify(entry.name))
Generates a label for the pourbaix plotter Args: entry (PourbaixEntry or MultiEntry): entry to get a label for
juraj-google-style
def easeInBack(n, s=1.70158):
    """A tween that backs up first, then accelerates to the destination.

    Args:
        n (float): Time progress, from 0.0 to 1.0.
        s (float): Overshoot amount; the classic back-ease constant by default.

    Returns:
        (float) Line progress, starting at 0.0 and ending at 1.0. Suitable
        for passing to getPointOnLine().
    """
    _checkRange(n)
    # Cubic-ish curve that dips negative near n=0 before rising to 1.
    overshoot = (s + 1) * n - s
    return n * n * overshoot
A tween function that backs up first at the start and then goes to the destination. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
juraj-google-style
def render(raw_config, environment=None):
    """Renders a config, using it as a template with the environment.

    Args:
        raw_config (str): the raw stacker configuration string.
        environment (dict, optional): environment values passed to the config.

    Returns:
        str: the configuration populated with any values from the environment.

    Raises:
        exceptions.MissingEnvironment: a referenced variable is undefined.
    """
    template = Template(raw_config)
    env = environment or {}
    try:
        rendered = template.substitute(env)
    except KeyError as e:
        raise exceptions.MissingEnvironment(e.args[0])
    except ValueError:
        # Malformed placeholders: fall back to best-effort substitution.
        rendered = template.safe_substitute(env)
    if not isinstance(rendered, str):
        rendered = rendered.decode('utf-8')
    return rendered
Renders a config, using it as a template with the environment. Args: raw_config (str): the raw stacker configuration string. environment (dict, optional): any environment values that should be passed to the config Returns: str: the stacker configuration populated with any values passed from the environment
codesearchnet
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the encoding of the Interval from the input stream.

    Args:
        istream (stream): A buffer containing the encoded bytes of the value
            of an Interval. Usually a BytearrayStream object. Required.
        kmip_version (KMIPVersion): KMIP version used for decoding.
            Optional, defaults to KMIP 1.0.

    Raises:
        InvalidPrimitiveLength: if the encoded length is invalid.
        InvalidPaddingBytes: if the padding bytes are not all zero.
    """
    super(Interval, self).read(istream, kmip_version=kmip_version)
    if (self.length != Interval.LENGTH):
        raise exceptions.InvalidPrimitiveLength('interval length must be {0}'.format(Interval.LENGTH))
    # The value is a big-endian unsigned 32-bit integer.
    self.value = unpack('!I', istream.read(Interval.LENGTH))[0]
    # KMIP pads 4-byte primitives to 8 bytes; padding must be all zeroes.
    pad = unpack('!I', istream.read(Interval.LENGTH))[0]
    if (pad != 0):
        raise exceptions.InvalidPaddingBytes('padding bytes must be zero')
    self.validate()
Read the encoding of the Interval from the input stream. Args: istream (stream): A buffer containing the encoded bytes of the value of an Interval. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: InvalidPrimitiveLength: if the Interval encoding read in has an invalid encoded length. InvalidPaddingBytes: if the Interval encoding read in does not use zeroes for its padding bytes.
codesearchnet
def series_expand(self, param: Symbol, about, order: int):
    """Expand the matrix expression as a truncated power series.

    Args:
        param: Expansion parameter.
        about (.Scalar): Point about which to expand.
        order: Maximum order of expansion >= 0.

    Returns:
        tuple of length (order+1) containing the expansion-coefficient
        matrices.
    """
    s = self.shape
    # Expand every element, then regroup element-coefficients by order.
    emats = zip(*[o.series_expand(param, about, order) for o in self.matrix.ravel()])
    return tuple((Matrix(np_array(em).reshape(s)) for em in emats))
Expand the matrix expression as a truncated power series in a scalar parameter. Args: param: Expansion parameter. about (.Scalar): Point about which to expand. order: Maximum order of expansion >= 0 Returns: tuple of length (order+1), where the entries are the expansion coefficients.
codesearchnet
def generate_multiline_list(self, items, before='', after='', delim=('(', ')'), compact=True, sep=',', skip_last_sep=False):
    """Emit a list of items, one item per line, wrapped in delimiters.

    Convenient for function prototypes and invocations, as well as for
    array/set/map literals in some target languages.

    Args:
        items (list[str]): the items to emit.
        before (str): text emitted before the opening delimiter.
        after (str): text emitted after the closing delimiter.
        delim (str, str): opening and closing delimiter strings.
        compact (bool): when True the delimiters share lines with the
            first and last items; when False they get their own lines.
        sep (str): separator appended after each item.
        skip_last_sep (bool): when compact is False, whether to omit the
            separator after the last item.
    """
    assert ((len(delim) == 2) and isinstance(delim[0], six.text_type) and isinstance(delim[1], six.text_type)), 'delim must be a tuple of two unicode strings.'
    if (len(items) == 0):
        # Empty list collapses to a single line, e.g. "name()".
        self.emit((((before + delim[0]) + delim[1]) + after))
        return
    if (len(items) == 1):
        # Single item also stays on one line, e.g. "name(arg)".
        self.emit(((((before + delim[0]) + items[0]) + delim[1]) + after))
        return
    if compact:
        # First item shares the line with `before` + opening delimiter.
        self.emit((((before + delim[0]) + items[0]) + sep))

        def emit_list(items):
            # Emit remaining items; last one carries closer and `after`.
            items = items[1:]
            for (i, item) in enumerate(items):
                if (i == (len(items) - 1)):
                    self.emit(((item + delim[1]) + after))
                else:
                    self.emit((item + sep))
        # Indent continuation lines so they align just past the delimiter.
        if (before or delim[0]):
            with self.indent((len(before) + len(delim[0]))):
                emit_list(items)
        else:
            emit_list(items)
    else:
        if (before or delim[0]):
            self.emit((before + delim[0]))
        with self.indent():
            for (i, item) in enumerate(items):
                if ((i == (len(items) - 1)) and skip_last_sep):
                    self.emit(item)
                else:
                    self.emit((item + sep))
        if (delim[1] or after):
            self.emit((delim[1] + after))
        elif delim[1]:
            # NOTE(review): unreachable — if delim[1] is truthy the branch
            # above already fired. Left as-is (doc-only change).
            self.emit(delim[1])
Given a list of items, emits one item per line. This is convenient for function prototypes and invocations, as well as for instantiating arrays, sets, and maps in some languages. TODO(kelkabany): A backend that uses tabs cannot be used with this if compact is false. Args: items (list[str]): Should contain the items to generate a list of. before (str): The string to come before the list of items. after (str): The string to follow the list of items. delim (str, str): The first element is added immediately following `before`. The second element is added prior to `after`. compact (bool): In compact mode, the enclosing parentheses are on the same lines as the first and last list item. sep (str): The string that follows each list item when compact is true. If compact is false, the separator is omitted for the last item. skip_last_sep (bool): When compact is false, whether the last line should have a trailing separator. Ignored when compact is true.
codesearchnet
def fix_variables(self, fixed):
    """Fix several variable values, removing each from the model.

    Args:
        fixed (dict): mapping of variable -> assigned value; each pair
            is forwarded to :meth:`fix_variable`.

    Examples:
        >>> bqm = dimod.BinaryQuadraticModel({'a': -.5, 'b': 0., 'c': 5},
        ...                                  {('a', 'b'): -1}, 0.0, dimod.SPIN)
        >>> bqm.fix_variables({'a': -1, 'b': +1})
    """
    for variable, assignment in fixed.items():
        self.fix_variable(variable, assignment)
Fix the value of the variables and remove it from a binary quadratic model. Args: fixed (dict): A dictionary of variable assignments. Examples: >>> bqm = dimod.BinaryQuadraticModel({'a': -.5, 'b': 0., 'c': 5}, {('a', 'b'): -1}, 0.0, dimod.SPIN) >>> bqm.fix_variables({'a': -1, 'b': +1})
juraj-google-style
def enable_napps(cls, napps):
    """Enable a list of NApps.

    Args:
        napps (list): NApps to enable; each entry is an argument tuple
            unpacked into ``NAppsManager.set_napp``.
    """
    mgr = NAppsManager()
    for napp in napps:
        # Point the shared manager at this NApp before enabling it.
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        cls.enable_napp(mgr)
Enable a list of NApps. Args: napps (list): List of NApps.
codesearchnet
def _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining):
    """Apply Grappler's function-inlining optimization to the graph.

    Returns the GraphDef after Grappler's function inlining optimization
    is applied. This optimization does not work on models with control
    flow.

    Args:
        func: ConcreteFunction.
        lower_control_flow: Boolean indicating whether or not to lower
            control flow ops such as If and While.
        aggressive_inlining: Boolean indicating whether or not to do
            aggressive function inlining (might be unsafe if the function
            has stateful ops not properly connected to control outputs).

    Returns:
        GraphDef
    """
    graph_def = func.graph.as_graph_def()
    if not lower_control_flow:
        graph_def = disable_lower_using_switch_merge(graph_def)
    # Drop "api_implements" so Grappler does not swap in alternate
    # implementations of these library functions.
    for function in graph_def.library.function:
        if 'api_implements' in function.attr:
            del function.attr['api_implements']
    meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)
    # Strip initializer names from serialized VariableDefs; initializers
    # are not part of the graph being optimized.
    # NOTE(review): reads collection_def['variables'] on every pass even
    # when writing collection_def[name] — possibly meant to read [name];
    # confirm before changing.
    for name in ['variables', 'model_variables', 'trainable_variables', 'local_variables']:
        raw_list = []
        for raw in meta_graph.collection_def['variables'].bytes_list.value:
            variable = variable_pb2.VariableDef()
            variable.ParseFromString(raw)
            variable.ClearField('initializer_name')
            raw_list.append(variable.SerializeToString())
        meta_graph.collection_def[name].bytes_list.value[:] = raw_list
    # Register every input/output tensor as a fetch so Grappler keeps them.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in func.inputs + func.outputs:
        fetch_collection.node_list.value.append(array.name)
    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)
    config = config_pb2.ConfigProto()
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.min_graph_nodes = -1
    rewrite_options.optimizers.append('function')
    if aggressive_inlining:
        rewrite_options.function_optimization = rewriter_config_pb2.RewriterConfig.AGGRESSIVE
    return tf_optimizer.OptimizeGraph(config, meta_graph)
Apply function inline optimization to the graph. Returns the GraphDef after Grappler's function inlining optimization is applied. This optimization does not work on models with control flow. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops not properly connected to control outputs). Returns: GraphDef
github-repos
def _sysapi_changed_nilrt():
    """Check whether NILinuxRT "System API" configuration was modified.

    System API is NILinuxRT's plugin-based device enumeration and
    configuration interface; changes to its .ini files generally require
    a reboot to take full effect.

    Returns:
        bool: True if any nisysapi .ini file was modified/touched, or if
        the expert plugin file count changed; False otherwise.
    """
    nisysapi_path = '/usr/local/natinst/share/nisysapi.ini'
    if (os.path.exists(nisysapi_path) and _file_changed_nilrt(nisysapi_path)):
        return True
    restartcheck_state_dir = '/var/lib/salt/restartcheck_state'
    # Expert plugin directory is architecture specific (ARM vs x86_64).
    nisysapi_conf_d_path = '/usr/lib/{0}/nisysapi/conf.d/experts/'.format(('arm-linux-gnueabi' if ('arm' in __grains__.get('cpuarch')) else 'x86_64-linux-gnu'))
    if os.path.exists(nisysapi_conf_d_path):
        rs_count_file = '{0}/sysapi.conf.d.count'.format(restartcheck_state_dir)
        if (not os.path.exists(rs_count_file)):
            # No stored baseline count -> assume changed.
            return True
        with salt.utils.files.fopen(rs_count_file, 'r') as fcount:
            current_nb_files = len(os.listdir(nisysapi_conf_d_path))
            rs_stored_nb_files = int(fcount.read())
        # A differing count means a plugin file was added or removed.
        if (current_nb_files != rs_stored_nb_files):
            return True
        for fexpert in os.listdir(nisysapi_conf_d_path):
            if _file_changed_nilrt('{0}/{1}'.format(nisysapi_conf_d_path, fexpert)):
                return True
    return False
Besides the normal Linux kernel driver interfaces, NILinuxRT-supported
hardware features an extensible, plugin-based device enumeration and
configuration interface named "System API". When an installed package
extends the API, it is very hard to know all repercussions and actions
to be taken, so reboot to make sure all drivers are reloaded, hardware
is reinitialized, daemons are restarted, etc.

Returns:
    - True/False depending on whether nisysapi .ini files got
      modified/touched
    - False if no nisysapi .ini files exist
codesearchnet
def search(self, **kwargs):
    """Get movies matching the search query string from the API.

    Args:
        q (optional): plain text search query; remember to URI encode.
        page_limit (optional): number of results per page, default=30.
        page (optional): results page number, default=1.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_path('search')
    response = self._GET(endpoint, kwargs)
    # Mirror the response payload onto attributes of this instance.
    self._set_attrs_to_values(response)
    return response
Get movies that match the search query string from the API.

Args:
    q (optional): plain text search query; remember to URI encode
    page_limit (optional): number of search results to show per page, default=30
    page (optional): results page number, default=1

Returns:
    A dict representation of the JSON returned from the API.
juraj-google-style
def _build(self, inputs, multiplier=1):
    """Connects the Add module into the graph, with input Tensor `inputs`.

    Args:
        inputs: A Tensor of size `[batch_size, input_size1, ...]`.
        multiplier: A scalar or Tensor the bias term is multiplied by
            before adding it to `inputs`. Anything valid in the
            expression `bias * multiplier` is acceptable; e.g. pass
            `multiplier=-1` to subtract a bias added elsewhere.

    Returns:
        A Tensor of size `[batch_size, input_size1, ...]`.

    Raises:
        base.IncompatibleShapeError: If the input is not a >= 2D Tensor,
            if the inferred input size changed between connections, or if
            `output_shape` was specified but does not match the input.
        base.ParentNotBuiltError: If this is a transposed module and the
            original untransposed module has not been built.
    """
    input_shape = tuple(inputs.get_shape().as_list())
    bias_shape = calculate_bias_shape(input_shape, self._bias_dims)
    if (len(input_shape) < 2):
        raise base.IncompatibleShapeError('Rank of input shape must be >=2 not: {}.'.format(len(input_shape)))
    if ((self._input_shape is not None) and (input_shape[1:] != self._input_shape[1:])):
        raise base.IncompatibleShapeError('Input shape has changed.')
    # A transposed module defers its output shape behind a callable until
    # the original module has been built.
    if callable(self._output_shape):
        self._output_shape = self._output_shape()
        if (self._output_shape is None):
            raise base.ParentNotBuiltError('Build the original untransposed module before building this one.')
    if ((self._output_shape is not None) and (self._output_shape[1:] != input_shape[1:])):
        raise base.IncompatibleShapeError('Input shape must be {} not: {}.'.format(self._output_shape, input_shape[1]))
    self._input_shape = input_shape
    dtype = inputs.dtype
    if ('b' not in self._initializers):
        self._initializers['b'] = create_bias_initializer(bias_shape, dtype)
    self._b = tf.get_variable('b', shape=bias_shape, dtype=dtype, initializer=self._initializers['b'], partitioner=self._partitioners.get('b', None), regularizer=self._regularizers.get('b', None))
    bias = self._b
    # Skip the multiply op entirely for the common multiplier == 1 case.
    if (multiplier != 1):
        bias = (bias * multiplier)
    outputs = (inputs + bias)
    return outputs
Connects the Add module into the graph, with input Tensor `inputs`. Args: inputs: A Tensor of size `[batch_size, input_size1, ...]`. multiplier: A scalar or Tensor which the bias term is multiplied by before adding it to `inputs`. Anything which works in the expression `bias * multiplier` is acceptable here. This may be useful if you want to add a bias in one place and subtract the same bias in another place via `multiplier=-1`. Returns: A Tensor of size `[batch_size, input_size1, ...]`. Raises: base.IncompatibleShapeError: If the input is not a >= 2D `Tensor`. base.IncompatibleShapeError: If connecting the module into the graph any time after the first time, and the inferred size of the input does not match previous invocations. base.IncompatibleShapeError: If the `output_shape` has been specified but it does not match the input_shape`. base.ParentNotBuiltError: If the module is a transposed and the original untransposed module has not been built.
codesearchnet
def save_weights_to_hdf5_group(f, layers):
    """Saves the weights of a list of layers to a HDF5 group.

    Args:
        f: HDF5 group.
        layers: List of layer instances.
    """
    # Imported here to avoid a circular import at module load time.
    from tensorflow.python.keras import __version__ as keras_version
    # Record layer names plus backend/version metadata for later loading.
    save_attributes_to_hdf5_group(f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
    f.attrs['backend'] = backend.backend().encode('utf8')
    f.attrs['keras_version'] = str(keras_version).encode('utf8')
    # Sort by name so the file layout is deterministic.
    for layer in sorted(layers, key=lambda x: x.name):
        g = f.create_group(layer.name)
        weights = _legacy_weights(layer)
        weight_values = backend.batch_get_value(weights)
        weight_names = [w.name.encode('utf8') for w in weights]
        save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
        for name, val in zip(weight_names, weight_values):
            param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
            if not val.shape:
                # Scalar dataset: assign via the empty-tuple index.
                param_dset[()] = val
            else:
                param_dset[:] = val
Saves the weights of a list of layers to a HDF5 group. Args: f: HDF5 group. layers: List of layer instances.
github-repos
def result(self, timeout=None):
    """Block until the operation completes, then return its result.

    Args:
        timeout (int): How long (in seconds) to wait for completion.
            If None, wait indefinitely.

    Returns:
        The operation's result.

    Raises:
        Exception: whatever error the operation recorded, if any.
    """
    self._blocking_poll(timeout=timeout)
    error = self._exception
    if error is not None:
        raise error
    return self._result
Get the result of the operation, blocking if necessary. Args: timeout (int): How long (in seconds) to wait for the operation to complete. If None, wait indefinitely. Returns: google.protobuf.Message: The Operation's result. Raises: google.api_core.GoogleAPICallError: If the operation errors or if the timeout is reached before the operation completes.
juraj-google-style
def set(self, *args):
    """Set the value of a single instance of this parameter.

    Args:
        *args: ``(value,)`` if the parameter is scalar, or
            ``(index, value)`` otherwise.

    Raises:
        TypeError: if an indexed value is neither numeric nor a string.
    """
    assert len(args) in (1, 2)
    if len(args) == 1:
        value = args[0]
        self._impl.set(value)
    else:
        index, value = args
        # Indexed instances dispatch on the value's type (double vs str).
        if isinstance(value, Real):
            self._impl.setTplDbl(Tuple(index)._impl, value)
        elif isinstance(value, basestring):
            # NOTE(review): `basestring` implies Python 2 (or a compat
            # shim imported elsewhere in the file) — confirm.
            self._impl.setTplStr(Tuple(index)._impl, value)
        else:
            raise TypeError
Set the value of a single instance of this parameter. Args: args: value if the parameter is scalar, index and value otherwise. Raises: RuntimeError: If the entity has been deleted in the underlying AMPL. TypeError: If the parameter is not scalar and the index is not provided.
juraj-google-style
def load_exons(self, exons, genes=None, build='37'):
    """Build exon objects and insert them into the database.

    Args:
        exons (iterable(dict)): raw exon dicts to build and insert.
        genes (dict, optional): gene lookup; fetched from the database
            for ``build`` when not supplied.
        build (str): genome build used when fetching genes.
    """
    gene_map = genes if genes else self.ensembl_genes(build)
    for raw_exon in exons:
        exon_obj = build_exon(raw_exon, gene_map)
        # Skip entries that could not be built into a valid exon object.
        if exon_obj:
            self.exon_collection.insert_one(exon_obj)
Create exon objects and insert them into the database Args: exons(iterable(dict))
juraj-google-style
def get(self, personId):
    """Get a person's details, by ID.

    Args:
        personId(basestring): The ID of the person to be retrieved.

    Returns:
        Person: A Person object with the details of the requested person.

    Raises:
        TypeError: If the parameter types are incorrect.
    """
    check_type(personId, basestring, may_be_none=False)
    # GET <API_ENDPOINT>/<personId>
    json_data = self._session.get(API_ENDPOINT + '/' + personId)
    return self._object_factory(OBJECT_TYPE, json_data)
Get a person's details, by ID. Args: personId(basestring): The ID of the person to be retrieved. Returns: Person: A Person object with the details of the requested person. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
juraj-google-style
def _GenerateNames(name, fromlist, globals): def GetCurrentPackage(globals): 'Finds the name of the package for the currently executing module.' if (not globals): return None current = globals.get('__name__') if (not current): return None current_file = globals.get('__file__') if (not current_file): return None root = os.path.splitext(os.path.basename(current_file))[0] if (root == '__init__'): return current else: return current.rpartition('.')[0] curpkg = GetCurrentPackage(globals) names = set() for from_entry in (fromlist or []): entry = (((name + '.') + from_entry) if name else from_entry) names.add(entry) if curpkg: names.add(((curpkg + '.') + entry)) while name: names.add(name) if curpkg: names.add(((curpkg + '.') + name)) name = name.rpartition('.')[0] return names
Generates the names of modules that might be loaded via this import. Args: name: Argument as passed to the importer. fromlist: Argument as passed to the importer. globals: Argument as passed to the importer. Returns: A set that contains the names of all modules that are loaded by the currently executing import statement, as they would show up in sys.modules. The returned set may contain module names that were already loaded before the execution of this import statement. The returned set may contain names that are not real modules.
codesearchnet
def _write_object_proto(self, proto, options):
    """Writes additional information of the variable into the SavedObject proto.

    Delegates to ``write_object_proto_for_resource_variable``. Subclasses
    of ResourceVariable may override this to customize what extra
    information is recorded when saving a SavedModel.

    Args:
        proto: `SavedObject` proto to update.
        options: A `SaveOption` instance that configures save behavior.
    """
    write_object_proto_for_resource_variable(self, proto, options)
Writes additional information of the variable into the SavedObject proto. Subclasses of ResourceVariables could choose to override this method to customize extra information to provide when saving a SavedModel. Ideally, this should contain the logic in write_object_proto_for_resource_variable but `DistributedValue` is an outlier at the momemnt. Once `DistributedValue` becomes a proper ResourceVariable, we should remove the helper method below. Args: proto: `SavedObject` proto to update. options: A `SaveOption` instance that configures save behavior.
github-repos
def _set_median_session_metrics(session_group, aggregation_metric): measurements = sorted(_measurements(session_group, aggregation_metric), key=operator.attrgetter('metric_value.value')) median_session = measurements[(len(measurements) - 1) del session_group.metric_values[:] session_group.metric_values.MergeFrom( session_group.sessions[median_session].metric_values)
Sets the metrics for session_group to those of its "median session". The median session is the session in session_group with the median value of the metric given by 'aggregation_metric'. The median is taken over the subset of sessions in the group whose 'aggregation_metric' was measured at the largest training step among the sessions in the group. Args: session_group: A SessionGroup protobuffer. aggregation_metric: A MetricName protobuffer.
juraj-google-style
def init(self):
    """Create a remote session from ``self.desired_capabilities``.

    Sends a NEW_SESSION command and stores the returned session id and
    capabilities on this driver instance.

    Support:
        Android iOS Web(WebView)
    """
    resp = self._execute(Command.NEW_SESSION, {'desiredCapabilities': self.desired_capabilities}, False)
    resp.raise_for_status()
    self.session_id = str(resp.session_id)
    self.capabilities = resp.value
Create Session by desiredCapabilities Support: Android iOS Web(WebView) Returns: WebDriver Object.
codesearchnet
def round_to_nearest(dt, n_round_sec=1.0):
    """Round a datetime up or down to the nearest divisor of seconds.

    Rounds to the nearest multiple of ``n_round_sec`` seconds. Any
    timezone is preserved on the result but ignored during rounding.

    Args:
        dt: datetime to round.
        n_round_sec (int or float): divisor for rounding, e.g. 0.1 for
            the nearest tenth of a second, 30 for the nearest half
            minute.

    Returns:
        datetime: the rounded value, carrying ``dt``'s tzinfo.
    """
    # Adding half the divisor before truncating turns floor into
    # round-to-nearest.
    shifted = ts_from_dt(strip_timezone(dt)) + 0.5 * n_round_sec
    rounded_ts = shifted - shifted % n_round_sec
    return dt_from_ts(rounded_ts).replace(tzinfo=dt.tzinfo)
Round datetime up or down to nearest divisor. Round datetime up or down to nearest number of seconds that divides evenly by the divisor. Any timezone is preserved but ignored in the rounding. Args: dt: datetime n_round_sec : int or float Divisor for rounding Examples: - ``n_round_sec`` = 0.1: nearest 10th of a second. - ``n_round_sec`` = 1: nearest second. - ``n_round_sec`` = 30: nearest half minute.
juraj-google-style
def path_in_cache(self, filename, metahash):
    """Generate the path to a file in the metahash cache.

    Note: a generated path does not imply the file's existence — this
    method raises when the cached file is absent.

    Args:
        filename: filename relative to buildroot.
        metahash: hash object identifying the cached build.

    Returns:
        The cache path, if the file exists there.

    Raises:
        CacheMiss: if the file is not present in the cache.
    """
    candidate = self._genpath(filename, metahash)
    if not os.path.exists(candidate):
        raise CacheMiss
    return candidate
Generates the path to a file in the mh cache. The generated path does not imply the file's existence! Args: filename: Filename relative to buildroot rule: A targets.SomeBuildRule object metahash: hash object
codesearchnet
def get_entry(self, pathname_name):
    """Retrieve the specified child file or directory entry.

    Args:
        pathname_name: The basename of the child object to retrieve.

    Returns:
        The fake file or directory object stored under that name.

    Raises:
        KeyError: if no child exists by the specified name.
    """
    normalized = self._normalized_entryname(pathname_name)
    return self.contents[normalized]
Retrieves the specified child file or directory entry. Args: pathname_name: The basename of the child object to retrieve. Returns: The fake file or directory object. Raises: KeyError: if no child exists by the specified name.
codesearchnet
def check_get_splits(self, query, num_splits, num_entities):
    """A helper method to test the query_splitter get_splits method.

    Args:
        query: the query to be split.
        num_splits: number of splits.
        num_entities: number of scatter entities returned to the splitter.
    """
    # Exercise entities keyed by id (True), by name (False), and a mix
    # of both (None).
    for id_or_name in [True, False, None]:
        if id_or_name is None:
            client_entities = helper.create_client_entities(num_entities, False)
            client_entities.extend(helper.create_client_entities(num_entities, True))
            # Mixed case has twice as many entities; None is last in the
            # loop so the doubled count does not leak into other cases.
            num_entities *= 2
        else:
            client_entities = helper.create_client_entities(num_entities, id_or_name)
        mock_client = mock.MagicMock()
        mock_client_query = mock.MagicMock()
        mock_client_query.fetch.return_value = client_entities
        with mock.patch.object(types.Query, '_to_client_query', return_value=mock_client_query):
            split_queries = query_splitter.get_splits(mock_client, query, num_splits)
        mock_client_query.fetch.assert_called_once()
        # The splitter cannot produce more splits than entities allow.
        expected_num_splits = min(num_splits, num_entities + 1)
        self.assertEqual(len(split_queries), expected_num_splits)
        # The key ranges must chain: each split's lower bound equals the
        # previous split's upper bound, open-ended at both extremes.
        prev_client_key = None
        last_query_seen = False
        for split_query in split_queries:
            self.assertFalse(last_query_seen)
            lt_key = None
            gte_key = None
            for _filter in split_query.filters:
                self.assertEqual(query_splitter.KEY_PROPERTY_NAME, _filter[0])
                if _filter[1] == '<':
                    lt_key = _filter[2]
                elif _filter[1] == '>=':
                    gte_key = _filter[2]
            if lt_key is None and gte_key is None:
                # Unbounded split: only valid when it is the single split.
                self.assertEqual(1, len(split_queries))
                break
            if prev_client_key is None:
                # First split: open lower bound, bounded above.
                self.assertIsNone(gte_key)
                self.assertIsNotNone(lt_key)
                prev_client_key = lt_key
            else:
                self.assertEqual(prev_client_key, gte_key)
                prev_client_key = lt_key
                # An open upper bound marks the final split.
                if lt_key is None:
                    last_query_seen = True
A helper method to test the query_splitter get_splits method. Args: query: the query to be split num_splits: number of splits num_entities: number of scatter entities returned to the splitter.
github-repos
def slice(array, start, size, ty):
    """Return a new array-of-arrays with each inner array truncated.

    Each inner array is sliced starting at index `start` for `size`
    elements.

    NOTE(review): this function shadows the builtin ``slice``; the name
    is kept for backward compatibility with existing callers.

    Args:
        array (WeldObject / numpy.ndarray): Input array.
        start (int): starting index.
        size (int): length to truncate at.
        ty (WeldType): Type of each element in the input array.

    Returns:
        A WeldObject representing this computation.
    """
    weld_obj = WeldObject(encoder_, decoder_)
    array_var = weld_obj.update(array)
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array
    # BUG FIX: the Weld template string literal was missing, so the
    # assignment below referenced `weld_template` before definition
    # (NameError at runtime). Reconstructed per the conventional grizzly
    # slice template — confirm against the project's other templates.
    weld_template = """
       map(
         %(array)s,
         |array: vec[%(ty)s]| slice(array, %(start)sL, %(size)sL)
       )"""
    weld_obj.weld_code = weld_template % {"array": array_var,
                                          "start": start,
                                          "ty": ty,
                                          "size": size}
    return weld_obj
Returns a new array-of-arrays with each array truncated, starting at index `start` for `length` characters. Args: array (WeldObject / Numpy.ndarray): Input array start (int): starting index size (int): length to truncate at ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
juraj-google-style
def _validate_testbed_configs(testbed_configs):
    """Validate the testbed configurations.

    Args:
        testbed_configs: A list of testbed configuration dicts.

    Raises:
        MoblyConfigError: Some part of the configuration is invalid,
            including duplicate testbed names.
    """
    observed = set()
    for tb_config in testbed_configs:
        tb_name = tb_config[keys.Config.key_testbed_name.value]
        # Validate the name itself before checking for duplicates.
        _validate_testbed_name(tb_name)
        if tb_name in observed:
            raise MoblyConfigError('Duplicate testbed name %s found.' % tb_name)
        observed.add(tb_name)
Validates the testbed configurations. Args: testbed_configs: A list of testbed configuration dicts. Raises: MoblyConfigError: Some parts of the configuration is invalid.
juraj-google-style
def cumulative_distribution(self, X):
    """Compute the cumulative distribution function for the copula.

    Integrates the probability density from the lower bound up to each
    coordinate of ``X``.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame` point to evaluate at.

    Returns:
        np.array: cumulative probability.
    """
    self.check_fit()

    def pdf(*point):
        # nquad passes scalar coordinates; the density expects a list.
        return self.probability_density(list(point))

    lower = self.get_lower_bound()
    bounds = [[lower, upper] for upper in X]
    return integrate.nquad(pdf, bounds)[0]
Computes the cumulative distribution function for the copula Args: X: `numpy.ndarray` or `pandas.DataFrame` Returns: np.array: cumulative probability
juraj-google-style
def build_authorization_endpoint(self, request, disable_sso=None):
    """Return the ADFS authorization URL for this request.

    Args:
        request (django.http.request.HttpRequest): A django Request object.
        disable_sso (bool): Whether to disable single sign-on and force
            the ADFS server to show a login prompt.

    Returns:
        str: The redirect URI
    """
    self.load_config()
    redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None)
    if (not redirect_to):
        redirect_to = django_settings.LOGIN_REDIRECT_URL
    # The post-login destination rides along in `state`, base64 encoded.
    redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode()
    query = QueryDict(mutable=True)
    query.update({'response_type': 'code', 'client_id': settings.CLIENT_ID, 'resource': settings.RELYING_PARTY_ID, 'redirect_uri': self.redirect_uri(request), 'state': redirect_to})
    if (self._mode == 'openid_connect'):
        query['scope'] = 'openid'
    # prompt=login forces re-authentication, bypassing SSO cookies; the
    # explicit `disable_sso is True` overrides the global setting.
    if (((disable_sso is None) and settings.DISABLE_SSO) or (disable_sso is True)):
        query['prompt'] = 'login'
    return '{0}?{1}'.format(self.authorization_endpoint, query.urlencode())
This function returns the ADFS authorization URL. Args: request(django.http.request.HttpRequest): A django Request object disable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt. Returns: str: The redirect URI
codesearchnet
def interceptable(func):
    """Wrap ``func`` so that each call is routed through an interceptor.

    The wrapper hands ``func`` to the interceptor active on the current
    thread; when there is no next interceptor, ``func`` executes
    immediately without forwarding to another interceptor.

    Args:
        func: Function to wrap.

    Returns:
        The decorated function.
    """
    @functools.wraps(func)
    def _dispatch(*args, **kwargs):
        with get_next_interceptor() as interceptor:
            return interceptor(func, *args, **kwargs)
    return _dispatch
Decorator that wraps `func` so that its execution is intercepted. The wrapper passes `func` to the interceptor for the current thread. If there is no next interceptor, we perform an "immediate" call to `func`. That is, `func` terminates without forwarding its execution to another interceptor. Args: func: Function to wrap. Returns: The decorated function.
codesearchnet
def dirac_notation(state: Sequence, decimals: int=2) -> str:
    """Return the wavefunction as a string in Dirac notation.

    For example:
        state = np.array([1/np.sqrt(2), 1/np.sqrt(2)], dtype=np.complex64)
        print(dirac_notation(state)) -> 0.71|0⟩ + 0.71|1⟩

    Args:
        state: A sequence representing a wave function; qubit ordering
            follows the standard Kronecker convention of numpy.kron.
        decimals: How many decimals to include in the pretty print.

    Returns:
        A pretty string: a sum of computational-basis kets with non-zero
        amplitudes at the requested precision.
    """
    n_qubits = int(len(state)).bit_length() - 1
    basis_labels = [''.join(bits) for bits in itertools.product('01', repeat=n_qubits)]
    ket = '|{}⟩'
    terms = []
    for idx, label in enumerate(basis_labels):
        fmt = '({:.%dg})' % decimals
        # Round real and imaginary parts independently.
        amp = round(state[idx].real, decimals) + 1j * round(state[idx].imag, decimals)
        if round(amp.real, decimals) == 0 and round(amp.imag, decimals) != 0:
            # Purely imaginary amplitude: print as "<v>j".
            amp = amp.imag
            fmt = '{:.%dg}j' % decimals
        elif round(amp.imag, decimals) == 0 and round(amp.real, decimals) != 0:
            # Purely real amplitude: drop the parentheses and the "+0j".
            amp = amp.real
            fmt = '{:.%dg}' % decimals
        if amp == 0:
            continue
        # Amplitude 1 prints as a bare ket (no coefficient).
        if round(state[idx], decimals) == 1:
            terms.append(ket.format(label))
        else:
            terms.append((fmt + ket).format(amp, label))
    if not terms:
        return '0'
    return ' + '.join(terms).replace(' + -', ' - ')
Returns the wavefunction as a string in Dirac notation. For example: state = np.array([1/np.sqrt(2), 1/np.sqrt(2)], dtype=np.complex64) print(dirac_notation(state)) -> 0.71|0⟩ + 0.71|1⟩ Args: state: A sequence representing a wave function in which the ordering mapping to qubits follows the standard Kronecker convention of numpy.kron. decimals: How many decimals to include in the pretty print. Returns: A pretty string consisting of a sum of computational basis kets and non-zero floats of the specified accuracy.
codesearchnet
def _recreate(self, proto, node_id, nodes):
    """Creates a Python object from a SavedObject protocol buffer.

    Args:
        proto: a SavedObject proto.
        node_id: int, the index of this object in the SavedObjectGraph
            node list.
        nodes: dict mapping int node_ids -> created objects.

    Returns:
        The recreated object, and the set-attribute function for
        reconnecting the trackable children.
    """
    registered_class = registration.get_registered_class(proto.registered_name)
    if registered_class is None:
        # Fall back to the built-in deserializer for this proto kind.
        registered_class = _BUILT_IN_REGISTRATIONS.get(proto.WhichOneof('kind'))
    # Resolve dependency node ids to the already-created objects.
    dependencies = {}
    for key, dep_node_id in self._get_node_dependencies(proto).items():
        dependencies[key] = nodes[dep_node_id]
    if registered_class:
        obj = registered_class._deserialize_from_proto(proto=proto.serialized_user_proto, object_proto=proto, dependencies=dependencies, export_dir=self._export_dir, asset_file_def=self._asset_file_def, operation_attributes=self._operation_attributes)
        if isinstance(obj, base.Trackable):
            setter = type(obj)._add_trackable_child
        else:
            # Non-trackable objects just get plain attribute assignment.
            setter = setattr
        return (obj, setter)
    else:
        return self._recreate_default(proto, node_id, dependencies)
Creates a Python object from a SavedObject protocol buffer. Args: proto: a SavedObject proto node_id: int, the index of this object in the SavedObjectGraph node list. nodes: dict mapping int node_ids -> created objects. Returns: The recreated object, and the set-attribute function for reconnecting the trackable children.
github-repos
def import_module(self, module=None, recursive=False, **params):
    """Create a child space from a module.

    Args:
        module: a module object or name of the module object.
        recursive: Not yet implemented.
        **params: arguments to pass to ``new_space``.

    Returns:
        The new child space created from the module.

    Raises:
        ValueError: if no module is specified.
    """
    if module is None:
        # Backward compatibility with the deprecated 'module_' keyword.
        if "module_" in params:
            warnings.warn(
                "Parameter 'module_' is deprecated. Use 'module' instead.")
            module = params.pop("module_")
        else:
            raise ValueError("no module specified")
    if "bases" in params:
        # Callers pass interface objects; the backend needs the impls.
        params["bases"] = get_impls(params["bases"])
    # Chained assignment: the new space also becomes the current space.
    space = (
        self._impl.model.currentspace
    ) = self._impl.new_space_from_module(
        module, recursive=recursive, **params
    )
    return get_interfaces(space)
Create a child space from an module. Args: module: a module object or name of the module object. recursive: Not yet implemented. **params: arguments to pass to ``new_space`` Returns: The new child space created from the module.
juraj-google-style
def transform_data(input_handle, outfile_prefix, working_dir, schema_file, transform_dir=None, max_rows=None, pipeline_args=None, publish_to_bq=False, project=None, metrics_table=None, metrics_dataset=None):
    """The main tf.transform method which analyzes and transforms data.

    Args:
        input_handle: BigQuery table name to process specified as
            DATASET.TABLE or path to csv file with input data.
        outfile_prefix: Filename prefix for emitted transformed examples.
        working_dir: Directory in which transformed examples and the
            transform function will be emitted.
        schema_file: File path containing a text-serialized TensorFlow
            metadata schema of the input data.
        transform_dir: Directory in which the transform output is
            located. If provided, the transform_fn is loaded from disk
            instead of being computed over the data (useful for
            transforming eval data).
        max_rows: Number of rows to query from BigQuery.
        pipeline_args: additional DataflowRunner or DirectRunner args
            passed to the beam pipeline.
        publish_to_bq: whether to publish pipeline metrics to BigQuery.
        project: GCP project used for the read and metrics publishing.
        metrics_table: BigQuery table for metrics (also the namespace).
        metrics_dataset: BigQuery dataset for metrics.
    """

    def preprocessing_fn(inputs):
        """tf.transform callback mapping raw features to transformed ones."""
        outputs = {}
        # Dense floats are standardized to zero mean / unit variance.
        for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
            outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(_fill_in_missing(inputs[key]))
        # String features become vocabulary indices with OOV buckets.
        for key in taxi.VOCAB_FEATURE_KEYS:
            outputs[taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(_fill_in_missing(inputs[key]), top_k=taxi.VOCAB_SIZE, num_oov_buckets=taxi.OOV_SIZE)
        for key in taxi.BUCKET_FEATURE_KEYS:
            outputs[taxi.transformed_name(key)] = transform.bucketize(_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)
        for key in taxi.CATEGORICAL_FEATURE_KEYS:
            outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])
        taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
        tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
        # Label: did the tip exceed 20% of the fare? (0 when fare is NaN)
        outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(tf.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), tf.cast(tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
        return outputs
    namespace = metrics_table
    metrics_monitor = None
    if publish_to_bq:
        metrics_monitor = MetricsReader(publish_to_bq=publish_to_bq, project_name=project, bq_table=metrics_table, bq_dataset=metrics_dataset, namespace=namespace, filters=MetricsFilter().with_namespace(namespace))
    schema = taxi.read_schema(schema_file)
    raw_feature_spec = taxi.get_raw_feature_spec(schema)
    raw_schema = schema_utils.schema_from_feature_spec(raw_feature_spec)
    raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)
    pipeline = beam.Pipeline(argv=pipeline_args)
    with tft_beam.Context(temp_dir=working_dir):
        query = taxi.make_sql(input_handle, max_rows, for_eval=False)
        raw_data = pipeline | 'ReadBigQuery' >> ReadFromBigQuery(query=query, project=project, use_standard_sql=True) | 'Measure time: start' >> beam.ParDo(MeasureTime(namespace))
        decode_transform = beam.Map(taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)
        if transform_dir is None:
            # Analyze phase: compute the transform_fn and persist it.
            decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
            transform_fn = (decoded_data, raw_data_metadata) | 'Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)
            _ = transform_fn | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir)
        else:
            # Reuse a previously computed transform_fn (e.g. for eval data).
            transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)
        shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()
        decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
        transformed_data, transformed_metadata = ((decoded_data, raw_data_metadata), transform_fn) | 'Transform' >> tft_beam.TransformDataset()
        coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
        _ = transformed_data | 'SerializeExamples' >> beam.Map(coder.encode) | 'Measure time: end' >> beam.ParDo(MeasureTime(namespace)) | 'WriteExamples' >> beam.io.WriteToTFRecord(os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz')
    result = pipeline.run()
    result.wait_until_finish()
    if metrics_monitor:
        metrics_monitor.publish_metrics(result)
The main tf.transform method which analyzes and transforms data. Args: input_handle: BigQuery table name to process specified as DATASET.TABLE or path to csv file with input data. outfile_prefix: Filename prefix for emitted transformed examples working_dir: Directory in which transformed examples and transform function will be emitted. schema_file: An file path that contains a text-serialized TensorFlow metadata schema of the input data. transform_dir: Directory in which the transform output is located. If provided, this will load the transform_fn from disk instead of computing it over the data. Hint: this is useful for transforming eval data. max_rows: Number of rows to query from BigQuery pipeline_args: additional DataflowRunner or DirectRunner args passed to the beam pipeline.
github-repos
def main(raw_args=None):
    """Run the iotile-tbcompile script.

    Args:
        raw_args (list): Optional list of command line arguments. If not
            passed these are pulled from sys.argv.

    Returns:
        int: process exit code — 0 on success, 1 on a usage error or an
        unsupported output format.
    """
    # Formats that emit multiple files and therefore require -o/--output.
    multifile_choices = frozenset(['c_files'])
    if (raw_args is None):
        raw_args = sys.argv[1:]
    parser = build_parser()
    args = parser.parse_args(raw_args)
    if ((args.output is None) and (args.format in multifile_choices)):
        print(('You must specify an output file with -o, --output when using a format that produces multiple files (-f %s)' % args.format))
        return 1
    desc = TBDescriptor(args.bus_definition)
    if (args.format == 'json'):
        print('JSON output is not yet supported')
        return 1
    block = desc.get_block()
    # Map CLI format names onto the template files rendered by the block.
    template_map = {'command_map_c': 'command_map_c.c.tpl', 'command_map_h': 'command_map_c.h.tpl', 'config_map_c': 'config_variables_c.c.tpl', 'config_map_h': 'config_variables_c.h.tpl'}
    template_name = template_map.get(args.format)
    data = block.render_template(template_name)
    print(data)
    return 0
Run the iotile-tbcompile script. Args: raw_args (list): Optional list of command line arguments. If not passed these are pulled from sys.argv.
codesearchnet
def __init__(self, iterable):
    """Construct a Queryable from any iterable.

    Args:
        iterable: Any object supporting the iterator protocol.

    Raises:
        TypeError: if iterable does not support the iterator protocol.
    """
    if not is_iterable(iterable):
        # The [7:-2] slice strips the repr wrapper from str(type(x)).
        # NOTE(review): this matches Python 2's "<type 'int'>"; on
        # Python 3 ("<class 'int'>") it leaves a stray quote — confirm.
        raise TypeError("Cannot construct Queryable from non-iterable {0}"
                        .format(str(type(iterable))[7: -2]))
    self._iterable = iterable
Construct a Queryable from any iterable. Args: iterable: Any object supporting the iterator protocol. Raises: TypeError: if iterable does not support the iterator protocol.
juraj-google-style
def binary_mask_to_rle(mask):
    """Convert a binary mask of shape `(height, width)` to RLE format.

    Args:
        mask (`torch.Tensor` or `numpy.array`): A binary mask where 0
            denotes background and 1 denotes the target segment_id or
            class_id.

    Returns:
        `List`: Run-length encoded list of the binary mask. Refer to the
        COCO API for more information about the RLE format.
    """
    if is_torch_tensor(mask):
        mask = mask.numpy()
    # Pad with zeros so runs touching the borders are detected too.
    padded = np.concatenate([[0], mask.flatten(), [0]])
    # Positions where the value changes mark run starts/ends.
    run_boundaries = np.where(padded[1:] != padded[:-1])[0] + 1
    # Convert every second entry from an end position to a run length.
    run_boundaries[1::2] -= run_boundaries[::2]
    return list(run_boundaries)
Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format.
github-repos
def parse(self, filename):
    """Parses a file into an AstromData structure.

    Args:
        filename: str
            The name of the file whose contents will be parsed.

    Returns:
        data: AstromData
            The file contents extracted into a data structure for
            programmatic access.
    """
    filehandle = storage.open_vos_or_local(filename, 'rb')
    assert (filehandle is not None), 'Failed to open file {} '.format(filename)
    filestr = filehandle.read()
    filehandle.close()
    assert (filestr is not None), 'File contents are None'
    # Observations are parsed first; headers and sources reference them.
    observations = self._parse_observation_list(filestr)
    self._parse_observation_headers(filestr, observations)
    sys_header = self._parse_system_header(filestr)
    sources = self._parse_source_data(filestr, observations)
    return AstromData(observations, sys_header, sources, discovery_only=self.discovery_only)
Parses a file into an AstromData structure. Args: filename: str The name of the file whose contents will be parsed. Returns: data: AstromData The file contents extracted into a data structure for programmatic access.
codesearchnet
def poll_error(self):
    """Return stderr lines collected since the last poll.

    In blocking mode the full error list is returned instead of only the
    new tail.

    Returns:
        list: the lines added since the last call (or all lines when
        ``self.block`` is set).
    """
    if self.block:
        return self.error
    fresh = self.error[self.old_error_size:]
    # Advance the cursor so the next poll only sees newer lines.
    self.old_error_size = self.old_error_size + len(fresh)
    return fresh
Return the stderr lines added to ``self.error`` since the last call.

Returns:
    list: the new lines since the last call, or the full ``self.error``
    list when ``self.block`` is set
codesearchnet
def create_run_config(hp, output_dir=None):
    """Create a run config.

    Args:
        hp: model hyperparameters.
        output_dir: model's output directory; defaults to the output_dir flag.

    Returns:
        a run config
    """
    save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)
    save_ckpt_secs = FLAGS.save_checkpoints_secs or None
    # Second-based checkpointing takes precedence over step-based.
    if save_ckpt_secs:
        save_ckpt_steps = None
    assert FLAGS.output_dir or FLAGS.checkpoint_path
    tpu_config_extra_kwargs = {}
    if FLAGS.tpu_job_name is not None:
        tpu_config_extra_kwargs["tpu_job_name"] = FLAGS.tpu_job_name
    if getattr(hp, "mtf_mode", False):
        # Mesh-TensorFlow mode: disable periodic checkpointing entirely and
        # broadcast the input pipeline to every host.
        save_ckpt_steps = None
        save_ckpt_secs = None
        tpu_config_extra_kwargs = {
            "num_cores_per_replica": 1,
            "per_host_input_for_training": tpu_config.InputPipelineConfig.BROADCAST,
        }
    # Daisy-chaining variables is only supported with float32 activations and
    # weights.
    daisy_chain_variables = (
        hp.daisy_chain_variables and
        hp.activation_dtype == "float32" and
        hp.weight_dtype == "float32")
    return trainer_lib.create_run_config(
        model_name=FLAGS.model,
        model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),
        master=FLAGS.master,
        iterations_per_loop=FLAGS.iterations_per_loop,
        num_shards=FLAGS.tpu_num_shards,
        log_device_placement=FLAGS.log_device_placement,
        save_checkpoints_steps=save_ckpt_steps,
        save_checkpoints_secs=save_ckpt_secs,
        keep_checkpoint_max=FLAGS.keep_checkpoint_max,
        keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,
        num_gpus=FLAGS.worker_gpu,
        gpu_order=FLAGS.gpu_order,
        num_async_replicas=FLAGS.worker_replicas,
        gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,
        enable_graph_rewriter=FLAGS.enable_graph_rewriter,
        use_tpu=FLAGS.use_tpu,
        use_tpu_estimator=FLAGS.use_tpu_estimator,
        xla_jit_level=FLAGS.xla_jit_level,
        schedule=FLAGS.schedule,
        no_data_parallelism=hp.no_data_parallelism,
        optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,
        daisy_chain_variables=daisy_chain_variables,
        ps_replicas=FLAGS.ps_replicas,
        ps_job=FLAGS.ps_job,
        ps_gpu=FLAGS.ps_gpu,
        sync=FLAGS.sync,
        worker_id=FLAGS.worker_id,
        worker_job=FLAGS.worker_job,
        random_seed=FLAGS.random_seed,
        tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,
        inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
        log_step_count_steps=FLAGS.log_step_count_steps,
        intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
        tpu_config_extra_kwargs=tpu_config_extra_kwargs,
        cloud_tpu_name=FLAGS.cloud_tpu_name)
Create a run config. Args: hp: model hyperparameters output_dir: model's output directory, defaults to output_dir flag. Returns: a run config
juraj-google-style
def get_operation(self, name, options=None):
    """Get the latest state of a long-running operation.

    Clients can use this method to poll the operation result at intervals as
    recommended by the API service.

    Args:
        name (string): The name of the operation resource.
        options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g. timeout, retries etc.

    Returns:
        A :class:`google.longrunning.operations_pb2.Operation` instance.

    Raises:
        :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        :exc:`ValueError` if the parameters are invalid.
    """
    req = operations_pb2.GetOperationRequest(name=name)
    return self._get_operation(req, options)
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service. Example: >>> from google.gapic.longrunning import operations_client >>> api = operations_client.OperationsClient() >>> name = '' >>> response = api.get_operation(name) Args: name (string): The name of the operation resource. options (:class:`google.gax.CallOptions`): Overrides the default settings for this call, e.g, timeout, retries etc. Returns: A :class:`google.longrunning.operations_pb2.Operation` instance. Raises: :exc:`google.gax.errors.GaxError` if the RPC is aborted. :exc:`ValueError` if the parameters are invalid.
codesearchnet
def _parse_address(self, config):
    """Parse the configured IP address value out of a config block.

    The provided configuration block is scanned and the configured value for
    the IP address is returned as a dict. If no IP address is configured the
    value is None.

    Args:
        config (str): The interface configuration block to parse.

    Returns:
        dict: A dict object intended to be merged into the resource dict.
    """
    found = re.search(r'ip address ([^\s]+)', config)
    return dict(address=found.group(1) if found else None)
Parses the config block and returns the ip address value. The provided configuration block is scanned and the configured value for the IP address is returned as a dict object. If the IP address value is not configured, then None is returned for the value. Args: config (str): The interface configuration block to parse. Return: dict: A dict object intended to be merged into the resource dict.
juraj-google-style
def getsource(classorfunc):
    """Return the source code for a class or function.

    Notes:
        Returned source will not include any decorators for the object.
        Only the explicit declaration of the object is returned, not any
        dependencies.

    Args:
        classorfunc (type or function): the object to get the source code for.

    Returns:
        str: text of source code (without any decorators). Note: in Python 2
            this returns unicode.
    """
    if _isbuiltin(classorfunc):
        return ''
    try:
        source = inspect.getsource(classorfunc)
    except TypeError:
        # inspect cannot locate the source (e.g. interactively defined);
        # fall back to the custom recovery mechanism.
        source = getsourcefallback(classorfunc)
    declaration = []
    lines = source.splitlines()
    if PY2 and not isinstance(source, unicode):
        # Python 2 byte strings: detect the PEP-263 source encoding and decode.
        encoding = detect_encoding(iter(lines).next)[0]
        sourcelines = (s.decode(encoding) for s in lines)
    else:
        sourcelines = iter(lines)
    # Scan forward to the `def`/`class` keyword (skipping decorators), then
    # collect the (possibly multi-line) declaration up to the first ':'.
    found_keyword = False
    for line in sourcelines:
        words = line.split()
        if not words:
            continue
        if words[0] in ('def', 'class'):
            found_keyword = True
        if found_keyword:
            cind = line.find(':')
            if cind > 0:
                declaration.append(line[:cind + 1])
                # Anything after the ':' on the same line is body text.
                after_decl = line[cind + 1:].strip()
                break
            else:
                declaration.append(line)
    # Whatever is left of the iterator is the body.
    bodylines = list(sourcelines)
    if type(classorfunc) == type:
        cls = classorfunc
        # Rebuild the class declaration with explicit base names and emit
        # import statements so those bases resolve when the source is reused.
        base_imports = {}
        for base in cls.__bases__:
            if base.__name__ == 'object' and base.__module__ == 'builtins':
                continue
            if base in base_imports:
                continue
            if base.__module__ == '__main__':
                continue
            base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)
        cind = declaration[0].index('class ')
        declstring = declaration[0][:cind] + 'class %s(%s):%s' % (
            cls.__name__,
            ','.join([base.__name__ for base in cls.__bases__]),
            after_decl)
        declaration = [impstring for c, impstring in base_imports.items()
                       if c.__module__ != '__builtin__']
        declaration.append(declstring)
    else:
        declaration[-1] += after_decl
    return '\n'.join(declaration + bodylines)
Return the source code for a class or function. Notes: Returned source will not include any decorators for the object. This will only return the explicit declaration of the object, not any dependencies Args: classorfunc (type or function): the object to get the source code for Returns: str: text of source code (without any decorators). Note: in python 2, this returns unicode
juraj-google-style
def __call__(self, shape, dtype, axis=0):
    """Partitions the given `shape` and returns the partition results.

    Subclasses must override this; the base class provides no partitioning
    strategy of its own.

    Args:
        shape: a `tf.TensorShape`, the shape to partition.
        dtype: a `tf.dtypes.Dtype` indicating the type of the partition value.
        axis: The axis to partition along. Default: outermost axis.

    Returns:
        A list of integers representing the number of partitions on each axis,
        where the i-th value corresponds to the i-th axis.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError
Partitions the given `shape` and returns the partition results. Examples of a partitioner that allocates a fixed number of shards: ```python partitioner = FixedShardsPartitioner(num_shards=2) partitions = partitioner(tf.TensorShape([10, 3], tf.float32), axis=0) print(partitions) # [2, 0] ``` Args: shape: a `tf.TensorShape`, the shape to partition. dtype: a `tf.dtypes.Dtype` indicating the type of the partition value. axis: The axis to partition along. Default: outermost axis. Returns: A list of integers representing the number of partitions on each axis, where i-th value corresponds to i-th axis.
github-repos
def _unknown_args(self, args):
    """Log each argparser argument that was not recognized.

    Args:
        args (list): List of unknown arguments.
    """
    for unknown in args:
        self.tcex.log.warning(u'Unsupported arg found ({}).'.format(unknown))
Log argparser unknown arguments. Args: args (list): List of unknown arguments
codesearchnet
def extract_table(tabletag):
    """Extract an HTML table as a list of row dictionaries.

    Args:
        tabletag (Tag): BeautifulSoup tag for the table.

    Returns:
        list[dict]: one dict per body row, keyed by the header cell text.
    """
    theadtag = tabletag.find_next('thead')
    headertags = theadtag.find_all('th')
    if not headertags:
        # Some tables use <td> instead of <th> for the header cells.
        headertags = theadtag.find_all('td')
    headers = [get_text(tag) for tag in headertags]
    tbodytag = tabletag.find_next('tbody')
    rows = []
    for trtag in tbodytag.find_all('tr'):
        cells = trtag.find_all('td')
        rows.append({headers[i]: get_text(cell) for i, cell in enumerate(cells)})
    return rows
Extract HTML table as list of dictionaries Args: tabletag (Tag): BeautifulSoup tag Returns: str: Text of tag stripped of leading and trailing whitespace and newlines and with &nbsp replaced with space
juraj-google-style
def get_sharded_shape(self, shape, shard_index=None):
    """Returns the shape of a shard of a full Tensor.

    When given the shape of a 'full-size' Tensor, returns the shape of the
    sub-Tensor after it has been sharded along ``self._shard_dimension``.

    Args:
        shape: The shape of the full-size Tensor to be sharded.
        shard_index: The index of the shard whose shape should be returned.
            May be None for sharding policies that use the same shape for
            every shard.

    Returns:
        The shape of the sharded version of the Tensor, or None if the policy
        has no shard dimension / shard count configured yet.

    Raises:
        ValueError: If shard_index is out of range; or shape does not have at
            least shard_dimension+1 known dimensions; or the size of shape's
            shard dimension is not a multiple of the number of shards.
    """
    if self._shard_dimension is None or self._number_of_shards is None:
        return None
    if shard_index is not None:
        if shard_index < 0 or shard_index >= self.number_of_shards:
            raise ValueError(f'Requested shard_index {shard_index}, but shard_index must be in [0,{self._number_of_shards}).')
    shape = tensor_shape.as_shape(shape)
    if self._number_of_shards == 1:
        # No sharding: the shard shape is the full shape.
        return shape
    ndims = shape.ndims
    if ndims is None:
        raise ValueError(f'Shape {shape} must be a known shape.')
    if ndims <= self._shard_dimension:
        raise ValueError(f'Shape {shape.as_list()} does not contain shard_dimension {self._shard_dimension}')
    dims = shape.as_list()
    if dims[self._shard_dimension] is None:
        raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known at construction time.')
    if dims[self._shard_dimension] % self._number_of_shards != 0:
        raise ValueError(f'Shape {shape.as_list()} cannot be sharded {self._number_of_shards} ways along dimension {self._shard_dimension}')
    # BUG FIX: the original left this as a bare no-op expression, so the
    # shard dimension was never actually divided and the full shape was
    # returned unchanged, contradicting the documented contract.
    dims[self._shard_dimension] //= self._number_of_shards
    return tensor_shape.TensorShape(dims)
Returns the shape of a shard of a full Tensor. When given the shape of a 'full-size' Tensor, returns the shape of the sub-Tensor after it has been sharded. Freezes the policy if it has not yet been frozen. Args: shape: The shape of the full-size Tensor to be sharded. shard_index: The index of the shard whose shape should be returned. shard_index can be None for sharding policies that use the same shape for every shard. Returns: The shape of the sharded version of the Tensor. Raises: ValueError: If shard_index is None when shards are of different shapes; or shard_index is not None and !(0<=shard_index<number_of_shards); or shape does not have at least self.shard_dimension+1 dimensions; or the value of shape's shard dimension is not a multiple of self.number_of_shards
github-repos
def add(self, data, conn_type, squash=True):
    """Combine this tree with ``data`` using the connector ``conn_type``.

    The combine is done by squashing ``data`` into this node where possible.
    This tree (self) will never be pushed down to a child node of the combined
    tree, nor will its connector or negated properties change. Returns a node
    which can be used in place of ``data`` regardless of whether it was
    squashed or not. If ``squash`` is False the data is simply appended as a
    child of this tree without further logic.

    Args:
        data: The node (or leaf value) to combine with this tree.
        conn_type (str, optional ["AND", "OR"]): connection method.
        squash (bool): whether to attempt flattening ``data`` into this tree.

    Returns:
        The node to use in place of ``data`` after the combine.
    """
    if (data in self.children):
        # Already a direct child; nothing to do.
        return data
    if (not squash):
        self.children.append(data)
        return data
    if (self.connector == conn_type):
        # Same connector: a non-negated subtree with a matching connector
        # (or a single child) can be flattened directly into our children.
        if (isinstance(data, QBase) and (not data.negated) and ((data.connector == conn_type) or (len(data) == 1))):
            self.children.extend(data.children)
            return self
        else:
            self.children.append(data)
            return data
    else:
        # Different connector: push the current children into a new subtree,
        # then pair that subtree with ``data`` under the new connector.
        obj = self._new_instance(self.children, self.connector, self.negated)
        self.connector = conn_type
        self.children = [obj, data]
        return data
Combine this tree and the data represented by `data` using the connector conn_type. The combine is done by squashing the node `data` away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of `data` regardless of whether it got squashed or not. If `squash` is False the data is prepared and added as a child to this tree without further logic. Args: conn_type (str, optional ["AND", "OR"]): connection method
codesearchnet
def adversary(self, name, owner=None, **kwargs):
    """Create an Adversary TI (threat intelligence) object.

    Args:
        name: The name of the Adversary.
        owner: The owner for the Adversary.
        **kwargs: Additional keyword arguments forwarded to Adversary.

    Returns:
        Adversary: The constructed TI object.
    """
    return Adversary(self.tcex, name, owner=owner, **kwargs)
Create the Adversary TI object. Args: owner: name: **kwargs: Return:
juraj-google-style
def slh_associate(a_features, b_features, max_sigma=5):
    """Scott and Longuet-Higgins feature association.

    Takes two lists of features, each a MultivariateNormal instance
    representing a feature location and its associated uncertainty, and pairs
    them up by mutual best match in the SLH association matrix.

    Args:
        a_features (list of MultivariateNormal)
        b_features (list of MultivariateNormal)
        max_sigma (float or int): maximum number of standard deviations two
            features can be separated and still be considered "associated".

    Returns:
        (array): An Nx2 array of feature associations. Column 0 is the index
            into the a_features list, column 1 the index into b_features.
    """
    proximity = _weighted_proximity(a_features, b_features)
    association = _proximity_to_association(proximity)
    if association.shape[0] == 0:
        return np.zeros((0, 2))

    # For each column, the row index holding its maximum — used to test
    # whether a candidate pairing is a *mutual* best match.
    best_row_for_col = np.argmax(association, axis=0)
    # Proximity value corresponding to a max_sigma standard-deviation gap.
    threshold = np.exp(-0.5 * max_sigma * max_sigma)

    pairs = []
    for row_idx, row in enumerate(association):
        if row.shape[0] == 0:
            continue
        col_idx = np.argmax(row)
        if best_row_for_col[col_idx] == row_idx and proximity[row_idx, col_idx] > threshold:
            pairs.append((row_idx, col_idx))

    if not pairs:
        return np.zeros((0, 2))
    return np.vstack(pairs)
An implementation of the Scott and Longuet-Higgins algorithm for feature association. This function takes two lists of features. Each feature is a :py:class:`MultivariateNormal` instance representing a feature location and its associated uncertainty. Args: a_features (list of MultivariateNormal) b_features (list of MultivariateNormal) max_sigma (float or int): maximum number of standard deviations two features can be separated and still considered "associated". Returns: (array): A Nx2 array of feature associations. Column 0 is the index into the a_features list, column 1 is the index into the b_features list.
codesearchnet
async def warn_user(channel, user):
    """Give a user a warning, banning them if they exceed the maximum.

    Args:
        channel: The channel to send the warning message in.
        user: The user to give the warning to.
    """
    data = datatools.get_data()
    server_id = channel.server.id
    # Lazily initialise per-server warning settings with defaults.
    if "warnings_max" not in data["discord"]["servers"][server_id][_data.modulename]:
        data["discord"]["servers"][server_id][_data.modulename]["warnings_max"] = 3
    if "warnings" not in data["discord"]["servers"][server_id][_data.modulename]:
        data["discord"]["servers"][server_id][_data.modulename]["warnings"] = {}
    # Increment (or start) this user's warning count.
    if user.id in data["discord"]["servers"][server_id][_data.modulename]["warnings"]:
        data["discord"]["servers"][server_id][_data.modulename]["warnings"][user.id] += 1
    else:
        data["discord"]["servers"][server_id][_data.modulename]["warnings"][user.id] = 1
    datatools.write_data(data)
    warnings = data["discord"]["servers"][server_id][_data.modulename]["warnings"][user.id]
    max_warnings = data["discord"]["servers"][server_id][_data.modulename]["warnings_max"]
    await client.send_typing(channel)
    embed = ui_embed.user_warning(channel, user, warnings, max_warnings)
    await embed.send()
    # Ban once the warning count reaches the configured maximum.
    if warnings >= max_warnings:
        await ban_user(channel, user)
Gives a user a warning, and bans them if they are over the maximum warnings Args: channel: The channel to send the warning message in user: The user to give the warning to
juraj-google-style
def broadcast_implementation(self, tensor, destinations):
    """Implementation of `broadcast`.

    Args:
        tensor: a `tf.Tensor` like object. The value to broadcast.
        destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a
            `tf.Tensor` alike object, or a device string specifying the
            devices to broadcast to. If it is a `tf.Variable`, the value is
            broadcast to that variable's devices; the variable itself is not
            updated.

    Returns:
        A `tf.Tensor` or `tf.distribute.DistributedValues`.
    """
    return simple_broadcast(
        tensor,
        destinations,
        always_mirrored=True,
        canonicalize_devices=self._canonicalize_devices,
    )
Implementation of `broadcast`. Args: tensor: a `tf.Tensor` like object. The value to broadcast. destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a `tf.Tensor` alike object, or a device string. It specifies the devices to broadcast to. `destinations`. Note that if it's a `tf.Variable`, the value is broadcasted to the devices of that variable, this method doesn't update the variable. Returns: A `tf.Tensor` or `tf.distribute.DistributedValues`.
github-repos
def get_version():
    """Get the current version from ``setup.py``.

    Assumes that importing ``setup.py`` has no side effects (i.e. assumes its
    behavior is guarded by ``if __name__ == "__main__"``).

    Returns:
        str: The current version in ``setup.py``.
    """
    # Stub out the helper modules so that importing setup.py does not pull in
    # their real (possibly platform-specific) implementations.
    for helper in ('setup_helpers', 'setup_helpers_macos', 'setup_helpers_windows'):
        sys.modules[helper] = object()
    setup_path = os.path.join(_ROOT_DIR, 'setup.py')
    loader = importlib.machinery.SourceFileLoader('setup', setup_path)
    setup_mod = loader.load_module()
    return setup_mod.VERSION
Get the current version from ``setup.py``. Assumes that importing ``setup.py`` will have no side-effects (i.e. assumes the behavior is guarded by ``if __name__ == "__main__"``). Returns: str: The current version in ``setup.py``.
codesearchnet
def WriteGraphExecutionTrace(self, graph_execution_trace):
    """Write a GraphExecutionTrace proto with the writer.

    Args:
        graph_execution_trace: A GraphExecutionTrace proto, concerning the
            value of an intermediate tensor or a list of intermediate tensors
            that are computed during the graph's execution.
    """
    event = debug_event_pb2.DebugEvent(
        graph_execution_trace=graph_execution_trace)
    self._EnsureTimestampAdded(event)
    _pywrap_debug_events_writer.WriteGraphExecutionTrace(self._dump_root, event)
Write a GraphExecutionTrace proto with the writer. Args: graph_execution_trace: A GraphExecutionTrace proto, concerning the value of an intermediate tensor or a list of intermediate tensors that are computed during the graph's execution.
github-repos
def _EscapeGlobCharacters(path):
    """Escape the glob wildcard characters in a path.

    Python 3 has glob.escape, but Python 2 lacks it, so the escaping is
    implemented manually. The drive portion is split off first so that drive
    characters are left untouched.

    Args:
        path: The absolute path to escape.

    Returns:
        The escaped path string.
    """
    drive, rest = os.path.splitdrive(path)
    return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', rest))
Escapes the glob characters in a path. Python 3 has a glob.escape method, but python 2 lacks it, so we manually implement this method. Args: path: The absolute path to escape. Returns: The escaped path string.
juraj-google-style
def merge_translations(localization_bundle_path):
    """Merge the new translations with the old ones.

    Freshly translated files are saved with a '.translated' suffix; each is
    merged with the previously translated file for its language.

    Args:
        localization_bundle_path (str): The path to the localization bundle.
    """
    logging.info("Merging translations")
    for lang_dir in os.listdir(localization_bundle_path):
        if lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME:
            # The default language is the source of truth; nothing to merge.
            continue
        pattern = os.path.join(localization_bundle_path, lang_dir,
                               "*" + TRANSLATED_SUFFIX)
        for translated_path in glob.glob(pattern):
            strings_path = translated_path[:-len(TRANSLATED_SUFFIX)]
            localizable_path = os.path.join(localization_bundle_path,
                                            DEFAULT_LANGUAGE_DIRECTORY_NAME,
                                            os.path.basename(strings_path))
            localization_merge_back(localizable_path, strings_path,
                                    translated_path, strings_path)
Merges the new translation with the old one. The translated files are saved as '.translated' file, and are merged with old translated file. Args: localization_bundle_path (str): The path to the localization bundle.
juraj-google-style
def prepend(self, node):
    """Prepend a statement to the current statement.

    Note that multiple calls to prepend will result in the last statement to
    be prepended ending up at the top.

    Args:
        node: The statement to prepend.

    Raises:
        ValueError: If the given node is not a statement.
    """
    if not isinstance(node, grammar.STATEMENTS):
        # Improvement: the original raised a bare ValueError with no message,
        # which made failures hard to diagnose. Callers catching ValueError
        # are unaffected.
        raise ValueError('Expected a statement to prepend, got: %r' % (node,))
    # appendleft ensures the most recently prepended statement sits on top.
    self.to_prepend[-1].appendleft(node)
Prepend a statement to the current statement. Note that multiple calls to prepend will result in the last statement to be prepended to end up at the top. Args: node: The statement to prepend. Raises: ValueError: If the given node is not a statement.
codesearchnet
class Idefics3Encoder(nn.Module):
    """Transformer encoder consisting of `config.num_hidden_layers` self
    attention layers. Each layer is a [`Idefics3EncoderLayer`].

    Args:
        config: Idefics3Config
    """

    def __init__(self, config: Idefics3Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Idefics3EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (e.g. gradient_checkpointing_enable) to trade
        # compute for memory during training.
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        """Run the encoder layer stack over ``inputs_embeds``.

        Args:
            inputs_embeds: Input embeddings fed to the first layer.
            attention_mask: Optional attention mask passed to every layer.
            output_attentions: Whether to return per-layer attention weights.
            output_hidden_states: Whether to return all hidden states.
            return_dict: Whether to return a `BaseModelOutput` instead of a tuple.

        Returns:
            `BaseModelOutput` or tuple of (last_hidden_state, hidden_states,
            attentions), depending on ``return_dict``.
        """
        # Fall back to the model-level config for any unspecified flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                # Record the hidden state *entering* each layer.
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            # Append the final hidden state after the last layer.
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Idefics3EncoderLayer`]. Args: config: Idefics3Config
github-repos
def from_dict(cls, data):
    """Transform a Python dictionary to an Output object.

    Note:
        To pass a serialization cycle multiple times, a Cryptoconditions
        Fulfillment needs to be present in the passed-in dictionary, as
        Condition URIs are not serializable anymore.

    Args:
        data (dict): The dict to be transformed.

    Returns:
        :class:`~bigchaindb.common.transaction.Output`

    Raises:
        AmountError: If the 'amount' value cannot be parsed as an integer.
    """
    try:
        # Prefer the full condition details to rebuild the fulfillment.
        fulfillment = _fulfillment_from_details(data['condition']['details'])
    except KeyError:
        # Details are absent after a serialization round-trip; fall back to
        # the condition URI string.
        fulfillment = data['condition']['uri']
    try:
        amount = int(data['amount'])
    except ValueError:
        raise AmountError(('Invalid amount: %s' % data['amount']))
    return cls(fulfillment, data['public_keys'], amount)
Transforms a Python dictionary to an Output object. Note: To pass a serialization cycle multiple times, a Cryptoconditions Fulfillment needs to be present in the passed-in dictionary, as Condition URIs are not serializable anymore. Args: data (dict): The dict to be transformed. Returns: :class:`~bigchaindb.common.transaction.Output`
codesearchnet
def _call_post_with_session(self, url, payload):
    """Make a POST request using the session object to a SuccessFactors endpoint.

    The cached session is torn down and recreated first if its token has
    expired.

    Args:
        url (str): The url to post to.
        payload (str): The json encoded payload to post.

    Returns:
        tuple: (HTTP status code, response body text)
    """
    if datetime.datetime.utcnow() >= self.expires_at:
        # Token expired: rebuild the authenticated session before posting.
        self.session.close()
        self._create_session()
    response = self.session.post(url, data=payload)
    return (response.status_code, response.text)
Make a post request using the session object to a SuccessFactors endpoint. Args: url (str): The url to post to. payload (str): The json encoded payload to post.
codesearchnet
def copy_script(self, filename, id_=(- 1)):
    """Copy a script to the repo's Script subdirectory.

    Scripts are copied as files to a path, or, on a "migrated" JSS, are
    POSTed to the JSS (pass an id if you wish to associate the script with
    an existing Script object).

    Args:
        filename: Path for file to copy.
        id_: Int ID, used _only_ for migrated repos. Default is -1, which
            creates a new Script.
    """
    if (('jss' in self.connection.keys()) and self.connection['jss'].jss_migrated):
        # Migrated JSS: scripts live in the database, so POST instead of
        # copying onto the distribution point's filesystem.
        self._copy_script_migrated(filename, id_, SCRIPT_FILE_TYPE)
    else:
        basename = os.path.basename(filename)
        self._copy(filename, os.path.join(self.connection['mount_point'],
                                          'Scripts', basename))
Copy a script to the repo's Script subdirectory. Scripts are copied as files to a path, or, on a "migrated" JSS, are POSTed to the JSS (pass an id if you wish to associate the script with an existing Script object). Args: filename: Path for file to copy. id_: Int ID, used _only_ for migrated repos. Default is -1, which creates a new Script.
codesearchnet
def is_single_tree(data_wrapper):
    """Check that the data forms a single tree.

    Only the first point may have a parent ID of -1; any further root points
    indicate a forest rather than a single tree.

    Args:
        data_wrapper: data wrapper exposing the raw ``data_block`` array.

    Returns:
        CheckResult with result and list of offending point IDs.

    Note:
        This assumes no_missing_parents passed.
    """
    db = data_wrapper.data_block
    # BUG FIX: the original line used invalid slice syntax
    # (``db[(:, COLS.P)]`` / ``[(1:, COLS.ID)]``), a SyntaxError. Reconstructed
    # as boolean-mask indexing: select root rows (parent == -1), skip the
    # first legitimate root, and report the IDs of the rest.
    bad_ids = db[db[:, COLS.P] == -1][1:, COLS.ID]
    return CheckResult(len(bad_ids) == 0, bad_ids.tolist())
Check that the data forms a single tree. Only the first point has a parent ID of -1. Returns: CheckResult with result and list of offending point IDs. Note: This assumes no_missing_parents passed.
codesearchnet
def extract_ranges(index_list, range_size_limit=32):
    """Extract consecutive ranges and singles from index_list.

    Args:
        index_list: List of monotone increasing non-negative integers.
        range_size_limit: Largest size range to return. If a larger
            consecutive range exists it will be returned as multiple ranges.

    Returns:
        (ranges, singles) where ranges is a list of [first, last] pairs of
        consecutive elements in index_list, and singles is all of the other
        elements, in original order.
    """
    if not index_list:
        return ([], [])

    ranges = []
    singles = []
    first = last = index_list[0]

    def flush():
        # Emit the pending run: a range when it spans more than one element,
        # otherwise a single.
        if last > first:
            ranges.append([first, last])
        else:
            singles.append(first)

    for idx in index_list[1:]:
        if idx == last + 1 and (last - first) <= range_size_limit:
            # Still consecutive and under the size limit: extend the run.
            last = idx
        else:
            flush()
            first = last = idx
    flush()
    return (ranges, singles)
Extract consecutive ranges and singles from index_list. Args: index_list: List of monotone increasing non-negative integers. range_size_limit: Largest size range to return. If a larger consecutive range exists it will be returned as multiple ranges. Returns: ranges, singles where ranges is a list of [first, last] pairs of consecutive elements in index_list, and singles is all of the other elements, in original order.
codesearchnet
def __init__(self, logger=None, timeout=60):
    """Constructor.

    Args:
        logger: logger object, used to write to SysLog and serial port;
            falls back to the root logging module when not supplied.
        timeout: int, timeout in seconds for metadata requests.
    """
    # etag tracks the last-seen metadata version for long-poll requests.
    self.etag = 0
    self.logger = logger or logging
    self.timeout = timeout
Constructor. Args: logger: logger object, used to write to SysLog and serial port. timeout: int, timeout in seconds for metadata requests.
juraj-google-style