code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _apply_base_theme(app):
    """Apply the base theme to the application.

    Args:
        app (QApplication): QApplication instance.
    """
    # Qt4 ships 'plastique'; Qt5+ ships 'Fusion'.
    style = 'plastique' if QT_VERSION < (5,) else 'Fusion'
    app.setStyle(style)
    with open(_STYLESHEET) as stylesheet:
        app.setStyleSheet(stylesheet.read())
Apply base theme to the application. Args: app (QApplication): QApplication instance.
juraj-google-style
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value.

    Note that both `then_expression` and `else_expression` should be
    symbolic tensors of the *same shape*.

    Args:
        condition: tensor (`int` or `bool`).
        then_expression: either a tensor, or a callable that returns a tensor.
        else_expression: either a tensor, or a callable that returns a tensor.

    Returns:
        The selected tensor.

    Raises:
        ValueError: If rank of `condition` is greater than rank of expressions.
    """
    if condition.dtype != dtypes_module.bool:
        condition = math_ops.cast(condition, 'bool')
    cond_ndim = ndim(condition)
    if not cond_ndim:
        # Scalar condition: use tf.cond, which requires callables.
        if not callable(then_expression):
            def then_expression_fn():
                return then_expression
        else:
            then_expression_fn = then_expression
        if not callable(else_expression):
            def else_expression_fn():
                return else_expression
        else:
            else_expression_fn = else_expression
        x = cond.cond(condition, then_expression_fn, else_expression_fn)
    else:
        # Non-scalar condition: use element-wise selection (tf.where).
        if callable(then_expression):
            then_expression = then_expression()
        if callable(else_expression):
            else_expression = else_expression()
        expr_ndim = ndim(then_expression)
        if cond_ndim > expr_ndim:
            raise ValueError('Rank of `condition` should be less than or equal to rank of `then_expression` and `else_expression`. ndim(condition)=' + str(cond_ndim) + ', ndim(then_expression)=' + str(expr_ndim))
        if cond_ndim > 1:
            # Pad the condition's shape with trailing 1s and tile it so
            # it broadcasts against the expressions for where_v2.
            ndim_diff = expr_ndim - cond_ndim
            cond_shape = array_ops.concat([array_ops.shape(condition), [1] * ndim_diff], axis=0)
            condition = array_ops.reshape(condition, cond_shape)
            expr_shape = array_ops.shape(then_expression)
            shape_diff = expr_shape - cond_shape
            tile_shape = array_ops.where_v2(shape_diff > 0, expr_shape, array_ops.ones_like(expr_shape))
            condition = array_ops.tile(condition, tile_shape)
        x = array_ops.where_v2(condition, then_expression, else_expression)
    return x
Switches between two operations depending on a scalar value. Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. Args: condition: tensor (`int` or `bool`). then_expression: either a tensor, or a callable that returns a tensor. else_expression: either a tensor, or a callable that returns a tensor. Returns: The selected tensor. Raises: ValueError: If rank of `condition` is greater than rank of expressions.
github-repos
def put_async(self, path, value):
    """Puts a value to a path and returns immediately.

    Args:
        path (list): The path to put to.
        value (object): The value to set.

    Returns:
        Future: A single Future which will resolve to the result.
    """
    put_request = Put(self._get_next_id(), path, value)
    # Completed responses are funneled back through the queue.
    put_request.set_callback(self._q.put)
    return self._dispatch_request(put_request)
Puts a value to a path and returns immediately Args: path (list): The path to put to value (object): The value to set Returns: Future: A single Future which will resolve to the result
juraj-google-style
def show_tricky_tasks(self, verbose=0):
    """Print the list of tricky tasks, i.e. tasks that have been restarted,
    launched more than once, or that have corrections.

    Args:
        verbose: Verbosity level. If > 0, task history and corrections
            (if any) are printed.
    """
    tricky = [
        task for task in self.iflat_tasks()
        if task.num_launches > 1
        or task.num_restarts > 0
        or task.num_corrections > 0
    ]
    if not tricky:
        cprint("Everything's fine, no tricky tasks found", color="green")
        return
    nids = [task.node_id for task in tricky]
    self.show_status(nids=nids)
    if not verbose:
        print("Use --verbose to print task history.")
        return
    for task in tricky:
        cprint(repr(task), **task.status.color_opts)
        self.show_history(nids=[task.node_id], full_history=False, metadata=False)
        if task.num_corrections:
            self.show_corrections(nids=[task.node_id])
Print list of tricky tasks i.e. tasks that have been restarted or launched more than once or tasks with corrections. Args: verbose: Verbosity level. If > 0, task history and corrections (if any) are printed.
juraj-google-style
def add_args(self, args):
    """Copy command-line arguments onto this object.

    Every attribute of ``args`` whose value is not None is stored on
    ``self`` under the upper-cased attribute name.

    Args:
        args (namespace): The commandline args.
    """
    for name, value in vars(args).items():
        if value is None:
            continue
        setattr(self, name.upper(), value)
Add the args Args: args (namespace): The commandline args
juraj-google-style
def getRow(self, key):
    """Get a row by value of the indexing columns.

    If the index is not specified, gets the only row of a dataframe
    with no indexing columns.

    Args:
        key: Tuple representing the index of the desired row.

    Returns:
        The row.
    """
    key_impl = Tuple(key)._impl
    return Row(self._impl.getRow(key_impl))
Get a row by value of the indexing columns. If the index is not specified, gets the only row of a dataframe with no indexing columns. Args: key: Tuple representing the index of the desired row. Returns: The row.
codesearchnet
def get_inlined_extension_url(field: descriptor.FieldDescriptor) -> str:
    """Returns the FHIR inlined extension URL for a field.

    Args:
        field: The FieldDescriptor to examine.

    Returns:
        The FHIR inlined extension URL if one exists, otherwise the
        camel-case name of the FieldDescriptor.
    """
    options = annotation_utils.get_options(field)
    extension = annotations_pb2.fhir_inlined_extension_url
    if not options.HasExtension(extension):
        # No annotation present: fall back to the field's camel-case name.
        return field.camelcase_name
    return options.Extensions[extension]
Returns the FHIR inlined extension URL for a field. Args: field: The FieldDescriptor to examine. Returns: The FHIR inlined extension URL, if one exists, otherwise returns the camel-case name of the FieldDescriptor.
github-repos
def __init__(self, binary_line_reader, delimiter):
    """Initializes the delimited separated values reader.

    Args:
        binary_line_reader (BinaryLineReader): a binary file reader.
        delimiter (bytes): field delimiter.
    """
    super(BinaryDSVReader, self).__init__()
    self._line_reader = binary_line_reader
    self._delimiter = delimiter
Initializes the delimited separated values reader. Args: binary_line_reader (BinaryLineReader): a binary file reader delimiter (bytes): field delimiter.
juraj-google-style
def query(self, rank):
    """Retrieves the value estimate for the requested quantile rank.

    The requested quantile rank must be registered in the estimator's
    invariants a priori!

    Args:
        rank: A floating point quantile rank along the interval [0, 1].

    Returns:
        A numeric value for the quantile estimate.
    """
    self._flush()
    current = self._head
    if (not current):
        # No observations recorded yet.
        return 0
    # Target rank expressed as an absolute observation count.
    mid_rank = math.floor((rank * self._observations))
    # Upper bound on the acceptable rank, per the estimator's invariant.
    max_rank = (mid_rank + math.floor((self._invariant(mid_rank, self._observations) / 2)))
    rank = 0.0
    # Walk the summary chain; stop just before the cumulative rank of the
    # successor would exceed the bound.
    while current._successor:
        rank += current._rank
        if (((rank + current._successor._rank) + current._successor._delta) > max_rank):
            return current._value
        current = current._successor
    return current._value
Retrieves the value estimate for the requested quantile rank. The requested quantile rank must be registered in the estimator's invariants a priori! Args: rank: A floating point quantile rank along the interval [0, 1]. Returns: A numeric value for the quantile estimate.
codesearchnet
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None):
    """Stream this media in a single (non-chunked) request.

    Args:
        callback: Progress callback function with inputs
            (http_wrapper.Response, transfer object).
        finish_callback: Final callback function with inputs
            (http_wrapper.Response, transfer object).
        additional_headers: Dict of headers to include with the
            http_wrapper.Request.

    Returns:
        http_wrapper.Response of final response.
    """
    return self.__StreamMedia(
        callback=callback,
        finish_callback=finish_callback,
        additional_headers=additional_headers,
        use_chunks=False)
Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response.
codesearchnet
def contains(self, sub):
    """Find all words containing a substring.

    Args:
        sub: A substring to be searched for.

    Returns:
        A list of all words found.
    """
    needle = sub.lower()
    result = cgaddag.gdg_contains(self.gdg, needle.encode(encoding='ascii'))
    words = set()
    node = result
    # Walk the C linked list of results.
    while node:
        words.add(node.contents.str.decode('ascii'))
        node = node.contents.next
    cgaddag.gdg_destroy_result(result)
    return list(words)
Find all words containing a substring. Args: sub: A substring to be searched for. Returns: A list of all words found.
codesearchnet
def _TypeCompatibilityCheck(self, type_params):
    """Check if the types form a linear inheritance chain.

    Used for cases such as:
        class A(Sequence[A]): pass
        class B(A, Sequence[B]): pass
        class C(B, Sequence[C]): pass
    where the Sequence type parameter in C could be A, B or C, which is
    acceptable only if A -> B -> C is a linear chain.

    Args:
        type_params: The class type params.

    Returns:
        True if all the types are compatible.
    """
    concrete = {t for t in type_params if not isinstance(t, pytd.AnythingType)}
    if not all(isinstance(t, pytd.ClassType) for t in concrete):
        return False
    # Sorted by MRO size, each MRO must contain the previous (smaller) one.
    mro_sets = sorted((set(mro.GetBasesInMRO(t.cls)) for t in concrete), key=len)
    previous = set()
    for bases in mro_sets:
        if not bases.issuperset(previous):
            return False
        previous = bases
    return True
Check if the types are compatible. It is used to handle the case: class A(Sequence[A]): pass class B(A, Sequence[B]): pass class C(B, Sequence[C]): pass In class `C`, the type parameter `_T` of Sequence could be `A`, `B` or `C`. Next we will check they have a linear inheritance relationship: `A` -> `B` -> `C`. Args: type_params: The class type params. Returns: True if all the types are compatible.
github-repos
def __init__(self, options=None):
    """Initialize currency and number formatting settings.

    Args:
        options (dict, optional): settings configuration object used to
            override the defaults. Defaults to None.
    """
    # BUG FIX: the mutable default argument ``options={}`` was replaced
    # with None to avoid sharing one dict object across all calls.
    settings = {
        'currency': {
            'symbol': "$",
            'format': "%s%v",
            'decimal': ".",
            'thousand': ",",
            'precision': 2,
            'grouping': 3
        },
        'number': {
            'precision': 0,
            'grouping': 3,
            'thousand': ",",
            'decimal': "."
        }
    }
    if options:
        settings.update(options)
    self.settings = settings
Initialize currency and number formatting settings. Args: options (dict, optional): settings configuration object used to override the defaults.
juraj-google-style
def set_inheritance(obj_name, enabled, obj_type='file', clear=False):
    """Enable or disable an object's inheritance.

    Args:
        obj_name (str): The name of the object.
        enabled (bool): True to enable inheritance, False to disable.
        obj_type (Optional[str]): The type of object. Only three objects
            allow inheritance: 'file' (default), 'registry', 'registry32'.
        clear (Optional[bool]): True to clear existing ACEs, False to keep
            existing ACEs. Default is False.

    Returns:
        bool: True if successful, otherwise an Error.
    """
    if obj_type not in ['file', 'registry', 'registry32']:
        # BUG FIX: the error message previously interpolated obj_name
        # instead of the offending obj_type.
        raise SaltInvocationError(
            'obj_type called with incorrect parameter: {0}'.format(obj_type))
    if clear:
        # Start from an empty DACL so existing ACEs are dropped.
        obj_dacl = dacl(obj_type=obj_type)
    else:
        obj_dacl = dacl(obj_name, obj_type)
    return obj_dacl.save(obj_name, not enabled)
Enable or disable an objects inheritance. Args: obj_name (str): The name of the object enabled (bool): True to enable inheritance, False to disable obj_type (Optional[str]): The type of object. Only three objects allow inheritance. Valid objects are: - file (default): This is a file or directory - registry - registry32 (for WOW64) clear (Optional[bool]): True to clear existing ACEs, False to keep existing ACEs. Default is False Returns: bool: True if successful, otherwise an Error Usage: .. code-block:: python salt.utils.win_dacl.set_inheritance('C:\\Temp', False)
codesearchnet
def is_int(string):
    """Checks if a string represents an integer value.

    Args:
        string: a string to test.

    Returns:
        bool: True if the string parses as a float with an integral value,
        False otherwise.
    """
    try:
        as_float = float(string)
        as_int = int(as_float)
    except (ValueError, OverflowError):
        # BUG FIX: OverflowError covers inputs such as 'inf' that parse
        # as float but cannot be converted to int; previously this
        # escaped as an uncaught exception.
        return False
    else:
        return as_float == as_int
Checks if a string is an integer. If the string value is an integer return True, otherwise return False. Args: string: a string to test. Returns: boolean
juraj-google-style
def sign(mv):
    """Obtains a signature for a `MetricValue`.

    Args:
        mv (MetricValue): a MetricValue that's part of an operation.

    Returns:
        bytes: a unique signature for that operation.
    """
    hasher = hashlib.md5()
    update_hash(hasher, mv)
    return hasher.digest()
Obtains a signature for a `MetricValue` Args: mv (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricValue`): a MetricValue that's part of an operation Returns: string: a unique signature for that operation
codesearchnet
def __rmfile(path):
    """Delete a file.

    Args:
        path (str): Path to the file that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("rmfile: %s" % path)
    try:
        os.remove(path)
    except Exception as e:
        logger.error("rmfile: %s failed! Error: %s" % (path, e))
        return False
    return True
Delete a file. Args: path (str): Path to the file that needs to be deleted. Returns: bool: True if the operation is successful, False otherwise.
juraj-google-style
def get_messages(module):
    """Discovers all protobuf Message classes in a given import module.

    Args:
        module (module): A Python module; :func:`dir` will be run against
            this module to find Message subclasses.

    Returns:
        dict[str, google.protobuf.message.Message]: A dictionary with the
        Message class names as keys, and the Message subclasses themselves
        as values.
    """
    found = collections.OrderedDict()
    for name in dir(module):
        candidate = getattr(module, name)
        is_message_class = inspect.isclass(candidate) and issubclass(candidate, message.Message)
        if is_message_class:
            found[name] = candidate
    return found
Discovers all protobuf Message classes in a given import module. Args: module (module): A Python module; :func:`dir` will be run against this module to find Message subclasses. Returns: dict[str, google.protobuf.message.Message]: A dictionary with the Message class names as keys, and the Message subclasses themselves as values.
codesearchnet
def deserialize_function(serial, function_type):
    """Deserializes the Keras-serialized function.

    (De)serializing Python functions from/to bytecode is unsafe, so the
    function's type is recorded alongside: a 'lambda' is reloaded from
    bytecode, while a named 'function' is resolved from the Python scope.

    Args:
        serial: Serialized Keras object: typically a dict, string, or bytecode.
        function_type: Python string denoting 'function' or 'lambda'.

    Returns:
        function: Function the serialized Keras object represents.
    """
    if function_type == 'function':
        return tf.keras.utils.deserialize_keras_object(serial)
    if function_type == 'lambda':
        return generic_utils.func_load(serial)
    raise TypeError('Unknown function type:', function_type)
Deserializes the Keras-serialized function. (De)serializing Python functions from/to bytecode is unsafe. Therefore we also use the function's type as an anonymous function ('lambda') or named function in the Python environment ('function'). In the latter case, this lets us use the Python scope to obtain the function rather than reload it from bytecode. (Note that both cases are brittle!) Keras-deserialized functions do not perform lexical scoping. Any modules that the function requires must be imported within the function itself. This serialization mimicks the implementation in `tf.keras.layers.Lambda`. Args: serial: Serialized Keras object: typically a dict, string, or bytecode. function_type: Python string denoting 'function' or 'lambda'. Returns: function: Function the serialized Keras object represents. #### Examples ```python serial, function_type = serialize_function(lambda x: x) function = deserialize_function(serial, function_type) assert function(2.3) == 2.3 # function is identity ```
codesearchnet
def UpdateNumberOfEventSources(self, number_of_consumed_sources, number_of_produced_sources):
    """Updates the number of event sources.

    Args:
        number_of_consumed_sources (int): total number of event sources
            consumed by the process.
        number_of_produced_sources (int): total number of event sources
            produced by the process.

    Returns:
        bool: True if either number of event sources has increased.

    Raises:
        ValueError: if the consumed or produced number of event sources is
            smaller than the value of the previous update.
    """
    consumed_sources_delta = 0
    if number_of_consumed_sources is not None:
        if number_of_consumed_sources < self.number_of_consumed_sources:
            raise ValueError('Number of consumed sources smaller than previous update.')
        consumed_sources_delta = number_of_consumed_sources - self.number_of_consumed_sources
        self.number_of_consumed_sources = number_of_consumed_sources
        self.number_of_consumed_sources_delta = consumed_sources_delta

    produced_sources_delta = 0
    if number_of_produced_sources is not None:
        if number_of_produced_sources < self.number_of_produced_sources:
            raise ValueError('Number of produced sources smaller than previous update.')
        produced_sources_delta = number_of_produced_sources - self.number_of_produced_sources
        self.number_of_produced_sources = number_of_produced_sources
        self.number_of_produced_sources_delta = produced_sources_delta

    return consumed_sources_delta > 0 or produced_sources_delta > 0
Updates the number of event sources. Args: number_of_consumed_sources (int): total number of event sources consumed by the process. number_of_produced_sources (int): total number of event sources produced by the process. Returns: bool: True if either number of event sources has increased. Raises: ValueError: if the consumed or produced number of event sources is smaller than the value of the previous update.
codesearchnet
def conv(self, conv_input: core.Tensor) -> Mapping[str, core.Tensor]:
    """Performs a 2D convolution operation.

    Args:
        conv_input: Input tensor to perform convolution on.

    Returns:
        A map of: output key -> output result.
    """
    result = nn_ops.conv2d(
        conv_input,
        self.conv_filters,
        strides=[1, 1, 2, 1],
        dilations=[1, 1, 1, 1],
        padding='SAME',
        data_format='NHWC')
    return {'output': result}
Performs a 2D convolution operation. Args: conv_input: Input tensor to perform convolution on. Returns: A map of: output key -> output result.
github-repos
def get_pmg_structure(phonopy_structure):
    """Convert a PhonopyAtoms object to a pymatgen Structure object.

    Args:
        phonopy_structure (PhonopyAtoms): A phonopy structure object.

    Returns:
        Structure: the equivalent pymatgen structure with per-site
        'phonopy_masses' and 'magnetic_moments' site properties.
    """
    lattice = phonopy_structure.get_cell()
    frac_coords = phonopy_structure.get_scaled_positions()
    symbols = phonopy_structure.get_chemical_symbols()
    masses = phonopy_structure.get_masses()
    mms = phonopy_structure.get_magnetic_moments()
    # BUG FIX: `mms or [...]` raises "truth value of an array is ambiguous"
    # when get_magnetic_moments() returns a multi-element NumPy array;
    # test against None explicitly instead.
    if mms is None:
        mms = [0] * len(symbols)
    return Structure(
        lattice, symbols, frac_coords,
        site_properties={'phonopy_masses': masses, 'magnetic_moments': mms})
Convert a PhonopyAtoms object to pymatgen Structure object. Args: phonopy_structure (PhonopyAtoms): A phonopy structure object.
codesearchnet
def set_name(self, name):
    """Set the room name.

    Requires admin privileges; returns False immediately otherwise.

    Args:
        name (str): Name.

    Returns:
        bool. Success.
    """
    if not self._campfire.get_user().admin:
        return False
    result = self._connection.put("room/%s" % self.id, {"room": {"name": name}})
    if result["success"]:
        # Refresh cached room state so the new name is visible.
        self._load()
    return result["success"]
Set the room name. Args: name (str): Name Returns: bool. Success
juraj-google-style
def __init__(self, application_namespace=None, application_data=None):
    """Construct an ApplicationSpecificInformation object.

    Args:
        application_namespace (ApplicationNamespace): The name of a
            namespace supported by the server. Optional, defaults to None.
        application_data (ApplicationData): String data relevant to the
            specified namespace. Optional, defaults to None.
    """
    super(ApplicationSpecificInformation, self).__init__(
        Tags.APPLICATION_SPECIFIC_INFORMATION)
    # Fall back to empty sub-structures when not provided.
    self.application_namespace = (
        application_namespace if application_namespace is not None
        else ApplicationNamespace())
    self.application_data = (
        application_data if application_data is not None
        else ApplicationData())
    self.validate()
Construct an ApplicationSpecificInformation object. Args: application_namespace (ApplicationNamespace): The name of a namespace supported by the server. Optional, defaults to None. application_data (ApplicationData): String data relevant to the specified namespace. Optional, defaults to None.
juraj-google-style
def get_config_bool_option(parser: ConfigParser, section: str, option: str,
                           default: bool = None) -> bool:
    """Retrieves a boolean option from a config parser.

    Args:
        parser: instance of :class:`ConfigParser`.
        section: section name within config file.
        option: option (variable) name within that section.
        default: value to return if option is absent.

    Returns:
        boolean value of the option, or ``default`` if the option is absent.

    Raises:
        ValueError: if the section is absent.
    """
    if not parser.has_section(section):
        raise ValueError('config missing section: ' + section)
    return parser.getboolean(section, option, fallback=default)
Retrieves a boolean value from a parser. Args: parser: instance of :class:`ConfigParser` section: section name within config file option: option (variable) name within that section default: value to return if option is absent Returns: boolean value of the option, or ``default`` if the option is absent Raises: ValueError: if the section is absent
codesearchnet
def initial_value(self):
    """Returns the Tensor used as the initial value for the variable.

    Note that this is different from `initialized_value()` which runs the
    op that initializes the variable before returning its value. This
    method returns the tensor that is used by the op that initializes the
    variable.

    Returns:
        A `Tensor`.

    Raises:
        NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError
Returns the Tensor used as the initial value for the variable. Note that this is different from `initialized_value()` which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A `Tensor`.
github-repos
def kill_pid(self, pid):
    """Kill a process by pid.

    Args:
        pid (int): process identifier.
    """
    try:
        proc = psutil.Process(pid)
        proc.terminate()
        self.info_log('Killed [pid:%s][name:%s]' % (proc.pid, proc.name()))
    except psutil.NoSuchProcess:
        self.error_log('No such process: [pid:%s]' % pid)
Kill process by pid Args: pid (int)
juraj-google-style
def _generate_shape(word: str) -> str:
    """Recreate the shape of a token input by the user.

    Runs of identical consecutive characters longer than four are
    truncated to four characters; shorter runs are kept verbatim.

    Args:
        word: str

    Returns:
        str
    """
    def run_lengths(text) -> List[int]:
        # Lengths of maximal runs of equal consecutive characters.
        lengths = [1]
        for i in range(1, len(text)):
            if text[i] == text[i - 1]:
                lengths[-1] += 1
            else:
                lengths.append(1)
        return lengths

    pieces = []
    pos = 0
    for run in run_lengths(word):
        pieces.append(word[pos:pos + min(run, 4)])
        pos += run
    return "".join(pieces)
Recreate shape from a token input by user Args: word: str Returns: str
juraj-google-style
def qr(x, mode='reduced'):
    """Computes the QR decomposition of a tensor.

    Args:
        x: Input tensor of shape `(..., M, N)`.
        mode: A string specifying the mode of the QR decomposition:
            'reduced' (default) or 'complete'.

    Returns:
        A tuple `(q, r)` where `q` has shape `(..., M, K)` and `r` has
        shape `(..., K, N)`, with `K = min(M, N)`.
    """
    if any_symbolic_tensors((x,)):
        # Defer to the symbolic op when any input is symbolic.
        return Qr(mode=mode).symbolic_call(x)
    tensor = backend.convert_to_tensor(x)
    return backend.linalg.qr(tensor, mode=mode)
Computes the QR decomposition of a tensor. Args: x: Input tensor of shape `(..., M, N)`. mode: A string specifying the mode of the QR decomposition. - 'reduced': Returns the reduced QR decomposition. (default) - 'complete': Returns the complete QR decomposition. Returns: A tuple containing two tensors. The first tensor of shape `(..., M, K)` is the orthogonal matrix `q` and the second tensor of shape `(..., K, N)` is the upper triangular matrix `r`, where `K = min(M, N)`. Example: >>> x = keras.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]]) >>> q, r = qr(x) >>> print(q) array([[-0.16903079 0.897085] [-0.5070925 0.2760267 ] [-0.8451542 -0.34503305]], shape=(3, 2), dtype=float32)
github-repos
def set_expected_update_frequency(self, update_frequency):
    """Set expected update frequency.

    Args:
        update_frequency (str): Update frequency.

    Returns:
        None

    Raises:
        HDXError: if the supplied update frequency is invalid.
    """
    try:
        int(update_frequency)
    except ValueError:
        # Not already numeric: translate a textual frequency (e.g.
        # 'every week') into its numeric code.
        update_frequency = Dataset.transform_update_frequency(update_frequency)
    if not update_frequency:
        raise HDXError('Invalid update frequency supplied!')
    self.data['data_update_frequency'] = update_frequency
Set expected update frequency Args: update_frequency (str): Update frequency Returns: None
juraj-google-style
def compare_profiles(profile1, profile2):
    """Determine the ratio of similarity between two profile strings.

    Counts positions where both strings hold the same character and
    divides by the length of the first profile.

    Args:
        profile1 (str): profile string.
        profile2 (str): profile string.

    Returns:
        float: the ratio of similarity (0-1).
    """
    length = len(profile1)
    chars1 = np.array(list(profile1))
    chars2 = np.array(list(profile2))
    matches = np.sum(chars1 == chars2)
    return matches / length
Given two profiles, determine the ratio of similarity, i.e. one minus the normalized hamming distance between the strings. Args: profile1/2 (str): profile string Returns: similarity_ratio (float): the ratio of similarity (0-1)
juraj-google-style
def plot(self, data):
    """Plots a Facets Overview for a dictionary of dataframes.

    Args:
        data: a dictionary with key the name, and value the dataframe.

    Returns:
        IPython HTML object embedding the facets-overview element.

    Raises:
        ValueError: if data is not a dict of DataFrames.
    """
    import IPython

    if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):
        raise ValueError('Expect a dictionary where the values are all dataframes.')
    gfsg = GenericFeatureStatisticsGenerator()
    data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]
    data_proto = gfsg.ProtoFromDataFrames(data)
    protostr = base64.b64encode(data_proto.SerializeToString()).decode("utf-8")
    html_id = 'f' + datalab.utils.commands.Html.next_id()
    # NOTE(review): the original HTML template literal was lost in
    # extraction; this is the standard facets-overview embedding snippet
    # reconstructed from the surrounding code — verify against the
    # original source.
    HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html">
<facets-overview id="{html_id}"></facets-overview>
<script>
  document.querySelector("#{html_id}").protoInput = "{protostr}";
</script>"""
    html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)
    return IPython.core.display.HTML(html)
Plots an overview in a list of dataframes Args: data: a dictionary with key the name, and value the dataframe.
juraj-google-style
def fail_request(self, orig_request, message, start_response):
    """Write an immediate failure response to outfile, no redirect.

    This calls start_response and returns the error body.

    Args:
        orig_request: An ApiRequest, the original request from the user.
        message: A string containing the error message to be displayed to user.
        start_response: A function with semantics defined in PEP-333.

    Returns:
        A string containing the body of the error response.
    """
    return util.send_wsgi_error_response(
        message,
        start_response,
        cors_handler=self._create_cors_handler(orig_request))
Write an immediate failure response to outfile, no redirect. This calls start_response and returns the error body. Args: orig_request: An ApiRequest, the original request from the user. message: A string containing the error message to be displayed to user. start_response: A function with semantics defined in PEP-333. Returns: A string containing the body of the error response.
juraj-google-style
def _decode_doubles(message):
    """Helper for decode_qp, decodes a double array.

    The double array is stored as little endian 64 bit doubles that have
    then been base64 encoded; decoding reverses those steps.

    Args:
        message: the base64-encoded double array.

    Returns:
        The decoded doubles (a tuple from struct.unpack).
    """
    binary = base64.b64decode(message)
    # NOTE(review): the tail of this expression was truncated in the
    # source dump; reconstructed assuming 8 bytes per little-endian
    # double — verify against the original.
    count = len(binary) // 8
    return struct.unpack('<' + ('d' * count), binary)
Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array
codesearchnet
def unitary(val: Any, default: TDefault=RaiseTypeErrorIfNotProvided) -> Union[(np.ndarray, TDefault)]:
    """Returns a unitary matrix describing the given value.

    If `val` has a `_unitary_` method whose result is not NotImplemented,
    that result is returned. Otherwise, for a cirq.Gate or cirq.Operation,
    decomposition is attempted and the resulting unitary is returned when
    available. Failing that, `default` is returned if provided.

    Args:
        val: The value to describe with a unitary matrix.
        default: Fallback value when `val` has no unitary matrix. If not
            set, a TypeError is raised instead.

    Returns:
        The unitary matrix, or `default`.

    Raises:
        TypeError: `val` has no `_unitary_` method (or it returned
            NotImplemented) and no default value was specified.
    """
    from cirq import Gate, Operation

    getter = getattr(val, '_unitary_', None)
    if getter is not None:
        result = getter()
        if result is not NotImplemented:
            return result
    if isinstance(val, (Gate, Operation)):
        decomposed_unitary = _decompose_and_get_unitary(val)
        if decomposed_unitary is not None:
            return decomposed_unitary
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    if getter is None:
        raise TypeError("object of type '{}' has no _unitary_ method.".format(type(val)))
    raise TypeError("object of type '{}' does have a _unitary_ method, but it returned NotImplemented.".format(type(val)))
Returns a unitary matrix describing the given value. Args: val: The value to describe with a unitary matrix. default: Determines the fallback behavior when `val` doesn't have a unitary matrix. If `default` is not set, a TypeError is raised. If default is set to a value, that value is returned. Returns: If `val` has a _unitary_ method and its result is not NotImplemented, that result is returned. Otherwise, if `val` is a cirq.Gate or cirq.Operation, decomposition will be attempted and the resulting unitary is returned if unitaries exist for all operations of the decompostion. If the result is still NotImplemented and a default value was specified, the default value is returned. Raises: TypeError: `val` doesn't have a _unitary_ method (or that method returned NotImplemented) and also no default value was specified.
codesearchnet
class RunThresholdCriterion(beam.PTransform[beam.PCollection[NestedKeyedOutputT], beam.PCollection[NestedKeyedOutputT]]):
    """Applies a threshold criterion to anomaly detection results.

    This PTransform applies a `ThresholdFn` to the anomaly scores in
    `AnomalyResult` objects, updating the prediction labels. It handles
    both stateful and stateless `ThresholdFn` implementations.

    Args:
        threshold_criterion: The `ThresholdFn` to apply.
    """

    def __init__(self, threshold_criterion: ThresholdFn):
        self._threshold_fn = threshold_criterion

    def expand(self, input: beam.PCollection[NestedKeyedOutputT]) -> beam.PCollection[NestedKeyedOutputT]:
        spec = self._threshold_fn.to_spec()
        # Stateful threshold functions need a keyed, state-aware DoFn.
        if self._threshold_fn.is_stateful:
            threshold_dofn = _StatefulThresholdDoFn(spec)
        else:
            threshold_dofn = _StatelessThresholdDoFn(spec)
        return input | beam.ParDo(threshold_dofn)
Applies a threshold criterion to anomaly detection results. This PTransform applies a `ThresholdFn` to the anomaly scores in `AnomalyResult` objects, updating the prediction labels. It handles both stateful and stateless `ThresholdFn` implementations. Args: threshold_criterion: The `ThresholdFn` to apply.
github-repos
def get_visualizations():
    """Get the available visualizations from the request context.

    Lazily instantiates the visualizations and caches them on the request
    context on first use.

    Returns:
        dict mapping class name to instances of
        :class:`.BaseVisualization` or derived class.
    """
    if not hasattr(g, 'visualizations'):
        instances = (cls(get_model()) for cls in _get_visualization_classes())
        g.visualizations = {vis.__class__.__name__: vis for vis in instances}
    return g.visualizations
Get the available visualizations from the request context. Put the visualizations in the request context if they are not yet there. Returns: :obj:`list` of instances of :class:`.BaseVisualization` or derived class
codesearchnet
def from_text(cls, text, lexicon, required=None, first_only=True):
    """Generate a Component from a text string, using a Lexicon.

    Args:
        text (str): The text string to parse.
        lexicon (Lexicon): The dictionary to use for the categories and
            lexemes.
        required (str): An attribute that we must have. If a required
            attribute is missing from the component, then None is returned.
        first_only (bool): Whether to only take the first match of a
            lexeme against the text string.

    Returns:
        Component: A Component object, or None if a must-have field was
        missing.
    """
    component = lexicon.get_component(text, first_only=first_only)
    if required and required not in component:
        return None
    return cls(component)
Generate a Component from a text string, using a Lexicon. Args: text (str): The text string to parse. lexicon (Lexicon): The dictionary to use for the categories and lexemes. required (str): An attribute that we must have. If a required attribute is missing from the component, then None is returned. first_only (bool): Whether to only take the first match of a lexeme against the text string. Returns: Component: A Component object, or None if there was no must-have field.
codesearchnet
def validlocations(configuration=None):
    """Read valid locations from HDX.

    Results are cached on the class after the first call.

    Args:
        configuration (Optional[Configuration]): HDX configuration.
            Defaults to global configuration.

    Returns:
        List[Dict]: A list of valid locations.
    """
    if Locations._validlocations is None:
        cfg = configuration if configuration is not None else Configuration.read()
        Locations._validlocations = cfg.call_remoteckan('group_list', {'all_fields': True})
    return Locations._validlocations
Read valid locations from HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[Dict]: A list of valid locations
juraj-google-style
def handle_arguments(self, string, root, opening, closing):
    """Handles phrase-arguments.

    Sets the override and increment flags if found. Also makes sure that
    the argument sequence is at the start of the phrase and otherwise
    warns about the unescaped meta characters. If the arguments are at
    the start but do not match the arguments regular expression, an
    error is raised.

    Args:
        string (str): The string being parsed.
        root (str): The current root phrase.
        opening (int): The index of the opening paranthese.
        closing (int): The index of the closing paranthese.

    Returns:
        The (possibly escaped) string, the root phrase (if no escaping,
        then with arguments and flags) and the next meta match.

    Raises:
        errors.ParseError: If the arguments are invalid.
    """
    args = string[(opening + 1):closing].replace(' ', '')
    if ((opening > 0) or (not self.arguments.match(args))):
        # Arguments must sit at the very start of the phrase; if they do
        # and are still malformed, that is a hard error.
        if (opening == 0):
            raise errors.ParseError('Invalid argument sequence!')
        # Otherwise treat both parentheses as unescaped meta characters.
        (string, meta) = self.escape_meta(string, opening)
        (string, meta) = self.escape_meta(string, meta.start())
        return (string, root, meta)
    if ('!' in args):
        root.override = True
        args = args.replace('!', '')
    if ('+' in args):
        root.increment = True
        args = args.replace('+', '')
    root.arguments = [int(i) for i in args.split(',') if i]
    # Continue parsing after the closing paranthese.
    string = string[(closing + 1):]
    meta = self.meta.search(string)
    return (string, root, meta)
Handles phrase-arguments. Sets the override and increment flags if found. Also makes sure that the argument sequence is at the start of the phrase and else warns about the unescaped meta characters. If the arguments are indeed at the start but do not match the arguments regular expression, an error is raised. Arguments: string (str): The string being parsed. root (str): The current root phrase. opening (int): The index of the opening paranthese. closing (int): The index of the closing paranthese. Returns: The (possibly escaped) string, the root phrase (if no escaping, then with arguments and flags) and the next meta match. Raises: errors.ParseError: If the arguments are invalid.
codesearchnet
def retrieve_review(self, reviewer, product):
    """Retrieve review that the given reviewer put the given product.

    Args:
        reviewer: An instance of Reviewer.
        product: An instance of Product.

    Returns:
        A review object.

    Raises:
        TypeError: when given reviewer and product aren't instance of
            specified reviewer and product class when this graph is
            constructed.
        KeyError: When the reviewer does not review the product.
    """
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    if not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    try:
        return self.graph[reviewer][product]["review"]
    except TypeError:
        raise KeyError(
            "{0} does not review {1}.".format(reviewer, product))
Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product.
juraj-google-style
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
    """Computes the number of placeholder tokens needed for multimodal inputs.

    Args:
        image_sizes (`List[List[int]]`, *optional*):
            The input sizes formatted as (height, width) per each image.
        video_sizes (`List[List[int]]`, *optional*):
            The input sizes formatted as (num_frames, height, width) per
            each video.

    Returns:
        `MultiModalData`: A `MultiModalData` object holding number of
        tokens per each of the provided input modalities, along with other
        useful data.
    """
    # NOTE(review): the token-count comprehensions were truncated in the
    # source dump; reconstructed as patches // merge_size**2 (spatial
    # merging collapses merge_size x merge_size patches into one token)
    # — verify against the original.
    vision_data = {}
    if image_sizes is not None:
        images_kwargs = Qwen2_5_VLProcessorKwargs._defaults.get('images_kwargs', {})
        images_kwargs.update(kwargs)
        merge_size = images_kwargs.get('merge_size', None) or self.image_processor.merge_size
        num_image_patches = [
            self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
            for image_size in image_sizes
        ]
        num_image_tokens = [num_patches // (merge_size**2) for num_patches in num_image_patches]
        vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
    if video_sizes is not None:
        videos_kwargs = Qwen2_5_VLProcessorKwargs._defaults.get('videos_kwargs', {})
        videos_kwargs.update(kwargs)
        merge_size = videos_kwargs.get('merge_size', None) or self.video_processor.merge_size
        num_video_patches = [
            self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
            for video_size in video_sizes
        ]
        num_video_tokens = [num_patches // (merge_size**2) for num_patches in num_video_patches]
        vision_data['num_video_tokens'] = num_video_tokens
    return MultiModalData(**vision_data)
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`List[List[int]]`, *optional*): The input sizes formatted as (height, width) per each image. video_sizes (`List[List[int]]`, *optional*): The input sizes formatted as (num_frames, height, width) per each video. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data.
github-repos
def sackin(self, normalize='leaves'):
    """Compute the Sackin balance index of this ``Tree``

    Args:
        ``normalize`` (``str``): How to normalize the Sackin index (if at all)

            * ``None`` to not normalize

            * ``"leaves"`` to normalize by the number of leaves

            * ``"yule"`` to normalize to the Yule model

            * ``"pda"`` to normalize to the Proportional to Distinguishable
              Arrangements model

    Returns:
        ``float``: Sackin index (either normalized or not)
    """
    num_nodes_from_root = dict()
    sackin = 0
    num_leaves = 0
    for node in self.traverse_preorder():
        # Number of nodes on the path from the root to this node.
        num_nodes_from_root[node] = 1
        if (not node.is_root()):
            num_nodes_from_root[node] += num_nodes_from_root[node.parent]
        if node.is_leaf():
            # Leaf depth excludes the leaf itself; the Sackin index is the
            # sum of leaf depths.
            num_nodes_from_root[node] -= 1
            sackin += num_nodes_from_root[node]
            num_leaves += 1
    if ((normalize is None) or (normalize is False)):
        return sackin
    elif (not isinstance(normalize, str)):
        raise TypeError('normalize must be None or a string')
    normalize = normalize.lower()
    if (normalize == 'leaves'):
        return (float(sackin) / num_leaves)
    elif (normalize == 'yule'):
        # Expected Sackin under the Yule model uses the harmonic sum.
        x = sum(((1.0 / i) for i in range(2, (num_leaves + 1))))
        return ((sackin - ((2 * num_leaves) * x)) / num_leaves)
    elif (normalize == 'pda'):
        return (sackin / (num_leaves ** 1.5))
    else:
        raise RuntimeError("normalize must be None, 'leaves', 'yule', or 'pda'")
Compute the Sackin balance index of this ``Tree`` Args: ``normalize`` (``str``): How to normalize the Sackin index (if at all) * ``None`` to not normalize * ``"leaves"`` to normalize by the number of leaves * ``"yule"`` to normalize to the Yule model * ``"pda"`` to normalize to the Proportional to Distinguishable Arrangements model Returns: ``float``: Sackin index (either normalized or not)
codesearchnet
def Collect(self, knowledge_base, artifact_definition, searcher):
    """Collects values using a Windows Registry value artifact definition.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing
            information.
        artifact_definition (artifacts.ArtifactDefinition): artifact
            definition.
        searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher
            to preprocess the Windows Registry.

    Raises:
        PreProcessFail: if the Windows Registry key or value cannot be
            read.
    """
    for source in artifact_definition.sources:
        # Only Windows Registry key/value sources are relevant here.
        if (source.type_indicator not in (artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY, artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE)):
            continue
        if (source.type_indicator == artifact_definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
            # Key sources carry only key paths; normalize to the same
            # key/value pair shape value sources use.
            key_value_pairs = [{'key': key} for key in source.keys]
        else:
            key_value_pairs = source.key_value_pairs
        for key_value_pair in key_value_pairs:
            key_path = key_value_pair['key']
            key_path_upper = key_path.upper()
            # Expand the %%CURRENT_CONTROL_SET%% placeholder (23 chars).
            if key_path_upper.startswith('%%CURRENT_CONTROL_SET%%'):
                key_path = '{0:s}{1:s}'.format('HKEY_LOCAL_MACHINE\\System\\CurrentControlSet', key_path[23:])
            find_spec = registry_searcher.FindSpec(key_path_glob=key_path)
            for key_path in searcher.Find(find_specs=[find_spec]):
                try:
                    registry_key = searcher.GetKeyByPath(key_path)
                except IOError as exception:
                    raise errors.PreProcessFail('Unable to retrieve Windows Registry key: {0:s} with error: {1!s}'.format(key_path, exception))
                if registry_key:
                    value_name = key_value_pair.get('value', None)
                    self._ParseKey(knowledge_base, registry_key, value_name)
Collects values using a Windows Registry value artifact definition. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. artifact_definition (artifacts.ArtifactDefinition): artifact definition. searcher (dfwinreg.WinRegistrySearcher): Windows Registry searcher to preprocess the Windows Registry. Raises: PreProcessFail: if the Windows Registry key or value cannot be read.
codesearchnet
def save_headers(cls, filename: str, response: HTTPResponse):
    """Prepend the HTTP response header to the file.

    Writes the header followed by the body into a sibling ``-new`` file,
    then atomically replaces the original file with it.

    Args:
        filename: The path of the file.
        response: Response whose header and body are written.
    """
    new_filename = filename + '-new'
    # BUG FIX: previously ``open('wb')`` passed 'wb' as the *filename*
    # (opening a file literally named "wb" in default read mode); the
    # destination path and the mode must both be given.
    with open(new_filename, 'wb') as new_file:
        new_file.write(response.header())
        with wpull.util.reset_file_offset(response.body):
            response.body.seek(0)
            shutil.copyfileobj(response.body, new_file)
    os.remove(filename)
    os.rename(new_filename, filename)
Prepend the HTTP response header to the file. Args: filename: The path of the file response: Response
juraj-google-style
def insort_event_right(self, event, lo=0, hi=None):
    """Insert event in queue, and keep it sorted assuming queue is sorted.

    If event is already in queue, insert it to the right of the rightmost
    event (to keep FIFO order). Optional args lo (default 0) and hi
    (default len(queue)) bound the slice of the queue to be searched.

    Args:
        event: a (time in sec since unix epoch, callback, args, kwds) tuple.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(self.queue)
    # Binary search for the rightmost insertion point, comparing only on
    # the event time (event[0]). The midpoint computation was truncated
    # in the source dump and is restored here.
    while lo < hi:
        mid = (lo + hi) // 2
        if event[0] < self.queue[mid][0]:
            hi = mid
        else:
            lo = mid + 1
    self.queue.insert(lo, event)
Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple.
codesearchnet
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the MACSignatureKeyInformation struct and decode
    it into its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object data,
            supporting a read method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP version
            with which the object will be decoded. Optional, defaults to
            KMIP 1.0.

    Raises:
        ValueError: if the required unique identifier field is missing.
    """
    # The base read() decodes the struct header and sets self.length.
    super(MACSignatureKeyInformation, self).read(input_stream, kmip_version=kmip_version)
    # Work on a bounded sub-stream so over-reads can be detected below.
    local_stream = BytearrayStream(input_stream.read(self.length))
    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
        self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)
        self._unique_identifier.read(local_stream, kmip_version=kmip_version)
    else:
        # The unique identifier is mandatory for this struct.
        raise ValueError('Invalid struct missing the unique identifier attribute.')
    # Cryptographic parameters are optional.
    if self.is_tag_next(enums.Tags.CRYPTOGRAPHIC_PARAMETERS, local_stream):
        self._cryptographic_parameters = CryptographicParameters()
        self._cryptographic_parameters.read(local_stream, kmip_version=kmip_version)
    # Fails if unread bytes remain in the bounded sub-stream.
    self.is_oversized(local_stream)
Read the data encoding the MACSignatureKeyInformation struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
codesearchnet
def retrieve_artifacts(self, compose_data, output_data_config, job_name):
    """Get the model artifacts from all the container nodes.

    Used after training completes to gather the data from all the individual
    containers. Duplicate file names across containers are overridden, as in
    the hosted SageMaker Training Service.

    Args:
        compose_data (dict): Docker-Compose configuration in dictionary format.
        output_data_config (dict): output configuration; 'S3OutputPath' selects
            the destination ('' keeps artifacts on the local filesystem).
        job_name (str): name of the training job, used in the destination path.

    Returns:
        str: path (local or S3) to the collected model.tar.gz.
    """
    artifacts = os.path.join(self.container_root, 'artifacts')
    compressed_artifacts = os.path.join(self.container_root, 'compressed_artifacts')
    os.mkdir(artifacts)
    model_artifacts = os.path.join(artifacts, 'model')
    output_artifacts = os.path.join(artifacts, 'output')
    artifact_dirs = [model_artifacts, output_artifacts, compressed_artifacts]
    for d in artifact_dirs:
        os.mkdir(d)
    # Copy model/output data out of every container's mounted volumes.
    for host in self.hosts:
        volumes = compose_data['services'][str(host)]['volumes']
        for volume in volumes:
            (host_dir, container_dir) = volume.split(':')
            if (container_dir == '/opt/ml/model'):
                sagemaker.local.utils.recursive_copy(host_dir, model_artifacts)
            elif (container_dir == '/opt/ml/output'):
                sagemaker.local.utils.recursive_copy(host_dir, output_artifacts)
    model_files = [os.path.join(model_artifacts, name) for name in os.listdir(model_artifacts)]
    output_files = [os.path.join(output_artifacts, name) for name in os.listdir(output_artifacts)]
    sagemaker.utils.create_tar_file(model_files, os.path.join(compressed_artifacts, 'model.tar.gz'))
    sagemaker.utils.create_tar_file(output_files, os.path.join(compressed_artifacts, 'output.tar.gz'))
    if (output_data_config['S3OutputPath'] == ''):
        # No S3 destination configured: artifacts stay local. BUG FIX: this
        # assignment was truncated in the source ("output_data = ('file:");
        # reconstructed as a file:// URL to the compressed artifacts dir.
        output_data = 'file://{}'.format(compressed_artifacts)
    else:
        output_data = sagemaker.local.utils.move_to_destination(compressed_artifacts, output_data_config['S3OutputPath'], job_name, self.sagemaker_session)
    _delete_tree(model_artifacts)
    _delete_tree(output_artifacts)
    return os.path.join(output_data, 'model.tar.gz')
Get the model artifacts from all the container nodes. Used after training completes to gather the data from all the individual containers. As the official SageMaker Training Service, it will override duplicate files if multiple containers have the same file names. Args: compose_data(dict): Docker-Compose configuration in dictionary format. Returns: Local path to the collected model artifacts.
codesearchnet
def victim_asset_associations(
    self, main_type, sub_type, unique_id, branch_type, owner=None, params=None
):
    """Yield victim asset associations for the given entity.

    Args:
        main_type: primary entity type segment of the URL.
        sub_type: optional sub-type segment; omitted from the URL when falsy.
        unique_id: identifier of the entity.
        branch_type: victim asset branch (e.g. phone, email).
        owner: optional owner filter, added to the query parameters.
        params: optional dict of extra query parameters (mutated in place
            when owner is set, matching the original behavior).

    Yields:
        Victim asset association records from the API.
    """
    params = params or {}
    if owner:
        params['owner'] = owner
    if sub_type:
        url = '/v2/{}/{}/{}/victimAssets/{}'.format(
            main_type, sub_type, unique_id, branch_type
        )
    else:
        url = '/v2/{}/{}/victimAssets/{}'.format(main_type, unique_id, branch_type)
    yield from self._iterate(url, params, 'victimAsset')
Args: owner: main_type: sub_type: unique_id: branch_type: params: Return:
juraj-google-style
def get_scan_plot(self, coords=None):
    """Get a matplotlib plot of the potential energy surface.

    Args:
        coords: internal coordinate name to use as abscissa; falls back to
            plain point indices when absent or not found in the scan data.

    Returns:
        The matplotlib.pyplot module with the scan plotted on the current
        figure (energies relative to the minimum, in eV).
    """
    from pymatgen.util.plotting import pretty_plot
    plt = pretty_plot(12, 8)
    d = self.read_scan()
    if coords and coords in d["coords"]:
        x = d["coords"][coords]
        plt.xlabel(coords)
    else:
        # No usable coordinate: plot against the point index instead.
        x = range(len(d["energies"]))
        plt.xlabel("points")
    plt.ylabel("Energy (eV)")
    # Shift energies so the minimum is zero, converting Hartree -> eV.
    e_min = min(d["energies"])
    y = [(e - e_min) * Ha_to_eV for e in d["energies"]]
    plt.plot(x, y, "ro--")
    return plt
Get a matplotlib plot of the potential energy surface. Args: coords: internal coordinate name to use as abcissa.
juraj-google-style
def _add_genotypes(self, variant_obj, gemini_variant, case_id, individual_objs):
    """Add the genotypes for a variant for all individuals.

    Args:
        variant_obj (puzzle.models.Variant): variant to attach genotypes to.
        gemini_variant (GeminiQueryRow): the gemini variant row; its per-sample
            arrays (gts, gt_ref_depths, ...) are indexed by sample position.
        case_id (str): related case id.
        individual_objs (list(dict)): a list of Individuals.
    """
    for ind in individual_objs:
        # ind_index is the sample's column in gemini's per-sample arrays.
        index = ind.ind_index
        variant_obj.add_individual(Genotype(sample_id=ind.ind_id, genotype=gemini_variant['gts'][index], case_id=case_id, phenotype=ind.phenotype, ref_depth=gemini_variant['gt_ref_depths'][index], alt_depth=gemini_variant['gt_alt_depths'][index], depth=gemini_variant['gt_depths'][index], genotype_quality=gemini_variant['gt_quals'][index]))
Add the genotypes for a variant for all individuals Args: variant_obj (puzzle.models.Variant) gemini_variant (GeminiQueryRow): The gemini variant case_id (str): related case id individual_objs (list(dict)): A list of Individuals
codesearchnet
def get_descriptor_defaults(self, api_info, hostname=None, x_google_api_name=False):
    """Gets a default Swagger 2.0 configuration skeleton for a service.

    Args:
        api_info: _ApiInfo object for this service.
        hostname: string, hostname of the API, overriding the value on the
            current service. Defaults to None.
        x_google_api_name: bool, whether to add the validated
            'x-google-api-name' extension field.

    Returns:
        A dictionary with the default configuration.
    """
    hostname = hostname or util.get_app_hostname() or api_info.hostname
    # localhost and the devserver are served over plain http.
    if (hostname and hostname.startswith('localhost')) or util.is_running_on_devserver():
        protocol = 'http'
    else:
        protocol = 'https'
    base_path = api_info.base_path
    if base_path != '/':
        base_path = base_path.rstrip('/')
    defaults = {
        'swagger': '2.0',
        'info': {
            'version': api_info.api_version,
            'title': api_info.name,
        },
        'host': hostname,
        'consumes': ['application/json'],
        'produces': ['application/json'],
        'schemes': [protocol],
        'basePath': base_path,
    }
    if x_google_api_name:
        defaults['x-google-api-name'] = _validate_api_name(api_info.name)
    return defaults
Gets a default configuration for a service. Args: api_info: _ApiInfo object for this service. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary with the default configuration.
juraj-google-style
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> torch.FloatTensor:
    """Compute projected text embeddings for the given token ids.

    Args:
        input_ids: token ids for the text encoder.
        attention_mask: optional attention mask passed through to the encoder.
        position_ids: optional position ids passed through to the encoder.
        output_attentions: whether to return attentions; falls back to the
            model config when None.
        output_hidden_states: whether to return hidden states; falls back to
            the model config when None.

    Returns:
        torch.FloatTensor of shape (batch_size, output_dim): the pooled text
        encoder output passed through the text projection layer.
    """
    # Per-call overrides take precedence over the config defaults.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
    pooled_output = text_outputs.pooler_output
    text_features = self.text_projection(pooled_output)
    return text_features
Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, CLIPModel >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```
github-repos
def get_config(self):
    """Get the combined configuration of all curriculums in this MetaCurriculum.

    Returns:
        A dict from parameter to value, merged across all curriculums
        (later curriculums override earlier ones on key collisions).
    """
    merged = {}
    for curriculum in self.brains_to_curriculums.values():
        merged.update(curriculum.get_config())
    return merged
Get the combined configuration of all curriculums in this MetaCurriculum. Returns: A dict from parameter to value.
codesearchnet
def getOption(self, name):
    """Get the current value of the specified option.

    Args:
        name: Option name.

    Returns:
        Value of the option coerced to the narrowest matching type
        (int, then float, else the raw string), or None if the option
        does not exist.
    """
    try:
        # Serialize access to the underlying implementation via the lock.
        value = lock_and_call(
            lambda: self._impl.getOption(name).value(),
            self._lock
        )
    except RuntimeError:
        # The implementation signals an unknown option with RuntimeError.
        return None
    else:
        # Coerce: int first, then float, otherwise return the string as-is.
        try:
            return int(value)
        except ValueError:
            try:
                return float(value)
            except ValueError:
                return value
Get the current value of the specified option. If the option does not exist, returns None. Args: name: Option name. Returns: Value of the option. Raises: InvalidArgumet: if the option name is not valid.
juraj-google-style
def download_file(bucket_name, path, target, sagemaker_session):
    """Download a single file from S3 into a local path.

    Args:
        bucket_name (str): S3 bucket name.
        path (str): file path within the bucket (leading slashes stripped).
        target (str): destination path for the downloaded file.
        sagemaker_session (sagemaker.session.Session): session used to reach S3.
    """
    key = path.lstrip('/')
    s3_resource = sagemaker_session.boto_session.resource('s3')
    s3_resource.Bucket(bucket_name).download_file(key, target)
Download a Single File from S3 into a local path Args: bucket_name (str): S3 bucket name path (str): file path within the bucket target (str): destination directory for the downloaded file. sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.
juraj-google-style
def add_file(self, path, compress=None):
    """Add a single file to the MAR file.

    Args:
        path (str): path to a file to add to this MAR file.
        compress (str): One of 'xz', 'bz2', or None. Defaults to None
            (the docstring already promised this default; the parameter
            previously had none, forcing callers to pass it explicitly).

    Raises:
        ValueError: if `path` is not a regular file.
    """
    if not os.path.isfile(path):
        raise ValueError('{} is not a file'.format(path))
    # Append the new entry after the last written one.
    self.fileobj.seek(self.last_offset)
    with open(path, 'rb') as f:
        # Keep only the permission bits (0o777) of the source file's mode.
        flags = os.stat(path).st_mode & 0o777
        self.add_fileobj(f, path, compress, flags)
Add a single file to the MAR file. Args: path (str): path to a file to add to this MAR file. compress (str): One of 'xz', 'bz2', or None. Defaults to None.
codesearchnet
def union(*schedules: List[Union[ScheduleComponent, Tuple[int, ScheduleComponent]]], name: str = None) -> Schedule:
    """Create a union (and also shift if desired) of all input `Schedule`s.

    Args:
        *schedules: Schedules (or (shift, schedule) tuples) to take the
            union of.
        name: Name of the new schedule. Defaults to the name of the first
            element of `schedules`.

    Returns:
        Schedule: the union of all the inputs.
    """
    if name is None and schedules:
        head = schedules[0]
        if isinstance(head, (list, tuple)):
            # A (shift, schedule) pair: take the name from the schedule part.
            name = head[1].name
        else:
            name = head.name
    return Schedule(*schedules, name=name)
Create a union (and also shift if desired) of all input `Schedule`s. Args: *schedules: Schedules to take the union of name: Name of the new schedule. Defaults to first element of `schedules`
juraj-google-style
def __init__(self, location=None, parent=None, store_index=None, **kwargs):
    """Initializes a path specification.

    Note that the VSS path specification must have a parent.

    Args:
        location (Optional[str]): location.
        parent (Optional[PathSpec]): parent path specification.
        store_index (Optional[int]): store index.

    Raises:
        ValueError: when parent is not set.
    """
    if not parent:
        raise ValueError('Missing parent value.')
    super(VShadowPathSpec, self).__init__(parent=parent, **kwargs)
    # location: path within the volume shadow snapshot.
    self.location = location
    # store_index: index of the VSS store this path spec refers to.
    self.store_index = store_index
Initializes a path specification. Note that the VSS path specification must have a parent. Args: location (Optional[str]): location. parent (Optional[PathSpec]): parent path specification. store_index (Optional[int]): store index. Raises: ValueError: when parent is not set.
juraj-google-style
def tritonast2arybo(e, use_exprs=True, use_esf=False, context=None):
    """Convert a subset of Triton's AST into Arybo's representation.

    Args:
        e: Triton AST node.
        use_exprs: build arybo expression objects (EX.*) instead of raw MBA
            values where both are possible.
        use_esf: use ESFs when creating the final expression.
        context: dictionary that associates Triton expression IDs to arybo
            expressions (required to resolve REFERENCE nodes).

    Returns:
        An arybo value/expression equivalent to the Triton AST.

    Raises:
        ValueError: on REFERENCE nodes without context, unknown reference
            ids, LET nodes, or unsupported node types.
    """
    children_ = e.getChildren()
    # Lazy generators: each branch below consumes exactly the children it
    # needs, in order, via next()/reduce() -- do not materialize these.
    children = (tritonast2arybo(c,use_exprs,use_esf,context) for c in children_)
    reversed_children = (tritonast2arybo(c,use_exprs,use_esf,context) for c in reversed(children_))
    Ty = e.getType()
    if Ty == TAstN.ZX:
        # Zero-extend: first child is the number of *added* bits.
        n = next(children)
        v = next(children)
        n += v.nbits
        if n == v.nbits:
            return v
        return v.zext(n)
    if Ty == TAstN.SX:
        # Sign-extend, same convention as ZX.
        n = next(children)
        v = next(children)
        n += v.nbits
        if n == v.nbits:
            return v
        return v.sext(n)
    if Ty == TAstN.INTEGER:
        return e.getInteger()
    if Ty == TAstN.BV:
        cst = next(children)
        nbits = next(children)
        if use_exprs:
            return EX.ExprCst(cst, nbits)
        else:
            return _get_mba(nbits,use_esf).from_cst(cst)
    if Ty == TAstN.EXTRACT:
        # Triton orders extract children as (last_bit, first_bit, value).
        last = next(children)
        first = next(children)
        v = next(children)
        return v[first:last+1]
    if Ty == TAstN.CONCAT:
        # Triton concat lists the most significant part first; arybo expects
        # the reverse order.
        if use_exprs:
            return EX.ExprConcat(*list(reversed_children))
        else:
            return flatten(reversed_children)
    if Ty == TAstN.VARIABLE:
        name = e.getSymbolicVariable().getName()
        ret = _get_mba(e.getBitvectorSize(),use_esf).var(name)
        if use_exprs:
            ret = EX.ExprBV(ret)
        return ret
    if Ty == TAstN.REFERENCE:
        if context is None:
            raise ValueError("reference node without context can't be resolved")
        id_ = e.getSymbolicExpression().getId()
        ret = context.get(id_, None)
        if ret is None:
            raise ValueError("expression id %d not found in context" % id_)
        return ret
    if Ty == TAstN.LET:
        raise ValueError("unsupported LET operation")
    # Dispatch tables for the remaining operator nodes; tried in order:
    # shifts/rotates, unary ops, bitwise/arithmetic binary ops, comparisons.
    shifts = {
        TAstN.BVASHR: lambda a,b: a.ashr(b),
        TAstN.BVLSHR: lambda a,b: a.lshr(b),
        TAstN.BVSHL: operator.lshift,
        TAstN.BVROL: lambda x,n: x.rol(n),
        TAstN.BVROR: lambda x,n: x.ror(n)
    }
    shift = shifts.get(Ty, None)
    if not shift is None:
        v = next(children)
        n = next(children)
        return shift(v,n)
    unops = {
        TAstN.BVNOT: lambda x: ~x,
        TAstN.LNOT: lambda x: ~x,
        TAstN.BVNEG: operator.neg
    }
    unop = unops.get(Ty, None)
    if not unop is None:
        return unop(next(children))
    binops = {
        TAstN.BVADD: operator.add,
        TAstN.BVSUB: operator.sub,
        TAstN.BVAND: operator.and_,
        TAstN.BVOR: operator.or_,
        TAstN.BVXOR: operator.xor,
        TAstN.BVMUL: operator.mul,
        TAstN.BVNAND: lambda x,y: ~(x&y),
        TAstN.BVNOR: lambda x,y: ~(x|y),
        TAstN.BVXNOR: lambda x,y: ~(x^y),
        TAstN.BVUDIV: lambda x,y: x.udiv(y),
        TAstN.BVSDIV: lambda x,y: x.sdiv(y),
        TAstN.BVUREM: lambda x,y: x.urem(y),
        TAstN.BVSREM: lambda x,y: x.srem(y),
        TAstN.LAND: operator.and_,
        TAstN.LOR: operator.or_
    }
    binop = binops.get(Ty, None)
    if not binop is None:
        # Triton operator nodes may be n-ary; fold left over all children.
        return reduce(binop, children)
    lops = {
        TAstN.EQUAL: lambda x,y: EX.ExprCmpEq(x,y),
        TAstN.DISTINCT: lambda x,y: EX.ExprCmpNeq(x,y),
        TAstN.BVUGE: lambda x,y: EX.ExprCmpGte(x,y,False),
        TAstN.BVUGT: lambda x,y: EX.ExprCmpGt(x,y,False),
        TAstN.BVULE: lambda x,y: EX.ExprCmpLte(x,y,False),
        TAstN.BVULT: lambda x,y: EX.ExprCmpLt(x,y,False),
        TAstN.BVSGE: lambda x,y: EX.ExprCmpGte(x,y,True),
        TAstN.BVSGT: lambda x,y: EX.ExprCmpGt(x,y,True),
        TAstN.BVSLE: lambda x,y: EX.ExprCmpLte(x,y,True),
        TAstN.BVSLT: lambda x,y: EX.ExprCmpLt(x,y,True)
    }
    lop = lops.get(Ty, None)
    if not lop is None:
        return reduce(lop, children)
    if Ty != TAstN.ITE:
        raise ValueError("unsupported node type %s" % str(Ty))
    # ITE children are (condition, then-value, else-value).
    return EX.ExprCond(next(children), next(children), next(children))
Convert a subset of Triton's AST into Arybo's representation Args: e: Triton AST use_esf: use ESFs when creating the final expression context: dictionnary that associates Triton expression ID to arybo expressions Returns: An :class:`arybo.lib.MBAVariable` object
juraj-google-style
def _copy_delpoy_scripts(self, scripts):
    """Copy the given deploy scripts to the scripts dir in the prefix.

    NOTE: the 'delpoy' typo is part of this method's name; renaming it
    would break existing callers, so it is kept as-is.

    Args:
        scripts (list of str): paths of the scripts to copy to the prefix
            (environment variables are expanded first).

    Returns:
        list of str: paths to the copied scripts, prefixed with
        $LAGO_PREFIX_PATH so the full path is not hardcoded.

    Raises:
        RuntimeError: if any script does not exist.
    """
    if not os.path.exists(self.paths.scripts()):
        os.makedirs(self.paths.scripts())
    new_scripts = []
    for script in scripts:
        script = os.path.expandvars(script)
        if not os.path.exists(script):
            raise RuntimeError('Script %s does not exist' % script)
        # Flatten the path into a single filename so all scripts land in
        # one directory without collisions from identical basenames.
        sanitized_name = script.replace('/', '_')
        new_script_cur_path = os.path.expandvars(
            self.paths.scripts(sanitized_name)
        )
        shutil.copy(script, new_script_cur_path)
        # Return a relocatable path anchored at $LAGO_PREFIX_PATH.
        new_script_init_path = os.path.join(
            '$LAGO_PREFIX_PATH',
            os.path.basename(self.paths.scripts()),
            sanitized_name,
        )
        new_scripts.append(new_script_init_path)
    return new_scripts
Copy the given deploy scripts to the scripts dir in the prefix Args: scripts(list of str): list of paths of the scripts to copy to the prefix Returns: list of str: list with the paths to the copied scripts, with a prefixed with $LAGO_PREFIX_PATH so the full path is not hardcoded
juraj-google-style
def reconstruct_non_debug_graph_def(debug_graph_def):
    """Reconstruct the original (non-debugger-decorated) partition GraphDef.

    Strips the input GraphDef of the Copy* and Debug*-type nodes inserted by
    the debugger; runtime-inserted node names, their downstream input names,
    and while-loop Enter parallel_iteration attributes may differ from the
    pre-instrumentation graph.

    Args:
        debug_graph_def: the debugger-decorated `tf.compat.v1.GraphDef`.

    Returns:
        The reconstructed `tf.compat.v1.GraphDef` without debugger nodes.
    """
    return DebugGraph(debug_graph_def).non_debug_graph_def
Reconstruct original (non-debugger-decorated) partition GraphDef. This method strips the input `tf.compat.v1.GraphDef` of the Copy* and Debug*-type nodes inserted by the debugger. The reconstructed partition graph is identical to the original (i.e., non-debugger-decorated) partition graph except in the following respects: 1) The exact names of the runtime-inserted internal nodes may differ. These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops. 2) As a consequence of 1, the nodes that receive input directly from such send- and recv-type ops will have different input names. 3) The parallel_iteration attribute of while-loop Enter ops are set to 1. Args: debug_graph_def: The debugger-decorated `tf.compat.v1.GraphDef`, with the debugger-inserted Copy* and Debug* nodes. Returns: The reconstructed `tf.compat.v1.GraphDef` stripped of the debugger-inserted nodes.
github-repos
def _connect_to_device(self, uuid, key, client):
    """Connect to a device given its uuid.

    Publishes a connect response on the device's status topic; on success,
    records the connection state and registers report/trace monitors.

    Args:
        uuid (int): The unique id of the device.
        key (string): A 64 byte string used to secure this connection.
        client (string): The client id for who is trying to connect
            to the device.
    """
    slug = self._build_device_slug(uuid)
    message = {'client': client, 'type': 'response', 'operation': 'connect'}
    self._logger.info("Connection attempt for device %d", uuid)
    # Reject if someone already holds a connection to this device.
    if uuid in self._connections:
        message['success'] = False
        message['failure_reason'] = 'Someone else is connected to the device'
        self._publish_status(slug, message)
        return
    resp = yield self._manager.connect(uuid)
    message['success'] = resp['success']
    if resp['success']:
        conn_id = resp['connection_id']
        # Fresh per-connection state: streaming script, trace accumulator
        # and progress bookkeeping all start empty.
        self._connections[uuid] = {'key': key, 'client': client, 'connection_id': conn_id, 'last_touch': monotonic(), 'script': [], 'trace_accum': bytes(), 'last_trace': None, 'trace_scheduled': False, 'last_progress': None}
    else:
        message['failure_reason'] = resp['reason']
        self._connections[uuid] = {}
    connection = self._connections[uuid]
    # NOTE(review): monitors are registered even when connect failed (on the
    # empty dict above) -- confirm this is intentional.
    connection['report_monitor'] = self._manager.register_monitor(uuid, ['report'], self._notify_report)
    connection['trace_monitor'] = self._manager.register_monitor(uuid, ['trace'], self._notify_trace)
    self._publish_status(slug, message)
Connect to a device given its uuid Args: uuid (int): The unique id of the device key (string): A 64 byte string used to secure this connection client (string): The client id for who is trying to connect to the device.
juraj-google-style
def cxx(project, detect_project=False):
    """Return a clang++ wrapper that hides CFLAGS and LDFLAGS.

    Generates a wrapper script named after the configured C++ compiler in
    the current directory and returns a plumbum command pointing at it.

    Args:
        project: project the wrapped compiler is bound to.
        detect_project: forwarded to wrap_cc(); enables project detection
            inside the wrapper.

    Returns:
        benchbuild.utils.cmd: command for the new wrapper script.
    """
    from benchbuild.utils import cmd
    cxx_name = str(CFG['compiler']['cxx'])
    # wrap_cc writes the wrapper script './<cxx_name>' into the cwd.
    wrap_cc(cxx_name, compiler(cxx_name), project, detect_project=detect_project)
    return cmd['./{name}'.format(name=cxx_name)]
Return a clang++ that hides CFLAGS and LDFLAGS. This will generate a wrapper script in the current directory and return a complete plumbum command to it. Args: cflags: The CFLAGS we want to hide. ldflags: The LDFLAGS we want to hide. func (optional): A function that will be pickled alongside the compiler. It will be called before the actual compilation took place. This way you can intercept the compilation process with arbitrary python code. Returns (benchbuild.utils.cmd): Path to the new clang command.
codesearchnet
def compose_full_url(pub, uuid_url=False):
    """Compose full url for given `pub`, with protocol, server's address and port.

    Args:
        pub (obj): :class:`.DBPublication` instance.
        uuid_url (bool, default False): Compose URL using UUID.

    Returns:
        str: Absolute url of the publication.

    Raises:
        PrivatePublicationError: When the `pub` is private publication
            (raised by compose_path()).
    """
    url = compose_path(pub, uuid_url)
    # NOTE(review): both return statements were truncated in the source
    # ("return ('%s:") -- the scheme/host portion is reconstructed here
    # using the module-level WEB_ADDR constant and the http scheme.
    # TODO: confirm the constant names against the module's settings.
    if WEB_PORT == 80:
        # Default port: omit it from the URL.
        return 'http://%s%s' % (WEB_ADDR, url)
    return 'http://%s:%d%s' % (WEB_ADDR, WEB_PORT, url)
Compose full url for given `pub`, with protocol, server's address and port. Args: pub (obj): :class:`.DBPublication` instance. uuid_url (bool, default False): Compose URL using UUID. Returns: str: Absolute url of the publication. Raises: PrivatePublicationError: When the `pub` is private publication.
codesearchnet
def _GetISO8601String(self, structure):
    """Retrieves an ISO 8601 date time string from the structure.

    The date and time values in the SCCM log are formatted as:
    time="19:33:19.766-330" date="11-28-2014"

    Args:
        structure (pyparsing.ParseResults): structure of tokens derived from
            a line of a text file.

    Returns:
        str: ISO 8601 date time string.

    Raises:
        ValueError: if the structure cannot be converted into a date time
            string.
    """
    fraction_of_second_length = len(structure.fraction_of_second)
    if (fraction_of_second_length not in (3, 6, 7)):
        raise ValueError('unsupported time fraction of second length: {0:d}'.format(fraction_of_second_length))
    try:
        fraction_of_second = int(structure.fraction_of_second, 10)
    except (TypeError, ValueError) as exception:
        raise ValueError('unable to determine fraction of second with error: {0!s}'.format(exception))
    # 7-digit fractions (100ns precision) are truncated to microseconds.
    if (fraction_of_second_length == 7):
        (fraction_of_second, _) = divmod(fraction_of_second, 10)
    date_time_string = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}'.format(structure.year, structure.month, structure.day, structure.hour, structure.minute, structure.second)
    # Always true given the (3, 6, 7) validation above; kept for safety.
    if (fraction_of_second_length > 0):
        date_time_string = '{0:s}.{1:d}'.format(date_time_string, fraction_of_second)
    utc_offset_minutes = structure.get('utc_offset_minutes', None)
    if (utc_offset_minutes is not None):
        # utc_offset_minutes is a signed string like "-330"; [0] is the sign
        # and [1:] the offset in minutes.
        try:
            time_zone_offset = int(utc_offset_minutes[1:], 10)
        except (IndexError, ValueError) as exception:
            raise ValueError('Unable to parse time zone offset with error: {0!s}.'.format(exception))
        (time_zone_hours, time_zone_minutes) = divmod(time_zone_offset, 60)
        date_time_string = '{0:s}{1:s}{2:02d}:{3:02d}'.format(date_time_string, utc_offset_minutes[0], time_zone_hours, time_zone_minutes)
    return date_time_string
Retrieves an ISO8601 date time string from the structure. The date and time values in the SCCM log are formatted as: time="19:33:19.766-330" date="11-28-2014" Args: structure (pyparsing.ParseResults): structure of tokens derived from a line of a text file. Returns: str: ISO 8601 date time string. Raises: ValueError: if the structure cannot be converted into a date time string.
codesearchnet
def __init__(self, initial_structure, final_structure):
    """Store the initial and final structures of a calculation.

    Please note that the input and final structures should have the same
    ordering of sites, as is typically the case for most computational codes.

    Args:
        initial_structure (Structure): Initial input structure to calculation.
        final_structure (Structure): Final output structure from calculation.

    Raises:
        ValueError: if the two structures have different formulas.
    """
    if initial_structure.formula != final_structure.formula:
        raise ValueError("Initial and final structures have different formulas!")
    self.initial = initial_structure
    self.final = final_structure
Please note that the input and final structures should have the same ordering of sites. This is typically the case for most computational codes. Args: initial_structure (Structure): Initial input structure to calculation. final_structure (Structure): Final output structure from calculation.
juraj-google-style
def add(self, rid, data, raise_on_error=True):
    """Write data to the DataStore. Alias for the post() method.

    Args:
        rid (str): The record identifier.
        data (dict): The record data.
        raise_on_error (bool): If True and not r.ok this method will raise
            a RuntimeError (via post()).

    Returns:
        object: Python request response.
    """
    # add() is purely a friendlier name; all work happens in post().
    return self.post(rid, data, raise_on_error)
Write data to the DataStore. Alias for post() method. Args: rid (str): The record identifier. data (dict): The record data. raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError. Returns: object : Python request response.
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Java WebStart Cache IDX file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    file_header_map = self._GetDataTypeMap('java_idx_file_header')
    try:
        file_header, file_offset = self._ReadStructureFromFileObject(
            file_object, 0, file_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse file header with error: {0!s}'.format(
                exception))
    if not file_header.format_version in self._SUPPORTED_FORMAT_VERSIONS:
        raise errors.UnableToParseFile('Unsupported format version.')
    # Section 1 layout differs per format version.
    if file_header.format_version == 602:
        section1_map = self._GetDataTypeMap('java_idx_602_section1')
    elif file_header.format_version in (603, 604):
        section1_map = self._GetDataTypeMap('java_idx_603_section1')
    elif file_header.format_version == 605:
        section1_map = self._GetDataTypeMap('java_idx_605_section1')
    try:
        section1, data_size = self._ReadStructureFromFileObject(
            file_object, file_offset, section1_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile((
            'Unable to parse section 1 (format version: {0:d}) with error: '
            '{1!s}').format(file_header.format_version, exception))
    file_offset += data_size
    if file_header.format_version == 602:
        section2_map = self._GetDataTypeMap('java_idx_602_section2')
    elif file_header.format_version in (603, 604, 605):
        # Versions >= 603 store section 2 at a fixed offset.
        file_offset = 128
        section2_map = self._GetDataTypeMap('java_idx_603_section2')
    try:
        section2, data_size = self._ReadStructureFromFileObject(
            file_object, file_offset, section2_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile((
            'Unable to parse section 2 (format version: {0:d}) with error: '
            '{1!s}').format(file_header.format_version, exception))
    file_offset += data_size
    if not section2.url:
        raise errors.UnableToParseFile('URL not found in file.')
    # Scan the HTTP headers for the 'date' header (download time).
    date_http_header = None
    for _ in range(section2.number_of_http_headers):
        http_header_map = self._GetDataTypeMap('java_idx_http_header')
        try:
            http_header, data_size = self._ReadStructureFromFileObject(
                file_object, file_offset, http_header_map)
        except (ValueError, errors.ParseError) as exception:
            parser_mediator.ProduceExtractionWarning(
                'Unable to parse HTTP header value at offset: 0x{0:08x}'.format(
                    file_offset))
            break
        file_offset += data_size
        if http_header.name == 'date':
            date_http_header = http_header
            break
    event_data = JavaIDXEventData()
    event_data.idx_version = file_header.format_version
    event_data.ip_address = getattr(section2, 'ip_address', None)
    event_data.url = section2.url
    date_time = dfdatetime_java_time.JavaTime(
        timestamp=section1.modification_time)
    event = time_events.DateTimeValuesEvent(date_time, 'File Hosted Date')
    parser_mediator.ProduceEventWithEventData(event, event_data)
    if section1.expiration_time:
        date_time = dfdatetime_java_time.JavaTime(
            timestamp=section1.expiration_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    if date_http_header:
        # The header carries an HTTP date string, not a numeric timestamp.
        try:
            download_date = timelib.Timestamp.FromTimeString(
                date_http_header.value, gmt_as_timezone=False)
        except errors.TimestampError:
            parser_mediator.ProduceExtractionWarning(
                'Unable to parse date HTTP header value: {0:s}'.format(
                    date_http_header.value))
        if download_date:
            event = time_events.TimestampEvent(
                download_date, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Java WebStart Cache IDX file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dvfvs.FileIO): a file-like object to parse. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def pose2mat(pose):
    """Converts pose to homogeneous matrix.

    Args:
        pose: a (pos, orn) tuple where pos is vec3 float cartesian, and orn
            is vec4 float quaternion.

    Returns:
        4x4 homogeneous matrix (numpy float32).
    """
    homo_pose_mat = np.zeros((4, 4), dtype=np.float32)
    # BUG FIX: the original used the invalid subscript "[(:3, :3)]", which is
    # a syntax error; standard multidimensional slicing is used instead.
    homo_pose_mat[:3, :3] = quat2mat(pose[1])
    homo_pose_mat[:3, 3] = np.array(pose[0], dtype=np.float32)
    homo_pose_mat[3, 3] = 1.0
    return homo_pose_mat
Converts pose to homogeneous matrix. Args: pose: a (pos, orn) tuple where pos is vec3 float cartesian, and orn is vec4 float quaternion. Returns: 4x4 homogeneous matrix
codesearchnet
def from_api_repr(cls, resource):
    """Factory: construct an :class:`~.external_config.ExternalConfig`
    instance given its API representation.

    Args:
        resource (Dict[str, Any]): Definition of an ExternalConfig instance
            in the same representation as is returned from the API.

    Returns:
        ExternalConfig: Configuration parsed from ``resource``.
    """
    config = cls(resource["sourceFormat"])
    # At most one options sub-message applies; stop at the first option
    # class whose resource key is present.
    for optcls in _OPTION_CLASSES:
        opts = resource.get(optcls._RESOURCE_NAME)
        if opts is not None:
            config._options = optcls.from_api_repr(opts)
            break
    # Keep a private deep copy so later mutation of `resource` by the
    # caller cannot affect this config.
    config._properties = copy.deepcopy(resource)
    return config
Factory: construct an :class:`~.external_config.ExternalConfig` instance given its API representation. Args: resource (Dict[str, Any]): Definition of an :class:`~.external_config.ExternalConfig` instance in the same representation as is returned from the API. Returns: :class:`~.external_config.ExternalConfig`: Configuration parsed from ``resource``.
juraj-google-style
def __init__(self, low, high, output_shape):
    """Init.

    Args:
        low: Lower boundary of the output interval. All values generated
            will be greater than or equal to low.
        high: Upper boundary of the output interval. All values generated
            will be less than high.
        output_shape: Output shape. The shape is
            `(batch size, d1, d2, d3, ...)`.
    """
    # Interval boundaries for generated values: [low, high).
    self.__low = low
    self.__high = high
    # Shape of each generated batch.
    self.__output_shape = output_shape
Init. Args: low: Lower boundary of the output interval. All values generated will be greater than or equal to low. high: Upper boundary of the output interval. All values generated will be less than high. output_shape: Output shape. the shape is `(batch size, d1, d2, d3, ...)`.
juraj-google-style
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema.

    Args:
        obj (object): A python object to verify.

    Returns:
        The object itself, unchanged, when it is an int.

    Raises:
        ValidationError: If the object is not an int, with the reason key
            set indicating why validation failed.
    """
    if isinstance(obj, int):
        return obj
    raise ValidationError("Object is not a int", reason='object is not a int', object=obj, type=type(obj), int_type=int)
Verify that the object conforms to this verifier's schema Args: obj (object): A python object to verify Raises: ValidationError: If there is a problem verifying the dictionary, a ValidationError is thrown with at least the reason key set indicating the reason for the lack of validation.
juraj-google-style
def list_autoscale_settings(access_token, subscription_id):
    """List the autoscale settings in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of autoscale settings.
    """
    # Note: the double slash before 'autoscaleSettings' reproduces the
    # endpoint exactly as originally assembled.
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/providers/microsoft.insights/'
        + '/autoscaleSettings?api-version=' + INSIGHTS_API
    )
    return do_get(endpoint, access_token)
List the autoscale settings in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of autoscale settings.
juraj-google-style
def normalize(code):
    """Normalize language codes to ISO 639-2.

    If all conversions fail, return the `code` unchanged.

    Args:
        code (str): Language / country code.

    Returns:
        str: ISO 639-2 country code.
    """
    # Three-letter codes are assumed to already be ISO 639-2.
    if len(code) == 3:
        return code
    converted = translate(code)
    if converted:
        return converted
    country = countries.get(code, None)
    if country:
        return country.alpha3.lower()
    return code
Normalize language codes to ISO 639-2. If all conversions fails, return the `code` as it was given. Args: code (str): Language / country code. Returns: str: ISO 639-2 country code.
juraj-google-style
def get_log_id(cls, id):
    """Fetches the log for the command represented by this id.

    Args:
        id: command id.

    Returns:
        str: raw log text for the command.
    """
    connection = Qubole.agent()
    response = connection.get_raw(cls.element_path(id) + "/logs")
    return response.text
Fetches log for the command represented by this id Args: `id`: command id
juraj-google-style
def __call__(self, environ, start_response):
    """Callable implementation for WSGI middleware.

    Streams the wrapped application's response and then tracks the request
    (name, url, status, duration) via the telemetry client.

    Args:
        environ (dict): WSGI environment properties for this request.
        start_response (func): function used to store the status, HTTP
            headers to be sent to the client and optional exception info.

    Yields:
        The response body parts from the wrapped application.
    """
    start_time = datetime.datetime.utcnow()
    name = environ.get('PATH_INFO') or '/'
    # Mutable cell so the interceptor closure can report the status back.
    closure = {'status': '200 OK'}
    http_method = environ.get('REQUEST_METHOD', 'GET')
    self.client.context.operation.id = str(uuid.uuid4())
    self.client.context.operation.name = http_method + ' ' + name

    def status_interceptor(status_string, headers_array, exc_info=None):
        # Capture the status the wrapped app reports so it can be logged
        # after the response has been streamed.
        closure['status'] = status_string
        start_response(status_string, headers_array, exc_info)

    for part in self._wsgi_application(environ, status_interceptor):
        yield part

    success = True
    response_match = re.match(r'\s*(?P<code>\d+)', closure['status'])
    if response_match:
        response_code = response_match.group('code')
        if int(response_code) >= 400:
            success = False
    else:
        # Malformed status line: record it verbatim and count as failure.
        response_code = closure['status']
        success = False

    url = name
    query_string = environ.get('QUERY_STRING')
    if query_string:
        url += '?' + query_string
    scheme = environ.get('wsgi.url_scheme', 'http')
    host = environ.get('HTTP_HOST', environ.get('SERVER_NAME', 'unknown'))
    # BUG FIX: this assignment was truncated in the source
    # ("url = scheme + ': "); rebuilt as the absolute request URL.
    url = scheme + '://' + host + url

    end_time = datetime.datetime.utcnow()
    duration = int((end_time - start_time).total_seconds() * 1000)
    self.client.track_request(name, url, success, start_time.isoformat() + 'Z', duration, response_code, http_method, self._common_properties)
Callable implementation for WSGI middleware. Args: environ (dict). a dictionary containing all WSGI environment properties for this request.\n start_response (func). a function used to store the status, HTTP headers to be sent to the client and optional exception information. Returns: (obj). the response to send back to the client.
juraj-google-style
def outer_definition_name(cls):
    """Helper method for creating outer definition name.

    Returns:
        If definition is nested, the outer definition's name; otherwise
        the package name for the defining module.
    """
    outer_definition = cls.message_definition()
    if (not outer_definition):
        # Not nested: fall back to the module's package name.
        return util.get_package_for_module(cls.__module__)
    return outer_definition.definition_name()
Helper method for creating outer definition name. Returns: If definition is nested, will return the outer definitions name, else the package name.
codesearchnet
def get_student_current_grade(self, username, course_id):
    """Returns a CurrentGrade object for the user in a course.

    Args:
        username (str): an edX user's username.
        course_id (str): an edX course id.

    Returns:
        CurrentGrade: object representing the student's current grade
        for the course.
    """
    path = '/api/grades/v1/courses/{course_key}/?username={username}'.format(
        username=username, course_key=course_id)
    resp = self.requester.get(urljoin(self.base_url, path))
    resp.raise_for_status()
    # The grades endpoint returns a one-element list for a single user.
    return CurrentGrade(resp.json()[0])
Returns an CurrentGrade object for the user in a course Args: username (str): an edx user's username course_id (str): an edX course id. Returns: CurrentGrade: object representing the student current grade for a course
codesearchnet
def _strict_match(self, struct1, struct2, fu, s1_supercell=True, use_rms=False, break_on_match=False):
    """Matches struct2 onto struct1 (which should contain all sites in
    struct2).

    Args:
        struct1, struct2 (Structure): structures to be matched.
        fu (int): size of supercell to create.
        s1_supercell (bool): whether to create the supercell of struct1
            (vs struct2).
        use_rms (bool): whether to minimize the rms of the matching.
        break_on_match (bool): whether to stop search at first valid match.

    Returns:
        (val, dist, sc_m, total_t, mapping) tuple for the best (or first
        acceptable) match, or None if no match within self.stol is found.

    Raises:
        ValueError: if fu < 1 or struct1's supercell has fewer sites than
            struct2.
    """
    if fu < 1:
        raise ValueError("fu cannot be less than 1")
    mask, s1_t_inds, s2_t_ind = self._get_mask(struct1, struct2, fu, s1_supercell)
    if mask.shape[0] > mask.shape[1]:
        raise ValueError('after supercell creation, struct1 must '
                         'have more sites than struct2')
    # Without subset matching, site counts must agree exactly.
    if (not self._subset) and mask.shape[1] != mask.shape[0]:
        return None
    # Cheap feasibility check: any positive assignment cost means some
    # struct2 site has no allowed partner.
    if LinearAssignment(mask).min_cost > 0:
        return None
    best_match = None
    for s1fc, s2fc, avg_l, sc_m in \
            self._get_supercells(struct1, struct2, fu, s1_supercell):
        # Normalize tolerances by the per-site volume of the averaged lattice.
        normalization = (len(s1fc) / avg_l.volume) ** (1/3)
        inv_abc = np.array(avg_l.reciprocal_lattice.abc)
        frac_tol = inv_abc * self.stol / (np.pi * normalization)
        # Try every candidate translation aligning a struct1 site onto the
        # chosen struct2 anchor site.
        for s1i in s1_t_inds:
            t = s1fc[s1i] - s2fc[s2_t_ind]
            t_s2fc = s2fc + t
            if self._cmp_fstruct(s1fc, t_s2fc, frac_tol, mask):
                # Passed the coarse check; refine with the LLL-reduced
                # lattice tolerance and exact Cartesian distances.
                inv_lll_abc = np.array(avg_l.get_lll_reduced_lattice().reciprocal_lattice.abc)
                lll_frac_tol = inv_lll_abc * self.stol / (np.pi * normalization)
                dist, t_adj, mapping = self._cart_dists(
                    s1fc, t_s2fc, avg_l, mask, normalization, lll_frac_tol)
                if use_rms:
                    val = np.linalg.norm(dist) / len(dist) ** 0.5
                else:
                    val = max(dist)
                if best_match is None or val < best_match[0]:
                    total_t = t + t_adj
                    # Wrap the total translation into [-0.5, 0.5).
                    total_t -= np.round(total_t)
                    best_match = val, dist, sc_m, total_t, mapping
                if (break_on_match or val < 1e-5) and val < self.stol:
                    return best_match
    if best_match and best_match[0] < self.stol:
        return best_match
Matches struct2 onto struct1 (which should contain all sites in struct2). Args: struct1, struct2 (Structure): structures to be matched fu (int): size of supercell to create s1_supercell (bool): whether to create the supercell of struct1 (vs struct2) use_rms (bool): whether to minimize the rms of the matching break_on_match (bool): whether to stop search at first valid match
juraj-google-style
def is_param_method(obj, has_deps=False):
    """Whether the object is a method on a parameterized object.

    Args:
        obj: Object to check.
        has_deps (boolean, optional): Check for dependencies. Whether to
            also check whether the method has been annotated with
            param.depends.

    Returns:
        A truthy/falsy value indicating whether the object is a method on
        a Parameterized object and, when has_deps is set, whether it has
        any dependencies. Note: with has_deps the dependency value itself
        is returned, not a strict bool.
    """
    parameterized = (inspect.ismethod(obj) and isinstance(get_method_owner(obj), param.Parameterized))
    if (parameterized and has_deps):
        # param.depends stores its metadata in the _dinfo attribute.
        return getattr(obj, '_dinfo', {}).get('dependencies')
    return parameterized
Whether the object is a method on a parameterized object. Args: obj: Object to check has_deps (boolean, optional): Check for dependencies Whether to also check whether the method has been annotated with param.depends Returns: A boolean value indicating whether the object is a method on a Parameterized object and if enabled whether it has any dependencies
codesearchnet
def gcd_float(numbers, tol=1e-08):
    """Return the greatest common divisor of a sequence of numbers.

    A numerical tolerance is used in the Euclidean algorithm so the
    function can also be applied to floats.

    Args:
        numbers: Sequence of numbers.
        tol: Numerical tolerance below which a remainder is considered
            zero.

    Returns:
        Greatest common divisor of the numbers.
    """
    result = numbers[0]
    for value in numbers:
        # One round of the Euclidean algorithm on (result, value); the
        # loop terminates when the remainder drops below the tolerance.
        a, b = result, value
        while b > tol:
            a, b = b, a % b
        result = a
    return result
Returns the greatest common divisor for a sequence of numbers. Uses a numerical tolerance, so can be used on floats Args: numbers: Sequence of numbers. tol: Numerical tolerance Returns: (int) Greatest common divisor of numbers.
codesearchnet
def remove_import_statements(code):
    """Remove lines with import statements from the code.

    Lines whose first non-whitespace characters are ``import `` or
    ``from `` are dropped; blank lines left dangling at either end of the
    result are trimmed.

    Args:
        code: The code to be stripped.

    Returns:
        The code without import statements.
    """
    kept = [
        line for line in code.splitlines()
        if not line.lstrip().startswith('import ')
        and not line.lstrip().startswith('from ')
    ]
    # Trim blank lines exposed at the start/end by the removed imports.
    start, end = 0, len(kept)
    while start < end and kept[start] == '':
        start += 1
    while end > start and kept[end - 1] == '':
        end -= 1
    return '\n'.join(kept[start:end])
Removes lines with import statements from the code. Args: code: The code to be stripped. Returns: The code without import statements.
codesearchnet
def GetCacheSize(self):
    """Determines the size of the uncompressed cached data.

    Returns:
        int: number of cached bytes, or 0 if no data has been cached yet.
    """
    # Compare against None explicitly: an offset of 0 is a valid cache
    # start, but the previous truthiness test ("not offset") treated it
    # the same as "not yet cached" and wrongly reported a size of 0.
    if self._cache_start_offset is None or self._cache_end_offset is None:
        return 0
    return self._cache_end_offset - self._cache_start_offset
Determines the size of the uncompressed cached data. Returns: int: number of cached bytes.
codesearchnet
def get_config_status():
    """Get the status of the current DSC Configuration.

    Returns:
        dict: A dictionary representing the status of the current DSC
            Configuration on the machine.

    Raises:
        CommandExecutionError: ``Not Configured`` when no status
            information is available; any other execution error is
            re-raised untouched.

    CLI Example:

    .. code-block:: bash

        salt '*' dsc.get_config_status
    """
    cmd = ('Get-DscConfigurationStatus | Select-Object -Property HostName, '
           'Status, MetaData, @{Name="StartDate";Expression={Get-Date '
           '($_.StartDate) -Format g}}, Type, Mode, RebootRequested, '
           'NumberofResources')
    try:
        return _pshell(cmd, ignore_retcode=True)
    except CommandExecutionError as exc:
        # Translate the "no status" failure into a clearer message.
        if 'No status information available' not in exc.info['stderr']:
            raise
        raise CommandExecutionError('Not Configured')
Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status
codesearchnet
def _parse_price(html_chunk):
    """Parse the price of the book.

    Args:
        html_chunk (obj): HTMLElement containing a slice of the page with
            details.

    Returns:
        str/None: Price as a string with currency, or None if not found.
    """
    price_div = html_chunk.find("div", {"class": "prices"})
    raw_price = get_first_content(price_div)
    if not raw_price:
        return None
    # Strip markup, then keep only the last line of the remaining text.
    return dhtmlparser.removeTags(raw_price).split("\n")[-1]
Parse price of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Price as string with currency or None if not found.
juraj-google-style
def default_output_fn(prediction, accept):
    """Serialize the prediction for the response.

    Args:
        prediction (obj): prediction returned by predict_fn.
        accept (str): accept content-type expected by the client.

    Returns:
        worker.Response: a Flask response object carrying the serialized
            prediction with the requested content-type.
    """
    serialized = _encoders.encode(prediction, accept)
    return _worker.Response(response=serialized, mimetype=accept)
Function responsible to serialize the prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: (worker.Response): a Flask response object with the following args: * Args: response: the serialized data to return accept: the content-type that the data was transformed to.
juraj-google-style
def _GetScanner(self, specification_store, signature_identifiers):
    """Initializes a signature scanner from the specification store.

    Only specifications whose identifier appears in
    signature_identifiers are added to the scanner; each matched
    identifier is also recorded on self._signature_identifiers.

    Args:
        specification_store (FormatSpecificationStore): a specification
            store.
        signature_identifiers (list[str]): signature identifiers.

    Returns:
        pysigscan.scanner: signature scanner or None.
    """
    if not specification_store:
        return None
    scanner = pysigscan.scanner()
    for specification in specification_store.specifications:
        identifier = specification.identifier
        if identifier not in signature_identifiers:
            continue
        for signature in specification.signatures:
            offset = signature.offset
            if offset is None:
                flags = pysigscan.signature_flags.NO_OFFSET
            elif offset < 0:
                # pysigscan expects a positive offset when it is measured
                # relative to the end of the data.
                offset = -offset
                flags = pysigscan.signature_flags.RELATIVE_FROM_END
            else:
                flags = pysigscan.signature_flags.RELATIVE_FROM_START
            scanner.add_signature(signature.identifier, offset, signature.pattern, flags)
        self._signature_identifiers.append(identifier)
    return scanner
Initializes the scanner form the specification store. Args: specification_store (FormatSpecificationStore): a specification store. signature_identifiers (list[str]): signature identifiers. Returns: pysigscan.scanner: signature scanner or None.
codesearchnet
def encode_field(self, field, value):
    """Encode the given value as JSON.

    Args:
        field: a messages.Field for the field we're encoding.
        value: a value for field.

    Returns:
        A python value suitable for json.dumps.
    """
    # Give registered custom encoders first crack at the value; the first
    # codec that reports completion short-circuits the default path, while
    # partial codecs may still transform the value along the way.
    for encoder in _GetFieldCodecs(field, 'encoder'):
        result = encoder(field, value)
        value = result.value
        if result.complete:
            return value
    # Enum fields may carry a custom JSON name mapping; fall back to the
    # plain enum name when no mapping is registered for a member.
    if isinstance(field, messages.EnumField):
        if field.repeated:
            remapped_value = [(GetCustomJsonEnumMapping(field.type, python_name=e.name) or e.name) for e in value]
        else:
            remapped_value = GetCustomJsonEnumMapping(field.type, python_name=value.name)
        if remapped_value:
            return remapped_value
    # Nested messages (other than datetimes, which have their own
    # handling) are round-tripped through encode_message so the parent
    # serialization sees plain python values.
    if (isinstance(field, messages.MessageField) and (not isinstance(field, message_types.DateTimeField))):
        value = json.loads(self.encode_message(value))
    return super(_ProtoJsonApiTools, self).encode_field(field, value)
Encode the given value as JSON. Args: field: a messages.Field for the field we're encoding. value: a value for field. Returns: A python value suitable for json.dumps.
codesearchnet
def destroy_elb(app='', env='dev', region='us-east-1', **_):
    """Destroy ELB Resources.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.

    Returns:
        True upon successful completion.
    """
    # The destroy template needs the VPC for the target account/region.
    vpc_id = get_vpc_id(account=env, region=region)
    task_json = get_template(template_file='destroy/destroy_elb.json.j2',
                             app=app,
                             env=env,
                             region=region,
                             vpc=vpc_id)
    wait_for_task(task_json)
    return True
Destroy ELB Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: True upon successful completion.
juraj-google-style
def add_gene_info(self, variant_obj, gene_panels=None):
    """Add extra information about genes from gene panels.

    Args:
        variant_obj (dict): A variant from the database.
        gene_panels (list(dict)): List of panels from the database.

    Returns:
        dict: the updated variant_obj (also mutated in place).
    """
    gene_panels = gene_panels or []
    variant_obj['has_refseq'] = False
    # Index panel gene entries by hgnc_id so the per-gene panel info can
    # be looked up quickly in the loop below.
    extra_info = {}
    for panel_obj in gene_panels:
        for gene_info in panel_obj['genes']:
            hgnc_id = gene_info['hgnc_id']
            if hgnc_id not in extra_info:
                extra_info[hgnc_id] = []
            extra_info[hgnc_id].append(gene_info)
    for variant_gene in variant_obj.get('genes', []):
        hgnc_id = variant_gene['hgnc_id']
        # Fetch the shared ("common") gene information from the database.
        hgnc_gene = self.hgnc_gene(hgnc_id)
        if not hgnc_gene:
            continue
        # Index the gene's transcripts by ensembl transcript id.
        transcripts_dict = {}
        for transcript in hgnc_gene.get('transcripts', []):
            tx_id = transcript['ensembl_transcript_id']
            transcripts_dict[tx_id] = transcript
        hgnc_gene['transcripts_dict'] = transcripts_dict
        if hgnc_gene.get('incomplete_penetrance'):
            variant_gene['omim_penetrance'] = True
        # Aggregate manual annotations across every panel carrying this gene.
        panel_info = extra_info.get(hgnc_id, [])
        disease_associated = set()
        disease_associated_no_version = set()
        manual_penetrance = False
        mosaicism = False
        manual_inheritance = set()
        for gene_info in panel_info:
            for tx in gene_info.get('disease_associated_transcripts', []):
                # Strip the version suffix (e.g. ".1") so transcripts can
                # be matched version-agnostically below.
                stripped = re.sub(r'\.[0-9]', '', tx)
                disease_associated_no_version.add(stripped)
                disease_associated.add(tx)
            if gene_info.get('reduced_penetrance'):
                manual_penetrance = True
            if gene_info.get('mosaicism'):
                mosaicism = True
            manual_inheritance.update(gene_info.get('inheritance_models', []))
        variant_gene['disease_associated_transcripts'] = list(disease_associated)
        variant_gene['manual_penetrance'] = manual_penetrance
        variant_gene['mosaicism'] = mosaicism
        variant_gene['manual_inheritance'] = list(manual_inheritance)
        # Annotate the variant's own transcripts with primary/refseq/
        # disease-association flags derived from the gene information.
        for transcript in variant_gene.get('transcripts', []):
            tx_id = transcript['transcript_id']
            if not tx_id in transcripts_dict:
                continue
            hgnc_transcript = transcripts_dict[tx_id]
            if hgnc_transcript.get('is_primary'):
                transcript['is_primary'] = True
            if not hgnc_transcript.get('refseq_id'):
                continue
            refseq_id = hgnc_transcript['refseq_id']
            transcript['refseq_id'] = refseq_id
            variant_obj['has_refseq'] = True
            if refseq_id in disease_associated_no_version:
                transcript['is_disease_associated'] = True
            transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers',[])
        variant_gene['common'] = hgnc_gene
        # Attach disease terms linked to this gene.
        variant_gene['disease_terms'] = self.disease_terms(hgnc_id)
    return variant_obj
Add extra information about genes from gene panels Args: variant_obj(dict): A variant from the database gene_panels(list(dict)): List of panels from database
juraj-google-style
def stop_condition(self, condition):
    """Add a stop condition to this simulation.

    The string description is tried against every known stop-condition
    format; the first format that parses it successfully wins.

    Args:
        condition (str): a string description of the stop condition.

    Raises:
        ArgumentError: if no known format can parse the condition.
    """
    for known_format in self._known_conditions:
        try:
            parsed = known_format.FromString(condition)
        except ArgumentError:
            continue
        self.stop_conditions.append(parsed)
        return
    raise ArgumentError("Stop condition could not be processed by any known StopCondition type",
                        condition=condition,
                        suggestion="It may be mistyped or otherwise invalid.")
Add a stop condition to this simulation. Stop conditions are specified as strings and parsed into the appropriate internal structures. Args: condition (str): a string description of the stop condition
juraj-google-style
def _get_path_params(match):
    """Gets path parameters from a regular expression match.

    Args:
        match: A regular expression Match object for a path.

    Returns:
        A dictionary containing the variable names converted from base64.
    """
    result = {}
    # NOTE: Python 2 APIs in use here (dict.iteritems, urllib.unquote_plus).
    for var_name, value in match.groupdict().iteritems():
        # Group names were made regex-safe when the path template was
        # compiled; map each one back to its original parameter name.
        actual_var_name = ApiConfigManager._from_safe_path_param_name(var_name)
        # Values arrive URL-encoded; '+' decodes to a space.
        result[actual_var_name] = urllib.unquote_plus(value)
    return result
Gets path parameters from a regular expression match. Args: match: A regular expression Match object for a path. Returns: A dictionary containing the variable names converted from base64.
juraj-google-style
def predict_proba(self, x, y=None, **kwargs):
    """Predict the causal score using a trained RCC model.

    Args:
        x (numpy.array or pandas.DataFrame or pandas.Series): First
            variable or dataset.
        y (numpy.array): Second variable (optional, depending on the
            first argument).

    Returns:
        float: Causation score (1 if a->b and -1 if b->a).

    Raises:
        ValueError: If the model has not been trained yet.
        TypeError: If the input arguments are of an unsupported type.
    """
    if self.clf is None:
        raise ValueError("Model has to be trained before making predictions.")
    # FIX: the original used `x is pandas.Series` / `x is pandas.DataFrame`,
    # which compares the object's *identity* with the class itself and is
    # therefore always False for instances; isinstance() is the correct test.
    if isinstance(x, pandas.Series):
        input_ = self.featurize_row(x.iloc[0], x.iloc[1]).reshape((1, -1))
    elif isinstance(x, pandas.DataFrame):
        # FIX: featurize each row of the dataset; the original looped over
        # the frame but re-featurized rows 0 and 1 on every iteration.
        input_ = np.array([self.featurize_row(row.iloc[0], row.iloc[1])
                           for _, row in x.iterrows()])
    elif y is not None:
        input_ = self.featurize_row(x, y).reshape((1, -1))
    else:
        raise TypeError("DataType not understood.")
    return self.clf.predict(input_)
Predict the causal score using a trained RCC model Args: x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset. y (numpy.array): Second variable (optional, depending on the first argument). Returns: float: Causation score (1 if a->b and -1 if b->a)
juraj-google-style
def stop_apppool(name):
    """Stop an IIS application pool.

    .. versionadded:: 2017.7.0

    Args:
        name (str): The name of the App Pool to stop.

    Returns:
        bool: True if successful, otherwise False.

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.stop_apppool name='MyTestPool'
    """
    quoted_name = "'{0}'".format(name)
    result = _srvmgr(['Stop-WebAppPool', quoted_name])
    return result['retcode'] == 0
Stop an IIS application pool. .. versionadded:: 2017.7.0 Args: name (str): The name of the App Pool to stop. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.stop_apppool name='MyTestPool'
codesearchnet
def cycle_find(key, width=4):
    """Given an element of a de Bruijn sequence, find its index in that
    sequence.

    Args:
        key(str): The piece of the de Bruijn sequence to find.
        width(int): The width of each element in the sequence.

    Returns:
        int: The index of ``key`` in the de Bruijn sequence, or -1 if it
            does not occur.
    """
    sequence = deBruijn(width, 26)
    # Prime a sliding window with the first len(key) characters, mapping
    # sequence values onto 'A'..'Z'.
    window = ''.join(chr(ord('A') + next(sequence)) for _ in range(len(key)))
    if window == key:
        return 0
    # Slide the window one character at a time over the rest of the
    # sequence until it matches the key.
    for index, value in enumerate(sequence, start=1):
        window = window[1:] + chr(ord('A') + value)
        if window == key:
            return index
    return -1
Given an element of a de Bruijn sequence, find its index in that sequence. Args: key(str): The piece of the de Bruijn sequence to find. width(int): The width of each element in the sequence. Returns: int: The index of ``key`` in the de Bruijn sequence.
codesearchnet
def get_client_kwargs(self, path):
    """Get base keyword arguments for client for a specific path.

    Args:
        path (str): Absolute path or URL.

    Returns:
        dict: client args (always ``Bucket``; ``Key`` only when the path
            points inside the bucket).
    """
    bucket, obj_key = self.split_locator(path)
    client_kwargs = {'Bucket': bucket}
    if obj_key:
        client_kwargs['Key'] = obj_key
    return client_kwargs
Get base keyword arguments for client for a specific path. Args: path (str): Absolute path or URL. Returns: dict: client args
juraj-google-style
def GetVolumeSystemTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains a supported volume system types.

    Args:
        path_spec (PathSpec): path specification.
        resolver_context (Optional[Context]): resolver context, where None
            represents the built-in context which is not multi process
            safe.

    Returns:
        list[str]: supported format type indicators.
    """
    # Lazily build and cache the volume-system specification store and
    # remainder list on the class so repeated calls reuse them.
    if (cls._volume_system_remainder_list is None or
        cls._volume_system_store is None):
        specification_store, remainder_list = cls._GetSpecificationStore(
            definitions.FORMAT_CATEGORY_VOLUME_SYSTEM)
        cls._volume_system_remainder_list = remainder_list
        cls._volume_system_store = specification_store
    # The signature scanner is likewise created once per class.
    if cls._volume_system_scanner is None:
        cls._volume_system_scanner = cls._GetSignatureScanner(
            cls._volume_system_store)
    return cls._GetTypeIndicators(
        cls._volume_system_scanner, cls._volume_system_store,
        cls._volume_system_remainder_list, path_spec,
        resolver_context=resolver_context)
Determines if a file contains a supported volume system types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
juraj-google-style
def unpackStruct(self, data, def_buf):
    """Wrapper for struct.unpack with SerialBlock buffer definitions.

    Args:
        data (str): Implicit cast bytes to str, serial port return.
        def_buf (SerialBlock): Block object holding field lengths.

    Returns:
        tuple: parsed result of struct.unpack() with field definitions,
            or an empty tuple when the payload length is unexpected.
    """
    # Build a standard-size, native-order format string of fixed-width
    # byte fields, one per non-calculated field in the block definition.
    struct_str = "="
    for fld in def_buf:
        if not def_buf[fld][MeterData.CalculatedFlag]:
            struct_str = struct_str + str(def_buf[fld][MeterData.SizeValue]) + "s"
    # NOTE(review): 255 appears to be the fixed length of a full serial
    # block for this meter protocol -- confirm against the protocol spec.
    if len(data) == 255:
        contents = struct.unpack(struct_str, str(data))
    else:
        self.writeCmdMsg("Length error. Len() size = " + str(len(data)))
        contents = ()
    return contents
Wrapper for struct.unpack with SerialBlock buffer definitions. Args: data (str): Implicit cast bytes to str, serial port return. def_buf (SerialBlock): Block object holding field lengths. Returns: tuple: parsed result of struct.unpack() with field definitions.
juraj-google-style
def replace_batch_norm(model):
    """Recursively replace all `torch.nn.BatchNorm2d` with
    `DetrFrozenBatchNorm2d`.

    Args:
        model (torch.nn.Module): input model
    """
    for child_name, child in model.named_children():
        if isinstance(child, nn.BatchNorm2d):
            frozen = DetrFrozenBatchNorm2d(child.num_features)
            # Parameters on the "meta" device carry no data to copy over.
            on_meta = child.weight.device == torch.device('meta')
            if not on_meta:
                frozen.weight.data.copy_(child.weight)
                frozen.bias.data.copy_(child.bias)
                frozen.running_mean.data.copy_(child.running_mean)
                frozen.running_var.data.copy_(child.running_var)
            model._modules[child_name] = frozen
        # Recurse into any submodule that itself has children.
        if list(child.children()):
            replace_batch_norm(child)
Recursively replace all `torch.nn.BatchNorm2d` with `DetrFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model
github-repos