code: string (lengths 20 – 4.93k)
docstring: string (lengths 33 – 1.27k)
source: string (3 classes)
def _add_example(self, example):
    if len(example.fields) != 1:
        raise InvalidSpec(
            'Example for union must specify exactly one tag.',
            example.lineno, example.path)
    example_field = list(example.fields.values())[0]
    tag = example_field.name
    for field in self.all_fields:
        if tag == field.name:
            break
    else:
        raise InvalidSpec(
            "Unknown tag '%s' in example." % tag,
            example.lineno, example.path)
    try:
        field.data_type.check_example(example_field)
    except InvalidSpec as e:
        e.msg = "Bad example for field '{}': {}".format(field.name, e.msg)
        raise
    self._raw_examples[example.label] = example
Adds a "raw example" for this type. This does basic sanity checking to ensure that the example is valid (required fields specified, no unknown fields, correct types, ...). The example is not available via :meth:`get_examples` until :meth:`_compute_examples` is called. Args: example (stone.frontend.ast.AstExample): An example of this type.
juraj-google-style
def qx(mt, x):
    if x < len(mt.qx):
        return mt.qx[x]
    else:
        return 0
qx: Returns the probability that a life aged x dies within 1 year, with the convention that the true probability is qx/1000. Args: mt: the mortality table x: the age as an integer number.
juraj-google-style
def triggered(self, walker):
    if self.use_count:
        comp_value = walker.count()
    else:
        if walker.count() == 0:
            return False
        comp_value = walker.peek().value
    return self.comp_function(comp_value, self.reference)
Check if this input is triggered on the given stream walker. Args: walker (StreamWalker): The walker to check Returns: bool: Whether this trigger is triggered or not
juraj-google-style
def _ParseCachedEntry8(self, value_data, cached_entry_offset): try: cached_entry = self._ReadStructureFromByteStream(value_data[cached_entry_offset:], cached_entry_offset, self._cached_entry_data_type_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError('Unable to parse cached entry value with error: {0!s}'.format(exception)) if (cached_entry.signature not in (self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1)): raise errors.ParseError('Unsupported cache entry signature') cached_entry_data = value_data[cached_entry_offset:] if (cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_0): data_type_map_name = 'appcompatcache_cached_entry_body_8_0' elif (cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_1): data_type_map_name = 'appcompatcache_cached_entry_body_8_1' data_type_map = self._GetDataTypeMap(data_type_map_name) context = dtfabric_data_maps.DataTypeMapContext() try: cached_entry_body = self._ReadStructureFromByteStream(cached_entry_data[12:], (cached_entry_offset + 12), data_type_map, context=context) except (ValueError, errors.ParseError) as exception: raise errors.ParseError('Unable to parse cached entry body with error: {0!s}'.format(exception)) data_offset = context.byte_size data_size = cached_entry_body.data_size cached_entry_object = AppCompatCacheCachedEntry() cached_entry_object.cached_entry_size = (12 + cached_entry.cached_entry_data_size) cached_entry_object.insertion_flags = cached_entry_body.insertion_flags cached_entry_object.last_modification_time = cached_entry_body.last_modification_time cached_entry_object.path = cached_entry_body.path cached_entry_object.shim_flags = cached_entry_body.shim_flags if (data_size > 0): cached_entry_object.data = cached_entry_data[data_offset:(data_offset + data_size)] return cached_entry_object
Parses a Windows 8.0 or 8.1 cached entry. Args: value_data (bytes): value data. cached_entry_offset (int): offset of the first cached entry data relative to the start of the value data. Returns: AppCompatCacheCachedEntry: cached entry. Raises: ParseError: if the value data could not be parsed.
codesearchnet
def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False): try: notify_info2('Updating: ', oldobj) if oldobj is newobj: return if type(oldobj) is not type(newobj): notify_error('Type of: %s changed... Skipping.' % (oldobj,)) return if isinstance(newobj, types.FunctionType): self._update_function(oldobj, newobj) return if isinstance(newobj, types.MethodType): self._update_method(oldobj, newobj) return if isinstance(newobj, classmethod): self._update_classmethod(oldobj, newobj) return if isinstance(newobj, staticmethod): self._update_staticmethod(oldobj, newobj) return if hasattr(types, 'ClassType'): classtype = (types.ClassType, type) else: classtype = type if isinstance(newobj, classtype): self._update_class(oldobj, newobj) return if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__: self._update_class(oldobj, newobj) return if namespace is not None: if oldobj != newobj and str(oldobj) != str(newobj) and repr(oldobj) != repr(newobj): xreload_old_new = None if is_class_namespace: xreload_old_new = getattr(namespace, '__xreload_old_new__', None) if xreload_old_new is not None: self.found_change = True xreload_old_new(name, oldobj, newobj) elif '__xreload_old_new__' in namespace: xreload_old_new = namespace['__xreload_old_new__'] xreload_old_new(namespace, name, oldobj, newobj) self.found_change = True except: notify_error('Exception found when updating %s. Proceeding for other items.' % (name,)) pydev_log.exception()
Update oldobj, if possible in place, with newobj. If oldobj is immutable, this simply returns newobj. Args: oldobj: the object to be updated newobj: the object used as the source for the update
juraj-google-style
def load(overlay, path=''): global DEBUG global _LOADED if DEBUG: print('LOAD OVERLAY: {0} @ {1}'.format(overlay, path)) if (overlay.upper() in _OVERLAYS.keys()): cpath = ((OVERLAYCONFIGPATH + '/') + _FOLDERS[overlay.upper()]) if DEBUG: print('VALID OVERLAY') print('CONFIG PATH: {0}'.format(cpath)) if ((overlay.upper() == 'CUST') and (path == '')): raise ValueError('Path must be specified for Custom Overlay Choice') elif ((overlay.upper() == 'CUST') and _LOADED[overlay.upper()]): print('Custom Overlay already loaded') return 2 elif ((overlay.upper() == 'CUST') and (not os.path.exists(path))): print('Custom Overlay path does not exist') return 1 if (is_chip_pro() and (overlay.upper() == 'PWM0')): print('CHIP Pro supports PWM0 in base DTB, exiting') return 1 if (overlay.upper() != 'CUST'): opath = OVERLAYINSTALLPATH opath += ('/' + _OVERLAYS[overlay.upper()]) else: opath = path if DEBUG: print('OVERLAY PATH: {0}'.format(opath)) if ((overlay.upper() == 'PWM0') and _LOADED[overlay.upper()]): print('PWM0 Overlay already loaded') return 2 if ((overlay.upper() == 'SPI2') and _LOADED[overlay.upper()]): print('SPI2 Overlay already loaded') return 2 errc = _set_overlay_verify(overlay.upper(), opath, cpath) if DEBUG: print('_SET_OVERLAY_VERIFY ERRC: {0}'.format(errc)) if (errc == 0): _LOADED[overlay.upper()] = True else: raise ValueError('Invalid Overlay name specified! Choose between: SPI2, PWM0, CUST')
load - Load a DTB Overlay Inputs: overlay - Overlay Key: SPI2, PWM0, CUST path - Full Path to where the custom overlay is stored Returns: 0 - Successful Load 1 - Unsuccessful Load 2 - Overlay was previously set
codesearchnet
def field_is_set(msg: message.Message,
                 field: Union[descriptor.FieldDescriptor, str]) -> bool:
    return field_content_length(msg, field) > 0
Returns True if the field is set. Args: msg: The Message whose fields to examine. field: The FieldDescriptor or name of the field to examine. Returns: True if field has been set.
github-repos
def feature_hash(feature, dim, seed=123):
    vec = np.zeros(dim)
    i = mmh3.hash(feature, seed) % dim
    vec[i] = 1
    return vec
Feature hashing. Args: feature (str): Target feature represented as string. dim (int): Number of dimensions for a hash value. seed (float): Seed of a MurmurHash3 hash function. Returns: numpy 1d array: one-hot-encoded feature vector for `feature`.
codesearchnet
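A minimal usage sketch for the feature_hash record above (assumes numpy and mmh3 are installed and the function is in scope; which bucket is hit depends on the MurmurHash3 seed):

vec = feature_hash("color=red", dim=16)
assert vec.shape == (16,)
assert vec.sum() == 1.0  # exactly one bucket is set to 1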
def _GetTSKPartitionIdentifiers(self, scan_node):
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid scan node.')

    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(volume_system)
    if not volume_identifiers:
        return []

    if len(volume_identifiers) == 1:
        return volume_identifiers

    if not self._mediator:
        raise errors.ScannerError(
            'Unable to proceed. Partitions found but no mediator to determine '
            'how they should be used.')

    try:
        volume_identifiers = self._mediator.GetPartitionIdentifiers(
            volume_system, volume_identifiers)
    except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='p')
Determines the TSK partition identifiers. Args: scan_node (SourceScanNode): scan node. Returns: list[str]: TSK partition identifiers. Raises: ScannerError: if the format of or within the source is not supported or the scan node is invalid or if the volume for a specific identifier cannot be retrieved. UserAbort: if the user requested to abort.
juraj-google-style
def get_canonical_serializer(resource_key, model=None, instance=None, resource_name=None):
    if model:
        resource_key = get_model_table(model)
    elif instance:
        resource_key = instance._meta.db_table
    elif resource_name:
        resource_key = resource_name_map[resource_name]
    if resource_key not in resource_map:
        return None
    return resource_map[resource_key]['viewset'].serializer_class
Return canonical serializer for a given resource name. Arguments: resource_key - Resource key, usually DB table for model-based resources, otherwise the plural name. model - (Optional) Model class to look up by. instance - (Optional) Model object instance. Returns: serializer class
juraj-google-style
def Sign(verifiable, keypair):
    prikey = bytes(keypair.PrivateKey)
    hashdata = verifiable.GetHashData()
    res = Crypto.Default().Sign(hashdata, prikey)
    return res
Sign the `verifiable` object with the private key from `keypair`. Args: verifiable: keypair (neocore.KeyPair): Returns: bool: True if successfully signed. False otherwise.
juraj-google-style
def _open_debug_interface(self, conn_id, callback, connection_string=None):
    self._try_connect(connection_string)
    callback(conn_id, self.id, True, None)
Enable debug interface for this IOTile device Args: conn_id (int): the unique identifier for the connection callback (callback): Callback to be called when this command finishes callback(conn_id, adapter_id, success, failure_reason)
juraj-google-style
def call(self, method, args=None): message = SoapMessage(endpoint=self.endpoint, method=method, parameters=([] if (args is None) else args), http_headers=self.http_headers, soap_action='http: try: result_elt = message.call() except SoapFault as exc: if ('Client.TokenRefreshRequired' in exc.faultcode): log.debug('Token refresh required. Trying again') self._cached_soap_header = None auth_token = exc.detail.findtext('. private_key = exc.detail.findtext('. self.music_service.account.oa_device_id = auth_token self.music_service.account.key = private_key message = SoapMessage(endpoint=self.endpoint, method=method, parameters=args, http_headers=self.http_headers, soap_action='http: result_elt = message.call() else: raise MusicServiceException(exc.faultstring, exc.faultcode) result = list(parse(XML.tostring(result_elt), process_namespaces=True, namespaces={'http: return (result if (result is not None) else {})
Call a method on the server. Args: method (str): The name of the method to call. args (List[Tuple[str, str]] or None): A list of (parameter, value) pairs representing the parameters of the method. Defaults to `None`. Returns: ~collections.OrderedDict: An OrderedDict representing the response. Raises: `MusicServiceException`: containing details of the error returned by the music service.
codesearchnet
def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    def row_mul(i: int) -> torch.Tensor:
        return torch.stack(
            [
                a[..., i, 0] * b[..., 0, 0] + a[..., i, 1] * b[..., 1, 0] + a[..., i, 2] * b[..., 2, 0],
                a[..., i, 0] * b[..., 0, 1] + a[..., i, 1] * b[..., 1, 1] + a[..., i, 2] * b[..., 2, 1],
                a[..., i, 0] * b[..., 0, 2] + a[..., i, 1] * b[..., 1, 2] + a[..., i, 2] * b[..., 2, 2],
            ],
            dim=-1,
        )

    return torch.stack([row_mul(0), row_mul(1), row_mul(2)], dim=-2)
Performs matrix multiplication of two rotation matrix tensors. Written out by hand to avoid AMP downcasting. Args: a: [*, 3, 3] left multiplicand b: [*, 3, 3] right multiplicand Returns: The product ab
github-repos
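A short usage sketch for the rot_matmul record above (assumes PyTorch is installed and the function is in scope; identity rotations keep the expected product obvious):

import torch

a = torch.eye(3).expand(8, 3, 3)  # a batch of identity rotation matrices
b = torch.eye(3).expand(8, 3, 3)
prod = rot_matmul(a, b)
assert prod.shape == (8, 3, 3)
assert torch.allclose(prod, torch.eye(3).expand(8, 3, 3))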
def _AddUser(self, user):
    self.logger.info('Creating a new user account for %s.', user)
    command = self.useradd_cmd.format(user=user)
    try:
        subprocess.check_call(command.split(' '))
    except subprocess.CalledProcessError as e:
        self.logger.warning('Could not create user %s. %s.', user, str(e))
        return False
    else:
        self.logger.info('Created user account %s.', user)
        return True
Configure a Linux user account. Args: user: string, the name of the Linux user account to create. Returns: bool, True if user creation succeeded.
codesearchnet
def matchmaker_request(url, token, method, content_type=None, accept=None, data=None): headers = Headers() headers = {'X-Auth-Token': token} if content_type: headers['Content-Type'] = content_type if accept: headers['Accept'] = accept req_data = (data or {'timestamp': datetime.datetime.now().timestamp()}) json_response = None try: LOG.info('Sending {} request to MME url {}. Data sent: {}'.format(method, url, req_data)) resp = requests.request(method=method, url=url, headers=headers, data=json.dumps(req_data)) json_response = resp.json() LOG.info('MME server response was:{}'.format(json_response)) if isinstance(json_response, str): json_response = {'message': json_response} elif isinstance(json_response, list): return json_response json_response['status_code'] = resp.status_code except Exception as err: LOG.info('An error occurred while sending HTTP request to server ({})'.format(err)) json_response = {'message': str(err)} return json_response
Send a request to MatchMaker and return its response Args: url(str): url to send request to token(str): MME server authorization token method(str): 'GET', 'POST' or 'DELETE' content_type(str): MME request Content-Type accept(str): accepted response data(dict): eventual data to send in request Returns: json_response(dict): server response
codesearchnet
def GetMessages(self, files):
    result = {}
    for file_name in files:
        file_desc = self.pool.FindFileByName(file_name)
        for name, msg in file_desc.message_types_by_name.items():
            if file_desc.package:
                full_name = '.'.join([file_desc.package, name])
            else:
                full_name = msg.name
            result[full_name] = self.GetPrototype(
                self.pool.FindMessageTypeByName(full_name))
        for name, extension in file_desc.extensions_by_name.items():
            if extension.containing_type.full_name not in self._classes:
                self.GetPrototype(extension.containing_type)
            extended_class = self._classes[extension.containing_type.full_name]
            extended_class.RegisterExtension(extension)
    return result
Gets all the messages from a specified file. This will find and resolve dependencies, failing if the descriptor pool cannot satisfy them. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. This will include any dependent messages as well as any messages defined in the same file as a specified message.
juraj-google-style
def DeregisterSourceType(cls, source_type_class):
    if source_type_class.TYPE_INDICATOR not in cls._source_type_classes:
        raise KeyError('Source type not set for type: {0:s}.'.format(
            source_type_class.TYPE_INDICATOR))
    del cls._source_type_classes[source_type_class.TYPE_INDICATOR]
Deregisters a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if a source type is not set for the corresponding type indicator.
codesearchnet
def broadcast_shape(shape_x, shape_y):
    if shape_x.ndims is None or shape_y.ndims is None:
        return tensor_shape.unknown_shape()
    return_dims = _broadcast_shape_helper(shape_x, shape_y)
    if return_dims is None:
        raise ValueError(
            f'Incompatible shapes for broadcasting. Two shapes are compatible if '
            f'for each dimension pair they are either equal or one of them is 1. '
            f'Received: {shape_x} and {shape_y}.')
    return tensor_shape.TensorShape(return_dims)
Returns the broadcasted shape between `shape_x` and `shape_y`. Args: shape_x: A `TensorShape` shape_y: A `TensorShape` Returns: A `TensorShape` representing the broadcasted shape. Raises: ValueError: If the two shapes can not be broadcasted.
github-repos
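A usage sketch for the broadcast_shape record above (assumes TensorFlow is installed and the function is in scope; tf.TensorShape is the same class the helper consumes and returns):

import tensorflow as tf

result = broadcast_shape(tf.TensorShape([4, 1, 3]), tf.TensorShape([2, 3]))
print(result)  # (4, 2, 3)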
def total_seconds(td):
    secs = td.seconds + td.days * 24 * 3600
    if td.microseconds:
        secs += 1
    return secs
convert a timedelta to seconds. This is patterned after timedelta.total_seconds, which is only available in Python 2.7. Args: td: a timedelta object. Returns: total seconds within a timedelta, rounded up to whole seconds.
codesearchnet
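A quick check of the rounding-up behaviour of the total_seconds record above (standard library only, assuming the function is in scope):

import datetime

td = datetime.timedelta(days=1, seconds=30, microseconds=5)
print(total_seconds(td))  # 86431 == 86400 + 30 + 1 (any microseconds round the result up)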
def read_double(self, little_endian=True):
    if little_endian:
        endian = '<'
    else:
        endian = '>'
    return self.unpack('%sd' % endian, 8)
Read 8 bytes as a double value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: float:
codesearchnet
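The unpacking that read_double performs can be sketched with the standard struct module, which is presumably what self.unpack delegates to (format '<d' or '>d', 8 bytes):

import struct

buf = struct.pack('<d', 3.14)      # 8 little-endian bytes
value, = struct.unpack('<d', buf)
assert abs(value - 3.14) < 1e-12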
def read_sql(cls, sql, con, index_col=None, **kwargs): if cls.read_sql_remote_task is None: return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs) row_cnt_query = "SELECT COUNT(*) FROM ({})".format(sql) row_cnt = pandas.read_sql(row_cnt_query, con).squeeze() cols_names_df = pandas.read_sql( "SELECT * FROM ({}) LIMIT 0".format(sql), con, index_col=index_col ) cols_names = cols_names_df.columns num_parts = cls.frame_mgr_cls._compute_num_partitions() partition_ids = [] index_ids = [] limit = math.ceil(row_cnt / num_parts) for part in range(num_parts): offset = part * limit query = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, limit, offset) partition_id = cls.read_sql_remote_task._remote( args=(num_parts, query, con, index_col, kwargs), num_return_vals=num_parts + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) if index_col is None: index_lens = ray.get(index_ids) new_index = pandas.RangeIndex(sum(index_lens)) else: index_lst = [x for part_index in ray.get(index_ids) for x in part_index] new_index = pandas.Index(index_lst).set_names(index_col) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names ) return new_query_compiler
Reads a SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). kwargs: Pass into pandas.read_sql function.
juraj-google-style
def _html_tree_view(
    self,
    *,
    view: 'HtmlTreeView',
    name: Optional[str] = None,
    parent: Any = None,
    root_path: Optional[KeyPath] = None,
    **kwargs,
) -> Html:
    return view.render(self, name=name, parent=parent, root_path=root_path, **kwargs)
Returns the topmost HTML representation of this extension. Args: view: The view to render the object. name: The name of the object. parent: The parent of the object. root_path: The key path of the object relative to the root. **kwargs: kwargs to pass to the view. See `_html_tree_view_config` for the builtin arguments. Returns: The rendered HTML.
github-repos
def get_histograms_in_list(filename: str, list_name: str=None) -> Dict[(str, Any)]: hists: dict = {} with RootOpen(filename=filename, mode='READ') as fIn: if (list_name is not None): hist_list = fIn.Get(list_name) else: hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()] if (not hist_list): fIn.ls() fIn.Close() raise ValueError(f'Could not find list with name "{list_name}". Possible names are listed above.') for obj in hist_list: _retrieve_object(hists, obj) return hists
Get histograms from the file and make them available in a dict. Lists are recursively explored, with all lists converted to dictionaries, such that the return dictionaries which only contains hists and dictionaries of hists (ie there are no ROOT ``TCollection`` derived objects). Args: filename: Filename of the ROOT file containing the list. list_name: Name of the list to retrieve. Returns: Contains hists with keys as their names. Lists are recursively added, mirroring the structure under which the hists were stored. Raises: ValueError: If the list could not be found in the given file.
codesearchnet
def _getargspec(target):
    fullargspecs = getfullargspec(target)
    if hasattr(_inspect, 'ArgSpec'):
        argspecs = ArgSpec(
            args=fullargspecs.args,
            varargs=fullargspecs.varargs,
            keywords=fullargspecs.varkw,
            defaults=fullargspecs.defaults)
    else:
        argspecs = FullArgSpec(
            args=fullargspecs.args,
            varargs=fullargspecs.varargs,
            varkw=fullargspecs.varkw,
            defaults=fullargspecs.defaults,
            kwonlyargs=[],
            kwonlydefaults=None,
            annotations={})
    return argspecs
A python3 version of getargspec. Calls `getfullargspec` and assigns args, varargs, varkw, and defaults to a python 2/3 compatible `ArgSpec`. The parameter name 'varkw' is changed to 'keywords' to fit the `ArgSpec` struct. Args: target: the target object to inspect. Returns: An ArgSpec with args, varargs, keywords, and defaults parameters from FullArgSpec.
github-repos
def _add_trackable(self, trackable_object, trainable):
    if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):
        handler = trackable_object
    else:
        handler = base_layer_utils.TrackableWeightHandler(trackable_object)
    if trainable:
        self._trainable_weights.append(handler)
    else:
        self._non_trainable_weights.append(handler)
    return handler
Adds a Trackable object to this layer's state. Args: trackable_object: The tf.tracking.Trackable object to add. trainable: Boolean, whether the variable should be part of the layer's "trainable_variables" (e.g. variables, biases) or "non_trainable_variables" (e.g. BatchNorm mean and variance). Returns: The TrackableWeightHandler used to track this object.
github-repos
def deactivate(self, node_id):
    node = self.node_list[node_id]
    self.node_list[node_id] = node._replace(active=False)
Deactivate the node identified by node_id. Deactivates the node corresponding to node_id, which means that it can never be the output of a nearest_point query. Note: The node is not removed from the tree; its data is still available. Args: node_id (int): The node identifier (given to the user after its insertion).
juraj-google-style
def _determine_outliers_for_moving_average(moving_average: np.ndarray, moving_average_threshold: float, number_of_values_to_search_ahead: int, limit_of_number_of_values_below_threshold: int) -> int: below_threshold = (moving_average < moving_average_threshold) values_to_check = [] for i in range(limit_of_number_of_values_below_threshold): values_to_check.append(below_threshold[i:((- ((limit_of_number_of_values_below_threshold - 1) - i)) or None)]) found_at_least_one_bin_above_threshold = False cut_index = (- 1) for (i, values) in enumerate(zip(*values_to_check)): if (i == 0): continue above_threshold = [(not value) for value in values] if any(above_threshold): found_at_least_one_bin_above_threshold = True if (found_at_least_one_bin_above_threshold and all(np.invert(above_threshold))): logger.debug(f'i at found cut_index: {i} with moving_average: {moving_average[i]}') cut_index = (i + (limit_of_number_of_values_below_threshold break return cut_index
Determine outliers to remove from a given moving average. Note: The index returned is when the moving average first drops below the threshold for a moving average calculated with that bin at the center. This is somewhat different from a standard moving average calculation which would only look forward in the array. Args: moving_average: Moving average. moving_average_threshold: Value of moving average under which we consider the moving average to be 0. Default: 2. number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating the moving average. Default: 5. limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1. Returns: 0-indexed index of the histogram axes where the outliers begin.
codesearchnet
def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):
    self.set_mask(layers)
    image = self.get_image(image, output='vector')
    if in_global_mask:
        masked_data = image[self.global_mask]
        masked_data[~self.get_mask(in_global_mask=True)] = 0
    else:
        masked_data = image[self.current_mask]
    if nan_to_num:
        masked_data = np.nan_to_num(masked_data)
    return masked_data
Vectorize an image and mask out all invalid voxels. Args: images: The image to vectorize and mask. Input can be any object handled by get_image(). layers: Which mask layers to use (specified as int, string, or list of ints and strings). When None, applies the conjunction of all layers. nan_to_num: boolean indicating whether to convert NaNs to 0. in_global_mask: Whether to return the resulting masked vector in the globally masked space (i.e., n_voxels = len(self.global_mask)). If False (default), returns in the full image space (i.e., n_voxels = len(self.volume)). Returns: A 1D NumPy array of in-mask voxels.
codesearchnet
def get_energy_tersoff(structure, gulp_cmd='gulp'):
    gio = GulpIO()
    gc = GulpCaller(gulp_cmd)
    gin = gio.tersoff_input(structure)
    gout = gc.run(gin)
    return gio.get_energy(gout)
Compute the energy of a structure using Tersoff potential. Args: structure: pymatgen.core.structure.Structure gulp_cmd: GULP command if not in standard place
codesearchnet
def ParseHeader(table):
    precondition.AssertIterableType(table, dict)
    prototype = None
    for row in table:
        columns = list(iterkeys(row))
        if prototype is None:
            prototype = columns
        elif prototype != columns:
            message = "Expected columns '{expected}', got '{actual}' for table {json}"
            message = message.format(expected=prototype, actual=columns, json=table)
            raise ValueError(message)
    result = rdf_osquery.OsqueryHeader()
    for name in prototype or []:
        result.columns.append(rdf_osquery.OsqueryColumn(name=name))
    return result
Parses header of osquery output. Args: table: A table in a "parsed JSON" representation. Returns: A parsed `rdf_osquery.OsqueryHeader` instance.
juraj-google-style
def url(self, text, **kwargs):
    indicator_obj = URL(text, **kwargs)
    return self._indicator(indicator_obj)
Add URL Address data to Batch object. Args: text (str): The value for this Indicator. confidence (str, kwargs): The threat confidence for this Indicator. date_added (str, kwargs): The date timestamp the Indicator was created. last_modified (str, kwargs): The date timestamp the Indicator was last modified. rating (str, kwargs): The threat rating for this Indicator. xid (str, kwargs): The external id for this Indicator. Returns: obj: An instance of URL.
codesearchnet
def add_deps(self, deps):
    if isinstance(deps, collections.Mapping):
        deps = [Dependency(node, exts) for node, exts in deps.items()]
    if not isinstance(deps, (list, tuple)):
        deps = [deps]
    assert all(isinstance(d, Dependency) for d in deps)
    self._deps.extend(deps)
    if self.is_work:
        for task in self:
            task.add_deps(deps)
    for dep in (d for d in deps if d.node.is_file):
        dep.node.add_filechild(self)
Add a list of dependencies to the :class:`Node`. Args: deps: List of :class:`Dependency` objects specifying the dependencies of the node. or dictionary mapping nodes to file extensions e.g. {task: "DEN"}
juraj-google-style
def from_event(cls, event):
    return cls(
        uuid=event['uuid'],
        job_type=event['job_type'],
        event_type=event['type'],
        queue=event['queue'],
        hostname=event['hostname'],
        pid=event['pid'],
        name=event['name'],
        workflow_id=event['workflow_id'],
        event_time=event['time'],
        duration=event['duration'])
Create a JobEvent object from the event dictionary returned by celery. Args: event (dict): The dictionary as returned by celery. Returns: JobEvent: A fully initialized JobEvent object.
codesearchnet
def scale_regularization_loss(regularization_loss):
    if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context():
        raise RuntimeError(
            'You are calling `scale_regularization_loss` in cross replica context, '
            'while it was expected to be called in replica context.')
    num_replicas = distribute_lib.get_strategy().num_replicas_in_sync
    return math_ops.reduce_sum(regularization_loss) / num_replicas
Scales the sum of the given regularization losses by number of replicas. Usage with distribution strategy and custom training loop: ```python with strategy.scope(): def compute_loss(self, label, predictions): per_example_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, predictions) # Compute loss that is scaled by sample_weight and by global batch size. loss = tf.nn.compute_average_loss( per_example_loss, sample_weight=sample_weight, global_batch_size=GLOBAL_BATCH_SIZE) # Add scaled regularization losses. loss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights)) return loss ``` Args: regularization_loss: Regularization loss. Returns: Scalar loss value.
github-repos
def as_base_units(self):
    b = collections.defaultdict(int)
    factor = 1
    for k, v in self.items():
        derived = False
        for d in DERIVED_UNITS.values():
            if k in d:
                for k2, v2 in d[k].items():
                    if isinstance(k2, Number):
                        factor *= k2 ** (v2 * v)
                    else:
                        b[k2] += v2 * v
                derived = True
                break
        if not derived:
            si, f = _get_si_unit(k)
            b[si] += v
            factor *= f ** v
    return {k: v for k, v in b.items() if v != 0}, factor
Converts all units to base SI units, including derived units. Returns: (base_units_dict, scaling factor). base_units_dict will not contain any constants, which are gathered in the scaling factor.
codesearchnet
def __ne__(self, rhs):
    return self.key != rhs.key or not self.sequence_equal(rhs)
Determine value inequality with another grouping. Args: rhs: The object on the right-hand-side of the comparison must support a property called 'key' and be iterable. Returns: True if the keys or sequences are not equal, otherwise False.
juraj-google-style
def add_columns(tree_view, df_py_dtypes, list_store): tree_view.set_model(list_store) for column_i, (i, dtype_i) in df_py_dtypes[['i', 'dtype']].iterrows(): tree_column_i = gtk.TreeViewColumn(column_i) tree_column_i.set_name(column_i) if dtype_i in (int, long): property_name = 'text' cell_renderer_i = gtk.CellRendererSpin() elif dtype_i == float: property_name = 'text' cell_renderer_i = gtk.CellRendererSpin() elif dtype_i in (bool, ): property_name = 'active' cell_renderer_i = gtk.CellRendererToggle() elif dtype_i in (str, ): property_name = 'text' cell_renderer_i = gtk.CellRendererText() else: raise ValueError('No cell renderer for dtype: %s' % dtype_i) cell_renderer_i.set_data('column_i', i) cell_renderer_i.set_data('column', tree_column_i) tree_column_i.pack_start(cell_renderer_i, True) tree_column_i.add_attribute(cell_renderer_i, property_name, i) tree_view.append_column(tree_column_i)
Add columns to a `gtk.TreeView` for the types listed in `df_py_dtypes`. Args: tree_view (gtk.TreeView) : Tree view to append columns to. df_py_dtypes (pandas.DataFrame) : Data frame containing type information for one or more columns in `list_store`. list_store (gtk.ListStore) : Model data. Returns: None
juraj-google-style
def _scalar_operations(self, axis, scalar, func):
    if isinstance(scalar, (list, np.ndarray, pandas.Series)):
        new_index = self.index if axis == 0 else self.columns

        def list_like_op(df):
            if axis == 0:
                df.index = new_index
            else:
                df.columns = new_index
            return func(df)

        new_data = self._map_across_full_axis(
            axis, self._prepare_method(list_like_op))
        return self.__constructor__(new_data, self.index, self.columns)
    else:
        return self._map_partitions(self._prepare_method(func))
Handler for mapping scalar operations across a Manager. Args: axis: The axis index object to execute the function on. scalar: The scalar value to map. func: The function to use on the Manager with the scalar. Returns: A new QueryCompiler with updated data and new index.
juraj-google-style
def clean_lines(string_list, remove_empty_lines=True):
    for s in string_list:
        clean_s = s
        if '#' in s:
            ind = s.index('#')
            clean_s = s[:ind]
        clean_s = clean_s.strip()
        if (not remove_empty_lines) or clean_s != '':
            yield clean_s
Strips whitespace, carriage returns and empty lines from a list of strings. Args: string_list: List of strings remove_empty_lines: Set to True to skip lines which are empty after stripping. Returns: List of clean strings with no whitespaces.
codesearchnet
def apply_schema(cls, schema: Optional[pg_typing.Schema] = None) -> None:
    if schema is not None:
        schema = cls._normalize_schema(schema)
        setattr(cls, '__schema__', schema)
        setattr(cls, '__sym_fields', pg_typing.Dict(schema))
    cls._on_schema_update()
Applies a schema to a symbolic class. Args: schema: The schema that will be applied to class. If `cls` was attached with an existing schema. The old schema will be dropped. If None, the cls will update its signature and getters according to the (maybe updated) old schema.
github-repos
def destringize(self, string): m = read_tuple_destr_pattern.match(string) if not m: smbl.messages.error( "'{}' is not a valid read name with respect to the RNF specification".format(string), program="RNFtools", subprogram="RNF format", exception=ValueError ) groups = m.groups() self.prefix = groups[0] read_tuple_id = groups[1] self.read_tuple_id = int(read_tuple_id, 16) self.segments = [] segments_str = groups[2:-1] for b_str in segments_str: if b_str is not None: if b_str[0] == ",": b_str = b_str[1:] b = rnftools.rnfformat.Segment() b.destringize(b_str) self.segments.append(b) self.suffix = groups[-1]
Get RNF values for this read from its textual representation and save them into this object. Args: string(str): Textual representation of a read. Raises: ValueError
juraj-google-style
def murmur2(data):
    if six.PY2:
        data = bytearray(bytes(data))
    length = len(data)
    seed = 0x9747b28c
    m = 0x5bd1e995
    r = 24
    h = seed ^ length
    length4 = length // 4

    for i in range(length4):
        i4 = i * 4
        k = ((data[i4 + 0] & 0xff) +
             ((data[i4 + 1] & 0xff) << 8) +
             ((data[i4 + 2] & 0xff) << 16) +
             ((data[i4 + 3] & 0xff) << 24))
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff
        k ^= (k % 0x100000000) >> r
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff

        h *= m
        h &= 0xffffffff
        h ^= k
        h &= 0xffffffff

    extra_bytes = length % 4
    if extra_bytes >= 3:
        h ^= (data[(length & ~3) + 2] & 0xff) << 16
        h &= 0xffffffff
    if extra_bytes >= 2:
        h ^= (data[(length & ~3) + 1] & 0xff) << 8
        h &= 0xffffffff
    if extra_bytes >= 1:
        h ^= (data[length & ~3] & 0xff)
        h &= 0xffffffff
        h *= m
        h &= 0xffffffff

    h ^= (h % 0x100000000) >> 13
    h &= 0xffffffff
    h *= m
    h &= 0xffffffff
    h ^= (h % 0x100000000) >> 15
    h &= 0xffffffff

    return h
Pure-python Murmur2 implementation. Based on java client, see org.apache.kafka.common.utils.Utils.murmur2 Args: data (bytes): opaque bytes Returns: MurmurHash2 of data
juraj-google-style
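A small usage sketch for the murmur2 record above (assumes the function is in scope; no particular hash value is claimed, only the determinism and 32-bit range that the Kafka partitioner relies on):

key = b"my-partition-key"
assert murmur2(key) == murmur2(key)      # deterministic for the same bytes
assert 0 <= murmur2(key) <= 0xffffffff   # always a 32-bit value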
def _store_request_line(self, req_line): if (not isinstance(req_line, str)): try: req_line = self.raw_request_line = req_line.decode() except UnicodeDecodeError: raise HTTPErrorBadRequest try: (self.method_str, self.original_url, self.version) = req_line.split() except ValueError: raise HTTPErrorBadRequest() if (self.version not in ('HTTP/1.1', 'HTTP/1.0')): raise HTTPErrorVersionNotSupported(self.version) try: self.method = HTTPMethod[self.method_str] except KeyError: err = "Unknown HTTP Method '{}'".format(self.method_str) raise HTTPErrorNotImplemented(err) self._process_headers = {HTTPMethod.GET: self.process_get_headers, HTTPMethod.POST: self.process_post_headers}.get(self.method, (lambda data: True)) (_, num_str) = self.version.split('/', 1) self.HTTP_VERSION = tuple(num_str.split('.')) self.version_number = float(num_str) self.parsed_url = urlparse(self.original_url) self.path = unquote(self.parsed_url.path) self.query = parse_qs(self.parsed_url.query) return (self.method, self.parsed_url, self.version)
Splits the request line given into three components. Ensures that the version and method are valid for this server, and uses the urllib.parse function to parse the request URI. Note: This method has the additional side effect of updating all request line related attributes of the parser. Returns: tuple: Tuple containing the parsed (method, parsed_url, version) Raises: HTTPErrorBadRequest: If request line is invalid HTTPErrorNotImplemented: If HTTP method is not recognized HTTPErrorVersionNotSupported: If HTTP version is not recognized.
codesearchnet
def _on_change(self, field_updates: Dict[utils.KeyPath, base.FieldUpdate]):
    del field_updates
    return self._on_bound()
Event that is triggered when field values in the subtree are updated. This event will be called * On per-field basis when object is modified via attribute. * In batch when multiple fields are modified via `rebind` method. When a field in an object tree is updated, all ancestors' `_on_change` event will be triggered in order, from the nearest one to furthest one. Args: field_updates: Updates made to the subtree. Key path is relative to current object. Returns: it will call `_on_bound` and return the return value of `_on_bound`.
github-repos
def push(self, stream, reading): reading = copy.copy(reading) reading.stream = stream.encode() if stream.buffered: output_buffer = stream.output if (self.id_assigner is not None): reading.reading_id = self.id_assigner(stream, reading) try: self._engine.push(reading) except StorageFullError: if ((stream.output and (not self._rollover_streaming)) or ((not stream.output) and (not self._rollover_storage))): raise self._erase_buffer(stream.output) self._engine.push(reading) for walker in self._queue_walkers: if (walker.selector.output == output_buffer): walker.notify_added(stream) for selector in self._monitors: if ((selector is None) or selector.matches(stream)): for callback in self._monitors[selector]: callback(stream, reading) for walker in self._virtual_walkers: if walker.matches(stream): walker.push(stream, reading) self._last_values[stream] = reading
Push a reading into a stream, updating any associated stream walkers. Args: stream (DataStream): the stream to push the reading into reading (IOTileReading): the reading to push
codesearchnet
def get(object_ids): worker = global_worker worker.check_connected() with profiling.profile('ray.get'): if (worker.mode == LOCAL_MODE): return object_ids global last_task_error_raise_time if isinstance(object_ids, list): values = worker.get_object(object_ids) for (i, value) in enumerate(values): if isinstance(value, RayError): last_task_error_raise_time = time.time() raise value return values else: value = worker.get_object([object_ids])[0] if isinstance(value, RayError): last_task_error_raise_time = time.time() raise value return value
Get a remote object or a list of remote objects from the object store. This method blocks until the object corresponding to the object ID is available in the local object store. If this object is not in the local object store, it will be shipped from an object store that has it (once the object has been created). If object_ids is a list, then the objects corresponding to each object in the list will be returned. Args: object_ids: Object ID of the object to get or a list of object IDs to get. Returns: A Python object or a list of Python objects. Raises: Exception: An exception is raised if the task that created the object or that created one of the objects raised an exception.
codesearchnet
def put(self, destination):
    if not self._fetched:
        self._fetch()
    DirectoryArchive.put(self, destination)
Copy the referenced directory to this path Args: destination (str): path to put this directory (which must NOT already exist)
juraj-google-style
def create(cls, session, web_hook):
    cls('/hooks.json',
        data=web_hook.to_api(),
        request_type=RequestPaginator.POST,
        session=session)
    return True
Create a web hook. Note that creating a new web hook will overwrite the web hook that is already configured for this company. There is also no way to programmatically determine if a web hook already exists for the company. This is a limitation of the HelpScout API and cannot be circumvented. Args: session (requests.sessions.Session): Authenticated session. web_hook (helpscout.models.WebHook): The web hook to be created. Returns: bool: ``True`` if the creation was a success. Errors otherwise.
codesearchnet
def apply(self, transform, pvalueish=None, label=None): if isinstance(transform, ptransform._NamedPTransform): return self.apply(transform.transform, pvalueish, label or transform.label) if not isinstance(transform, ptransform.PTransform): raise TypeError('Expected a PTransform object, got %s' % transform) if label: old_label, transform.label = (transform.label, label) try: return self.apply(transform, pvalueish) finally: transform.label = old_label if self._current_transform() is self._root_transform(): alter_label_if_ipython(transform, pvalueish) full_label = '/'.join([self._current_transform().full_label, transform.label]).lstrip('/') if full_label in self.applied_labels: auto_unique_labels = self._options.view_as(StandardOptions).auto_unique_labels if auto_unique_labels: logging.warning('Using --auto_unique_labels could cause data loss when updating a pipeline or reloading the job state. This is not recommended for streaming jobs.') unique_label = self._generate_unique_label(transform) return self.apply(transform, pvalueish, unique_label) else: raise RuntimeError('A transform with label "%s" already exists in the pipeline. To apply a transform with a specified label, write pvalue | "label" >> transform or use the option "auto_unique_labels" to automatically generate unique transform labels. Note "auto_unique_labels" could cause data loss when updating a pipeline or reloading the job state. This is not recommended for streaming jobs.' % full_label) self.applied_labels.add(full_label) if pvalueish is None: full_label = self._current_transform().full_label raise TypeCheckError(f'Transform "{full_label}" was applied to the output of an object of type None.') pvalueish, inputs = transform._extract_input_pvalues(pvalueish) try: if not isinstance(inputs, dict): inputs = {str(ix): input for ix, input in enumerate(inputs)} except TypeError: raise NotImplementedError('Unable to extract PValue inputs from %s; either %s does not accept inputs of this format, or it does not properly override _extract_input_pvalues' % (pvalueish, transform)) for t, leaf_input in inputs.items(): if not isinstance(leaf_input, pvalue.PValue) or not isinstance(t, str): raise NotImplementedError('%s does not properly override _extract_input_pvalues, returned %s from %s' % (transform, inputs, pvalueish)) current = AppliedPTransform(self._current_transform(), transform, full_label, inputs, None, annotations=self._current_annotations()) self._current_transform().add_part(current) try: self.transforms_stack.append(current) type_options = self._options.view_as(TypeOptions) if type_options.pipeline_type_check: transform.type_check_inputs(pvalueish) if isinstance(pvalueish, pvalue.PBegin) and isinstance(transform, ParDo): full_label = self._current_transform().full_label raise TypeCheckError(f"Transform '{full_label}' expects a PCollection as input. 
Got a PBegin/Pipeline instead.") self._assert_not_applying_PDone(pvalueish, transform) pvalueish_result = self.runner.apply(transform, pvalueish, self._options) if type_options is not None and type_options.pipeline_type_check: transform.type_check_outputs(pvalueish_result) for tag, result in ptransform.get_named_nested_pvalues(pvalueish_result): assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple)) if result.producer is None: result.producer = current self._infer_result_type(transform, tuple(inputs.values()), result) assert isinstance(result.producer.inputs, tuple) if isinstance(result, pvalue.DoOutputsTuple): current.add_output(result, result._main_tag) continue base = tag counter = 0 while tag in current.outputs: counter += 1 tag = '%s_%d' % (base, counter) current.add_output(result, tag) if type_options is not None and type_options.type_check_strictness == 'ALL_REQUIRED' and (transform.get_type_hints().output_types is None): ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label) raise TypeCheckError('Pipeline type checking is enabled, however no output type-hint was found for the PTransform %s' % ptransform_name) finally: self.transforms_stack.pop() return pvalueish_result
Applies a custom transform using the pvalueish specified. Args: transform (~apache_beam.transforms.ptransform.PTransform): the :class:`~apache_beam.transforms.ptransform.PTransform` to apply. pvalueish (~apache_beam.pvalue.PCollection): the input for the :class:`~apache_beam.transforms.ptransform.PTransform` (typically a :class:`~apache_beam.pvalue.PCollection`). label (str): label of the :class:`~apache_beam.transforms.ptransform.PTransform`. Raises: TypeError: if the transform object extracted from the argument list is not a :class:`~apache_beam.transforms.ptransform.PTransform`. RuntimeError: if the transform object was already applied to this pipeline and needs to be cloned in order to apply again.
github-repos
def set_atten(self, idx, value):
    if not self.is_open:
        raise attenuator.Error(
            'Connection to attenuator at %s is not open!' % self._telnet_client.host)
    if idx + 1 > self.path_count:
        raise IndexError('Attenuator index out of range!', self.path_count, idx)
    if value > self.max_atten:
        raise ValueError('Attenuator value out of range!', self.max_atten, value)
    self._telnet_client.cmd('CHAN:%s:SETATT:%s' % (idx + 1, value))
Sets the attenuation value for a particular signal path. Args: idx: Zero-based index int which is the identifier for a particular signal path in an instrument. For instruments that only has one channel, this is ignored by the device. value: A float that is the attenuation value to set. Raises: Error: The underlying telnet connection to the instrument is not open. IndexError: The index of the attenuator is greater than the maximum index of the underlying instrument. ValueError: The requested set value is greater than the maximum attenuation value.
codesearchnet
def _get_parser(use_v2_converter):
    parser = argparse.ArgumentParser(
        description='Command line tool to run TensorFlow Lite Converter.')
    parser.add_argument(
        '--output_file', type=str, help='Full filepath of the output file.', required=True)
    if use_v2_converter:
        _get_tf2_flags(parser)
    else:
        _get_tf1_flags(parser)
    parser.add_argument(
        '--experimental_new_converter', action=_ParseBooleanFlag, nargs='?', default=True,
        help='Experimental flag, subject to change. Enables MLIR-based conversion '
             'instead of TOCO conversion. (default True)')
    parser.add_argument(
        '--experimental_new_quantizer', action=_ParseBooleanFlag, nargs='?',
        help='Experimental flag, subject to change. Enables MLIR-based quantizer '
             'instead of flatbuffer conversion. (default True)')
    return parser
Returns an ArgumentParser for tflite_convert. Args: use_v2_converter: Indicates which converter to return. Return: ArgumentParser.
github-repos
def from_rtm(cls, raw_event: MutableMapping) -> "Event":
    if raw_event["type"].startswith("message"):
        return Message(raw_event)
    else:
        return Event(raw_event)
Create an event with data coming from the RTM API. If the event type is a message a :class:`slack.events.Message` is returned. Args: raw_event: JSON decoded data from the RTM API Returns: :class:`slack.events.Event` or :class:`slack.events.Message`
juraj-google-style
def is_field_remote(model, field_name):
    if not hasattr(model, '_meta'):
        return False
    model_field = get_model_field(model, field_name)
    return isinstance(model_field, (ManyToManyField, RelatedObject))
Check whether a given model field is a remote field. A remote field is the inverse of a one-to-many or a many-to-many relationship. Arguments: model: a Django model field_name: the name of a field Returns: True if `field_name` is a remote field, False otherwise.
juraj-google-style
def flatten(self):
    if self._flat is None:
        flat = {}
        for arg in self.args:
            if isinstance(arg, Option):
                flat[arg.name] = arg
            elif isinstance(arg, ListOption):
                flat[arg.name] = arg
            elif isinstance(arg, DictOption):
                flat[arg.name] = arg
                if arg.scheme:
                    for k, v in arg.scheme.flatten().items():
                        flat[arg.name + '.' + k] = v
        self._flat = flat
    return self._flat
Flatten the scheme into a dictionary where the keys are compound 'dot' notation keys, and the values are the corresponding options. Returns: dict: The flattened `Scheme`.
codesearchnet
def precheck_dist_hash(context): key = "{}/{}/dist-hash".format(context.service_name, context.env) print_if_verbose("precheck_dist_hash with key: {}".format(key)) try: current_dist_hash = Version(context.aws_client("s3").get_object( Bucket=EFConfig.S3_VERSION_BUCKET, Key=key )) print_if_verbose("dist-hash found: {}".format(current_dist_hash.value)) except ClientError as error: if error.response["Error"]["Code"] == "NoSuchKey": print_if_verbose("precheck passed without check because current dist-hash is None") return True else: fail("Exception while prechecking dist_hash for {} {}: {}".format(context.service_name, context.env, error)) try: response = urllib2.urlopen(current_dist_hash.location, None, 5) if response.getcode() != 200: raise IOError("Non-200 response " + str(response.getcode()) + " reading " + current_dist_hash.location) dist_hash_in_service = response.read().strip() except urllib2.URLError as error: raise IOError("URLError in http_get_dist_version: " + repr(error)) if dist_hash_in_service != current_dist_hash.value: raise RuntimeError("{} dist-hash in service: {} but expected dist-hash: {}" .format(key, dist_hash_in_service, current_dist_hash.value)) return True
Is the dist in service the same as the dist marked current in the version records? This tool won't update records unless the world state is coherent. Args: context: a populated EFVersionContext object Returns: True if ok to proceed Raises: RuntimeError if not ok to proceed
juraj-google-style
def assert_no_entries_with_modulus_zero(
    x, message=None, name='assert_no_entries_with_modulus_zero'):
    with ops.name_scope(name, values=[x]):
        x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')
        dtype = x.dtype.base_dtype
        should_be_nonzero = math_ops.abs(x)
        zero = tensor_conversion.convert_to_tensor_v2_with_dispatch(0, dtype=dtype.real_dtype)
        return check_ops.assert_less(zero, should_be_nonzero, message=message)
Returns `Op` that asserts Tensor `x` has no entries with modulus zero. Args: x: Numeric `Tensor`, real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this `Op`. Returns: An `Op` that asserts `x` has no entries with modulus zero.
github-repos
def isFrameRange(frange):
    frange = str(frange).translate(None, ''.join(PAD_MAP.keys()))
    if not frange:
        return True
    for part in frange.split(','):
        if not part:
            continue
        try:
            FrameSet._parse_frange_part(part)
        except ParseException:
            return False
    return True
Return True if the given string is a frame range. Any padding characters, such as '#' and '@' are ignored. Args: frange (str): a frame range to test Returns: bool:
juraj-google-style
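A usage sketch for the isFrameRange record above (the accepted grammar is defined by the library's FrameSet parser, so these literals are only illustrative):

assert isFrameRange("1-100")
assert isFrameRange("1-100x5,200")
assert not isFrameRange("not a frame range")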
def description(self, value):
    try:
        value = np.dtype(value)
    except TypeError as e:
        return None
    for dtype, string in self._all:
        if dtype == value:
            return string
    return None
Fetches the translated description for the given datatype. The given value will be converted to a `numpy.dtype` object, matched against the supported datatypes and the description will be translated into the preferred language. (Usually a settings dialog should be available to change the language). If the conversion fails or no match can be found, `None` will be returned. Args: value (type|numpy.dtype): Any object or type. Returns: str: The translated description of the datatype None: If no match could be found or an error occured during convertion.
codesearchnet
def get_single_value(value):
    if not all_elements_equal(value):
        raise ValueError('Not all values are equal to each other.')
    if is_scalar(value):
        return value
    return value.item(0)
Get a single value out of the given value. This is meant to be used after a call to :func:`all_elements_equal` that returned True. With this function we return a single number from the input value. Args: value (ndarray or number): a numpy array or a single number. Returns: number: a single number from the input Raises: ValueError: if not all elements are equal
juraj-google-style
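A small usage sketch for the get_single_value record above (assumes numpy is installed and the module's all_elements_equal and is_scalar helpers are in scope):

import numpy as np

assert get_single_value(np.array([3.0, 3.0, 3.0])) == 3.0
assert get_single_value(7) == 7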
def from_csv(cls, filename: str): with open(filename, 'r', encoding='utf-8') as f: reader = csv.reader(f, delimiter=unicode2str(','), quotechar=unicode2str('"'), quoting=csv.QUOTE_MINIMAL) entries = list() header_read = False elements = None for row in reader: if (not header_read): elements = row[1:(len(row) - 1)] header_read = True else: name = row[0] energy = float(row[(- 1)]) comp = dict() for ind in range(1, (len(row) - 1)): if (float(row[ind]) > 0): comp[Element(elements[(ind - 1)])] = float(row[ind]) entries.append(PDEntry(Composition(comp), energy, name)) return cls(entries)
Imports PDEntries from a csv. Args: filename: Filename to import from. Returns: List of Elements, List of PDEntries
codesearchnet
def open(self): log.info('WebSocket connection opened') proto_version = self.get_argument('bokeh-protocol-version', default=None) if (proto_version is None): self.close() raise ProtocolError('No bokeh-protocol-version specified') session_id = self.get_argument('bokeh-session-id', default=None) if (session_id is None): self.close() raise ProtocolError('No bokeh-session-id specified') if (not check_session_id_signature(session_id, signed=self.application.sign_sessions, secret_key=self.application.secret_key)): log.error('Session id had invalid signature: %r', session_id) raise ProtocolError('Invalid session ID') def on_fully_opened(future): e = future.exception() if (e is not None): log.debug('Failed to fully open connection %r', e) future = self._async_open(session_id, proto_version) self.application.io_loop.add_future(future, on_fully_opened)
Initialize a connection to a client. Returns: None
codesearchnet
def _parse_hextet(cls, hextet_str):
    if not cls._HEX_DIGITS.issuperset(hextet_str):
        raise ValueError("Only hex digits permitted in %r" % hextet_str)
    if len(hextet_str) > 4:
        msg = "At most 4 characters permitted in %r"
        raise ValueError(msg % hextet_str)
    return int(hextet_str, 16)
Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF].
juraj-google-style
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval, reload_task): if (load_interval < 0): raise ValueError(('load_interval is negative: %d' % load_interval)) def _reload(): while True: start = time.time() logger.info('TensorBoard reload process beginning') for (path, name) in six.iteritems(path_to_run): multiplexer.AddRunsFromDirectory(path, name) logger.info('TensorBoard reload process: Reload the whole Multiplexer') multiplexer.Reload() duration = (time.time() - start) logger.info('TensorBoard done reloading. Load took %0.3f secs', duration) if (load_interval == 0): break time.sleep(load_interval) if (reload_task == 'process'): logger.info('Launching reload in a child process') import multiprocessing process = multiprocessing.Process(target=_reload, name='Reloader') process.daemon = True process.start() elif (reload_task in ('thread', 'auto')): logger.info('Launching reload in a daemon thread') thread = threading.Thread(target=_reload, name='Reloader') thread.daemon = True thread.start() elif (reload_task == 'blocking'): if (load_interval != 0): raise ValueError('blocking reload only allowed with load_interval=0') _reload() else: raise ValueError(('unrecognized reload_task: %s' % reload_task))
Starts automatically reloading the given multiplexer. If `load_interval` is positive, the thread will reload the multiplexer by calling `ReloadMultiplexer` every `load_interval` seconds, starting immediately. Otherwise, reloads the multiplexer once and never again. Args: multiplexer: The `EventMultiplexer` to add runs to and reload. path_to_run: A dict mapping from paths to run names, where `None` as the run name is interpreted as a run name equal to the path. load_interval: An integer greater than or equal to 0. If positive, how many seconds to wait after one load before starting the next load. Otherwise, reloads the multiplexer once and never again (no continuous reloading). reload_task: Indicates the type of background task to reload with. Raises: ValueError: If `load_interval` is negative.
codesearchnet
def pprint_value(self, value): own_type = (type(value) if (self.type is None) else self.type) formatter = (self.value_format if self.value_format else self.type_formatters.get(own_type)) if formatter: if callable(formatter): return formatter(value) elif isinstance(formatter, basestring): if isinstance(value, (dt.datetime, dt.date)): return value.strftime(formatter) elif isinstance(value, np.datetime64): return util.dt64_to_dt(value).strftime(formatter) elif re.findall('\\{(\\w+)\\}', formatter): return formatter.format(value) else: return (formatter % value) return unicode(bytes_to_unicode(value))
Applies the applicable formatter to the value.

Args:
    value: Dimension value to format

Returns:
    Formatted dimension value
codesearchnet
def _masked_crc32c(cls, value, crc32c_fn=_default_crc32c_fn):
    crc = crc32c_fn(value)
    return (crc >> 15 | crc << 17) + 2726488792 & 4294967295
Compute a masked crc32c checksum for a value.

Args:
    value: A bytes object for which we compute the crc.
    crc32c_fn: A function that can compute a crc32c. This is a performance
        hook that also helps with testing. Callers are not expected to make
        use of it directly.

Returns:
    Masked crc32c checksum.
github-repos
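A worked sketch of the masking step above, with the constants written in hex; the raw crc value below is made up, and any crc32c implementation could be plugged in where crc32c_fn is used.

def mask_crc(crc):
    # Rotate-and-offset masking as in _masked_crc32c above:
    # ((crc >> 15) | (crc << 17)) + 0xA282EAD8, truncated to 32 bits.
    # 0xA282EAD8 == 2726488792 and 0xFFFFFFFF == 4294967295.
    return (((crc >> 15) | (crc << 17)) + 0xA282EAD8) & 0xFFFFFFFF

raw_crc = 0x12345678           # made-up raw crc32c value
print(hex(mask_crc(raw_crc)))  # prints the 32-bit masked checksum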
def _as_document(self, partition):
    doc = super(self.__class__, self)._as_document(partition)
    doc['keywords'] = doc['keywords'].replace('-', '_')
    doc['doc'] = doc['doc'].replace('-', '_')
    doc['title'] = doc['title'].replace('-', '_')
    doc['time_coverage'] = partition.time_coverage
    return doc
Converts a partition to a document indexed by the FTS index.

Args:
    partition (orm.Partition): partition to convert.

Returns:
    dict whose structure matches BasePartitionIndex._schema.
juraj-google-style
def close(self): raise NotImplementedError
Closes the current writer.

Please see documentation in ``iobase.Sink`` for an example.

Returns:
    An object representing the writes that were performed by the current
    writer.
github-repos
def _ParseUSNChangeJournal(self, parser_mediator, usn_change_journal):
    if (not usn_change_journal):
        return

    usn_record_map = self._GetDataTypeMap('usn_record_v2')

    usn_record_data = usn_change_journal.read_usn_record()
    while usn_record_data:
        current_offset = usn_change_journal.get_offset()

        try:
            usn_record = self._ReadStructureFromByteStream(usn_record_data, current_offset, usn_record_map)
        except (ValueError, errors.ParseError) as exception:
            raise errors.ParseError('Unable to parse USN record at offset: 0x{0:08x} with error: {1!s}'.format(current_offset, exception))

        name_offset = (usn_record.name_offset - 60)
        utf16_stream = usn_record.name[name_offset:usn_record.name_size]

        try:
            name_string = utf16_stream.decode('utf-16-le')
        except (UnicodeDecodeError, UnicodeEncodeError) as exception:
            name_string = utf16_stream.decode('utf-16-le', errors='replace')
            parser_mediator.ProduceExtractionWarning('unable to decode USN record name string with error: {0:s}. Characters that cannot be decoded will be replaced with "?" or "\\ufffd".'.format(exception))

        event_data = NTFSUSNChangeEventData()
        event_data.file_attribute_flags = usn_record.file_attribute_flags
        event_data.file_reference = usn_record.file_reference
        event_data.filename = name_string
        event_data.offset = current_offset
        event_data.parent_file_reference = usn_record.parent_file_reference
        event_data.update_reason_flags = usn_record.update_reason_flags
        event_data.update_sequence_number = usn_record.update_sequence_number
        event_data.update_source_flags = usn_record.update_source_flags

        if (not usn_record.update_date_time):
            date_time = dfdatetime_semantic_time.SemanticTime('Not set')
        else:
            date_time = dfdatetime_filetime.Filetime(timestamp=usn_record.update_date_time)

        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)

        usn_record_data = usn_change_journal.read_usn_record()
Parses an USN change journal.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    usn_change_journal (pyfsntsfs.usn_change_journal): USN change journal.

Raises:
    ParseError: if an USN change journal record cannot be parsed.
codesearchnet
def _empty_resource_attributes(self):
    self.status_code = 404
    self.headers = {}
    self.exists = False
    self.rdf = self._build_rdf()
    if (type(self) == NonRDFSource):
        self.binary.empty()
Small method to empty selected resource attribute values if the resource is
removed or absent.

Args:
    None

Return:
    None: empties selected resource attributes
codesearchnet
def traverse_levelorder(self, leaves=True, internal=True):
    q = deque(); q.append(self)
    while len(q) != 0:
        n = q.popleft()
        if (leaves and n.is_leaf()) or (internal and not n.is_leaf()):
            yield n
        q.extend(n.children)
Perform a levelorder traversal starting at this ``Node`` object

Args:
    ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
    ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
juraj-google-style
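A small, self-contained sketch of the same level-order idea; the Node class here is a hypothetical stand-in exposing the children and is_leaf() members the generator above relies on.

from collections import deque

class Node:
    # Hypothetical minimal node type with the members used above.
    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []

    def is_leaf(self):
        return len(self.children) == 0

root = Node("r", [Node("a", [Node("c")]), Node("b")])
queue = deque([root])
while len(queue) != 0:
    node = queue.popleft()
    print(node.label)         # visits r, a, b, c (level by level)
    queue.extend(node.children)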
def __init__(self, temp_dir, use_gpu):
    self._temp_dir = temp_dir
    self._use_gpu = use_gpu
    self._tmp_extracted_dir = os.path.join(self._temp_dir, 'tmp_extracted')
    self._extracted_submission_dir = os.path.join(self._temp_dir, 'extracted')
    self._sample_input_dir = os.path.join(self._temp_dir, 'input')
    self._sample_output_dir = os.path.join(self._temp_dir, 'output')
Initializes instance of SubmissionValidator.

Args:
    temp_dir: temporary working directory
    use_gpu: whether to use GPU
juraj-google-style
def _validate_inputs(self, input_tensors, quantized_input_stats):
    if not self._is_unknown_shapes_allowed() and self._has_valid_tensors():
        for tensor in input_tensors:
            shape = tensor.shape
            if not shape:
                raise ValueError("Provide an input shape for input array '{0}'.".format(_get_tensor_name(tensor)))
            shape_list = shape.as_list()
            if None in shape_list[1:]:
                raise ValueError("None is only supported in the 1st dimension. Tensor '{0}' has invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
            elif shape_list and shape_list[0] is None:
                self._set_batch_size(batch_size=1)

    if quantized_input_stats:
        self._quantized_stats = []
        invalid_stats = []
        for name in self.get_input_arrays():
            if name in quantized_input_stats:
                self._quantized_stats.append(quantized_input_stats[name])
            else:
                invalid_stats.append(name)
        if invalid_stats:
            raise ValueError("Quantization input stats are not available for input tensors '{0}'.".format(','.join(invalid_stats)))
    else:
        self._quantized_stats = None
Validate input parameters.

Args:
    input_tensors: List of input tensors.
    quantized_input_stats: Map of input tensor names to a tuple of floats
        representing the mean and standard deviation of the training data.

Raises:
    ValueError: Input shape is not specified. Quantization input stats is
        required but not provided.
github-repos
def inspect_secret(self, id):
    url = self._url('/secrets/{0}', id)
    return self._result(self._get(url), True)
Retrieve secret metadata.

Args:
    id (string): Full ID of the secret to inspect.

Returns (dict):
    A dictionary of metadata

Raises:
    :py:class:`docker.errors.NotFound`
        if no secret with that ID exists
juraj-google-style
def _Recv(self, timeout):
    buf = ''
    wait_for_line = (timeout is TIMEOUT_FOREVER)
    deadline = (time.time() + (timeout if (not wait_for_line) else 0))

    def TimeLeft():
        return max((1000 * (deadline - time.time())), 0)

    continue_reading = True
    while continue_reading:
        poll_timeout = (None if wait_for_line else TimeLeft())
        fd_list = [event[0] for event in self._poller.poll(poll_timeout) if (event[1] & (select.POLLIN | select.POLLPRI))]
        if ((not wait_for_line) and (TimeLeft() == 0)):
            continue_reading = False
        if (self._outfile_r.fileno() in fd_list):
            buf += self._outfile_r.readline()
            if buf.endswith('\n'):
                return buf
        if (self._errfile_r.fileno() in fd_list):
            exc = self._errfile_r.readline()
            if exc:
                exc_text = '\n-----------------------------------\n'
                exc_text += 'Error occurred within GdbService:\n'
                try:
                    exc_text += json.loads(exc)
                except ValueError:
                    deadline = (time.time() + 0.5)
                    while (self.is_running and (TimeLeft() > 0)):
                        exc += self._errfile_r.read()
                    try:
                        exc_text += json.loads(exc)
                    except ValueError:
                        exc_text = exc
                raise ProxyError(exc_text)
    raise TimeoutError()
Receive output from gdb.

This reads gdb's stdout and stderr streams, returns a single line of gdb's
stdout or rethrows any exceptions thrown from within gdb as well as it can.

Args:
    timeout: floating point number of seconds after which to abort.
        A value of None or TIMEOUT_FOREVER means "there is no timeout", i.e.
        this might block forever.

Raises:
    ProxyError: All exceptions received from the gdb service are generically
        reraised as this.
    TimeoutError: Raised if no answer is received from gdb within the
        specified time.

Returns:
    The current contents of gdb's stdout buffer, read until the next
    newline, or `None`, should the read fail or timeout.
codesearchnet
def get_feature_from_key(self, feature_key):
    feature = self.feature_key_map.get(feature_key)

    if feature:
        return feature

    self.logger.error('Feature "%s" is not in datafile.' % feature_key)
    return None
Get feature for the provided feature key.

Args:
    feature_key: Feature key for which feature is to be fetched.

Returns:
    Feature corresponding to the provided feature key.
juraj-google-style
def delete_nic(access_token, subscription_id, resource_group, nic_name):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Network/networkInterfaces/', nic_name,
                        '?api-version=', NETWORK_API])
    return do_delete(endpoint, access_token)
Delete a network interface.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.
    resource_group (str): Azure resource group name.
    nic_name (str): Name of the NIC.

Returns:
    HTTP response.
juraj-google-style
def disconnect(signal, receiver):
    inputkey = __make_id(receiver)

    with __lock:
        __purge()
        receivers = __receivers.get(signal)
        for idx in six.moves.range(len(receivers)):
            connected = receivers[idx]()
            if inputkey != __make_id(connected):
                continue
            del receivers[idx]
            return True

    return False
Disconnect the `receiver` from the given `signal`.

Args:
    signal: The signal identifier.
    receiver: The callable receiver to disconnect.

Returns:
    True if the receiver was successfully disconnected. False otherwise.
juraj-google-style
def run_pipeline(pipeline, context, pipeline_context_input=None, parse_input=True):
    logger.debug('starting')

    try:
        if parse_input:
            logger.debug('executing context_parser')
            prepare_context(pipeline=pipeline, context_in_string=pipeline_context_input, context=context)
        else:
            logger.debug('skipping context_parser')

        pypyr.stepsrunner.run_step_group(pipeline_definition=pipeline, step_group_name='steps', context=context)

        logger.debug('pipeline steps complete. Running on_success steps now.')
        pypyr.stepsrunner.run_step_group(pipeline_definition=pipeline, step_group_name='on_success', context=context)
    except Exception:
        logger.error('Something went wrong. Will now try to run on_failure.')
        pypyr.stepsrunner.run_failure_step_group(pipeline=pipeline, context=context)

        logger.debug('Raising original exception to caller.')
        raise

    logger.debug('done')
Run the specified pypyr pipeline.

This function runs the actual pipeline. If you are running another pipeline
from within a pipeline, call this, not main(). Do call main() instead for
your 1st pipeline if there are pipelines calling pipelines.

Pipeline and context should be already loaded.

Args:
    pipeline (dict): Dictionary representing the pipeline.
    context (pypyr.context.Context): Reusable context object.
    pipeline_context_input (str): Initialize the pypyr context with this
        string.
    parse_input (bool): run context_parser in pipeline.

Returns:
    None
codesearchnet
def get_status(self, batch_id):
    with self._lock:
        if self._batch_committed(batch_id):
            return ClientBatchStatus.COMMITTED

        if batch_id in self._invalid:
            return ClientBatchStatus.INVALID

        if batch_id in self._pending:
            return ClientBatchStatus.PENDING

        return ClientBatchStatus.UNKNOWN
Returns the status enum for a batch.

Args:
    batch_id (str): The id of the batch to get the status for

Returns:
    int: The status enum
juraj-google-style
def _fuse_awq_attention_layers(model, module, modules_to_fuse, current_module_name, target_cls):
    from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV

    module_has_been_fused = False

    if len(modules_to_fuse['attention']) == 0:
        return module_has_been_fused

    if hasattr(module, modules_to_fuse['attention'][0]):
        q_proj = getattr(module, modules_to_fuse['attention'][0])

        if isinstance(q_proj, WQLinear_GEMV):
            linear_target_cls = WQLinear_GEMV
            cat_dim = 0
        elif isinstance(q_proj, WQLinear_GEMM):
            linear_target_cls = WQLinear_GEMM
            cat_dim = 1
        elif is_ipex_available() and version.parse(importlib.metadata.version('autoawq')) > version.parse('0.2.6'):
            from awq.modules.linear import WQLinear_IPEX
            if isinstance(q_proj, WQLinear_IPEX):
                linear_target_cls = WQLinear_IPEX
                cat_dim = 1
        else:
            raise ValueError(f'Unsupported q_proj type: {type(q_proj)}')

        previous_device = q_proj.qweight.device

        k_proj = getattr(module, modules_to_fuse['attention'][1])
        v_proj = getattr(module, modules_to_fuse['attention'][2])
        o_proj = getattr(module, modules_to_fuse['attention'][3])

        bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0) if q_proj.bias is not None else None

        qkv_layer = linear_target_cls(
            q_proj.w_bit,
            q_proj.group_size,
            q_proj.in_features,
            q_proj.out_features + k_proj.out_features + v_proj.out_features,
            q_proj.bias is not None,
            next(iter(module.state_dict().values())).device,
        )

        qkv_layer.qweight = torch.cat([q_proj.qweight, k_proj.qweight, v_proj.qweight], dim=cat_dim)
        qkv_layer.qzeros = torch.cat([q_proj.qzeros, k_proj.qzeros, v_proj.qzeros], dim=cat_dim)
        qkv_layer.scales = torch.cat([q_proj.scales, k_proj.scales, v_proj.scales], dim=cat_dim)

        if isinstance(qkv_layer, WQLinear_GEMV):
            qkv_layer.split_k_iters = q_proj.split_k_iters

        qkv_layer.bias = bias

        fused_attention_layer = target_cls(
            modules_to_fuse['hidden_size'],
            modules_to_fuse['num_attention_heads'],
            modules_to_fuse['num_key_value_heads'],
            qkv_layer,
            o_proj,
            previous_device,
            modules_to_fuse['max_seq_len'],
            use_alibi=modules_to_fuse['use_alibi'],
            rope_theta=modules_to_fuse.get('rope_theta', 10000.0),
        )

        fused_attention_layer.is_hf_transformers = True

        parent_name, child_name = current_module_name.rsplit('.', 1)
        parent = model.get_submodule(parent_name)
        setattr(parent, child_name, fused_attention_layer.to(previous_device))

        del q_proj, k_proj, v_proj, o_proj
        module_has_been_fused = True

    return module_has_been_fused
Fuse the Attention layers into a target class using autoawq

Args:
    model (`~PreTrainedModel`):
        The input pretrained model
    module (`nn.Module`):
        The pytorch parent module that has layernorm modules to fuse
    modules_to_fuse (`List[str]`):
        The module fusing mapping. The dictionary has to contain a field
        `attention` with attention module names in the correct order: q, k,
        v, o layer
    current_module_name (`str`):
        The current submodule name
    target_cls (`~autoawq.QuantAttentionFused`):
        The `QuantAttentionFused` class as it only supports that class for now.
github-repos
def get_cookie_header(queue_item):
    header = []
    path = URLHelper.get_path(queue_item.request.url)

    for cookie in queue_item.request.cookies:
        root_path = cookie.path == "" or cookie.path == "/"
        if path.startswith(cookie.path) or root_path:
            header.append(cookie.name + "=" + cookie.value)

    return "&".join(header)
Convert a requests cookie jar to an HTTP request cookie header value.

Args:
    queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new
        request.

Returns:
    str: The HTTP cookie header value.
juraj-google-style
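A hedged sketch of the path-scoping rule above, using hypothetical stand-ins for the cookie and request objects; note the snippet joins pairs with "&", so the sketch does the same.

class Cookie:
    # Hypothetical stand-in for the cookie objects iterated above.
    def __init__(self, name, value, path):
        self.name, self.value, self.path = name, value, path

cookies = [Cookie("sid", "abc123", "/"), Cookie("admin", "1", "/admin")]
request_path = "/shop/cart"

header = []
for cookie in cookies:
    root_path = cookie.path == "" or cookie.path == "/"
    if request_path.startswith(cookie.path) or root_path:
        header.append(cookie.name + "=" + cookie.value)

print("&".join(header))  # "sid=abc123" -- the /admin cookie is out of scope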
def and_evaluator(conditions, leaf_evaluator):
    saw_null_result = False

    for condition in conditions:
        result = evaluate(condition, leaf_evaluator)
        if (result is False):
            return False
        if (result is None):
            saw_null_result = True

    return (None if saw_null_result else True)
Evaluates a list of conditions as if the evaluator had been applied to each
entry and the results AND-ed together.

Args:
    conditions: List of conditions ex: [operand_1, operand_2].
    leaf_evaluator: Function which will be called to evaluate leaf condition
        values.

Returns:
    Boolean:
        - True if all operands evaluate to True.
        - False if a single operand evaluates to False.
    None: if conditions couldn't be evaluated.
codesearchnet
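A standalone sketch of the three-valued AND semantics described above; the leaf evaluator here is a toy lookup table, whereas the real evaluate() recurses into nested conditions.

def and_evaluate(conditions, leaf_evaluator):
    # False always wins; otherwise any None (unknown) makes the result None.
    saw_null_result = False
    for condition in conditions:
        result = leaf_evaluator(condition)
        if result is False:
            return False
        if result is None:
            saw_null_result = True
    return None if saw_null_result else True

leaf = {"a": True, "b": None, "c": False}.get
print(and_evaluate(["a", "a"], leaf))  # True
print(and_evaluate(["a", "b"], leaf))  # None  (unknown operand)
print(and_evaluate(["b", "c"], leaf))  # False (False short-circuits)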
class InstructBlipVideoEncoder(nn.Module):

    def __init__(self, config: InstructBlipVideoConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention
layers. Each layer is a [`InstructBlipVideoEncoderLayer`].

Args:
    config (`InstructBlipVideoConfig`):
        The corresponding vision configuration for the `InstructBlipVideoEncoder`.
github-repos
def has_enough_gas_reserve(
        raiden,
        channels_to_open: int = 0,
) -> Tuple[bool, int]:
    secure_reserve_estimate = get_reserve_estimate(raiden, channels_to_open)
    current_account_balance = raiden.chain.client.balance(raiden.chain.client.address)

    return secure_reserve_estimate <= current_account_balance, secure_reserve_estimate
Checks if the account has enough balance to handle the lifecycles of all
open channels as well as the channels to be created.

Note: This is just an estimation.

Args:
    raiden: A raiden service instance
    channels_to_open: The number of new channels that should be opened

Returns:
    Tuple of a boolean denoting if the account has enough balance for the
    remaining lifecycle events and the estimate for the remaining lifecycle
    cost
juraj-google-style
def Get(self, request, global_params=None):
    config = self.GetMethodConfig('Get')
    return self._RunMethod(config, request, global_params=global_params)
Returns information about a previously requested build.

The `Build` that is returned includes its status (such as `SUCCESS`,
`FAILURE`, or `WORKING`), and timing information.

Args:
    request: (CloudbuildProjectsLocationsBuildsGetRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments

Returns:
    (Build) The response message.
github-repos
def setPresence(self, status=SkypeUtils.Status.Online):
    self.conn("PUT", "{0}/users/ME/presenceDocs/messagingService".format(self.conn.msgsHost),
              auth=SkypeConnection.Auth.RegToken, json={"status": status.label})
Set the current user's presence on the network.

Supports :attr:`.Status.Online`, :attr:`.Status.Busy` or
:attr:`.Status.Hidden` (shown as :attr:`.Status.Offline` to others).

Args:
    status (.Status): new availability to display to contacts
juraj-google-style
def precompute_edge_matrices(adjacency, hparams):
    batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)
    with tf.variable_scope("edge_network"):
        x = tf.reshape(
            adjacency, [batch_size * num_nodes * num_nodes, edge_dim],
            name="adj_reshape_in")

        for ip_layer in range(hparams.edge_network_layers):
            name = "edge_network_layer_%d" % ip_layer
            x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
                                hparams.edge_network_hidden_size,
                                activation=tf.nn.relu,
                                name=name)
        x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
                            hparams.hidden_size**2,
                            activation=None,
                            name="edge_network_output")

        edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes, num_nodes, hparams.hidden_size, hparams.hidden_size])

        edge_matrices = tf.reshape(
            tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]),
            [-1, num_nodes * hparams.hidden_size, num_nodes * hparams.hidden_size],
            name="edge_matrices")

    return edge_matrices
Precompute the a_in and a_out tensors.

(We don't want to add to the graph every time _fprop is called.)

Args:
    adjacency: placeholder of real valued vectors of shape [B, L, L, E]
    hparams: HParams object

Returns:
    edge_matrices: [batch, L * D, L * D] the dense matrix for message
        passing viewed as a block matrix (L,L) blocks of size (D,D). Each
        block is a function of the edge vector of the adjacency matrix at
        that spot.
juraj-google-style
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None):
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ' + str(data_format))
    if isinstance(output_shape, (tuple, list)):
        output_shape = array_ops_stack.stack(output_shape)

    x, tf_data_format = _preprocess_conv3d_input(x, data_format)

    if data_format == 'channels_first' and tf_data_format == 'NDHWC':
        output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[4], output_shape[1])
    if output_shape[0] is None:
        output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
        output_shape = array_ops_stack.stack(list(output_shape))

    padding = _preprocess_padding(padding)
    if tf_data_format == 'NDHWC':
        strides = (1,) + strides + (1,)
    else:
        strides = (1, 1) + strides

    x = nn.conv3d_transpose(x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format)
    if data_format == 'channels_first' and tf_data_format == 'NDHWC':
        x = array_ops.transpose(x, (0, 4, 1, 2, 3))
    return x
3D deconvolution (i.e. transposed convolution).

Args:
    x: input tensor.
    kernel: kernel tensor.
    output_shape: 1D int tensor for the output shape.
    strides: strides tuple.
    padding: string, "same" or "valid".
    data_format: string, `"channels_last"` or `"channels_first"`.

Returns:
    A tensor, result of transposed 3D convolution.

Raises:
    ValueError: if `data_format` is neither `channels_last` nor
        `channels_first`.
github-repos
def calculate_character_to_length_mapping(measurer: text_measurer.TextMeasurer, characters: Iterable[str]) -> Mapping[(str, float)]:
    char_to_length = {}
    for c in characters:
        char_to_length[c] = measurer.text_width(c)
    return char_to_length
Return a mapping between each given character and its length.

Args:
    measurer: The TextMeasurer used to measure the width of the text in
        pixels.
    characters: The characters to measure e.g. "ml".

Returns:
    A mapping from the given characters to their length in pixels, as
    determined by 'measurer' e.g. {'m': 5.2, 'l': 1.2}.
codesearchnet
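A toy illustration of the mapping shape; FakeMeasurer is an assumed stand-in for the text_measurer.TextMeasurer interface, which only needs a text_width() method here.

class FakeMeasurer:
    # Assumed stand-in measurer: every glyph is 6.0 px wide except 'm'.
    def text_width(self, text):
        return 10.0 if text == "m" else 6.0

measurer = FakeMeasurer()
char_to_length = {}
for c in "ml":
    char_to_length[c] = measurer.text_width(c)

print(char_to_length)  # {'m': 10.0, 'l': 6.0}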
def final_block(x1, x2, dim='2d', training=True, scope='final_block'):
    with tf.variable_scope(scope):
        y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])
        y = tf.layers.batch_normalization(y, training=training)
        y = tf.nn.relu(y)
        net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'],
                             name='final_pool', keep_dims=True)
        return net
Converts activations from last RevNet block to pre-logits.

Args:
    x1: [NxHxWxC] tensor of network activations.
    x2: [NxHxWxC] tensor of network activations.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    training: True for train phase, False for eval phase.
    scope: Optional variable scope for the final block.

Returns:
    [N, hidden_dim] pre-logits tensor from activations x1 and x2.
juraj-google-style
def full(shape, fill_value, dtype=None, **kwargs): return (dc.zeros(shape, **kwargs) + fill_value).astype(dtype)
Create an array of given shape and type, filled with `fill_value`.

Args:
    shape (sequence of ints): 2D shape of the array.
    fill_value (scalar or numpy.ndarray): Fill value or array.
    dtype (data-type, optional): Desired data-type for the array.
    kwargs (optional): Other arguments of the array (*coords, attrs, and name).

Returns:
    array (decode.array): Decode array filled with `fill_value`.
codesearchnet
def __init__(self, nfiles=1, tmp_prefix=None):
    self._fnames = ['inchoate{}'.format(i) for i in range(nfiles)]
    self._tmpprefix = tmp_prefix
    self._fids = []
Initialization of instances:

Args:
    nfiles (int): number of files. Defaults to 1.
    tmp_prefix (str): prefix name of temporary files. Use this parameter if
        you want to easily track down the temporary files created by the
        manager.
juraj-google-style
def __init__(self, modules: Sequence[RelativePositionBiasBase]):
    super().__init__()
    self.biases = nn.ModuleList(modules)
Class which sums up various computed biases.

Args:
    modules (Sequence[RelativePositionBiasBase]): List of relative bias modules.
github-repos
def to_string(self, ast_obj=None, fmt: str = "medium") -> str:
    if not ast_obj:
        ast_obj = self

    bel_relation = None
    if self.bel_relation and fmt == "short":
        bel_relation = self.spec["relations"]["to_short"].get(
            self.bel_relation, self.bel_relation
        )
    elif self.bel_relation:
        bel_relation = self.spec["relations"]["to_long"].get(
            self.bel_relation, self.bel_relation
        )

    if self.bel_subject and bel_relation and self.bel_object:
        if isinstance(self.bel_object, BELAst):
            return "{} {} ({})".format(
                self.bel_subject.to_string(fmt=fmt),
                bel_relation,
                self.bel_object.to_string(fmt=fmt),
            )
        else:
            return "{} {} {}".format(
                self.bel_subject.to_string(fmt=fmt),
                bel_relation,
                self.bel_object.to_string(fmt=fmt),
            )
    elif self.bel_subject:
        return "{}".format(self.bel_subject.to_string(fmt=fmt))
    else:
        return ""
Convert AST object to string

Args:
    fmt (str): short, medium, long formatted BEL statements
        short = short function and short relation format
        medium = short function and long relation format
        long = long function and long relation format
    canonicalize

Returns:
    str: string version of BEL AST
juraj-google-style
def copy_framebuffer(self, dst, src) -> None: self.mglo.copy_framebuffer(dst.mglo, src.mglo)
Copy framebuffer content.

Use this method to:

    - blit framebuffers.
    - copy framebuffer content into a texture.
    - downsample framebuffers. (this allows reading the framebuffer's content)
    - downsample a framebuffer directly to a texture.

Args:
    dst (Framebuffer or Texture): Destination framebuffer or texture.
    src (Framebuffer): Source framebuffer.
codesearchnet
def _update_graph_variables(self, learning_rate: float=None, momentum: float=None):
    if (learning_rate is not None):
        K.set_value(self.get_learning_rate_variable(), learning_rate)
    if (momentum is not None):
        K.set_value(self.get_momentum_variable(), momentum)
Update graph variables by setting the given `learning_rate` and `momentum`.

Args:
    learning_rate: learning rate value to be set in graph (set if not None)
    momentum: momentum value to be set in graph (set if not None)

Returns:
    None
codesearchnet
def frame(self, action):
    choices = {'on': '1', 'off': '0'}
    if action in choices:
        self.send(chr(27)+'if'+choices[action])
    else:
        raise RuntimeError('Invalid action for function frame, choices are on and off')
Places/removes frame around text

Args:
    action -- Enable or disable frame. Options are 'on' and 'off'

Returns:
    None

Raises:
    RuntimeError: Invalid action.
juraj-google-style
def tile(tensor, tile_assignment, assign_tuple_sharding=False, use_sharding_op=False, unspecified_dims=None):
    return Sharding.tile(tile_assignment).apply_to_tensor(
        tensor,
        assign_tuple_sharding=assign_tuple_sharding,
        use_sharding_op=use_sharding_op,
        unspecified_dims=unspecified_dims or [])
Returns a tensor that has tiled sharding.

Args:
    tensor: A tf.Tensor to shard.
    tile_assignment: An np.ndarray describing the topology of the tiling and
        which device will compute which part of the topology.
    assign_tuple_sharding: If the sharding type should be a tuple.
    use_sharding_op: If true, adds a sharding op to set the sharding.
    unspecified_dims: An optional list of dimensions unspecified.
github-repos
def pr_curves_route(self, request):
    runs = request.args.getlist('run')
    if (not runs):
        return http_util.Respond(request, 'No runs provided when fetching PR curve data', 400)

    tag = request.args.get('tag')
    if (not tag):
        return http_util.Respond(request, 'No tag provided when fetching PR curve data', 400)

    try:
        response = http_util.Respond(request, self.pr_curves_impl(runs, tag), 'application/json')
    except ValueError as e:
        return http_util.Respond(request, str(e), 'text/plain', 400)

    return response
A route that returns a JSON mapping between runs and PR curve data.

Returns:
    Given a tag and a comma-separated list of runs (both stored within GET
    parameters), fetches a JSON object that maps between run name and
    objects containing data required for PR curves for that run. Runs that
    either cannot be found or that lack tags will be excluded from the
    response.
codesearchnet