code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def deregister(cls, name: str) -> None:
    """Remove a previously registered connection plugin.

    Args:
        name: Name of the connection plugin to deregister.

    Raises:
        ConnectionPluginNotRegistered: If no plugin named ``name`` exists.
    """
    if name not in cls.available:
        raise ConnectionPluginNotRegistered(
            f"Connection {name!r} is not registered"
        )
    del cls.available[name]
Deregisters a registered connection plugin by its name Args: name: name of the connection plugin to deregister Raises: :obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`
codesearchnet
def encode_configuration(self, did, eid, parameters):
    """Encode configuration parameters into a URL-ready string via Onshape.

    Args:
        did (str): Document ID.
        eid (str): Element ID.
        parameters (dict): Parameter name/value pairs to encode.

    Returns:
        str: The URL-ready encoded configuration string.
    """
    body = {
        "parameters": [
            {"parameterId": key, "parameterValue": val}
            for key, val in parameters.items()
        ]
    }
    headers = {
        "Accept": "application/vnd.onshape.v1+json",
        "Content-Type": "application/json",
    }
    url = "/api/elements/d/" + did + "/e/" + eid + "/configurationencodings"
    res = self._api.request("post", url, body=body, headers=headers)
    # The response body is JSON carrying the encoded configuration id.
    return json.loads(res.content.decode("utf-8"))["encodedId"]
Encode parameters as a URL-ready string Args: - did (str): Document ID - eid (str): Element ID - parameters (dict): key-value pairs of the parameters to be encoded Returns: - configuration (str): the url-ready configuration string.
codesearchnet
def print_format_output(dataframe):
    """Print a dataframe formatted to fit into the terminal.

    Columns that are entirely null are dropped up front.  The remaining
    columns are added one at a time; as soon as the rendered table's first
    line exceeds TERM_WIDTH, that column and every later column are dropped.

    Args:
        dataframe (pd.DataFrame): Dataframe to render.

    Returns:
        tuple: (table, dropped_cols, empty_cols) where ``table`` is the final
        rendered table string, ``dropped_cols`` lists columns dropped due to
        terminal size, and ``empty_cols`` lists all-null columns dropped by
        default.
    """
    print_df = pd.DataFrame()
    dropped_cols = []
    empty_cols = []
    for (i, col) in enumerate(dataframe):
        # All-null columns carry no information; report them separately.
        if dataframe[col].isnull().all():
            empty_cols += [col]
            continue
        print_df[col] = dataframe[col]
        # Re-render after each added column to probe the current width.
        test_table = tabulate(print_df, headers='keys', tablefmt='psql')
        # Width of the first rendered line.  NOTE(review): the probe includes
        # the index column while the final table below hides it -- confirm
        # this slight over-estimate is intended.
        if (str(test_table).index('\n') > TERM_WIDTH):
            print_df.drop(col, axis=1, inplace=True)
            # NOTE(review): this slice also re-lists all-null columns located
            # after position i -- verify that is the desired report.
            dropped_cols += list(dataframe.columns)[i:]
            break
    table = tabulate(print_df, headers='keys', tablefmt='psql', showindex='never')
    print(table)
    if dropped_cols:
        print('Dropped columns:', dropped_cols)
        print('Please increase your terminal size to view remaining columns.')
    if empty_cols:
        print('Empty columns:', empty_cols)
    return (table, dropped_cols, empty_cols)
Prints output of given dataframe to fit into terminal. Returns: table (pd.DataFrame): Final outputted dataframe. dropped_cols (list): Columns dropped due to terminal size. empty_cols (list): Empty columns (dropped on default).
codesearchnet
def from_version(cls, version, op=None):
    """Create a range from a single version.

    Args:
        version: Version object used as the bound(s) of the range.
        op: Bound operation as a string -- one of 'gt'/'>', 'gte'/'>=',
            'lt'/'<', 'lte'/'<=', 'eq'/'=='.  If None, a bounded range
            [version, version.next()) is created.

    Returns:
        `VersionRange` object.

    Raises:
        VersionError: If `op` is not a recognised operation.
    """
    if op is None:
        lower, upper = _LowerBound(version, True), _UpperBound(version.next(), False)
    elif op in ('eq', '=='):
        lower, upper = _LowerBound(version, True), _UpperBound(version, True)
    elif op in ('gt', '>'):
        lower, upper = _LowerBound(version, False), None
    elif op in ('gte', '>='):
        lower, upper = _LowerBound(version, True), None
    elif op in ('lt', '<'):
        lower, upper = None, _UpperBound(version, False)
    elif op in ('lte', '<='):
        lower, upper = None, _UpperBound(version, True)
    else:
        raise VersionError("Unknown bound operation '%s'" % op)
    # Renamed from `range` to avoid shadowing the builtin.
    version_range = cls(None)
    version_range.bounds = [_Bound(lower, upper)]
    return version_range
Create a range from a version. Args: version: Version object. This is used as the upper/lower bound of the range. op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', 'lt'/'<', 'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created that contains the version superset. Returns: `VersionRange` object.
codesearchnet
def process(self, feed_item):
    """Create the campaign/creative association in DCM for a feed item.

    Args:
        feed_item: Feed item representing the creative association from the
            Bulkdozer feed.

    Returns:
        The newly created association object from DCM, or None when the
        association already exists or campaign/creative lookup failed.
    """
    # Skip feed items whose association was already created.
    if feed_item.get(FieldMap.CAMPAIGN_CREATIVE_ASSOCIATION_ID, None):
        return None
    campaign = self.campaign_dao.get(feed_item, required=True)
    creative = self.creative_dao.get(feed_item, required=True)
    if not (campaign and creative):
        return None
    # Back-fill campaign identity onto the feed item for later stages.
    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']
    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']
    association = {'creativeId': str(creative['id'])}
    result = self._api().insert(
        profileId=self.profile_id,
        campaignId=str(campaign['id']),
        body=association).execute()
    # Composite id marks this association as done on subsequent runs.
    feed_item[FieldMap.CAMPAIGN_CREATIVE_ASSOCIATION_ID] = '%s|%s' % (
        campaign['id'], creative['id'])
    return result
Processes a feed item by creating the creative association in DCM. Args: feed_item: Feed item representing the creative association from the Bulkdozer feed. Returns: The newly created object from DCM.
github-repos
def cancel(self, force=False):
    """Cancel this job.

    Args:
        force (bool, optional): Forcefully cancel this job.

    Returns:
        bool: True if the job was cancelled, otherwise False if an error
        occurred.
    """
    delegator = self.rest_client._sc._delegator
    return delegator._cancel_job(self, force)
Cancel this job. Args: force (bool, optional): Forcefully cancel this job. Returns: bool: True if the job was cancelled, otherwise False if an error occurred.
codesearchnet
def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):
    """Copy a range of rows and re-insert it into the data set.

    All rows from ``reader`` are buffered, rows with indices in
    [start, stop] are duplicated at ``insertLocation`` (immediately after the
    range by default), and every row is written out with freshly regenerated,
    evenly spaced timestamps derived from the first two input rows.

    Args:
        reader: A FileRecordStream object with input data.
        writer: A FileRecordStream object to write output data to.
        start: The first row index of the range to copy.
        stop: The last row index of the range to copy (inclusive).
        insertLocation: Index at which to insert the copied range.  Defaults
            to immediately following the copied range.
        tsCol: Index of the timestamp field.  Defaults to the reader's
            timestamp field index.
    """
    assert stop >= start
    if tsCol is None:
        tsCol = reader.getTimestampFieldIdx()
    allRows = []
    duplicated = []
    firstTs = None
    tsDelta = None
    for idx, row in enumerate(reader):
        if firstTs is None:
            # Timestamp of the very first row anchors the regenerated series.
            firstTs = row[tsCol]
        elif tsDelta is None:
            # Spacing inferred from the first two rows.
            tsDelta = row[tsCol] - firstTs
        if start <= idx <= stop:
            duplicated.append(row)
        allRows.append(row)
    if insertLocation is None:
        insertLocation = stop + 1
    allRows[insertLocation:insertLocation] = duplicated
    ts = firstTs
    for row in allRows:
        row[tsCol] = ts
        writer.appendRecord(row)
        ts += tsDelta
Copies a range of values to a new location in the data set. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. start: The first row in the range to copy. stop: The last row in the range to copy. insertLocation: The location to insert the copied range. If not specified, the range is inserted immediately following itself. tsCol: The index of the timestamp column. If not specified, the reader's timestamp field index is used.
codesearchnet
def render_trees(trees, path_composer):
    """Render a list of `trees` to HTML.

    NOTE(review): Python 2 only -- relies on ``dict.iteritems()``.

    Args:
        trees (list): List of :class:`.Tree`.
        path_composer (fn reference): Function used to compose paths from
            UUID.  Look at :func:`.compose_tree_path` from :mod:`.web_tools`.

    Returns:
        str: HTML representation of the trees.
    """
    trees = list(trees)

    def create_pub_cache(trees):
        # Map UUID -> publication for every publication referenced anywhere
        # in the trees, so rendering does not re-query per node.
        sub_pubs_uuids = sum((x.collect_publications() for x in trees), [])
        uuid_mapping = {
            uuid: search_pubs_by_uuid(uuid)
            for uuid in set(sub_pubs_uuids)
        }
        # Keep only UUIDs that resolved to at least one publication.
        return {
            uuid: pub[0]
            for uuid, pub in uuid_mapping.iteritems()
            if pub
        }

    pub_cache = create_pub_cache(trees)

    def render_tree(tree, ind=1):
        # Non-public trees are omitted entirely from the output.
        if not tree.is_public:
            return ""

        rendered_tree = SimpleTemplate(TREE_TEMPLATE).render(
            tree=tree,
            render_tree=render_tree,
            ind=ind,
            path_composer=path_composer,
            pub_cache=pub_cache,
        )

        # Indent every rendered line to the tree's nesting depth.
        ind_txt = ind * " "
        return ind_txt + ("\n" + ind_txt).join(rendered_tree.splitlines())

    # Link to the parent tree (if any) for upward navigation.
    parent = tree_handler().get_parent(trees[0])
    link_up = path_composer(parent) if parent else None

    return SimpleTemplate(TREES_TEMPLATE).render(
        trees=trees,
        render_tree=render_tree,
        link_up=link_up,
    )
Render list of `trees` to HTML. Args: trees (list): List of :class:`.Tree`. path_composer (fn reference): Function used to compose paths from UUID. Look at :func:`.compose_tree_path` from :mod:`.web_tools`. Returns: str: HTML representation of trees.
juraj-google-style
def _ParseRecord(self, parser_mediator, text_file_object):
    """Parses an Opera global history record.

    A record consists of four consecutive lines: title, URL, POSIX timestamp
    and popularity index.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        text_file_object (dfvfs.TextFile): text file.

    Returns:
        bool: True if the record was successfully parsed.
    """
    try:
        title = text_file_object.readline()
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to read and decode title')
        return False

    # An empty title line signals end-of-file.
    if not title:
        return False

    try:
        url = text_file_object.readline()
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to read and decode url')
        return False

    try:
        timestamp = text_file_object.readline()
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to read and decode timestamp')
        return False

    try:
        popularity_index = text_file_object.readline()
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to read and decode popularity index')
        return False

    event_data = OperaGlobalHistoryEventData()
    event_data.url = url.strip()

    # Only store the title when it differs from the URL.
    title = title.strip()
    if title != event_data.url:
        event_data.title = title

    popularity_index = popularity_index.strip()
    try:
        event_data.popularity_index = int(popularity_index, 10)
    except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'unable to convert popularity index: {0:s}'.format(popularity_index))
    else:
        # Bug fix: only interpret the popularity index when the conversion
        # succeeded; previously a failed conversion fell through to comparing
        # an unset value against 0, raising TypeError.
        if event_data.popularity_index < 0:
            event_data.description = 'First and Only Visit'
        else:
            event_data.description = 'Last Visit'

    timestamp = timestamp.strip()
    try:
        timestamp = int(timestamp, 10)
    except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'unable to convert timestamp: {0:s}'.format(timestamp))
        timestamp = None

    if timestamp is None:
        date_time = dfdatetime_semantic_time.SemanticTime('Invalid')
    else:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    return True
Parses an Opera global history record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. text_file_object (dfvfs.TextFile): text file. Returns: bool: True if the record was successfully parsed.
codesearchnet
def add_fileobj(self, fileobj, path, compress, flags=None):
    """Add the contents of a file object to the MAR file.

    Args:
        fileobj (file-like object): Open file object to read from.
        path (str): Name of this file in the MAR file.
        compress (str): One of 'xz', 'bz2', or None.
        flags (int): Permissions of this file in the MAR file.  Defaults to
            the permissions of `path` on disk.

    Returns:
        The result of `add_stream` for the added entry.
    """
    f = file_iter(fileobj)
    # Bug fix: os.stat() returns a stat_result, not an int -- the permission
    # bits live in st_mode (masked to the lower 9 bits, i.e. 0o777).
    flags = flags or (os.stat(path).st_mode & 511)
    return self.add_stream(f, path, compress, flags)
Add the contents of a file object to the MAR file. Args: fileobj (file-like object): open file object path (str): name of this file in the MAR file compress (str): One of 'xz', 'bz2', or None. Defaults to None. flags (int): permission of this file in the MAR file. Defaults to the permissions of `path`
codesearchnet
def _get_ops_in_metagraph(meta_graph_def):
    """Return the set of op names used by a MetaGraphDef's graph.

    Args:
        meta_graph_def: MetaGraphDef to list the ops of.

    Returns:
        A set of ops.
    """
    graph_def = meta_graph_def.graph_def
    return set(meta_graph_lib.ops_used_by_graph_def(graph_def))
Returns a set of the ops in the MetaGraph. Returns the set of all the ops used in the MetaGraphDef indicated by the tag_set stored in SavedModel directory. Args: meta_graph_def: MetaGraphDef to list the ops of. Returns: A set of ops.
github-repos
def create(self, name, command_to_run, description='',
           environment_variables=None, required_arguments=None,
           required_arguments_default_values=None, extra_data_to_post=None):
    """Create a task type.

    Args:
        name (str): The name of the task.
        command_to_run (str): The command to run to execute the task.
        description (str, optional): The description of the task type.
        environment_variables (list, optional): The environment variables
            required on the host to execute the task.
        required_arguments (list, optional): The argument names for the task
            type.
        required_arguments_default_values (dict, optional): Default values
            for the task's required arguments.
        extra_data_to_post (dict, optional): Extra key-value pairs to add to
            the request data; useful for subclasses requiring extra
            parameters.

    Returns:
        :class:`saltant.models.base_task_instance.BaseTaskType`: A model
        instance representing the task type just created.
    """
    # Mutable defaults are resolved per call, never shared.
    environment_variables = [] if environment_variables is None else environment_variables
    required_arguments = [] if required_arguments is None else required_arguments
    required_arguments_default_values = (
        {} if required_arguments_default_values is None
        else required_arguments_default_values)

    request_url = self._client.base_api_url + self.list_url
    payload = {
        'name': name,
        'description': description,
        'command_to_run': command_to_run,
        'environment_variables': json.dumps(environment_variables),
        'required_arguments': json.dumps(required_arguments),
        'required_arguments_default_values': json.dumps(
            required_arguments_default_values),
    }
    if extra_data_to_post is not None:
        payload.update(extra_data_to_post)

    response = self._client.session.post(request_url, data=payload)
    self.validate_request_success(
        response_text=response.text,
        request_url=request_url,
        status_code=response.status_code,
        expected_status_code=HTTP_201_CREATED,
    )
    return self.response_data_to_model_instance(response.json())
Create a task type. Args: name (str): The name of the task. command_to_run (str): The command to run to execute the task. description (str, optional): The description of the task type. environment_variables (list, optional): The environment variables required on the host to execute the task. required_arguments (list, optional): The argument names for the task type. required_arguments_default_values (dict, optional): Default values for the tasks required arguments. extra_data_to_post (dict, optional): Extra key-value pairs to add to the request data. This is useful for subclasses which require extra parameters. Returns: :class:`saltant.models.base_task_instance.BaseTaskType`: A task type model instance representing the task type just created.
codesearchnet
def add_phenotype(self, institute, case, user, link, hpo_term=None,
                  omim_term=None, is_group=False):
    """Add a new phenotype term to a case.

    Creates a phenotype term and an event with the given information.

    Args:
        institute (Institute): An Institute object.
        case (Case): Case object.
        user (User): A User object.
        link (str): The url to be used in the event.
        hpo_term (str): A hpo id.
        omim_term (str): A omim id; used to look up the disease's HPO terms
            when no hpo_term is given.
        is_group (bool): Whether the phenotype term is a group; groups are
            also added to the case's phenotype_groups.

    Returns:
        The updated case document.

    Raises:
        ValueError: When neither hpo_term nor omim_term is supplied, or when
            an HPO term is unknown to the database.
    """
    hpo_results = []
    try:
        if hpo_term:
            hpo_results = [hpo_term]
        elif omim_term:
            # Resolve the OMIM disease term to its associated HPO terms.
            LOG.debug('Fetching info for mim term {0}'.format(omim_term))
            disease_obj = self.disease_term(omim_term)
            if disease_obj:
                for hpo_term in disease_obj.get('hpo_terms', []):
                    hpo_results.append(hpo_term)
        else:
            raise ValueError('Must supply either hpo or omim term')
    except ValueError as e:
        # NOTE(review): re-raised unchanged -- kept for explicitness only.
        raise e
    # Phenotype ids already present on the case; used to avoid duplicates.
    existing_terms = set((term['phenotype_id'] for term in
                          case.get('phenotype_terms', [])))
    updated_case = case
    phenotype_terms = []
    for hpo_term in hpo_results:
        LOG.debug('Fetching info for hpo term {0}'.format(hpo_term))
        hpo_obj = self.hpo_term(hpo_term)
        if (hpo_obj is None):
            raise ValueError(('Hpo term: %s does not exist in database' % hpo_term))
        phenotype_id = hpo_obj['_id']
        description = hpo_obj['description']
        if (phenotype_id not in existing_terms):
            phenotype_term = dict(phenotype_id=phenotype_id, feature=description)
            phenotype_terms.append(phenotype_term)
            # Record an audit event for each newly added phenotype term.
            LOG.info('Creating event for adding phenotype term for case {0}'.format(case['display_name']))
            self.create_event(institute=institute, case=case, user=user,
                              link=link, category='case',
                              verb='add_phenotype',
                              subject=case['display_name'],
                              content=phenotype_id)
    if is_group:
        # Groups are added both as plain terms and as phenotype groups.
        updated_case = self.case_collection.find_one_and_update(
            {'_id': case['_id']},
            {'$addToSet': {
                'phenotype_terms': {'$each': phenotype_terms},
                'phenotype_groups': {'$each': phenotype_terms},
            }},
            return_document=pymongo.ReturnDocument.AFTER)
    else:
        updated_case = self.case_collection.find_one_and_update(
            {'_id': case['_id']},
            {'$addToSet': {'phenotype_terms': {'$each': phenotype_terms}}},
            return_document=pymongo.ReturnDocument.AFTER)
    LOG.debug('Case updated')
    return updated_case
Add a new phenotype term to a case Create a phenotype term and event with the given information Args: institute (Institute): An Institute object case (Case): Case object user (User): A User object link (str): The url to be used in the event hpo_term (str): A hpo id omim_term (str): A omim id is_group (bool): whether the phenotype term is a group
codesearchnet
def EnumValueName(self, enum, value):
    """Return the string name of an enum value.

    This is just a small helper method to simplify a common operation.

    Args:
        enum: string name of the Enum.
        value: int, value of the enum.

    Returns:
        string name of the enum value.

    Raises:
        KeyError: if either the Enum doesn't exist or the value is not a
            valid value for the enum.
    """
    enum_type = self.enum_types_by_name[enum]
    return enum_type.values_by_number[value].name
Returns the string name of an enum value. This is just a small helper method to simplify a common operation. Args: enum: string name of the Enum. value: int, value of the enum. Returns: string name of the enum value. Raises: KeyError if either the Enum doesn't exist or the value is not a valid value for the enum.
codesearchnet
def handle(self, connection_id, message_content):
    """Parse an incoming request and wrap the final response.

    Args:
        connection_id (str): ZMQ identity sent over the ZMQ socket.
        message_content (bytes): Byte-encoded request protobuf to be parsed.

    Returns:
        HandlerResult: result to be sent in response back to the client.
    """
    try:
        request = self._request_proto()
        request.ParseFromString(message_content)
    except DecodeError:
        LOGGER.info('Protobuf %s failed to deserialize', request)
        return self._wrap_result(self._status.INTERNAL_ERROR)

    # A failed response carries its status on the exception itself.
    try:
        response = self._respond(request)
    except _ResponseFailed as e:
        response = e.status

    return self._wrap_result(response)
Handles parsing incoming requests, and wrapping the final response. Args: connection_id (str): ZMQ identity sent over ZMQ socket message_content (bytes): Byte encoded request protobuf to be parsed Returns: HandlerResult: result to be sent in response back to client
codesearchnet
def solve(self, print_solution=False):
    """Solve the current integer program and return the computed layout.

    Args:
        print_solution: Optional boolean; if True, print the full solution
            in human-readable format.

    Returns:
        The computed layout (as a string).

    Raises:
        SolverError: The internal solver could not find a solution, or the
            solution found is infeasible.
    """
    self._cp_solver = cp_model.CpSolver()
    status = self._cp_solver.Solve(self._model)
    if status != cp_model.OPTIMAL:
        # A feasible-but-suboptimal solution is still usable; any other
        # non-optimal status is fatal.
        if status == cp_model.FEASIBLE:
            logging.warning('A potentially suboptimal solution was found.')
        else:
            logging.error('Solver returned status %d.', status)
            raise SolverError(
                'The solver could not solve the problem and returned '
                'status {}.'.format(status))
    if print_solution:
        print_cp_model_solution.print_solution(self._model, self._cp_solver)

    # Collect each chosen (mtf dimension, mesh dimension) assignment and
    # serialize as "name:mesh;name:mesh", sorted for determinism.
    layout = []
    for mtf_dimension_name in (
            self._layout_validator.splittable_mtf_dimension_names):
        for mesh_dimension_name in (
                self._layout_validator.mesh_dimension_name_to_size):
            value = self._cp_solver.Value(
                self._global_vars[(mtf_dimension_name, mesh_dimension_name)])
            if value:
                layout.append(mtf_dimension_name + ':' + mesh_dimension_name)
    layout.sort()
    return ';'.join(layout)
Solves the current integer program and returns the computed layout. Args: print_solution: An optional boolean indicating whether to print the full solution in human-readable format. Returns: The computed layout (as a string). Raises: SolverError: the internal solver could not find a solution, or the solution found is infeasible.
codesearchnet
def _get_energy(self, x):
    """Compute the reaction energy in eV/atom at mixing ratio x : (1-x).

    Args:
        x (float): Mixing ratio of self.comp1 to self.comp2, a float between
            0 and 1.

    Returns:
        Reaction energy: hull energy of the mixed composition minus the
        linear combination of the endpoint energies.
    """
    mixed_comp = self.comp1 * x + self.comp2 * (1 - x)
    linear_energy = self.e1 * x + self.e2 * (1 - x)
    return self.pd.get_hull_energy(mixed_comp) - linear_energy
Computes reaction energy in eV/atom at mixing ratio x : (1-x) for self.comp1 : self.comp2. Args: x (float): Mixing ratio x of reactants, a float between 0 and 1. Returns: Reaction energy.
juraj-google-style
def estimate_size(self) -> Optional[int]:
    """Estimate the total size of this source in bytes.

    The estimate is in terms of external storage size, before performing
    decompression or other processing.

    Returns:
        Estimated size of the source if it can be determined, ``None``
        otherwise.

    Raises:
        NotImplementedError: Always; subclasses must override.
    """
    raise NotImplementedError
Estimates the size of source in bytes. An estimate of the total size (in bytes) of the data that would be read from this source. This estimate is in terms of external storage size, before performing decompression or other processing. Returns: estimated size of the source if the size can be determined, ``None`` otherwise.
github-repos
def GetEventTagByIdentifier(self, identifier):
    """Retrieve a specific event tag.

    Args:
        identifier (SQLTableIdentifier): event tag identifier.

    Returns:
        EventTag: event tag or None if not available.
    """
    # Row identifiers are 1-based while the container index is 0-based.
    event_tag = self._GetAttributeContainerByIndex(
        self._CONTAINER_TYPE_EVENT_TAG, identifier.row_identifier - 1)
    if event_tag:
        # Promote the raw event row reference to a proper identifier and
        # drop the row attribute afterwards.
        event_identifier = identifiers.SQLTableIdentifier(
            self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
        event_tag.SetEventIdentifier(event_identifier)
        del event_tag.event_row_identifier
    return event_tag
Retrieves a specific event tag. Args: identifier (SQLTableIdentifier): event tag identifier. Returns: EventTag: event tag or None if not available.
juraj-google-style
def setMood(self, mood):
    """Update the activity message for the current user.

    Args:
        mood (str): new mood message
    """
    url = "{0}/users/{1}/profile/partial".format(
        SkypeConnection.API_USER, self.userId)
    payload = {"payload": {"mood": mood or ""}}
    self.conn("POST", url, auth=SkypeConnection.Auth.SkypeToken, json=payload)
    # Mirror the change on the local user object.
    self.user.mood = SkypeUser.Mood(plain=mood) if mood else None
Update the activity message for the current user. Args: mood (str): new mood message
juraj-google-style
def get_legacy_output_types(dataset_or_iterator):
    """Return the output types for elements of the input dataset / iterator.

    Args:
        dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

    Returns:
        A (nested) structure of `tf.DType` objects matching the structure of
        dataset / iterator elements.

    @compatibility(TF2)
    Legacy API; in TF 2 use the `tf.data.Dataset.element_spec` attribute.
    @end_compatibility
    """
    structure = get_structure(dataset_or_iterator)

    def _to_types(component_spec):
        return component_spec._to_legacy_output_types()

    return nest.map_structure(_to_types, structure)
Returns the output shapes for elements of the input dataset / iterator. Args: dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`. Returns: A (nested) structure of `tf.DType` objects matching the structure of dataset / iterator elements and specifying the shape of the individual components. @compatibility(TF2) This is a legacy API for inspecting the type signature of dataset elements. In TF 2, you should use the `tf.data.Dataset.element_spec` attribute instead. @end_compatibility
github-repos
def get_num_of_video_patches(self, num_frames: int, height: int, width: int, videos_kwargs=None):
    """Return the number of video patches for a given video size.

    Args:
        num_frames (`int`): Number of frames in the input video.
        height (`int`): Height of the input video.
        width (`int`): Width of the input video.
        videos_kwargs (`dict`, *optional*):
            Any kwargs to override defaults of the video processor.

    Returns:
        `int`: Total number of patches (temporal grid * spatial grid).
    """
    # Bug fix: the original dereferenced videos_kwargs.get() even when the
    # caller omitted it (None); treat a missing dict as empty.
    videos_kwargs = videos_kwargs or {}
    min_pixels = videos_kwargs.get('min_pixels', None) or self.size['shortest_edge']
    max_pixels = videos_kwargs.get('max_pixels', None) or self.size['longest_edge']
    patch_size = videos_kwargs.get('patch_size', None) or self.patch_size
    merge_size = videos_kwargs.get('merge_size', None) or self.merge_size
    temporal_patch_size = videos_kwargs.get('temporal_patch_size', None) or self.temporal_patch_size
    factor = patch_size * merge_size
    resized_height, resized_width = smart_resize(
        height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels)
    # Reconstructed from a garbled source line: the spatial grid is the
    # resized size divided by the patch size and the temporal grid is the
    # frame count divided by the temporal patch size -- TODO confirm against
    # the upstream processor implementation.
    grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
    grid_t = num_frames // temporal_patch_size
    return grid_t * grid_h * grid_w
A utility that returns the number of video patches for a given video size. Args: num_frames (`int`): Number of frames in the input video. height (`int`): Height of the input video. width (`int`): Width of the input video. videos_kwargs (`dict`, *optional*) Any kwargs to override defaults of the video processor. Returns: `int`: Total number of video patches for the given video size.
github-repos
def __init__(self, values, row_splits):
    """Create a `RaggedTensorValue`.

    Args:
        values: A numpy array of any type and shape; or a RaggedTensorValue.
        row_splits: A 1-D int32 or int64 numpy array.

    Raises:
        TypeError: If `row_splits` or `values` has an unsupported type.
        ValueError: If the row-splits dtypes disagree.
    """
    splits_ok = (
        isinstance(row_splits, (np.ndarray, np.generic))
        and row_splits.dtype in (np.int64, np.int32)
        and row_splits.ndim == 1)
    if not splits_ok:
        raise TypeError('row_splits must be a 1D int32 or int64 numpy array')
    if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):
        raise TypeError('values must be a numpy array or a RaggedTensorValue')
    if (isinstance(values, RaggedTensorValue)
            and row_splits.dtype != values.row_splits.dtype):
        raise ValueError('row_splits and values.row_splits must have the same dtype')
    self._values = values
    self._row_splits = row_splits
Creates a `RaggedTensorValue`. Args: values: A numpy array of any type and shape; or a RaggedTensorValue. row_splits: A 1-D int32 or int64 numpy array.
github-repos
def _resolve_non_literal_route(self, method, path):
    """Resolve a request to a wildcard or regex route handler.

    Wildcard routes are consulted before regex routes; within each group the
    most recently added route wins.

    Arguments:
        method (str): HTTP method name, e.g. GET, POST, etc.
        path (str): Request path.

    Returns:
        tuple or None: (handler, positional args, keyword args), or ``None``
        if no route matches the request.
    """
    for routes_by_method in (self._wildcard, self._regex):
        candidates = routes_by_method.get(method)
        if candidates is None:
            continue
        for candidate in reversed(candidates):
            callback_data = candidate.match(path)
            if callback_data is not None:
                return callback_data
    return None
Resolve a request to a wildcard or regex route handler. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if no route matches the request.
codesearchnet
def heightmap_get_normal(
    hm: np.ndarray, x: float, y: float, waterLevel: float
) -> Tuple[float, float, float]:
    """Return the map normal at the given coordinates.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        x (float): The x coordinate.
        y (float): The y coordinate.
        waterLevel (float): The heightmap is considered flat below this value.

    Returns:
        Tuple[float, float, float]: An (x, y, z) vector normal.
    """
    # The C call writes the normal into this 3-float output buffer.
    normal = ffi.new('float[3]')
    lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, normal, waterLevel)
    return (normal[0], normal[1], normal[2])
Return the map normal at given coordinates. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. x (float): The x coordinate. y (float): The y coordinate. waterLevel (float): The heightmap is considered flat below this value. Returns: Tuple[float, float, float]: An (x, y, z) vector normal.
codesearchnet
def create_normal_matrix(self, modelview):
    """Create a normal matrix from a modelview matrix.

    The normal matrix is the transpose of the inverse of the upper-left
    3x3 portion of the modelview matrix.

    Args:
        modelview: The modelview matrix.

    Returns:
        A 3x3 normal matrix as a :py:class:`numpy.array`.
    """
    return Matrix33.from_matrix44(modelview).inverse.transpose()
Creates a normal matrix from modelview matrix Args: modelview: The modelview matrix Returns: A 3x3 Normal matrix as a :py:class:`numpy.array`
codesearchnet
def setup(self, artifacts, use_tsk, reason, grr_server_url, grr_username,
          grr_password, approvers=None, verify=True):
    """Initialize a GRR Hunt artifact collector.

    Args:
        artifacts: str, comma-separated list of GRR-defined artifacts.
        use_tsk: toggle for the use_tsk flag.
        reason: justification for GRR access.
        grr_server_url: GRR server URL.
        grr_username: GRR username.
        grr_password: GRR password.
        approvers: str, comma-separated list of GRR approval recipients.
        verify: boolean, whether to verify the GRR server's x509 certificate.
    """
    super(GRRHuntArtifactCollector, self).setup(
        reason, grr_server_url, grr_username, grr_password,
        approvers=approvers, verify=verify)
    self.artifacts = [entry.strip() for entry in artifacts.strip().split(',')]
    if not artifacts:
        self.state.add_error('No artifacts were specified.', critical=True)
    self.use_tsk = use_tsk
Initializes a GRR Hunt artifact collector. Args: artifacts: str, comma-separated list of GRR-defined artifacts. use_tsk: toggle for use_tsk flag. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: str, comma-separated list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
codesearchnet
def delete_snl(self, snl_ids):
    """Delete earlier submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users.  Opening up submissions to all users is being planned for
        the future.

    Args:
        snl_ids: List of SNL ids.

    Raises:
        MPRestError: If the REST call fails or returns an error response.
    """
    try:
        response = self.session.post(
            '{}/snl/delete'.format(self.preamble),
            data={'ids': json.dumps(snl_ids)})
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if not resp['valid_response']:
                raise MPRestError(resp['error'])
            if resp.get('warning'):
                warnings.warn(resp['warning'])
            return resp
        raise MPRestError(
            'REST error with status code {} and error {}'.format(
                response.status_code, response.text))
    except Exception as ex:
        # All failures, including the raises above, are rewrapped so callers
        # only ever see MPRestError.
        raise MPRestError(str(ex))
Delete earlier submitted SNLs. .. note:: As of now, this MP REST feature is open only to a select group of users. Opening up submissions to all users is being planned for the future. Args: snl_ids: List of SNL ids. Raises: MPRestError
codesearchnet
def _ensure_servable(input_tensors, names_to_output_tensor_infos):
    """Check that the signature outputs don't depend on unreachable placeholders.

    Args:
        input_tensors: An iterable of `Tensor`s specified as the signature's
            inputs.
        names_to_output_tensor_infos: A mapping from output names to
            respective `TensorInfo`s corresponding to the signature's output
            tensors.

    Raises:
        ValueError: If any of the signature's outputs depend on placeholders
            not provided as signature inputs.
    """
    plain_input_tensors = nest.flatten(input_tensors, expand_composites=True)
    graph = op_selector.get_unique_graph(plain_input_tensors)
    output_tensors = [
        utils.get_tensor_from_tensor_info(tensor, graph=graph)
        for tensor in names_to_output_tensor_infos.values()
    ]
    plain_output_tensors = nest.flatten(output_tensors, expand_composites=True)
    # Walk backwards from the outputs, stopping at the declared inputs, to
    # find every op the outputs depend on.
    dependency_ops = op_selector.get_backward_walk_ops(
        plain_output_tensors, stop_at_ts=plain_input_tensors)
    fed_tensors = object_identity.ObjectIdentitySet(plain_input_tensors)
    for dependency_op in dependency_ops:
        # An op that must be fed (e.g. a placeholder) is only acceptable when
        # all of its outputs are covered by the signature's inputs.
        if _must_be_fed(dependency_op) and (not all(
                (output in fed_tensors for output in dependency_op.outputs))):
            input_tensor_names = [tensor.name for tensor in plain_input_tensors]
            output_tensor_keys = list(names_to_output_tensor_infos.keys())
            output_tensor_names = [tensor.name for tensor in plain_output_tensors]
            dependency_path = op_selector.show_path(
                dependency_op, plain_output_tensors, plain_input_tensors)
            raise ValueError(f"The signature's input tensors {input_tensor_names} are insufficient to compute its output keys {output_tensor_keys} (respectively, tensors {output_tensor_names}) because of the dependency on `{dependency_op.name}` which is not given as a signature input, as illustrated by the following dependency path: {dependency_path}")
Check that the signature outputs don't depend on unreachable placeholders. Args: input_tensors: An iterable of `Tensor`s specified as the signature's inputs. names_to_output_tensor_infos: An mapping from output names to respective `TensorInfo`s corresponding to the signature's output tensors. Raises: ValueError: If any of the signature's outputs depend on placeholders not provided as signature's inputs.
github-repos
def __discover_node(self, node, depth):
    """Recursively enumerate a node's adjacencies up to self.max_depth.

    Args:
        node: natlas_node object to enumerate.
        depth: Current distance from the root node.
    """
    # Guard clauses: skip absent, too-deep, already-visited, address-less or
    # SNMP-unreachable nodes.
    if (node == None):
        return
    if (depth >= self.max_depth):
        return
    if (node.discovered > 0):
        return
    node.discovered = 1
    if (node.ip[0] == '0.0.0.0'):
        return
    if (node.snmpobj.success == 0):
        return
    dcodes = DCODE_STEP_INTO
    if (depth == 0):
        dcodes |= DCODE_ROOT
    self.__print_step(node.ip[0], node.name, depth, dcodes)
    snmpobj = node.snmpobj
    valid_neighbors = []
    # Neighbors discovered via CDP are processed before LLDP ones.
    cdp_neighbors = node.get_cdp_neighbors()
    lldp_neighbors = node.get_lldp_neighbors()
    neighbors = cdp_neighbors + lldp_neighbors
    if (len(neighbors) == 0):
        return
    for n in neighbors:
        if (n.remote_ip == None):
            n.remote_ip = '0.0.0.0'
        # First ACL pass with the limited information available pre-query.
        acl_action = self.__match_node_acl(n.remote_ip, n.remote_name)
        if (acl_action == 'deny'):
            continue
        dcodes = DCODE_DISCOVERED
        child = None
        if (acl_action == 'include'):
            # Included nodes are recorded without being queried.
            child = natlas_node()
            child.ip = [n.remote_ip]
            dcodes |= DCODE_INCLUDE
        else:
            child, query_result = self.__query_node(n.remote_ip, n.remote_name)
            if (child.snmpobj.success == 0):
                child.name = util.shorten_host_name(n.remote_name, self.config.host_domains)
                dcodes |= DCODE_ERR_SNMP
            # Second ACL pass, now with platform/IOS/serial details.
            acl_action = self.__match_node_acl(n.remote_ip, n.remote_name, n.remote_plat, n.remote_ios, child.serial)
            if (acl_action == 'deny'):
                continue
            if (query_result == NODE_NEW):
                self.nodes.append(child)
        if (acl_action == 'leaf'):
            dcodes |= DCODE_LEAF
        if (n.discovered_proto == 'cdp'):
            dcodes |= DCODE_CDP
        if (n.discovered_proto == 'lldp'):
            dcodes |= DCODE_LLDP
        self.__print_step(n.remote_ip, n.remote_name, depth+1, dcodes)
        child.plat = n.remote_plat
        child.ios = n.remote_ios
        n.node = child
        self.__add_link(node, n)
        # NOTE(review): for the 'include' branch query_result is never
        # assigned in this iteration; this condition then reads a stale or
        # unbound value -- confirm intended behavior.  Also note the bitwise
        # `&` here (not logical `and`) evaluates every operand.
        if ((query_result == NODE_NEW) & (acl_action != 'leaf') & (acl_action != 'include')):
            valid_neighbors.append(child)
    # Recurse into the neighbors that were newly discovered and queryable.
    for n in valid_neighbors:
        self.__discover_node(n, depth+1)
Given a node, recursively enumerate its adjacencies until we reach the specified depth (>0). Args: node: natlas_node object to enumerate. depth: The depth left that we can go further away from the root.
juraj-google-style
def listen(self, message_consumer):
    """Blocking loop that reads messages off rfile and dispatches them.

    Args:
        message_consumer (fn): function that is passed each message as it is
            read off the socket.
    """
    while True:
        if self._rfile.closed:
            return
        request_str = self._read_message()
        if request_str is None:
            return
        try:
            message_consumer(json.loads(request_str.decode('utf-8')))
        except ValueError:
            # Malformed JSON is logged and skipped; the loop keeps running.
            log.exception("Failed to parse JSON message %s", request_str)
Blocking call to listen for messages on the rfile. Args: message_consumer (fn): function that is passed each message as it is read off the socket.
juraj-google-style
def db_update_record(self, table_name, column, value):
    """Update a column's value for all rows of a table.

    Args:
        table_name (str): The name of the table.
        column (str): The column name in which the value is to be updated.
        value (str): The value to update in the column.
    """
    # Security fix: bind the value as a parameter instead of interpolating it
    # into the SQL string (SQL injection / quoting bugs).  Identifiers cannot
    # be bound, so callers must not pass untrusted table/column names.
    sql = 'UPDATE {} SET {} = ?'.format(table_name, column)
    cur = self.db_conn.cursor()
    cur.execute(sql, (value,))
Insert records into DB. Args: table_name (str): The name of the table. column (str): The column name in which the value is to be updated. value (str): The value to update in the column.
juraj-google-style
def _build_shuffle_scatter(reduced_shards, dst_devices):
    """Build the scatter phase of shuffle all-reduce.

    Args:
        reduced_shards: list of `tf.Tensor` fully reduced shards.
        dst_devices: list of names of devices at which the fully-reduced
            value should be reconstituted.

    Returns:
        list of `tf.Tensor` scattered tensors, one per destination device.
    """
    out_tensors = []
    # Each destination device concatenates all shards locally, which implies
    # transferring every shard to every destination.
    for device_name in dst_devices:
        with ops.device(device_name):
            out_tensors.append(array_ops.concat(reduced_shards, 0))
    return out_tensors
Build the scatter phase of shuffle all-reduce. Args: reduced_shards: list of `tf.Tensor` fully reduced shards dst_devices: list of names of devices at which the fully-reduced value should be reconstituted. Returns: list of `tf.Tensor` scattered tensors.
github-repos
def parse_instrumentation_options(self, parameters=None):
    """Return the instrumentation options extracted from user parameters.

    Keys beginning with DEFAULT_INSTRUMENTATION_OPTION_PREFIX are treated as
    instrumentation options; the prefix is stripped from the returned keys.

    Args:
        parameters: dict, key/value pairs which may include instrumentation
            options.  Usually this argument will be self.user_params.

    Returns:
        A dictionary of options/parameters for the instrumentation test.
    """
    if parameters is None:
        return {}
    prefix = self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX
    return {
        key[len(prefix):]: value
        for key, value in parameters.items()
        if key.startswith(prefix)
    }
Returns the options for the instrumentation test from user_params. By default, this method assume that the correct instrumentation options all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX. Args: parameters: dict, the key value pairs representing an assortment of parameters including instrumentation options. Usually, this argument will be from self.user_params. Returns: A dictionary of options/parameters for the instrumentation tst.
github-repos
def _get_input_readers(self, state):
    """Get input readers, splitting input once and caching the split.

    Args:
        state: a MapreduceState model.

    Returns:
        A tuple: (a list of input readers, a model._HugeTaskPayload entity).
        The payload entity contains the json serialized input readers.
        (None, None) when input reader splitting returned no data to process.
    """
    serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY % state.key().id_or_name())
    serialized_input_readers = model._HugeTaskPayload.get_by_key_name(serialized_input_readers_key, parent=state)
    input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
    split_param = state.mapreduce_spec.mapper
    # New-style (map_job) readers take a JobConfig rather than a mapper spec.
    if issubclass(input_reader_class, map_job.InputReader):
        split_param = map_job.JobConfig._to_map_job_config(
            state.mapreduce_spec,
            os.environ.get('HTTP_X_APPENGINE_QUEUENAME'))
    if (serialized_input_readers is None):
        # First attempt: compute the input splits from scratch.
        readers = input_reader_class.split_input(split_param)
    else:
        # Retry: restore the previously computed splits so shards stay stable.
        readers = [input_reader_class.from_json_str(_json) for _json in
                   json.loads(zlib.decompress(serialized_input_readers.payload))]
    if (not readers):
        return (None, None)
    # The actual shard count is dictated by the split result.
    state.mapreduce_spec.mapper.shard_count = len(readers)
    state.active_shards = len(readers)
    if (serialized_input_readers is None):
        # Cache the serialized readers so retries reuse the same split.
        serialized_input_readers = model._HugeTaskPayload(
            key_name=serialized_input_readers_key, parent=state)
        readers_json_str = [i.to_json_str() for i in readers]
        serialized_input_readers.payload = zlib.compress(json.dumps(readers_json_str))
    return (readers, serialized_input_readers)
Get input readers. Args: state: a MapreduceState model. Returns: A tuple: (a list of input readers, a model._HugeTaskPayload entity). The payload entity contains the json serialized input readers. (None, None) when input reader inplitting returned no data to process.
codesearchnet
def diff_str(a: str | object, b: str | object) -> str: if not isinstance(a, str): a = pretty_repr(a).split('\n') if not isinstance(b, str): b = pretty_repr(b).split('\n') diff = difflib.ndiff(a, b) return '\n'.join(diff)
Pretty diff between 2 objects. Args: a: Object/str to compare b: Object/str to compare Returns: The diff string
github-repos
def volatility(self,
               strike: types.FloatTensor,
               expiry_dates: Optional[types.DateTensor] = None,
               expiry_times: Optional[types.FloatTensor] = None,
               term: Optional[types.Period] = None) -> types.FloatTensor:
    """Return the interpolated volatility on a specified set of expiries.

    Args:
        strike: The strikes for which the interpolation is desired.
        expiry_dates: Optional input specifying the expiry dates for which
            interpolation is desired.  Supply either `expiry_dates` or
            `expiry_times`.
        expiry_times: Optional real `Tensor` containing the time to
            expiration for which interpolation is desired.  Supply either
            `expiry_dates` or `expiry_times`.
        term: Optional term of the underlying rate; unused here.

    Returns:
        A `Tensor` of the same shape as `expiry` with the interpolated
        volatility from the volatility surface.

    Raises:
        ValueError: If both `expiry_dates` and `expiry_times` are specified.
    """
    del term  # The term of the underlying rate is not used.
    if expiry_dates is not None and expiry_times is not None:
        raise ValueError('Unexpected inputs: Both expiry_dates and expiry times are specified')
    if expiry_times is not None:
        expiries = tf.convert_to_tensor(expiry_times, dtype=self._dtype)
    else:
        # Convert dates to year fractions using the surface's day count.
        date_tensor = dateslib.convert_to_date_tensor(expiry_dates)
        expiries = self._day_count_fn(
            start_date=self._valuation_date,
            end_date=date_tensor,
            dtype=self._dtype)
    strike = tf.convert_to_tensor(strike, dtype=self._dtype, name='strike')
    return self._interpolator(expiries, strike)
Returns the interpolated volatility on a specified set of expiries. Args: strike: The strikes for which the interpolation is desired. expiry_dates: Optional input specifying the expiry dates for which interpolation is desired. The user should supply either `expiry_dates` or `expiry_times` for interpolation. expiry_times: Optional real `Tensor` containing the time to expiration for which interpolation is desired. The user should supply either `expiry_dates` or `expiry_times` for interpolation. term: Optional input specifying the term of the underlying rate for which the interpolation is desired. Relevant for interest rate implied volatility data. Returns: A `Tensor` of the same shape as `expiry` with the interpolated volatility from the volatility surface. Raises: ValueError is both `expiry_dates` and `expiry_times` are specified.
github-repos
def load_disease_term(self, disease_obj):
    """Load a disease term into the database.

    Args:
        disease_obj(dict): The disease term to insert; must carry an
            ``_id`` key unique within the collection.

    Raises:
        IntegrityError: If a term with the same ``_id`` already exists.
    """
    LOG.debug("Loading disease term %s into database", disease_obj['_id'])
    try:
        self.disease_term_collection.insert_one(disease_obj)
    except DuplicateKeyError as err:
        # BUG FIX: the original used a '%s' placeholder with str.format(),
        # so the id was never interpolated into the message. Also chain
        # the originating error for easier debugging.
        raise IntegrityError(
            "Disease term {} already exists in database".format(disease_obj['_id'])
        ) from err
    LOG.debug("Disease term saved")
Load a disease term into the database Args: disease_obj(dict)
juraj-google-style
def get_ref(profile, ref):
    """Fetch a ref from the repo described by ``profile``.

    Args:
        profile: A profile generated from
            ``simplygithub.authentication.profile`` (carries repo + token).
        ref: The ref to fetch, e.g. ``heads/my-feature-branch``.

    Returns:
        A dict with data about the ref.
    """
    response = api.get_request(profile, "/refs/" + ref)
    return prepare(response)
Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch``. Returns A dict with data about the ref.
juraj-google-style
def _parse_trunk_groups(self, config):
    """Extract all configured VLAN trunk groups from a config block.

    Args:
        config (str): The vlan configuration block from the node's
            running configuration.

    Returns:
        dict: ``{'trunk_groups': [...]}`` — an empty list when no trunk
        groups are configured. Intended to be merged into the response
        dict.
    """
    return {'trunk_groups': TRUNK_GROUP_RE.findall(config)}
_parse_trunk_groups scans the provided configuration block and extracts all the vlan trunk groups. If no trunk groups are configured an empty List is returned as the value. The return dict is intended to be merged into the response dict. Args: config (str): The vlan configuration block from the node's running configuration Returns: dict: resource dict attribute
juraj-google-style
def lower_and_check_unique(dict_to_check):
    """Return a copy of ``dict_to_check`` with all keys lower-cased.

    The key ``"jobtype"`` is additionally renamed to ``"job_type"`` so
    that key can be referenced unambiguously elsewhere. Values with a
    ``.lower()`` method (strings) are lower-cased too; other values are
    kept as-is.

    Args:
        dict_to_check (dict): The dictionary to check and standardize,
            or ``None``.

    Returns:
        dict: The standardized dictionary, or ``None`` if the input was
        ``None``.

    Raises:
        ValueError: If two keys collapse to the same lower-case name.
    """
    # BUG FIX: original compared with `== None`; use identity check.
    if dict_to_check is None:
        return None
    to_return = {}
    for key, value in dict_to_check.items():
        new_key = key.lower()
        if new_key == 'jobtype':
            new_key = 'job_type'
        if new_key in to_return:
            # ValueError (a subclass of Exception) replaces the original
            # bare Exception, staying compatible with broad catchers.
            raise ValueError('Multiple instances of key ' + new_key + ' found!')
        try:
            to_return[new_key] = value.lower()
        except AttributeError:
            # Non-string value: keep unchanged.
            to_return[new_key] = value
    return to_return
Takes a dictionary and makes all the keys lower case. Also replaces "jobtype" with "job_type" just so that key specifically can be called elsewhere without ambiguity. Finally, ensures that multiple identical keys, that differed only due to different capitalizations, are not present. If there are multiple equivalent keys, an Exception is raised. Args: dict_to_check (dict): The dictionary to check and standardize Returns: to_return (dict): An identical dictionary but with all keys made lower case and no identical keys present.
codesearchnet
def top_1_tpu(inputs):
    """Find max and argmax over the last dimension; works well on TPU.

    Args:
        inputs: A tensor with shape [..., depth].

    Returns:
        values: a Tensor with shape [...] (the maxima).
        indices: a Tensor with shape [...] (the argmax positions).
    """
    inputs_max = tf.reduce_max(inputs, axis=(- 1), keepdims=True)
    # 1 where an element equals the row max, else 0.
    mask = tf.to_int32(tf.equal(inputs_max, inputs))
    # Zero out positions of non-max elements; the max of the remaining
    # indices is then the (largest) argmax position.
    index = (tf.range(tf.shape(inputs)[(- 1)]) * mask)
    return (tf.squeeze(inputs_max, (- 1)), tf.reduce_max(index, axis=(- 1)))
find max and argmax over the last dimension. Works well on TPU Args: inputs: A tensor with shape [..., depth] Returns: values: a Tensor with shape [...] indices: a Tensor with shape [...]
codesearchnet
def _from_yang_library(self, yang_lib: Dict[(str, Any)]) -> None:
    """Set the schema structures from YANG library data.

    Args:
        yang_lib: Dictionary with YANG library data.

    Raises:
        BadYangLibraryData: If YANG library data is invalid.
        MultipleImplementedRevisions: If multiple revisions of an
            implemented module are listed in YANG library.
    """
    try:
        for item in yang_lib['ietf-yang-library:modules-state']['module']:
            name = item['name']
            rev = item['revision']
            mid = (name, rev)
            mdata = ModuleData(mid)
            self.modules[mid] = mdata
            if (item['conformance-type'] == 'implement'):
                # At most one revision of a module may be implemented.
                if (name in self.implement):
                    raise MultipleImplementedRevisions(name)
                self.implement[name] = rev
            mod = self._load_module(name, rev)
            mdata.statement = mod
            if ('feature' in item):
                mdata.features.update(item['feature'])
            # Register the module's own prefix in its prefix map.
            locpref = mod.find1('prefix', required=True).argument
            mdata.prefix_map[locpref] = mid
            if ('submodule' in item):
                for s in item['submodule']:
                    sname = s['name']
                    smid = (sname, s['revision'])
                    # Submodules resolve prefixes against the parent module.
                    sdata = ModuleData(mid)
                    self.modules[smid] = sdata
                    mdata.submodules.add(smid)
                    submod = self._load_module(*smid)
                    sdata.statement = submod
                    bt = submod.find1('belongs-to', name, required=True)
                    locpref = bt.find1('prefix', required=True).argument
                    sdata.prefix_map[locpref] = mid
    except KeyError as e:
        # Any missing mandatory key means the library data is malformed.
        raise BadYangLibraryData(('missing ' + str(e))) from None
    self._process_imports()
    self._check_feature_dependences()
Set the schema structures from YANG library data. Args: yang_lib: Dictionary with YANG library data. Raises: BadYangLibraryData: If YANG library data is invalid. FeaturePrerequisiteError: If a pre-requisite feature isn't supported. MultipleImplementedRevisions: If multiple revisions of an implemented module are listed in YANG library. ModuleNotFound: If a YANG module wasn't found in any of the directories specified in `mod_path`.
codesearchnet
def count(self, event):
    """Return the number of listeners registered for ``event``.

    The result combines regular listeners (added via 'on'/'add_listener')
    and one-shot listeners (added via 'once').

    Args:
        event (str): The event for which to count all listeners.
    """
    regular = self._listeners[event]
    one_shot = self._once[event]
    return len(regular) + len(one_shot)
Get the number of listeners for the event. Args: event (str): The event for which to count all listeners. The resulting count is a combination of listeners added using 'on'/'add_listener' and 'once'.
juraj-google-style
def get_all_voronoi_polyhedra(self, structure):
    """Get the Voronoi polyhedra for all sites in a simulation cell.

    Args:
        structure (Structure): Structure to be evaluated.

    Returns:
        list[dict]: One entry per site, mapping neighboring sites that
        share a Voronoi facet to statistics about that facet (solid
        angle, area, face distance, volume, number of vertices, ...).
    """
    # Single-site cell: fall back to the per-site API.
    if len(structure) == 1:
        return [self.get_voronoi_polyhedra(structure, 0)]
    if self.targets is None:
        targets = structure.composition.elements
    else:
        targets = self.targets
    # Start from the sites mapped into the unit cell ...
    sites = [x.to_unit_cell() for x in structure]
    indices = [(i, 0, 0, 0) for (i, _) in enumerate(structure)]
    # ... then add all periodic-image neighbors within the cutoff.
    all_neighs = structure.get_all_neighbors(self.cutoff, include_index=True, include_image=True)
    for neighs in all_neighs:
        sites.extend([x[0] for x in neighs])
        indices.extend([((x[2],) + x[3]) for x in neighs])
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    indices = np.array(indices, dtype=int)
    # Deduplicate sites that appear via multiple neighbor lists.
    (indices, uniq_inds) = np.unique(indices, return_index=True, axis=0)
    sites = np.array(sites)[uniq_inds]
    # Rows whose image offsets are all zero are the original unit-cell sites.
    (root_images,) = np.nonzero((np.abs(indices[:, 1:]).max(axis=1) == 0))
    del indices
    qvoronoi_input = [s.coords for s in sites]
    voro = Voronoi(qvoronoi_input)
    return [self._extract_cell_info(structure, i, sites, targets, voro, self.compute_adj_neighbors) for i in root_images.tolist()]
Get the Voronoi polyhedra for all site in a simulation cell Args: structure (Structure): Structure to be evaluated Returns: A dict of sites sharing a common Voronoi facet with the site n mapped to a directory containing statistics about the facet: - solid_angle - Solid angle subtended by face - angle_normalized - Solid angle normalized such that the faces with the largest - area - Area of the facet - face_dist - Distance between site n and the facet - volume - Volume of Voronoi cell for this face - n_verts - Number of vertices on the facet
codesearchnet
def render_diagram(root_task, out_base, max_param_len=20, horizontal=False, colored=False):
    """Render a PNG diagram of the ETL pipeline rooted at `root_task`.

    All upstream tasks (requirements) of `root_task` are rendered. Writes
    ``<out_base>.dot`` and ``<out_base>.png``. Requires the GraphViz
    ``dot`` executable, configured via ``config.DOT_EXECUTABLE``.

    Args:
        root_task (luigi.Task): Task instance defining the pipeline root.
        out_base (str): Base output file name (endings are appended).
        max_param_len (int): Maximum shown length of parameter values.
        horizontal (bool): If True, layout left-to-right.
        colored (bool): If True, color nodes by completion status.
    """
    import re
    import codecs
    import subprocess
    from ozelot import config
    from ozelot.etl.tasks import get_task_name, get_task_param_string
    lines = [u'digraph G {']
    if horizontal:
        lines.append(u'rankdir=LR;')

    def get_id(task):
        # Unique, DOT-safe node id from task name + parameter string.
        s = ((get_task_name(task) + '_') + get_task_param_string(task))
        return re.sub('\\W+', '', re.sub(' ', '_', s))
    existing_nodes = set()
    existing_edges = set()

    def _build(task, parent_id=None):
        # Depth-first walk over requirements, emitting each node/edge once.
        tid = get_id(task)
        if (tid not in existing_nodes):
            params = task.to_str_params()
            param_list = ''
            for (k, v) in params.items():
                if (len(v) > max_param_len):
                    v = (v[:max_param_len] + '...')
                param_list += '<TR><TD ALIGN="LEFT"><FONT POINT-SIZE="10">{:s}</FONT></TD><TD ALIGN="LEFT"><FONT POINT-SIZE="10">{:s}</FONT></TD></TR>'.format(k, v)
            label = (('<TABLE BORDER="0" CELLSPACING="1" CELLPADDING="1"><TR><TD COLSPAN="2" ALIGN="CENTER"><FONT POINT-SIZE="12">{:s}</FONT></TD></TR>'.format(get_task_name(task)) + param_list) + '</TABLE>')
            style = getattr(task, 'diagram_style', [])
            if colored:
                # Green = complete, red = incomplete.
                color = ', color="{:s}"'.format(('green' if task.complete() else 'red'))
            else:
                color = ''
            lines.append(u'{name:s} [label=< {label:s} >, shape="rect" {color:s}, style="{style:s}"];\n'.format(name=tid, label=label, color=color, style=','.join(style)))
            existing_nodes.add(tid)
            for req in task.requires():
                _build(req, parent_id=tid)
        if ((parent_id is not None) and ((tid, parent_id) not in existing_edges)):
            lines.append(u'{source:s} -> {target:s};\n'.format(source=tid, target=parent_id))
            # BUG FIX: the original never populated `existing_edges`, so the
            # duplicate-edge check above could never suppress anything.
            existing_edges.add((tid, parent_id))
    _build(root_task)
    lines.append(u'}')
    with codecs.open((out_base + '.dot'), 'w', encoding='utf-8') as f:
        f.write(u'\n'.join(lines))
    if (not hasattr(config, 'DOT_EXECUTABLE')):
        raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
    if (not os.path.exists(config.DOT_EXECUTABLE)):
        raise IOError(("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE)))
    subprocess.check_call([config.DOT_EXECUTABLE, '-T', 'png', '-o', (out_base + '.png'), (out_base + '.dot')])
Render a diagram of the ETL pipeline All upstream tasks (i.e. requirements) of :attr:`root_task` are rendered. Nodes are, by default, styled as simple rects. This style is augmented by any :attr:`diagram_style` attributes of the tasks. .. note:: This function requires the 'dot' executable from the GraphViz package to be installed and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`. Args: root_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline out_base (str): base output file name (file endings will be appended) max_param_len (int): Maximum shown length of task parameter values horizontal (bool): If True, layout graph left-to-right instead of top-to-bottom colored (bool): If True, show task completion status by color of nodes
codesearchnet
def from_Z(z: int):
    """Get an element from an atomic number.

    Args:
        z (int): Atomic number.

    Returns:
        Element with atomic number z.

    Raises:
        ValueError: If no element has atomic number ``z``.
    """
    symbol = next(
        (sym for sym, data in _pt_data.items() if data['Atomic no'] == z),
        None,
    )
    if symbol is None:
        raise ValueError(('No element with this atomic number %s' % z))
    return Element(symbol)
Get an element from an atomic number. Args: z (int): Atomic number Returns: Element with atomic number z.
codesearchnet
def parse_zip(zipfilename: str, regex: Pattern, invert_match: bool, files_with_matches: bool, files_without_match: bool, grep_inner_file_name: bool, show_inner_file: bool) -> None:
    """Grep within a single OpenXML file (by definition a ``.zip`` file).

    Args:
        zipfilename: name of the OpenXML (zip) file
        regex: regular expression to match
        invert_match: find files that do NOT match, instead of ones that do?
        files_with_matches: show filenames of files with a match?
        files_without_match: show filenames of files with no match?
        grep_inner_file_name: search the names of "inner" files, rather
            than their contents?
        show_inner_file: show the names of the "inner" files, not just
            the "outer" (OpenXML) file?
    """
    # The two filename-only modes are mutually exclusive.
    assert (not (files_without_match and files_with_matches))
    report_lines = ((not files_without_match) and (not files_with_matches))
    report_hit_lines = (report_lines and (not invert_match))
    report_miss_lines = (report_lines and invert_match)
    log.debug(('Checking ZIP: ' + zipfilename))
    found_in_zip = False
    try:
        with ZipFile(zipfilename, 'r') as zf:
            for contentsfilename in zf.namelist():
                log.debug(('... checking file: ' + contentsfilename))
                if grep_inner_file_name:
                    # Match against the inner file's NAME only.
                    found_in_filename = bool(regex.search(contentsfilename))
                    found_in_zip = (found_in_zip or found_in_filename)
                    if (files_with_matches and found_in_zip):
                        # First hit is enough in filenames-only mode.
                        report_hit_filename(zipfilename, contentsfilename, show_inner_file)
                        return
                    if ((report_hit_lines and found_in_filename) or (report_miss_lines and (not found_in_filename))):
                        report_line(zipfilename, contentsfilename, contentsfilename, show_inner_file)
                else:
                    # Match against the inner file's CONTENTS, line by line.
                    try:
                        with zf.open(contentsfilename, 'r') as file:
                            try:
                                for line in file.readlines():
                                    found_in_line = bool(regex.search(line))
                                    found_in_zip = (found_in_zip or found_in_line)
                                    if (files_with_matches and found_in_zip):
                                        report_hit_filename(zipfilename, contentsfilename, show_inner_file)
                                        return
                                    if ((report_hit_lines and found_in_line) or (report_miss_lines and (not found_in_line))):
                                        report_line(zipfilename, contentsfilename, line, show_inner_file)
                            except EOFError:
                                # Truncated member; report what we saw so far.
                                pass
                    except RuntimeError as e:
                        # zipfile raises RuntimeError for encrypted members.
                        log.warning('RuntimeError whilst processing {} [{}]: probably encrypted contents; error was {!r}', zipfilename, contentsfilename, e)
    except (zlib.error, BadZipFile) as e:
        log.debug('Invalid zip: {}; error was {!r}', zipfilename, e)
    if (files_without_match and (not found_in_zip)):
        report_miss_filename(zipfilename)
Implement a "grep within an OpenXML file" for a single OpenXML file, which is by definition a ``.zip`` file. Args: zipfilename: name of the OpenXML (zip) file regex: regular expression to match invert_match: find files that do NOT match, instead of ones that do? files_with_matches: show filenames of files with a match? files_without_match: show filenames of files with no match? grep_inner_file_name: search the names of "inner" files, rather than their contents? show_inner_file: show the names of the "inner" files, not just the "outer" (OpenXML) file?
codesearchnet
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):
    """Encoder-layer forward pass (self-attention + feed-forward).

    Args:
        hidden_states (`torch.FloatTensor`): input of shape
            `(batch, seq_len, embed_dim)`.
        attention_mask (`torch.FloatTensor`): mask of size
            `(batch, 1, target_len, source_len)` where padding elements
            are indicated by very large negative values.
        object_queries (`torch.FloatTensor`, *optional*): object queries
            (content embeddings) added to the hidden states.
        output_attentions (`bool`, *optional*): whether to also return
            the attention weights.

    Returns:
        Tuple of `(hidden_states,)`, plus `(attn_weights,)` when
        `output_attentions` is True.
    """
    # --- Self-attention block: residual connection + post-LayerNorm.
    residual = hidden_states
    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    # --- Feed-forward block: residual connection + post-LayerNorm.
    residual = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    if self.training:
        # Guard against low-precision overflow: clamp inf/nan activations.
        if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (attn_weights,)
    return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
github-repos
def check_correct(state, check, diagnose):
    """Run `check`; surface feedback from `diagnose` only when the check
    fails (or when `state.force_diagnose` is set).

    Both SCT chains are always executed. Returns the state so chaining
    can continue.
    """
    def _run_chain(scts):
        # Return the failure feedback, or None when the chain passes.
        try:
            multi(state, scts)
        except TestFail as exc:
            return exc.feedback
        return None

    check_feedback = _run_chain(check)
    diagnose_feedback = _run_chain(diagnose)
    feedback = check_feedback
    if diagnose_feedback is not None and (check_feedback is not None or state.force_diagnose):
        feedback = diagnose_feedback
    if feedback is not None:
        state.report(feedback)
    return state
Allows feedback from a diagnostic SCT, only if a check SCT fails. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). check: An sct chain that must succeed. diagnose: An sct chain to run if the check fails. :Example: The SCT below tests whether the student's query result is correct, before running diagnostic SCTs. :: Ex().check_correct( check_result(), check_node('SelectStmt') )
codesearchnet
def evaluate_rpn(rpn):
    """Evaluate a boolean expression given in reverse polish notation.

    Args:
        rpn: Sequence of operands and operator tokens, as produced by
            ``map2rpn``.

    Returns:
        bool: The value of the expression.

    Raises:
        ValueError: If an operator is neither unary nor binary, or the
            expression does not reduce to a single boolean.
    """
    vals_stack = []
    for item in rpn:
        if (item in _ALL_OPS):
            v2 = vals_stack.pop()
            if (item in _UNARY_OPS):
                res = _UNARY_OPS[item](v2)
            elif (item in _BIN_OPS):
                v1 = vals_stack.pop()
                res = _BIN_OPS[item](v1, v2)
            else:
                raise ValueError(('%s not in unary_ops or bin_ops' % str(item)))
            vals_stack.append(res)
        else:
            # Operand: push onto the stack.
            vals_stack.append(item)
    # FIX: the original used bare `assert`s here, which are stripped under
    # `python -O`; raise explicitly so the validation always runs.
    if len(vals_stack) != 1 or not isinstance(vals_stack[0], bool):
        raise ValueError('Malformed RPN expression: %s' % str(rpn))
    return vals_stack[0]
Evaluates the RPN form produced by map2rpn. Returns: bool
codesearchnet
def _peer_bfd_tx(self, **kwargs):
    """Return the BFD minimum transmit interval XML for a BGP neighbor.

    You should not use this method directly; use `BGP.bfd`.

    Args:
        peer_ip (str): Peer IPv4 address for BFD setting.
        min_tx (str): BFD transmit interval in milliseconds.
        delete (bool): Remove the configuration if ``True``.

    Returns:
        XML to be passed to the switch.
    """
    method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \
        'neighbor_neighbor_ips_neighbor_addr_bfd_interval_min_tx'
    bfd_tx = getattr(self._rbridge, method_name)
    config = bfd_tx(**kwargs)
    if kwargs['delete']:
        tag = 'min-tx'
        # NOTE(review): the following line is truncated/corrupted in this
        # copy of the source (unterminated string literal). Recover the
        # original XPath/delete logic from version control before use.
        config.find('.
    return config
Return the BFD minimum transmit interval XML. You should not use this method. You probably want `BGP.bfd`. Args: peer_ip (str): Peer IPv4 address for BFD setting. min_tx (str): BFD transmit interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None
juraj-google-style
def append(self, data):
    """Append an item or a list to the Lattice.

    Non-empty lists are appended as-is; anything else (including an
    empty list) is wrapped in a one-element list first.

    Args:
        data (item, list): The item or list to be added to the Lattice.
    """
    is_nonempty_list = isinstance(data, list) and len(data) > 0
    self.nodes.append(data if is_nonempty_list else [data])
Appends items or lists to the Lattice Args: data (item,list) : The Item or List to be added to the Lattice
juraj-google-style
def v4_int_to_packed(address):
    """Return the packed binary representation of an IPv4 address.

    Args:
        address: An integer representation of an IPv4 IP address.

    Returns:
        The binary representation of this address.

    Raises:
        ValueError: If the integer is too large to be an IPv4 IP address.
    """
    if address > _BaseV4._ALL_ONES:
        raise ValueError('Address too large for IPv4')
    packed = struct.pack('!I', address)
    return Bytes(packed)
The binary representation of this address. Args: address: An integer representation of an IPv4 IP address. Returns: The binary representation of this address. Raises: ValueError: If the integer is too large to be an IPv4 IP address.
juraj-google-style
def __init__(self, pipeline: Union[beam_runner_api_pb2.Pipeline, beam.Pipeline], default_vertex_attrs=None, default_edge_attrs=None, render_option=None):
    """Constructor of PipelineGraph.

    Examples:
        graph = pipeline_graph.PipelineGraph(pipeline_proto)
        graph.get_dot()

    Args:
        pipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.
        default_vertex_attrs: (Dict[str, str]) default vertex attributes;
            defaults to ``{'shape': 'box'}``.
        default_edge_attrs: (Dict[str, str]) default edge attributes.
        render_option: (str) how the pipeline graph is rendered; see
            display.pipeline_graph_renderer for available options.
    """
    # BUG FIX: the default for `default_vertex_attrs` used to be a mutable
    # dict literal that the body then mutated (adding 'color'/'fontcolor'),
    # leaking state across calls. Use None and build a fresh dict below.
    self._lock = threading.Lock()
    self._graph: pydot.Dot = None
    self._pipeline_instrument = None
    if isinstance(pipeline, beam.Pipeline):
        # Instrument user pipelines so cached PCollections can be labeled.
        self._pipeline_instrument = inst.PipelineInstrument(pipeline, pipeline._options)
        self._pipeline_instrument.preprocess()
    if isinstance(pipeline, beam_runner_api_pb2.Pipeline):
        self._pipeline_proto = pipeline
    elif isinstance(pipeline, beam.Pipeline):
        self._pipeline_proto = pipeline.to_runner_api()
    else:
        raise TypeError('pipeline should either be a %s or %s, while %s is given' % (beam_runner_api_pb2.Pipeline, beam.Pipeline, type(pipeline)))
    # Map each PCollection id to its consuming / producing transform ids.
    self._consumers: DefaultDict[str, List[str]] = collections.defaultdict(list)
    self._producers: Dict[str, str] = {}
    for transform_id, transform_proto in self._top_level_transforms():
        for pcoll_id in transform_proto.inputs.values():
            self._consumers[pcoll_id].append(transform_id)
        for pcoll_id in transform_proto.outputs.values():
            self._producers[pcoll_id] = transform_id
    # Fresh dict per call; safe to mutate below.
    default_vertex_attrs = default_vertex_attrs or {'shape': 'box'}
    if 'color' not in default_vertex_attrs:
        default_vertex_attrs['color'] = 'blue'
    if 'fontcolor' not in default_vertex_attrs:
        default_vertex_attrs['fontcolor'] = 'blue'
    vertex_dict, edge_dict = self._generate_graph_dicts()
    self._construct_graph(vertex_dict, edge_dict, default_vertex_attrs, default_edge_attrs)
    self._renderer = pipeline_graph_renderer.get_renderer(render_option)
Constructor of PipelineGraph. Examples: graph = pipeline_graph.PipelineGraph(pipeline_proto) graph.get_dot() or graph = pipeline_graph.PipelineGraph(pipeline) graph.get_dot() Args: pipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered. default_vertex_attrs: (Dict[str, str]) a dict of default vertex attributes default_edge_attrs: (Dict[str, str]) a dict of default edge attributes render_option: (str) this parameter decides how the pipeline graph is rendered. See display.pipeline_graph_renderer for available options.
github-repos
def getaccountaddress(self, user_id=''):
    """Get (or create) the coin address associated with a user id.

    If the user does not yet have an address for this coin, the daemon
    generates one.

    Args:
        user_id (str): this user's unique identifier.

    Returns:
        str: Base58Check address for this account.
    """
    account_address = self.rpc.call('getaccountaddress', user_id)
    self.logger.debug('Your', self.coin, 'address is', account_address)
    return account_address
Get the coin address associated with a user id. If the specified user id does not yet have an address for this coin, then generate one. Args: user_id (str): this user's unique identifier Returns: str: Base58Check address for this account
codesearchnet
def init_from_dataset_and_submissions_write_to_datastore(self, dataset_batches, attack_submission_ids):
    """Init adversarial batches as the cross product of dataset batches
    and attack submissions, then persist them to the datastore.

    Args:
        dataset_batches: instance of DatasetBatches.
        attack_submission_ids: iterable with IDs of all (targeted and
            nontargeted) attack submissions.
    """
    pairs = itertools.product(dataset_batches.data.keys(), attack_submission_ids)
    for idx, (dataset_batch_id, submission_id) in enumerate(pairs):
        self.add_batch(
            ADVERSARIAL_BATCH_ID_PATTERN.format(idx),
            {'dataset_batch_id': dataset_batch_id, 'submission_id': submission_id},
        )
    self.write_to_datastore()
Init list of adversarial batches from dataset batches and submissions. Args: dataset_batches: instances of DatasetBatches attack_submission_ids: iterable with IDs of all (targeted and nontargeted) attack submissions, could be obtains as CompetitionSubmissions.get_all_attack_ids()
juraj-google-style
def hasReservation(self, pid, subject, vendorSpecific=None):
    """Check whether ``subject`` holds a reservation on ``pid``.

    See Also: hasReservationResponse()

    Args:
        pid: object identifier.
        subject: subject to check.
        vendorSpecific: optional vendor-specific headers.

    Returns:
        bool parsed from the response (404 treated as False).
    """
    raw_response = self.hasReservationResponse(pid, subject, vendorSpecific)
    return self._read_boolean_404_response(raw_response)
See Also: hasReservationResponse() Args: pid: subject: vendorSpecific: Returns:
juraj-google-style
def _init_summary_op(self, summary_op=USE_DEFAULT):
    """Initializes summary_op.

    Args:
        summary_op: An Operation that returns a Summary for the event
            logs. If set to USE_DEFAULT, create an op that merges all
            the summaries.
    """
    if summary_op is Supervisor.USE_DEFAULT:
        # Prefer an op already registered in the graph collection ...
        summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
        if summary_op is None:
            # ... otherwise merge all summaries currently in the graph and
            # register the result for later lookups.
            summary_op = _summary.merge_all()
            if summary_op is not None:
                ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
    self._summary_op = summary_op
Initializes summary_op. Args: summary_op: An Operation that returns a Summary for the event logs. If set to USE_DEFAULT, create an op that merges all the summaries.
github-repos
def ReadFrom(self, byte_stream):
    """Read values from a byte stream.

    Args:
        byte_stream (bytes): byte stream.

    Returns:
        tuple[object, ...]: values copied from the byte stream.

    Raises:
        IOError: if the byte stream cannot be read.
    """
    try:
        return self._struct.unpack_from(byte_stream)
    except (TypeError, struct.error) as exception:
        # Chain the original error so the underlying struct failure is
        # preserved in the traceback.
        raise IOError('Unable to read byte stream with error: {0!s}'.format(
            exception)) from exception
Read values from a byte stream. Args: byte_stream (bytes): byte stream. Returns: tuple[object, ...]: values copied from the byte stream. Raises: IOError: if byte stream cannot be read. OSError: if byte stream cannot be read.
codesearchnet
def resume():
    """Resume a paused timer, re-activating it.

    Subsequent time accumulates in the total.

    Returns:
        float: The current time.

    Raises:
        PausedError: If the timer was not in a paused state.
        StoppedError: If the timer was already stopped.
    """
    now = timer()
    state = f.t
    if state.stopped:
        raise StoppedError('Cannot resume stopped timer.')
    if not state.paused:
        raise PausedError('Cannot resume timer that is not paused.')
    state.paused = False
    state.start_t = now
    state.last_t = now
    return now
Resume a paused timer, re-activating it. Subsequent time accumulates in the total. Returns: float: The current time. Raises: PausedError: If timer was not in paused state. StoppedError: If timer was already stopped.
codesearchnet
def _translate(pattern, case_sensitive=True): if not case_sensitive: pattern = pattern.lower() i, n = 0, len(pattern) res = "" while i < n: c = pattern[i] i = i + 1 if c == "*": res = res + "[^/]*" elif c == "?": res = res + "." elif c == "[": j = i if j < n and pattern[j] == "!": j = j + 1 if j < n and pattern[j] == "]": j = j + 1 while j < n and pattern[j] != "]": j = j + 1 if j >= n: res = res + "\\[" else: stuff = pattern[i:j].replace("\\", "\\\\") i = j + 1 if stuff[0] == "!": stuff = "^" + stuff[1:] elif stuff[0] == "^": stuff = "\\" + stuff res = "%s[%s]" % (res, stuff) else: res = res + re.escape(c) return res
Translate a wildcard pattern to a regular expression. There is no way to quote meta-characters. Arguments: pattern (str): A wildcard pattern. case_sensitive (bool): Set to `False` to use a case insensitive regex (default `True`). Returns: str: A regex equivalent to the given pattern.
juraj-google-style
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):
    """Apply function(s) to each step object in the index.

    Args:
        fn: function to apply. If a list then each function is applied.
        pairwise: whether to apply the function to pairs of steps.
        symmetric, diagonal, block: passed to apply_pairwise when
            pairwise=True.
        kwargs: keyword arguments to pass to each function. Arguments
            with list values are grid searched via util.dict_product.

    Returns:
        a StepFrame or StepSeries with one column per (function, kwargs)
        combination.
    """
    # kwargs whose values are multi-element lists define the search grid.
    search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1]
    functions = util.make_list(fn)
    search = list(product(functions, util.dict_product(kwargs)))
    results = []
    for fn, kw in search:
        if not pairwise:
            r = self.index.to_series().apply(lambda step: fn(step, **kw))
        else:
            r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw)
        # Result name: function name (only if several) + searched values.
        name = [] if len(functions) == 1 else [fn.__name__]
        name += util.dict_subset(kw, search_keys).values()
        if isinstance(r, pd.DataFrame):
            # Prefix each existing column with the search name tuple.
            columns = pd.MultiIndex.from_tuples(
                [tuple(name + util.make_list(c)) for c in r.columns])
            r.columns = columns
        else:
            r.name = tuple(name)
        results.append(r)
    if len(results) > 1:
        result = pd.concat(results, axis=1)
        # Label the column index levels: function, search keys, then blanks
        # for any remaining levels contributed by the results themselves.
        column_names = [] if len(functions) == 1 else [None]
        column_names += search_keys
        column_names += [None]*(len(result.columns.names)-len(column_names))
        result.columns.names = column_names
        return StepFrame(result)
    else:
        result = results[0]
        if isinstance(result, pd.DataFrame):
            return StepFrame(result)
        else:
            result.name = functions[0].__name__
            return StepSeries(result)
Apply function to each step object in the index Args: fn: function to apply. If a list then each function is applied pairwise: whether to apply the function to pairs of steps symmetric, diagonal, block: passed to apply_pairwise when pairwise=True kwargs: a keyword arguments to pass to each function. Arguments with list value are grid searched using util.dict_product. Returns: a StepFrame or StepSeries
juraj-google-style
def _validate_inputs(self, graph_def, input_tensors):
    """Validate the input parameters.

    Args:
        graph_def: The TensorFlow GraphDef.
        input_tensors: List of input tensors.

    Raises:
        ValueError: Input shape is not specified, or quantization
            parameters are invalid.
    """
    self._save_conversion_params_metric(graph_def)
    # Derive the quantization mode from the converter's current settings.
    self._quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)
    self._validate_inference_input_output_types(self._quant_mode)
    if not self._is_unknown_shapes_allowed():
        for tensor in input_tensors:
            shape_list = tensor.shape.as_list()
            if None in shape_list[1:]:
                # Only the batch (1st) dimension may be unknown.
                raise ValueError("None is only supported in the 1st dimension. Tensor '{0}' has invalid shape '{1}'.".format(_get_tensor_name(tensor), shape_list))
            elif shape_list and shape_list[0] is None:
                # Default an unknown batch dimension to 1.
                shape = tensor.shape.as_list()
                shape[0] = 1
                tensor.set_shape(shape)
    # Build debug info either from the function graph or from the
    # trackable object's pre-existing graph_debug_info.
    if self._trackable_obj is None or not hasattr(self._trackable_obj, 'graph_debug_info'):
        self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)
    else:
        self._debug_info = _get_debug_info(_convert_debug_info_func(self._trackable_obj.graph_debug_info), graph_def)
Validate the input parameters. Args: graph_def: The TensorFlow GraphDef. input_tensors: List of input tensors. Raise: ValueError: Input shape is not specified. Invalid quantization parameters.
github-repos
def size(input: ragged_tensor.Ragged, out_type=dtypes.int32, name=None):
    """Returns the size of a potentially ragged tensor.

    The size of a ragged tensor is the size of its inner (flat) values.

    Args:
        input: A potentially ragged `Tensor`.
        out_type: The numeric output type for the operation.
        name: A name for the operation (optional).

    Returns:
        A Tensor of type `out_type`.
    """
    target = input.flat_values if ragged_tensor.is_ragged(input) else input
    return array_ops.size(target, out_type=out_type, name=name)
Returns the size of a potentially ragged tensor. The size of a ragged tensor is the size of its inner values. #### Example: >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy().item() 3 Args: input: A potentially ragged `Tensor`. out_type: The numeric output type for the operation. name: A name for the operation (optional). Returns: A Tensor of type `out_type`.
github-repos
def __init__(self, meter_id=Meter.OFPM_ALL):
    """Create an InstructionMeter.

    Args:
        meter_id (int): Meter instance; defaults to ``Meter.OFPM_ALL``.
    """
    super().__init__(InstructionType.OFPIT_METER)
    self.meter_id = meter_id
Create a InstructionMeter with the optional parameters below. Args: meter_id (int): Meter instance.
juraj-google-style
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the RevocationReason object and decode it
    into its constituent parts.

    Args:
        istream (Stream): A data stream containing encoded object data,
            supporting a read method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): The KMIP version with which the
            object will be decoded. Optional, defaults to KMIP 1.0.
    """
    super(RevocationReason, self).read(istream, kmip_version=kmip_version)
    # Decode from a sub-stream bounded by this object's encoded length.
    tstream = BytearrayStream(istream.read(self.length))
    # The revocation code is a required field.
    self.revocation_code = RevocationReasonCode()
    self.revocation_code.read(tstream, kmip_version=kmip_version)
    # The revocation message is optional.
    if self.is_tag_next(Tags.REVOCATION_MESSAGE, tstream):
        self.revocation_message = TextString()
        self.revocation_message.read(tstream, kmip_version=kmip_version)
    self.is_oversized(tstream)
    self.validate()
Read the data encoding the RevocationReason object and decode it into its constituent parts. Args: istream (Stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by this
            formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    # Map numeric event type / severity codes to human readable strings.
    event_type = event_values.get('event_type', None)
    if event_type is not None:
        event_values['event_type'] = self.GetEventTypeString(event_type)
    severity = event_values.get('severity', None)
    if severity is not None:
        event_values['severity'] = self.GetSeverityString(severity)
    source_name = event_values.get('source_name', None)
    message_identifier = event_values.get('message_identifier', None)
    strings = event_values.get('strings', [])
    if source_name and message_identifier:
        # Resolve the Windows EventLog message template and fill in the
        # insertion strings.
        message_string = formatter_mediator.GetWindowsEventMessage(
            source_name, message_identifier)
        if message_string:
            try:
                event_values['message_string'] = message_string.format(*strings)
            except IndexError:
                # Not enough insertion strings for the template; leave
                # 'message_string' unset.
                pass
    # Render the raw insertion strings as a quoted, bracketed list.
    message_strings = []
    for string in strings:
        message_strings.append('\'{0:s}\''.format(string))
    message_string = ', '.join(message_strings)
    event_values['strings'] = '[{0:s}]'.format(message_string)
    return self._ConditionalFormatMessages(event_values)
Determines the formatted message strings for an event object. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event (EventObject): event. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def GetValueByPath(self, path_segments):
    """Retrieves a plist value by path.

    Args:
        path_segments (list[str]): path segment strings relative to the
            root of the plist; list elements are addressed by a decimal
            index string.

    Returns:
        object: The value of the key specified by the path, or None when
            the path does not resolve. NOTE(review): falsy values
            (0, '', empty containers) are also reported as None — this
            mirrors the original behavior; confirm it is intended.
    """
    current = self.root_key
    for segment in path_segments:
        if isinstance(current, dict):
            if segment not in current:
                return None
            current = current[segment]
        elif isinstance(current, list):
            try:
                index = int(segment, 10)
            except ValueError:
                return None
            # Out-of-range indices raise IndexError, matching the
            # original implementation.
            current = current[index]
        else:
            # Cannot descend into a scalar.
            return None
    return current if current else None
Retrieves a plist value by path. Args: path_segments (list[str]): path segment strings relative to the root of the plist. Returns: object: The value of the key specified by the path or None.
codesearchnet
def get_counters(counter_list):
    """Get the values for the passed list of performance counters.

    Args:
        counter_list (list): A list of counters to lookup.

    Returns:
        dict: A dictionary of counter paths and their values.

    Raises:
        CommandExecutionError: If ``counter_list`` is not a list.
    """
    if not isinstance(counter_list, list):
        raise CommandExecutionError('counter_list must be a list of tuples')
    # NOTE(review): if OpenQuery() itself raises, `query` is unbound when
    # the finally clause runs — confirm against upstream.
    try:
        query = win32pdh.OpenQuery()
        counters = build_counter_list(counter_list)
        for counter in counters:
            counter.add_to_query(query)
        # Rate counters need two samples: collect, wait, collect again.
        win32pdh.CollectQueryData(query)
        time.sleep(1)
        win32pdh.CollectQueryData(query)
        ret = {}
        for counter in counters:
            try:
                ret.update({counter.path: counter.value()})
            except pywintypes.error as exc:
                if exc.strerror == 'No data to return.':
                    # Some counters legitimately have no data; skip them.
                    continue
                else:
                    raise
    finally:
        win32pdh.CloseQuery(query)
    return ret
Get the values for the passed list of counters Args: counter_list (list): A list of counters to lookup Returns: dict: A dictionary of counters and their values
juraj-google-style
def add_defaults(self, ctype: ContentType=None) -> 'InstanceNode':
    """Return the receiver with defaults added recursively to its value.

    Args:
        ctype: Content type of the defaults to be added. If it is
            ``None``, the content type will be the same as receiver's.
    """
    val = self.value
    # Only internal nodes with structured values can receive defaults.
    if (not (isinstance(val, StructuredValue) and self.is_internal())):
        return self
    res = self
    if isinstance(val, ObjectValue):
        if val:
            # Recurse into every existing member first ...
            for mn in self._member_names():
                m = (res._member(mn) if (res is self) else res.sibling(mn))
                res = m.add_defaults(ctype)
                res = res.up()
        # ... then let the schema node add missing default members.
        return self.schema_node._add_defaults(res, ctype)
    if (not val):
        return res
    # Array value: apply defaults to each entry in turn, moving the
    # cursor with next() until the last entry is reached.
    en = res[0]
    while True:
        res = en.add_defaults(ctype)
        try:
            en = res.next()
        except NonexistentInstance:
            break
    return res.up()
Return the receiver with defaults added recursively to its value. Args: ctype: Content type of the defaults to be added. If it is ``None``, the content type will be the same as receiver's.
codesearchnet
def get_imagery(cls, lat, lon, date=None, dim=None, cloud_score=False):
    """Returns satellite image metadata for a location.

    Args:
        lat: latitude float.
        lon: longitude float.
        date: date instance of an available date from `get_assets`.
        dim: width and height of image in degrees as float.
        cloud_score: boolean to calculate the percentage of the image
            covered by clouds.

    Returns:
        json
    """
    filters = {
        'lat': lat,
        'lon': lon,
        'date': date,
        'dim': dim,
        'cloud_score': cloud_score,
    }
    return cls('planetary/earth/imagery').get_resource(**filters)
Returns satellite image Args: lat: latitude float lon: longitude float date: date instance of available date from `get_assets` dim: width and height of image in degrees as float cloud_score: boolean to calculate the percentage of the image covered by clouds Returns: json
codesearchnet
def iplot_state_qsphere(rho, figsize=None):
    """Create an interactive Q-sphere representation of a quantum state.

    Each eigenvector of the input state with a non-negligible eigenvalue
    is rendered as one sphere of points; each point encodes the
    probability and phase of one computational basis state.

    Args:
        rho (array): State vector or density matrix.
        figsize (tuple): Figure size in pixels.
    """
    html_template = Template()
    javascript_template = Template()
    rho = _validate_input_state(rho)
    if figsize is None:
        options = {}
    else:
        options = {'width': figsize[0], 'height': figsize[1]}
    qspheres_data = []
    num = int(np.log2(len(rho)))
    # Eigendecompose so mixed states produce one sphere per eigenvector.
    weig, stateall = linalg.eigh(rho)
    for _ in range(2**num):
        # Process eigenvectors in decreasing eigenvalue order.
        probmix = weig.max()
        prob_location = weig.argmax()
        if probmix > 0.001:
            state = stateall[:, prob_location]
            # Pick the first amplitude with maximal magnitude and rotate
            # the global phase so that amplitude becomes real.
            loc = np.absolute(state).argmax()
            for j in range(2**num):
                test = np.absolute(np.absolute(state[j]) - np.absolute(state[loc]))
                if test < 0.001:
                    loc = j
                    break
            angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)
            angleset = np.exp(-1j*angles)
            state = angleset*state
            # NOTE(review): flatten() returns a new array and its result
            # is discarded here; likely intended `state = state.flatten()`.
            state.flatten()
            spherepoints = []
            for i in range(2**num):
                # Place basis state |i>: latitude from Hamming weight,
                # longitude from its order within that weight class.
                element = bin(i)[2:].zfill(num)
                weight = element.count("1")
                number_of_divisions = n_choose_k(num, weight)
                weight_order = bit_string_index(element)
                angle = weight_order * 2 * np.pi / number_of_divisions
                zvalue = -2 * weight / num + 1
                xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
                yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
                prob = np.real(np.dot(state[i], state[i].conj()))
                angles = (np.angle(state[i]) + 2 * np.pi) % (2 * np.pi)
                qpoint = {
                    'x': xvalue,
                    'y': yvalue,
                    'z': zvalue,
                    'prob': prob,
                    'phase': angles
                }
                spherepoints.append(qpoint)
            sphere = {
                'points': spherepoints,
                'eigenvalue': probmix
            }
            qspheres_data.append(sphere)
            # Zero this eigenvalue so argmax selects the next largest.
            weig[prob_location] = 0
    # Unique HTML div id derived from the current timestamp.
    div_number = str(time.time())
    div_number = re.sub('[.]', '', div_number)
    html = html_template.substitute({
        'divNumber': div_number
    })
    javascript = javascript_template.substitute({
        'data': qspheres_data,
        'divNumber': div_number,
        'options': options
    })
    display(HTML(html + javascript))
Create a Q sphere representation. Graphical representation of the input array, using a Q sphere for each eigenvalue. Args: rho (array): State vector or density matrix. figsize (tuple): Figure size in pixels.
juraj-google-style
def _GetMessageFromFactory(factory, full_name): proto_descriptor = factory.pool.FindMessageTypeByName(full_name) proto_cls = factory.GetPrototype(proto_descriptor) return proto_cls
Get a proto class from the MessageFactory by name. Args: factory: a MessageFactory instance. full_name: str, the fully qualified name of the proto type. Returns: A class, for the type identified by full_name. Raises: KeyError, if the proto is not found in the factory's descriptor pool.
codesearchnet
def _kl_laplace_laplace(a, b, name=None):
    """Calculate the batched KL divergence KL(a || b), a and b Laplace.

    Args:
        a: instance of a Laplace distribution object.
        b: instance of a Laplace distribution object.
        name: (optional) Name to use for created operations; defaults to
            "kl_laplace_laplace".

    Returns:
        Batchwise KL(a || b).
    """
    with tf.name_scope(name or "kl_laplace_laplace"):
        loc_distance = tf.abs(a.loc - b.loc)
        scale_ratio = a.scale / b.scale
        return (-tf.math.log(scale_ratio) - 1
                + loc_distance / b.scale
                + scale_ratio * tf.exp(-loc_distance / a.scale))
Calculate the batched KL divergence KL(a || b) with a and b Laplace. Args: a: instance of a Laplace distribution object. b: instance of a Laplace distribution object. name: (optional) Name to use for created operations. default is "kl_laplace_laplace". Returns: Batchwise KL(a || b)
juraj-google-style
def set_privilege(self, name, value=None):
    """Configure the user privilege value in EOS.

    Args:
        name (str): The name of the user to create.
        value (int): The privilege value to assign to the user. Valid
            values are in the range of 0 to 15. When omitted, privilege 1
            is configured.

    Returns:
        True if the operation was successful otherwise False.

    Raises:
        TypeError: if the value is not in the valid range.
    """
    cmd = 'username %s' % name
    if value is not None:
        if not isprivilege(value):
            # Fixed misspelling in the error message ('priviledge').
            raise TypeError('privilege value must be between 0 and 15')
        cmd += ' privilege %s' % value
    else:
        cmd += ' privilege 1'
    return self.configure(cmd)
Configures the user privilege value in EOS Args: name (str): The name of the user to create value (int): The privilege value to assign to the user. Valid values are in the range of 0 to 15 Returns: True if the operation was successful otherwise False Raises: TypeError: if the value is not in the valid range
juraj-google-style
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Run the wrapped cell, then apply a residual connection to outputs.

    Args:
        inputs: cell inputs.
        state: cell state.
        cell_call_fn: Wrapped cell's method to use for step computation
            (cell's `__call__` or `call` method).
        **kwargs: Additional arguments passed to the wrapped cell's call.

    Returns:
        Tuple of (residual outputs, new state).

    Raises:
        TypeError: If cell inputs and outputs have different structure
            (type).
        ValueError: If cell inputs and outputs have different structure
            (value).
    """
    outputs, new_state = cell_call_fn(inputs, state, **kwargs)

    def assert_shape_match(inp, out):
        # Residual addition requires statically compatible shapes.
        inp.get_shape().assert_is_compatible_with(out.get_shape())

    def default_residual_fn(inputs, outputs):
        # Default residual: elementwise inputs + outputs after verifying
        # that the two nested structures and their shapes agree.
        nest.assert_same_structure(inputs, outputs)
        nest.map_structure(assert_shape_match, inputs, outputs)
        return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
    # Use a caller-supplied residual function when configured.
    res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
    return (res_outputs, new_state)
Run the cell and then apply the residual_fn on its inputs to its outputs. Args: inputs: cell inputs. state: cell state. cell_call_fn: Wrapped cell's method to use for step computation (cell's `__call__` or 'call' method). **kwargs: Additional arguments passed to the wrapped cell's `call`. Returns: Tuple of cell outputs and new state. Raises: TypeError: If cell inputs and outputs have different structure (type). ValueError: If cell inputs and outputs have different structure (value).
github-repos
def bessel_i1(x, name=None):
    """Compute the Bessel i1 function of `x` element-wise.

    Modified Bessel function of order 1. It is preferable to use the
    numerically stabler function `i1e(x)` instead.

    Args:
        x: A `Tensor` or `SparseTensor`. Must be one of the following
            types: `half`, `float32`, `float64`.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` or `SparseTensor`, respectively, with the same type
        as `x`.
    """
    with ops.name_scope(name, 'bessel_i1', [x]):
        result = gen_special_math_ops.bessel_i1(x)
    return result
Computes the Bessel i1 function of `x` element-wise. Modified Bessel function of order 1. It is preferable to use the numerically stabler function `i1e(x)` instead. >>> tf.math.special.bessel_i1([-1., -0.5, 0.5, 1.]).numpy() array([-0.5651591 , -0.25789431, 0.25789431, 0.5651591 ], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i1 @end_compatibility
github-repos
def get_variation_for_experiment(self, experiment_id):
    """Retrieve the variation ID recorded for the given experiment.

    Args:
        experiment_id: ID of the experiment whose variation is looked up.

    Returns:
        Variation ID corresponding to the experiment, or None if no
        decision is available.
    """
    fallback = {self.VARIATION_ID_KEY: None}
    decision = self.experiment_bucket_map.get(experiment_id, fallback)
    return decision.get(self.VARIATION_ID_KEY)
Helper method to retrieve variation ID for given experiment. Args: experiment_id: ID for experiment for which variation needs to be looked up for. Returns: Variation ID corresponding to the experiment. None if no decision available.
codesearchnet
def create_dir(self, directory_path, perm_bits=PERM_DEF):
    """Create `directory_path`, and all the parent directories.

    Helper method to set up your test faster.

    Args:
        directory_path: The full directory path to create.
        perm_bits: The permission bits as set by `chmod`.

    Returns:
        The newly created FakeDirectory object.

    Raises:
        OSError: if the directory already exists.
    """
    directory_path = self.make_string_path(directory_path)
    directory_path = self.absnormpath(directory_path)
    self._auto_mount_drive_if_needed(directory_path)
    if self.exists(directory_path, check_link=True):
        self.raise_os_error(errno.EEXIST, directory_path)
    path_components = self._path_components(directory_path)
    current_dir = self.root
    new_dirs = []
    for component in path_components:
        # Walk down the path, creating any missing component.
        directory = self._directory_content(current_dir, component)[1]
        if not directory:
            new_dir = FakeDirectory(component, filesystem=self)
            new_dirs.append(new_dir)
            current_dir.add_entry(new_dir)
            current_dir = new_dir
        else:
            # Follow symlinks to their target directory.
            if S_ISLNK(directory.st_mode):
                directory = self.resolve(directory.contents)
            current_dir = directory
            if directory.st_mode & S_IFDIR != S_IFDIR:
                self.raise_os_error(errno.ENOTDIR, current_dir.path)
    # Mark every newly created directory with the requested mode.
    for new_dir in new_dirs:
        new_dir.st_mode = S_IFDIR | perm_bits
    # NOTE(review): only the final directory receives a fresh inode here;
    # intermediate new directories keep their default st_ino — confirm
    # this is intended.
    self._last_ino += 1
    current_dir.st_ino = self._last_ino
    return current_dir
Create `directory_path`, and all the parent directories. Helper method to set up your test faster. Args: directory_path: The full directory path to create. perm_bits: The permission bits as set by `chmod`. Returns: The newly created FakeDirectory object. Raises: OSError: if the directory already exists.
juraj-google-style
def get_momentum_variable(self):
    """Extract the momentum variable from the optimizer.

    Returns:
        The optimizer's `rho` or `beta_1` attribute (checked in that
        order), or None when the optimizer defines neither.
    """
    optimizer = self.get_optimizer()
    for attr_name in ('rho', 'beta_1'):
        if hasattr(optimizer, attr_name):
            return getattr(optimizer, attr_name)
    return None
Extract values of momentum variables from optimizer Returns: optimizer's `rho` or `beta_1`
codesearchnet
def __getitem__(self, key: Union[int, slice, str, utils.KeyPath, 'DecisionPoint']) -> Union[None, 'DNA', List[Optional['DNA']]]:
    """Get an immediate child DNA or DNA in the sub-tree.

    Args:
        key: The key for retrieving the sub-DNA or sub-DNA list. One of:
            1) an integer or slice indexing the immediate children;
            2) a DecisionPoint (DNASpec), looked up via its `id`;
            3) a name (string) of a named decision;
            4) an ID (string or KeyPath) of a decision point.

    Returns:
        A DNA object, None (decision point exists but is inactive), or a
        list of optional DNAs when multiple decision points map to the
        key.

    Raises:
        KeyError: If the key cannot be resolved to a decision.
    """
    if isinstance(key, (int, slice)):
        # Integer/slice keys index the immediate children directly.
        return self.children[key]
    if isinstance(key, DNASpec):
        key = key.id
        return self._decision_by_id[key]
    else:
        # Prefer named decisions; fall back to lookup by decision ID.
        v = self.named_decisions.get(key, None)
        if v is None:
            v = self._decision_by_id[key]
        return v
Get an immediate child DNA or DNA in the sub-tree. Args: key: The key for retrieving the sub-DNA or sub-DNA list. The key should be one of: 1) An integer as the index of an immediate child DNA. 2) A name (string) for named decisions whose DNASpec has a not-None `name` argument. 3) An ID (string or KeyPath) for the decision point to retrieve. See `DNASpec.id` for details. 4) A DecisionPoint object whose decision value will be retrived. Returns: The return value should be one of the following: 1) A DNA object if the key only maps to a single DNA object. 2) None if the decision point exists but it's inactive. 3) A list of DNA or None if there are multiple decision points associated with the key.
github-repos
def _ExtractOAuth2Client(product_yaml_key, product_data, proxy_config):
    """Generate a GoogleOAuth2Client subclass using the given product_data.

    Note: the OAuth2-related keys are removed from `product_data` as a
    side effect, leaving only the non-OAuth2 configuration behind.

    Args:
        product_yaml_key: a string key identifying the product being
            configured.
        product_data: a dict containing the configurations for a given
            product.
        proxy_config: a ProxyConfig instance.

    Returns:
        An instantiated GoogleOAuth2Client subclass.

    Raises:
        googleads.errors.GoogleAdsValueError: If the OAuth2 configuration
            for the given product is misconfigured.
    """
    oauth2_kwargs = {'proxy_config': proxy_config}
    if all(((config in product_data) for config in _OAUTH2_INSTALLED_APP_KEYS)):
        # Installed-application flow: client id/secret + refresh token.
        oauth2_args = [product_data['client_id'], product_data['client_secret'], product_data['refresh_token']]
        oauth2_client = googleads.oauth2.GoogleRefreshTokenClient
        for key in _OAUTH2_INSTALLED_APP_KEYS:
            del product_data[key]
    elif all(((config in product_data) for config in _OAUTH2_SERVICE_ACCT_KEYS)):
        # Service-account flow: private key file + API scope, with an
        # optional delegated account ('sub').
        oauth2_args = [product_data['path_to_private_key_file'], googleads.oauth2.GetAPIScope(product_yaml_key)]
        oauth2_kwargs.update({'sub': product_data.get('delegated_account')})
        oauth2_client = googleads.oauth2.GoogleServiceAccountClient
        for key in _OAUTH2_SERVICE_ACCT_KEYS:
            del product_data[key]
        for optional_key in _OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL:
            if (optional_key in product_data):
                del product_data[optional_key]
    else:
        raise googleads.errors.GoogleAdsValueError(('Your yaml file is incorrectly configured for OAuth2. You need to specify credentials for either the installed application flow (%s) or service account flow (%s).' % (_OAUTH2_INSTALLED_APP_KEYS, _OAUTH2_SERVICE_ACCT_KEYS)))
    return oauth2_client(*oauth2_args, **oauth2_kwargs)
Generates an GoogleOAuth2Client subclass using the given product_data. Args: product_yaml_key: a string key identifying the product being configured. product_data: a dict containing the configurations for a given product. proxy_config: a ProxyConfig instance. Returns: An instantiated GoogleOAuth2Client subclass. Raises: A GoogleAdsValueError if the OAuth2 configuration for the given product is misconfigured.
codesearchnet
def mme_nodes(mme_base_url, token):
    """Return the available MatchMaker nodes.

    Args:
        mme_base_url(str): base URL of MME service.
        token(str): MME server authorization token.

    Returns:
        nodes(list): a list of node dictionaries; empty when the URL or
            token is missing.
    """
    if not (mme_base_url and token):
        return []
    url = mme_base_url + '/nodes'
    nodes = matchmaker_request(url=url, token=token, method='GET')
    LOG.info('Matchmaker has the following connected nodes:{}'.format(nodes))
    return nodes
Return the available MatchMaker nodes Args: mme_base_url(str): base URL of MME service token(str): MME server authorization token Returns: nodes(list): a list of node dictionaries
codesearchnet
def validate_allowed_values(allowed_values, value):
    """Support a variable defining which values it allows.

    Args:
        allowed_values (Optional[list]): A list of allowed values from
            the variable definition.
        value (obj): The object representing the value provided for the
            variable.

    Returns:
        bool: Whether or not the value is valid.
    """
    if not allowed_values:
        # No restriction defined: everything is valid.
        return True
    if isinstance(value, CFNParameter):
        # CFN parameters are resolved later and always pass here.
        return True
    return value in allowed_values
Support a variable defining which values it allows. Args: allowed_values (Optional[list]): A list of allowed values from the variable definition value (obj): The object representing the value provided for the variable Returns: bool: Boolean for whether or not the value is valid.
juraj-google-style
async def on_message(message):
    """The on_message event handler for this module.

    Args:
        message (discord.Message): Input message
    """
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    data = datatools.get_data()

    # Ignore servers where this module is not activated.
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Only react to messages from other users inside a server.
    if server is not None and author != channel.server.me:
        prefix = data["discord"]["servers"][server.id]["prefix"]
        if content.startswith(prefix):
            # Split "<prefix><command> <args...>" into its parts.
            package = content.split(" ")
            command = package[0][len(prefix):]
            args = package[1:]
            arg = ' '.join(args)

            # Lazily (re)create the per-server music player.
            if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':
                _data.cache[server.id] = _musicplayer.MusicPlayer(server.id)

            # Delete the triggering message for recognized player commands.
            if command in ['play', 'playnext', 'playnow', 'playshuffle', 'insert', 'pause', 'resume', 'skip',
                           'remove', 'rewind', 'restart', 'shuffle', 'volume', 'stop', 'destroy', 'front',
                           'movehere', 'settopic', 'cleartopic', 'notopic', 'loop']:
                try:
                    await client.delete_message(message)
                except discord.errors.NotFound:
                    logger.warning("Could not delete music player command message - NotFound")
                except discord.errors.Forbidden:
                    logger.warning("Could not delete music player command message - Forbidden")

            # Dispatch the command to the server's music player.
            if command == 'play':
                await _data.cache[server.id].play(author, channel, arg)
            if command == 'playnext':
                await _data.cache[server.id].play(author, channel, arg, index=1)
            if command == 'playnow':
                await _data.cache[server.id].play(author, channel, arg, index=1, stop_current=True)
            if command == 'playshuffle':
                await _data.cache[server.id].play(author, channel, arg, shuffle=True)
            if command == 'insert':
                # "insert <index> <query...>"; without an index, behaves
                # like a plain play.
                if len(args) >= 2:
                    index = args[0]
                    query = ' '.join(args[1:])
                    await _data.cache[server.id].play(author, channel, query, index=index)
                else:
                    await _data.cache[server.id].play(author, channel, arg)
            elif command == 'pause':
                await _data.cache[server.id].pause()
            elif command == 'resume':
                await _data.cache[server.id].resume()
            elif command == 'skip':
                await _data.cache[server.id].skip(query=arg)
            elif command == 'remove':
                await _data.cache[server.id].remove(index=arg)
            elif command == 'rewind':
                await _data.cache[server.id].rewind(query=arg)
            elif command == 'restart':
                # Restart is a rewind to the beginning of the track.
                await _data.cache[server.id].rewind(query="0")
            elif command == 'shuffle':
                await _data.cache[server.id].shuffle()
            elif command == 'loop':
                await _data.cache[server.id].set_loop(arg)
            elif command == 'stop':
                await _data.cache[server.id].stop(log_stop=True)
            elif command == 'volume':
                await _data.cache[server.id].setvolume(arg)
            elif command == 'settopic':
                await _data.cache[server.id].set_topic_channel(channel)
            elif command == 'cleartopic' or command == 'notopic':
                await _data.cache[server.id].clear_topic_channel(channel)
            elif command == 'nowplaying':
                await _data.cache[server.id].nowplaying_info(channel)
            elif command == 'destroy':
                await _data.cache[server.id].destroy()
            elif command == 'front' or command == 'movehere':
                await _data.cache[server.id].movehere(channel)
The on_message event handler for this module Args: message (discord.Message): Input message
juraj-google-style
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
    """Computes jacobian of `output` w.r.t. `inputs`.

    Args:
        output: A tensor.
        inputs: A tensor or a nested structure of tensor objects.
        use_pfor: If true, uses pfor for computing the jacobian. Else
            uses tf.while_loop.
        parallel_iterations: A knob to control how many iterations are
            dispatched in parallel. This knob can be used to control the
            total memory usage.

    Returns:
        A tensor or a nested structure of tensors with the same structure
        as `inputs`. Each entry is the jacobian of `output` w.r.t. the
        corresponding value in `inputs`. If output has shape
        [y_1, ..., y_n] and inputs_i has shape [x_1, ..., x_m], the
        corresponding jacobian has shape [y_1, ..., y_n, x_1, ..., x_m].
        Note that in cases where the gradient is sparse (IndexedSlices),
        jacobian function currently makes it dense and returns a Tensor
        instead. This may change in the future.
    """
    flat_inputs = nest.flatten(inputs)
    output_tensor_shape = output.shape
    output_shape = array_ops.shape(output)
    # Flatten the output so each loop iteration differentiates exactly
    # one scalar element of it.
    output = array_ops.reshape(output, [-1])

    def loop_fn(i):
        # Gradients of the i-th output element w.r.t. every input.
        y = array_ops.gather(output, i)
        return gradient_ops.gradients(y, flat_inputs)
    try:
        output_size = int(output.shape[0])
    except TypeError:
        # Static size unknown; fall back to the dynamic shape.
        output_size = array_ops.shape(output)[0]
    if use_pfor:
        pfor_outputs = control_flow_ops.pfor(loop_fn, output_size, parallel_iterations=parallel_iterations)
    else:
        pfor_outputs = control_flow_ops.for_loop(loop_fn, [output.dtype] * len(flat_inputs), output_size, parallel_iterations=parallel_iterations)
    for i, out in enumerate(pfor_outputs):
        if isinstance(out, tensor.Tensor):
            # Restore the original output dimensions in front of each
            # per-input gradient block.
            new_shape = array_ops.concat([output_shape, array_ops.shape(out)[1:]], axis=0)
            out = array_ops.reshape(out, new_shape)
            out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
            pfor_outputs[i] = out
    return nest.pack_sequence_as(inputs, pfor_outputs)
Computes jacobian of `output` w.r.t. `inputs`. Args: output: A tensor. inputs: A tensor or a nested structure of tensor objects. use_pfor: If true, uses pfor for computing the jacobian. Else uses tf.while_loop. parallel_iterations: A knob to control how many iterations and dispatched in parallel. This knob can be used to control the total memory usage. Returns: A tensor or a nested structure of tensors with the same structure as `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has shape [x_1, ..., x_m], the corresponding jacobian has shape [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is sparse (IndexedSlices), jacobian function currently makes it dense and returns a Tensor instead. This may change in the future.
github-repos
def set_dimension(tensor, axis, value):
    """Set the length of a tensor along the specified dimension.

    Args:
        tensor: Tensor to define the shape of.
        axis: Dimension to set the static shape for.
        value: Integer holding the length.

    Raises:
        ValueError: When the tensor already has a different length
            specified for that dimension.
    """
    dims = tensor.shape.as_list()
    current = dims[axis]
    if current not in (value, None):
        template = 'Cannot set dimension {} of tensor {} to {}; is already {}.'
        raise ValueError(template.format(axis, tensor.name, value, current))
    dims[axis] = value
    tensor.set_shape(dims)
Set the length of a tensor along the specified dimension. Args: tensor: Tensor to define shape of. axis: Dimension to set the static shape for. value: Integer holding the length. Raises: ValueError: When the tensor already has a different length specified.
codesearchnet
def money(s, thousand_sep=".", decimal_sep=","):
    """Convert a money amount in a string to a Decimal object.

    With the default arguments, the format is expected to be
    ``-38.500,00``, where dots separate thousands and a comma the
    decimals.

    Args:
        s: The string-encoded money amount.
        thousand_sep: Separator for thousands.
        decimal_sep: Separator for decimals.

    Returns:
        A ``Decimal`` object of the string-encoded money amount.
    """
    normalized = s.replace(thousand_sep, "").replace(decimal_sep, ".")
    return Decimal(normalized)
Converts money amount in string to a Decimal object. With the default arguments, the format is expected to be ``-38.500,00``, where dots separate thousands and comma the decimals. Args: thousand_sep: Separator for thousands. decimal_sep: Separator for decimals. Returns: A ``Decimal`` object of the string encoded money amount.
juraj-google-style
def add_real_directory(self, source_path, read_only=True, lazy_read=True, target_path=None):
    """Create a fake directory mirroring the real directory at a path.

    Add entries in the fake directory corresponding to the entries in
    the real directory.

    Args:
        source_path: The path to the existing real directory.
        read_only: If set, all files under the directory are treated as
            read-only, e.g. a write access raises an exception;
            otherwise, writing to the files changes the fake files only
            as usual.
        lazy_read: If set (default), directory contents are only read
            when accessed, and only until the needed subdirectory level.
            Note that file system size is then only updated when the
            contents are read.
        target_path: If given, the target directory; otherwise the
            target directory is the same as `source_path`.

    Returns:
        The newly created FakeDirectory object.

    Raises:
        OSError: if the directory does not exist in the real file
            system.
        IOError: if the directory already exists in the fake file
            system.
    """
    source_path = self._path_without_trailing_separators(source_path)
    if (not os.path.exists(source_path)):
        self.raise_io_error(errno.ENOENT, source_path)
    target_path = (target_path or source_path)
    if lazy_read:
        # Create only the parent chain now; the directory's contents are
        # materialized on first access.
        parent_path = os.path.split(target_path)[0]
        if self.exists(parent_path):
            parent_dir = self.get_object(parent_path)
        else:
            parent_dir = self.create_dir(parent_path)
        new_dir = FakeDirectoryFromRealDirectory(source_path, self, read_only, target_path)
        parent_dir.add_entry(new_dir)
        self._last_ino += 1
        new_dir.st_ino = self._last_ino
    else:
        # Eagerly copy the whole real tree into the fake filesystem.
        new_dir = self.create_dir(target_path)
        for (base, _, files) in os.walk(source_path):
            new_base = os.path.join(new_dir.path, os.path.relpath(base, source_path))
            for fileEntry in files:
                self.add_real_file(os.path.join(base, fileEntry), read_only, os.path.join(new_base, fileEntry))
    return new_dir
Create a fake directory corresponding to the real directory at the specified path. Add entries in the fake directory corresponding to the entries in the real directory. Args: source_path: The path to the existing directory. read_only: If set, all files under the directory are treated as read-only, e.g. a write access raises an exception; otherwise, writing to the files changes the fake files only as usually. lazy_read: If set (default), directory contents are only read when accessed, and only until the needed subdirectory level. .. note:: This means that the file system size is only updated at the time the directory contents are read; set this to `False` only if you are dependent on accurate file system size in your test target_path: If given, the target directory, otherwise, the target directory is the same as `source_path`. Returns: the newly created FakeDirectory object. Raises: OSError: if the directory does not exist in the real file system. IOError: if the directory already exists in the fake file system.
codesearchnet
def is_diagonal_scale(scale):
    """Return `True` if `scale` is a `LinearOperator` known to be diag.

    Args:
        scale: `LinearOperator` instance.

    Returns:
        Python `bool`.

    Raises:
        TypeError: If `scale` is not a `LinearOperator`.
    """
    if (not isinstance(scale, tf.linalg.LinearOperator)):
        raise TypeError(("Expected argument 'scale' to be instance of LinearOperator. Found: %s" % scale))
    diagonal_operator_types = (
        tf.linalg.LinearOperatorIdentity,
        tf.linalg.LinearOperatorScaledIdentity,
        tf.linalg.LinearOperatorDiag,
    )
    return isinstance(scale, diagonal_operator_types)
Returns `True` if `scale` is a `LinearOperator` that is known to be diag. Args: scale: `LinearOperator` instance. Returns: Python `bool`. Raises: TypeError: If `scale` is not a `LinearOperator`.
codesearchnet
def _slot_dict(self, slot_name): named_slots = self._slots.get(slot_name, None) if named_slots is None: named_slots = {} self._slots[slot_name] = named_slots return named_slots
Returns a dict for caching slots created under the given name. Args: slot_name: Name for the slot. Returns: A dict that maps primary `Variable` objects to the slot created for that variable, under the given slot name.
github-repos
def get_cohp_by_label(self, label):
    """Get a specific COHP object to simplify plotting.

    Args:
        label: string (for newer Lobster versions: a number), or
            "average" (case-insensitive) for the averaged COHP.

    Returns:
        The matching Cohp object. If the label does not exist, a message
        is printed and None is returned.
    """
    if label.lower() == "average":
        return Cohp(efermi=self.efermi, energies=self.energies,
                    cohp=self.cohp, are_coops=self.are_coops,
                    icohp=self.icohp)
    try:
        entry = self.all_cohps[label]
        return Cohp(efermi=self.efermi, energies=self.energies,
                    cohp=entry.get_cohp(spin=None, integrated=False),
                    are_coops=self.are_coops,
                    icohp=entry.get_icohp(spin=None))
    except KeyError:
        # Unknown label: report and fall through, returning None.
        print("The label does not exist")
Get specific COHP object. Args: label: string (for newer Lobster versions: a number) Returns: Returns the COHP object to simplify plotting
juraj-google-style
def getWhoisInfo(domain):
    """Try to recover the whois info from a domain.

    Args:
        domain: The domain to verify.

    Returns:
        list: i3visio-style entities (dicts with `value`, `type` and
            `attributes` keys) extracted from the whois record.

    Raises:
        Exception: If the domain could not be resolved.
    """
    new = []

    # Best-effort extraction: each field is added only if available.
    # Bare `except:` clauses replaced with `except Exception:` so that
    # SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        emails = {}
        emails["type"] = "i3visio.alias"
        emails["value"] = str(domain.split(".")[0])
        emails["attributes"] = []
        new.append(emails)
    except Exception:
        pass

    info = whois.whois(domain)

    if info.status is None:
        raise Exception("UnknownDomainError: " + domain + " could not be resolved.")

    try:
        emails = {}
        emails["type"] = "i3visio.email"
        # whois may report a single email or a list; normalize to a list.
        if type(info.emails) is not list:
            aux = [info.emails]
            emails["value"] = json.dumps(aux)
        else:
            emails["value"] = json.dumps(info.emails)
        emails["attributes"] = []
        new.append(emails)
    except Exception:
        pass

    try:
        tmp = {}
        tmp["type"] = "i3visio.location.country"
        tmp["value"] = str(info.country)
        tmp["attributes"] = []
        new.append(tmp)
    except Exception:
        pass

    try:
        tmp = {}
        tmp["type"] = "i3visio.registrar"
        tmp["value"] = str(info.registrar)
        tmp["attributes"] = []
        new.append(tmp)
    except Exception:
        pass

    try:
        tmp = {}
        tmp["type"] = "i3visio.fullname"
        try:
            tmp["value"] = str(info.name)
        except Exception:
            # Fall back to the raw (possibly non-str-convertible) value.
            tmp["value"] = info.name
        tmp["attributes"] = []
        new.append(tmp)
    except Exception:
        pass

    return new
Method that tries to recover the whois info from a domain. Args: ----- domain: The domain to verify. Returns: -------- dict: A dictionary containing the result as an i3visio entity with its `value`, `type` and `attributes`.
juraj-google-style
def read(self, vals):
    """Populate ground temperature records from raw string values.

    Args:
        vals (list): list of strings representing values; the first
            element is the record count, followed by `field_count`
            fields per record.
    """
    pos = 0
    count = int(vals[pos])
    pos += 1
    for _ in range(count):
        record = GroundTemperature()
        record.read(vals[pos:pos + record.field_count])
        self.add_ground_temperature(record)
        pos += record.field_count
Read values. Args: vals (list): list of strings representing values
juraj-google-style
def get_keyvault(access_token, subscription_id, rgname, vault_name):
    """Get details about the named key vault.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        vault_name (str): Name of the key vault.

    Returns:
        HTTP response. JSON body of key vault properties.
    """
    endpoint = (get_rm_endpoint() +
                '/subscriptions/' + subscription_id +
                '/resourcegroups/' + rgname +
                '/providers/Microsoft.KeyVault/vaults/' + vault_name +
                '?api-version=' + KEYVAULT_API)
    return do_get(endpoint, access_token)
Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties.
codesearchnet
def find_in_coord_list(coord_list, coord, atol=1e-8):
    """Find the indices of matches of a particular coord in a coord_list.

    Args:
        coord_list: List of coords to test.
        coord: Specific coordinates.
        atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar
            and array.

    Returns:
        Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
    """
    if len(coord_list) == 0:
        return []
    deltas = np.abs(np.array(coord_list) - np.array(coord)[None, :])
    row_matches = np.all(deltas < atol, axis=1)
    return np.where(row_matches)[0]
Find the indices of matches of a particular coord in a coord_list. Args: coord_list: List of coords to test coord: Specific coordinates atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and array. Returns: Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
juraj-google-style
def _as_graph_element(obj): conv_fn = getattr(obj, '_as_graph_element', None) if conv_fn and callable(conv_fn): return conv_fn() return None
Convert `obj` to a graph element if possible, otherwise return `None`. Args: obj: Object to convert. Returns: The result of `obj._as_graph_element()` if that method is available; otherwise `None`.
github-repos
def _open_debug_interface(self, conn_id, callback, connection_string=None): self._try_connect(connection_string) callback(conn_id, self.id, True, None)
Enable debug interface for this IOTile device Args: conn_id (int): the unique identifier for the connection callback (callback): Callback to be called when this command finishes callback(conn_id, adapter_id, success, failure_reason)
codesearchnet
def read_hdf(cls, path, key=None):
    """Read a CellDataFrame from an hdf5 file.

    Args:
        cls: The CellDataFrame class (used as alternate constructor).
        path (str): the path to read from.
        key (str): the name of the location to read from.

    Returns:
        CellDataFrame
    """
    df = pd.read_hdf(path, key)
    # JSON-encoded columns are stored as strings in HDF5; decode each
    # back into Python objects.
    df['scored_calls'] = df['scored_calls'].apply((lambda x: json.loads(x)))
    df['channel_values'] = df['channel_values'].apply((lambda x: json.loads(x)))
    df['regions'] = df['regions'].apply((lambda x: json.loads(x)))
    df['phenotype_calls'] = df['phenotype_calls'].apply((lambda x: json.loads(x)))
    df['neighbors'] = df['neighbors'].apply((lambda x: json.loads(x)))
    # JSON object keys are always strings; restore integer neighbor keys
    # (non-dict entries become NaN).
    df['neighbors'] = df['neighbors'].apply((lambda x: (np.nan if (not isinstance(x, dict)) else dict(zip([int(y) for y in x.keys()], x.values())))))
    df['frame_shape'] = df['frame_shape'].apply((lambda x: tuple(json.loads(x))))
    df = cls(df)
    # Recover the microns-per-pixel attribute stored alongside the table.
    f = h5py.File(path, 'r')
    mpp = f[key].attrs['microns_per_pixel']
    if (not np.isnan(mpp)):
        df.microns_per_pixel = mpp
    f.close()
    return df
Read a CellDataFrame from an hdf5 file. Args: path (str): the path to read from key (str): the name of the location to read from Returns: CellDataFrame
codesearchnet