code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def load(self, filething):
    """Parse an ASF file and populate ``self.info`` and ``self.tags``.

    Args:
        filething (filething): open file wrapper exposing ``.fileobj``.

    Raises:
        mutagen.MutagenError: presumably, on malformed input — raised by
            ``HeaderObject.parse_full`` (TODO confirm; not visible here).
    """
    fileobj = filething.fileobj
    self.info = ASFInfo()
    self.tags = ASFTags()
    self._tags = {}
    self._header = HeaderObject.parse_full(self, fileobj)
    # Merge per-object tag lists into self.tags in a fixed priority order;
    # every GUID collected during parsing must be consumed here.
    known_guids = (ContentDescriptionObject.GUID,
                   ExtendedContentDescriptionObject.GUID,
                   MetadataObject.GUID,
                   MetadataLibraryObject.GUID)
    for guid in known_guids:
        self.tags.extend(self._tags.pop(guid, []))
    assert not self._tags
load(filething) Args: filething (filething) Raises: mutagen.MutagenError
juraj-google-style
def collect_filters_to_first_location_occurrence(compound_match_query):
    """Move every location's filters onto that location's first occurrence.

    For each MatchQuery, all filters for a given location are gathered and
    applied only where the location first appears; later occurrences carry
    no filters. This works around OrientDB's handling of repeated locations.

    Args:
        compound_match_query: CompoundMatchQuery containing MatchQuery objects.

    Returns:
        CompoundMatchQuery with filters collected onto first occurrences.
    """
    rewritten_queries = []
    for match_query in compound_match_query.match_queries:
        filters_by_location = _construct_location_to_filter_list(match_query)
        filtered_locations = set()
        rewritten_traversals = []
        for traversal in match_query.match_traversals:
            new_traversal, newly_filtered = _apply_filters_to_first_location_occurrence(
                traversal, filters_by_location, filtered_locations)
            rewritten_traversals.append(new_traversal)
            # Track which locations already received their filters so later
            # traversals leave them unfiltered.
            filtered_locations.update(newly_filtered)
        rewritten_queries.append(
            MatchQuery(match_traversals=rewritten_traversals,
                       folds=match_query.folds,
                       output_block=match_query.output_block,
                       where_block=match_query.where_block))
    return CompoundMatchQuery(match_queries=rewritten_queries)
Collect all filters for a particular location to the first instance of the location. Adding edge field non-existence filters in `_prune_traverse_using_omitted_locations` may result in filters being applied to locations after their first occurrence. OrientDB does not resolve this behavior correctly. Therefore, for each MatchQuery, we collect all the filters for each location in a list. For each location, we make a conjunction of the filter list (`_predicate_list_to_where_block`) and apply the new filter to only the first instance of that location. All other instances will have no filters (None). Args: compound_match_query: CompoundMatchQuery object containing 2^n MatchQuery objects Returns: CompoundMatchQuery with all filters for each location applied to the first instance of that location.
codesearchnet
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):
    """Concat each block with the margins of adjacent blocks.

    Shifted copies of whole blocks (and partial-block margins) are gathered
    from neighbors along ``blocks_dim`` and concatenated along
    ``block_size_dim``.

    Args:
        x: a Tensor.
        blocks_dim: a Dimension in x.shape.
        block_size_dim: a Dimension in x.shape.
        halo_size: an integer.
        wrap: a boolean; whether the blocks dimension wraps around.

    Returns:
        a Tensor like x but with block_size_dim grown by 2*halo_size.
    """
    if halo_size == 0:
        return x
    block_size = block_size_dim.size
    partial_size = halo_size % block_size
    # NOTE(review): this statement was truncated in the source; reconstructed
    # as floor division — the only value consistent with its later uses
    # (number of whole blocks covered by the halo).
    num_complete_blocks = halo_size // block_size
    parts = [x]
    # Prepend/append whole-block shifts, nearest neighbors innermost.
    for i in xrange(1, num_complete_blocks + 1):
        parts = ([shift(x, i, blocks_dim, wrap)] + parts
                 + [shift(x, -i, blocks_dim, wrap)])
    if partial_size > 0:
        # Remaining fraction of a block: take margins and shift them over.
        left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name)
        right_margin = mtf_slice(x, block_size_dim.size - partial_size,
                                 partial_size, block_size_dim.name)
        parts = ([shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]
                 + parts
                 + [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)])
    return concat(parts, block_size_dim.name)
Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size.
codesearchnet
def _extract_all_responses(self, resources, api_endpoint, api_name): all_responses, resources = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources) response_chunks = self._request_reports("resource", resource_chunks, api_endpoint) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value.
juraj-google-style
def get_source_event_declaration(self, event):
    """Return the source mapping where the event is declared.

    Args:
        event (str): event name.

    Returns:
        dict: sourceMapping of the first event with that name.
    """
    matches = (candidate.source_mapping
               for candidate in self.events
               if candidate.name == event)
    return next(matches)
Return the source mapping where the event is declared Args: event (str): event name Returns: (dict): sourceMapping
juraj-google-style
def __init__(self, cell):
    """Creates a new IntGaugeCell.

    Args:
        cell: opaque handle to the underlying gauge cell — presumably a C
            pointer to a TFE_MonitoringIntGaugeCell (TODO confirm; only
            stored here, never dereferenced in this block).
    """
    self._cell = cell
Creates a new IntGaugeCell. Args: cell: A c pointer of TFE_MonitoringIntGaugeCell.
github-repos
def object(self, key):
    """Retrieves a Storage Object for the specified key in this bucket.

    The object need not exist; this only constructs the wrapper.

    Args:
        key: the key of the object within the bucket.

    Returns:
        An Object instance representing the specified key.
    """
    return _object.Object(self._name, key, context=self._context)
Retrieves a Storage Object for the specified key in this bucket. The object need not exist. Args: key: the key of the object within the bucket. Returns: An Object instance representing the specified key.
juraj-google-style
def _indexOfEndTag(istack): if (len(istack) <= 0): return 0 if (not istack[0].isOpeningTag()): return 0 cnt = 0 opener = istack[0] for (index, el) in enumerate(istack[1:]): if (el.isOpeningTag() and (el.getTagName().lower() == opener.getTagName().lower())): cnt += 1 elif el.isEndTagTo(opener): if (cnt == 0): return (index + 1) cnt -= 1 return 0
Go through `istack` and search endtag. Element at first index is considered as opening tag. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: int: Index of end tag or 0 if not found.
codesearchnet
def create_ref(profile, ref, sha):
    """Create a git ref via the API.

    Args:
        profile: profile dict with repo and auth token.
        ref: the ref to create, e.g. ``heads/my-feature-branch``.
        sha: SHA of the commit the ref should point to.

    Returns:
        A dict with data about the created ref.
    """
    payload = {'ref': 'refs/' + ref, 'sha': sha}
    data = api.post_request(profile, '/refs', payload)
    return prepare(data)
Create a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to create, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref.
codesearchnet
def usufyToOdsExport(d, fPath):
    """Workaround to export results to a .ods file, appending to existing data.

    Args:
        d: Data to export.
        fPath: File path for the output file.
    """
    from pyexcel_ods import get_data, save_data
    try:
        oldData = {'OSRFramework': get_data(fPath)}
    except Exception:
        # FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Missing/unreadable file -> start from empty sheet.
        oldData = {'OSRFramework': []}
    tabularData = _generateTabularData(d, oldData)
    save_data(fPath, tabularData)
Workaround to export to a .ods file. Args: ----- d: Data to export. fPath: File path for the output file.
codesearchnet
def coerce(self, value):
    """Convert text values into boolean values.

    True values (case insensitive): 'yes', 'true', '1'.
    False values (case insensitive): 'no', 'false', '0'.

    Args:
        value (str or bool): The value to coerce.

    Raises:
        TypeError: If the value is not a bool or string.
        ValueError: If the string is not an acceptable value.

    Returns:
        bool: The True/False value represented.
    """
    if isinstance(value, bool):
        return value
    if not hasattr(value, 'lower'):
        raise TypeError('Value is not bool or string.')
    lowered = value.lower()
    if lowered in ('yes', 'true', '1'):
        return True
    if lowered in ('no', 'false', '0'):
        return False
    raise ValueError('Could not coerce {0} to a bool.'.format(value))
Convert text values into boolean values. True values are (case insensitive): 'yes', 'true', '1'. False values are (case insensitive): 'no', 'false', '0'. Args: value (str or bool): The value to coerce. Raises: TypeError: If the value is not a bool or string. ValueError: If the value is not bool or an acceptable value. Returns: bool: The True/False value represented.
juraj-google-style
def _get_call_rng(self, training): if training: return self.seed_generator.next() else: return None
Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `call_fn`. By default, this returns a single `PRNGKey` retrieved by calling `self.seed_generator.next()` when `training` is `True`, and `None` when `training` is `False`. Override this to return a different structure or to pass RNGs in inference mode too. Returns: a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as the `rng` argument of `call_fn`.
github-repos
def get_all(cls):
    """Return every stored issue of this class's issue type.

    Returns:
        dict mapping issue_id to a wrapped issue object.
    """
    type_id = IssueType.get(cls.issue_type).issue_type_id
    matching = db.Issue.find(Issue.issue_type_id == type_id)
    return {record.issue_id: cls(record) for record in matching}
Returns a list of all issues of a given type Returns: list of issue objects
codesearchnet
def remove_object_from_list(self, obj, list_element):
    """Remove an object from a list element.

    Args:
        obj: A JSSObject, an id (int), or a name (str) identifying the item.
        list_element: An Element, or a string path to that element.

    Raises:
        ValueError: If more than one child matches ``obj``.
    """
    list_element = self._handle_location(list_element)
    if isinstance(obj, JSSObject):
        # Match children by the object's id.
        results = [item for item in list_element.getchildren() if (item.findtext('id') == obj.id)]
    elif isinstance(obj, (int, basestring)):
        # NOTE: `basestring` implies Python 2. Ints/strings match by id text
        # or by name text.
        results = [item for item in list_element.getchildren() if ((item.findtext('id') == str(obj)) or (item.findtext('name') == obj))]
    # NOTE(review): if obj is neither branch's type, `results` is unbound
    # here and a NameError follows — presumably callers never pass that.
    if (len(results) == 1):
        list_element.remove(results[0])
    elif (len(results) > 1):
        raise ValueError('There is more than one matching object at that path!')
Remove an object from a list element. Args: obj: Accepts JSSObjects, id's, and names list_element: Accepts an Element or a string path to that element
codesearchnet
def laplacian_pyramid_image(shape, n_levels=4, sd=None):
    """Simple laplacian-pyramid parameterization of an image.

    For more flexibility, use a sum of lowres_tensor()s.

    Args:
        shape: shape of resulting image, [batch, width, height, channels].
        n_levels: number of pyramid levels.
        sd: standard deviation of parameter initialization.

    Returns:
        tensor with shape from the first argument.
    """
    batch_dims = shape[:-3]
    w, h, ch = shape[-3:]
    pyramid = 0
    for n in range(n_levels):
        k = 2 ** n
        # NOTE(review): this call was truncated in the source; reconstructed so
        # level n is parameterized on a grid downscaled by 2**n in each of
        # width and height, then upsampled by lowres_tensor to `shape`.
        pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd)
    return pyramid
Simple laplacian pyramid parameterization of an image. For more flexibility, use a sum of lowres_tensor()s. Args: shape: shape of resulting image, [batch, width, height, channels]. n_levels: number of levels of laplacian pyramid. sd: standard deviation of param initialization. Returns: tensor with shape from first argument.
codesearchnet
def get_pose_error(target_pose, current_pose):
    """Compute target_pose - current_pose as a 6-dim error vector.

    The first 3 components are the translational error; the last 3 are a
    rotational error built from cross products of the rotation columns.

    Args:
        target_pose: a 4x4 homogeneous matrix for the target pose.
        current_pose: a 4x4 homogeneous matrix for the current pose.

    Returns:
        A 6-dim numpy array for the pose error.
    """
    # FIX: the source used invalid slice syntax `pose[(:3, 3)]` (extraction
    # artifact); correct numpy indexing is `pose[:3, 3]` etc.
    error = np.zeros(6)
    target_pos = target_pose[:3, 3]
    current_pos = current_pose[:3, 3]
    pos_err = target_pos - current_pos
    # Columns of the current and desired rotation matrices.
    r1 = current_pose[:3, 0]
    r2 = current_pose[:3, 1]
    r3 = current_pose[:3, 2]
    r1d = target_pose[:3, 0]
    r2d = target_pose[:3, 1]
    r3d = target_pose[:3, 2]
    rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d))
    error[:3] = pos_err
    error[3:] = rot_err
    return error
Computes the error corresponding to target pose - current pose as a 6-dim vector. The first 3 components correspond to translational error while the last 3 components correspond to the rotational error. Args: target_pose: a 4x4 homogenous matrix for the target pose current_pose: a 4x4 homogenous matrix for the current pose Returns: A 6-dim numpy array for the pose error.
codesearchnet
def get_params(width, height, distortion_scale):
    """Get parameters for a random perspective transform.

    Args:
        width: width of the image.
        height: height of the image.
        distortion_scale: fraction (0..1) controlling corner displacement.

    Returns:
        (startpoints, endpoints): corner lists, each ordered
        [top-left, top-right, bottom-right, bottom-left], for the original
        and the transformed image respectively.
    """
    half_w = int(width / 2)
    half_h = int(height / 2)
    dx = int(distortion_scale * half_w)
    dy = int(distortion_scale * half_h)
    # Each corner is jittered within a distortion_scale-sized box near it.
    topleft = (random.randint(0, dx), random.randint(0, dy))
    topright = (random.randint(width - dx - 1, width - 1),
                random.randint(0, dy))
    botright = (random.randint(width - dx - 1, width - 1),
                random.randint(height - dy - 1, height - 1))
    botleft = (random.randint(0, dx),
               random.randint(height - dy - 1, height - 1))
    startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
    endpoints = [topleft, topright, botright, botleft]
    return startpoints, endpoints
Get parameters for ``perspective`` for a random perspective transform. Args: width : width of the image. height : height of the image. distortion_scale : degree of distortion, controlling how far corners may be displaced. Returns: List containing [top-left, top-right, bottom-right, bottom-left] of the original image, List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
juraj-google-style
def create_branch_and_checkout(self, branch_name: str):
    """Create ``branch_name`` and switch the working tree to it.

    Args:
        branch_name: name of the branch to create and check out.
    """
    self.create_branch(branch_name)
    self.checkout(branch_name)
Creates a new branch if it doesn't exist Args: branch_name: branch name
juraj-google-style
def DeserializeExclusiveData(self, reader):
    """Deserialize the transaction-specific fields (script and gas).

    Args:
        reader (neo.IO.BinaryReader): source stream.

    Raises:
        Exception: if the version is unsupported or the script is empty
            or the gas is negative.
    """
    if self.Version > 1:
        raise Exception('Invalid format')
    self.Script = reader.ReadVarBytes()
    if not len(self.Script):
        raise Exception('Invalid Format')
    if self.Version >= 1:
        # Version 1 carries an explicit gas amount, which must not be negative.
        self.Gas = reader.ReadFixed8()
        if self.Gas < Fixed8.Zero():
            raise Exception("Invalid Format")
    else:
        self.Gas = Fixed8(0)
Deserialize full object. Args: reader (neo.IO.BinaryReader): Raises: Exception: If the version read is incorrect.
juraj-google-style
def ensure_value_to_cell(value):
    """Ensure that a value is wrapped in a Python cell object.

    Args:
        value: any value that needs to be cast to the cell type.

    Returns:
        The value wrapped as a cell object, or the value itself if it is
        already a cell (see function "func_load").
    """
    def _capture():
        value  # closes over `value`, creating a cell in __closure__
    wrapped = _capture.__closure__[0]
    if isinstance(value, type(wrapped)):
        # Already a cell: return it untouched.
        return value
    return wrapped
Ensures that a value is converted to a python cell object. Args: value: Any value that needs to be casted to the cell type Returns: A value wrapped as a cell object (see function "func_load")
github-repos
# NOTE(review): this recipe body appears truncated/garbled by extraction — an
# unterminated literal around 'dfareporting and a mid-call cut before the second
# dcm(...) step. Preserved verbatim below; restore from the upstream StarThinker
# cm360_segmentology recipe before use. It builds a BigQuery dataset, pulls a
# CM360 KPI report filtered by advertisers, normalizes it, and correlates it
# with Census zip-code data.
def recipe_cm360_segmentology(config, account, auth_read, auth_write, recipe_name, date_range, recipe_slug, advertisers): dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug}) bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}}) google_api(config, {'auth': 'user', 'api': 'dfareporting', 'version': 'v3.4', 'function': 'accounts.get', 'kwargs': {'id': account, 'fields': 'id,name'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'CM360_Account'}}}) dcm(config, {'auth': auth_read, 'report': {'filters': {'advertiser': {'values': advertisers}}, 'account': account, 'body': {'name': recipe_name, 'criteria': {'dateRange': {'kind': 'dfareporting dcm(config, {'auth': auth_read, 'report': {'account': account, 'name': recipe_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'CM360_KPI', 'header': True}}}) bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\n Id AS Partner_Id,\n Name AS Partner,\n Advertiser_Id,\n Advertiser,\n Zip_Postal_Code AS Zip,\n SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,\n SAFE_DIVIDE(Clicks, Impressions) AS Click,\n SAFE_DIVIDE(Total_Conversions, Impressions) AS Conversion,\n Impressions AS Impressions FROM `{dataset}.CM360_KPI` CROSS JOIN `{dataset}.CM360_Account` ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'CM360_KPI_Normalized'}}) census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}}) census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['Partner_Id', 'Partner', 'Advertiser_Id', 'Advertiser'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion'], 'dataset': recipe_slug, 'table': 
'CM360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})
CM360 funnel analysis using Census data. Args: account (string) - NA auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Authorization used for writing data. recipe_name (string) - Name of report, not needed if ID used. date_range (choice) - Timeframe to run report for. recipe_slug (string) - Name of Google BigQuery dataset to create. advertisers (integer_list) - Comma delimited list of CM360 advertiser ids.
github-repos
def parseEquation(self, inp):
    """Solve an equation specified in words.

    Args:
        inp (str): An equation, specified in words, containing some
            combination of numbers, binary, and unary operations.

    Returns:
        The floating-point result of carrying out the computation.
    """
    inp = MathService._preprocess(inp)
    split = inp.split(' ')
    # Unary operators first: recursively evaluate everything to the right,
    # apply the operator, splice the numeric result back in, and re-parse.
    for (i, w) in enumerate(split):
        if (w in self.__unaryOperators__):
            op = self.__unaryOperators__[w]
            eq1 = ' '.join(split[:i])
            eq2 = ' '.join(split[(i + 1):])
            result = MathService._applyUnary(self.parseEquation(eq2), op)
            return self.parseEquation(((eq1 + ' ') + str(result)))

    def extractNumbersAndSymbols(inp):
        # Split the token stream into number phrases and binary-operator
        # symbols, accumulating multi-word number phrases in next_number.
        numbers = []
        symbols = []
        next_number = ''
        for w in inp.split(' '):
            if (w in self.__binaryOperators__):
                symbols.append(self.__binaryOperators__[w])
                if next_number:
                    numbers.append(next_number)
                    next_number = ''
            else:
                if next_number:
                    next_number += ' '
                next_number += w
        if next_number:
            numbers.append(next_number)

        def convert(n):
            # Named constants bypass the word-to-number parser.
            if (n in self.__constants__):
                return self.__constants__[n]
            converter = NumberService()
            return converter.parse(n)
        numbers = [convert(n) for n in numbers]
        return (numbers, symbols)

    (numbers, symbols) = extractNumbersAndSymbols(inp)
    return MathService._calculate(numbers, symbols)
Solves the equation specified by the input string. Args: inp (str): An equation, specified in words, containing some combination of numbers, binary, and unary operations. Returns: The floating-point result of carrying out the computation.
codesearchnet
def _on_trace(self, sequence, topic, message):
    """Process a trace received from a device.

    Args:
        sequence (int): The sequence number of the packet received.
        topic (string): The topic this message was received on.
        message (dict): The message itself.
    """
    try:
        conn_key = self._find_connection(topic)
        conn_id = self.conns.get_connection_id(conn_key)
    except ArgumentError:
        # Trace arrived on a topic with no known connection; drop it.
        self._logger.warn("Dropping trace message that does not correspond with a known connection, topic=%s", topic)
        return
    try:
        tracing = messages.TracingNotification.verify(message)
        self._trigger_callback('on_trace', conn_id, tracing['trace'])
    except Exception:
        # Malformed payload or callback failure: log, never propagate.
        self._logger.exception("Error processing trace conn_id=%d", conn_id)
Process a trace received from a device. Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
juraj-google-style
def get_variation(self, experiment_key, user_id, attributes=None):
    """Gets the variation the user will be bucketed into.

    Args:
        experiment_key: Experiment for which user variation needs to be
            determined.
        user_id: ID for user.
        attributes: Dict representing user attributes.

    Returns:
        Variation key the user is bucketed into, or None if the user is not
        in the experiment or the datafile/inputs are invalid.
    """
    # Guard clauses: refuse to decide on an invalid datafile or bad inputs.
    if (not self.is_valid):
        self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_variation'))
        return None
    if (not validator.is_non_empty_string(experiment_key)):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
        return None
    if (not isinstance(user_id, string_types)):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
        return None
    experiment = self.config.get_experiment_from_key(experiment_key)
    variation_key = None
    if (not experiment):
        self.logger.info(('Experiment key "%s" is invalid. Not activating user "%s".' % (experiment_key, user_id)))
        return None
    if (not self._validate_user_inputs(attributes)):
        return None
    variation = self.decision_service.get_variation(experiment, user_id, attributes)
    if variation:
        variation_key = variation.key
    # Pick the decision-notification type matching the experiment kind.
    if self.config.is_feature_experiment(experiment.id):
        decision_notification_type = enums.DecisionNotificationTypes.FEATURE_TEST
    else:
        decision_notification_type = enums.DecisionNotificationTypes.AB_TEST
    # Listeners are notified even when variation_key is None.
    self.notification_center.send_notifications(enums.NotificationTypes.DECISION, decision_notification_type, user_id, (attributes or {}), {'experiment_key': experiment_key, 'variation_key': variation_key})
    return variation_key
Gets variation where user will be bucketed. Args: experiment_key: Experiment for which user variation needs to be determined. user_id: ID for user. attributes: Dict representing user attributes. Returns: Variation key representing the variation the user will be bucketed in. None if user is not in experiment or if experiment is not Running.
codesearchnet
def parse_cmd_line():
    """Parse command line options for the benchmark uploader.

    Returns:
        argparse.Namespace with `archivedir` and `datadir` attributes.
    """
    parser = argparse.ArgumentParser(
        description='Upload benchmark results to datastore.')
    parser.add_argument(
        '-a', '--archivedir', type=str, default=None, required=True,
        help='Directory where benchmark files are archived.')
    parser.add_argument(
        '-d', '--datadir', type=str, default=None, required=True,
        help='Directory of benchmark files to upload.')
    return parser.parse_args()
Parse command line options. Returns: The parsed arguments object.
github-repos
def start(self, pipeline, return_task=True, countdown=None, eta=None):
    """Starts a pipeline (Python 2 / App Engine datastore code).

    Args:
        pipeline: Pipeline instance to run.
        return_task: when True, return the start task for someone else to
            enqueue instead of enqueuing it here.
        countdown: seconds into the future the task should execute.
        eta: absolute datetime the task should execute; mutually exclusive
            with countdown.

    Returns:
        The task to start this pipeline if return_task was True.

    Raises:
        PipelineExistsError: if a pipeline with this idempotence key exists.
    """
    # Re-parent every output slot key under the pipeline key so all records
    # share one entity group (required for the transaction below).
    for (name, slot) in pipeline.outputs._output_dict.iteritems():
        slot.key = db.Key.from_path(*slot.key.to_path(), **dict(parent=pipeline._pipeline_key))
    (_, output_slots, params_text, params_blob) = _generate_args(pipeline, pipeline.outputs, self.queue_name, self.base_path)

    @db.transactional(propagation=db.INDEPENDENT)
    def txn():
        # Idempotence check: an existing record means this key already ran.
        pipeline_record = db.get(pipeline._pipeline_key)
        if (pipeline_record is not None):
            raise PipelineExistsError(('Pipeline with idempotence key "%s" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))))
        entities_to_put = []
        for (name, slot) in pipeline.outputs._output_dict.iteritems():
            entities_to_put.append(_SlotRecord(key=slot.key, root_pipeline=pipeline._pipeline_key))
        entities_to_put.append(_PipelineRecord(key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts))
        entities_to_put.extend(_PipelineContext._create_barrier_entities(pipeline._pipeline_key, pipeline._pipeline_key, _BarrierRecord.FINALIZE, output_slots))
        db.put(entities_to_put)
        task = taskqueue.Task(url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target, countdown=countdown, eta=eta)
        if return_task:
            # Caller will enqueue the task itself; txn returns it unadded.
            return task
        task.add(queue_name=self.queue_name, transactional=True)

    task = txn()
    # Slots now exist in the datastore; mark the in-memory copies accordingly.
    for output_slot in pipeline.outputs._output_dict.itervalues():
        output_slot._exists = True
    return task
Starts a pipeline. Args: pipeline: Pipeline instance to run. return_task: When True, do not submit the task to start the pipeline but instead return it for someone else to enqueue. countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: The task to start this pipeline if return_task was True. Raises: PipelineExistsError if the pipeline with the given ID already exists.
codesearchnet
def list_tags(self, image_name):
    """List all tags for the given image stored in the registry.

    Args:
        image_name (str): name of the image to query; must be present on
            the registry for any values to be returned.

    Returns:
        Tags for that image in reverse-sorted order, or [] if none.
    """
    url = (self.registry_url + '/v2/{}/tags/list').format(image_name)
    payload = self.get(url, auth=self.auth).json()
    if 'tags' in payload:
        return reversed(sorted(payload['tags']))
    return []
List all tags for the given image stored in the registry. Args: image_name (str): The name of the image to query. The image must be present on the registry for this call to return any values. Returns: list[str]: List of tags for that image.
juraj-google-style
def run(self, configurations):
    """Run courgette tests for every configuration.

    Args:
        configurations: iterable of configuration objects carrying a
            specification, parent resource info, and default values.

    Returns:
        CourgetteResult: reports keyed by "<rest_name>.spec".
    """
    result = CourgetteResult()
    for configuration in configurations:
        # One runner per configuration; connection/auth settings come from
        # this tester, everything spec-related from the configuration.
        runner = CourgetteTestsRunner(url=self.url, username=self.username, password=self.password, enterprise=self.enterprise, version=self.apiversion, specification=configuration.specification, sdk_identifier=self.sdk_identifier, monolithe_config=self.monolithe_config, parent_resource=configuration.parent_resource_name, parent_id=configuration.parent_id, default_values=configuration.default_values)
        result.add_report((configuration.specification.rest_name + '.spec'), runner.run())
    return result
Run all tests Returns: A dictionary containing tests results.
codesearchnet
def validate(cls, job_config):
    """Validate the mapper specification (Python 2 code).

    Args:
        job_config: map_job.JobConfig.

    Raises:
        BadReaderParamsError: if the specification is invalid — missing or
            bad bucket name, bad object-name list, or wrongly-typed
            delimiter/buffer-size/path-filter parameters.
    """
    reader_params = job_config.input_reader_params
    if cls.BUCKET_NAME_PARAM not in reader_params:
        raise errors.BadReaderParamsError(
            "%s is required for Google Cloud Storage" % cls.BUCKET_NAME_PARAM)
    try:
        # Delegate bucket-name syntax checking to the cloudstorage library.
        cloudstorage.validate_bucket_name(
            reader_params[cls.BUCKET_NAME_PARAM])
    except ValueError, error:
        raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
    if cls.OBJECT_NAMES_PARAM not in reader_params:
        raise errors.BadReaderParamsError(
            "%s is required for Google Cloud Storage" % cls.OBJECT_NAMES_PARAM)
    filenames = reader_params[cls.OBJECT_NAMES_PARAM]
    if not isinstance(filenames, list):
        raise errors.BadReaderParamsError(
            "Object name list is not a list but a %s" % filenames.__class__.__name__)
    for filename in filenames:
        if not isinstance(filename, basestring):
            raise errors.BadReaderParamsError(
                "Object name is not a string but a %s" % filename.__class__.__name__)
    # Remaining parameters are optional; only type-check them when present.
    if cls.DELIMITER_PARAM in reader_params:
        delimiter = reader_params[cls.DELIMITER_PARAM]
        if not isinstance(delimiter, basestring):
            raise errors.BadReaderParamsError(
                "%s is not a string but a %s" % (cls.DELIMITER_PARAM, type(delimiter)))
    if cls.BUFFER_SIZE_PARAM in reader_params:
        buffer_size = reader_params[cls.BUFFER_SIZE_PARAM]
        if not isinstance(buffer_size, int):
            raise errors.BadReaderParamsError(
                "%s is not an int but a %s" % (cls.BUFFER_SIZE_PARAM, type(buffer_size)))
    if cls.PATH_FILTER_PARAM in reader_params:
        path_filter = reader_params[cls.PATH_FILTER_PARAM]
        if not isinstance(path_filter, PathFilter):
            raise errors.BadReaderParamsError(
                "%s is not an instance of PathFilter but %s." % (cls.PATH_FILTER_PARAM, type(path_filter)))
Validate mapper specification. Args: job_config: map_job.JobConfig. Raises: BadReaderParamsError: if the specification is invalid for any reason such as missing the bucket name or providing an invalid bucket name.
juraj-google-style
def item_from_topics(key, topics):
    """Resolve a positional template key like ``{0}`` against ``topics``.

    Example:
        {0} == hello --> be in hello world
        {1} == world --> be in hello world

    Returns:
        The topic matching the key.

    Raises:
        IndexError: carrying the number of required arguments, when the
            position is out of range.
    """
    if not re.match('{\\d+}', key):
        echo('be.yaml template key not recognised')
        sys.exit(PROJECT_ERROR)
    position = int(key.strip('{}'))
    try:
        return topics[position]
    except IndexError:
        # Re-raise with how many arguments would have been needed (1-based).
        raise IndexError(position + 1)
Get binding from `topics` via `key` Example: {0} == hello --> be in hello world {1} == world --> be in hello world Returns: Single topic matching the key Raises: IndexError (int): With number of required arguments for the key
codesearchnet
def permute(self, ordering: np.ndarray, axis: int) -> None:
    """Permute the dataset along the indicated axis.

    Args:
        ordering: the desired order along the axis.
        axis (int): the axis along which to permute (0 = rows, 1 = columns).

    Returns:
        Nothing.
    """
    # Tiles are a derived cache invalidated by any reordering.
    if self._file.__contains__("tiles"):
        del self._file['tiles']
    ordering = list(np.array(ordering).flatten())
    self.layers._permute(ordering, axis=axis)
    if axis == 0:
        self.row_attrs._permute(ordering)
        self.row_graphs._permute(ordering)
    if axis == 1:
        self.col_attrs._permute(ordering)
        self.col_graphs._permute(ordering)
Permute the dataset along the indicated axis. Args: ordering (list of int): The desired order along the axis axis (int): The axis along which to permute Returns: Nothing.
juraj-google-style
def DownloadPqlResultToCsv(self, pql_query, file_handle, values=None):
    """Download the results of a PQL query to CSV.

    Args:
        pql_query: str statement filter to apply (without limit/offset).
        file_handle: file object to write to.
        values: optional dict of python objects or list of raw SOAP values
            to bind to the query.
    """
    writer = csv.writer(file_handle, delimiter=',', quotechar='"',
                        quoting=csv.QUOTE_ALL)
    # Stream every result page straight into the CSV writer.
    self._PageThroughPqlSet(pql_query, writer.writerow, values)
Downloads the results of a PQL query to CSV. Args: pql_query: str a statement filter to apply (the query should not include the limit or the offset) file_handle: file the file object to write to. [optional] values: A dict of python objects or a list of raw SOAP values to bind to the pql_query.
juraj-google-style
def stop(self, timeout=None):
    """Stop the process and wait for it to finish.

    Args:
        timeout (float): Maximum time to wait for each spawned object;
            None means wait forever.
    """
    assert (self.state == STARTED), 'Process not started'
    self.state = STOPPING
    self._run_hook(ProcessStopHook, timeout=timeout)
    # Wait for outstanding spawned work before discarding bookkeeping.
    for s in self._spawned:
        if (not s.ready()):
            self.log.debug('Waiting for %s *%s **%s', s._function, s._args, s._kwargs)
            s.wait(timeout=timeout)
    self._spawned = []
    self._controllers = OrderedDict()
    self._unpublished = set()
    self.state = STOPPED
    self.log.debug('Done process.stop()')
Stop the process and wait for it to finish Args: timeout (float): Maximum amount of time to wait for each spawned object. None means forever
codesearchnet
def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `tf.IndexedSlices` to this variable.

    Args:
        sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
        use_locking: If `True`, use locking during the operation.
        name: the name of the operation.

    Returns:
        A `Tensor` holding the variable's new value after the scattered
        assignment completes.

    Raises:
        TypeError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
        raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)
    return gen_state_ops.scatter_update(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)
Assigns `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def make_table(contents, headers=None):
    """Concatenate a numpy ndarray of strings into an HTML table.

    Args:
        contents: np.ndarray of strings, 1d or 2d. In the 1d case the table
            is laid out vertically (row-major).
        headers: optional np.ndarray/list/tuple of header names.

    Returns:
        str: the content strings organized into an HTML table.

    Raises:
        ValueError: if contents is not an ndarray, not 1d/2d, or if headers
            is mistyped, not 1d, or does not match the column count.
    """
    if (not isinstance(contents, np.ndarray)):
        raise ValueError('make_table contents must be a numpy ndarray')
    if (contents.ndim not in [1, 2]):
        raise ValueError(('make_table requires a 1d or 2d numpy array, was %dd' % contents.ndim))
    if headers:
        if isinstance(headers, (list, tuple)):
            headers = np.array(headers)
        if (not isinstance(headers, np.ndarray)):
            raise ValueError(('Could not convert headers %s into np.ndarray' % headers))
        if (headers.ndim != 1):
            raise ValueError(('Headers must be 1d, is %dd' % headers.ndim))
        # A 1d contents array renders as a single column.
        expected_n_columns = (contents.shape[1] if (contents.ndim == 2) else 1)
        if (headers.shape[0] != expected_n_columns):
            raise ValueError(('Number of headers %d must match number of columns %d' % (headers.shape[0], expected_n_columns)))
        header = ('<thead>\n%s</thead>\n' % make_table_row(headers, tag='th'))
    else:
        header = ''
    n_rows = contents.shape[0]
    if (contents.ndim == 1):
        # 1d: one cell per row.
        rows = (make_table_row([contents[i]]) for i in range(n_rows))
    else:
        rows = (make_table_row(contents[(i, :)]) for i in range(n_rows))
    return ('<table>\n%s<tbody>\n%s</tbody>\n</table>' % (header, ''.join(rows)))
Given a numpy ndarray of strings, concatenate them into a html table. Args: contents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the table is laid out vertically (i.e. row-major). headers: A np.ndarray or list of string header names for the table. Returns: A string containing all of the content strings, organized into a table. Raises: ValueError: If contents is not a np.ndarray. ValueError: If contents is not 1d or 2d. ValueError: If contents is empty. ValueError: If headers is present and not a list, tuple, or ndarray. ValueError: If headers is not 1d. ValueError: If number of elements in headers does not correspond to number of columns in contents.
codesearchnet
def Write(self, output_writer):
    """Writes the table to the output writer.

    Column widths are first rounded up to the next whole tab stop; the
    header row (if any) is written in bold, followed by the data rows.

    Args:
        output_writer (CLIOutputWriter): output writer.
    """
    tab_width = self._NUMBER_OF_SPACES_IN_TAB
    for index, size in enumerate(self._column_sizes):
        whole_tabs, _ = divmod(size, tab_width)
        # Round up to the next tab stop.
        self._column_sizes[index] = (whole_tabs + 1) * tab_width
    if self._columns:
        self._WriteRow(output_writer, self._columns, in_bold=True)
    for row_values in self._rows:
        self._WriteRow(output_writer, row_values)
Writes the table to output writer. Args: output_writer (CLIOutputWriter): output writer.
juraj-google-style
def Add(self, artifact=None, target=None, callback=None):
    """Add criteria for a check.

    Args:
        artifact: An artifact name.
        target: A Target of attributes necessary to process the data.
        callback: Entity that should be called if the condition matches.
    """
    if (target is None):
        target = Target()
    # Absent attributes become [None] so itertools.product still yields
    # at least one combination.
    os_name = (target.Get('os') or [None])
    cpe = (target.Get('cpe') or [None])
    label = (target.Get('label') or [None])
    attributes = itertools.product(os_name, cpe, label)
    new_conditions = [Condition(artifact, *attr) for attr in attributes]
    self.conditions.update(new_conditions)
    self._Register(new_conditions, callback)
Add criteria for a check. Args: artifact: An artifact name. target: A tuple of artifact necessary to process the data. callback: Entities that should be called if the condition matches.
codesearchnet
class ImageClassifierOutputWithNoAttention(ModelOutput):
    """Base class for outputs of image classification models (no attentions).

    Attributes are documented inline below; all are optional and default
    to None when not produced by the model.
    """
    # Classification (or regression, if num_labels==1) loss; present only
    # when labels are provided.
    loss: Optional[torch.FloatTensor] = None
    # Classification scores (before SoftMax), shape (batch_size, num_labels).
    logits: Optional[torch.FloatTensor] = None
    # Per-stage hidden states / feature maps; present only when
    # output_hidden_states is requested.
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage.
github-repos
def with_random_weights(cls, options):
    """Initialize from a list of options with random weights.

    Each option receives a uniformly random integer weight between 1 and
    ``len(options)``.

    Args:
        options (list): options of any type this object can return from
            its ``get()`` method.

    Returns:
        A newly constructed instance of cls.
    """
    max_weight = len(options)
    weighted = [(option, random.randint(1, max_weight)) for option in options]
    return cls(weighted)
Initialize from a list of options with random weights. The weights assigned to each object are uniformally random integers between ``1`` and ``len(options)`` Args: options (list): The list of options of any type this object can return with the ``get()`` method. Returns: SoftOptions: A newly constructed instance
codesearchnet
def unpack_validation_data(validation_data, raise_if_ambiguous=True):
    """Unpack `validation_data` into an (x, y, sample_weights) triple.

    Datasets, iterators, Sequences, and inputs without `__len__` are passed
    through untouched as `x`; tuples/lists of length 2 or 3 are unpacked.

    Args:
        validation_data: dataset, dataset iterator, Sequence, or a
            numpy/tensor tuple of length 2 or 3.
        raise_if_ambiguous: boolean on whether to fail if `validation_data`
            cannot be parsed. Otherwise simply return
            (validation_data, None, None) and defer the decision to the
            caller.

    Returns:
        tuple of 3: (x, y, sample_weights).

    Raises:
        ValueError: if `validation_data` has an unexpected length and
            `raise_if_ambiguous` is True.
    """
    if isinstance(validation_data, (iterator_ops.Iterator, iterator_ops.IteratorBase, data_types.DatasetV2, data_utils.Sequence)) or not hasattr(validation_data, '__len__'):
        val_x = validation_data
        val_y = None
        val_sample_weight = None
    elif len(validation_data) == 2:
        try:
            # A 2-tuple is (x, y); a non-unpackable value falls back to x-only.
            val_x, val_y = validation_data
            val_sample_weight = None
        except ValueError:
            val_x, val_y, val_sample_weight = (validation_data, None, None)
    elif len(validation_data) == 3:
        try:
            val_x, val_y, val_sample_weight = validation_data
        except ValueError:
            val_x, val_y, val_sample_weight = (validation_data, None, None)
    else:
        if raise_if_ambiguous:
            raise ValueError('When passing a `validation_data` argument, it must contain either 2 items (x_val, y_val), or 3 items (x_val, y_val, val_sample_weights), or alternatively it could be a dataset or a dataset or a dataset iterator. However we received `validation_data=%s`' % validation_data)
        val_x, val_y, val_sample_weight = (validation_data, None, None)
    return (val_x, val_y, val_sample_weight)
Unpack validation data based input type. The validation data is not touched if its dataset or dataset iterator. For other type of input (Numpy or tensor), it will be unpacked into tuple of 3 which is x, y and sample weights. Args: validation_data: dataset, dataset iterator, or numpy, tensor tuple. raise_if_ambiguous: boolean on whether to fail if validation_data cannot be parsed. Otherwise simply return validation_data, None, None and defer the decision to the caller. Returns: tuple of 3, (x, y, sample_weights) for numpy and tensor input.
github-repos
def __init__(self, entity=None, key=None):
    """Fake mutation request object.

    Requires exactly one of entity or key to be set.

    Args:
        entity (google.cloud.datastore.entity.Entity): entity representing
            this upsert mutation.
        key (google.cloud.datastore.key.Key): key representing this delete
            mutation.
    """
    self.entity = entity
    self.key = key
    # Mirror the real API's protobuf field with a lightweight stand-in.
    self._pb = FakeMessage(entity, key)
Fake mutation request object. Requires exactly one of entity or key to be set. Args: entity: (``google.cloud.datastore.entity.Entity``) entity representing this upsert mutation key: (``google.cloud.datastore.key.Key``) key representing this delete mutation
github-repos
def __init__(self, project, error_context=None):
    """Constructor.

    Resolves the set of supported template providers (restricted in safe
    mode), indexes them by file extension, and collects the template
    search paths for the project and, if configured, its theme.

    Args:
        project: The project to which this template engine relates.
        error_context: Optional StatikErrorContext for error reporting.

    Raises:
        NoSupportedTemplateProvidersError: if no providers remain after
            safe-mode filtering.
        MissingProjectFolderError: if a template search path is missing or
            not a directory.
    """
    self.project = project
    self.error_context = error_context or StatikErrorContext()
    self.supported_providers = project.config.template_providers
    if project.safe_mode:
        # Safe mode restricts providers to the vetted subset.
        self.supported_providers = [provider for provider in self.supported_providers \
            if provider in SAFER_TEMPLATE_PROVIDERS]

    if len(self.supported_providers) == 0:
        raise NoSupportedTemplateProvidersError(
            SAFER_TEMPLATE_PROVIDERS if project.safe_mode else DEFAULT_TEMPLATE_PROVIDERS,
            project.safe_mode
        )

    self.provider_classes = dict()
    self.providers_by_ext = dict()
    self.exts = []
    for provider in self.supported_providers:
        self.provider_classes[provider] = get_template_provider_class(provider)
        # First provider claiming an extension wins; later ones are ignored.
        for ext in TEMPLATE_PROVIDER_EXTS[provider]:
            if ext not in self.providers_by_ext:
                self.providers_by_ext[ext] = provider
                self.exts.append(ext)

    self.providers = dict()
    self.cached_templates = dict()

    self.template_paths = [os.path.join(project.path, project.TEMPLATES_DIR)]
    if project.config.theme is not None:
        self.template_paths.append(os.path.join(
            project.path,
            project.THEMES_DIR,
            project.config.theme,
            project.TEMPLATES_DIR
        ))
    logger.debug(
        "Looking in the following path(s) (in the following order) for templates:\n%s",
        "\n".join(self.template_paths)
    )
    for path in self.template_paths:
        if not os.path.exists(path) or not os.path.isdir(path):
            raise MissingProjectFolderError(path)

    logger.debug(
        "Configured the following template providers: %s",
        ", ".join(self.supported_providers)
    )
Constructor. Args: project: The project to which this template engine relates.
juraj-google-style
def get_new_python_files_between_commits(base_commit: str, commits: List[str]) -> List[str]:
    """Get the list of added python files between a base commit and one or several commits.

    Args:
        base_commit (`str`): The commit reference of where to compare for the
            diff. This is the current commit, not the branching point!
        commits (`List[str]`): The commits with which to compare the repo at
            `base_commit` (so the branching point).

    Returns:
        `List[str]`: Paths of the python files added between `base_commit`
        and the given commits.
    """
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            # Only newly added ('A') python files are of interest.
            if diff_obj.change_type == 'A' and diff_obj.b_path.endswith('.py'):
                code_diff.append(diff_obj.b_path)
    return code_diff
Get the list of added python files between a base commit and one or several commits. Args: base_commit (`str`): The commit reference of where to compare for the diff. This is the current commit, not the branching point! commits (`List[str]`): The list of commits with which to compare the repo at `base_commit` (so the branching point). Returns: `List[str]`: The list of python files added between a base commit and one or several commits.
github-repos
def draw(canvas, mol):
    """Draw molecule structure image.

    Args:
        canvas: draw.drawable.Drawable
        mol: model.graphmol.Compound
    """
    mol.require("ScaleAndCenter")
    mlb = mol.size2d[2]
    if not mol.atom_count():
        return
    # Dispatch table: bond order -> bond type -> drawing primitive.
    bond_type_fn = {
        1: {
            0: single_bond,
            1: wedged_single,
            2: dashed_wedged_single,
            3: wave_single,
        },
        2: {
            0: cw_double,
            1: counter_cw_double,
            2: double_bond,
            3: cross_double
        },
        3: {
            0: triple_bond
        }
    }
    for u, v, bond in mol.bonds_iter():
        if not bond.visible:
            continue
        # Orient the bond consistently with its declared endpoint order.
        if (u < v) == bond.is_lower_first:
            f, s = (u, v)
        else:
            s, f = (u, v)
        p1 = mol.atom(f).coords
        p2 = mol.atom(s).coords
        if p1 == p2:
            continue
        # Trim the segment where an atom symbol is drawn so the line does
        # not overlap the label.
        if mol.atom(f).visible:
            p1 = gm.t_seg(p1, p2, F_AOVL, 2)[0]
        if mol.atom(s).visible:
            p2 = gm.t_seg(p1, p2, F_AOVL, 1)[1]
        color1 = mol.atom(f).color
        color2 = mol.atom(s).color
        bond_type_fn[bond.order][bond.type](
            canvas, p1, p2, color1, color2, mlb)
    for n, atom in mol.atoms_iter():
        if not atom.visible:
            continue
        p = atom.coords
        color = atom.color
        if atom.H_count:
            # Decide H label placement from the directions of neighbors:
            # all to the right -> draw right-aligned, all to the left ->
            # left-aligned, otherwise centered.
            cosnbrs = []
            hrzn = (p[0] + 1, p[1])
            for nbr in mol.graph.neighbors(n):
                pnbr = mol.atom(nbr).coords
                try:
                    cosnbrs.append(gm.dot_product(hrzn, pnbr, p) /
                                   gm.distance(p, pnbr))
                except ZeroDivisionError:
                    pass
            if not cosnbrs or min(cosnbrs) > 0:
                text = atom.formula_html(True)
                canvas.draw_text(p, text, color, "right")
                continue
            elif max(cosnbrs) < 0:
                text = atom.formula_html()
                canvas.draw_text(p, text, color, "left")
                continue
        text = atom.formula_html()
        canvas.draw_text(p, text, color, "center")
Draw molecule structure image. Args: canvas: draw.drawable.Drawable mol: model.graphmol.Compound
juraj-google-style
def really_unicode(in_string):
    """Make a string unicode. Really.

    Ensure ``in_string`` is returned as unicode through a series of
    progressively relaxed decodings.

    Args:
        in_string (str): The string to convert.

    Returns:
        str: Unicode.

    Raises:
        ValueError: If the value is not a string at all.
    """
    if isinstance(in_string, StringType):
        # Strict utf-8 first, then latin-1, finally ascii with replacement
        # so the last attempt cannot fail.
        for args in (('utf-8',), ('latin-1',), ('ascii', 'replace')):
            try:
                in_string = in_string.decode(*args)
                break
            except UnicodeDecodeError:
                continue
    if (not isinstance(in_string, UnicodeType)):
        raise ValueError(('%s is not a string at all.' % in_string))
    return in_string
Make a string unicode. Really. Ensure ``in_string`` is returned as unicode through a series of progressively relaxed decodings. Args: in_string (str): The string to convert. Returns: str: Unicode. Raises: ValueError
codesearchnet
def _ProcessCompressedStreamTypes(self, mediator, path_spec, type_indicators):
    """Processes a data stream containing compressed stream types such as: bz2.

    For each recognized indicator a compressed-stream path specification is
    wrapped around `path_spec` and produced as a new event source;
    unsupported indicators produce an extraction warning instead.

    Args:
        mediator (ParserMediator): mediates the interactions between parsers
            and other components, such as storage and abort signals.
        path_spec (dfvfs.PathSpec): path specification.
        type_indicators (list[str]): dfVFS archive type indicators found in
            the data stream.
    """
    number_of_type_indicators = len(type_indicators)
    if number_of_type_indicators == 0:
        return

    self.processing_status = definitions.STATUS_INDICATOR_COLLECTING

    if number_of_type_indicators > 1:
        display_name = mediator.GetDisplayName()
        logger.debug((
            'Found multiple format type indicators: {0:s} for '
            'compressed stream file: {1:s}').format(
                type_indicators, display_name))

    for type_indicator in type_indicators:
        if type_indicator == dfvfs_definitions.TYPE_INDICATOR_BZIP2:
            compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(
                dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM,
                compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2,
                parent=path_spec)

        elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_GZIP:
            compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(
                dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=path_spec)

        else:
            compressed_stream_path_spec = None
            warning_message = (
                'unsupported compressed stream format type indicators: '
                '{0:s}').format(type_indicator)
            mediator.ProduceExtractionWarning(
                warning_message, path_spec=path_spec)

        if compressed_stream_path_spec:
            event_source = event_sources.FileEntryEventSource(
                path_spec=compressed_stream_path_spec)
            event_source.file_entry_type = dfvfs_definitions.FILE_ENTRY_TYPE_FILE
            mediator.ProduceEventSource(event_source)

    self.last_activity_timestamp = time.time()
Processes a data stream containing compressed stream types such as: bz2. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. type_indicators(list[str]): dfVFS archive type indicators found in the data stream.
juraj-google-style
def get_miller_index_from_site_indexes(self, site_ids, round_dp=4, verbose=True):
    """Get the Miller index of a plane from a set of site indexes.

    A minimum of 3 sites are required. If more than 3 sites are given the
    best plane that minimises the distance to all points is calculated.

    Args:
        site_ids (list of int): Site indexes to consider (at least three).
        round_dp (int, optional): Number of decimal places to round the
            Miller index to.
        verbose (bool, optional): Whether to print warnings.

    Returns:
        tuple: The Miller index.
    """
    # Delegates to the lattice using this structure's fractional coords.
    return self.lattice.get_miller_index_from_coords(self.frac_coords[site_ids], coords_are_cartesian=False, round_dp=round_dp, verbose=verbose)
Get the Miller index of a plane from a set of sites indexes. A minimum of 3 sites are required. If more than 3 sites are given the best plane that minimises the distance to all points will be calculated. Args: site_ids (list of int): A list of site indexes to consider. A minimum of three site indexes are required. If more than three sites are provided, the best plane that minimises the distance to all sites will be calculated. round_dp (int, optional): The number of decimal places to round the miller index to. verbose (bool, optional): Whether to print warnings. Returns: (tuple): The Miller index.
codesearchnet
class FeatureMixerBlock(nn.Module):
    """This module mixes the hidden feature dimension.

    Args:
        config (`PatchTSMixerConfig`): Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.norm = PatchTSMixerNormLayer(config)
        self.gated_attn = config.gated_attn
        self.mlp = PatchTSMixerMLP(in_features=config.d_model, out_features=config.d_model, config=config)
        if config.gated_attn:
            self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model)

    def forward(self, hidden: torch.Tensor):
        # Pre-norm residual block: norm -> MLP -> optional gating -> add
        # back the residual.
        residual = hidden
        hidden = self.norm(hidden)
        hidden = self.mlp(hidden)
        if self.gated_attn:
            hidden = self.gating_block(hidden)
        out = hidden + residual
        return out
This module mixes the hidden feature dimension. Args: config (`PatchTSMixerConfig`): Configuration.
github-repos
def get_unpartitioned_shape(self, shape):
    """Returns the shape of an unpartitioned Tensor.

    When given the shape of a 'sharded-size' Tensor, returns the shape of
    the full unpartitioned Tensor by scaling the shard dimension by the
    number of partitions.

    Args:
        shape: The shape of the sharded Tensor.

    Returns:
        The shape of the unpartitioned version of the Tensor, or None if
        sharding information (or the shape itself) is not set.

    Raises:
        ValueError: if shape has an unknown sharded dimension.
    """
    shape = tensor_shape.as_shape(shape)
    dims = shape.as_list()
    if self._shard_dimension is None or self._number_of_partitions is None or (not dims):
        return None
    if dims[self._shard_dimension] is None:
        raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known. ')
    if self._number_of_partitions > 1:
        dims[self._shard_dimension] *= self._number_of_partitions
    return tensor_shape.as_shape(dims)
Returns the shape of an unpartitioned Tensor. When given the shape of a 'sharded-size' Tensor, returns the shape of the full shape of its unpartitioned Tensor. Args: shape: The shape of the sharded Tensor. Returns: The shape of the unpartitioned version of the Tensor. Raises: ValueError: if shape has unknown sharded dimension
github-repos
def _ParseShellItemPathSegment(self, shell_item):
    """Parses a shell item path segment.

    Args:
        shell_item (pyfwsi.item): shell item.

    Returns:
        str: shell item path segment; falls back to a placeholder with the
        item's class type when nothing more specific is available.
    """
    path_segment = None

    if isinstance(shell_item, pyfwsi.root_folder):
        description = shell_folder_ids.DESCRIPTIONS.get(
            shell_item.shell_folder_identifier, None)

        if description:
            path_segment = description
        else:
            path_segment = '{{{0:s}}}'.format(shell_item.shell_folder_identifier)
        path_segment = '<{0:s}>'.format(path_segment)

    elif isinstance(shell_item, pyfwsi.volume):
        if shell_item.name:
            path_segment = shell_item.name
        elif shell_item.identifier:
            path_segment = '{{{0:s}}}'.format(shell_item.identifier)

    elif isinstance(shell_item, pyfwsi.file_entry):
        # Prefer the long name from the extension block over the short name.
        long_name = ''
        for extension_block in shell_item.extension_blocks:
            if isinstance(extension_block, pyfwsi.file_entry_extension):
                long_name = extension_block.long_name

        if long_name:
            path_segment = long_name
        elif shell_item.name:
            path_segment = shell_item.name

    elif isinstance(shell_item, pyfwsi.network_location):
        if shell_item.location:
            path_segment = shell_item.location

    # NOTE(review): this branch is a no-op; class type 0x00 intentionally
    # falls through to the generic placeholder below.
    if path_segment is None and shell_item.class_type == 0x00:
        pass

    if path_segment is None:
        path_segment = '<UNKNOWN: 0x{0:02x}>'.format(shell_item.class_type)

    return path_segment
Parses a shell item path segment. Args: shell_item (pyfwsi.item): shell item. Returns: str: shell item path segment.
juraj-google-style
def getRow(self, key):
    """Get a row by value of the indexing columns.

    Args:
        key: Tuple representing the index of the desired row.

    Returns:
        The row.
    """
    return Row(self._impl.getRow(Tuple(key)._impl))
Get a row by value of the indexing columns. If the index is not specified, gets the only row of a dataframe with no indexing columns. Args: key: Tuple representing the index of the desired row. Returns: The row.
juraj-google-style
def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
    """Converts a log-mel spectrogram into a speech waveform.

    Passing a batch of log-mel spectrograms returns a batch of speech
    waveforms; a single un-batched spectrogram returns a single un-batched
    waveform.

    Args:
        input_embeds (`torch.FloatTensor`): Log-mel spectrogram tensor of
            shape `(batch_size, sequence_length, model_in_dim)` or
            `(sequence_length, model_in_dim)` when un-batched.

    Returns:
        `torch.FloatTensor`: Speech waveform of shape
        `(batch_size, num_frames)` if batched, else `(num_frames,)`.
    """
    hidden_states = self.conv_pre(input_embeds)
    for i in range(self.num_upsamples):
        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
        hidden_states = self.upsampler[i](hidden_states)
        # Average the outputs of all residual blocks of this stage.
        res_state = self.resblocks[i * self.num_kernels](hidden_states)
        for j in range(1, self.num_kernels):
            res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
        hidden_states = res_state / self.num_kernels
    hidden_states = nn.functional.leaky_relu(hidden_states)
    hidden_states = self.conv_post(hidden_states)
    hidden_states = torch.tanh(hidden_states)
    # Drop the channel dimension to get (batch, num_frames) waveforms.
    waveform = hidden_states.squeeze(1)
    return waveform
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech waveform. Args: spectrogram (`torch.FloatTensor`): Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length, model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim` is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`. Returns: `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
github-repos
def write_uint16(self, value, little_endian=True):
    """Pack the value as an unsigned integer and write 2 bytes to the stream.

    Args:
        value: The integer value to write.
        little_endian (bool): specify the endianness. (Default) Little
            endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sH' % endian, value)
Pack the value as an unsigned integer and write 2 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
juraj-google-style
def assert_next(transformations):
    """A transformation that asserts which transformations happen next.

    Transformations should be referred to by their base name, not including
    version suffix; e.g. "Batch" matches "Batch", "BatchV1", "BatchV2", etc.

    Args:
        transformations: A `tf.string` vector `tf.Tensor` identifying the
            transformations that are expected to happen next.

    Returns:
        A `Dataset` transformation function, which can be passed to
        `tf.data.Dataset.apply`.
    """
    def _apply_fn(dataset):
        return _AssertNextDataset(dataset, transformations)
    return _apply_fn
A transformation that asserts which transformations happen next. Transformations should be referred to by their base name, not including version suffix. For example, use "Batch" instead of "BatchV2". "Batch" will match any of "Batch", "BatchV1", "BatchV2", etc. Args: transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are expected to happen next. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
github-repos
def get_library_barcode_sequence_hash(self, inverse=False):
    """Fetch a {LibraryID -> barcode_sequence} mapping for this SequencingRequest.

    Calls the server-side get_library_barcode_sequence_hash endpoint for
    all Libraries on the SequencingRequest.

    Args:
        inverse: `bool`. True means to invert the key/value pairs so that
            the barcode sequence serves as the key.

    Returns:
        `dict`.

    Raises:
        requests.exceptions.HTTPError: on a non-2xx server response.
    """
    action = os.path.join(self.record_url, "get_library_barcode_sequence_hash")
    res = requests.get(url=action, headers=HEADERS, verify=False)
    res.raise_for_status()
    res_json = res.json()
    # JSON object keys arrive as strings; convert them back to ints.
    new_res = {}
    for lib_id in res_json:
        new_res[int(lib_id)] = res_json[lib_id]
    res_json = new_res
    if inverse:
        rev = {}
        for lib_id in res_json:
            rev[res_json[lib_id]] = lib_id
        res_json = rev
    return res_json
Calls the SequencingRequest's get_library_barcode_sequence_hash server-side endpoint to create a hash of the form {LibraryID -> barcode_sequence} for all Libraries on the SequencingRequest. Args: inverse: `bool`. True means to inverse the key and value pairs such that the barcode sequence serves as the key. Returns: `dict`.
juraj-google-style
def to_code(self):
    """Convert this instance back into a native python code object.

    This only works if the internals of the code object are compatible
    with those of the running python version.

    Returns:
        types.CodeType: The native python code object.

    Raises:
        ValueError: if the stored internals do not match the running
            interpreter.
    """
    if (self.internals is not get_py_internals()):
        raise ValueError('CodeObject is not compatible with the running python internals.')
    # Python 2's CodeType constructor has no kwonlyargcount argument.
    if six.PY2:
        return types.CodeType(self.co_argcount, self.co_nlocals, self.co_stacksize, self.co_flags, self.co_code, self.co_consts, self.co_names, self.co_varnames, self.co_filename, self.co_name, self.co_firstlineno, self.co_lnotab, self.co_freevars, self.co_cellvars)
    else:
        return types.CodeType(self.co_argcount, self.co_kwonlyargcount, self.co_nlocals, self.co_stacksize, self.co_flags, self.co_code, self.co_consts, self.co_names, self.co_varnames, self.co_filename, self.co_name, self.co_firstlineno, self.co_lnotab, self.co_freevars, self.co_cellvars)
Convert this instance back into a native python code object. This only works if the internals of the code object are compatible with those of the running python version. Returns: types.CodeType: The native python code object.
codesearchnet
def get_backend(self, name=None, **kwargs):
    """Return a single backend matching the specified filtering.

    Args:
        name (str): name of the backend.
        **kwargs (dict): dict used for filtering.

    Returns:
        BaseBackend: a backend matching the filtering.

    Raises:
        QiskitBackendNotFoundError: if no backend could be found or more
            than one backend matches.
    """
    backends = self.backends(name, **kwargs)
    if len(backends) > 1:
        raise QiskitBackendNotFoundError('More than one backend matches the criteria')
    elif not backends:
        raise QiskitBackendNotFoundError('No backend matches the criteria')
    return backends[0]
Return a single backend matching the specified filtering. Args: name (str): name of the backend. **kwargs (dict): dict used for filtering. Returns: BaseBackend: a backend matching the filtering. Raises: QiskitBackendNotFoundError: if no backend could be found or more than one backend matches.
juraj-google-style
def gnuplot_3d_matrix(z_matrix, filename, title='', x_label='', y_label=''):
    """Produce a general 3D (heat-map) plot from a 2D matrix via gnuplot.

    Args:
        z_matrix (list): 2D matrix of z values.
        filename (str): Filename of the output image; '.png' is appended
            when missing.
        title (str): Title of the plot. Default is '' (no title).
        x_label (str): x-axis label.
        y_label (str): y-axis label.
    """
    _, ext = os.path.splitext(filename)
    if ext != '.png':
        filename += '.png'
    # NOTE(review): the original assignment was a dangling line
    # continuation ('gnuplot_cmds = \') that bound gnuplot_cmds to the
    # following statement and referenced it before assignment. Restore an
    # explicit script. The bare names below (filename, filename_data,
    # title, x_label, y_label) are gnuplot variables substituted from
    # args_dict by the gnuplot() helper -- confirm against that helper.
    gnuplot_cmds = """\
set term pngcairo size 30cm,25cm
set out filename

unset key
set view map

set title title
set xlabel x_label
set ylabel y_label

splot filename_data matrix with image notitle
"""
    scr = _GnuplotScriptTemp(gnuplot_cmds)
    data = _GnuplotDataZMatrixTemp(z_matrix)
    args_dict = {
        'filename': filename,
        'filename_data': data.name,
        'title': title,
        'x_label': x_label,
        'y_label': y_label
    }
    gnuplot(scr.name, args_dict)
Function to produce a general 3D plot from a 2D matrix. Args: z_matrix (list): 2D matrix. filename (str): Filename of the output image. title (str): Title of the plot. Default is '' (no title). x_label (str): x-axis label. y_label (str): y-axis label.
juraj-google-style
def compute_stats(array):
    """Reports mean and ± range for the given array.

    Samples outside Tukey's fences (1.5 * IQR beyond the quartiles) are
    discarded before the statistics are taken; the range computation
    follows benchstat's.

    Args:
        array: The array to compute stats for.

    Returns:
        mean and ± %diff range.
    """
    q1, q3 = np.percentile(array, 25), np.percentile(array, 75)
    fence = 1.5 * (q3 - q1)
    kept = [x for x in array if q1 - fence <= x <= q3 + fence]
    mean = np.mean(kept)
    # Largest deviation from the mean, as a percentage of the mean.
    spread = max(np.max(kept) - mean, mean - np.min(kept))
    return (mean, spread / mean * 100.0)
Reports mean and ± range for the given array. The range computation follows benchstat's. Args: array: The array to compute stats for. Returns: mean and ± %diff range.
github-repos
def _validate_path(self, settings, name, value): if not os.path.exists(value): raise SettingsInvalidError("Path from setting '{name}' does not " "exists: {value}".format( name=name, value=value )) return value
Validate path exists Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to validate. Raises: boussole.exceptions.SettingsInvalidError: If path does not exists. Returns: str: Validated path.
juraj-google-style
def resample(self, data, input_rate):
    """Resample raw 16-bit audio from input_rate to self.RATE_PROCESS.

    Microphone may not support our native processing sampling rate, so
    resample from input_rate to RATE_PROCESS here for webrtcvad and
    deepspeech.

    Args:
        data (bytes): Input audio stream (16-bit PCM samples).
        input_rate (int): Input audio rate to resample from.

    Returns:
        bytes: The resampled 16-bit PCM audio.
    """
    # np.fromstring()/ndarray.tostring() were removed from modern NumPy;
    # use the buffer-based equivalents.
    data16 = np.frombuffer(data, dtype=np.int16)
    # Use the explicit input_rate argument; the original read
    # self.input_rate and silently ignored the parameter.
    resample_size = int(len(data16) / input_rate * self.RATE_PROCESS)
    resampled = signal.resample(data16, resample_size)
    resampled16 = np.array(resampled, dtype=np.int16)
    return resampled16.tobytes()
Microphone may not support our native processing sampling rate, so resample from input_rate to RATE_PROCESS here for webrtcvad and deepspeech Args: data (binary): Input audio stream input_rate (int): Input audio rate to resample from
juraj-google-style
def plot(self, figure_list):
    """When each subscript is called, uses its standard plotting.

    While running, delegates to the current subscript's plot; after the
    run finishes, re-plots the last script in the configured order using
    this script's aggregated data.

    Args:
        figure_list: list of figures passed from the gui
    """
    if self._current_subscript_stage is not None:
        if self._current_subscript_stage['current_subscript'] is not None:
            self._current_subscript_stage['current_subscript'].plot(figure_list)

    if (self.is_running is False) and not (self.data == {} or self.data is None):
        # Determine the last subscript in the configured execution order.
        script_names = list(self.settings['script_order'].keys())
        script_indices = [self.settings['script_order'][name] for name in script_names]
        _, sorted_script_names = list(zip(*sorted(zip(script_indices, script_names))))

        last_script = self.scripts[sorted_script_names[-1]]

        last_script.force_update()
        axes_list = last_script.get_axes_layout(figure_list)
        try:
            # Prefer plotting our aggregated data through the subscript.
            last_script._plot(axes_list, self.data)
        except TypeError as err:
            # Subscript's _plot doesn't accept a data argument; fall back
            # to plotting its own last data set.
            print((warnings.warn('can\'t plot average script data because script.plot function doens\'t take data as optional argument. Plotting last data set instead')))
            print((err.message))
            last_script.plot(figure_list)
When each subscript is called, uses its standard plotting Args: figure_list: list of figures passed from the guit
juraj-google-style
def convert_padding(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert padding layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers

    Raises:
        AssertionError: for constant-mode padding with a non-zero value.
    """
    print('Converting padding...')

    if params['mode'] == 'constant':
        # Keras ZeroPadding2D only supports zero fill.
        if params['value'] != 0.0:
            raise AssertionError('Cannot convert non-zero padding')

        if names:
            tf_name = 'PADD' + random_string(4)
        else:
            tf_name = w_name + str(random.random())

        # pads layout: indices 2/6 are top/bottom, 3/7 are left/right.
        padding_name = tf_name
        padding_layer = keras.layers.ZeroPadding2D(
            padding=((params['pads'][2], params['pads'][6]),
                     (params['pads'][3], params['pads'][7])),
            name=padding_name
        )
        layers[scope_name] = padding_layer(layers[inputs[0]])
    elif params['mode'] == 'reflect':
        # No native keras layer for reflect padding; wrap tf.pad in a Lambda.
        def target_layer(x, pads=params['pads']):
            layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT')
            return layer

        lambda_layer = keras.layers.Lambda(target_layer)
        layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert padding layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def is_native_xmon_gate(gate: ops.Gate) -> bool:
    """Check if a gate is a native xmon gate.

    Args:
        gate: Input gate.

    Returns:
        True if the gate is native to the xmon, false otherwise.
    """
    return isinstance(gate, (ops.CZPowGate, ops.MeasurementGate, ops.PhasedXPowGate, ops.XPowGate, ops.YPowGate, ops.ZPowGate))
Check if a gate is a native xmon gate. Args: gate: Input gate. Returns: True if the gate is native to the xmon, false otherwise.
codesearchnet
def merge_variables(program: cfg.Program, node: cfg.CFGNode, variables: Sequence[cfg.Variable]) -> cfg.Variable:
    """Create a combined Variable for a list of variables.

    The purpose of this function is to create a final result variable for
    functions that return a list of "temporary" variables (e.g. function
    calls).

    Args:
        program: A cfg.Program instance.
        node: The current CFG node.
        variables: A list of cfg.Variables.

    Returns:
        A cfg.Variable.
    """
    if not variables:
        return program.NewVariable()
    elif all((v is variables[0] for v in variables)):
        # All entries are the same variable; a single reassignment suffices.
        return variables[0].AssignToNewVariable(node)
    else:
        v = program.NewVariable()
        for r in variables:
            v.PasteVariable(r, node)
        return v
Create a combined Variable for a list of variables. The purpose of this function is to create a final result variable for functions that return a list of "temporary" variables. (E.g. function calls). Args: program: A cfg.Program instance. node: The current CFG node. variables: A list of cfg.Variables. Returns: A cfg.Variable.
github-repos
def parse(self, request):
    """Parse an incoming Mandrill inbound request into email instances.

    Args:
        request: an HttpRequest object, containing a list of forwarded
            emails, as per Mandrill specification for inbound emails.

    Returns:
        a list of EmailMultiAlternatives instances

    Raises:
        RequestParseError: if the payload is not valid JSON or a message
            is missing required fields.
    """
    assert isinstance(request, HttpRequest), "Invalid request type: %s" % type(request)

    if settings.INBOUND_MANDRILL_AUTHENTICATION_KEY:
        _check_mandrill_signature(
            request=request,
            key=settings.INBOUND_MANDRILL_AUTHENTICATION_KEY,
        )

    try:
        messages = json.loads(request.POST['mandrill_events'])
    except (ValueError, KeyError) as ex:
        raise RequestParseError("Request is not a valid json: %s" % ex)

    if not messages:
        logger.debug("No messages found in mandrill request: %s", request.body)
        return []

    emails = []
    for message in messages:
        # Only inbound events carry a forwarded email payload.
        if message.get('event') != 'inbound':
            logger.debug("Discarding non-inbound message")
            continue

        msg = message.get('msg')
        try:
            from_email = msg['from_email']
            to = list(self._get_recipients(msg['to']))
            cc = list(self._get_recipients(msg['cc'])) if 'cc' in msg else []
            bcc = list(self._get_recipients(msg['bcc'])) if 'bcc' in msg else []
            subject = msg.get('subject', "")
            # Inline images arrive separately; merge them with attachments.
            attachments = msg.get('attachments', {})
            attachments.update(msg.get('images', {}))
            text = msg.get('text', "")
            html = msg.get('html', "")
        except (KeyError, ValueError) as ex:
            raise RequestParseError(
                "Inbound request is missing or got an invalid value.: %s." % ex
            )

        email = EmailMultiAlternatives(
            subject=subject,
            body=text,
            from_email=self._get_sender(
                from_email=from_email,
                from_name=msg.get('from_name'),
            ),
            to=to,
            cc=cc,
            bcc=bcc,
        )
        if html is not None and len(html) > 0:
            email.attach_alternative(html, "text/html")

        email = self._process_attachments(email, attachments)
        emails.append(email)

    return emails
Parse incoming request and return an email instance. Args: request: an HttpRequest object, containing a list of forwarded emails, as per Mandrill specification for inbound emails. Returns: a list of EmailMultiAlternatives instances
juraj-google-style
def set_tick(self, index, interval):
    """Update a tick's interval.

    Args:
        index (int): The index of the tick that you want to update.
        interval (int): The number of seconds between ticks. Setting this
            to 0 will disable the tick.

    Returns:
        int: An error code (Error.NO_ERROR on success).
    """
    name = self.tick_name(index)
    if (name is None):
        return pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY)
    self.ticks[name] = interval
    return Error.NO_ERROR
Update the a tick's interval. Args: index (int): The index of the tick that you want to fetch. interval (int): The number of seconds between ticks. Setting this to 0 will disable the tick. Returns: int: An error code.
codesearchnet
def reward(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> tf.Tensor:
    """Compiles the reward function for the given transition.

    Args:
        state (Sequence[tf.Tensor]): A tuple of current state tensors.
        action (Sequence[tf.Tensor]): A tuple of action tensors.
        next_state (Sequence[tf.Tensor]): A tuple of next state tensors.

    Returns:
        tf.Tensor: A tensor representing the reward function, with a
        trailing singleton dimension added.
    """
    scope = self.reward_scope(state, action, next_state)
    r = self.compile_reward(scope).tensor
    with self.graph.as_default():
        with tf.name_scope('reward'):
            return tf.expand_dims(r, -1)
Compiles the reward function given the current `state`, `action` and `next_state`. Args: state (Sequence[tf.Tensor]): A tuple of current state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. next_state (Sequence[tf.Tensor]): A tuple of next state tensors. Returns: (:obj:`tf.Tensor`): A tensor representing the reward function.
juraj-google-style
def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert reduce_sum layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting reduce_sum ...')

    keepdims = (params['keepdims'] > 0)
    axis = params['axes']

    # Bind keepdims/axis as defaults so the Lambda stays serializable.
    def target_layer(x, keepdims=keepdims, axis=axis):
        import keras.backend as K
        return K.sum(x, keepdims=keepdims, axis=axis)

    lambda_layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert reduce_sum layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def res_call(self, ns, types_ns, node, f_type, args, keywords):
    """Resolves the return type of an external function or method call.

    Args:
        ns: namespace
        types_ns: types namespace
        node: str, the function name
        f_type: types of the actual function being called, if known
        args: types of each respective argument in node.args
        keywords: types of each respective argument in node.keywords

    Returns:
        Tuple (return_type, side_effect_types); side effects map argument
        names to sets of types.

    Raises:
        NotImplementedError: always; subclasses must implement.
    """
    raise NotImplementedError('subclasses must implement')
Resolves the return type an external function or method call. Args: ns: namespace types_ns: types namespace node: str, the function name f_type: types of the actual function being called, if known args: types of each respective argument in node.args keywords: types of each respective argument in node.keywords Returns: Tuple (return_type, side_effect_types). The first element is just the return types of the function. The second element is a map from argument names to sets of types, and allow modelling side effects of functions (for example via global or nonlocal).
github-repos
def __init__(self, tag, *value):
    """Initialize with a tag and value elements.

    Args:
        tag (str): The tag.
        *value: The elements to put into the value list; may be single
            elements, separate arguments, or nested iterables -- all are
            flattened to a single one-dimensional list.
    """
    self.tag = tag
    self.value = list(flatten(value))
init Args: tag (str): The tag1 *value: the elements you want to put into single's value(list), can be one element or several seperate by comma, or put into a list or combination of those. *value will be flattend to a single one deminision list. In subclasses' init, raw data should be converted to single if needed according to specific subclass.
juraj-google-style
def case(self, case):
    """Get a case from the database.

    Searches the cases collection by case id.

    Args:
        case (dict): A case dictionary with a 'case_id' key.

    Returns:
        dict: The matching mongo case dictionary, or None if not found.
    """
    LOG.debug("Getting case {0} from database".format(case.get('case_id')))
    case_id = case['case_id']
    return self.db.case.find_one({'case_id': case_id})
Get a case from the database Search the cases with the case id Args: case (dict): A case dictionary Returns: mongo_case (dict): A mongo case dictionary
juraj-google-style
def from_dict(cls, data):
    """Transforms a Python dictionary to an Input object.

    Note:
        Optionally, this method can also deserialize a Cryptoconditions-
        Fulfillment that is not yet signed.

    Args:
        data (dict): The Input to be transformed.

    Returns:
        :class:`~bigchaindb.common.transaction.Input`

    Raises:
        InvalidSignature: If an Input's URI couldn't be parsed.
    """
    fulfillment = data['fulfillment']
    if (not isinstance(fulfillment, (Fulfillment, type(None)))):
        try:
            fulfillment = Fulfillment.from_uri(data['fulfillment'])
        except ASN1DecodeError:
            raise InvalidSignature("Fulfillment URI couldn't been parsed")
        except TypeError:
            # Not a URI string; fall back to parsing fulfillment details.
            fulfillment = _fulfillment_from_details(data['fulfillment'])
    fulfills = TransactionLink.from_dict(data['fulfills'])
    return cls(fulfillment, data['owners_before'], fulfills)
Transforms a Python dictionary to an Input object. Note: Optionally, this method can also serialize a Cryptoconditions- Fulfillment that is not yet signed. Args: data (dict): The Input to be transformed. Returns: :class:`~bigchaindb.common.transaction.Input` Raises: InvalidSignature: If an Input's URI couldn't be parsed.
codesearchnet
def or_filter(self, **filters):
    """Works like "filter" but joins given filters with OR operator.

    Args:
        **filters: Query filters as keyword arguments.

    Returns:
        Self. Queryset object.

    Example:
        >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
    """
    # Work on a deep copy so the original queryset stays untouched.
    clone = copy.deepcopy(self)
    clone.adapter.add_query([('OR_QRY', filters)])
    return clone
Works like "filter" but joins given filters with OR operator. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Example: >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
codesearchnet
def get_2d_local_memory_v2(x, query_shape, memory_flange):
    """Gathering memory blocks around query blocks.

    Flange is half of query; only works if memory flanges are half of
    query sizes.

    Args:
        x: a [batch, height, width, depth] tensor
        query_shape: 2-d integer list of query shape
        memory_flange: 2-d integer list of memory flanges

    Returns:
        x: A [batch, num_h_blocks, num_w_blocks,
        query_shape[0]+2*memory_flange[0],
        query_shape[1]+2*memory_flange[1]] tensor.
    """
    (_, height, width, depth_x) = common_layers.shape_list(x)
    # Zero-pad by a flange on every spatial side so edge queries have a
    # full memory neighborhood.
    paddings = [[0, 0], [memory_flange[0], memory_flange[0]],
                [memory_flange[1], memory_flange[1]], [0, 0]]
    padded_x = tf.pad(x, paddings)
    padded_x.set_shape([None, height+2*memory_flange[0],
                        width+2*memory_flange[1], depth_x])
    num_h_memory_blocks = height
    num_w_memory_blocks = width
    x_memory_blocks = _extract_blocks(padded_x,
                                      query_shape[0], query_shape[1])
    # Concatenate each block with its right neighbor, then with its
    # bottom neighbor, to assemble the flanged memory windows.
    x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks,
                              2)
    x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2)
    x_right_width = tf.concat(x_width_blocks[1:], axis=2)
    x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4)

    x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1)
    x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1)
    x_bottom_height = tf.concat(x_height_blocks[1:], axis=1)
    x = tf.concat([x_top_height, x_bottom_height], axis=3)

    return x
Gathering memory blocks around query blocks. flange is half of query . Only works if memory flanges are half of query sizes. Args: x: a [batch, height, width, depth tensor] query_shape: 2-d integer list of query shape memory_flange: 2-d integer list of memory flanges Returns: x: A [batch, num_h_blocks, num_w_blocks, query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] tensor.
juraj-google-style
def decr(self, key, value, noreply=False):
    """The memcached "decr" command.

    Args:
        key: str, see class docs for details.
        value: int, the amount by which to decrement the value.
        noreply: optional bool, False to wait for the reply (the default).

    Returns:
        If noreply is True, always returns None. Otherwise returns the new
        value of the key, or None if the key wasn't found.
    """
    key = self.check_key(key)
    cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'decr', noreply)
    if noreply:
        return None
    if results[0] == b'NOT_FOUND':
        return None
    return int(results[0])
The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to decrement the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
juraj-google-style
def get_lock_requests(self):
    """Determine the effective requests implied by the patch locks.

    Takes the current context and the current patch locks, and computes
    the effective requests that will be added to the main request.

    Returns:
        dict: (PatchLock, [Requirement]) items. Each requirement is a weak
        package reference. Empty if there is no current context.
    """
    d = defaultdict(list)
    if self._context:
        for variant in self._context.resolved_packages:
            name = variant.name
            version = variant.version
            lock = self.patch_locks.get(name)
            # Fall back to the default lock when no per-package lock is set.
            if (lock is None):
                lock = self.default_patch_lock
            request = get_lock_request(name, version, lock)
            if (request is not None):
                d[lock].append(request)
    return d
Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned.
codesearchnet
def add_child(self, key, value):
    """Adds a child to the Tag.

    To retrieve the child call get_child or access the
    Tag.children[key] dictionary.

    Args:
        key (str): Unique child's identifier, or iterable of keys when
            `value` is an iterable.
        value (Tag, str): can be a Tag, an iterable of Tag or a str. If
            `value` is a dict, each item's key is used as 'key'.
    """
    if (type(value) in (list, tuple, dict)):
        if (type(value) == dict):
            # Dict items carry their own keys; recurse per item.
            for k in value.keys():
                self.add_child(k, value[k])
            return
        # List/tuple: pair each child with the matching key by position.
        i = 0
        for child in value:
            self.add_child(key[i], child)
            i = (i + 1)
        return
    if hasattr(value, 'attributes'):
        # Link the child widget back to this parent.
        value.attributes['data-parent-widget'] = self.identifier
        value._parent = self
    if (key in self.children):
        # Re-adding an existing key moves it to the end of render order.
        self._render_children_list.remove(key)
    self._render_children_list.append(key)
    self.children[key] = value
Adds a child to the Tag To retrieve the child call get_child or access to the Tag.children[key] dictionary. Args: key (str): Unique child's identifier, or iterable of keys value (Tag, str): can be a Tag, an iterable of Tag or a str. In case of iterable of Tag is a dict, each item's key is set as 'key' param
codesearchnet
def update_logging_config(context, log_name=None, file_name='worker.log'):
    """Update python logging settings from config.

    By default, this sets the top-level package's log settings, but this
    will change if some other package calls this function or specifies
    the ``log_name``.

    * Use formatting from config settings.
    * Log to screen if ``verbose``.
    * Add a logfile from config settings.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        log_name (str, optional): the name of the Logger to modify. If
            None, use the top level module ('scriptworker'). Defaults to
            None.
        file_name (str, optional): the log file name. Defaults to
            'worker.log'.
    """
    # Default to the top-level package name of this module.
    log_name = (log_name or __name__.split('.')[0])
    top_level_logger = logging.getLogger(log_name)
    datefmt = context.config['log_datefmt']
    fmt = context.config['log_fmt']
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    if context.config.get('verbose'):
        top_level_logger.setLevel(logging.DEBUG)
        # Only add a screen handler once, even if called repeatedly.
        if (len(top_level_logger.handlers) == 0):
            handler = logging.StreamHandler()
            handler.setFormatter(formatter)
            top_level_logger.addHandler(handler)
    else:
        top_level_logger.setLevel(logging.INFO)
    makedirs(context.config['log_dir'])
    path = os.path.join(context.config['log_dir'], file_name)
    # WatchedFileHandler reopens the file on external rotation.
    if context.config['watch_log_file']:
        handler = logging.handlers.WatchedFileHandler(path)
    else:
        handler = logging.FileHandler(path)
    handler.setFormatter(formatter)
    top_level_logger.addHandler(handler)
    top_level_logger.addHandler(logging.NullHandler())
Update python logging settings from config. By default, this sets the ``scriptworker`` log settings, but this will change if some other package calls this function or specifies the ``log_name``. * Use formatting from config settings. * Log to screen if ``verbose`` * Add a rotating logfile from config settings. Args: context (scriptworker.context.Context): the scriptworker context. log_name (str, optional): the name of the Logger to modify. If None, use the top level module ('scriptworker'). Defaults to None.
codesearchnet
def publish(self, data):
    """Publish data to the middleware via its HTTP publish endpoint.

    Args:
        data (string): contents to be published by this entity.

    Returns:
        dict: {'status': 'success'|'failure', 'response': str} based on
        the middleware's reply text.
    """
    if (self.entity_api_key == ''):
        return {'status': 'failure', 'response': 'No API key found in request'}
    publish_url = (self.base_url + 'api/0.1.0/publish')
    publish_headers = {'apikey': self.entity_api_key}
    # Publish to the AMQP topic exchange, keyed by this entity's id.
    publish_data = {'exchange': 'amq.topic', 'key': str(self.entity_id), 'body': str(data)}
    with self.no_ssl_verification():
        r = requests.post(publish_url, json.dumps(publish_data), headers=publish_headers)
    response = dict()
    # Classify the reply by inspecting the response body text.
    if ('No API key' in str(r.content.decode('utf-8'))):
        response['status'] = 'failure'
        r = json.loads(r.content.decode('utf-8'))['message']
    elif ('publish message ok' in str(r.content.decode('utf-8'))):
        response['status'] = 'success'
        r = r.content.decode('utf-8')
    else:
        response['status'] = 'failure'
        r = r.content.decode('utf-8')
    response['response'] = str(r)
    return response
This function allows an entity to publish data to the middleware. Args: data (string): contents to be published by this entity.
codesearchnet
def Run(self):
    """Run the executable with the help flag and collect its output.

    Returns:
        1 (true) If everything went well.
        0 (false) If there were problems.
    """
    if (not self.executable):
        logging.error(('Could not locate "%s"' % self.long_name))
        return 0
    finfo = os.stat(self.executable)
    # Remember the executable's modification time.
    self.date = time.localtime(finfo[stat.ST_MTIME])
    logging.info(('Running: %s %s </dev/null 2>&1' % (self.executable, FLAGS.help_flag)))
    # NOTE(review): os.popen4 is Python 2 only; stdout and stderr are
    # merged into a single stream here.
    (child_stdin, child_stdout_and_stderr) = os.popen4([self.executable, FLAGS.help_flag])
    child_stdin.close()
    self.output = child_stdout_and_stderr.readlines()
    child_stdout_and_stderr.close()
    # A usage message shorter than _MIN_VALID_USAGE_MSG lines is treated
    # as an error rather than real help text.
    if (len(self.output) < _MIN_VALID_USAGE_MSG):
        logging.error(('Error: "%s %s" returned only %d lines: %s' % (self.name, FLAGS.help_flag, len(self.output), self.output)))
        return 0
    return 1
Run it and collect output. Returns: 1 (true) If everything went well. 0 (false) If there were problems.
codesearchnet
def angle( x, y ): dot = np.dot( x, y ) x_mod = np.linalg.norm( x ) y_mod = np.linalg.norm( y ) cos_angle = dot / ( x_mod * y_mod ) return np.degrees( np.arccos( cos_angle ) )
Calculate the angle between two vectors, in degrees. Args: x (np.array): one vector. y (np.array): the other vector. Returns: (float): the angle between x and y in degrees.
juraj-google-style
def _prepare_headers(self, request, filter=None, order_by=None, group_by=(), page=None, page_size=None):
    """Set Nuage filtering/paging headers on the given request.

    Args:
        request: the NURESTRequest to decorate.
        filter (str): optional filter expression.
        order_by (str): optional ordering expression.
        group_by (iterable of str): attribute names to group by.
        page (int): optional page index. Compared against None because
            page 0 is a valid value.
        page_size (int): optional page size.
    """
    # NOTE: group_by previously defaulted to a shared mutable list ([]);
    # an immutable tuple default avoids that pitfall while remaining
    # backward-compatible for all callers.
    if filter:
        request.set_header('X-Nuage-Filter', filter)

    if order_by:
        request.set_header('X-Nuage-OrderBy', order_by)

    if page is not None:
        request.set_header('X-Nuage-Page', str(page))

    if page_size:
        request.set_header('X-Nuage-PageSize', str(page_size))

    if len(group_by) > 0:
        header = ", ".join(group_by)
        request.set_header('X-Nuage-GroupBy', 'true')
        request.set_header('X-Nuage-Attributes', header)
Prepare headers for the given request Args: request: the NURESTRequest to send filter: string order_by: string group_by: list of names page: int page_size: int
juraj-google-style
def _get_break_loop_node(break_node):
    """Return the loop node that holds the break node in arguments.

    Args:
        break_node (astroid.Break): the break node of interest.

    Returns:
        astroid.For or astroid.While: the loop node holding the break
        node, or None if no enclosing loop is found.
    """
    parent = break_node.parent
    while True:
        # A break sitting in a loop's "orelse" clause belongs to an outer
        # loop, not this one, so only stop when we find a loop that holds
        # the node in its body.
        if isinstance(parent, (astroid.For, astroid.While)) and \
                break_node not in getattr(parent, "orelse", []):
            return parent
        break_node, parent = parent, parent.parent
        if parent is None:
            return parent
Returns the loop node that holds the break node in arguments. Args: break_node (astroid.Break): the break node of interest. Returns: astroid.For or astroid.While: the loop node holding the break node.
juraj-google-style
def pooled_sample_variance(sample1, sample2):
    """Find the pooled sample variance for two samples.

    Args:
        sample1: one sample.
        sample2: the other sample.

    Returns:
        Pooled sample variance, as a float.
    """
    def _sum_of_squares(sample):
        # Squared deviations from the sample's own mean, accumulated with
        # math.fsum for floating-point accuracy.
        mean = statistics.mean(sample)
        return math.fsum((x - mean) ** 2 for x in sample)

    deg_freedom = len(sample1) + len(sample2) - 2
    total = _sum_of_squares(sample1) + _sum_of_squares(sample2)
    return total / float(deg_freedom)
Find the pooled sample variance for two samples. Args: sample1: one sample. sample2: the other sample. Returns: Pooled sample variance, as a float.
codesearchnet
def __init__(self, name, metadata):
    """Initialize base GEO object.

    Args:
        name (:obj:`str`): Name of the object.
        metadata (:obj:`dict`): Metadata information.

    Raises:
        TypeError: Metadata should be a dict.
    """
    if not isinstance(metadata, dict):
        raise TypeError("Metadata should be a dictionary not a %s" % str(
            type(metadata)))
    self.name = name
    self.metadata = metadata
    # Parse "name:  value" entries under the 'relation' key into a
    # name -> [values] mapping.
    self.relations = {}
    if 'relation' in self.metadata:
        for relation in self.metadata['relation']:
            parts = re.split(r':\s+', relation)
            self.relations.setdefault(parts[0], []).append(parts[1])
Initialize base GEO object. Args: name (:obj:`str`): Name of the object. metadata (:obj:`dict`): Metadata information. Raises: TypeError: Metadata should be a dict.
juraj-google-style
def check_media_service_name_availability(access_token, subscription_id, msname):
    """Check media service name availability.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        msname (str): media service name.

    Returns:
        HTTP response.
    """
    # Build the ARM CheckNameAvailability URL for this subscription.
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/providers/microsoft.media/CheckNameAvailability?'
        + 'api-version=' + MEDIA_API
    )
    payload = json.dumps({'name': msname, 'type': 'mediaservices'})
    return do_post(endpoint, payload, access_token)
Check media service name availability. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. msname (str): media service name. Returns: HTTP response.
codesearchnet
def nic_v2(msg, NICa, NICbc):
    """Calculate NIC, navigation integrity category, for ADS-B version 2.

    Args:
        msg (string): 28 bytes hexadecimal message string
        NICa (int or string): NIC supplement - A
        NICbc (int or string): NIC supplement - B or C

    Returns:
        int or string: Horizontal Radius of Containment
    """
    tc = typecode(msg)
    if tc < 5 or tc > 22:
        raise RuntimeError(
            "%s: Not a surface position message (5<TC<8), \
airborne position message (8<TC<19), \
or airborne position with GNSS height (20<TC<22)" % msg
        )

    NIC = uncertainty.TC_NICv2_lookup[tc]

    # Supplement bits are ignored for GNSS-height messages (TC 20-22).
    NICs = 0 if 20 <= tc <= 22 else NICa * 2 + NICbc

    try:
        if isinstance(NIC, dict):
            # Some typecodes map to multiple NIC values keyed by the
            # combined supplement bits.
            NIC = NIC[NICs]
        Rc = uncertainty.NICv2[NIC][NICs]['Rc']
    except KeyError:
        Rc = uncertainty.NA

    return Rc
Calculate NIC, navigation integrity category, for ADS-B version 2 Args: msg (string): 28 bytes hexadecimal message string NICa (int or string): NIC supplement - A NICbc (int or srting): NIC supplement - B or C Returns: int or string: Horizontal Radius of Containment
juraj-google-style
def _patch_mask(body: dict) -> list:
    """Derive a REST patch mask (dotted key paths) from an API body.

    Each leaf produces a full dotted path describing the patch.
    'budgetSegments' and 'partnerCosts' are ordered lists changed at the
    parent level, so recursion stops at those keys.

    Args:
        body: Any REST API call dictionary, defined by API endpoint.

    Returns:
        A list of strings representing the full path to each leaf key.
    """
    paths = set()
    if isinstance(body, dict):
        for parent, value in body.items():
            children = _patch_mask(value)
            if not children or parent in ('budgetSegments', 'partnerCosts'):
                paths.add(parent)
            else:
                paths.update('%s.%s' % (parent, child) for child in children)
    elif isinstance(body, (list, tuple)):
        for item in body:
            paths.update(_patch_mask(item))
    return list(paths)
Loop through dictionary defining API body and create patch mask on keys. Each patch mask has format parent.child repreated. Each leaf has a full path describing the patch. Exceptions are budgetSegments, and partnerCosts which are lists with an order and changed at the parent level not the leaves. Args: body: Any REST API call dictionary, defined by API endpoint. Returns: A list of strings representing full path to each leaf key.
github-repos
def add_chapter(self, c):
    """Add a Chapter to your epub.

    Args:
        c (Chapter): A Chapter object representing your chapter.

    Raises:
        TypeError: Raised if a Chapter object isn't supplied to this
            method.
    """
    # Validate with isinstance rather than an assert on the exact type:
    # asserts are stripped under `python -O`, and isinstance also accepts
    # Chapter subclasses.
    if not isinstance(c, chapter.Chapter):
        raise TypeError('chapter must be of type Chapter')
    chapter_file_output = os.path.join(self.OEBPS_DIR, self.current_chapter_path)
    # Localize the chapter's images before writing it into the book tree.
    c._replace_images_in_chapter(self.OEBPS_DIR)
    c.write(chapter_file_output)
    self._increase_current_chapter_number()
    self.chapters.append(c)
Add a Chapter to your epub. Args: c (Chapter): A Chapter object representing your chapter. Raises: TypeError: Raised if a Chapter object isn't supplied to this method.
codesearchnet
def parse_line(line):
    """Parse one line of a text embedding file.

    Args:
        line: (str) One line of the text embedding file.

    Returns:
        A token string and its embedding vector as a list of floats.
    """
    columns = line.split()
    token = columns[0]
    values = list(map(float, columns[1:]))
    return token, values
Parses a line of a text embedding file. Args: line: (str) One line of the text embedding file. Returns: A token string and its embedding vector in floats.
juraj-google-style
def add_enumerable_item_to_dict(dict_, key, item):
    """Add an item to the list stored under a key of a dict.

    If the key does not exist yet it is initialized with an empty list.
    List-like (list/tuple) items extend the existing list; any other item
    is appended as a single element.

    Args:
        dict_ (dict): the dict to modify
        key (str): the key to add the item to
        item: the item (or list/tuple of items) to add
    """
    bucket = dict_.setdefault(key, [])
    if isinstance(item, (list, tuple)):
        bucket.extend(item)
    else:
        bucket.append(item)
Add an item to a list contained in a dict. For example: If the dict is ``{'some_key': ['an_item']}``, then calling this function will alter the dict to ``{'some_key': ['an_item', 'another_item']}``. If the key doesn't exist yet, the function initializes it with a list containing the item. List-like items are allowed. In this case, the existing list will be extended. Args: dict_ (dict): the dict to modify key (str): the key to add the item to item (whatever): The item to add to the list associated to the key
codesearchnet
def _ReadSpecificationFile(self, path):
    """Reads the format specification file.

    Each non-comment line has the form: identifier offset pattern
    (whitespace separated); malformed lines are logged and skipped.

    Args:
      path (str): path of the format specification file.

    Returns:
      FormatSpecificationStore: format specification store.
    """
    specification_store = specification.FormatSpecificationStore()

    with io.open(
        path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:
      for line in file_object.readlines():
        line = line.strip()
        # Skip blank lines and comment lines.
        if not line or line.startswith('#'):
          continue

        try:
          identifier, offset, pattern = line.split()
        except ValueError:
          logger.error('[skipping] invalid line: {0:s}'.format(line))
          continue

        try:
          offset = int(offset, 10)
        except ValueError:
          logger.error('[skipping] invalid offset in line: {0:s}'.format(line))
          continue

        try:
          # Interpret backslash escape sequences in the pattern as bytes.
          pattern = codecs.escape_decode(pattern)[0]
        except ValueError:
          logger.error(
              '[skipping] invalid pattern in line: {0:s}'.format(line))
          continue

        format_specification = specification.FormatSpecification(identifier)
        format_specification.AddNewSignature(pattern, offset=offset)
        specification_store.AddSpecification(format_specification)

    return specification_store
Reads the format specification file. Args: path (str): path of the format specification file. Returns: FormatSpecificationStore: format specification store.
juraj-google-style
def _AnalyzeSolutionSpace(initial_state):
  """Analyze the entire solution space starting from initial_state.

  This implements a variant of Dijkstra's algorithm on the graph that spans
  the solution space (LineStates are the nodes). The algorithm tries to find
  the shortest path (the one with the lowest penalty) from 'initial_state' to
  the state where all tokens are placed.

  Arguments:
    initial_state: (format_decision_state.FormatDecisionState) The initial
      state to start the search from.

  Returns:
    True if a formatting solution was found. False otherwise.
  """
  count = 0
  seen = set()
  p_queue = []

  # Seed the priority queue with the initial state at penalty 0.
  node = _StateNode(initial_state, False, None)
  heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(0, count), node))
  count += 1

  while p_queue:
    # Peek at the lowest-penalty state; it is only popped once we decide to
    # expand it.
    item = p_queue[0]
    penalty = item.ordered_penalty.penalty
    node = item.state_node
    if not node.state.next_token:
      # All tokens are placed: the cheapest complete solution is found.
      break
    heapq.heappop(p_queue)

    if count > 10000:
      # NOTE(review): after this many expansions the state comparison is
      # relaxed, presumably to force termination on pathological inputs —
      # confirm against the enclosing module's documentation.
      node.state.ignore_stack_for_comparison = True

    # Skip states that have already been expanded; the set add doubles as
    # the membership check.
    before_seen_count = len(seen)
    seen.add(node.state)
    if before_seen_count == len(seen):
      continue

    # Expand both branches: keep the next token on this line, or split.
    count = _AddNextStateToQueue(penalty, node, False, count, p_queue)
    count = _AddNextStateToQueue(penalty, node, True, count, p_queue)

  if not p_queue:
    # No formatting solution was found.
    return False

  _ReconstructPath(initial_state, heapq.heappop(p_queue).state_node)
  return True
Analyze the entire solution space starting from initial_state. This implements a variant of Dijkstra's algorithm on the graph that spans the solution space (LineStates are the nodes). The algorithm tries to find the shortest path (the one with the lowest penalty) from 'initial_state' to the state where all tokens are placed. Arguments: initial_state: (format_decision_state.FormatDecisionState) The initial state to start the search from. Returns: True if a formatting solution was found. False otherwise.
github-repos
def get_course_track_selection_url(course_run, query_parameters):
    """Return the track selection url for the given course.

    Arguments:
        course_run (dict): A dictionary containing course run metadata.
        query_parameters (dict): A dictionary containing query parameters
            to be added to the course selection url.

    Raises:
        KeyError: Raised when the course run dict does not have a 'key'
            key.

    Returns:
        (str): Course track selection url.
    """
    try:
        course_root = reverse(
            'course_modes_choose',
            kwargs={'course_id': course_run['key']},
        )
    except KeyError:
        # Log the offending course run before re-raising for the caller.
        LOGGER.exception(
            "KeyError while parsing course run data.\nCourse Run: \n[%s]",
            course_run,
        )
        raise
    url = '{}{}'.format(settings.LMS_ROOT_URL, course_root)
    return update_query_parameters(url, query_parameters)
Return track selection url for the given course. Arguments: course_run (dict): A dictionary containing course run metadata. query_parameters (dict): A dictionary containing query parameters to be added to course selection url. Raises: (KeyError): Raised when course run dict does not have 'key' key. Returns: (str): Course track selection url.
juraj-google-style
def set_attributes(path, archive=None, hidden=None, normal=None, notIndexed=None, readonly=None, system=None, temporary=None):
    """Set file attributes for a file.

    Note that the normal attribute means that all others are false. So
    setting it will clear all others.

    Args:
        path (str): The path to the file or directory
        archive (bool): Sets the archive attribute. Default is None
        hidden (bool): Sets the hidden attribute. Default is None
        normal (bool): Resets the file attributes. Cannot be used in
            conjunction with any other attribute. Default is None
        notIndexed (bool): Sets the indexed attribute. Default is None
        readonly (bool): Sets the readonly attribute. Default is None
        system (bool): Sets the system attribute. Default is None
        temporary (bool): Sets the temporary attribute. Default is None

    Returns:
        bool: True if successful, otherwise False
    """
    if (not os.path.exists(path)):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    if normal:
        # FILE_ATTRIBUTE_NORMAL (0x80) is only valid on its own.
        if (archive or hidden or notIndexed or readonly or system or temporary):
            raise CommandExecutionError('Normal attribute may not be used with any other attributes')
        # pywin32's SetFileAttributes returns None on success.
        ret = win32file.SetFileAttributes(path, 128)
        return (True if (ret is None) else False)
    # Start from the file's current attribute bitmask and toggle bits.
    intAttributes = win32file.GetFileAttributes(path)
    if (archive is not None):
        # FILE_ATTRIBUTE_ARCHIVE = 0x20; 65503 == 0xFFDF clears that bit.
        if archive:
            intAttributes |= 32
        else:
            intAttributes &= 65503
    if (hidden is not None):
        # FILE_ATTRIBUTE_HIDDEN = 0x2; 65533 == 0xFFFD clears it.
        if hidden:
            intAttributes |= 2
        else:
            intAttributes &= 65533
    if (notIndexed is not None):
        # FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000; 57343 == 0xDFFF.
        if notIndexed:
            intAttributes |= 8192
        else:
            intAttributes &= 57343
    if (readonly is not None):
        # FILE_ATTRIBUTE_READONLY = 0x1; 65534 == 0xFFFE clears it.
        if readonly:
            intAttributes |= 1
        else:
            intAttributes &= 65534
    if (system is not None):
        # FILE_ATTRIBUTE_SYSTEM = 0x4; 65531 == 0xFFFB clears it.
        if system:
            intAttributes |= 4
        else:
            intAttributes &= 65531
    if (temporary is not None):
        # FILE_ATTRIBUTE_TEMPORARY = 0x100; 65279 == 0xFEFF clears it.
        if temporary:
            intAttributes |= 256
        else:
            intAttributes &= 65279
    ret = win32file.SetFileAttributes(path, intAttributes)
    return (True if (ret is None) else False)
Set file attributes for a file. Note that the normal attribute means that all others are false. So setting it will clear all others. Args: path (str): The path to the file or directory archive (bool): Sets the archive attribute. Default is None hidden (bool): Sets the hidden attribute. Default is None normal (bool): Resets the file attributes. Cannot be used in conjunction with any other attribute. Default is None notIndexed (bool): Sets the indexed attribute. Default is None readonly (bool): Sets the readonly attribute. Default is None system (bool): Sets the system attribute. Default is None temporary (bool): Sets the temporary attribute. Default is None Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' file.set_attributes c:\\temp\\a.txt normal=True salt '*' file.set_attributes c:\\temp\\a.txt readonly=True hidden=True
codesearchnet
def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims):
    """Replace dimensions in a Tensor or Shape.

    old_dim_or_dims consists of a single dimension or a list of dimensions
    that must occur consecutively in the input shape. They are replaced by
    the dimensions in new_dim_or_dims.

    Args:
        tensor_or_shape: a Tensor or a Shape
        old_dim_or_dims: a Dimension or a list of Dimensions
        new_dim_or_dims: a Dimension or a list of Dimensions

    Returns:
        a new Tensor or a Shape

    Raises:
        ValueError: on invalid arguments, or if old_dim_or_dims is not a
            consecutive subsequence of the input's dimensions.
    """
    # For a Tensor, compute the replacement on its Shape and reshape to it.
    if isinstance(tensor_or_shape, Tensor):
        return reshape(tensor_or_shape, replace_dimensions(tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims))
    if (not isinstance(tensor_or_shape, Shape)):
        raise ValueError(('tensor_or_shape must be a Tensor or Shape got %s' % (tensor_or_shape,)))
    in_dims = tensor_or_shape.dims
    # Normalize single Dimensions into one-element lists.
    if isinstance(old_dim_or_dims, Dimension):
        old_dim_or_dims = [old_dim_or_dims]
    if isinstance(new_dim_or_dims, Dimension):
        new_dim_or_dims = [new_dim_or_dims]
    if ((not isinstance(old_dim_or_dims, list)) or (not old_dim_or_dims)):
        raise ValueError(('old_dim_or_dims must be a Dimension or a list of Dimension got %s' % (old_dim_or_dims,)))
    if ((not isinstance(new_dim_or_dims, list)) or (not new_dim_or_dims)):
        raise ValueError(('new_dim_or_dims must be a Dimension or a list of Dimension got %s' % (new_dim_or_dims,)))
    try:
        # The old dims must appear as one consecutive run inside in_dims;
        # a non-consecutive match is rejected via the same ValueError path
        # as a missing dimension.
        positions = [in_dims.index(d) for d in old_dim_or_dims]
        pos = positions[0]
        if (positions != list(range(pos, (pos + len(positions))))):
            raise ValueError()
    except ValueError:
        raise ValueError(("old_dim_or_dims must be a subsequence of the input's dimensions old_dim_or_dims=%s input's dimensions=%s" % (old_dim_or_dims, in_dims)))
    # Splice the new dims in place of the old run.
    return Shape(((in_dims[:pos] + new_dim_or_dims) + in_dims[(pos + len(old_dim_or_dims)):]))
Replace dimensions in a Tensor or Shape. old_dim_or_dims consists of a single dimension or a list of dimensions that must occur consecutively in the input shape. They are replaced by the dimensions in new_dim_or_dims. Args: tensor_or_shape: a Tensor or a Shape old_dim_or_dims: a Dimension or a list of Dimensions new_dim_or_dims: a Dimensions or a list of Dimensions Returns: a new Tensor or a Shape
codesearchnet
async def destroy_attachment(self, a: Attachment):
    """Destroy a match attachment.

    |methcoro|

    Args:
        a: the attachment you want to destroy

    Raises:
        APIException
    """
    route = 'tournaments/{}/matches/{}/attachments/{}'.format(
        self._tournament_id, self._id, a._id)
    await self.connection('DELETE', route)
    # Drop the local reference once the API call has succeeded.
    if a in self.attachments:
        self.attachments.remove(a)
destroy a match attachment |methcoro| Args: a: the attachment you want to destroy Raises: APIException
codesearchnet
def scope(self, framebuffer, enable_only=None, *, textures=(), uniform_buffers=(), storage_buffers=()) -> 'Scope':
    """Create a :py:class:`Scope` object.

    Args:
        framebuffer (Framebuffer): The framebuffer to use when entering.
        enable_only (int): The enable_only flags to set when entering.

    Keyword Args:
        textures (list): List of (texture, binding) tuples.
        uniform_buffers (list): List of (buffer, binding) tuples.
        storage_buffers (list): List of (buffer, binding) tuples.
    """
    # Unwrap the high-level wrappers into their low-level (mglo) handles.
    def unwrap(pairs):
        return tuple((obj.mglo, binding) for obj, binding in pairs)

    res = Scope.__new__(Scope)
    res.mglo = self.mglo.scope(
        framebuffer.mglo,
        enable_only,
        unwrap(textures),
        unwrap(uniform_buffers),
        unwrap(storage_buffers),
    )
    res.ctx = self
    res.extra = None
    return res
Create a :py:class:`Scope` object. Args: framebuffer (Framebuffer): The framebuffer to use when entering. enable_only (int): The enable_only flags to set when entering. Keyword Args: textures (list): List of (texture, binding) tuples. uniform_buffers (list): List of (buffer, binding) tuples. storage_buffers (list): List of (buffer, binding) tuples.
juraj-google-style
def _calculate_scores(self, query, key):
    """Calculates attention scores from query and key tensors.

    Args:
        query: Query tensor of shape `(batch_size, Tq, dim)`.
        key: Key tensor of shape `(batch_size, Tv, dim)`.

    Returns:
        Tensor of shape `(batch_size, Tq, Tv)`.

    Raises:
        ValueError: if `self.score_mode` is neither 'dot' nor 'concat'.
    """
    if self.score_mode == 'dot':
        # scores[b, i, j] = dot(query[b, i], key[b, j]).
        scores = ops.matmul(query, ops.transpose(key, axes=[0, 2, 1]))
        if self.scale is not None:
            scores *= self.scale
    elif self.score_mode == 'concat':
        # Broadcast query against key to form all (i, j) pairs, then
        # reduce over the feature axis (additive-style scoring).
        q_reshaped = ops.expand_dims(query, axis=-2)
        k_reshaped = ops.expand_dims(key, axis=-3)
        if self.scale is not None:
            scores = self.concat_score_weight * ops.sum(ops.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1)
        else:
            scores = self.concat_score_weight * ops.sum(ops.tanh(q_reshaped + k_reshaped), axis=-1)
    else:
        raise ValueError('scores not computed')
    return scores
Calculates attention scores as a query-key dot product. Args: query: Query tensor of shape `(batch_size, Tq, dim)`. key: Key tensor of shape `(batch_size, Tv, dim)`. Returns: Tensor of shape `(batch_size, Tq, Tv)`.
github-repos