code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def batch_reduce(self, reduce_op, value_destination_pairs, options=None):
    """Reduce values to destinations in batches.

    See `tf.distribute.StrategyExtended.batch_reduce_to`. This can only be
    called in the cross-replica context.

    Args:
      reduce_op: a `tf.distribute.ReduceOp` specifying how values should be
        combined.
      value_destination_pairs: a sequence of (value, destinations) pairs. See
        `tf.distribute.CrossDeviceOps.reduce` for descriptions.
      options: a `tf.distribute.experimental.CommunicationOptions`. Defaults
        to a fresh `collective_util.Options()` when not given.

    Returns:
      A list of `tf.Tensor` or `tf.distribute.DistributedValues`, one per pair
      in `value_destination_pairs`.

    Raises:
      ValueError: if `value_destination_pairs` is not an iterable of tuples of
        `tf.distribute.DistributedValues` and destinations.
    """
    if options is None:
        options = collective_util.Options()
    if not _validate_value_destination_pairs(value_destination_pairs):
        # Tolerate callers that pass bare values instead of
        # (value, destinations) tuples.
        value_destination_pairs = _normalize_value_destination_pairs(
            value_destination_pairs)
    for _, d in value_destination_pairs:
        validate_destinations(d)
    # Shortcut: single worker, all values already on matching devices, and a
    # single device per value -- no cross-device communication is needed.
    if (self._num_between_graph_workers == 1 and
            _all_devices_match(value_destination_pairs,
                               self._canonicalize_devices) and
            len(value_destination_pairs[0][0].values) == 1):
        return [
            distribute_utils.regroup(v.values, wrap_class=value_lib.Mirrored)
            for v, _ in value_destination_pairs
        ]
    # NOTE: the original repeated the `options is None` default here; that
    # second check was dead code because options is already defaulted above.
    return self.batch_reduce_implementation(reduce_op, value_destination_pairs,
                                            options)
Reduce values to destinations in batches. See `tf.distribute.StrategyExtended.batch_reduce_to`. This can only be called in the cross-replica context. Args: reduce_op: a `tf.distribute.ReduceOp` specifying how values should be combined. value_destination_pairs: a sequence of (value, destinations) pairs. See `tf.distribute.CrossDeviceOps.reduce` for descriptions. options: a `tf.distribute.experimental.CommunicationOptions`. See `tf.distribute.experimental.CommunicationOptions` for details. Returns: A list of `tf.Tensor` or `tf.distribute.DistributedValues`, one per pair in `value_destination_pairs`. Raises: ValueError: if `value_destination_pairs` is not an iterable of tuples of `tf.distribute.DistributedValues` and destinations.
github-repos
def put_headers_in_environ(headers, environ):
    """Insert HTTP headers into a PEP-333 environ dict.

    Each header name is upper-cased, dashes become underscores, and the
    result is prefixed with 'HTTP_' before being stored in ``environ``.

    Args:
        headers: A list of (header, value) tuples. The HTTP headers to add
            to the environment.
        environ: An environ dict for the request as defined in PEP-333.
    """
    for name, value in headers:
        env_key = 'HTTP_%s' % name.upper().replace('-', '_')
        environ[env_key] = value
Given a list of headers, put them into environ based on PEP-333. This converts headers to uppercase, prefixes them with 'HTTP_', and converts dashes to underscores before adding them to the environ dict. Args: headers: A list of (header, value) tuples. The HTTP headers to add to the environment. environ: An environ dict for the request as defined in PEP-333.
codesearchnet
def is_lambda(fun):
    """Check whether the given function is a lambda function.

    Args:
        fun (function): the object to inspect.

    Returns:
        bool: True if the given function is a lambda function, False
        otherwise.
    """
    # A lambda is an ordinary function object whose compiler-assigned name
    # is '<lambda>'. Comparing against a local reference lambda keeps the
    # check self-contained instead of depending on a module-level LAMBDA
    # sentinel defined elsewhere in the file.
    _reference = lambda: None
    return isinstance(fun, type(_reference)) and fun.__name__ == _reference.__name__
Check whether the given function is a lambda function. .. testsetup:: from proso.func import is_lambda .. testcode:: def not_lambda_fun(): return 1 lambda_fun = lambda: 1 print( is_lambda(not_lambda_fun), is_lambda(lambda_fun) ) .. testoutput:: False True Args: fun (function) Returns: bool: True if the given function is a lambda function, False otherwise
juraj-google-style
def __init__(self, report_interval: float = 5.0, max_pbcs: int = 4):
    """Initialise the Scheduler.

    Args:
        report_interval (float): Minimum interval between reports, in s.
        max_pbcs (int): Maximum number of concurrent PBCs (and therefore
            PBs) that can be running.
    """
    LOG.info('Starting Processing Block Scheduler.')
    # Queue of processing blocks awaiting execution.
    self._queue = self._init_queue()
    # Subscribe to processing-block events under this service's name.
    self._pb_events = ProcessingBlockList().subscribe(__service_name__)
    self._report_interval = report_interval
    # Count of currently running Processing Block Controllers.
    self._num_pbcs = 0
    self._max_pbcs = max_pbcs
    self._pb_list = ProcessingBlockList()
Initialise the Scheduler. Args: report_interval (float): Minimum interval between reports, in s max_pbcs (int): Maximum number of concurrent PBCs (and therefore PBs) that can be running.
juraj-google-style
def write_double(self, value, little_endian=True):
    """Pack the value as a double and write 8 bytes to the stream.

    Args:
        value (number): the value to write to the stream.
        little_endian (bool): specify the endianness. (Default) Little
            endian.

    Returns:
        int: the number of bytes written.
    """
    endian = '<' if little_endian else '>'
    return self.pack('%sd' % endian, value)
Pack the value as a double and write 8 bytes to the stream. Args: value (number): the value to write to the stream. little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
codesearchnet
def CreateSmartShoppingAdGroup(client, campaign_id):
    """Adds a new Smart Shopping ad group.

    Args:
        client: an AdWordsClient instance.
        campaign_id: the str ID of a Smart Shopping campaign.

    Returns:
        An ad group ID.
    """
    import uuid

    ad_group_service = client.GetService('AdGroupService', version='v201809')
    # NOTE: the 'name' entry in the original source was truncated
    # ("'Smart Shopping ad group " with no closing quote or comma -- a
    # syntax error); restored to a unique, valid name.
    ad_group = {
        'campaignId': campaign_id,
        'name': 'Smart Shopping ad group #%s' % uuid.uuid4(),
        # Smart Shopping campaigns require this ad group type.
        'adGroupType': 'SHOPPING_GOAL_OPTIMIZED_ADS'
    }
    adgroup_operations = {'operator': 'ADD', 'operand': ad_group}
    ad_group = ad_group_service.mutate(adgroup_operations)['value'][0]
    ad_group_id = ad_group['id']
    print('AdGroup with name "%s" and ID "%s" was added.'
          % (ad_group['name'], ad_group_id))
    return ad_group_id
Adds a new Smart Shopping ad group. Args: client: an AdWordsClient instance. campaign_id: the str ID of a Smart Shopping campaign. Returns: An ad group ID.
juraj-google-style
class Pop2PianoProcessor(ProcessorMixin):
    """Wraps a Pop2Piano feature extractor and a Pop2Piano tokenizer into a
    single processor, exposing both behind one `__call__`/`batch_decode`
    interface.
    """

    # Attribute names ProcessorMixin uses to save/load the two components.
    attributes = ['feature_extractor', 'tokenizer']
    feature_extractor_class = 'Pop2PianoFeatureExtractor'
    tokenizer_class = 'Pop2PianoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        """Store the two wrapped components via ProcessorMixin."""
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, audio: Union[np.ndarray, List[float], List[np.ndarray]]=None, sampling_rate: Optional[Union[int, List[int]]]=None, steps_per_beat: int=2, resample: Optional[bool]=True, notes: Union[List, TensorType]=None, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, verbose: bool=True, **kwargs) -> Union[BatchFeature, BatchEncoding]:
        """Forward `audio` (+ `sampling_rate`) to the feature extractor and/or
        `notes` to the tokenizer; when both run, the token ids are merged into
        the feature dict.

        Raises:
            ValueError: if neither (audio, sampling_rate) nor notes is given.
        """
        if (audio is None and sampling_rate is None) and notes is None:
            raise ValueError('You have to specify at least audios and sampling_rate in order to use feature extractor or notes to use the tokenizer part.')
        if audio is not None and sampling_rate is not None:
            inputs = self.feature_extractor(audio=audio, sampling_rate=sampling_rate, steps_per_beat=steps_per_beat, resample=resample, **kwargs)
        if notes is not None:
            encoded_token_ids = self.tokenizer(notes=notes, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
        # Return whichever component's output exists; merge when both do.
        if notes is None:
            return inputs
        elif audio is None or sampling_rate is None:
            return encoded_token_ids
        else:
            inputs['token_ids'] = encoded_token_ids['token_ids']
            return inputs

    def batch_decode(self, token_ids, feature_extractor_output: BatchFeature, return_midi: bool=True) -> BatchEncoding:
        """Decode model token ids back to notes (and optionally MIDI) via the
        tokenizer."""
        return self.tokenizer.batch_decode(token_ids=token_ids, feature_extractor_output=feature_extractor_output, return_midi=return_midi)

    @property
    def model_input_names(self):
        """Union of tokenizer and feature-extractor input names, de-duplicated
        while preserving order (dict.fromkeys keeps first occurrence)."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save both components to `save_directory` (created if missing).

        Raises:
            ValueError: if `save_directory` points to an existing file.
        """
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the processor from a pretrained checkpoint or path."""
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls(*args)
Constructs a Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single processor. [`Pop2PianoProcessor`] offers all the functionalities of [`Pop2PianoFeatureExtractor`] and [`Pop2PianoTokenizer`]. See the docstring of [`~Pop2PianoProcessor.__call__`] and [`~Pop2PianoProcessor.decode`] for more information. Args: feature_extractor (`Pop2PianoFeatureExtractor`): An instance of [`Pop2PianoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Pop2PianoTokenizer`): An instance of [`Pop2PianoTokenizer`]. The tokenizer is a required input.
github-repos
def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None):
    """Create a non-causal 4D mask of shape
    `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`.

    Args:
        mask (`torch.Tensor`): A 2D attention mask of shape
            `(batch_size, key_value_length)`.
        dtype (`torch.dtype`): The torch dtype the created mask shall have.
        tgt_len (`int`): The target (query) length the created mask shall
            have.
    """
    expanded_mask = AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
    return expanded_mask
Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)` Args: mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` dtype (`torch.dtype`): The torch dtype the created mask shall have. tgt_len (`int`): The target length or query length the created mask shall have.
github-repos
def add_vectors(self, vectors):
    """Add a list of vectors to the Bloch sphere.

    Accepts either a single vector (a flat sequence of numbers) or a
    collection of vectors.

    Args:
        vectors (array_like): Array with vectors of unit length or smaller,
            or a single such vector.
    """
    # Guard against empty input: the original indexed vectors[0]
    # unconditionally, which raised IndexError on an empty list.
    if len(vectors) == 0:
        return
    if isinstance(vectors[0], (list, np.ndarray)):
        # A collection of vectors: add each one.
        self.vectors.extend(vectors)
    else:
        # A single flat vector.
        self.vectors.append(vectors)
Add a list of vectors to Bloch sphere. Args: vectors (array_like): Array with vectors of unit length or smaller.
juraj-google-style
def from_Z(z: int):
    """Get an element from an atomic number.

    Args:
        z (int): Atomic number

    Returns:
        Element with atomic number z.

    Raises:
        ValueError: if no element has the given atomic number.
    """
    for symbol, props in _pt_data.items():
        if props["Atomic no"] != z:
            continue
        return Element(symbol)
    raise ValueError("No element with this atomic number %s" % z)
Get an element from an atomic number. Args: z (int): Atomic number Returns: Element with atomic number z.
juraj-google-style
def _tensor_list_column_heads(self, parsed, max_timestamp_width, max_dump_size_width, max_op_type_width):
    """Generate a line containing the column heads of the tensor list.

    Each column head doubles as a clickable menu item that re-runs
    `list_tensors` sorted by that column; clicking the column the list is
    already sorted by (ascending) toggles to descending (`-r`).

    Args:
      parsed: Parsed arguments (by argparse) of the list_tensors command.
      max_timestamp_width: (int) maximum width of the timestamp column.
      max_dump_size_width: (int) maximum width of the dump size column.
      max_op_type_width: (int) maximum width of the op type column.

    Returns:
      A RichTextLines object.
    """
    # Rebuild the base command so the clickable heads preserve any active
    # filters.
    base_command = 'list_tensors'
    if parsed.tensor_filter:
        base_command += ' -f %s' % parsed.tensor_filter
    if parsed.op_type_filter:
        base_command += ' -t %s' % parsed.op_type_filter
    if parsed.node_name_filter:
        base_command += ' -n %s' % parsed.node_name_filter
    # Font-attribute segments for line 0 (the single head row).
    attr_segs = {0: []}

    # Timestamp column.
    row = self._TIMESTAMP_COLUMN_HEAD
    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_TIMESTAMP)
    if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and (not parsed.reverse):
        command += ' -r'
    attr_segs[0].append((0, len(row), [debugger_cli_common.MenuItem(None, command), 'bold']))
    row += ' ' * (max_timestamp_width - len(row))

    # Dump size column.
    prev_len = len(row)
    row += self._DUMP_SIZE_COLUMN_HEAD
    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_DUMP_SIZE)
    if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and (not parsed.reverse):
        command += ' -r'
    attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), 'bold']))
    row += ' ' * (max_dump_size_width + max_timestamp_width - len(row))

    # Op type column.
    prev_len = len(row)
    row += self._OP_TYPE_COLUMN_HEAD
    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_OP_TYPE)
    if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and (not parsed.reverse):
        command += ' -r'
    attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem(None, command), 'bold']))
    row += ' ' * (max_op_type_width + max_dump_size_width + max_timestamp_width - len(row))

    # Tensor name column.
    prev_len = len(row)
    row += self._TENSOR_NAME_COLUMN_HEAD
    command = '%s -s %s' % (base_command, SORT_TENSORS_BY_TENSOR_NAME)
    if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and (not parsed.reverse):
        command += ' -r'
    # NOTE(review): this MenuItem uses '' for its caption where the other
    # columns use None -- presumably equivalent here; confirm against
    # debugger_cli_common.MenuItem.
    attr_segs[0].append((prev_len, len(row), [debugger_cli_common.MenuItem('', command), 'bold']))
    row += ' ' * (max_op_type_width + max_dump_size_width + max_timestamp_width - len(row))

    return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)
Generate a line containing the column heads of the tensor list. Args: parsed: Parsed arguments (by argparse) of the list_tensors command. max_timestamp_width: (int) maximum width of the timestamp column. max_dump_size_width: (int) maximum width of the dump size column. max_op_type_width: (int) maximum width of the op type column. Returns: A RichTextLines object.
github-repos
def get_full_description(self):
    """Generates the FULL description.

    Returns:
        The FULL description.

    Raises:
        FormatException: if formatting fails and
            throw_exception_on_parse_error is True.
    """
    try:
        time_segment = self.get_time_of_day_description()
        day_of_month_desc = self.get_day_of_month_description()
        month_desc = self.get_month_description()
        day_of_week_desc = self.get_day_of_week_description()
        year_desc = self.get_year_description()
        # NOTE: day-of-week is deliberately placed before month in the
        # assembled sentence.
        description = '{0}{1}{2}{3}{4}'.format(time_segment, day_of_month_desc, day_of_week_desc, month_desc, year_desc)
        description = self.transform_verbosity(description, self._options.verbose)
        description = self.transform_case(description, self._options.casing_type)
    except Exception:
        # Fall back to a generic (localizable) error message; the original
        # exception is swallowed unless configured to raise below.
        description = _('An error occured when generating the expression description. Check the cron expression syntax.')
        if self._options.throw_exception_on_parse_error:
            raise FormatException(description)
    return description
Generates the FULL description Returns: The FULL description Raises: FormatException: if formatting fails and throw_exception_on_parse_error is True
codesearchnet
def officers(self, num, **kwargs):
    """Search for a company's registered officers by company number.

    Args:
        num (str): Company number to search on.
        kwargs (dict): additional keywords passed into
            requests.session.get *params* keyword.
    """
    endpoint = self._BASE_URI + "company/{}/officers".format(num)
    response = self.session.get(endpoint, params=kwargs)
    self.handle_http_error(response)
    return response
Search for a company's registered officers by company number. Args: num (str): Company number to search on. kwargs (dict): additional keywords passed into requests.session.get *params* keyword.
juraj-google-style
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        bool: True if the file entry exists.
    """
    inode = getattr(path_spec, 'inode', None)
    location = getattr(path_spec, 'location', None)
    tsk_file = None
    try:
        # Prefer the inode when available; otherwise fall back to the
        # location path.
        if inode is not None:
            tsk_file = self._tsk_file_system.open_meta(inode=inode)
        elif location is not None:
            tsk_file = self._tsk_file_system.open(location)
    except IOError:
        # Failure to open means the entry does not exist.
        pass
    return tsk_file is not None
Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): path specification. Returns: bool: True if the file entry exists.
juraj-google-style
def get_configs(__pkg: str, __name: str='config') -> List[str]:
    """Return all configs for given package.

    Directories are searched in reverse precedence order (XDG dirs first,
    user config last), so later entries take priority for callers that
    layer configs.

    Args:
        __pkg: Package name
        __name: Configuration file name
    """
    search_dirs = [user_config(__pkg)]
    xdg_dirs = getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':')
    search_dirs.extend(
        path.expanduser(path.sep.join([d, __pkg])) for d in xdg_dirs)
    found = []
    for config_dir in reversed(search_dirs):
        candidate = path.join(config_dir, __name)
        if path.exists(candidate):
            found.append(candidate)
    return found
Return all configs for given package. Args: __pkg: Package name __name: Configuration file name
codesearchnet
def convert_acquire(self, shift, instruction):
    """Return converted `AcquireInstruction`.

    Args:
        shift(int): Offset time.
        instruction (AcquireInstruction): acquire instruction.

    Returns:
        dict: Dictionary of required parameters.
    """
    meas_level = self._run_config.get('meas_level', 2)
    command_dict = {
        'name': 'acquire',
        't0': shift + instruction.start_time,
        'duration': instruction.duration,
        'qubits': [q.index for q in instruction.acquires],
        'memory_slot': [m.index for m in instruction.mem_slots],
    }
    if meas_level == 2:
        # Discriminators and register slots only apply to classified
        # (level-2) measurement results.
        if instruction.command.discriminator:
            command_dict['discriminators'] = [
                QobjMeasurementOption(
                    name=instruction.command.discriminator.name,
                    params=instruction.command.discriminator.params)
            ]
        command_dict['register_slot'] = [regs.index for regs in instruction.reg_slots]
    if meas_level >= 1:
        # Kernels apply to both kerneled (level-1) and classified results.
        if instruction.command.kernel:
            command_dict['kernels'] = [
                QobjMeasurementOption(
                    name=instruction.command.kernel.name,
                    params=instruction.command.kernel.params)
            ]
    return self._qobj_model(**command_dict)
Return converted `AcquireInstruction`. Args: shift(int): Offset time. instruction (AcquireInstruction): acquire instruction. Returns: dict: Dictionary of required parameters.
codesearchnet
def get_hosted_zone_by_name(client, zone_name):
    """Get the zone id of an existing zone by name.

    Args:
        client (:class:`botocore.client.Route53`): The connection used to
            interact with Route53's API.
        zone_name (string): The name of the DNS hosted zone to create.

    Returns:
        string: The Id of the Hosted Zone, or None when no zone matches.
    """
    paginator = client.get_paginator('list_hosted_zones')
    for page in paginator.paginate():
        matches = [z for z in page['HostedZones'] if z['Name'] == zone_name]
        if matches:
            return parse_zone_id(matches[0]['Id'])
    return None
Get the zone id of an existing zone by name. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_name (string): The name of the DNS hosted zone to create. Returns: string: The Id of the Hosted Zone.
codesearchnet
def split_heads(self, x):
    """Split x into different heads, and transpose the resulting value.

    The tensor is transposed to ensure the inner dimensions hold the correct
    values during the matrix multiplication.

    Args:
      x: A tensor with shape [batch_size, length, hidden_size]

    Returns:
      A tensor with shape [batch_size, num_heads, length,
      hidden_size/num_heads]
    """
    with tf.name_scope("split_heads"):
        batch_size = tf.shape(x)[0]
        length = tf.shape(x)[1]
        # Per-head depth. The original line was truncated
        # ("depth = (self.hidden_size"); restore the integer division by the
        # number of heads, as required by the documented output shape.
        depth = self.hidden_size // self.num_heads
        # [batch, length, hidden] -> [batch, length, heads, depth]
        x = tf.reshape(x, [batch_size, length, self.num_heads, depth])
        # -> [batch, heads, length, depth]
        return tf.transpose(x, [0, 2, 1, 3])
Split x into different heads, and transpose the resulting value. The tensor is transposed to ensure the inner dimensions hold the correct values during the matrix multiplication. Args: x: A tensor with shape [batch_size, length, hidden_size] Returns: A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
juraj-google-style
def get_path(self, start_x: int, start_y: int, goal_x: int, goal_y: int) -> List[Tuple[int, int]]:
    """Return a list of (x, y) steps to reach the goal point, if possible.

    Args:
        start_x (int): Starting X position.
        start_y (int): Starting Y position.
        goal_x (int): Destination X position.
        goal_y (int): Destination Y position.

    Returns:
        List[Tuple[int, int]]: A list of points, or an empty list if there
        is no valid path.
    """
    lib.TCOD_path_compute(self._path_c, start_x, start_y, goal_x, goal_y)
    # Two adjacent C ints: coords[0] receives x, coords[1] receives y.
    coords = ffi.new("int[2]")
    x_cell = coords
    y_cell = coords + 1
    steps = []
    while lib.TCOD_path_walk(self._path_c, x_cell, y_cell, False):
        steps.append((x_cell[0], y_cell[0]))
    return steps
Return a list of (x, y) steps to reach the goal point, if possible. Args: start_x (int): Starting X position. start_y (int): Starting Y position. goal_x (int): Destination X position. goal_y (int): Destination Y position. Returns: List[Tuple[int, int]]: A list of points, or an empty list if there is no valid path.
juraj-google-style
def CopyToDict(self):
    """Copies the attribute container to a dictionary.

    Returns:
        dict[str, object]: attribute values per name, omitting attributes
        whose value is None.
    """
    return {
        name: value
        for name, value in self.GetAttributes()
        if value is not None
    }
Copies the attribute container to a dictionary. Returns: dict[str, object]: attribute values per name.
codesearchnet
def parse_resource_type(self, response):
    """Parse the LDP resource type from an HTTP response's Link header.

    Note:
        Uses the qname local part, as plugins may extend these base LDP
        resource types.

    Args:
        response (requests.models.Response): response object

    Returns:
        One of NonRDFSource, BasicContainer, DirectContainer,
        IndirectContainer, or False when the type cannot be determined.
    """
    # Extract <uri> targets of LDP-namespace links from the Link header.
    # NOTE: the startswith prefix was truncated in the extracted source
    # ("'<http:"); restored to the LDP namespace URI.
    links = [
        link.split(";")[0].lstrip('<').rstrip('>')
        for link in response.headers['Link'].split(', ')
        if link.startswith('<http://www.w3.org/ns/ldp#')
    ]
    ldp_resource_types = [
        self.repo.namespace_manager.compute_qname(resource_type)[2]
        for resource_type in links
    ]
    logger.debug('Parsed LDP resource types from LINK header: %s' % ldp_resource_types)
    # Precedence: binary resources first, then container subtypes.
    if 'NonRDFSource' in ldp_resource_types:
        return NonRDFSource
    elif 'BasicContainer' in ldp_resource_types:
        return BasicContainer
    elif 'DirectContainer' in ldp_resource_types:
        return DirectContainer
    elif 'IndirectContainer' in ldp_resource_types:
        return IndirectContainer
    else:
        logger.debug('could not determine resource type from Link header, returning False')
        return False
parse resource type from self.http_request() Note: uses isinstance() as plugins may extend these base LDP resource type. Args: response (requests.models.Response): response object Returns: [NonRDFSource, BasicContainer, DirectContainer, IndirectContainer]
juraj-google-style
def _verify_output(self, submission_type):
    """Verifies correctness of the submission output.

    Args:
      submission_type: type of the submission

    Returns:
      True if output looks valid
    """
    result = True
    if submission_type == 'defense':
        try:
            image_classification = load_defense_output(
                os.path.join(self._sample_output_dir, 'result.csv'))
            # A defense must classify every image in the batch.
            expected_keys = [IMAGE_NAME_PATTERN.format(i) for i in range(BATCH_SIZE)]
            if set(image_classification.keys()) != set(expected_keys):
                logging.error('Classification results are not saved for all images')
                result = False
        except IOError as e:
            logging.error('Failed to read defense output file: %s', e)
            result = False
    else:
        # Attack submissions must emit one 299x299 RGB image per input.
        for i in range(BATCH_SIZE):
            image_filename = os.path.join(self._sample_output_dir,
                                          IMAGE_NAME_PATTERN.format(i))
            try:
                img = np.array(Image.open(image_filename).convert('RGB'))
                if list(img.shape) != [299, 299, 3]:
                    logging.error('Invalid image size %s for image %s',
                                  str(img.shape), image_filename)
                    result = False
            except IOError as e:
                # A missing or unreadable image invalidates the output; the
                # exception itself is (intentionally?) not logged here.
                result = False
    return result
Verifies correctness of the submission output. Args: submission_type: type of the submission Returns: True if output looks valid
juraj-google-style
def __init__(self, reduce_to_device=None, accumulation_fn=None):
    """Initializes with a device to reduce to and a way to accumulate.

    Args:
      reduce_to_device: the intermediate device to reduce to. If None, reduce
        to the first device in `destinations` of the `reduce` method.
      accumulation_fn: a function that does accumulation. If None,
        `tf.math.add_n` is used.
    """
    self.reduce_to_device = reduce_to_device
    # Default to add_n when no accumulation function is given.
    self.accumulation_fn = accumulation_fn or math_ops.add_n
    super(ReductionToOneDevice, self).__init__()
Initializes with a device to reduce to and a way to accumulate. Args: reduce_to_device: the intermediate device to reduce to. If None, reduce to the first device in `destinations` of the `reduce` method. accumulation_fn: a function that does accumulation. If None, `tf.math.add_n` is used.
github-repos
def minimize(f, start=None, smooth=False, log=None, array=False, **vargs):
    """Minimize a function f of one or more arguments.

    Args:
        f: A function that takes numbers and returns a number
        start: A starting value or list of starting values
        smooth: Whether to assume that f is smooth and use first-order info
        log: Logging function called on the result of optimization
            (e.g. print)
        array: Whether f takes a single array argument
        vargs: Other named arguments passed to scipy.optimize.minimize

    Returns either:
        (a) the minimizing argument of a one-argument function
        (b) an array of minimizing arguments of a multi-argument function
    """
    if start is None:
        assert not array, 'Please pass starting values explicitly when array=True'
        arg_count = f.__code__.co_argcount
        assert arg_count > 0, 'Please pass starting values explicitly for variadic functions'
        start = [0] * arg_count
    if not hasattr(start, '__len__'):
        start = [start]

    if array:
        objective = f
    else:
        # Adapt a multi-argument function to scipy's single-array API.
        @functools.wraps(f)
        def objective(args):
            return f(*args)

    # Powell is derivative-free; use it unless the caller assumes smoothness
    # or chose a method explicitly.
    if not smooth and 'method' not in vargs:
        vargs['method'] = 'Powell'
    result = optimize.minimize(objective, start, **vargs)
    if log is not None:
        log(result)
    return result.x.item(0) if len(start) == 1 else result.x
Minimize a function f of one or more arguments. Args: f: A function that takes numbers and returns a number start: A starting value or list of starting values smooth: Whether to assume that f is smooth and use first-order info log: Logging function called on the result of optimization (e.g. print) vargs: Other named arguments passed to scipy.optimize.minimize Returns either: (a) the minimizing argument of a one-argument function (b) an array of minimizing arguments of a multi-argument function
codesearchnet
def _process_new(self, feed_item):
    """Creates a new placement DCM object from a feed item representing a
    placement from the Bulkdozer feed.

    This function simply creates the object to be inserted later by the
    BaseDAO object.

    Args:
      feed_item: Feed item representing the placement from the Bulkdozer
        feed.

    Returns:
      A placement object ready to be inserted in DCM through the API.
    """
    campaign = self.campaign_dao.get(feed_item, required=True)
    placement_group = self.placement_group_dao.get(feed_item, required=True)
    # Write resolved ids/names back into the feed item for later sheets sync.
    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']
    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']
    if placement_group:
        feed_item[FieldMap.PLACEMENT_GROUP_ID] = placement_group['id']
        feed_item[FieldMap.PLACEMENT_GROUP_NAME] = placement_group['name']
    result = {
        'name': feed_item.get(FieldMap.PLACEMENT_NAME, None),
        'adBlockingOptOut': feed_item.get(FieldMap.PLACEMENT_AD_BLOCKING, False),
        'campaignId': campaign['id'],
        'placementGroupId': placement_group['id'] if placement_group else None,
        'archived': feed_item.get(FieldMap.PLACEMENT_ARCHIVED, False),
        'siteId': feed_item.get(FieldMap.SITE_ID, None),
        'paymentSource': 'PLACEMENT_AGENCY_PAID',
        'pricingSchedule': {
            'startDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.PLACEMENT_START_DATE, None)),
            'endDate': StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.PLACEMENT_END_DATE, None)),
            'pricingType': feed_item.get(FieldMap.PLACEMENT_PRICING_SCHEDULE_COST_STRUCTURE, None) or 'PRICING_TYPE_CPM',
            'pricingPeriods': [{
                'startDate': feed_item.get(FieldMap.PLACEMENT_START_DATE, None),
                'endDate': feed_item.get(FieldMap.PLACEMENT_END_DATE, None)
            }]
        }
    }
    self._process_skipability(feed_item, result)
    if feed_item.get(FieldMap.PLACEMENT_ADDITIONAL_KEY_VALUES, None):
        result['tagSetting'] = {'additionalKeyValues': feed_item.get(FieldMap.PLACEMENT_ADDITIONAL_KEY_VALUES, None)}
    if feed_item.get(FieldMap.PLACEMENT_PRICING_TESTING_START, None):
        result['pricingSchedule']['testingStartDate'] = feed_item.get(FieldMap.PLACEMENT_PRICING_TESTING_START, None)
    self._process_active_view_and_verification(result, feed_item)
    # BUGFIX: the original indexed feed_item[FieldMap.PLACEMENT_TYPE]
    # directly in two comparisons, which raises KeyError when the column is
    # absent; read it once with .get() instead.
    placement_type = feed_item.get(FieldMap.PLACEMENT_TYPE, None)
    if placement_type in ('VIDEO', 'IN_STREAM_VIDEO'):
        result['compatibility'] = 'IN_STREAM_VIDEO'
        result['size'] = {'width': '0', 'height': '0'}
        result['tagFormats'] = ['PLACEMENT_TAG_INSTREAM_VIDEO_PREFETCH']
    elif placement_type == 'IN_STREAM_AUDIO':
        result['compatibility'] = 'IN_STREAM_AUDIO'
        result['size'] = {'width': '0', 'height': '0'}
        result['tagFormats'] = ['PLACEMENT_TAG_INSTREAM_VIDEO_PREFETCH']
    else:
        result['compatibility'] = 'DISPLAY'
        width = 1
        height = 1
        raw_size = feed_item.get(FieldMap.ASSET_SIZE, '0x0')
        if raw_size and 'x' in raw_size:
            width, height = raw_size.strip().lower().split('x')
        # Prefer a registered DCM size id; fall back to raw dimensions.
        sizes = self.get_sizes(int(width), int(height))
        if sizes:
            result['size'] = {'id': sizes[0]['id']}
        else:
            result['size'] = {'width': int(width), 'height': int(height)}
        result['tagFormats'] = ['PLACEMENT_TAG_STANDARD', 'PLACEMENT_TAG_JAVASCRIPT', 'PLACEMENT_TAG_IFRAME_JAVASCRIPT', 'PLACEMENT_TAG_IFRAME_ILAYER', 'PLACEMENT_TAG_INTERNAL_REDIRECT', 'PLACEMENT_TAG_TRACKING', 'PLACEMENT_TAG_TRACKING_IFRAME', 'PLACEMENT_TAG_TRACKING_JAVASCRIPT']
    self._process_transcode(result, feed_item)
    self._process_pricing_schedule(result, feed_item)
    return result
Creates a new placement DCM object from a feed item representing an placement from the Bulkdozer feed. This function simply creates the object to be inserted later by the BaseDAO object. Args: feed_item: Feed item representing the placement from the Bulkdozer feed. Returns: An placement object ready to be inserted in DCM through the API.
github-repos
def get_table_metadata(engine, table):
    """Extract all useful infos from the given table.

    Args:
        engine: SQLAlchemy connection engine
        table: table name

    Returns:
        Table metadata reflected from the database.
    """
    meta = MetaData()
    # Reflect only the requested table to avoid loading the whole schema.
    meta.reflect(bind=engine, only=[table])
    return Table(table, meta, autoload=True)
Extract all useful infos from the given table Args: engine: SQLAlchemy connection engine table: table name Returns: Dictionary of infos
codesearchnet
def join(path, *paths):
    """Join one or more path components intelligently.

    TensorFlow-specific filesystems (paths with a URI scheme such as
    "gcs://bucket") are joined like a URL, using "/" as the path separator
    on all platforms; plain native paths use os.path.join.

    Args:
      path: string, path to a directory
      paths: string, additional paths to concatenate

    Returns:
      path: the joined path.
    """
    path_ = compat.as_str_any(compat.path_to_str(path))
    # NOTE: the condition was truncated in the extracted source ("if ':");
    # restored to the scheme check used by TensorFlow's gfile: a "://"
    # anywhere past the first character indicates a URI-style filesystem.
    if '://' in path_[1:]:
        return urljoin(path, *paths)
    return os.path.join(path, *paths)
Join one or more path components intelligently. TensorFlow specific filesystems will be joined like a url (using "/" as the path separator) on all platforms: On Windows or Linux/Unix-like: >>> tf.io.gfile.join("gcs://folder", "file.py") 'gcs://folder/file.py' >>> tf.io.gfile.join("ram://folder", "file.py") 'ram://folder/file.py' But the native filesystem is handled just like os.path.join: >>> path = tf.io.gfile.join("folder", "file.py") >>> if os.name == "nt": ... expected = "folder\\file.py" # Windows ... else: ... expected = "folder/file.py" # Linux/Unix-like >>> path == expected True Args: path: string, path to a directory paths: string, additional paths to concatenate Returns: path: the joined path.
github-repos
def add_ldap_group_link(self, cn, group_access, provider, **kwargs):
    """Add an LDAP group link.

    Args:
        cn (str): CN of the LDAP group
        group_access (int): Minimum access level for members of the LDAP
            group
        provider (str): LDAP provider for the LDAP group
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the server cannot perform the request
    """
    url = '/groups/%s/ldap_group_links' % self.get_id()
    payload = {'cn': cn, 'group_access': group_access, 'provider': provider}
    self.manager.gitlab.http_post(url, post_data=payload, **kwargs)
Add an LDAP group link. Args: cn (str): CN of the LDAP group group_access (int): Minimum access level for members of the LDAP group provider (str): LDAP provider for the LDAP group **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request
juraj-google-style
def propose(self, n=1):
    """Use the trained model to propose a new set of parameters.

    Args:
        n (int, optional): number of candidates to propose

    Returns:
        Mapping of tunable name to proposed value. If called with n>1 then
        proposal is a list of dictionaries. Returns None when no candidates
        can be created.
    """
    proposed_params = []
    for _ in range(n):
        candidate_params = self._create_candidates()
        if candidate_params is None:
            # No more candidates available (e.g. search space exhausted).
            return None
        predictions = self.predict(candidate_params)
        best_idx = self._acquire(predictions)
        params = {}
        # NOTE: the original wrote `candidate_params[(idx, :)]`, which is
        # invalid Python syntax from the extraction; restored to the
        # intended numpy row indexing. The outer loop variable was also
        # shadowed by the inner index -- renamed for clarity.
        for col in range(candidate_params[best_idx, :].shape[0]):
            inverse_transformed = self.tunables[col][1].inverse_transform(
                candidate_params[best_idx, col])
            params[self.tunables[col][0]] = inverse_transformed
        proposed_params.append(params)
    return params if n == 1 else proposed_params
Use the trained model to propose a new set of parameters. Args: n (int, optional): number of candidates to propose Returns: Mapping of tunable name to proposed value. If called with n>1 then proposal is a list of dictionaries.
codesearchnet
def is_duplicated(self, item):
    """Check whether the item has been in the cache.

    If the item has not been seen before, hash it and put it into the
    cache, otherwise indicate the item is duplicated. When the cache size
    exceeds capacity, discard the earliest items in the cache.

    Args:
        item (object): The item to be checked and stored in cache. It must
            be immutable or a list/dict.

    Returns:
        bool: Whether the item has been in cache.
    """
    # Derive a hashable cache key for unhashable containers.
    if isinstance(item, dict):
        cache_key = json.dumps(item, sort_keys=True)
    elif isinstance(item, list):
        cache_key = frozenset(item)
    else:
        cache_key = item
    if cache_key in self._cache:
        return True
    # New item: remember it, evicting the oldest entry when at capacity.
    if self.cache_capacity > 0 and len(self._cache) >= self.cache_capacity:
        self._cache.popitem(False)
    self._cache[cache_key] = 1
    return False
Check whether the item has been in the cache If the item has not been seen before, then hash it and put it into the cache, otherwise indicates the item is duplicated. When the cache size exceeds capacity, discard the earliest items in the cache. Args: item (object): The item to be checked and stored in cache. It must be immutable or a list/dict. Returns: bool: Whether the item has been in cache.
juraj-google-style
def emit(self, name, *args, **kwargs):
    """Dispatches an event to any subscribed listeners.

    Note:
        If a listener returns :obj:`False`, the event will stop dispatching
        to other listeners. Any other return value is ignored.

    Args:
        name (str): The name of the :class:`Event` to dispatch
        *args (Optional): Positional arguments to be sent to listeners
        **kwargs (Optional): Keyword arguments to be sent to listeners
    """
    # Property events take precedence; fall back to regular events
    # (KeyError propagates for unknown names, as before).
    event = self.__property_events.get(name)
    if event is None:
        event = self.__events[name]
    return event(*args, **kwargs)
Dispatches an event to any subscribed listeners Note: If a listener returns :obj:`False`, the event will stop dispatching to other listeners. Any other return value is ignored. Args: name (str): The name of the :class:`Event` to dispatch *args (Optional): Positional arguments to be sent to listeners **kwargs (Optional): Keyword arguments to be sent to listeners
codesearchnet
def _validate_xoxp_token(self): if self.token.startswith('xoxb'): method_name = inspect.stack()[1][3] msg = "The method '{}' cannot be called with a Bot Token.".format(method_name) raise err.BotUserAccessError(msg)
Ensures that an xoxp token is used when the specified method is called. Raises: BotUserAccessError: If the API method is called with a Bot User OAuth Access Token.
codesearchnet
def add_documents(self, docs):
    """Update dictionary from a collection of documents. Each document is a
    list of tokens.

    Args:
        docs (list): documents to add.
    """
    for document in docs:
        processed = map(self.process_token, document)
        self._token_count.update(processed)
Update dictionary from a collection of documents. Each document is a list of tokens. Args: docs (list): documents to add.
juraj-google-style
def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
    """Samples from the latent space in the autoencoder.

    Args:
      latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the
        shape of its first two dimensions are used; length_q is the latent
        length.
      inputs: Tensor of shape [batch, length_kv, hparams.hidden_size].
        Encodings to attend to in decoder.
      ed: Tensor which broadcasts with shape
        [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder
        attention bias.
      embed: Callable which embeds discrete latent hot-vectors and a hidden
        size and returns dense vectors.
      hparams: HParams.

    Returns:
      Tensor of shape [batch, length].
    """

    def symbols_to_logits_fn(ids):
        """Go from ids to logits."""
        ids = tf.expand_dims(ids, axis=2)
        # Shift ids right so position t predicts latent t.
        # NOTE: the original source's `ids[(:, 1:)]` (and similar subscripts
        # below) is invalid Python syntax from the extraction; restored to
        # standard slicing.
        latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])
        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            latents_dense = embed(
                tf.one_hot(latents_discrete, depth=2 ** hparams.bottleneck_bits),
                hparams.hidden_size)
            latents_pred = transformer_latent_decoder(
                latents_dense, inputs, ed, hparams, name='latent_prediction')
            logits = tf.layers.dense(
                latents_pred, 2 ** hparams.bottleneck_bits, name='logits_dense')
            # Only the logits at the current decode position are needed.
            current_output_position = common_layers.shape_list(ids)[1] - 1
            logits = logits[:, current_output_position, :]
        return logits

    initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
    length = tf.shape(latents_dense_in)[1]
    # Beam size 1, no early stopping; eos_id=-1 disables EOS termination.
    ids, _, _ = beam_search.beam_search(
        symbols_to_logits_fn, initial_ids, 1, length,
        2 ** hparams.bottleneck_bits, alpha=0.0, eos_id=-1, stop_early=False)
    res = tf.expand_dims(ids[:, 0, :], axis=2)
    # Strip the dummy first position introduced by beam search.
    return res[:, 1:]
Samples from the latent space in the autoencoder. Args: latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of its first two dimensions are used. length_q is the latent length, which is height * width * hparams.num_latents / (2**hparams.num_compress_steps). inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings to attend to in decoder. ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. embed: Callable which embeds discrete latent hot-vectors and a hidden size and returns dense vectors. hparams: HParams. Returns: Tensor of shape [batch, length].
codesearchnet
def Begin(self, function_name):
    """Start analyzing function body.

    Args:
        function_name: The name of the function being tracked.
    """
    self.current_function = function_name
    self.lines_in_function = 0
    self.in_a_function = True
Start analyzing function body. Args: function_name: The name of the function being tracked.
juraj-google-style
def generate_hpo_gene_list(self, *hpo_terms):
    """Generate a sorted list with tuples of hpo genes.

    Each tuple of the list looks like (hgnc_id, count), sorted by count in
    descending order.

    Args:
        hpo_terms(iterable(str))

    Returns:
        hpo_genes(list(HpoGene))
    """
    gene_counts = {}
    for term in hpo_terms:
        hpo_obj = self.hpo_term(term)
        if not hpo_obj:
            LOG.warning('Term %s could not be found', term)
            continue
        for hgnc_id in hpo_obj['genes']:
            gene_counts[hgnc_id] = gene_counts.get(hgnc_id, 0) + 1
    return sorted(gene_counts.items(), key=lambda pair: pair[1], reverse=True)
Generate a sorted list with namedtuples of hpogenes Each namedtuple of the list looks like (hgnc_id, count) Args: hpo_terms(iterable(str)) Returns: hpo_genes(list(HpoGene))
codesearchnet
def recipe_bigquery_run_query(config, auth_write, query, legacy):
    """Run query on a project.

    Args:
        auth_write (authentication) - Credentials used for writing data.
        query (text) - SQL with newlines and all.
        legacy (boolean) - Query type must match table and query format.
    """
    task = {
        'auth': auth_write,
        'run': {'query': query, 'legacy': legacy},
    }
    bigquery(config, task)
Run query on a project. Args: auth_write (authentication) - Credentials used for writing data. query (text) - SQL with newlines and all. legacy (boolean) - Query type must match table and query format.
github-repos
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None): if filename.startswith('modelzoo: import torchvision model_urls = dict() for _, name, ispkg in pkgutil.walk_packages( torchvision.models.__path__): if not ispkg: _zoo = import_module('torchvision.models.{}'.format(name)) _urls = getattr(_zoo, 'model_urls') model_urls.update(_urls) model_name = filename[11:] checkpoint = model_zoo.load_url(model_urls[model_name]) elif filename.startswith('open-mmlab: model_name = filename[13:] checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name]) elif filename.startswith(('http: checkpoint = model_zoo.load_url(filename) else: if not osp.isfile(filename): raise IOError('{} is not a checkpoint file'.format(filename)) checkpoint = torch.load(filename, map_location=map_location) if isinstance(checkpoint, OrderedDict): state_dict = checkpoint elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: raise RuntimeError( 'No state_dict found in checkpoint file {}'.format(filename)) if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()} if hasattr(model, 'module'): load_state_dict(model.module, state_dict, strict, logger) else: load_state_dict(model, state_dict, strict, logger) return checkpoint
Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Either a filepath or URL or modelzoo://xxxxxxx. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. Returns: dict or OrderedDict: The loaded checkpoint.
juraj-google-style
def invoke_process_batch(self, windowed_batch, additional_args=None, additional_kwargs=None):
    """Invoke the DoFn.process_batch() method for a batch of elements.

    Abstract hook: concrete invokers must override this.

    Args:
        windowed_batch: a WindowedBatch holding the elements (and the window
            each element belongs to) that process_batch() should be invoked
            with.
        additional_args: extra positional arguments for the invocation,
            usually side inputs.
        additional_kwargs: extra keyword arguments for the invocation.

    Raises:
        NotImplementedError: always; subclasses provide the implementation.
    """
    raise NotImplementedError
Invokes the DoFn.process() function. Args: windowed_batch: a WindowedBatch object that gives a batch of elements for which process_batch() method should be invoked, along with the window each element belongs to. additional_args: additional arguments to be passed to the current `DoFn.process()` invocation, usually as side inputs. additional_kwargs: additional keyword arguments to be passed to the current `DoFn.process()` invocation.
github-repos
def _make_hostport(conn, default_host, default_port, default_user='', default_password=None): parsed = urllib.parse.urlparse(' return Connection( parsed.hostname or default_host, parsed.port or default_port, parsed.username if parsed.username is not None else default_user, parsed.password if parsed.password is not None else default_password, )
Convert a '[user[:pass]@]host:port' string to a Connection tuple.

If the given connection is empty, use defaults. If no port is given,
use the default.

Args:
    conn (str): the string describing the target host/port
    default_host (str): the host to use if ``conn`` is empty
    default_port (int): the port to use if not given in ``conn``.

Returns:
    (str, int): a (host, port) tuple.
juraj-google-style
def _parse_positive_int_param(request, param_name):
    """Parse a query parameter that must be a positive (>0) integer.

    Args:
        request: The Werkzeug Request object.
        param_name: Name of the query parameter.

    Returns:
        The parsed int, None when the parameter is absent or empty, or -1
        when it is present but not a positive integer.
    """
    raw = request.args.get(param_name)
    if not raw:
        return None
    try:
        value = int(raw)
    except ValueError:
        return -1
    return value if value > 0 else -1
Parses and asserts a positive (>0) integer query parameter. Args: request: The Werkzeug Request object param_name: Name of the parameter. Returns: Param, or None, or -1 if parameter is not a positive integer.
juraj-google-style
def get_entries(attr_name): assert attr_name in ['inputs', 'outputs'] entries = {} for op_type in ops._gradient_registry.list(): if op_type in _EXCLUDED_OPS: continue num_values = _get_num_inputs_outputs(op_type)[0 if attr_name == 'inputs' else 1] gradient_fn = ops._gradient_registry.lookup(op_type) if gradient_fn is None: if num_values != -1: entries[op_type] = '{"%s"},' % op_type continue used_tensors = _live_tensors(gradient_fn, attr_name=attr_name) if used_tensors is _ALL: continue elif not used_tensors: entries[op_type] = '{"%s"},' % op_type else: all_tensors = set(range(num_values)) unused_tensors = all_tensors - used_tensors if unused_tensors: unused_tensor_list = sorted(list(unused_tensors)) entries[op_type] = '{"%s", %d, {%s}},' % (op_type, len(unused_tensor_list), ', '.join((str(i) for i in unused_tensor_list))) return entries
Returns the dict of entries. Each entry is of the form {op_name, {true|false, indices}} true: All values are unused. false: `indices` are the only unused indices. Note: ops for which all values are used are not printed. Args: attr_name: inputs or outputs. Returns: A dict from op_type to formatted entry in the dict.
github-repos
def ping(self, destination, length=20): print '%s call ping' % self.port print 'destination: %s' %destination try: cmd = 'ping %s %s' % (destination, str(length)) print cmd self._sendline(cmd) self._expect(cmd) time.sleep(1) except Exception, e: ModuleHelper.WriteIntoDebugLogger("ping() Error: " + str(e))
send ICMPv6 echo request with a given length to a unicast destination address Args: destination: the unicast destination address of ICMPv6 echo request length: the size of ICMPv6 echo request payload
juraj-google-style
def __init__(self, error_formatter):
    """Create a ParserError instance.

    Args:
        error_formatter: An ErrorFormatter used to format the parse errors.
    """
    self._formatter = error_formatter
Creates a ParserError instance. Args: error_formatter: An ErrorFormatter to format the parse errors.
github-repos
def validate_id(tx_body):
    """Validate the transaction ID of a transaction.

    The id must equal the hash of the transaction body serialized with the
    ``id`` field nulled out.

    Args:
        tx_body (dict): The transaction to check.

    Raises:
        InvalidHash: when the id is missing or does not match the body hash.
    """
    body = deepcopy(tx_body)
    try:
        proposed_tx_id = body.pop('id')
    except KeyError:
        raise InvalidHash('No transaction id found!')
    # The id is excluded from its own hash computation.
    body['id'] = None
    valid_tx_id = Transaction._to_hash(Transaction._to_str(body))
    if proposed_tx_id != valid_tx_id:
        raise InvalidHash(
            "The transaction's id '{}' isn't equal to "
            "the hash of its body, i.e. it's not valid.".format(proposed_tx_id))
Validate the transaction ID of a transaction Args: tx_body (dict): The Transaction to be transformed.
juraj-google-style
def to_hgnc(self, hgnc_alias, build='37'):
    """Resolve a (possibly aliased) HGNC symbol to its current symbol.

    Args:
        hgnc_alias (str): symbol or alias to look up.
        build (str): genome build, defaults to '37'.

    Returns:
        str or None: the official hgnc_symbol of the first match, or None
        when the alias is unknown.
    """
    matches = self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build)
    if not matches:
        return None
    for gene in matches:
        # Only the first hit is relevant.
        return gene['hgnc_symbol']
Check if a hgnc symbol is an alias Return the correct hgnc symbol, if not existing return None Args: hgnc_alias(str) Returns: hgnc_symbol(str)
juraj-google-style
def add(self, element, multiplicity=1):
    """Add *element* to the multiset *multiplicity* times.

    Extends the :meth:`MutableSet.add` signature to allow specifying a
    multiplicity, e.g. ``ms.add('b', 2)`` adds two copies of ``'b'``.

    Args:
        element: The element to add to the multiset.
        multiplicity: How many copies to add; must be >= 1.

    Raises:
        ValueError: if multiplicity is not positive.
    """
    if multiplicity < 1:
        raise ValueError('Multiplicity must be positive')
    # Update the per-element count first so a failure (e.g. unhashable
    # element) leaves the total untouched, matching the original ordering.
    self._elements[element] += multiplicity
    self._total += multiplicity
Adds an element to the multiset. >>> ms = Multiset() >>> ms.add('a') >>> sorted(ms) ['a'] An optional multiplicity can be specified to define how many of the element are added: >>> ms.add('b', 2) >>> sorted(ms) ['a', 'b', 'b'] This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity. Args: element: The element to add to the multiset. multiplicity: The multiplicity i.e. count of elements to add.
codesearchnet
def poll(self, batch_id, retry_seconds=None, back_off=None, timeout=None, halt_on_error=True): if (self.halt_on_poll_error is not None): halt_on_error = self.halt_on_poll_error if ((self._poll_interval is None) and (self._batch_data_count is not None)): self._poll_interval = max(math.ceil((self._batch_data_count / 300)), 5) elif (self._poll_interval is None): self._poll_interval = 15 if (back_off is None): poll_interval_back_off = 2.5 else: poll_interval_back_off = float(back_off) if (retry_seconds is None): poll_retry_seconds = 5 else: poll_retry_seconds = int(retry_seconds) if (timeout is None): timeout = self.poll_timeout else: timeout = int(timeout) params = {'includeAdditional': 'true'} poll_count = 0 poll_time_total = 0 data = {} while True: poll_count += 1 poll_time_total += self._poll_interval time.sleep(self._poll_interval) self.tcex.log.info('Batch poll time: {} seconds'.format(poll_time_total)) try: r = self.tcex.session.get('/v2/batch/{}'.format(batch_id), params=params) if ((not r.ok) or ('application/json' not in r.headers.get('content-type', ''))): self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error) return data data = r.json() if (data.get('status') != 'Success'): self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error) except Exception as e: self.tcex.handle_error(540, [e], halt_on_error) if (data.get('data', {}).get('batchStatus', {}).get('status') == 'Completed'): modifier = (poll_time_total * 0.7) self._poll_interval_times = (self._poll_interval_times[(- 4):] + [modifier]) weights = [1] poll_interval_time_weighted_sum = 0 for poll_interval_time in self._poll_interval_times: poll_interval_time_weighted_sum += (poll_interval_time * weights[(- 1)]) weights.append((weights[(- 1)] * 1.5)) weights.pop() self._poll_interval = math.floor((poll_interval_time_weighted_sum / sum(weights))) if (poll_count == 1): self._poll_interval = (self._poll_interval * 0.85) self.tcex.log.debug('Batch Status: {}'.format(data)) return data 
self._poll_interval = min((poll_retry_seconds + int((poll_count * poll_interval_back_off))), 20) if (poll_time_total >= timeout): self.tcex.handle_error(550, [timeout], True)
Poll Batch status to ThreatConnect API. .. code-block:: javascript { "status": "Success", "data": { "batchStatus": { "id":3505, "status":"Completed", "errorCount":0, "successCount":0, "unprocessCount":0 } } } Args: batch_id (str): The ID returned from the ThreatConnect API for the current batch job. retry_seconds (int): The base number of seconds used for retries when job is not completed. back_off (float): A multiplier to use for backing off on each poll attempt when job has not completed. timeout (int, optional): The number of seconds before the poll should timeout. halt_on_error (bool, default:True): If True any exception will raise an error. Returns: dict: The batch status returned from the ThreatConnect API.
codesearchnet
def isprocess(pid, error=False):
    """Return True if a process with the given *pid* is running.

    Signal 0 performs the existence check without affecting the process.

    Args:
        pid (int): Process ID to check.
        error: accepted for API compatibility; currently unused.

    Returns:
        bool: True if the process is running, else False.
    """
    try:
        os.kill(pid, 0)
    except OSError:
        # Includes "no such process" and "permission denied".
        return False
    return True
Check that a process is running. Arguments: pid (int): Process ID to check. Returns: True if the process is running, else false.
juraj-google-style
def from_location(cls, location): if not location: return cls() try: if hasattr(location, 'isLocation'): return location elif hasattr(location, 'Latitude'): return cls(city=str(location.Name.replace(",", " ")), latitude=location.Latitude, longitude=location.Longitude) elif location.startswith('Site:'): loc, city, latitude, longitude, time_zone, elevation = \ [x.strip() for x in re.findall(r'\r*\n*([^\r\n]*)[,|;]', location, re.DOTALL)] else: try: city, latitude, longitude, time_zone, elevation = \ [key.split(":")[-1].strip() for key in location.split(",")] except ValueError: return cls(city=location) return cls(city=city, country=None, latitude=latitude, longitude=longitude, time_zone=time_zone, elevation=elevation) except Exception as e: raise ValueError( "Failed to create a Location from %s!\n%s" % (location, e))
Try to create a Ladybug location from a location string. Args: locationString: Location string Usage: l = Location.from_location(locationString)
juraj-google-style
def variable_accessed(variable):
    """Notify all tapes in the stack and all variable watchers of an access.

    Args:
        variable: variable to be watched.
    """
    for var in _variables_override(variable):
        pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
Notifies all tapes in the stack that a variable has been accessed. Args: variable: variable to be watched.
github-repos
def tick(self):
    """Advance the environment by a single tick (multi-agent stepping).

    Flushes queued commands, then releases and re-acquires the client so
    the simulation can run one step before state is read back.

    Returns:
        dict: mapping from agent name to that agent's full state; each full
        state maps sensors to np.ndarray (including reward and terminal
        sensors).
    """
    self._handle_command_buffer()
    client = self._client
    client.release()
    client.acquire()
    return self._get_full_state()
Ticks the environment once. Normally used for multi-agent environments. Returns: dict: A dictionary from agent name to its full state. The full state is another dictionary from :obj:`holodeck.sensors.Sensors` enum to np.ndarray, containing the sensors information for each sensor. The sensors always include the reward and terminal sensors.
codesearchnet
def init_from_class_batches(self, class_batches, num_shards=None): shards_for_submissions = {} shard_idx = 0 for (idx, (batch_id, batch_val)) in enumerate(iteritems(class_batches)): work_id = DEFENSE_WORK_ID_PATTERN.format(idx) submission_id = batch_val['submission_id'] shard_id = None if num_shards: shard_id = shards_for_submissions.get(submission_id) if (shard_id is None): shard_id = (shard_idx % num_shards) shards_for_submissions[submission_id] = shard_id shard_idx += 1 self.work[work_id] = {'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, 'submission_id': submission_id, 'shard_id': shard_id, 'output_classification_batch_id': batch_id}
Initializes work pieces from classification batches. Args: class_batches: dict with classification batches, could be obtained as ClassificationBatches.data num_shards: number of shards to split data into, if None then no sharding is done.
codesearchnet
def decompress_decoder(inputs, hparams, strides=(2, 2), kernel=(3, 3), name=None): with tf.variable_scope(name, default_name="decompress"): x = inputs x = tf.layers.dense(x, hparams.hidden_size, name=name + "_dense") x = residual_block_layer(x, hparams) for i in range(hparams.num_compress_steps j = hparams.num_compress_steps with tf.variable_scope(name + "_%d" % j): if hparams.do_decompress_attend: y = compress_self_attention_layer( x, hparams, name="decompress_selfatt") x += y y = tf.layers.conv2d_transpose( x, hparams.hidden_size, kernel, strides=strides, padding="SAME", activation=tf.nn.relu if i > 0 else None, name="decompress_conv") x = y return x
Decoder that decompresses 2-D inputs by 2**num_compress_steps. Args: inputs: Tensor of shape [batch, compress_height, compress_width, channels]. hparams: HParams. strides: Tuple, strides for conv block. kernel: Tuple, kernel window size for conv block. name: string, variable scope. Returns: Tensor of shape [batch, height, width, hparams.hidden_size].
juraj-google-style
def __init__(self, *args, **kwargs):
    """Create an InvocationTransaction instance.

    Args:
        *args: forwarded to the parent transaction constructor.
        **kwargs: forwarded to the parent transaction constructor.
    """
    super(InvocationTransaction, self).__init__(*args, **kwargs)
    self.Type = TransactionType.InvocationTransaction
    self.Gas = Fixed8(0)
Create an instance. Args: *args: **kwargs:
juraj-google-style
def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None): pixel_values_videos = pixel_values_videos.type(self.visual.dtype) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) split_sizes = (video_grid_thw.prod(-1) video_embeds = torch.split(video_embeds, split_sizes) return video_embeds
Encodes videos into continuous embeddings that can be forwarded to the language model. Args: pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input videos. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM.
github-repos
def request_openbus(self, service, endpoint, **kwargs): if (service == 'bus'): endpoints = ENDPOINTS_BUS elif (service == 'geo'): endpoints = ENDPOINTS_GEO else: return None if (endpoint not in endpoints): return None url = (URL_OPENBUS + endpoints[endpoint]) kwargs['idClient'] = self._emt_id kwargs['passKey'] = self._emt_pass return requests.post(url, data=kwargs, verify=True).json()
Make a request to the given endpoint of the ``openbus`` server. This returns the plain JSON (dict) response which can then be parsed using one of the implemented types. Args: service (str): Service to fetch ('bus' or 'geo'). endpoint (str): Endpoint to send the request to. This string corresponds to the key in the ``ENDPOINTS`` dict. **kwargs: Request arguments. Returns: Obtained response (dict) or None if the endpoint was not found.
codesearchnet
def __init__(self, options):
    """Initialize an LruBackend.

    Args:
        options: a dictionary of configuration options; honors an optional
            u"capacity" entry (default 200).
    """
    capacity = options.get(u"capacity", 200)
    self._cache = pylru.lrucache(capacity)
Initializes an LruBackend. Args: options: a dictionary that contains configuration options.
juraj-google-style
def build_grab_exception(ex, curl):
    """Translate a pycurl exception into the matching Grab exception.

    Args:
        ex: the original pycurl exception; args[0] is the curl error code,
            args[1] the message.
        curl: the Curl instance that raised the exception.

    Returns:
        A Grab error instance, or None for a deliberately interrupted
        body-write callback (curl code 23).
    """
    code = ex.args[0]
    if code == 23:
        # Code 23 is a write-callback error; when we interrupted the
        # callback on purpose it is not an error at all.
        if getattr(curl, 'grab_callback_interrupted', None) is True:
            return None
        return error.GrabNetworkError(ex.args[1], ex)
    dispatch = {
        28: error.GrabTimeoutError,
        7: error.GrabConnectionError,
        67: error.GrabAuthError,
        47: error.GrabTooManyRedirectsError,
        6: error.GrabCouldNotResolveHostError,
        3: error.GrabInvalidUrl,
    }
    exc_class = dispatch.get(code, error.GrabNetworkError)
    return exc_class(ex.args[1], ex)
Build Grab exception from the pycurl exception Args: ex - the original pycurl exception curl - the Curl instance raised the exception
juraj-google-style
def __init__(self, config_dict=None): self.config_dict = deepcopy(config_dict) self.plugins = Config.load_installed_plugins() self.analysis_groups = [] if not config_dict: return analysis = config_dict.get('analysis', {}) if isinstance(analysis, dict): for group_key, group_def in analysis.items(): try: self.analysis_groups.append( self.inflate_analysis_group(group_key, group_def)) except ValueError as e: logger.error( 'Error while inflating "%s" analysis group. ' 'The group will not be added to the list. ' 'Exception: %s.', group_key, e) else: raise ValueError('%s type is not supported for "analysis" key, ' 'use dict only' % type(analysis))
Initialization method. Args: config_dict (dict): the configuration as a dictionary.
juraj-google-style
def update(self, session, arrays=None, frame=None): new_config = self._get_config() if self._enough_time_has_passed(self.previous_config['FPS']): self.visualizer.update(new_config) self.last_update_time = time.time() final_image = self._update_frame(session, arrays, frame, new_config) self._update_recording(final_image, new_config)
Creates a frame and writes it to disk. Args: arrays: a list of np arrays. Use the "custom" option in the client. frame: a 2D np array. This way the plugin can be used for video of any kind, not just the visualization that comes with the plugin. frame can also be a function, which only is evaluated when the "frame" option is selected by the client.
juraj-google-style
def is_valid(self, value):
    """Validate *value* against this type before it is set on an instance.

    For array types every item of a list/set/tuple must pass validation;
    any other value (including a scalar passed to an array type) is
    validated as a single item.

    Args:
        value (object): The value object for validation.

    Returns:
        True if value validation succeeds else False.
    """
    if not self.is_array:
        return self._valid(value)
    if isinstance(value, (list, set, tuple)):
        # Generator form short-circuits on the first invalid item instead
        # of materializing a full list of results (was: all([...])).
        return all(self._valid(item) for item in value)
    return self._valid(value)
Validate value before actual instance setting based on type. Args: value (object): The value object for validation. Returns: True if value validation succeeds else False.
codesearchnet
def find_from(path):
    """Find the nearest ``.ensime`` config, walking upward from *path*.

    Args:
        path (str): Path of a file or directory from where to start
            searching.

    Returns:
        str: Canonical path of the nearest ``.ensime``, or ``None`` if the
        filesystem root is reached without finding one.
    """
    root = os.path.abspath('/')
    current = os.path.realpath(path)
    while True:
        candidate = os.path.join(current, '.ensime')
        if os.path.isfile(candidate):
            return candidate
        if current == root:
            return None
        # Step one directory up, canonicalizing as the recursive original did.
        current = os.path.realpath(os.path.dirname(current))
Find path of an .ensime config, searching recursively upward from path. Args: path (str): Path of a file or directory from where to start searching. Returns: str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
juraj-google-style
def __call__(self, name, value): super(IntegerTypeChecker, self).__call__(name, value) if isinstance(self.minimum, int): if value < self.minimum: raise ValueError("%s must be greater or equal %s" % (name, self.minimum)) if isinstance(self.maximum, int): if value > self.maximum: raise ValueError("%s must be less or equal %s" % (name, self.maximum))
Call method. Args: name (str): the value's name. value (int): the value to check. Raises: ValueError: if value is not type int. ValueError: if value is less than minimum. ValueError: if value is more than maximum.
juraj-google-style
def do_put(self, uri, resource, timeout, custom_headers):
    """Issue a PUT request for *resource* at *uri*.

    Args:
        uri: URI of the resource.
        resource: payload to send.
        timeout: Time out for the request in seconds.
        custom_headers: Allows setting custom HTTP headers.

    Returns:
        The response body when the request completes synchronously,
        otherwise the result of waiting for the spawned task.
    """
    self.validate_resource_uri(uri)
    task, body = self._connection.put(uri, resource, custom_headers=custom_headers)
    if task:
        return self._task_monitor.wait_for_task(task, timeout)
    return body
Helps to make put requests. Args: uri: URI of the resource timeout: Time out for the request in seconds. custom_headers: Allows to set custom http headers. Retuns: Returns Task object
codesearchnet
def pan_and_scan(self, image: np.ndarray, pan_and_scan_min_crop_size: int, pan_and_scan_max_num_crops: int, pan_and_scan_min_ratio_to_activate: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): height, width = get_image_size(image) if width >= height: if width / height < pan_and_scan_min_ratio_to_activate: return [] num_crops_w = int(math.floor(width / height + 0.5)) num_crops_w = min(int(math.floor(width / pan_and_scan_min_crop_size)), num_crops_w) num_crops_w = max(2, num_crops_w) num_crops_w = min(pan_and_scan_max_num_crops, num_crops_w) num_crops_h = 1 else: if height / width < pan_and_scan_min_ratio_to_activate: return [] num_crops_h = int(math.floor(height / width + 0.5)) num_crops_h = min(int(math.floor(height / pan_and_scan_min_crop_size)), num_crops_h) num_crops_h = max(2, num_crops_h) num_crops_h = min(pan_and_scan_max_num_crops, num_crops_h) num_crops_w = 1 crop_size_w = int(math.ceil(width / num_crops_w)) crop_size_h = int(math.ceil(height / num_crops_h)) if min(crop_size_w, crop_size_h) < pan_and_scan_min_crop_size: return [] crop_positions_w = [crop_size_w * i for i in range(num_crops_w)] crop_positions_h = [crop_size_h * i for i in range(num_crops_h)] if input_data_format == ChannelDimension.LAST: image_crops = [image[pos_h:pos_h + crop_size_h, pos_w:pos_w + crop_size_w] for pos_h, pos_w in itertools.product(crop_positions_h, crop_positions_w)] else: image_crops = [image[:, pos_h:pos_h + crop_size_h, pos_w:pos_w + crop_size_w] for pos_h, pos_w in itertools.product(crop_positions_h, crop_positions_w)] return image_crops
Pan and Scan and image, by cropping into smaller images when the aspect ratio exceeds minimum allowed ratio. Args: image (`np.ndarray`): Image to resize. pan_and_scan_min_crop_size (`int`, *optional*): Minimum size of each crop in pan and scan. pan_and_scan_max_num_crops (`int`, *optional*): Maximum number of crops per image in pan and scan. pan_and_scan_min_ratio_to_activate (`float`, *optional*): Minimum aspect ratio to activate pan and scan. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
github-repos
def ldap_sync(self, **kwargs):
    """Trigger an LDAP group sync for this group.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabCreateError: If the server cannot perform the request.
    """
    path = '/groups/{}/ldap_sync'.format(self.get_id())
    self.manager.gitlab.http_post(path, **kwargs)
Sync LDAP groups. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request
codesearchnet
def _process_regular_parameters(sig, func, class_name, documented_params, indent_level, undocumented_parameters): docstring = '' source_args_dict = source_args_doc([ModelArgs, ImageProcessorArgs]) missing_args = {} for param_name, param in sig.parameters.items(): if param_name in ARGS_TO_IGNORE or param.kind == inspect.Parameter.VAR_POSITIONAL or param.kind == inspect.Parameter.VAR_KEYWORD: continue param_type, optional = _process_parameter_type(param, param_name, func) param_default = '' if param.default != inspect._empty and param.default is not None: param_default = f', defaults to `{str(param.default)}`' param_type, optional_string, shape_string, additional_info, description, is_documented = _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional) if is_documented: if param_name == 'config': if param_type == '': param_type = f'[`{class_name}`]' else: param_type = f'[`{param_type.split('.')[-1]}`]' elif param_type == '' and False: print(f'🚨 {param_name} for {func.__qualname__} in file {func.__code__.co_filename} has no type') param_type = param_type if '`' in param_type else f'`{param_type}`' if additional_info: param_docstring = f'{param_name} ({param_type}{additional_info}):{description}' else: param_docstring = f'{param_name} ({param_type}{shape_string}{optional_string}{param_default}):{description}' docstring += set_min_indent(param_docstring, indent_level + 8) else: missing_args[param_name] = {'type': param_type if param_type else '<fill_type>', 'optional': optional, 'shape': shape_string, 'description': description if description else '\n <fill_description>', 'default': param_default} undocumented_parameters.append(f"🚨 `{param_name}` is part of {func.__qualname__}'s signature, but not documented. Make sure to add it to the docstring of the function in {func.__code__.co_filename}.") return (docstring, missing_args)
Process all regular parameters (not kwargs parameters) from the function signature. Args: sig (`inspect.Signature`): Function signature func (`function`): Function the parameters belong to class_name (`str`): Name of the class documented_params (`dict`): Dictionary of parameters that are already documented indent_level (`int`): Indentation level undocumented_parameters (`list`): List to append undocumented parameters to
github-repos
def _ReadDefinitionFile(self, filename): if not filename: return None path = os.path.join(self._DEFINITION_FILES_PATH, filename) with open(path, 'rb') as file_object: definition = file_object.read() return dtfabric_fabric.DataTypeFabric(yaml_definition=definition)
Reads a dtFabric definition file. Args: filename (str): name of the dtFabric definition file. Returns: dtfabric.DataTypeFabric: data type fabric which contains the data format data type maps of the data type definition, such as a structure, that can be mapped onto binary data or None if no filename is provided.
juraj-google-style
def add_symbol(self, symbol_name, namespace_stack, node, module): if namespace_stack: last_namespace = self.namespaces for namespace in namespace_stack: last_namespace = last_namespace.setdefault(namespace, {}) else: last_namespace = self.namespaces[None] return self._add(symbol_name, last_namespace, node, module)
Adds symbol_name defined in namespace_stack to the symbol table. Args: symbol_name: 'name of the symbol to lookup' namespace_stack: None or ['namespaces', 'symbol', 'defined', 'in'] node: ast.Node that defines this symbol module: module (any object) this symbol is defined in Returns: bool(if symbol was *not* already present)
codesearchnet
def get_compound_pd(self):
    """Build the CompoundPhaseDiagram for this reaction, for plotting.

    Returns:
        CompoundPhaseDiagram: diagram over the two terminal compositions,
        with terminal compositions left un-normalized.
    """
    terminal1 = PDEntry(self.entry1.composition, 0)
    terminal2 = PDEntry(self.entry2.composition, 0)
    terminal_comps = [
        Composition(terminal1.composition.reduced_formula),
        Composition(terminal2.composition.reduced_formula),
    ]
    return CompoundPhaseDiagram(
        self.rxn_entries + [terminal1, terminal2],
        terminal_comps,
        normalize_terminal_compositions=False,
    )
Get the CompoundPhaseDiagram object, which can then be used for plotting. Returns: (CompoundPhaseDiagram)
codesearchnet
def files_comments_edit(
    self, *, comment: str, file: str, id: str, **kwargs
) -> SlackResponse:
    """Edit an existing file comment.

    Args:
        comment (str): The new body of the comment.
            e.g. 'Everyone should take a moment to read this file.'
        file (str): The file id. e.g. 'F1234467890'
        id (str): The file comment id. e.g. 'Fc1234567890'
    """
    payload = dict(kwargs, comment=comment, file=file, id=id)
    return self.api_call("files.comments.edit", json=payload)
Edit an existing file comment. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890' id (str): The file comment id. e.g. 'Fc1234567890'
juraj-google-style
def copartition_datasets(self, axis, other, left_func, right_func): if (left_func is None): new_self = self else: new_self = self.map_across_full_axis(axis, left_func) if (right_func is None): if ((axis == 0) and (not np.array_equal(other.block_lengths, new_self.block_lengths))): new_other = other.manual_shuffle(axis, (lambda x: x), new_self.block_lengths) elif ((axis == 1) and (not np.array_equal(other.block_widths, new_self.block_widths))): new_other = other.manual_shuffle(axis, (lambda x: x), new_self.block_widths) else: new_other = other else: new_other = other.manual_shuffle(axis, right_func, (new_self.block_lengths if (axis == 0) else new_self.block_widths)) return (new_self, new_other)
Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right.
codesearchnet
def get_unconditional_inputs(self, num_samples=1): last_hidden_state = torch.zeros((num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype) attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long) return MusicgenUnconditionalInput(encoder_outputs=(last_hidden_state,), attention_mask=attention_mask, guidance_scale=1.0)
Helper function to get null inputs for unconditional generation, enabling the model to be used without the feature extractor or tokenizer. Args: num_samples (int, *optional*): Number of audio samples to unconditionally generate. max_new_tokens (int, *optional*): Number of tokens to generate for each sample. More tokens means longer audio samples, at the expense of longer inference (since more audio tokens need to be generated per sample). Example: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> # get the unconditional (or 'null') inputs for the model >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```
github-repos
def DeregisterFormatter(cls, formatter_class):
    """Deregister a formatter class, keyed by its lower-cased DATA_TYPE.

    Args:
        formatter_class (type): class of the formatter to remove.

    Raises:
        KeyError: if no formatter is registered for the corresponding
            data type.
    """
    key = formatter_class.DATA_TYPE.lower()
    if key not in cls._formatter_classes:
        raise KeyError(
            'Formatter class not set for data type: {0:s}.'.format(
                formatter_class.DATA_TYPE))
    del cls._formatter_classes[key]
Deregisters a formatter class. The formatter classes are identified based on their lower case data type. Args: formatter_class (type): class of the formatter. Raises: KeyError: if formatter class is not set for the corresponding data type.
juraj-google-style
def w8a8_block_fp8_matmul_triton(A: torch.Tensor, B: torch.Tensor, As: torch.Tensor, Bs: torch.Tensor, block_size: List[int], output_dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(block_size) == 2 block_n, block_k = (block_size[0], block_size[1]) assert A.shape[-1] == B.shape[-1] assert A.shape[:-1] == As.shape[:-1] and A.is_contiguous() assert triton.cdiv(A.shape[-1], block_k) == As.shape[-1] M = A.numel() assert B.ndim == 2 and B.is_contiguous() and (Bs.ndim == 2) N, K = B.shape assert triton.cdiv(N, block_n) == Bs.shape[0] assert triton.cdiv(K, block_k) == Bs.shape[1] C_shape = A.shape[:-1] + (N,) C = A.new_empty(C_shape, dtype=output_dtype) BLOCK_SIZE_M = 128 if M < BLOCK_SIZE_M: BLOCK_SIZE_M = triton.next_power_of_2(M) BLOCK_SIZE_M = max(BLOCK_SIZE_M, 16) BLOCK_SIZE_K = block_k assert block_k % BLOCK_SIZE_K == 0 BLOCK_SIZE_N = block_n def grid(META): return (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),) _w8a8_block_fp8_matmul[grid](A, B, C, As, Bs, M, N, K, block_n, block_k, A.stride(-2), A.stride(-1), B.stride(1), B.stride(0), C.stride(-2), C.stride(-1), As.stride(-2), As.stride(-1), Bs.stride(1), Bs.stride(0), BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE_M=8) return C
This function performs matrix multiplication with block-wise quantization. It takes two input tensors `A` and `B` with scales `As` and `Bs`. The output is returned in the specified `output_dtype`. Args: A: The input tensor, e.g., activation. B: The input tensor, e.g., weight. As: The per-token-group quantization scale for `A`. Bs: The per-block quantization scale for `B`. block_size: The block size for per-block quantization. It should be 2-dim, e.g., [128, 128]. output_dytpe: The dtype of the returned tensor. Returns: torch.Tensor: The result of matmul.
github-repos
def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
    """Generate a list of strings from the template.

    Args:
        cnt (int): number of strings to produce.
        unique (bool): when True, only distinct strings are kept.
        progress_callback (callable): optional; invoked as
            ``progress_callback(done, cnt)`` after every render attempt.

    Returns:
        list: the generated strings.

    Raises:
        StringGenerator.UniquenessError: if more than
            ``cnt * self.unique_attempts_factor`` attempts were needed, i.e.
            the template cannot satisfy the uniqueness request (e.g.
            ``[1-9]{1}`` with ``cnt == 1000``).
    """
    results = []
    attempts = 0
    while len(results) < cnt:
        # Give up once the attempt budget is exhausted; the template may
        # simply not admit enough distinct strings.
        if attempts > cnt * self.unique_attempts_factor:
            raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
        candidate = self.render(**kwargs)
        if not unique or candidate not in results:
            results.append(candidate)
        attempts += 1
        if progress_callback and callable(progress_callback):
            progress_callback(len(results), cnt)
    return results
Return a list of generated strings. Args: cnt (int): length of list unique (bool): whether to make entries unique Returns: list. We keep track of total attempts because a template may specify something impossible to attain, like [1-9]{} with cnt==1000
juraj-google-style
def _write_to_hdx(self, action, data, id_field_name, file_to_upload=None):
    """Create or update an HDX object in HDX and return its metadata.

    Args:
        action (str): Action to perform, e.g. 'create', 'update'
        data (Dict): Data to write to HDX
        id_field_name (str): Name of field containing HDX object identifier or None
        file_to_upload (Optional[str]): File to upload to HDX. Defaults to None.

    Returns:
        Dict: HDX object metadata
    """
    file_handle = None
    try:
        if file_to_upload:
            file_handle = open(file_to_upload, 'rb')
            files = [('upload', file_handle)]
        else:
            files = None
        return self.configuration.call_remoteckan(self.actions()[action], data, files=files)
    except Exception as e:
        # Wrap any failure (missing action, CKAN error, IO error) in HDXError
        # while preserving the original cause.
        raisefrom(HDXError, 'Failed when trying to %s %s! (POST)' % (action, data[id_field_name]), e)
    finally:
        # Ensure the upload handle is closed even when the call raises.
        if file_to_upload and file_handle:
            file_handle.close()
Creates or updates an HDX object in HDX and return HDX object metadata dict Args: action (str): Action to perform eg. 'create', 'update' data (Dict): Data to write to HDX id_field_name (str): Name of field containing HDX object identifier or None file_to_upload (Optional[str]): File to upload to HDX Returns: Dict: HDX object metadata
codesearchnet
def add(self, pattern: Union[(Pattern, FlatTerm)], final_label: T=None) -> int:
    """Add a pattern to the discrimination net.

    Args:
        pattern:
            The pattern which is added to the DiscriminationNet. If an
            expression is given, it will be converted to a `FlatTerm` for
            internal processing. You can also pass a `FlatTerm` directly.
        final_label:
            A label that is returned if the pattern matches when using
            :meth:`match`. This will default to the pattern itself.

    Returns:
        The index of the newly added pattern. This is used internally to
        later get the pattern and its final label once a match is found.
    """
    # The position in self._patterns doubles as the pattern's identifier
    # inside the generated net.
    index = len(self._patterns)
    self._patterns.append((pattern, final_label))
    # Normalize the input: expressions are flattened, FlatTerms pass through.
    flatterm = (FlatTerm(pattern.expression) if (not isinstance(pattern, FlatTerm)) else pattern)
    # Syntactic (or trivial single-symbol) patterns use the cheaper
    # deterministic net construction.
    if (flatterm.is_syntactic or (len(flatterm) == 1)):
        net = self._generate_syntactic_net(flatterm, index)
    else:
        net = self._generate_net(flatterm, index)
    # Merge the new pattern's net into the existing root via the product
    # construction, or use it directly if this is the first pattern.
    if self._root:
        self._root = self._product_net(self._root, net)
    else:
        self._root = net
    return index
Add a pattern to the discrimination net. Args: pattern: The pattern which is added to the DiscriminationNet. If an expression is given, it will be converted to a `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly. final_label: A label that is returned if the pattern matches when using :meth:`match`. This will default to the pattern itself. Returns: The index of the newly added pattern. This is used internally to later to get the pattern and its final label once a match is found.
codesearchnet
def get_csr(self, bay_number=None):
    """Retrieve the enclosure's Certificate Signing Request (CSR).

    The CSR must have been generated by a previous POST to the same URI.

    Args:
        bay_number: OA from which to retrieve the previously generated CSR.

    Returns:
        dict
    """
    uri = self.data['uri'] + '/https/certificaterequest'
    if bay_number:
        uri = '{}?bayNumber={:d}'.format(uri, bay_number)
    return self._helper.do_get(uri)
Get an enclosure's Certificate Signing Request (CSR) that was generated by previous POST to the same URI. Args: bay_number: OA to retrieve the previously generated CSR. Returns: dict
juraj-google-style
def send_data(data):
    """Send a command packet to the herkulex servos.

    Builds the packet in place (sync bytes plus two checksums) and writes it
    to the shared serial port.

    Args:
        data (list): the packet payload bytes to be sent. Mutated in place:
            header and checksum bytes are inserted.

    Raises:
        HerkulexError: if the serial port could not be written to.
    """
    datalength = len(data)
    csm1 = checksum1(data, datalength)
    csm2 = checksum2(csm1)
    # Packet framing: two 0xFF sync bytes up front, checksums at
    # positions 5 and 6.
    data.insert(0, 255)
    data.insert(1, 255)
    data.insert(5, csm1)
    data.insert(6, csm2)
    stringtosend = ''
    for byte in data:
        stringtosend = stringtosend + '\\x' + ('%02X' % byte)
    try:
        # NOTE(review): 'string-escape' decoding is Python-2-only — confirm
        # this module targets Python 2 before porting.
        SERPORT.write(stringtosend.decode('string-escape'))
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise HerkulexError('could not communicate with motors')
Send data to the herkulex servos. Packetize the data & write the packet to the serial port. Args: data (list): the data to be sent Raises: HerkulexError: an error occurred while writing to the serial port
codesearchnet
def __init__(self, location=None, parent=None, **kwargs):
    """Initializes a path specification.

    Note that the CPIO file path specification must have a parent.

    Args:
        location (Optional[str]): CPIO file internal location string prefixed
            with a path separator character.
        parent (Optional[PathSpec]): parent path specification.

    Raises:
        ValueError: when parent is not set.
    """
    # A CPIO path spec only makes sense relative to the file containing
    # the archive, hence the mandatory parent.
    if not parent:
        raise ValueError('Missing parent value.')
    super(CPIOPathSpec, self).__init__(
        location=location, parent=parent, **kwargs)
Initializes a path specification. Note that the CPIO file path specification must have a parent. Args: location (Optional[str]): CPIO file internal location string prefixed with a path separator character. parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when parent is not set.
juraj-google-style
def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:
    """Update annotations of discretized continuous pulse function.

    Args:
        discretized_pulse: Discretized decorated continuous pulse.
        sampler_inst: Applied sampler.

    Returns:
        The ``discretized_pulse`` function with its docstring replaced.
    """
    wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')
    header, body = wrapped_docstring.split('\n', 1)
    body = textwrap.indent(body, ' ')
    wrapped_docstring = header + body
    # NOTE(review): the template literal below was lost in extraction
    # (`updated_ds = .format(...)` was syntactically invalid); this is a
    # reconstruction — confirm against the upstream source.
    updated_ds = """Discretized continuous pulse function: `{continuous_name}` using sampler: `{sampler_name}`.

    The first argument (time) of the continuous pulse function has been replaced with
    a discretized `duration` of type (int).

    Args:
        duration (int)
        *args: Remaining arguments of continuous pulse function.
        **kwargs: Remaining kwargs of continuous pulse function.

    Sampled continuous function:

        {continuous_doc}
    """.format(continuous_name=discretized_pulse.__name__,
               sampler_name=sampler_inst.__name__,
               continuous_doc=wrapped_docstring)
    discretized_pulse.__doc__ = updated_ds
    return discretized_pulse
Update annotations of discretized continuous pulse function. Args: discretized_pulse: Discretized decorated continuous pulse. sampler_inst: Applied sampler.
juraj-google-style
def fit_transform(self, X, y=None, **params):
    """Learn the vocabulary and return the document id matrix.

    Equivalent to calling ``fit`` followed by ``transform``.

    Args:
        X : iterable
            an iterable which yields either str, unicode or file objects.
        y : optional labels, forwarded to ``fit`` and ``transform``.

    Returns:
        list : document id matrix.
        list: label id matrix.
    """
    fitted = self.fit(X, y)
    return fitted.transform(X, y)
Learn vocabulary and return document id matrix. This is equivalent to fit followed by transform. Args: X : iterable an iterable which yields either str, unicode or file objects. Returns: list : document id matrix. list: label id matrix.
codesearchnet
def requested_packages(self, include_implicit=False):
    """Get packages in the request.

    Args:
        include_implicit (bool): If True, implicit packages are appended
            to the result.

    Returns:
        List of `PackageRequest` objects.
    """
    if not include_implicit:
        return self._package_requests
    return self._package_requests + self.implicit_packages
Get packages in the request. Args: include_implicit (bool): If True, implicit packages are appended to the result. Returns: List of `PackageRequest` objects.
juraj-google-style
def Query(args):
    """Calls osquery with the given query and returns its output.

    Args:
        args: A query to call osquery with (provides `query` and
            `timeout_millis`).

    Returns:
        A ProcOutput holding osquery's decoded stdout and stderr.

    Raises:
        TimeoutError: If a call to the osquery executable times out.
        Error: If anything else goes wrong with the subprocess call.
    """
    query = args.query.encode('utf-8')
    timeout = args.timeout_millis / 1000
    command = [config.CONFIG['Osquery.path'], '--S', '--json', query]
    try:
        proc = subprocess.run(command, timeout=timeout, check=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.TimeoutExpired as error:
        raise TimeoutError(cause=error)
    except subprocess.CalledProcessError as error:
        raise Error('osquery invocation error', cause=error)
    return ProcOutput(stdout=proc.stdout.decode('utf-8'),
                      stderr=proc.stderr.decode('utf-8').strip())
Calls osquery with given query and returns its output. Args: args: A query to call osquery with. Returns: A "parsed JSON" representation of the osquery output. Raises: QueryError: If the query is incorrect. TimeoutError: If a call to the osquery executable times out. Error: If anything else goes wrong with the subprocess call.
juraj-google-style
def plot_real_feature(df, feature_name, bins=50, figsize=(15, 15)):
    """Plot the distribution of a real-valued feature conditioned by the target.

    Draws four views of ``df[feature_name]``: the overall distribution, the
    distributions conditioned on the binary ``target`` column, a box plot and
    a violin plot.

    Examples:
        `plot_real_feature(X, 'emb_mean_euclidean')`

    Args:
        df: Pandas dataframe containing the target column (named 'target').
        feature_name: The name of the feature to plot.
        bins: The number of histogram bins for the distribution plots.
        figsize: The size of the plotted figure.
    """
    ix_negative_target = df[df.target == 0].index
    ix_positive_target = df[df.target == 1].index

    plt.figure(figsize=figsize)
    ax_overall_dist = plt.subplot2grid((3, 2), (0, 0), colspan=2)
    ax_target_conditional_dist = plt.subplot2grid((3, 2), (1, 0), colspan=2)
    ax_botplot = plt.subplot2grid((3, 2), (2, 0))
    ax_violin_plot = plt.subplot2grid((3, 2), (2, 1))

    ax_overall_dist.set_title('Distribution of {}'.format(feature_name), fontsize=16)
    # BUG FIX: was hard-coded bins=50, silently ignoring the `bins` argument
    # for the overall distribution (contrary to the documented behavior).
    sns.distplot(df[feature_name], bins=bins, ax=ax_overall_dist)
    sns.distplot(df.loc[ix_positive_target][feature_name], bins=bins,
                 ax=ax_target_conditional_dist, label='Positive Target')
    sns.distplot(df.loc[ix_negative_target][feature_name], bins=bins,
                 ax=ax_target_conditional_dist, label='Negative Target')
    ax_target_conditional_dist.legend(loc='upper right', prop={'size': 14})
    sns.boxplot(y=feature_name, x='target', data=df, ax=ax_botplot)
    sns.violinplot(y=feature_name, x='target', data=df, ax=ax_violin_plot)
    plt.show()
Plot the distribution of a real-valued feature conditioned by the target. Examples: `plot_real_feature(X, 'emb_mean_euclidean')` Args: df: Pandas dataframe containing the target column (named 'target'). feature_name: The name of the feature to plot. bins: The number of histogram bins for the distribution plot. figsize: The size of the plotted figure.
juraj-google-style
def is_complex_format_str(node):
    """Checks if node represents a string with complex formatting specs.

    Args:
        node (astroid.node_classes.NodeNG): AST node to check

    Returns:
        bool: True if inferred string uses complex formatting, False otherwise
    """
    inferred = utils.safe_infer(node)
    # Be conservative: anything we cannot prove to be a plain string is
    # treated as "complex".
    if inferred is None or not isinstance(inferred.value, str):
        return True
    try:
        parsed = list(string.Formatter().parse(inferred.value))
    except ValueError:
        # Malformed format string: not complex, just invalid.
        return False
    return any(format_spec for _, _, format_spec, _ in parsed)
Checks if node represents a string with complex formatting specs. Args: node (astroid.node_classes.NodeNG): AST node to check Returns: bool: True if inferred string uses complex formatting, False otherwise
juraj-google-style
def GetParserPluginsInformation(cls, parser_filter_expression=None):
    """Retrieves the parser plugins information.

    Args:
        parser_filter_expression (Optional[str]): parser filter expression,
            where None represents all parsers and plugins.

    Returns:
        list[tuple[str, str]]: pairs of parser plugin names and descriptions.
    """
    plugins_information = []
    for _, parser_class in cls.GetParsers(
        parser_filter_expression=parser_filter_expression):
        if not parser_class.SupportsPlugins():
            continue
        for plugin_name, plugin_class in parser_class.GetPlugins():
            description = getattr(plugin_class, 'DESCRIPTION', '')
            plugins_information.append((plugin_name, description))
    return plugins_information
Retrieves the parser plugins information. Args: parser_filter_expression (Optional[str]): parser filter expression, where None represents all parsers and plugins. Returns: list[tuple[str, str]]: pairs of parser plugin names and descriptions.
juraj-google-style
def get_db_prep_value(self, value, connection, prepared=False):
    """Prepare a value for DB interaction.

    Returns:
        - list(bytes) if not prepared
        - list(str) if prepared
    """
    if prepared:
        return value
    if value is None:
        return []
    if self.multi_valued_field:
        values = value
    else:
        values = [value]
    # Deduplicate, drop falsy entries and return a stable sorted list.
    prepared_values = (self.get_prep_value(v) for v in values)
    return sorted({v for v in prepared_values if v})
Prepare a value for DB interaction. Returns: - list(bytes) if not prepared - list(str) if prepared
codesearchnet
def _forward_and_backward_functions(self, inference_args, input_tangents):
    """Forward and backward functions suitable for higher-order gradients.

    Unlike in `_FirstOrderTapeGradientFunctions`, the backward function
    built by this method accepts gradients for all of the outputs of the
    returned forward function, including side outputs.

    Args:
        inference_args: A flat list of Tensors, arguments to the inference
            function.
        input_tangents: A flat list of Tensors, jvps associated with
            `inference_args`.

    Returns:
        A tuple of (forward_function, forward_graph, backward_function,
        output_indices, num_output_tangents).
    """
    outputs = []
    iteration_count = 0
    # Fixed-point loop: building the backward function can itself add new
    # (trainable) outputs to the forward graph, so keep rebuilding until no
    # trainable outputs remain uncovered.
    while len(outputs) < len(self._func_graph.outputs) and any((backprop_util.IsTrainable(output) for output in self._func_graph.outputs[len(outputs):])):
        iteration_count += 1
        if iteration_count >= 20 and iteration_count % 5 == 0:
            # Convergence is taking unusually long; gather details about the
            # newly added trainable outputs so the warning is actionable.
            new_op_with_trainable_output = None
            num_new_trainable_outputs = 0
            for output in self._func_graph.outputs[len(outputs):]:
                if backprop_util.IsTrainable(output):
                    num_new_trainable_outputs += 1
                    new_op_with_trainable_output = output.op
            logging.warning("Determining side outputs for the function '{}' is taking longer than expected ({} iterations, typically this converges in 5 or so). This could indicate that a gradient registration is adding new ops to the forward pass every time gradients are generated. {} new trainable output(s) were added this iteration, one from the following op:\n {}\nThis may indicate a TensorFlow bug, or an issue in a tf.custom_gradient.".format(self._func_graph.name, iteration_count, num_new_trainable_outputs, new_op_with_trainable_output))
        outputs = list(self._func_graph.outputs)
        # The result is discarded on purpose: this call is made for its side
        # effect of possibly adding more outputs to the forward graph.
        self._build_functions_for_outputs(outputs, inference_args, input_tangents)
    # The output set is now stable; build the final forward/backward pair.
    forward_function, forward_graph, backward_function, output_indices, num_output_tangents = self._build_functions_for_outputs(outputs, inference_args, input_tangents)
    if len(self._func_graph.outputs) > len(outputs) and any((backprop_util.IsTrainable(output) for output in self._func_graph.outputs[len(outputs):])):
        raise errors.InternalError(f'Unexpectedly added new outputs to the forward function when building the backward function: {self._func_graph.outputs[len(outputs):]}.')
    return (forward_function, forward_graph, backward_function, output_indices, num_output_tangents)
Forward and backward functions suitable for higher-order gradients. Unlike in `_FirstOrderTapeGradientFunctions`, the backward function built by this method accepts gradients for all of the outputs of the returned forward function, including side outputs. Args: inference_args: A flat list of Tensors, arguments to the inference function. input_tangents: A flat list of Tensors, jvps associated with `inference_args`. Returns: A tuple of (forward_function, backward_function): forward_function: Takes the same inputs as the inference function, but returns side outputs used by backward_function in addition to the inference function's outputs. backward_function: Takes side outputs from forward_function and gradients with respect to all of its outputs, real and side. Returns gradients with respect to the inputs.
github-repos
def write_markdown_to_file(self, f):
    """Prints this library to file `f` as a markdown page.

    Args:
        f: File to write to.
    """
    # Front-matter delimiters expected by the docs pipeline.
    print('---', file=f)
    print('---', file=f)
    print('<!-- This file is machine generated: DO NOT EDIT! -->', file=f)
    print('', file=f)
    # NOTE(review): this line was corrupted in extraction (a string literal
    # starting with '#' was truncated); reconstructed as the page title —
    # confirm against upstream.
    print('# %s' % self._title, file=f)
    if self._prefix:
        print(self._prefix, file=f)
    print('[TOC]', file=f)
    print('', file=f)
    if self._module is not None:
        self._write_module_markdown_to_file(f, self._module)
Prints this library to file `f`. Args: f: File to write to. Returns: Dictionary of documented members.
codesearchnet
def open_usb_handle(self, port_num):
    """Open a USB handle for the device on the given port.

    Args:
        port_num: port number on the Cambrionix unit.

    Returns:
        An open `local_usb.LibUsbHandle` for the device on that port.
    """
    serial_number = self.get_usb_serial(port_num)
    return local_usb.LibUsbHandle.open(serial_number=serial_number)
Open a USB handle for the device on the given port. Args: port_num: port number on the Cambrionix unit Returns: USB handle
codesearchnet
def get_volumes(blocks, layout_info):
    """Get a list of UBI volume objects from list of blocks.

    Arguments:
        List:blocks -- List of block objects.
        List:layout_info -- Layout info (indexes of layout blocks and
            associated data blocks).

    Returns:
        Dict -- Of Volume objects by volume name, including any relevant
        blocks.
    """
    volumes = {}
    blocks_by_vol = sort.by_vol_id(blocks, layout_info[2])
    for vol_rec in blocks[layout_info[0]].vtbl_recs:
        vol_name = vol_rec.name.strip(b'\x00').decode('utf-8')
        # A volume may have no data blocks yet; register an empty list so
        # the description still gets built.
        blocks_by_vol.setdefault(vol_rec.rec_index, [])
        volumes[vol_name] = description(vol_rec.rec_index, vol_rec, blocks_by_vol[vol_rec.rec_index])
    return volumes
Get a list of UBI volume objects from list of blocks Arguments: List:blocks -- List of layout block objects List:layout_info -- Layout info (indexes of layout blocks and associated data blocks.) Returns: Dict -- Of Volume objects by volume name, including any relevant blocks.
juraj-google-style
def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False):
    """Checks the storage metadata.

    Args:
        metadata_values (dict[str, str]): metadata values per key.
        check_readable_only (Optional[bool]): whether the store should only
            be checked to see if it can be read. If False, the store will be
            checked to see if it can be read and written to.

    Raises:
        IOError: if the format version or the serializer format is not
            supported.
        OSError: if the format version or the serializer format is not
            supported.
    """
    format_version = metadata_values.get('format_version', None)
    if not format_version:
        raise IOError('Missing format version.')

    try:
        format_version = int(format_version, 10)
    except (TypeError, ValueError):
        raise IOError('Invalid format version: {0!s}.'.format(format_version))

    # Writing requires an exact version match; reading tolerates any version
    # within the compatible range checked below.
    if not check_readable_only and format_version != cls._FORMAT_VERSION:
        raise IOError('Format version: {0:d} is not supported.'.format(
            format_version))

    if format_version < cls._COMPATIBLE_FORMAT_VERSION:
        raise IOError(
            'Format version: {0:d} is too old and no longer supported.'.format(
                format_version))

    if format_version > cls._FORMAT_VERSION:
        raise IOError(
            'Format version: {0:d} is too new and not yet supported.'.format(
                format_version))

    # Store the parsed integer back so callers see a normalized value.
    metadata_values['format_version'] = format_version

    compression_format = metadata_values.get('compression_format', None)
    if compression_format not in definitions.COMPRESSION_FORMATS:
        raise IOError('Unsupported compression format: {0:s}'.format(
            compression_format))

    serialization_format = metadata_values.get('serialization_format', None)
    if serialization_format != definitions.SERIALIZER_FORMAT_JSON:
        raise IOError('Unsupported serialization format: {0:s}'.format(
            serialization_format))

    storage_type = metadata_values.get('storage_type', None)
    if storage_type not in definitions.STORAGE_TYPES:
        raise IOError('Unsupported storage type: {0:s}'.format(
            storage_type))
Checks the storage metadata. Args: metadata_values (dict[str, str]): metadata values per key. check_readable_only (Optional[bool]): whether the store should only be checked to see if it can be read. If False, the store will be checked to see if it can be read and written to. Raises: IOError: if the format version or the serializer format is not supported. OSError: if the format version or the serializer format is not supported.
juraj-google-style
def _create_scalar_select(lhs_result: _sql_data_types.StandardSqlExpression, rhs_result: _sql_data_types.StandardSqlExpression, scalar_check_op: str, sql_data_type: _sql_data_types.StandardSqlDataType, sql_alias: str):
    """Construct a Spark SQL select statement for scalar values.

    Args:
        lhs_result: The result of the left-hand side expression.
        rhs_result: The result of the right-hand side expression.
        scalar_check_op: The scalar operation to be applied ('=' or '!=').
        sql_data_type: The SQL data type for the result.
        sql_alias: The SQL alias for the result.

    Returns:
        A compiled Spark SQL select statement.
    """
    comparison_sql = f'({lhs_result.as_operand()} {scalar_check_op} {rhs_result.as_operand()})'
    select_expression = _sql_data_types.RawExpression(
        comparison_sql, _sql_data_type=sql_data_type, _sql_alias=sql_alias)
    return _sql_data_types.Select(
        select_part=select_expression,
        from_part=None,
        sql_dialect=_sql_data_types.SqlDialect.SPARK)
Construct a Spark SQL select statement for scalar values. Args: lhs_result: The result of the left-hand side expression. rhs_result: The result of the right-hand side expression. scalar_check_op: The scalar operation to be applied ('=' or '!='). sql_data_type: The SQL data type for the result. sql_alias: The SQL alias for the result. Returns: A compiled Spark SQL select statement.
github-repos
def get_book_metadata(self, asin):
    """Returns a book's metadata.

    Args:
        asin: The ASIN of the book to be queried.

    Returns:
        A `KindleBook` instance corresponding to the book associated with
        `asin`.
    """
    raw_metadata = self._get_api_call('get_book_metadata', '"%s"' % asin)
    return KindleCloudReaderAPI._kbm_to_book(raw_metadata)
Returns a book's metadata. Args: asin: The ASIN of the book to be queried. Returns: A `KindleBook` instance corresponding to the book associated with `asin`.
codesearchnet
def model_config(instance_type, model, role=None, image=None):
    """Export Airflow model config from a SageMaker model.

    Args:
        instance_type (str): The EC2 instance type to deploy this Model to.
            For example, 'ml.p2.xlarge'
        model (sagemaker.model.FrameworkModel): The SageMaker model to export
            Airflow config from
        role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model
        image (str): A container image to use for deploying the model

    Returns:
        dict: Model config that can be directly used by
        SageMakerModelOperator in Airflow. It can also be part of the config
        used by SageMakerEndpointOperator and SageMakerTransformOperator in
        Airflow.
    """
    s3_operations = {}
    model.image = image or model.image
    # Framework models need their code uploaded/referenced explicitly.
    if isinstance(model, sagemaker.model.FrameworkModel):
        container_def = prepare_framework_container_def(model, instance_type, s3_operations)
    else:
        container_def = model.prepare_container_def(instance_type)
    base_name = utils.base_name_from_image(container_def['Image'])
    model.name = model.name or utils.name_from_base(base_name)
    config = {
        'ModelName': model.name,
        'PrimaryContainer': session._expand_container_def(container_def),
        'ExecutionRoleArn': role or model.role,
    }
    if model.vpc_config:
        config['VpcConfig'] = model.vpc_config
    if s3_operations:
        config['S3Operations'] = s3_operations
    return config
Export Airflow model config from a SageMaker model Args: instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge' model (sagemaker.model.FrameworkModel): The SageMaker model to export Airflow config from role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model image (str): An container image to use for deploying the model Returns: dict: Model config that can be directly used by SageMakerModelOperator in Airflow. It can also be part of the config used by SageMakerEndpointOperator and SageMakerTransformOperator in Airflow.
codesearchnet
def __init__(self, scandir_path, system, name, header, bytes_path):
    """Should only be instantiated by "scandir".

    Args:
        scandir_path (str): scandir path argument.
        system (pycosio._core.io_system.SystemBase subclass): Storage system.
        name (str): Name of the object relative to "scandir_path".
        header (dict): Object header.
        bytes_path (bool): True if path must be returned as bytes.
    """
    self._cache = {}
    self._system = system
    self._name = name
    self._header = header
    # Join with a single separator regardless of whether the base path
    # already ends with one.
    if scandir_path[-1] != '/':
        scandir_path = scandir_path + '/'
    self._path = scandir_path + name
    self._bytes_path = bytes_path
Should only be instantiated by "scandir". Args: scandir_path (str): scandir path argument. system (pycosio._core.io_system.SystemBase subclass): Storage system. name (str): Name of the object relative to "scandir_path". header (dict): Object header bytes_path (bool): True if path must be returned as bytes.
juraj-google-style
def get_metric_parsers(metric_packages=tuple(), include_defaults=True):
    """Gets all of the metric parsers.

    Args:
        metric_packages - Defaults to no extra packages. An iterable of
            metric containing packages. A metric inherits DiffParserBase and
            does not have __metric__ = False. A metric package must already
            be imported using import a.b.c
        include_defaults - Whether to include the generic metric parsers

    Returns:
        set of discovered metric parser classes.
    """
    parsers = set()
    if include_defaults:
        import git_code_debt.metrics
        parsers.update(discover(git_code_debt.metrics, is_metric_cls))
    for package in metric_packages:
        parsers.update(discover(package, is_metric_cls))
    return parsers
Gets all of the metric parsers. Args: metric_packages - Defaults to no extra packages. An iterable of metric containing packages. A metric inherits DiffParserBase and does not have __metric__ = False A metric package must be imported using import a.b.c include_defaults - Whether to include the generic metric parsers
codesearchnet