code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_gap(self, tol=0.001, abs_tol=False, spin=None): (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin) return max((cbm - vbm), 0.0)
Expects a DOS object and finds the gap. Args: tol: tolerance in occupations for determining the gap abs_tol: An absolute tolerance (True) and a relative one (False) spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. Returns: gap in eV
codesearchnet
def get_uri(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): if is_list: return self._get_typed_list_value(key=key, target_type=UriSpec, type_convert=self.parse_uri_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=UriSpec, type_convert=self.parse_uri_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)
Get the value corresponding to the key and convert it to `UriSpec`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key.
codesearchnet
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): devices = match.get('Devices', {}) for (device_identifier, device_information) in iter(devices.items()): datetime_value = device_information.get('Connected', None) if (not datetime_value): continue event_data = IPodPlistEventData() event_data.device_id = device_identifier for (key, value) in iter(device_information.items()): if (key == 'Connected'): continue attribute_name = key.lower().replace(' ', '_') setattr(event_data, attribute_name, value) event = time_events.PythonDatetimeEvent(datetime_value, definitions.TIME_DESCRIPTION_LAST_CONNECTED) parser_mediator.ProduceEventWithEventData(event, event_data)
Extract device information from the iPod plist. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
codesearchnet
def batch_frexp(inputs, max_bit=31): shape_of_input = inputs.size() inputs = inputs.view(-1) output_m, output_e = np.frexp(inputs.cpu().numpy()) tmp_m = [] for m in output_m: int_m_shifted = int(decimal.Decimal(m * 2 ** max_bit).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP)) tmp_m.append(int_m_shifted) output_m = np.array(tmp_m) output_e = float(max_bit) - output_e return (torch.from_numpy(output_m).to(inputs.device).view(shape_of_input), torch.from_numpy(output_e).to(inputs.device).view(shape_of_input))
Decompose the scaling factor into mantissa and twos exponent. Args: inputs (`torch.Tensor`): Target scaling factor to decompose. max_bit (`int`, optional, defaults to 31): Number of bits used for the mantissa shift. Returns: `Tuple(torch.Tensor, torch.Tensor)`: mantissa and exponent
github-repos
def transformer_moe_2k(): hparams = transformer_moe_8k() hparams.batch_size = 2048 hparams.default_ff = 'sep' encoder_archi = 'a/a/a/a/a' decoder_archi = 'a-sepm/a-sepm/a-moe/a-sepm/a-sepm' hparams.layer_types = '{} return hparams
Base transformers model with moe. Will have the following architecture: * No encoder. * Layer 0: a - sep (self-attention - unmasked separable convolutions) * Layer 1: a - sep * Layer 2: a - sep * Layer 3: a - sep * Layer 4: a - sep * Decoder architecture: * Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep) * Layer 1: a - a - sepm * Layer 2: a - a - moe (mixture of expert layers in the middle) * Layer 3: a - a - sepm * Layer 4: a - a - sepm Returns: hparams
codesearchnet
def load_stopwords(self, path): if path: with open(path) as f: self.stopwords = set(f.read().splitlines()) else: self.stopwords = set(pkgutil.get_data('textplot', 'data/stopwords.txt').decode('utf8').splitlines())
Load a set of stopwords. Args: path (str): The stopwords file path.
codesearchnet
def For(start, limit, delta, inputs, body, name=None, hostmem=None, rewrite_with_while=None): if rewrite_with_while: return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem) if body.captured_inputs: ret = gen_functional_ops._for(start, limit, delta, inputs + body.captured_inputs, _LoopBodyCaptureWrapper(body), name=name) ret = ret[:-len(body.captured_inputs)] else: ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name) if hostmem: num_for_params = 3 input_attr = attr_value_pb2.AttrValue() input_attr.list.i.extend([num_for_params + i for i in hostmem]) ret[0].op._set_attr('_input_hostmem', input_attr) output_attr = attr_value_pb2.AttrValue() output_attr.list.i.extend(hostmem) ret[0].op._set_attr('_output_hostmem', output_attr) return ret
out = input; for i in range(start, limit, delta) out = body(i, out). Args: start: A `Tensor` of type `int32`. limit: A `Tensor` of type `int32`. delta: A `Tensor` of type `int32`. inputs: A list of `Tensor` objects. A list of input tensors whose types are T. body: A function takes a list of tensors and returns another list of tensors. Both lists have the same types as (int32, T...). name: A name for the operation (optional). hostmem: A list of integer. If i is in the list, inputs[i] is a host memory tensor. In other words, (i+1)-th argument of the body function is expecting a host memory. rewrite_with_while: If True, using While op to implement the For. Returns: A list of `Tensor` objects. Has the same type as `input`. A list of output tensors whose types are T.
github-repos
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5): y_pred_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims y_true_rank = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims if y_true_rank is not None and y_pred_rank is not None: if y_pred_rank > 2: y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]]) if y_true_rank > 1: y_true = array_ops.reshape(y_true, [-1]) return math_ops.cast(nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), backend.floatx())
Computes how often integer targets are in the top `K` predictions. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy( ... y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Sparse top K categorical accuracy value.
github-repos
def __init__( self, name, description='', creator='', raw={}): BossResource.__init__(self, name, description, creator, raw)
Constructor. Args: name (string): Collection name. description (optional[string]): Collection description. Defaults to empty. creator (optional[string]): Resource creator. raw (optional[dictionary]): Holds JSON data returned by the Boss API on a POST (create) or GET operation.
juraj-google-style
def _wrap_method(name): method = getattr(datetime.datetime, name) @functools.wraps(method, ("__name__", "__doc__"), ()) def wrapper(self, *args, **kw): r = method(self, *args, **kw) if isinstance(r, datetime.datetime) and not isinstance(r, type(self)): r = type(self)(r) return r setattr(datetime_tz, name, wrapper)
Wrap a method. Patch a method which might return a datetime.datetime to return a datetime_tz.datetime_tz instead. Args: name: The name of the method to patch
juraj-google-style
def compute_video_metrics_from_predictions(predictions, decode_hparams): all_results = {} (ssim_all_decodes, psnr_all_decodes) = ([], []) for single_decode in predictions: args = get_zipped_dataset_from_predictions(single_decode) (psnr_single, ssim_single) = compute_one_decoding_video_metrics(*args) psnr_all_decodes.append(psnr_single) ssim_all_decodes.append(ssim_single) psnr_all_decodes = np.array(psnr_all_decodes) ssim_all_decodes = np.array(ssim_all_decodes) all_results.update({'PSNR': psnr_all_decodes, 'SSIM': ssim_all_decodes}) return compute_all_metrics_statistics(all_results)
Computes metrics from predictions. Args: predictions: list of list of dicts. outer length: num_decodes, inner_length: num_samples decode_hparams: Decode hparams. instance of HParams. Returns: statistics: dict of Tensors, key being the metric with each Tensor having the shape (num_samples, num_frames).
codesearchnet
def bytes(self) -> bytes | None: if self.part.text: return self.text.encode() if isinstance(self.part.inline_data, genai_types.Blob): return self.part.inline_data.data return None
Returns part contents as bytes. Returns: Text encoded into bytes or bytes from inline data if the underlying part is a Blob.
github-repos
def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction: result_dict: dict[str, Any] = {} _AggModelIdMixin.add_model_id(self, result_dict) _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions) scores = [prediction.score for prediction in predictions if prediction.score is not None and (not math.isnan(prediction.score))] if len(scores) > 0: result_dict['score'] = self._agg(scores) elif all(map(lambda x: x.score is None, predictions)): result_dict['score'] = None else: result_dict['score'] = float('NaN') return AnomalyPrediction(**result_dict)
Applies the score aggregation function to a list of predictions. Args: predictions (Iterable[AnomalyPrediction]): A collection of `AnomalyPrediction` objects to be aggregated. Returns: AnomalyPrediction: A single `AnomalyPrediction` object with the aggregated score. The aggregated score is determined as follows: - If there are any non-missing and non-error scores, the `agg_func` is applied to aggregate them. - If all scores are error scores (`None`), the aggregated score is also `None`. - If there are a mix of missing (`NaN`) and error scores (`None`), the aggregated score is `NaN`.
github-repos
def load(self, binary: pyquil.Program) -> 'QuantumFlowQVM': assert self.status in ['connected', 'done'] prog = quil_to_program(str(binary)) self._prog = prog self.program = binary self.status = 'loaded' return self
Load a pyQuil program, and initialize QVM into a fresh state. Args: binary: A pyQuil program
juraj-google-style
def new_message_from_header(header): message_type = header.message_type if (not isinstance(message_type, Type)): try: if isinstance(message_type, str): message_type = Type[message_type] elif isinstance(message_type, int): message_type = Type(message_type) except ValueError: raise ValueError message = new_message_from_message_type(message_type) message.header.xid = header.xid message.header.length = header.length return message
Given an OF Header, return an empty message of header's message_type. Args: header (~pyof.v0x01.common.header.Header): Unpacked OpenFlow Header. Returns: Empty OpenFlow message of the same type of message_type attribute from the given header. The header attribute of the message will be populated. Raises: KytosUndefinedMessageType: Unkown Message_Type.
codesearchnet
def publish(self, message): if (not isinstance(message, types.PubsubMessage)): message = types.PubsubMessage(**message) future = None with self._state_lock: if (not self.will_accept(message)): return future new_size = (self._size + message.ByteSize()) new_count = (len(self._messages) + 1) overflow = ((new_size > self.settings.max_bytes) or (new_count >= self._settings.max_messages)) if ((not self._messages) or (not overflow)): self._messages.append(message) self._size = new_size future = futures.Future(completed=threading.Event()) self._futures.append(future) if overflow: self.commit() return future
Publish a single message. Add the given message to this object; this will cause it to be published once the batch either has enough messages or a sufficient period of time has elapsed. This method is called by :meth:`~.PublisherClient.publish`. Args: message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message. Returns: Optional[~google.api_core.future.Future]: An object conforming to the :class:`~concurrent.futures.Future` interface or :data:`None`. If :data:`None` is returned, that signals that the batch cannot accept a message.
codesearchnet
def __call__(self, stream, content_type): try: return json.load(codecs.getreader('utf-8')(stream)) finally: stream.close()
Decode a JSON object into the corresponding Python object. Args: stream (stream): The response stream to be deserialized. content_type (str): The content type of the response. Returns: object: Body of the response deserialized into a JSON object.
juraj-google-style
def _implicit_credentials_from_files(): credentials_filename = _get_environment_variable_file() if (not credentials_filename): credentials_filename = _get_well_known_file() if os.path.isfile(credentials_filename): extra_help = ' (produced automatically when running "gcloud auth login" command)' else: credentials_filename = None else: extra_help = ((' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS) + ' environment variable)') if (not credentials_filename): return SETTINGS.env_name = DEFAULT_ENV_NAME try: return _get_application_default_credential_from_file(credentials_filename) except (ApplicationDefaultCredentialsError, ValueError) as error: _raise_exception_for_reading_json(credentials_filename, extra_help, error)
Attempts to get implicit credentials from local credential files. First checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS is set with a filename and then falls back to a configuration file (the "well known" file) associated with the 'gcloud' command line tool. Returns: Credentials object associated with the GOOGLE_APPLICATION_CREDENTIALS file or the "well known" file if either exists. If neither file is defined, returns None, indicating no credentials from a file can be detected from the current environment.
codesearchnet
def CancelBatchJob(client, batch_job, max_poll_attempts=MAX_POLL_ATTEMPTS): batch_job_service = client.GetService('BatchJobService', 'v201809') batch_job['status'] = 'CANCELING' operation = {'operator': 'SET', 'operand': batch_job} batch_job_service.mutate([operation]) poll_attempt = 0 while ((poll_attempt in range(max_poll_attempts)) and (batch_job['status'] != 'CANCELED')): sleep_interval = ((30 * (2 ** poll_attempt)) + (random.randint(0, 10000) / 1000)) print(('Batch Job not finished canceling, sleeping for %s seconds.' % sleep_interval)) time.sleep(sleep_interval) batch_job = GetBatchJob(client, batch_job['id']) poll_attempt += 1 if (batch_job['status'] == 'CANCELED'): print(('Batch Job with ID "%d" has been successfully canceled.' % batch_job['id'])) else: print(('Batch Job with ID "%d" failed to cancel after polling %d times.' % (batch_job['id'], max_poll_attempts)))
Cancels the given BatchJob. Args: client: an instantiated AdWordsClient used to cancel the BatchJob. batch_job: a BatchJob to be canceled. max_poll_attempts: an int defining the number of times the BatchJob will be checked to determine whether it has been canceled.
codesearchnet
def assert_same_structure(nest1, nest2, check_types=True): nest_util.assert_same_structure(nest_util.Modality.DATA, nest1, nest2, check_types)
Asserts that two structures are nested in the same way. Args: nest1: an arbitrarily nested structure. nest2: an arbitrarily nested structure. check_types: if `True` (default) types of sequences should be same as well. For dictionary, "type" of dictionary is considered to include its keys. In other words, two dictionaries with different keys are considered to have a different "type". If set to `False`, two iterables are considered same as long as they yield the elements that have same structures. Raises: ValueError: If the two structures do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If the two structures differ in the type of sequence in any of their substructures. Only possible if `check_types` is `True`.
github-repos
def __init__(self, date_time, date_time_description): super(OLECFSummaryInformationEvent, self).__init__( date_time, date_time_description) self.name = 'Summary Information'
Initializes an event. Args: date_time (dfdatetime.DateTimeValues): date and time values. date_time_description (str): description of the meaning of the date and time values.
juraj-google-style
def preprocess_image(image_buffer, output_height, output_width, num_channels, is_training=False): if is_training: image = _decode_crop_and_flip(image_buffer, num_channels) mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE, value=[output_height, output_width]) image = _resize_image(image, output_height, output_width) else: image = tf.image.decode_jpeg(image_buffer, channels=num_channels) image = _aspect_preserving_resize(image, _RESIZE_MIN) mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE, value=[output_height, output_width]) image = _central_crop(image, output_height, output_width) image.set_shape([output_height, output_width, num_channels]) return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
Preprocesses the given image. Preprocessing includes decoding, cropping, and resizing for both training and eval images. Training preprocessing, however, introduces some random distortion of the image to improve accuracy. Args: image_buffer: scalar string Tensor representing the raw JPEG image buffer. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. num_channels: Integer depth of the image buffer for decoding. is_training: `True` if we're preprocessing the image for training and `False` otherwise. Returns: A preprocessed image.
codesearchnet
def find_importer_frame(): byte = (lambda ch: (ord(ch) if PY2 else ch)) frame = inspect.currentframe() try: while frame: code = frame.f_code lasti = frame.f_lasti if (byte(code.co_code[lasti]) == dis.opmap['IMPORT_NAME']): arg = (byte(code.co_code[(lasti + 1)]) + (byte(code.co_code[(lasti + 2)]) * 256)) name = code.co_names[arg] if (name == 'end'): break end end frame = frame.f_back end return frame finally: del frame end
Returns the outer frame importing this "end" module. If this module is being imported by other means than import statement, None is returned. Returns: A frame object or None.
codesearchnet
def _create_environment(config): if isinstance(config.env, str): env = gym.make(config.env) else: env = config.env() if config.max_length: env = tools.wrappers.LimitDuration(env, config.max_length) if isinstance(env.action_space, gym.spaces.Box): if config.normalize_ranges: env = tools.wrappers.RangeNormalize(env) env = tools.wrappers.ClipAction(env) elif isinstance(env.action_space, gym.spaces.Discrete): if config.normalize_ranges: env = tools.wrappers.RangeNormalize(env, action=False) else: message = "Unsupported action space '{}'".format(type(env.action_space)) raise NotImplementedError(message) env = tools.wrappers.ConvertTo32Bit(env) env = tools.wrappers.CacheSpaces(env) return env
Constructor for an instance of the environment. Args: config: Object providing configurations via attributes. Raises: NotImplementedError: For action spaces other than Box and Discrete. Returns: Wrapped OpenAI Gym environment.
juraj-google-style
def replace_batch_norm(model): for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = GroundingDinoFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device('meta'): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module)
Recursively replace all `torch.nn.BatchNorm2d` with `GroundingDinoFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model
github-repos
class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config) self.head = PatchTSMixerLinearHead(config=config) self.use_return_dict = config.use_return_dict if config.scaling in ['std', 'mean', True]: self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches) else: self.inject_scale = None if config.post_init: self.post_init() @auto_docstring def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForTimeSeriesClassificationOutput: loss = torch.nn.CrossEntropyLoss() return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model(past_values, output_hidden_states=output_hidden_states, return_dict=return_dict) if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale(model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale) y_hat = self.head(model_output.last_hidden_state) if target_values is not None and return_loss is True: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple((v for v in [loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states])) return PatchTSMixerForTimeSeriesClassificationOutput(loss=loss_val, prediction_outputs=y_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)
`PatchTSMixer` for classification application. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`.
github-repos
def connected_emulators(self, host=enums.JLinkHost.USB): res = self._dll.JLINKARM_EMU_GetList(host, 0, 0) if res < 0: raise errors.JLinkException(res) num_devices = res info = (structs.JLinkConnectInfo * num_devices)() num_found = self._dll.JLINKARM_EMU_GetList(host, info, num_devices) if num_found < 0: raise errors.JLinkException(num_found) return list(info)[:num_found]
Returns a list of all the connected emulators. Args: self (JLink): the ``JLink`` instance host (int): host type to search (default: ``JLinkHost.USB``) Returns: List of ``JLinkConnectInfo`` specifying the connected emulators. Raises: JLinkException: if fails to enumerate devices.
juraj-google-style
def ragged_cumsum(x: ragged_tensor.Ragged, axis: int=0, exclusive: bool=False, reverse: bool=False, name: typing.Optional[str]=None): with ops.name_scope(name, 'RaggedCumSum', [x, axis, exclusive, reverse]): axis = array_ops.get_positive_axis(axis, x.shape.rank, ndims_name='rank') if axis == x.ragged_rank: last_rp = x._nested_row_partitions[-1] return x.with_flat_values(_cumsum_flat_values_at_ragged_rank(last_rp, x.flat_values, exclusive=exclusive, reverse=reverse)) elif axis > x.ragged_rank: new_axis = axis - x.ragged_rank cumsum_bound = functools.partial(math_ops.cumsum, axis=new_axis, exclusive=exclusive, reverse=reverse) return ragged_functional_ops.map_flat_values(cumsum_bound, x) else: dense_version = x.to_tensor() result = math_ops.cumsum(dense_version, axis, exclusive=exclusive, reverse=reverse, name=name) return ragged_tensor.RaggedTensor.from_tensor(result, lengths=x.nested_row_lengths())
Calculate math_ops.cumsum for a RaggedTensor. Given a ragged tensor `x`, the `result` is a ragged tensor with the same shape. One can calculate the value of `result[i_1...i_k]` as follows: ``` dense_result=tf.math.cumsum(rt.to_tensor(), axis=axis, exclusive=exclusive, reverse=reverse) result[i_1...i_k]=dense_result[i_1...i_k] ``` Args: x: the original ragged tensor to sum. axis: the axis along which to sum, can range -rank<=axis<rank. exclusive: is the sum exclusive or inclusive? If True, then result[0]=0. If False, then result[0]=x[0]. reverse: If True, sum from back to front. name: the name of the op. Returns: the cumulative sum.
github-repos
def CreateSession(cls, artifact_filter_names=None, command_line_arguments=None, debug_mode=False, filter_file_path=None, preferred_encoding='utf-8', preferred_time_zone=None, preferred_year=None): session = sessions.Session() session.artifact_filters = artifact_filter_names session.command_line_arguments = command_line_arguments session.debug_mode = debug_mode session.filter_file = filter_file_path session.preferred_encoding = preferred_encoding session.preferred_time_zone = preferred_time_zone session.preferred_year = preferred_year return session
Creates a session attribute container. Args: artifact_filter_names (Optional[list[str]]): names of artifact definitions that are used for filtering file system and Windows Registry key paths. command_line_arguments (Optional[str]): the command line arguments. debug_mode (bool): True if debug mode was enabled. filter_file_path (Optional[str]): path to a file with find specifications. preferred_encoding (Optional[str]): preferred encoding. preferred_time_zone (Optional[str]): preferred time zone. preferred_year (Optional[int]): preferred year. Returns: Session: session attribute container.
codesearchnet
def set_status(self, on, switch=1): if isinstance(switch, int): switch = str(switch) payload = self.generate_payload(SET, {switch:on}) data = self._send_receive(payload) log.debug('set_status received data=%r', data) return data
Set status of the device to 'on' or 'off'. Args: on(bool): True for 'on', False for 'off'. switch(int): The switch to set
juraj-google-style
def draw_lines(self, *points): point_array = ffi.new('SDL_Point[]', len(points)) for i, p in enumerate(points): point_array[i] = p._ptr[0] check_int_err(lib.SDL_RenderDrawLines(self._ptr, point_array, len(points)))
Draw a series of connected lines on the current rendering target. Args: *points (Point): The points along the lines. Raises: SDLError: If an error is encountered.
juraj-google-style
def handle_event(self, event_handler, event_name, user_args, event_timeout=None, cond=None, cond_timeout=None): worker = self.executor.submit(self._handle, event_handler, event_name, user_args, event_timeout, cond, cond_timeout) return worker
Handle events that don't have registered handlers In a new thread, poll one event of specified type from its queue and execute its handler. If no such event exists, the thread waits until one appears. Args: event_handler: Handler for the event, which should take at least one argument - the event json object. event_name: Name of the event to be handled. user_args: User arguments for the handler; to be passed in after the event json. event_timeout: Number of seconds to wait for the event to come. cond: A condition to wait on before executing the handler. Should be a threading.Event object. cond_timeout: Number of seconds to wait before the condition times out. Never times out if None. Returns: A concurrent.Future object associated with the handler. If blocking call worker.result() is triggered, the handler needs to return something to unblock.
codesearchnet
def get_header(message, name): header = message.get(name) log.debug("Getting header {!r}: {!r}".format(name, header)) if header: return decode_header_part(header) return six.text_type()
Gets an email.message.Message and a header name and returns the mail header decoded with the correct charset. Args: message (email.message.Message): email message object name (string): header to get Returns: decoded header
juraj-google-style
def suggestions(self, word): suggestions = set(self._misspelling_dict.get(word, [])).union( set(self._misspelling_dict.get(word.lower(), []))) return sorted([same_case(source=word, destination=w) for w in suggestions])
Returns a list of suggestions for a misspelled word. Args: word: The word to check. Returns: List of zero or more suggested replacements for word.
juraj-google-style
def dumps(o, encoder=None): retval = "" if encoder is None: encoder = TomlEncoder(o.__class__) addtoretval, sections = encoder.dump_sections(o, "") retval += addtoretval while sections: newsections = encoder.get_empty_table() for section in sections: addtoretval, addtosections = encoder.dump_sections( sections[section], section) if addtoretval or (not addtoretval and not addtosections): if retval and retval[-2:] != "\n\n": retval += "\n" retval += "[" + section + "]\n" if addtoretval: retval += addtoretval for s in addtosections: newsections[section + "." + s] = addtosections[s] sections = newsections return retval
Stringifies input dict as toml Args: o: Object to dump into toml encoder: The ``TomlEncoder`` to use for constructing the output string; defaults to a new ``TomlEncoder`` for o's class. Returns: String containing the toml corresponding to dict
juraj-google-style
def __init__(self, section): self.section = section super().__init__('invalid section name: {}'.format(section))
Initialization of instances: Args: section (str): invalid section name. Attributes: section (str): invalid section name.
juraj-google-style
def color_string(self, x): diff_str = "" color = "black" if len(x) == 2 and self.compare_file is not None: difference = x[0] - x[1] if difference: color, sign = ('green', '-') if difference < 0 else ('red', '+') diff_str = '{}{}'.format(sign, self.format_measure(difference)) return [self.format_measure(x[0]), [diff_str, color]]
Return a string formatted delta for the values in x. Args: x: 2-item list of integers (representing number of calls) or 2-item list of floats (representing seconds of runtime). Returns: A list with [formatted x[0], [color, formatted delta]], where color reflects whether x[1] is lower, greater, or the same as x[0].
juraj-google-style
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): if (((class_info.last_line - class_info.starting_linenum) <= 24) or (linenum <= class_info.starting_linenum)): return matched = Match('\\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: prev_line = clean_lines.lines[(linenum - 1)] if ((not IsBlankLine(prev_line)) and (not Search('\\b(class|struct)\\b', prev_line)) and (not Search('\\\\$', prev_line))): end_class_head = class_info.starting_linenum for i in range(class_info.starting_linenum, linenum): if Search('\\{\\s*$', clean_lines.lines[i]): end_class_head = i break if (end_class_head < (linenum - 1)): error(filename, linenum, 'whitespace/blank_line', 3, ('"%s:" should be preceded by a blank line' % matched.group(1)))
Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def create_module_file(txt, directory): name = nonpresent_module_filename() path = os.path.join(directory, name) with open(path, 'w') as fh: fh.write(txt) return path
Create a file in the given directory with a valid module name populated with the given txt. Returns: A path to the file
codesearchnet
def generate_encodeable_characters(characters: Iterable[str], encodings: Iterable[str]) -> Iterable[str]: for c in characters: for encoding in encodings: try: c.encode(encoding) yield c except UnicodeEncodeError: pass
Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings.
juraj-google-style
def __init__(self, make_distribution_fn, convert_to_tensor_fn=tfd.Distribution.sample, **kwargs):
    """Create a `DistributionLambda` Keras layer.

    Args:
        make_distribution_fn: Python `callable` that takes previous layer
            outputs and returns a `tfd.Distribution` instance.
        convert_to_tensor_fn: Python `callable` that takes a
            `tfd.Distribution` instance and returns a `tf.Tensor`-like
            object. Default value: `tfd.Distribution.sample`.
        **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
    """
    if isinstance(make_distribution_fn, six.string_types):
        # Deserialize a function that was serialized for model saving.
        make_distribution_fn = _deserialize_function(make_distribution_fn)
    convert_to_tensor_fn = _get_convert_to_tensor_fn(convert_to_tensor_fn)
    # `Lambda.__init__` would otherwise receive a duplicate `function` kwarg.
    kwargs.pop('function', None)

    def _fn(*fargs, **fkwargs):
        """Builds the distribution and a tensor-coercible value from it."""
        d = make_distribution_fn(*fargs, **fkwargs)
        # Bug fix: `collections.Sequence` was removed in Python 3.10; the ABC
        # lives in `collections.abc` (available since Python 3.3).
        value_is_seq = isinstance(d.dtype, collections.abc.Sequence)
        maybe_composite_convert_to_tensor_fn = (
            (lambda d: tensor_tuple.TensorTuple(convert_to_tensor_fn(d)))
            if value_is_seq else convert_to_tensor_fn)
        distribution = dtc._TensorCoercible(
            distribution=d,
            convert_to_tensor_fn=maybe_composite_convert_to_tensor_fn)
        value = distribution._value()
        # Keep a handle on the distribution so downstream layers can reach it.
        value._tfp_distribution = distribution
        if value_is_seq:
            # For sequence-valued distributions, mirror the shape/dtype of the
            # last component so Keras shape inference works.
            value.shape = value[-1].shape
            value.get_shape = value[-1].get_shape
            value.dtype = value[-1].dtype
            distribution.shape = value[-1].shape
            distribution.get_shape = value[-1].get_shape
        else:
            distribution.shape = value.shape
            distribution.get_shape = value.get_shape
        return distribution, value

    super(DistributionLambda, self).__init__(_fn, **kwargs)
    self._make_distribution_fn = make_distribution_fn
    self._convert_to_tensor_fn = convert_to_tensor_fn
    self._enter_dunder_call = False
Create a `DistributionLambda` Keras layer. Args: make_distribution_fn: Python `callable` that takes previous layer outputs and returns a `tfd.Distribution` instance. convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution` instance and returns a `tf.Tensor`-like object. For examples, see `class` docstring. Default value: `tfd.Distribution.sample`. **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
juraj-google-style
def padFrameRange(frange, zfill):
    """Return the zero-padded version of the frame range string.

    Args:
        frange (str): the frame range to pad.
        zfill (int): the width to zero-fill each frame number to.

    Returns:
        str: the frame range with zero-padded frame numbers.
    """
    def _substitute(match):
        """Substitutes padded for unpadded frames in a single match."""
        groups = list(match.groups())
        groups[1] = pad(groups[1], zfill)
        if groups[4]:
            groups[4] = pad(groups[4], zfill)
        return ''.join(part for part in groups if part)
    return PAD_RE.sub(_substitute, frange)
Return the zero-padded version of the frame range string. Args: frange (str): the frame range to pad zfill (int): the width to zero-fill each frame number to Returns: str: the frame range with zero-padded frame numbers
codesearchnet
def project(self, **kwargs: Dict[str, Any]) -> Union[Hist, Dict[str, Hist]]:
    """Perform the requested projection(s).

    Note:
        All cuts on the original histograms are reset when this completes.

    Args:
        kwargs: Additional named args forwarded to ``projection_name(...)``
            and ``output_key_name(...)``.

    Returns:
        The projected histogram(s); also stored in ``output_observable``.
    """
    if not self.single_observable_projection:
        return self._project_dict(**kwargs)
    return self._project_single_observable(**kwargs)
Perform the requested projection(s). Note: All cuts on the original histograms will be reset when this function is completed. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histogram(s). The projected histograms are also stored in ``output_observable``.
juraj-google-style
def UpdateNumberOfEvents(
    self, number_of_consumed_events, number_of_produced_events):
  """Updates the number of events.

  Args:
    number_of_consumed_events (int): total number of events consumed by
        the process, or None to leave unchanged.
    number_of_produced_events (int): total number of events produced by
        the process, or None to leave unchanged.

  Returns:
    bool: True if either number of events has increased.

  Raises:
    ValueError: if the consumed or produced number of events is smaller
        than the value of the previous update.
  """
  consumed_delta = 0
  if number_of_consumed_events is not None:
    # Totals are monotonically increasing; a decrease indicates bad input.
    if number_of_consumed_events < self.number_of_consumed_events:
      raise ValueError(
          'Number of consumed events smaller than previous update.')
    consumed_delta = (
        number_of_consumed_events - self.number_of_consumed_events)
    self.number_of_consumed_events = number_of_consumed_events
    self.number_of_consumed_events_delta = consumed_delta

  produced_delta = 0
  if number_of_produced_events is not None:
    if number_of_produced_events < self.number_of_produced_events:
      raise ValueError(
          'Number of produced events smaller than previous update.')
    produced_delta = (
        number_of_produced_events - self.number_of_produced_events)
    self.number_of_produced_events = number_of_produced_events
    self.number_of_produced_events_delta = produced_delta

  return consumed_delta > 0 or produced_delta > 0
Updates the number of events. Args: number_of_consumed_events (int): total number of events consumed by the process. number_of_produced_events (int): total number of events produced by the process. Returns: bool: True if either number of events has increased. Raises: ValueError: if the consumed or produced number of events is smaller than the value of the previous update.
juraj-google-style
def get_list(self, id, name=None):
    """Get a list.

    Args:
        id: identifier of the list.
        name: optional display name for the list.

    Returns:
        List: The list with the given `id`.
    """
    attributes = {'id': id, 'name': name}
    return self.create_list(attributes)
Get a list Returns: List: The list with the given `id`
codesearchnet
def set_pattern_actual_step(self, patternnumber, value):
    """Set the 'actual step' parameter for a given pattern.

    Args:
        patternnumber (integer): 0-7
        value (integer): 0-7
    """
    # Validate both inputs before touching the device.
    _checkPatternNumber(patternnumber)
    _checkStepNumber(value)
    register = _calculateRegisterAddress('actualstep', patternnumber)
    self.write_register(register, value, 0)
Set the 'actual step' parameter for a given pattern. Args: * patternnumber (integer): 0-7 * value (integer): 0-7
codesearchnet
def dump(voevent, file, pretty_print=True, xml_declaration=True):
    """Writes the voevent to the file object.

    e.g.::

        with open('/tmp/myvoevent.xml', 'wb') as f:
            voeventparse.dump(v, f)

    Args:
        voevent (:class:`Voevent`): Root node of the VOevent etree.
        file (io.IOBase): An open (binary mode) file object for writing.
        pretty_print (bool): See :func:`dumps`.
        xml_declaration (bool): See :func:`dumps`.
    """
    serialized = dumps(voevent, pretty_print, xml_declaration)
    file.write(serialized)
Writes the voevent to the file object. e.g.:: with open('/tmp/myvoevent.xml','wb') as f: voeventparse.dump(v, f) Args: voevent(:class:`Voevent`): Root node of the VOevent etree. file (io.IOBase): An open (binary mode) file object for writing. pretty_print(bool): See :func:`dumps` xml_declaration(bool): See :func:`dumps`
codesearchnet
def from_ops(*operations: ops.OP_TREE,
             strategy: InsertStrategy = InsertStrategy.EARLIEST,
             device: devices.Device = devices.UnconstrainedDevice
             ) -> 'Circuit':
    """Creates an empty circuit and appends the given operations.

    Args:
        operations: The operations to append to the new circuit.
        strategy: How to append the operations.
        device: Hardware that the circuit should be able to run on.

    Returns:
        The constructed circuit containing the operations.
    """
    circuit = Circuit(device=device)
    circuit.append(operations, strategy)
    return circuit
Creates an empty circuit and appends the given operations. Args: operations: The operations to append to the new circuit. strategy: How to append the operations. device: Hardware that the circuit should be able to run on. Returns: The constructed circuit containing the operations.
juraj-google-style
def concat(values, axis, name: str = 'concat'):
    """tf.concat for structured tensors.

    Does not support (yet) checks on illegal axis values, et cetera.

    Args:
        values: a sequence of StructuredTensors.
        axis: an axis to concatenate upon.
        name: the name of the op(s).

    Returns:
        the params reorganized according to indices.
    """
    if name is None:
        name = 'concat'
    _assert_concat_compatible_structured_tensors(values)
    # Normalize a possibly-negative axis against the rank of the first value.
    axis = array_ops.get_positive_axis(axis, values[0].rank)
    with ops.name_scope(name, 'StructuredConcat', values):
        return _extend_op(values, lambda leaves: array_ops.concat(leaves, axis))
tf.concat for structured tensors. Does not support (yet) checks on illegal axis values, et cetera. Args: values: a sequence of StructuredTensors. axis: an axis to concatenate upon. name: the name of the op(s). Returns: the params reorganized according to indices.
github-repos
def get_variation_for_experiment(self, experiment_id):
    """Helper method to retrieve variation ID for given experiment.

    Args:
        experiment_id: ID of the experiment for which the variation needs to
            be looked up.

    Returns:
        Variation ID corresponding to the experiment. None if no decision
        is available.
    """
    # Fall back to a decision carrying a None variation when absent.
    no_decision = {self.VARIATION_ID_KEY: None}
    decision = self.experiment_bucket_map.get(experiment_id, no_decision)
    return decision.get(self.VARIATION_ID_KEY)
Helper method to retrieve variation ID for given experiment. Args: experiment_id: ID for experiment for which variation needs to be looked up for. Returns: Variation ID corresponding to the experiment. None if no decision available.
juraj-google-style
def delete_direct(self, addresses):
    """Marks state entries for deletion, creating futures when absent.

    Called from the context manager's delete method to either mark an
    existing entry for deletion, or create a new future and immediately
    set it as deleted.

    Args:
        addresses (list of str): The unique full addresses.

    Raises:
        AuthorizationException: if a write to an address is not permitted.
    """
    with self._lock:
        for address in addresses:
            self._validate_write(address)
            future = self._state.get(address)
            if future is None:
                future = _ContextFuture(address=address)
                self._state[address] = future
            future.set_deleted()
Called in the context manager's delete method to either mark an entry for deletion , or create a new future and immediately set it for deletion in the future. Args: address_list (list of str): The unique full addresses. Raises: AuthorizationException
juraj-google-style
def process_alias_export_namespace(namespace):
    """Validate input arguments when the user invokes 'az alias export'.

    Normalizes ``namespace.export_path`` to an absolute path, refuses to
    clobber an existing file, creates missing parent directories, and when
    the target is a directory appends the default alias file name.

    Args:
        namespace: argparse namespace object.

    Raises:
        CLIError: if the export path already exists as a file.
    """
    export_path = os.path.abspath(namespace.export_path)
    namespace.export_path = export_path
    if os.path.isfile(export_path):
        raise CLIError(FILE_ALREADY_EXISTS_ERROR.format(export_path))
    parent_dir = os.path.dirname(export_path)
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
    if os.path.isdir(export_path):
        namespace.export_path = os.path.join(export_path, ALIAS_FILE_NAME)
Validate input arguments when the user invokes 'az alias export'. Args: namespace: argparse namespace object.
juraj-google-style
def of(cls, key: SearchKey, params: SearchParams) -> 'SearchCriteria':
    """Factory method producing a search criteria sub-class from a search key.

    Args:
        key: The search key defining the criteria.
        params: The parameters that may be used by some searches.

    Raises:
        SearchNotAllowed: if the key is disabled or not recognized.
    """
    key_name = key.value
    if key_name in params.disabled:
        raise SearchNotAllowed(key_name)
    elif key.inverse:
        # NOT <key>: wrap the non-inverted key in an inverting criteria.
        return InverseSearchCriteria(key.not_inverse, params)
    elif key_name == b'SEQSET':
        return SequenceSetSearchCriteria(key.filter_sequence_set, params)
    elif key_name == b'KEYSET':
        return SearchCriteriaSet(key.filter_key_set, params)
    elif key_name == b'ALL':
        return AllSearchCriteria(params)
    elif key_name == b'OR':
        left_key, right_key = key.filter_key_or
        return OrSearchCriteria(left_key, right_key, params)
    # System flags: each <FLAG>/UN<FLAG> pair maps onto HasFlagSearchCriteria
    # with the expected boolean value.
    elif key_name == b'ANSWERED':
        return HasFlagSearchCriteria(Answered, True, params)
    elif key_name == b'UNANSWERED':
        return HasFlagSearchCriteria(Answered, False, params)
    elif key_name == b'DELETED':
        return HasFlagSearchCriteria(Deleted, True, params)
    elif key_name == b'UNDELETED':
        return HasFlagSearchCriteria(Deleted, False, params)
    elif key_name == b'DRAFT':
        return HasFlagSearchCriteria(Draft, True, params)
    elif key_name == b'UNDRAFT':
        return HasFlagSearchCriteria(Draft, False, params)
    elif key_name == b'FLAGGED':
        return HasFlagSearchCriteria(Flagged, True, params)
    elif key_name == b'UNFLAGGED':
        return HasFlagSearchCriteria(Flagged, False, params)
    elif key_name == b'RECENT':
        return HasFlagSearchCriteria(Recent, True, params)
    elif key_name == b'OLD':
        return HasFlagSearchCriteria(Recent, False, params)
    elif key_name == b'SEEN':
        return HasFlagSearchCriteria(Seen, True, params)
    elif key_name == b'UNSEEN':
        return HasFlagSearchCriteria(Seen, False, params)
    elif key_name == b'KEYWORD':
        # User-defined keyword flags.
        return HasFlagSearchCriteria(key.filter_flag, True, params)
    elif key_name == b'UNKEYWORD':
        return HasFlagSearchCriteria(key.filter_flag, False, params)
    elif key_name == b'NEW':
        return NewSearchCriteria(params)
    # Internal-date comparisons.
    elif key_name == b'BEFORE':
        return DateSearchCriteria(key.filter_datetime, '<', params)
    elif key_name == b'ON':
        return DateSearchCriteria(key.filter_datetime, '=', params)
    elif key_name == b'SINCE':
        return DateSearchCriteria(key.filter_datetime, '>=', params)
    # Header (sent) date comparisons.
    elif key_name == b'SENTBEFORE':
        return HeaderDateSearchCriteria(key.filter_datetime, '<', params)
    elif key_name == b'SENTON':
        return HeaderDateSearchCriteria(key.filter_datetime, '=', params)
    elif key_name == b'SENTSINCE':
        return HeaderDateSearchCriteria(key.filter_datetime, '>=', params)
    # Message-size comparisons.
    elif key_name == b'SMALLER':
        return SizeSearchCriteria(key.filter_int, '<', params)
    elif key_name == b'LARGER':
        return SizeSearchCriteria(key.filter_int, '>', params)
    # Envelope, header, and body text searches.
    elif key_name in (b'BCC', b'CC', b'FROM', b'SUBJECT', b'TO'):
        return EnvelopeSearchCriteria(key_name, key.filter_str, params)
    elif key_name == b'HEADER':
        name, value = key.filter_header
        return HeaderSearchCriteria(name, value, params)
    elif key_name in (b'BODY', b'TEXT'):
        return BodySearchCriteria(key.filter_str, params)
    raise SearchNotAllowed(key_name)
Factory method for producing a search criteria sub-class from a search key. Args: key: The search key defining the criteria. params: The parameters that may be used by some searches.
juraj-google-style
def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):
    """Return the diseases that match annotated HPO terms.

    Args:
        username (str): username to use for the phenomizer connection.
        password (str): password to use for the phenomizer connection.
        hpo_ids: HPO term identifiers to query with.
        p_value_treshold: maximum p-value for a disease to be kept.

    Returns:
        list of dict on the form
        {'p_value': float, 'disease_source': str, 'disease_nr': int,
         'gene_symbols': list(str), 'description': str, 'raw_line': str},
        or None if the query aborted with SystemExit.
    """
    try:
        results = query_phenomizer.query(username, password, *hpo_ids)
        return [
            result for result in results
            if result['p_value'] <= p_value_treshold
        ]
    except SystemExit:
        # The phenomizer client aborts with SystemExit on failure.
        return None
Return the diseases that match annotated HPO terms. Args: username (str): username to use for phenomizer connection password (str): password to use for phenomizer connection hpo_ids (list): HPO term identifiers to query with p_value_treshold (float): maximum p-value for a disease to be kept (default 1) Returns: query_result: a list of dictionaries on the form { 'p_value': float, 'disease_source': str, 'disease_nr': int, 'gene_symbols': list(str), 'description': str, 'raw_line': str }, or None if the query aborted
codesearchnet
def ucast_ip(ip_addr, return_tuple=True):
    """Check whether an address is unicast.

    Args:
        ip_addr: IP address in dotted-quad format, e.g. 192.168.1.1.
        return_tuple: If True, keep prompting until a valid unicast IP is
            entered and return it; if False, return True/False.

    Returns:
        See ``return_tuple`` for return options.
    """
    # First octet restricted to 0-223 (excludes multicast/reserved ranges).
    regex_ucast_ip = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$")
    if not return_tuple:
        return bool(regex_ucast_ip.match(ip_addr))
    # Interactive mode: re-prompt until the input validates.
    while not regex_ucast_ip.match(ip_addr):
        print("Not a good unicast IP.")
        print("Please try again.")
        ip_addr = input("Please enter a unicast IP address in the following format x.x.x.x: ")
    return ip_addr
Function to check if an address is unicast Args: ip_addr: Unicast IP address in the following format 192.168.1.1 return_tuple: Set to True to return the validated IP, set to False to return True or False Returns: see return_tuple for return options
juraj-google-style
def _rpc(self, method, *args):
    """Sends an rpc to the app.

    Args:
        method: str, The name of the method to execute.
        args: any, The args of the method.

    Returns:
        The result of the rpc.

    Raises:
        ProtocolError: Something went wrong with the protocol.
        ApiError: The rpc went through, however executed with errors.
    """
    # Serialize the request/response exchange so concurrent callers cannot
    # interleave messages on the shared connection.
    with self._lock:
        apiid = next(self._counter)
        data = {'id': apiid, 'method': method, 'params': args}
        request = json.dumps(data)
        self._client_send(request)
        response = self._client_receive()
    if not response:
        raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_SERVER)
    result = json.loads(str(response, encoding='utf8'))
    if result['error']:
        raise ApiError(self._ad, result['error'])
    # The response id must match the request id we just sent.
    if result['id'] != apiid:
        raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
    if result.get('callback') is not None:
        # Lazily start the event client the first time a callback is returned.
        if self._event_client is None:
            self._event_client = self._start_event_client()
        return callback_handler.CallbackHandler(
            callback_id=result['callback'],
            event_client=self._event_client,
            ret_value=result['result'],
            method_name=method,
            ad=self._ad)
    return result['result']
Sends an rpc to the app. Args: method: str, The name of the method to execute. args: any, The args of the method. Returns: The result of the rpc. Raises: ProtocolError: Something went wrong with the protocol. ApiError: The rpc went through, however executed with errors.
juraj-google-style
def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):
    """Helper function to list all records on a CloudFlare DNS Zone.

    Pages through the ``dns_records`` endpoint and merges records sharing a
    name into a single entry with a sorted list of values.

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object
        zoneID (`int`): Internal CloudFlare ID of the DNS zone
        **kwargs (`dict`): Additional arguments to be consumed by the API
            endpoint

    Returns:
        `list` of `dict`: one entry per record name, each with keys
        ``name``, ``value`` (sorted list of contents) and ``type``.
    """
    done = False
    records = {}
    page = 1
    while (not done):
        kwargs['page'] = page
        response = self.__cloudflare_request(account=account, path='/zones/{}/dns_records'.format(zoneID), args=kwargs)
        info = response['result_info']
        # Stop when the API reports no more pages.
        if (('total_pages' not in info) or (page >= info['total_pages'])):
            done = True
        else:
            page += 1
        for record in response['result']:
            # Merge multi-value records (e.g. round-robin A records) by name.
            if (record['name'] in records):
                records[record['name']]['value'] = sorted((records[record['name']]['value'] + [record['content']]))
            else:
                records[record['name']] = {'name': record['name'], 'value': sorted([record['content']]), 'type': record['type']}
    return list(records.values())
Helper function to list all records on a CloudFlare DNS Zone. Returns a `dict` containing the records and their information. Args: account (:obj:`CloudFlareAccount`): A CloudFlare Account object zoneID (`int`): Internal CloudFlare ID of the DNS zone **kwargs (`dict`): Additional arguments to be consumed by the API endpoint Returns: :obj:`dict` of `str`: `dict`
codesearchnet
def run(func, options, args=(), kwargs=None, host='localhost', port=8000):
    """Runs profilers on a function and uploads the collected stats.

    Args:
        func: A Python function.
        options: A string with profilers configuration (i.e. 'cmh').
        args: func non-keyword arguments.
        kwargs: func keyword arguments.
        host: Host name to send collected data.
        port: Port number to send collected data.

    Returns:
        A result of func execution.
    """
    # Fix: avoid a mutable default argument for kwargs.
    if kwargs is None:
        kwargs = {}
    run_stats = run_profilers((func, args, kwargs), options)
    result = None
    result_set = False
    for prof in run_stats:
        # Fix: keep the first profiler's result even when it is falsy
        # (the original `if not result` would overwrite 0/''/False).
        if not result_set:
            result = run_stats[prof]['result']
            result_set = True
        # Strip the raw result before uploading the stats payload.
        del run_stats[prof]['result']
    post_data = gzip.compress(json.dumps(run_stats).encode('utf-8'))
    # NOTE(review): the original statement was truncated in this copy
    # (`urlopen(('http:`); reconstructed as a POST to the stats server —
    # confirm against upstream.
    urllib.request.urlopen('http://%s:%s' % (host, port), post_data)
    return result
Runs profilers on a function. Args: func: A Python function. options: A string with profilers configuration (i.e. 'cmh'). args: func non-keyword arguments. kwargs: func keyword arguments. host: Host name to send collected data. port: Port number to send collected data. Returns: A result of func execution.
codesearchnet
def _pool(inputs, initial_value, reduce_fn, pool_size, strides=None, padding='valid'):
    """Helper function to define pooling functions.

    Args:
        inputs: input data of shape `N+2`.
        initial_value: the initial value for the reduction.
        reduce_fn: a reduce function of the form `(T, T) -> T`.
        pool_size: a sequence of `N` integers, representing the window size
            to reduce over.
        strides: a sequence of `N` integers, representing the inter-window
            strides (default: `(1, ..., 1)`).
        padding: either the string `same` or `valid`.

    Returns:
        The output of the reduction for each window slice.
    """
    if padding not in ('same', 'valid'):
        raise ValueError(f"Invalid padding '{padding}', must be 'same' or 'valid'.")
    # lax expects the padding spelled in upper case.
    return lax.reduce_window(
        inputs, initial_value, reduce_fn, pool_size, strides, padding.upper()
    )
Helper function to define pooling functions. Args: inputs: input data of shape `N+2`. initial_value: the initial value for the reduction. reduce_fn: a reduce function of the form `(T, T) -> T`. pool_size: a sequence of `N` integers, representing the window size to reduce over. strides: a sequence of `N` integers, representing the inter-window strides (default: `(1, ..., 1)`). padding: either the string `same` or `valid`. Returns: The output of the reduction for each window slice.
github-repos
def offTagAdd(self, name, func):
    """Unregister a callback for tag addition.

    Args:
        name (str): The name of the tag or tag glob.
        func (function): The callback func(node, tagname, tagval).
    """
    if '*' in name:
        # Glob registrations live in a separate structure.
        self.ontagaddglobs.rem(name, func)
        return
    callbacks = self.ontagadds.get(name)
    if callbacks is None:
        return
    try:
        callbacks.remove(func)
    except ValueError:
        # Removing a callback that was never registered is a no-op.
        pass
Unregister a callback for tag addition. Args: name (str): The name of the tag or tag glob. func (function): The callback func(node, tagname, tagval).
codesearchnet
def _cast_value(self, value):
    """Cast a raw CSV string into the most specific type possible.

    Tries datetime (when enabled), then int, float, and finally str,
    returning the first successful conversion.

    Args:
        value: The value to be cast.

    Returns:
        The cast value.
    """
    if self.convert_datetimes:
        try:
            # Interpret the value as a Unix timestamp; pre-epoch values are
            # rejected so ordinary small numbers are not mistaken for dates.
            as_datetime = datetime.datetime.fromtimestamp(float(value))
            if as_datetime >= datetime.datetime(1970, 1, 1):
                return as_datetime
        except ValueError:
            pass
    for converter in (int, float, str):
        try:
            return converter(value)
        except ValueError:
            continue
    return value
Internal method that makes sure every value in dictionary is properly cast into the correct types, instead of just treating everything like a string from the csv file. Args: value : The value to be casted Returns: A casted Value.
juraj-google-style
def distribute_tensor(tensor, layout):
    """Change the layout of a Tensor value in the jit function execution.

    Args:
        tensor: a Tensor to change the layout.
        layout: `TensorLayout` to be applied on the value.

    Returns:
        a new value with the specified tensor layout.
    """
    # Symbolic Keras tensors carry no concrete value to re-lay-out.
    is_symbolic = isinstance(tensor, KerasTensor)
    return tensor if is_symbolic else distribution_lib.distribute_tensor(tensor, layout)
Change the layout of a Tensor value in the jit function execution. Args: tensor: a Tensor to change the layout. layout: `TensorLayout` to be applied on the value. Returns: a new value with the specified tensor layout.
github-repos
def delete(self, id, **kwargs):
    """Delete an object on the server.

    Args:
        id: ID of the object to delete, or None to delete the collection
            path itself.
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabDeleteError: If the server cannot perform the request.
    """
    if id is None:
        path = self.path
    else:
        obj_id = id
        # Non-integer ids may contain '/', which must be URL-encoded.
        if not isinstance(obj_id, int):
            obj_id = obj_id.replace('/', '%2F')
        path = '%s/%s' % (self.path, obj_id)
    self.gitlab.http_delete(path, **kwargs)
Delete an object on the server. Args: id: ID of the object to delete **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server cannot perform the request
codesearchnet
def quadratic_2d(data):
    """Compute the quadratic estimate of the centroid in a 2d-array.

    Fits a quadratic surface to the 3x3 neighbourhood around the array's
    maximum and returns the location of the surface's stationary point
    (sub-pixel peak estimate).

    Args:
        data (2darray): two dimensional data array

    Returns:
        tuple: centroid estimate on the row and column directions,
        respectively. Falls back to the integer argmax position when the
        peak lies on the array border (3x3 window incomplete).
    """
    arg_data_max = np.argmax(data)
    i, j = np.unravel_index(arg_data_max, data.shape)
    # 3x3 window centred on the maximum.
    z_ = data[i-1:i+2, j-1:j+2]
    # Coefficients of z ~ a + b*y + c*x + d*y^2 + e*x*y + f*x^2 from the
    # 9-point stencil (least-squares weights — do not reorder).
    try:
        a = (-z_[0,0] + 2*z_[0,1] - z_[0,2] + 2*z_[1,0] + 5*z_[1,1] + 2*z_[1,2] - z_[2,0] + 2*z_[2,1] - z_[2,2]) / 9
        b = (-z_[0,0] - z_[0,1] - z_[0,2] + z_[2,0] + z_[2,1] + z_[2,2]) / 6
        c = (-z_[0,0] + z_[0,2] - z_[1,0] + z_[1,2] - z_[2,0] + z_[2,2]) / 6
        d = (z_[0,0] + z_[0,1] + z_[0,2] - z_[1,0]*2 - z_[1,1]*2 - z_[1,2]*2 + z_[2,0] + z_[2,1] + z_[2,2])/6
        e = (z_[0,0] - z_[0,2] - z_[2,0] + z_[2,2]) * .25
        f = (z_[0,0] - 2 * z_[0,1] + z_[0,2] + z_[1,0] - 2 * z_[1,1] + z_[1,2] + z_[2,0] - 2 * z_[2,1] + z_[2,2]) / 6
    except IndexError:
        # Peak on the border: the 3x3 window is incomplete, so return the
        # integer maximum instead.
        return (i, j)
    # Stationary point of the fitted quadratic, relative to (i, j).
    det = 4 * d * f - e ** 2
    xm = - (2 * f * b - c * e) / det
    ym = - (2 * d * c - b * e) / det
    return (i+xm, j+ym)
Compute the quadratic estimate of the centroid in a 2d-array. Args: data (2darray): two dimensional data array Returns center (tuple): centroid estimate on the row and column directions, respectively
juraj-google-style
def get(self, ID, index='vector-web-s'):
    """Retrieves a vector.

    Not usually necessary because searching is the best way to find & get
    stuff.

    Args:
        ID (str): ID of the vector object.
        index (str): Optional. Index the object lives in. Defaults to
            'vector-web-s'.

    Returns:
        record (dict): A dict identical to the json representation of the
        catalog record.
    """
    url = self.get_url % index
    response = self.gbdx_connection.get(url + ID)
    # Surface HTTP errors before attempting to decode the body.
    response.raise_for_status()
    return response.json()
Retrieves a vector. Not usually necessary because searching is the best way to find & get stuff. Args: ID (str): ID of the vector object index (str): Optional. Index the object lives in. defaults to 'vector-web-s' Returns: record (dict): A dict object identical to the json representation of the catalog record
juraj-google-style
def forward(self, inputs_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
    """Runs the hybrid encoder: transformer layers on selected feature
    levels, then a top-down FPN and bottom-up PAN fusion.

    Args:
        inputs_embeddings (`torch.FloatTensor`): multi-scale feature maps
            from the backbone + projection layers.
        output_attentions (`bool`, *optional*): whether to return attention
            tensors of all attention layers.
        output_hidden_states (`bool`, *optional*): whether to return hidden
            states of all layers.
        return_dict (`bool`, *optional*): whether to return a `ModelOutput`
            instead of a plain tuple.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    hidden_states = inputs_embeddings
    encoder_states = () if output_hidden_states else None
    all_attentions = () if output_attentions else None
    # Project every backbone level to the encoder hidden size.
    projected_features = [self.channel_projection_layers[i](feature) for i, feature in enumerate(hidden_states)]
    # Apply transformer encoder layers only to the selected feature levels.
    for encoder_layer_index, feature_to_project_index in enumerate(self.encoder_projection_indices):
        if output_hidden_states:
            encoder_states = encoder_states + (projected_features[feature_to_project_index],)
        height, width = projected_features[feature_to_project_index].shape[2:]
        # (batch, C, H, W) -> (batch, H*W, C) for attention.
        src_flatten = projected_features[feature_to_project_index].flatten(2).permute(0, 2, 1)
        if self.training or self.eval_size is None:
            pos_embed = self.build_2d_sincos_position_embedding(width, height, self.encoder_hidden_dim, self.positional_encoding_temperature, device=src_flatten.device, dtype=src_flatten.dtype).to(src_flatten.device, src_flatten.dtype)
        else:
            # At eval time with a fixed size, a precomputed embedding is
            # baked into the layer, so no positional embedding is passed.
            pos_embed = None
        layer_outputs = self.encoder[encoder_layer_index](src_flatten, pos_embed=pos_embed, output_attentions=output_attentions)
        # Back to (batch, C, H, W).
        projected_features[feature_to_project_index] = layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous()
        if output_attentions:
            all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (projected_features[feature_to_project_index],)
    # Top-down FPN path: upsample high-level features and fuse with the
    # next lower level.
    fpn_feature_maps = [projected_features[-1]]
    for idx in range(len(self.in_channels) - 1, 0, -1):
        feat_high = fpn_feature_maps[0]
        feat_low = projected_features[idx - 1]
        feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx](feat_high)
        fpn_feature_maps[0] = feat_high
        upsample_feat = F.interpolate(feat_high, scale_factor=2.0, mode='nearest')
        fps_map = self.fpn_blocks[len(self.in_channels) - 1 - idx](torch.concat([upsample_feat, feat_low], dim=1))
        fpn_feature_maps.insert(0, fps_map)
    # Bottom-up PAN path: downsample low-level features and fuse upward.
    fpn_states = [fpn_feature_maps[0]]
    for idx in range(len(self.in_channels) - 1):
        feat_low = fpn_states[-1]
        feat_high = fpn_feature_maps[idx + 1]
        downsample_feat = self.downsample_convs[idx](feat_low)
        hidden_states = self.pan_blocks[idx](torch.concat([downsample_feat, feat_high.to(downsample_feat.device)], dim=1))
        fpn_states.append(hidden_states)
    if not return_dict:
        return (fpn_states[-1], encoder_states, all_attentions, fpn_states)
    return OmDetTurboEncoderOutput(last_hidden_state=fpn_states[-1], hidden_states=encoder_states, attentions=all_attentions, extracted_states=fpn_states)
Args: inputs_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layers) that is passed to the encoder. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
github-repos
def __init__(self, timestep, natoms, box, data):
    """Base constructor.

    Args:
        timestep (int): Current timestep.
        natoms (int): Total number of atoms in the box.
        box (LammpsBox): Simulation box.
        data (pd.DataFrame): Dumped atomic data.
    """
    self.timestep = timestep
    self.natoms = natoms
    self.box = box
    self.data = data
Base constructor. Args: timestep (int): Current timestep. natoms (int): Total number of atoms in the box. box (LammpsBox): Simulation box. data (pd.DataFrame): Dumped atomic data.
juraj-google-style
def extend(self, elts):
    """Adds elts to the tasks.

    Args:
        elts (Sequence): an iterable of elements appended to the task's
            bundle_field.

    Returns:
        Event: an event that can be used to wait on the response.
    """
    # Copy so later caller-side mutation cannot affect the queued bundle.
    snapshot = elts[:]
    self._in_deque.append(snapshot)
    event = self._event_for(snapshot)
    self._event_deque.append(event)
    return event
Adds elts to the tasks. Args: elts (Sequence): a iterable of elements that can be appended to the task's bundle_field. Returns: Event: an event that can be used to wait on the response.
juraj-google-style
def _get_fullname(obj):
    """Get the full name of an object including the module.

    Args:
        obj: An object.

    Returns:
        The full class name of the object.
    """
    # Fall back to the object's class when it has no __name__ of its own.
    target = obj if hasattr(obj, "__name__") else obj.__class__
    module = target.__module__
    if module in ("builtins", "__builtin__"):
        # Builtins are referred to by bare name, without a module prefix.
        return target.__name__
    return "{}.{}".format(module, target.__name__)
Get the full name of an object including the module. Args: obj: An object. Returns: The full class name of the object.
juraj-google-style
def set_scf_algorithm_and_iterations(self, algorithm="diis", iterations=50):
    """Set algorithm used for converging SCF and max number of SCF iterations.

    Args:
        algorithm: The algorithm used for converging SCF. (str)
        iterations: The max number of SCF iterations. (Integer)

    Raises:
        ValueError: if the algorithm is not supported by QChem.
    """
    available_algorithms = {"diis", "dm", "diis_dm", "diis_gdm", "gdm",
                            "rca", "rca_diis", "roothaan"}
    normalized = algorithm.lower()
    if normalized not in available_algorithms:
        raise ValueError("Algorithm " + algorithm + " is not available in QChem")
    self.params["rem"]["scf_algorithm"] = normalized
    self.params["rem"]["max_scf_cycles"] = iterations
Set algorithm used for converging SCF and max number of SCF iterations. Args: algorithm: The algorithm used for converging SCF. (str) iterations: The max number of SCF iterations. (Integer)
juraj-google-style
def _finalize_outputs(cls, mapreduce_spec, mapreduce_state):
    """Finalize outputs.

    Only finalizes when an output writer is configured and the job
    finished successfully.

    Args:
        cls: handler class (used as an unbound/class method).
        mapreduce_spec: an instance of MapreduceSpec.
        mapreduce_state: an instance of MapreduceState.
    """
    if not mapreduce_spec.mapper.output_writer_class():
        return
    if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS:
        return
    mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)
Finalize outputs. Args: mapreduce_spec: an instance of MapreduceSpec. mapreduce_state: an instance of MapreduceState.
juraj-google-style
def create_game(self, map_name):
    """Create a game for the agents to join.

    Args:
        map_name: The map to use.
    """
    game_map = maps.get(map_name)
    game_map_data = game_map.data(self._run_config)
    # Upload the map to every controller once; remember what we uploaded.
    if map_name not in self._saved_maps:
        for controller in self._controllers:
            controller.save_map(game_map.path, game_map_data)
        self._saved_maps.add(map_name)
    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=game_map.path),
        disable_fog=False)
    # One participant slot per agent.
    for _ in range(self._num_agents):
        create.player_setup.add(type=sc_pb.Participant)
    self._controllers[0].create_game(create)
Create a game for the agents to join. Args: map_name: The map to use.
juraj-google-style
def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
    """Reassembles ViT token sequences into image-like feature maps.

    Args:
        hidden_states (`List[torch.FloatTensor]`, each of shape
            `(batch_size, sequence_length + 1, hidden_size)`): List of
            hidden states from the backbone (CLS token first).
        patch_height: optional height of the patch grid; inferred as a
            square grid when not provided.
        patch_width: optional width of the patch grid.
    """
    out = []
    for i, hidden_state in enumerate(hidden_states):
        if i not in self.neck_ignore_stages:
            # Split off the CLS (readout) token from the patch tokens.
            cls_token, hidden_state = (hidden_state[:, 0], hidden_state[:, 1:])
            batch_size, sequence_length, num_channels = hidden_state.shape
            if patch_height is not None and patch_width is not None:
                hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
            else:
                # Assume a square patch grid when the grid size is unknown.
                size = torch_int(sequence_length ** 0.5)
                hidden_state = hidden_state.reshape(batch_size, size, size, num_channels)
            # (B, H, W, C) -> (B, C, H, W).
            hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
            feature_shape = hidden_state.shape
            if self.config.readout_type == 'project':
                # Concatenate the readout token to every patch token, then
                # project back down to the original channel count.
                hidden_state = hidden_state.flatten(2).permute((0, 2, 1))
                readout = cls_token.unsqueeze(1).expand_as(hidden_state)
                hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1))
                hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape)
            elif self.config.readout_type == 'add':
                # Add the readout token to every patch token instead.
                hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1)
                hidden_state = hidden_state.reshape(feature_shape)
            hidden_state = self.layers[i](hidden_state)
        # Ignored stages pass through unchanged.
        out.append(hidden_state)
    return out
Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone.
github-repos
def connect_engine(self):
    """Establish a connection to the database.

    Provides simple error handling for fatal errors.

    Returns:
        True, if we could establish a connection, else False.
    """
    try:
        self.connection = self.engine.connect()
    except sa.exc.OperationalError as opex:
        # Log and report failure instead of propagating the exception.
        LOG.fatal("Could not connect to the database. The error was: '%s'",
                  str(opex))
        return False
    return True
Establish a connection to the database. Provides simple error handling for fatal errors. Returns: True, if we could establish a connection, else False.
codesearchnet
def _ReadStreamDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
    """Reads a stream data type definition.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        definition_values (dict[str, object]): definition values.
        definition_name (str): name of the definition.
        is_member (Optional[bool]): True if the data type definition is a
            member data type definition.

    Returns:
        StreamDefinition: stream data type definition.

    Raises:
        DefinitionReaderError: if the definitions values are missing or if
            the format is incorrect.
    """
    # Member definitions allow a different set of supported values.
    if is_member:
        supported_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE
    else:
        supported_values = self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE
    return self._ReadElementSequenceDataTypeDefinition(
        definitions_registry, definition_values, data_types.StreamDefinition,
        definition_name, supported_values)
Reads a stream data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: StreamDefinition: stream data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
codesearchnet
def write_to_path(self, path, suffix='', format='png', overwrite=False):
    """Output the data in the dataframe's 'image' column to a directory
    structured by project->sample and named by frame.

    Args:
        path (str): Where to write the directory of images.
        suffix (str): for labeling the images you write.
        format (str): default 'png'; format to write the file.
        overwrite (bool): default False; if True, can overwrite files in
            the path.

    Raises:
        ValueError: if `path` exists and `overwrite` is False.

    Modifies:
        Creates path folder if necessary and writes images to path.
    """
    if os.path.exists(path) and overwrite is False:
        # Bug fix: message previously said 'ovewrite=True' (typo) — the
        # parameter is named 'overwrite'.
        raise ValueError("Error: use overwrite=True to overwrite images")
    os.makedirs(path, exist_ok=True)
    for _, row in self.iterrows():
        sample_dir = os.path.join(path, row['project_name'], row['sample_name'])
        os.makedirs(sample_dir, exist_ok=True)
        if suffix == '':
            base_name = row['frame_name']
        else:
            base_name = row['frame_name'] + '_' + suffix
        file_name = os.path.join(sample_dir, base_name + '.' + format)
        imageio.imwrite(file_name, row['image'], format=format)
Output the data in the dataframe's 'image' column to a directory structured by project->sample and named by frame Args: path (str): Where to write the directory of images suffix (str): for labeling the images you write format (str): default 'png' format to write the file overwrite (bool): default False. If True, can overwrite files in the path Modifies: Creates path folder if necessary and writes images to path
juraj-google-style
def get_extrema(self, normalize_rxn_coordinate=True):
    """Returns the positions of the extrema along the MEP.

    Both local minimums and maximums are returned.

    Args:
        normalize_rxn_coordinate (bool): Whether to normalize the reaction
            coordinate to between 0 and 1. Defaults to True.

    Returns:
        (min_extrema, max_extrema), where the extrema are given as
        [(x1, y1), (x2, y2), ...].
    """
    x = np.arange(0, np.max(self.r), 0.01)
    # Spline values in meV.
    y = self.spline(x) * 1000
    scale = 1 / self.r[-1] if normalize_rxn_coordinate else 1
    minima = []
    maxima = []
    # Scan interior points for strict local extrema.
    for i in range(1, len(x) - 1):
        left, mid, right = y[i - 1], y[i], y[i + 1]
        if mid < left and mid < right:
            minima.append((x[i] * scale, mid))
        elif mid > left and mid > right:
            maxima.append((x[i] * scale, mid))
    return (minima, maxima)
Returns the positions of the extrema along the MEP. Both local minimums and maximums are returned. Args: normalize_rxn_coordinate (bool): Whether to normalize the reaction coordinate to between 0 and 1. Defaults to True. Returns: (min_extrema, max_extrema), where the extrema are given as [(x1, y1), (x2, y2), ...].
codesearchnet
def install(self, ref, table_name=None, index_columns=None, logger=None):
    """Finds a table or partition by reference and installs it to the
    warehouse db.

    Args:
        ref (str): id, vid (versioned id), name or vname (versioned name)
            of the table or partition.
        table_name: optional target table name for partition installs.
        index_columns: optional columns to index for partition installs.
        logger: optional logger passed through to the backend.
    """
    try:
        parsed = ObjectNumber.parse(ref)
        # Only table references are installed as tables; everything else is
        # routed to the partition path below.
        if not isinstance(parsed, TableNumber):
            raise NotObjectNumberError
        table = self._library.table(ref)
        connection = self._backend._get_connection()
        return self._backend.install_table(connection, table, logger=logger)
    except NotObjectNumberError:
        # Not a table number: treat the reference as a partition.
        partition = self._library.partition(ref)
        connection = self._backend._get_connection()
        return self._backend.install(
            connection, partition, table_name=table_name,
            index_columns=index_columns, logger=logger)
Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition.
juraj-google-style
def _fill_from_default(self, default_job_config):
    """Merge this job config with a default job config.

    The keys in this object take precedence over the keys in the default
    config. The merge is done at the top-level as well as for keys one
    level below the job type.

    Arguments:
        default_job_config (google.cloud.bigquery.job._JobConfig):
            The default job config that will be used to fill in self.

    Returns:
        google.cloud.bigquery.job._JobConfig: A new (merged) job config.

    Raises:
        TypeError: if the two configs have different job types.
    """
    if self._job_type != default_job_config._job_type:
        raise TypeError(
            'attempted to merge two incompatible job types: '
            + repr(self._job_type) + ', '
            + repr(default_job_config._job_type))
    merged = self.__class__()
    # Start from a deep copy of the defaults, then overlay self's values.
    properties = copy.deepcopy(default_job_config._properties)
    for key, value in self._properties.items():
        if key != self._job_type:
            properties[key] = value
    # Merge one level below the job type key instead of replacing wholesale.
    properties[self._job_type].update(self._properties[self._job_type])
    merged._properties = properties
    return merged
Merge this job config with a default job config. The keys in this object take precedence over the keys in the default config. The merge is done at the top-level as well as for keys one level below the job type. Arguments: default_job_config (google.cloud.bigquery.job._JobConfig): The default job config that will be used to fill in self. Returns: google.cloud.bigquery.job._JobConfig A new (merged) job config.
codesearchnet
def set_bool(self, location, value):
    """Set a boolean value on an element.

    Casper booleans in XML are the string literals "true" or "false"; this
    writes the correct literal as the text of the element at ``location``.

    Args:
        location: Element, or a string path argument to find().
        value: Boolean, or a string. Any casing of "true" maps to True;
            every other string maps to False.

    Raises:
        ValueError: If value is neither a string nor a bool.
    """
    element = self._handle_location(location)
    # NOTE: basestring is the Python 2 string base type (str + unicode).
    if isinstance(value, basestring):
        value = value.upper() == "TRUE"
    elif not isinstance(value, bool):
        raise ValueError
    element.text = "true" if value is True else "false"
Set a boolean value. Casper booleans in XML are string literals of "true" or "false". This method sets the text value of "location" to the correct string representation of a boolean. Args: location: Element or a string path argument to find() value: Boolean or string value to set. (Accepts "true"/"True"/"TRUE"; all other strings are False).
juraj-google-style
def update_detector(self, detector_id, detector):
    """Update an existing detector.

    Args:
        detector_id (str): the ID of the detector to update.
        detector (object): the detector model object; serialized as JSON.

    Returns:
        dict: the response body (the updated detector model).
    """
    endpoint = self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id)
    response = self._put(endpoint, data=detector)
    response.raise_for_status()
    return response.json()
Update an existing detector. Args: detector_id (string): the ID of the detector. detector (object): the detector model object. Will be serialized as JSON. Returns: dictionary of the response (updated detector model).
codesearchnet
def commit(self):
    """Commit the changes accumulated in this batch.

    Returns:
        list: The write results corresponding to the committed changes, in
        the same order the changes were applied to this batch. Each write
        result carries an ``update_time`` field.
    """
    response = self._client._firestore_api.commit(
        self._client._database_string,
        self._write_pbs,
        transaction=None,
        metadata=self._client._rpc_metadata)

    # The batch is spent once committed; clear the pending writes.
    self._write_pbs = []
    self.write_results = write_results = list(response.write_results)
    self.commit_time = response.commit_time
    return write_results
Commit the changes accumulated in this batch. Returns: List[google.cloud.proto.firestore.v1beta1.\ write_pb2.WriteResult, ...]: The write results corresponding to the changes committed, returned in the same order as the changes were applied to this batch. A write result contains an ``update_time`` field.
codesearchnet
def _create_mirrored_tpu_replicated_variables(**kwargs):
    """Returns a list of `TPUReplicatedVariable`s.

    The list consists of `num_replicas` `TPUReplicatedVariable`s and can be
    used to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable`
    contains a list of `tf.Variable`s which are replicated to
    `num_cores_per_replica` logical cores to enable XLA SPMD compilation.

    NOTE(review): this is a closure — `num_replicas`, `num_cores_per_replica`,
    `next_creator`, and `self` come from the enclosing function's scope.

    Args:
        **kwargs: the keyword arguments for creating a variable.
    """
    initial_value = kwargs['initial_value']
    # Resolve a callable initializer exactly once so every replica/core copy
    # starts from the same value.
    with maybe_init_scope():
        initial_value = initial_value() if callable(initial_value) else initial_value
    mirrored_replicated_var_list = []
    for replica_id in range(num_replicas):
        replicated_var_list = []
        for logic_core_id in range(num_cores_per_replica):
            # Place each copy on its logical core's device.
            with ops.device(self._tpu_devices[replica_id][logic_core_id]):
                kwargs['initial_value'] = initial_value
                v = next_creator(**kwargs)
                replicated_var_list.append(v)
        # Suffix the name with the replica id so variables stay distinct.
        replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)
        tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)
        mirrored_replicated_var_list.append(tpu_replicated_var)
    return mirrored_replicated_var_list
Returns a list of `TPUReplicatedVariable`s. The list consists of `num_replicas` `TPUReplicatedVariable`s and can be used to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable` contains a list of `tf.Variable`s which are replicated to `num_cores_per_replica` logical cores to enable XLA SPMD compilation. Args: **kwargs: the keyword arguments for creating a variable
github-repos
def start_entry(self, target, var_id):
    """Begin a new config database entry.

    If there is a current entry in progress, it is aborted, but its data was
    already committed to persistent storage so that space is wasted.

    Args:
        target (SlotIdentifier): The target slot for this config variable.
        var_id (int): The config variable ID.

    Returns:
        int: An error code from the global Error enum.
    """
    # Starting a new entry implicitly abandons any previous in-progress one.
    self.in_progress = ConfigEntry(target, var_id, b'')
    # Refuse the entry if the remaining buffer cannot hold it.
    if self.data_size - self.data_index < self.in_progress.data_space():
        return Error.DESTINATION_BUFFER_TOO_SMALL
    # The variable id is encoded little-endian as an unsigned 16-bit value.
    self.in_progress.data += struct.pack("<H", var_id)
    # Reserve the space this entry will consume.
    self.data_index += self.in_progress.data_space()
    return Error.NO_ERROR
Begin a new config database entry. If there is a current entry in progress, it is aborted but the data was already committed to persistent storage so that space is wasted. Args: target (SlotIdentifier): The target slot for this config variable. var_id (int): The config variable ID Returns: int: An error code from the global Error enum.
juraj-google-style
def _construct_location_to_filter_list(match_query): location_to_filters = {} for match_traversal in match_query.match_traversals: for match_step in match_traversal: current_filter = match_step.where_block if current_filter is not None: current_location = match_step.as_block.location location_to_filters.setdefault(current_location, []).append( current_filter) return location_to_filters
Return a dict mapping location -> list of filters applied at that location. Args: match_query: MatchQuery object from which to extract location -> filters dict Returns: dict mapping each location in match_query to a list of Filter objects applied at that location
juraj-google-style
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
    """Apply a 1D convolution with un-shared (local) weights.

    Args:
        inputs: 3D tensor, (batch_size, steps, input_dim) for
            "channels_last" or (batch_size, input_dim, steps) for
            "channels_first".
        kernel: unshared convolution weights of shape
            (output_length, feature_dim, filters).
        kernel_size: tuple with one integer, the 1D window length.
        strides: tuple with one integer, the stride length.
        data_format: "channels_first" or "channels_last".

    Returns:
        3D tensor with the convolved output; layout follows data_format.
    """
    # The leading kernel dimension is the output length of the convolution.
    output_length = kernel.shape[0]
    output_shape = (output_length,)
    return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)
Apply 1D conv with un-shared weights. Args: inputs: 3D tensor with shape: (batch_size, steps, input_dim) if data_format is "channels_last" or (batch_size, input_dim, steps) if data_format is "channels_first". kernel: the unshared weight for convolution, with shape (output_length, feature_dim, filters). kernel_size: a tuple of a single integer, specifying the length of the 1D convolution window. strides: a tuple of a single integer, specifying the stride length of the convolution. data_format: the data format, channels_first or channels_last. Returns: A 3d tensor with shape: (batch_size, output_length, filters) if data_format='channels_first' or 3D tensor with shape: (batch_size, filters, output_length) if data_format='channels_last'.
github-repos
def sort_elements_by_child_values(obj_pyxb, child_name_list):
    """In-place sort elements in a PyXB object by the values of child elements.

    Args:
        obj_pyxb: PyXB object whose elements will be sorted.
        child_name_list: list of str. Names of elements that are direct
            children of the PyXB object; earlier names take precedence.
    """
    def sort_key(element):
        return [get_auto(getattr(element, name)) for name in child_name_list]

    obj_pyxb.sort(key=sort_key)
In-place sort simple or complex elements in a PyXB object by values they contain in child elements. Args: obj_pyxb: PyXB object child_name_list: list of str List of element names that are direct children of the PyXB object.
codesearchnet
def Lock(fd, path, blocking):
    """Lock the provided file descriptor with an exclusive flock.

    Args:
        fd: int, the file descriptor of the file to lock.
        path: string, the name of the file (used only in error messages).
        blocking: bool, whether to wait for the lock rather than fail fast.

    Raises:
        IOError: raised from flock while attempting to lock the file.
    """
    if blocking:
        operation = fcntl.LOCK_EX
    else:
        # Non-blocking: fail immediately with EWOULDBLOCK if already locked.
        operation = fcntl.LOCK_EX | fcntl.LOCK_NB
    try:
        fcntl.flock(fd, operation)
    except IOError as e:
        if e.errno == errno.EWOULDBLOCK:
            raise IOError('Exception locking %s. File already locked.' % path)
        raise IOError('Exception locking %s. %s.' % (path, str(e)))
Lock the provided file descriptor. Args: fd: int, the file descriptor of the file to lock. path: string, the name of the file to lock. blocking: bool, whether the function should return immediately. Raises: IOError, raised from flock while attempting to lock a file.
codesearchnet
def get_excel_workbook(api_data, result_info_key, identifier_keys):
    """Generate an Excel workbook from Analytics API data.

    Args:
        api_data: Analytics API data as a list of dicts (one per identifier).
        result_info_key: key in each api_data dict that holds the result info.
        identifier_keys: list of keys used as requested identifiers
            (address, zipcode, block_id, etc).

    Returns:
        An openpyxl Workbook populated with the cleaned data.
    """
    cleaned_data = []
    for item_data in api_data:
        # NOTE: pops mutate the caller's dicts, matching prior behavior.
        result_info = item_data.pop(result_info_key, {})
        cleaned_item = {}
        if 'meta' in item_data:
            cleaned_item['meta'] = item_data.pop('meta')
        # Flatten each remaining field down to its 'result' payload.
        for field_name, field_value in item_data.items():
            cleaned_item[field_name] = field_value['result']
        cleaned_item[result_info_key] = result_info
        cleaned_data.append(cleaned_item)

    # Deep copy so worksheet writing cannot mutate data shared with api_data.
    data_list = copy.deepcopy(cleaned_data)
    workbook = openpyxl.Workbook()
    write_worksheets(workbook, data_list, result_info_key, identifier_keys)
    return workbook
Generates an Excel workbook object given api_data returned by the Analytics API Args: api_data: Analytics API data as a list of dicts (one per identifier) result_info_key: the key in api_data dicts that contains the data results identifier_keys: the list of keys used as requested identifiers (address, zipcode, block_id, etc) Returns: an openpyxl Workbook object populated with the data
juraj-google-style
def key_periods(ciphertext, max_key_period):
    """Rank all key periods for ``ciphertext`` up to ``max_key_period``.

    Periods are ordered by how closely the index of coincidence of the
    ciphertext's columns matches English.

    Example:
        >>> key_periods(ciphertext, 30)
        [2, 4, 8, 3, ...]

    Args:
        ciphertext (str): The text to analyze.
        max_key_period (int): The maximum period the key could be.

    Returns:
        List of periods, most likely first.

    Raises:
        ValueError: If max_key_period is less than or equal to 0.
    """
    if max_key_period <= 0:
        raise ValueError('max_key_period must be a positive integer')
    upper = min(max_key_period, len(ciphertext))
    scored = []
    for period in range(1, upper + 1):
        # Deviation of the columns' IC from English; smaller is better.
        deviation = abs(ENGLISH_IC - index_of_coincidence(*split_columns(ciphertext, period)))
        scored.append((period, deviation))
    scored.sort(key=lambda pair: pair[1])
    return [period for period, _ in scored]
Rank all key periods for ``ciphertext`` up to and including ``max_key_period`` Example: >>> key_periods(ciphertext, 30) [2, 4, 8, 3, ...] Args: ciphertext (str): The text to analyze max_key_period (int): The maximum period the key could be Returns: List of key periods, sorted from most likely to least likely Raises: ValueError: If max_key_period is less than or equal to 0
codesearchnet
def pull(self, platform=None):
    """Pull this image digest from the registry.

    Args:
        platform (str): The platform to pull the image for.
            Default: ``None``.

    Returns:
        Image: A reference to the pulled image.
    """
    # Only the repository part of the name is needed; the digest is the tag.
    repository, _tag = parse_repository_tag(self.image_name)
    return self.collection.pull(repository, tag=self.id, platform=platform)
Pull the image digest. Args: platform (str): The platform to pull the image for. Default: ``None`` Returns: (:py:class:`Image`): A reference to the pulled image.
codesearchnet
def read(self, filename, encoding=None):
    """Read and parse a file.

    Args:
        filename (str): path to the file.
        encoding (str): encoding of the file; default None.
    """
    with open(filename, encoding=encoding) as stream:
        self._read(stream, filename)
    # Remember where the content came from, normalized to an absolute path.
    self._filename = os.path.abspath(filename)
Read and parse a filename. Args: filename (str): path to file encoding (str): encoding of file, default None
codesearchnet
def mean(x, axis=None, keepdims=False):
    """Compute the arithmetic mean along the specified axes.

    Args:
        x: Input tensor.
        axis: Axis or axes along which the means are computed. Defaults to
            the mean of the flattened tensor.
        keepdims: If True, reduced axes are kept with size one.

    Returns:
        Output tensor containing the mean values.
    """
    # Eager path: delegate straight to the backend implementation.
    if not any_symbolic_tensors((x,)):
        return backend.numpy.mean(x, axis=axis, keepdims=keepdims)
    # Symbolic path: build the op lazily.
    return Mean(axis=axis, keepdims=keepdims).symbolic_call(x)
Compute the arithmetic mean along the specified axes. Args: x: Input tensor. axis: Axis or axes along which the means are computed. The default is to compute the mean of the flattened tensor. keepdims: If this is set to `True`, the axes which are reduced are left in the result as dimensions with size one. Returns: Output tensor containing the mean values.
github-repos
def scheduled_sample_count(ground_truth_x, generated_x, batch_size, scheduled_sample_var):
    """Sample a batch with a specified mix of ground-truth and generated points.

    Args:
        ground_truth_x: tensor of ground-truth data points.
        generated_x: tensor of generated data points.
        batch_size: batch size.
        scheduled_sample_var: number of ground-truth examples in the batch.

    Returns:
        New batch with `scheduled_sample_var` elements sampled from
        ground_truth_x and the rest from generated_x.
    """
    num_ground_truth = scheduled_sample_var
    # Randomly partition batch positions into ground-truth vs generated.
    shuffled = tf.random_shuffle(tf.range(batch_size))
    gt_indices = tf.gather(shuffled, tf.range(num_ground_truth))
    gen_indices = tf.gather(shuffled, tf.range(num_ground_truth, batch_size))
    gt_examples = tf.gather(ground_truth_x, gt_indices)
    gen_examples = tf.gather(generated_x, gen_indices)
    # Reassemble the batch in original position order.
    output = tf.dynamic_stitch([gt_indices, gen_indices], [gt_examples, gen_examples])
    if isinstance(batch_size, int):
        # Static batch size: restore the shape lost by dynamic_stitch.
        output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
    return output
Sample batch with specified mix of groundtruth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size scheduled_sample_var: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x.
codesearchnet
def penalty_satisfaction(response, bqm):
    """Create a penalty-satisfaction vector.

    Given a sample set and a bqm, build a binary array indicating, for each
    sample, whether every penalty introduced during degree reduction is
    satisfied (i.e. each product variable equals the product of its factors).

    Args:
        response (SampleSet): Samples corresponding to the provided bqm.
        bqm (BinaryQuadraticModel): A bqm carrying its reduction info in
            ``bqm.info['reduction']`` mapping (qi, qj) -> {'product': qp, ...}.

    Returns:
        numpy.ndarray: binary array, 1 where all penalties hold, else 0.
    """
    record = response.record
    label_dict = response.variables.index
    # No reductions were performed: every sample trivially satisfies them.
    if len(bqm.info['reduction']) == 0:
        return np.array([1] * len(record.sample))
    # BUGFIX: the column subscripts were mangled to `sample[(:, ...)]`, which
    # is a Python syntax error; they must be 2D slices `sample[:, col]`.
    penalty_vector = np.prod(
        [(record.sample[:, label_dict[qi]] * record.sample[:, label_dict[qj]])
         == record.sample[:, label_dict[valdict['product']]]
         for (qi, qj), valdict in bqm.info['reduction'].items()],
        axis=0)
    return penalty_vector
Creates a penalty satisfaction list Given a sampleSet and a bqm object, will create a binary list informing whether the penalties introduced during degree reduction are satisfied for each sample in sampleSet Args: response (:obj:`.SampleSet`): Samples corresponding to provided bqm bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains its reduction info. Returns: :obj:`numpy.ndarray`: a binary array of penalty satisfaction information
codesearchnet
def _FormatDescription(self, event):
    """Formats the description field.

    Args:
        event (EventObject): event.

    Returns:
        str: formatted description field.

    Raises:
        NoFormatterFound: if no event formatter exists for the event's
            data type.
    """
    date_time_string = timelib.Timestamp.CopyToIsoFormat(
        event.timestamp, timezone=self._output_mediator.timezone)
    timestamp_description = event.timestamp_desc or 'UNKNOWN'

    message, _ = self._output_mediator.GetFormattedMessages(event)
    if message is None:
        data_type = getattr(event, 'data_type', 'UNKNOWN')
        raise errors.NoFormatterFound(
            'Unable to find event formatter for: {0:s}.'.format(data_type))

    # Strip the field delimiter from the message so the description stays
    # parseable, then join the three parts.
    sanitized_message = message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' ')
    description = '; '.join(
        [date_time_string, timestamp_description, sanitized_message])
    return self._SanitizeField(description)
Formats the description. Args: event (EventObject): event. Returns: str: formatted description field.
juraj-google-style
def add_layer(self, label, change_layer=True):
    """Add a new mesh layer to the end of the stack.

    Args:
        label (str): label for the new mesh layer.
        change_layer (bool): switch to the newly created layer.
    """
    # Insert just past the current last layer, i.e. append to the stack.
    insert_position = self.last_layer() + 1
    self.layer_stack.insert(insert_position, label)
    if change_layer:
        self.set_current_layer(self.last_layer())
    return None
Add new mesh layer to the end of the stack Args: label (str): new label for the mesh layer change_layer (bool): change to the newly created layer
codesearchnet
def plugins(self):
    """Newest version of all plugins in the group, filtered by ``blacklist``.

    Returns:
        dict: Nested dictionary of plugins, also accessible through
        dot-notation. A KeyError is raised on access to a plugin type or
        plugin that does not exist. Parent types are always included; child
        plugins only when a valid, non-blacklisted plugin is available.
    """
    # Lazily load plugin modules on first access.
    if not self.loaded:
        self.load_modules()
    group_plugins = get_plugins()[self.group]
    return group_plugins._filter(
        blacklist=self.blacklist, newest_only=True, type_filter=self.type_filter)
Newest version of all plugins in the group filtered by ``blacklist`` Returns: dict: Nested dictionary of plugins accessible through dot-notation. Plugins are returned in a nested dictionary, but can also be accessed through dot-notion. Just as when accessing an undefined dictionary key with index-notation, a :py:exc:`KeyError` will be raised if the plugin type or plugin does not exist. Parent types are always included. Child plugins will only be included if a valid, non-blacklisted plugin is available.
codesearchnet
def export_vms(self, vms_names=None, standalone=False, export_dir='.', compress=False, init_file_name='LagoInitFile', out_format=YAMLOutFormatPlugin(), collect_only=False, with_threads=True):
    """Export vm image disks and an init file.

    The exported images and init file can be used to recreate the
    environment. This is a thin delegation to the virt env's implementation.

    Args:
        vms_names (list of str): Names of the vms to export; if None, export
            all the vms in the env (default=None).
        standalone (bool): If False, export a layered image (default=False).
        export_dir (str): Dir to place the exported images and init file.
        compress (bool): If True, compress the images with xz
            (default=False).
        init_file_name (str): Name of the exported init file
            (default='LagoInitFile').
        out_format (OutFormatPlugin): Type of the exported init file
            (default is yaml).
        collect_only (bool): If True, only return a mapping from vm name to
            the disks that will be exported (default=False).
        with_threads (bool): If True, run the export in parallel
            (default=True).

    Returns:
        If collect_only is True, a mapping from vm name to the disks that
        will be exported; otherwise the result of the underlying export.
    """
    return self.virt_env.export_vms(vms_names, standalone, export_dir, compress, init_file_name, out_format, collect_only, with_threads)
Export vm images disks and init file. The exported images and init file can be used to recreate the environment. Args: vms_names(list of str): Names of the vms to export, if None export all the vms in the env (default=None) standalone(bool): If false, export a layered image (default=False) export_dir(str): Dir to place the exported images and init file compress(bool): If True compress the images with xz (default=False) init_file_name(str): The name of the exported init file (default='LagoInitFile') out_format(:class:`lago.plugins.output.OutFormatPlugin`): The type of the exported init file (the default is yaml) collect_only(bool): If True, return only a mapping from vm name to the disks that will be exported. (default=False) with_threads(bool): If True, run the export in parallel (default=True) Returns: If collect_only == True, a mapping from vm name to the disks that will be exported; otherwise the images and init file are written to export_dir.
codesearchnet
def get_version(here_path, default_version=DEFAULT_VERSION):
    """Tries to resolve the version number.

    Resolution order: an installed copy reads version.txt; a Travis tag wins
    next; otherwise git tags are consulted, falling back to version.txt.

    Args:
        here_path (str): path to the project's local dir.
        default_version (str): version to return if all else fails.

    Returns:
        str: semantic-version string for the library.
    """
    # Installed package: the recorded version file is authoritative.
    if ('site-packages' in here_path):
        return _version_from_file(here_path)
    if os.environ.get('TRAVIS_TAG'):
        if (not TEST_MODE):
            # Travis builds of a tag: trust the tag, strip the leading 'v'.
            return os.environ.get('TRAVIS_TAG').replace('v', '')
        else:
            warnings.warn('Travis detected, but TEST_MODE enabled', exceptions.ProsperVersionTestModeWarning)
    try:
        current_tag = _read_git_tags(default_version=default_version)
    except Exception:
        # NOTE(review): broad catch — any git failure falls back to the file.
        return _version_from_file(here_path)
    # Cache the resolved tag so installed copies can read it later.
    with open(os.path.join(here_path, 'version.txt'), 'w') as v_fh:
        v_fh.write(current_tag)
    return current_tag
tries to resolve version number Args: here_path (str): path to project local dir default_version (str): what version to return if all else fails Returns: str: semantic_version information for library
codesearchnet