code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def encode(self, s):
    """Convert a space-separated string of tokens to a list of ids.

    Unknown tokens are replaced with ``self._replace_oov`` when that is
    set; the id list is reversed when ``self._reverse`` is true.
    """
    words = s.strip().split()
    if self._replace_oov is not None:
        words = [w if w in self._token_to_id else self._replace_oov
                 for w in words]
    ids = [self._token_to_id[w] for w in words]
    return list(reversed(ids)) if self._reverse else ids
Converts a space-separated string of tokens to a list of ids.
def map_property_instances(original_part, new_part): get_mapping_dictionary()[original_part.id] = new_part for prop_original in original_part.properties: get_mapping_dictionary()[prop_original.id] = [prop_new for prop_new in new_part.properties if get_mapping_dictionary()[prop_original._json_data['model']].id == prop_new._json_data['model']][0]
Map the id of the original part to the `Part` object of the newly created one. Updates the singleton `mapping dictionary` with the new mapping table values. :param original_part: `Part` object to be copied/moved :type original_part: :class:`Part` :param new_part: `Part` object copied/moved :type new_part: :class:`Part` :return: None
def unlock(self):
    """Unlock the device.

    Returns the success flag from the status-change request; on success
    the new status is mirrored into the cached JSON state.
    """
    success = self.set_status(CONST.STATUS_LOCKOPEN_INT)
    if success:
        self._json_state['status'] = CONST.STATUS_LOCKOPEN
    return success
Unlock the device.
def send_request(self, request, callback=None, timeout=None, no_response=False): if callback is not None: thread = threading.Thread(target=self._thread_body, args=(request, callback)) thread.start() else: self.protocol.send_message(request) if no_response: return try: response = self.queue.get(block=True, timeout=timeout) except Empty: response = None return response
Send a request to the remote server. :param request: the request to send :param callback: the callback function to invoke upon response :param timeout: the timeout of the request :return: the response
async def update( self, service_id: str, version: str, *, image: str = None, rollback: bool = False ) -> bool: if image is None and rollback is False: raise ValueError("You need to specify an image.") inspect_service = await self.inspect(service_id) spec = inspect_service["Spec"] if image is not None: spec["TaskTemplate"]["ContainerSpec"]["Image"] = image params = {"version": version} if rollback is True: params["rollback"] = "previous" data = json.dumps(clean_map(spec)) await self.docker._query_json( "services/{service_id}/update".format(service_id=service_id), method="POST", data=data, params=params, ) return True
Update a service. If rollback is True image will be ignored. Args: service_id: ID or name of the service. version: Version of the service that you want to update. rollback: Rollback the service to the previous service spec. Returns: True if successful.
def get_master_url(request, image_id):
    """Return the image's master URL as JSON.

    :param request: http GET request /renderer/master/url/<image_id>/
    :param image_id: the master image primary key
    :return: master url in a json dictionary
    """
    im = get_object_or_404(MasterImage, pk=image_id)
    return JsonResponse({'url': im.get_master_url()})
get image's master url ... :param request: http GET request /renderer/master/url/<image_id>/ :param image_id: the master image primary key :return: master url in a json dictionary
def GET(self, *args, **kwargs): if self.user_manager.session_logged_in(): if not self.user_manager.session_username() and not self.__class__.__name__ == "ProfilePage": raise web.seeother("/preferences/profile") if not self.is_lti_page and self.user_manager.session_lti_info() is not None: self.user_manager.disconnect_user() return self.template_helper.get_renderer().auth(self.user_manager.get_auth_methods(), False) return self.GET_AUTH(*args, **kwargs) else: return self.template_helper.get_renderer().auth(self.user_manager.get_auth_methods(), False)
Checks if user is authenticated and calls GET_AUTH or performs logout. Otherwise, returns the login template.
def record_command(self, cmd, prg=''):
    """Record the command passed -- usually the name of the program or
    task being run -- into the command log file."""
    self._log(self.logFileCommand, force_to_string(cmd), prg)
record the command passed - this is usually the name of the program being run or task being run
def build_from_generator(cls, generator, target_size, max_subtoken_length=None, reserved_tokens=None): token_counts = collections.defaultdict(int) for item in generator: for tok in tokenizer.encode(native_to_unicode(item)): token_counts[tok] += 1 encoder = cls.build_to_target_size( target_size, token_counts, 1, 1e3, max_subtoken_length=max_subtoken_length, reserved_tokens=reserved_tokens) return encoder
Builds a SubwordTextEncoder from the generated text. Args: generator: yields text. target_size: int, approximate vocabulary size to create. max_subtoken_length: Maximum length of a subtoken. If this is not set, then the runtime and memory use of creating the vocab is quadratic in the length of the longest token. If this is set, then it is instead O(max_subtoken_length * length of longest token). reserved_tokens: List of reserved tokens. The global variable `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this argument is `None`, it will use `RESERVED_TOKENS`. Returns: SubwordTextEncoder with `vocab_size` approximately `target_size`.
def open_links(self): if self._is_open: raise Exception('Already opened') try: self.parallel_safe(lambda scf: scf.open_link()) self._is_open = True except Exception as e: self.close_links() raise e
Open links to all individuals in the swarm
def getConfigurableParent(cls):
    """Return the parent from which this class inherits configurations.

    Walks the immediate base classes and returns the first Configurable
    one (excluding Configurable itself), or None when there is none.
    """
    for p in cls.__bases__:
        # NOTE(review): `isinstance` on a base *class* only matches when
        # Configurable is a metaclass; if Configurable is an ordinary
        # base class, `issubclass(p, Configurable)` may be intended --
        # confirm against Configurable's definition.
        if isinstance(p, Configurable) and p is not Configurable:
            return p
    return None
Return the parent from which this class inherits configurations
def CreateTaskStart(self):
    """Creates a task start.

    Returns:
      TaskStart: task start attribute container populated from this
          task's identifier, session identifier and start time.
    """
    task_start = TaskStart()
    task_start.identifier = self.identifier
    task_start.session_identifier = self.session_identifier
    task_start.timestamp = self.start_time
    return task_start
Creates a task start. Returns: TaskStart: task start attribute container.
def bgseq(code):
    """Return the background color terminal escape sequence for the
    given color code number.

    A string argument is first resolved to a number via ``nametonum``;
    a code of -1 (presumably nametonum's "unknown" sentinel -- confirm)
    yields the empty string.
    """
    if isinstance(code, str):
        code = nametonum(code)
    if code == -1:
        return ""
    # Prefer the `setab` terminal capability, falling back to `setb`.
    s = termcap.get('setab', code) or termcap.get('setb', code)
    return s
Returns the background color terminal escape sequence for the given color code number.
def to_source(node, indentation=' ' * 4): if isinstance(node, gast.AST): node = gast.gast_to_ast(node) generator = SourceWithCommentGenerator(indentation, False, astor.string_repr.pretty_string) generator.visit(node) generator.result.append('\n') return astor.source_repr.pretty_source(generator.result).lstrip()
Return source code of a given AST.
def is_comparable_type(var, type_):
    """Check whether *var* is an instance of a type known to be
    comparable with *type_*.

    Falls back to *type_* itself when no comparable set is registered.
    """
    return isinstance(var, COMPARABLE_TYPES.get(type_, type_))
Check to see if `var` is an instance of known compatible types for `type_` Args: var (?): type_ (?): Returns: bool: CommandLine: python -m utool.util_type is_comparable_type --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_type import * # NOQA >>> import utool as ut >>> flags = [] >>> flags += [is_comparable_type(0, float)] >>> flags += [is_comparable_type(0, np.float32)] >>> flags += [is_comparable_type(0, np.int32)] >>> flags += [is_comparable_type(0, int)] >>> flags += [is_comparable_type(0.0, int)] >>> result = ut.repr2(flags) >>> print(result) [True, True, True, True, False]
def get_version_from_file(path):
    """Find the VERSION_FILE under *path* (or its parent directory) and
    return its first line.

    Returns
    -------
    version : string or None
    """
    for base in (path, os.path.dirname(path)):
        candidate = os.path.join(base, VERSION_FILE)
        if os.path.isfile(candidate):
            with open(candidate) as fh:
                version = fh.readline().strip()
            return version or None
    return None
Find the VERSION_FILE and return its contents. Returns ------- version : string or None
def solid(self, x, y):
    """Determine whether the pixel x, y is nonzero.

    Parameters
    ----------
    x, y : int
        The pixel of interest.

    Returns
    -------
    solid : bool
        True if the pixel is within bounds and not zero.
    """
    in_bounds = 0 <= x < self.xsize and 0 <= y < self.ysize
    return bool(in_bounds and self.data[x, y] != 0)
Determine whether the pixel x,y is nonzero Parameters ---------- x, y : int The pixel of interest. Returns ------- solid : bool True if the pixel is not zero.
def set_statics(self): if not os.path.exists(self.results_dir): return None try: shutil.copytree(os.path.join(self.templates_dir, 'css'), os.path.join(self.results_dir, 'css')) shutil.copytree(os.path.join(self.templates_dir, 'scripts'), os.path.join(self.results_dir, 'scripts')) shutil.copytree(os.path.join(self.templates_dir, 'fonts'), os.path.join(self.results_dir, 'fonts')) except OSError as e: if e.errno == 17: print("WARNING : existing output directory for static files, will not replace them") else: raise try: shutil.copytree(os.path.join(self.templates_dir, 'img'), os.path.join(self.results_dir, 'img')) except OSError as e: pass
Create statics directory and copy files in it
def randint(self, a: int, b: int, n: Optional[int] = None) -> Union[List[int], int]: max_n = self.config.MAX_NUMBER_OF_INTEGERS return self._generate_randoms(self._request_randints, max_n=max_n, a=a, b=b, n=n)
Generate n numbers as a list or a single one if no n is given. n is used to minimize the number of requests made and return type changes to be compatible with :py:mod:`random`'s interface
def cmdline(argv, flags):
    """A cmdopts wrapper that takes a list of flags and builds the
    corresponding cmdopts rules to match those flags."""
    rules = {flag: {'flags': ['--%s' % flag]} for flag in flags}
    return parse(argv, rules)
A cmdopts wrapper that takes a list of flags and builds the corresponding cmdopts rules to match those flags.
def available_packages(*args, **kwargs):
    """Query available MicroDrop plugin packages from configured Conda
    channels.

    Parameters
    ----------
    *args
        Extra arguments to pass to the Conda ``search`` command.

    Returns
    -------
    dict
        Mapping of package name to a list of available version records;
        empty dict on any error (errors are logged, not raised).
    """
    try:
        # Raw string avoids the invalid ``\.`` escape warning.
        plugin_packages_info_json = ch.conda_exec('search', '--json',
                                                  r'^microdrop\.',
                                                  verbose=False)
        return json.loads(plugin_packages_info_json)
    # Fix: Python-2-only ``except E, e`` syntax replaced with
    # ``except E as e`` (valid on Python 2.6+ and Python 3).
    except RuntimeError as exception:
        if 'CondaHTTPError' in str(exception):
            logger.warning('Could not connect to Conda server.')
        else:
            logger.warning('Error querying available MicroDrop plugins.',
                           exc_info=True)
    except Exception:
        logger.warning('Error querying available MicroDrop plugins.',
                       exc_info=True)
    return {}
Query available plugin packages based on specified Conda channels. Parameters ---------- *args Extra arguments to pass to Conda ``search`` command. Returns ------- dict .. versionchanged:: 0.24 All Conda packages beginning with ``microdrop.`` prefix from all configured channels. Each *key* corresponds to a package name. Each *value* corresponds to a ``list`` of dictionaries, each corresponding to an available version of the respective package. For example: { "microdrop.dmf-device-ui-plugin": [ ... { ... "build_number": 0, "channel": "microdrop-plugins", "installed": true, "license": "BSD", "name": "microdrop.dmf-device-ui-plugin", "size": 62973, "version": "2.1.post2", ... }, ...], ... }
def sub_hmm(self, states):
    r"""Returns the HMM restricted to the selected subset of states.

    Will raise an exception if the hidden transition matrix cannot be
    normalized on this subset.
    """
    pi_sub = self._Pi[states]
    pi_sub /= pi_sub.sum()
    P_sub = self._Tij[states, :][:, states]
    # Every retained row must have some mass or renormalization divides by zero.
    assert np.all(P_sub.sum(axis=1) > 0), \
        'Illegal sub_hmm request: transition matrix cannot be normalized on ' + str(states)
    P_sub /= P_sub.sum(axis=1)[:, None]
    out_sub = self.output_model.sub_output_model(states)
    return HMM(pi_sub, P_sub, out_sub, lag=self.lag)
r""" Returns HMM on a subset of states Returns the HMM restricted to the selected subset of states. Will raise exception if the hidden transition matrix cannot be normalized on this subset
def update_record(cls, fqdn, name, type, value, ttl, content): data = { "rrset_name": name, "rrset_type": type, "rrset_values": value, } if ttl: data['rrset_ttl'] = int(ttl) meta = cls.get_fqdn_info(fqdn) if content: url = meta['domain_records_href'] kwargs = {'headers': {'Content-Type': 'text/plain'}, 'data': content} return cls.json_put(url, **kwargs) url = '%s/domains/%s/records/%s/%s' % (cls.api_url, fqdn, name, type) return cls.json_put(url, data=json.dumps(data))
Update a record (identified by name and type) for a domain, or replace the whole zone when raw content is given.
def updateJoin(self):
    """Updates the joining method used by the system.

    Reads the current action text from the join button and applies the
    corresponding AND/OR operator to the container widget.
    """
    text = self.uiJoinSBTN.currentAction().text()
    if text == 'AND':
        joiner = QueryCompound.Op.And
    else:
        joiner = QueryCompound.Op.Or
    # Bug fix: previously passed ``self.joiner()`` here, discarding the
    # operator derived from the button text just above.
    self._containerWidget.setCurrentJoiner(joiner)
Updates the joining method used by the system.
def get_action(self, action=None): if action: self.action = action if self.action not in AjaxResponseAction.choices: raise ValueError( "Invalid action selected: '{}'".format(self.action)) return self.action
Returns action to take after call
def _filter(self, criteria: Q, db):
    """Recursive function to filter items from dictionary.

    AND connectors chain child filters, feeding each child's output into
    the next child's input; any other connector (OR) evaluates every
    child against the original db and unions the results.
    """
    negated = criteria.negated
    input_db = None
    if criteria.connector == criteria.AND:
        # AND: progressively narrow the working set.
        input_db = db
        for child in criteria.children:
            if isinstance(child, Q):
                input_db = self._filter(child, input_db)
            else:
                input_db = self.provider._evaluate_lookup(child[0], child[1], negated, input_db)
    else:
        # OR: merge each child's matches over the full db.
        input_db = {}
        for child in criteria.children:
            if isinstance(child, Q):
                results = self._filter(child, db)
            else:
                results = self.provider._evaluate_lookup(child[0], child[1], negated, db)
            input_db = {**input_db, **results}
    return input_db
Recursive function to filter items from dictionary
def put_file(buffer, modified_file): import mimetypes import boto3 file_type, _ = mimetypes.guess_type(modified_file) s3 = boto3.resource('s3') bucket_name, object_key = _parse_s3_file(modified_file) extra_args = { 'ACL': 'public-read', 'ContentType': file_type } bucket = s3.Bucket(bucket_name) logger.info("Uploading {0} to {1}".format(object_key, bucket_name)) bucket.upload_fileobj(buffer, object_key, ExtraArgs=extra_args)
write the buffer to modified_file. modified_file should be in the format 's3://bucketname/path/to/file.txt'
def is_valid_single_address(self, single_address): if not isinstance(single_address, SingleAddress): raise TypeError( 'Parameter "{}" is of type {}, expecting type {}.'.format( single_address, type(single_address), SingleAddress)) try: return bool(self.scan_specs([single_address])) except AddressLookupError: return False
Check if a potentially ambiguous single address spec really exists. :param single_address: A SingleAddress spec. :return: True if given spec exists, False otherwise.
def _remove_bound_conditions(agent, keep_criterion): new_bc = [] for ind in range(len(agent.bound_conditions)): if keep_criterion(agent.bound_conditions[ind].agent): new_bc.append(agent.bound_conditions[ind]) agent.bound_conditions = new_bc
Removes bound conditions of agent such that keep_criterion is False. Parameters ---------- agent: Agent The agent whose bound conditions we evaluate keep_criterion: function Evaluates removal_criterion(a) for each agent a in a bound condition and if it evaluates to False, removes a from agent's bound_conditions
def process_superclass(self, entity: List[dict]) -> List[dict]: superclass = entity.pop('superclass') label = entity['label'] if not superclass.get('ilx_id'): raise self.SuperClassDoesNotExistError( f'Superclass not given an interlex ID for label: {label}') superclass_data = self.get_entity(superclass['ilx_id']) if not superclass_data['id']: raise self.SuperClassDoesNotExistError( 'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch') entity['superclasses'] = [{'superclass_tid': superclass_data['id']}] return entity
Replaces ILX ID with superclass ID
def safe_print(ustring, errors='replace', **kwargs):
    """Safely print a unicode string.

    On Python 3 the string is printed directly; on Python 2 it is first
    encoded to the stdout encoding with the given error handler so that
    characters outside that encoding cannot raise.
    """
    encoding = sys.stdout.encoding or 'utf-8'
    if sys.version_info[0] == 3:
        print(ustring, **kwargs)
    else:
        bytestr = ustring.encode(encoding, errors=errors)
        print(bytestr, **kwargs)
Safely print a unicode string
def buildEXPmask(self, chip, dqarr): log.info("Applying EXPTIME weighting to DQ mask for chip %s" % chip) exparr = self._image[self.scienceExt,chip]._exptime expmask = exparr*dqarr return expmask.astype(np.float32)
Builds a weight mask from an input DQ array and the exposure time per pixel for this chip.
def copy(self, extra=None): if extra is None: extra = dict() bestModel = self.bestModel.copy(extra) avgMetrics = self.avgMetrics subModels = self.subModels return CrossValidatorModel(bestModel, avgMetrics, subModels)
Creates a copy of this instance with a randomly generated uid and some extra params. This copies the underlying bestModel, creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. It does not copy the extra Params into the subModels. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
def get_variables_path(export_dir):
    """Returns the path for storing variables checkpoints under a
    SavedModel export directory (as bytes, per tf.compat.as_bytes)."""
    return os.path.join(
        tf.compat.as_bytes(export_dir),
        tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_DIRECTORY),
        tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_FILENAME))
Returns the path for storing variables checkpoints.
def get_require_by_kind(self, kind, value):
    """Return the first requires entry matching *kind* and *value*,
    or None when no such entry exists."""
    return next((req for req in self.requires
                 if req.kind == kind and req.value == value), None)
Returns the requires object matching the given kind and value, or None if no such entry exists.
def _is_substitute_element(head, sub):
    """Return True when head and sub are both element declarations and
    sub declares head as its substitutionGroup.

    head -- Typecode instance
    sub -- Typecode instance
    """
    if not isinstance(head, ElementDeclaration) or not isinstance(sub, ElementDeclaration):
        return False
    try:
        group = sub.substitutionGroup
    except (AttributeError, TypeError):
        return False
    ged = GED(*group)
    # Bug fix: removed a stray debug print() of the name comparison that
    # leaked onto stdout on every call.
    if head is ged or not (head.nspname == ged.nspname and head.pname == ged.pname):
        return False
    return True
if head and sub are both GEDs, and sub declares head as its substitutionGroup then return True. head -- Typecode instance sub -- Typecode instance
def wait(self, timeout=None):
    """Blocking wait for task completion.

    No-op when no worker thread has been started.
    """
    if self._thread is not None:
        self._thread.join(timeout=timeout)
Blocking wait for task status.
def color(self):
    """Whether or not color should be output.

    An explicit user option wins; otherwise fall back to whether the
    stream is a TTY.
    """
    if self.options.color is None:
        return self.tty_stream
    return self.options.color
Whether or not color should be output
def create_logger(): global logger formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s') handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1) handler.setFormatter(formatter) handler.setLevel(log_level) handler.suffix = "%Y-%m-%d" logger = logging.getLogger("sacplus") logger.setLevel(log_level) logger.addHandler(handler)
Initial the global logger variable
def create_estimation_obj(model_obj, init_vals, mappings=None, ridge=None, constrained_pos=None, weights=None): mapping_matrices =\ model_obj.get_mappings_for_fit() if mappings is None else mappings zero_vector = np.zeros(init_vals.shape[0]) internal_model_name = display_name_to_model_type[model_obj.model_type] estimator_class, current_split_func =\ (model_type_to_resources[internal_model_name]['estimator'], model_type_to_resources[internal_model_name]['split_func']) estimation_obj = estimator_class(model_obj, mapping_matrices, ridge, zero_vector, current_split_func, constrained_pos, weights=weights) return estimation_obj
Should return a model estimation object corresponding to the model type of the `model_obj`. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. init_vals : 1D ndarray. The initial values to start the estimation process with. In the following order, there should be one value for each nest coefficient, shape parameter, outside intercept parameter, or index coefficient that is being estimated. mappings : OrderedDict or None, optional. Keys will be `["rows_to_obs", "rows_to_alts", "chosen_row_to_obs", "rows_to_nests"]`. The value for `rows_to_obs` will map the rows of the `long_form` to the unique observations (on the columns) in their order of appearance. The value for `rows_to_alts` will map the rows of the `long_form` to the unique alternatives which are possible in the dataset (on the columns), in sorted order--not order of appearance. The value for `chosen_row_to_obs`, if not None, will map the rows of the `long_form` that contain the chosen alternatives to the specific observations those rows are associated with (denoted by the columns). The value of `rows_to_nests`, if not None, will map the rows of the `long_form` to the nest (denoted by the column) that contains the row's alternative. Default == None. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. weights : 1D ndarray. Should contain the weights for each corresponding observation for each row of the long format data.
def from_dict(d):
    """Load an nparray object from a dictionary.

    A JSON string is delegated to ``from_json``; otherwise *d* must be a
    dict containing an ``'nparray'`` class-name entry.
    """
    if isinstance(d, str):
        return from_json(d)
    if not isinstance(d, dict):
        raise TypeError("argument must be of type dict")
    if 'nparray' not in d:
        raise ValueError("input dictionary missing 'nparray' entry")
    cls_name = d.pop('nparray').title()
    return getattr(_wrappers, cls_name)(**d)
Load an nparray object from a dictionary (or a JSON string). @parameter dict d: dictionary representing the nparray object
def _find_base_version_ids(self, symbol, version_ids): cursor = self._versions.find({'symbol': symbol, '_id': {'$nin': version_ids}, 'base_version_id': {'$exists': True}, }, projection={'base_version_id': 1}) return [version["base_version_id"] for version in cursor]
Return all base_version_ids for a symbol that are not bases of version_ids
def create_post(self, path, **kw):
    """Create a new post.

    Writes metadata (when ``onefile``) followed by the content to
    *path*, creating parent directories as needed.
    """
    # NOTE(review): `content` defaults to None and None.endswith below
    # would raise AttributeError -- callers apparently always supply
    # content; confirm.
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata, comment_wrap=False, site=self.site, compiler=self))
        fd.write(content)
Create a new post.
def data_type_to_numpy(datatype, unsigned=False): basic_type = _dtypeLookup[datatype] if datatype in (stream.STRING, stream.OPAQUE): return np.dtype(basic_type) if unsigned: basic_type = basic_type.replace('i', 'u') return np.dtype('=' + basic_type)
Convert an ncstream datatype to a numpy one.
def private(self): req = self.request(self.mist_client.uri+'/keys/'+self.id+"/private") private = req.get().json() return private
Return the private ssh-key :returns: The private ssh-key as string
def format_help(self): formatter = self._get_formatter() formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) formatter.add_text(self.description) for action_group in self._action_groups: if is_subparser(action_group): continue formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() formatter.add_text(self.epilog) return formatter.format_help()
Overrides format_help to not print subparsers
def POST(self, id): id = int(id) model.del_todo(id) raise web.seeother('/')
Delete based on ID
def stop_refresh(self):
    """Stop redrawing the canvas at the previously set timed interval."""
    self.logger.debug("stopping timed refresh")
    # Signal the refresh loop to exit...
    self.rf_flags['done'] = True
    # ...and clear the timer gate (presumably an Event the loop waits
    # on -- confirm) so the loop wakes and observes the flag.
    self.rf_timer.clear()
Stop redrawing the canvas at the previously set timed interval.
def get_email_context(self, activation_key): scheme = 'https' if self.request.is_secure() else 'http' return { 'scheme': scheme, 'activation_key': activation_key, 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS, 'site': get_current_site(self.request) }
Build the template context used for the activation email.
def _key(self, additional_key: Any=None) -> str: return '_'.join([self.key, str(self.clock()), str(additional_key), str(self.seed)])
Construct a hashable key from this object's state. Parameters ---------- additional_key : Any additional information used to seed random number generation. Returns ------- str A key to seed random number generation.
def _make_parser(streamer, the_struct): "Return a function that parses the given structure into a dict" struct_items = [s.split(":") for s in the_struct.split()] names = [s[0] for s in struct_items] types = ''.join(s[1] for s in struct_items) def f(message_stream): return streamer.parse_as_dict(names, types, message_stream) return f
Return a function that parses the given structure into a dict
def _fit_stage_componentwise(X, residuals, sample_weight, **fit_params): n_features = X.shape[1] base_learners = [] error = numpy.empty(n_features) for component in range(n_features): learner = ComponentwiseLeastSquares(component).fit(X, residuals, sample_weight) l_pred = learner.predict(X) error[component] = squared_norm(residuals - l_pred) base_learners.append(learner) best_component = numpy.nanargmin(error) best_learner = base_learners[best_component] return best_learner
Fit component-wise weighted least squares model
def check(self, data):
    """Return True when *data* (joined into a string when iterable)
    matches the configured regexp; empty data never matches."""
    if isinstance(data, Iterable):
        data = "".join(map(str, data))
    try:
        text = str(data)
    except UnicodeDecodeError:
        return False
    if not text:
        return False
    return self.__regexp.match(text) is not None
Returns True if the data (joined into a single string when iterable) matches the regexp.
def codes_get_string_array(handle, key, size, length=None): if length is None: length = codes_get_string_length(handle, key) values_keepalive = [ffi.new('char[]', length) for _ in range(size)] values = ffi.new('char*[]', values_keepalive) size_p = ffi.new('size_t *', size) _codes_get_string_array(handle, key.encode(ENC), values, size_p) return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
Get string array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: T.List[bytes]
def manifestation_model_factory(*, validator=validators.is_manifestation_model, ld_type='CreativeWork', **kwargs): return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
Generate a Manifestation model. Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and ``ld_context`` as keyword arguments.
def _on_cancelok(self, cancel_frame): _log.info("Consumer canceled; returning all unprocessed messages to the queue") self._channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)
Called when the server acknowledges a cancel request. Args: cancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from the server.
def put(self, measurementId): json = request.get_json() try: start = self._calculateStartTime(json) except ValueError: return 'invalid date format in request', 400 duration = json['duration'] if 'duration' in json else 10 if start is None: return 'no start time', 400 else: scheduled, message = self._measurementController.schedule(measurementId, duration, start, description=json.get('description')) return message, 200 if scheduled else 400
Initiates a new measurement. Accepts a json payload with the following attributes; * duration: in seconds * startTime OR delay: a date in YMD_HMS format or a delay in seconds * description: some free text information about the measurement :return:
def verification_events(self): queued = self._assemble_event('Verifier_Queued') started = self._assemble_event('Verifier_Started') return [x for x in [queued, started] if x]
Events related to command verification. :type: List[:class:`.CommandHistoryEvent`]
def shell_command(class_path): loader = ClassLoader(*class_path) shell.start_shell(local_ns={ 'ClassFile': ClassFile, 'loader': loader, 'constants': importlib.import_module('jawa.constants'), })
Drop into a debugging shell.
def cleanup(self): shutil.rmtree(self.temp_tagdir) parentdir = os.path.dirname(self.temp_tagdir) if os.path.basename(parentdir).startswith(self.package): os.rmdir(parentdir) os.chdir(self.start_directory)
Clean up temporary tag checkout dir.
def add_if_unique(self, name):
    """Add *name* to the namespace under the lock.

    Returns ``True`` on success, ``False`` when the name already exists.
    """
    with self.lock:
        if name in self.names:
            return False
        self.names.append(name)
        return True
Returns ``True`` on success. Returns ``False`` if the name already exists in the namespace.
def get_cmd_out(command):
    """Get the stdout of a command as a stripped unicode string.

    Args:
        command (str or list): The command to run. A ``str`` is run
            through the default shell (``shell=True``); a list is
            executed directly as in :class:`subprocess.Popen`.

    Returns:
        str: The decoded, right-stripped stdout of the command.
    """
    use_shell = not isinstance(command, list)
    raw = sp.check_output(command, shell=use_shell)
    return raw.decode('utf-8').rstrip()
Get the output of a command. Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command. Args: command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`). Note: If ``command`` is a ``str``, it will be evaluated with ``shell=True`` i.e. in the default shell (for example, bash). Returns: str: The ``stdout`` of the command.
def new_action(self, method='GET', **kwargs): if method not in self.methods: raise TypeError('{} not in valid method(s): {}.'.format(method, self.methods)) return Action(self, method, **kwargs)
Create a new Action linked to this endpoint with the given args.
def search_index_advanced(self, index, query):
    """Run an advanced search query against an entire index.

    > query = ElasticQuery().query_string(query='imchi')
    > search = ElasticSearch()
    """
    request = self.session
    url = 'http://%s:%s/%s/_search' % (self.host, self.port, index)
    if self.params:
        content = dict(query=query, **self.params)
    else:
        content = dict(query=query)
    if self.verbose:
        # Fix: Python-2-only ``print content`` statement replaced with
        # the function-call form, valid on both Python 2 and 3.
        print(content)
    response = request.post(url, content)
    return response
Advanced search query against an entire index > query = ElasticQuery().query_string(query='imchi') > search = ElasticSearch()
def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool = False) -> Any: default = choices[0] if default_to_first_choice else self.DEFAULT value = self.pop(key, default) if value not in choices: key_str = self.history + key message = '%s not in acceptable choices for %s: %s' % (value, key_str, str(choices)) raise ConfigurationError(message) return value
Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of the given choices. Note that this `pops` the key from params, modifying the dictionary, consistent with how parameters are processed in this codebase. Parameters ---------- key: str Key to get the value from in the param dictionary choices: List[Any] A list of valid options for values corresponding to ``key``. For example, if you're specifying the type of encoder to use for some part of your model, the choices might be the list of encoder classes we know about and can instantiate. If the value we find in the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because the user specified an invalid value in their parameter file. default_to_first_choice: bool, optional (default=False) If this is ``True``, we allow the ``key`` to not be present in the parameter dictionary. If the key is not present, we will use the return as the value the first choice in the ``choices`` list. If this is ``False``, we raise a ``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to specify your model class when running an experiment, but you can feel free to use default settings for encoders if you want).
def deprecated(fun_name=None, msg=""):
    """Decorator factory that issues a deprecation warning on each call.

    *fun_name* overrides the reported name; *msg* is appended to the
    warning text.
    """
    def _deprecated(fun):
        @wraps(fun)
        def _wrapper(*args, **kwargs):
            shown = fun.__name__ if fun_name is None else fun_name
            _warn_deprecated('Call to deprecated function %s. %s' % (shown, msg))
            return fun(*args, **kwargs)
        return _wrapper
    return _deprecated
Issue a deprecation warning for a function
def services_resolved(self): self._disconnect_service_signals() services_regex = re.compile(self._device_path + '/service[0-9abcdef]{4}$') managed_services = [ service for service in self._object_manager.GetManagedObjects().items() if services_regex.match(service[0])] self.services = [Service( device=self, path=service[0], uuid=service[1]['org.bluez.GattService1']['UUID']) for service in managed_services] self._connect_service_signals()
Called when all device's services and characteristics got resolved.
def bind_key_name(self, function, object_name):
    """Bind a key to an object name.

    Rebinds ``function`` in the name map to ``object_name``; a no-op
    when the key is not present (matching the original scan behavior).
    """
    # Fix: replaced an O(n) scan over name_map.items() (which also
    # mutated the dict while iterating it) with a direct lookup.
    if function in self.name_map:
        self.name_map[function] = object_name
Bind a key to an object name
def _get_openscm_var_from_filepath(filepath): reader = determine_tool(filepath, "reader")(filepath) openscm_var = convert_magicc7_to_openscm_variables( convert_magicc6_to_magicc7_variables(reader._get_variable_from_filepath()) ) return openscm_var
Determine the OpenSCM variable from a filepath. Uses MAGICC's internal, implicit, filenaming conventions. Parameters ---------- filepath : str Filepath from which to determine the OpenSCM variable. Returns ------- str The OpenSCM variable implied by the filepath.
def true_range(close_data, period): catch_errors.check_for_period_error(close_data, period) tr = [np.max([np.max(close_data[idx+1-period:idx+1]) - np.min(close_data[idx+1-period:idx+1]), abs(np.max(close_data[idx+1-period:idx+1]) - close_data[idx-1]), abs(np.min(close_data[idx+1-period:idx+1]) - close_data[idx-1])]) for idx in range(period-1, len(close_data))] tr = fill_for_noncomputable_vals(close_data, tr) return tr
True Range. Formula: TRt = MAX(abs(Ht - Lt), abs(Ht - Ct-1), abs(Lt - Ct-1))
def _poll_once(self, timeout_ms, max_records):
    """Do one round of polling.

    In addition to checking for new data, this does any needed
    heart-beating, auto-commits, and offset updates.

    Arguments:
        timeout_ms (int): maximum time in milliseconds to block.
        max_records (int): maximum number of records to return.

    Returns:
        dict: map of topic to list of records (may be empty).
    """
    # Coordinator housekeeping: heartbeats, rebalances, auto-commits.
    self._coordinator.poll()
    # Fetch positions for any assigned partitions we don't yet have an
    # offset for.
    if not self._subscription.has_all_fetch_positions():
        self._update_fetch_positions(self._subscription.missing_fetch_positions())
    # If data is already buffered, return it immediately.
    records, partial = self._fetcher.fetched_records(max_records)
    if records:
        # Pipeline the next fetch while the caller processes these
        # records; `partial` presumably means buffered data remains for a
        # position, in which case a new fetch is not sent yet — TODO
        # confirm against Fetcher.fetched_records.
        if not partial:
            self._fetcher.send_fetches()
        return records
    self._fetcher.send_fetches()
    # Don't block past the coordinator's next scheduled poll time
    # (time_to_next_poll() is in seconds, hence the * 1000).
    timeout_ms = min(timeout_ms, self._coordinator.time_to_next_poll() * 1000)
    self._client.poll(timeout_ms=timeout_ms)
    # If the group needs to rejoin, return nothing so the rebalance can
    # proceed before the caller sees more data.
    if self._coordinator.need_rejoin():
        return {}
    records, _ = self._fetcher.fetched_records(max_records)
    return records
Do one round of polling. In addition to checking for new data, this does any needed heart-beating, auto-commits, and offset updates. Arguments: timeout_ms (int): The maximum time in milliseconds to block. Returns: dict: Map of topic to list of records (may be empty).
def type_name(value):
    """Return a user-readable name for the type of an object.

    :param value: a value (or a class) to get the type name of
    :return: a string of the type name, prefixed by its module unless the
        type is a builtin ('builtins' on Py3, '__builtin__' on Py2)
    """
    cls = value if inspect.isclass(value) else value.__class__
    # Tuple membership instead of building a throwaway set from a list on
    # every call; builtin types need no module prefix.
    if cls.__module__ in ('builtins', '__builtin__'):
        return cls.__name__
    return '%s.%s' % (cls.__module__, cls.__name__)
Returns a user-readable name for the type of an object :param value: A value to get the type name of :return: A unicode string of the object's type name
def get_download_link(self):
    """Get the direct download link via SoundCloud's redirect system.

    Tries the stream URL first for non-downloadable tracks, then falls
    back to the download URL. Errors are printed and None is returned if
    neither resolves.
    """
    def _resolve(url_template):
        # Resolve one redirect; on API error, report it and yield None.
        try:
            return self.client.get_location(url_template % self.get("id"))
        except serror as e:
            print(e)
            return None

    link = None
    if not self.get("downloadable"):
        link = _resolve(self.client.STREAM_URL)
    if not link:
        link = _resolve(self.client.DOWNLOAD_URL)
    return link
Get the direct download link via SoundCloud's redirect system.
def delete_rule(self, rule_id):
    """Delete the specific Rule from the dictionary indexed by rule id.

    Logs an error and leaves state untouched when the id is unknown;
    otherwise removes the entry and decrements the rule counter.
    """
    if rule_id in self.rules:
        self.rules.pop(rule_id)
        self.rule_cnt -= 1
    else:
        LOG.error("No Rule id present for deleting %s", rule_id)
Delete the specific Rule from dictionary indexed by rule id.
def create_new(cls, mapreduce_id, shard_number):
    """Create new shard state.

    Args:
      mapreduce_id: unique mapreduce id as string.
      shard_number: shard number for which to create shard state.

    Returns:
      new instance of ShardState ready to put into the datastore.
    """
    # Derive the entity key from the (mapreduce, shard) pair and build
    # the state directly.
    return cls(
        key_name=cls.shard_id_from_number(mapreduce_id, shard_number),
        mapreduce_id=mapreduce_id)
Create new shard state. Args: mapreduce_id: unique mapreduce id as string. shard_number: shard number for which to create shard state. Returns: new instance of ShardState ready to put into datastore.
def _default_next_colour(particle): return particle.colours[ (len(particle.colours) - 1) * particle.time // particle.life_time]
Default next colour implementation - linear progression through each colour tuple.
def to_dict(self, index=True, ordered=False):
    """Return a dict mapping the index/data names to their lists.

    :param index: if True, include the index under ``self._index_name``
    :param ordered: if True, return an OrderedDict to preserve column
        order (index first, then data)
    :return: dict or OrderedDict
    """
    container = OrderedDict if ordered else dict
    result = container()
    if index:
        result[self._index_name] = self._index
    result[self._data_name] = self._data
    return result
Returns a dict where the keys are the data and index names and the values are list of the data and index. :param index: If True then include the index in the dict with the index_name as the key :param ordered: If True then return an OrderedDict() to preserve the order of the columns in the Series :return: dict or OrderedDict()
def _load_from_tar(self, dtype_out_time, dtype_out_vert=False):
    """Load data saved in tarball form on the file system.

    :param dtype_out_time: key into ``self.file_name`` selecting the
        tarball member to open.
    :param dtype_out_vert: unused here; presumably kept for signature
        parity with sibling loaders — TODO confirm.
    :return: the ``self.name`` variable of the opened dataset.
    """
    path = os.path.join(self.dir_tar_out, 'data.tar')
    # dmget: presumably stages the archive from mass storage before
    # reading — verify against utils.io.
    utils.io.dmget([path])
    with tarfile.open(path, 'r') as data_tar:
        ds = xr.open_dataset(
            data_tar.extractfile(self.file_name[dtype_out_time])
        )
        # Index while the tar member is still open; xarray may read
        # lazily from the underlying file object.
        return ds[self.name]
Load data saved in tarball form on the file system.
async def build(self, building: UnitTypeId, near: Union[Point2, Point3], max_distance: int=20, unit: Optional[Unit]=None, random_alternative: bool=True, placement_step: int=2):
    """Find a valid placement near ``near`` and order a worker to build
    ``building`` there.

    Returns None when ``near`` is None, an ``ActionResult`` error code on
    failure, and the result of ``self.do(...)`` on success.
    """
    # Normalize `near` to a 2D point. A Unit is also accepted here even
    # though the annotation only lists Point2/Point3.
    if isinstance(near, Unit):
        near = near.position.to2
    elif near is not None:
        near = near.to2
    else:
        return
    p = await self.find_placement(building, near.rounded, max_distance, random_alternative, placement_step)
    if p is None:
        return ActionResult.CantFindPlacementLocation
    # Fall back to auto-selecting a build worker when none was supplied.
    unit = unit or self.select_build_worker(p)
    if unit is None or not self.can_afford(building):
        return ActionResult.Error
    return await self.do(unit.build(building, p))
Build a building.
def hello(event, context):
    """Serverless hello-world handler: echo the incoming event back inside
    a 200 response whose body is JSON-encoded.
    """
    payload = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event,
    }
    return {
        "statusCode": 200,
        "body": json.dumps(payload),
    }
Return Serverless Hello World.
def MergeAllSummaries(period=0, run_alone=False, key=None):
    """Evaluate all summaries in collection ``key`` and write them to logs.

    Args:
        period (int): if nonzero, additionally summarize every ``period``
            steps (always summarizes once per epoch).
        run_alone (bool): evaluate the summaries in their own run after
            each epoch instead of together with the last ``sess.run``.
        key (str): summary collection name; defaults to
            ``tf.GraphKeys.SUMMARIES``.
    """
    if key is None:
        key = tf.GraphKeys.SUMMARIES
    interval = int(period)
    if run_alone:
        return MergeAllSummaries_RunAlone(interval, key)
    return MergeAllSummaries_RunWithOp(interval, key)
This callback is enabled by default. Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs. Args: period (int): by default the callback summarizes once every epoch. This option (if not set to 0) makes it additionally summarize every ``period`` steps. run_alone (bool): whether to evaluate the summaries alone. If True, summaries will be evaluated after each epoch alone. If False, summaries will be evaluated together with the `sess.run` calls, in the last step of each epoch. For :class:`SimpleTrainer`, it needs to be False because summary may depend on inputs. key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``. Default is ``tf.GraphKeys.SUMMARIES``.
def links(self):
    """Return the parsed header links of the response, if any, keyed by
    each link's ``rel`` (or its URL when no ``rel`` is present).
    """
    header = (self.headers or {}).get('link')
    resolved = {}
    if header:
        for link in parse_header_links(header):
            resolved[link.get('rel') or link.get('url')] = link
    return resolved
Returns the parsed header links of the response, if any
def _check_repo_sign_utils_support(name):
    """Check for the specified command name in the search path.

    Returns True when found; raises CommandExecutionError otherwise.
    """
    # Guard clause: bail out with an explicit error if the utility is
    # not on PATH.
    if not salt.utils.path.which(name):
        raise CommandExecutionError(
            'utility \'{0}\' needs to be installed or made available in search path'.format(name)
        )
    return True
Check for specified command name in search path
def close_connection(self):
    """Close the connection to RabbitMQ.

    Sets ``self._closing`` before closing — presumably so the close
    callback can distinguish an intentional shutdown from an unexpected
    drop; TODO confirm against the on-close handler.
    """
    self._logger.info('Closing connection')
    self._closing = True
    self._connection.close()
This method closes the connection to RabbitMQ.
def ISIs(self, time_dimension=0, units=None, min_t=None, max_t=None):
    """Return the Inter Spike Intervals.

    `time_dimension`: which dimension contains the spike times (first by
        default)
    `units`, `min_t`, `max_t`: units of the output and the range of
        spikes that should be considered (defaults to the full range of
        the converted dimension)
    """
    units = self._default_units(units)
    dim, times = self.spike_times.get_converted(time_dimension, units)
    lower = dim.min if min_t is None else min_t
    upper = dim.max if max_t is None else max_t
    # Strictly-inside-range mask, then diff over the sorted times.
    selected = times[(times > lower) * (times < upper)]
    return np.diff(sorted(selected))
returns the Inter Spike Intervals `time_dimension`: which dimension contains the spike times (by default the first) `units`,`min_t`,`max_t`: define the units of the output and the range of spikes that should be considered
def remove(self, id_filter):
    """Remove a Filter by its identifier.

    :param id_filter: Filter identifier; integer greater than zero.
    :return: parsed API response.
    :raise InvalidParameterError: identifier is null or invalid.
    """
    # Validate before issuing the request.
    if not is_valid_int_param(id_filter):
        raise InvalidParameterError(
            u'The identifier of Filter is invalid or was not informed.')
    code, xml = self.submit(None, 'DELETE', 'filter/' + str(id_filter) + '/')
    return self.response(code, xml)
Remove Filter by the identifier. :param id_filter: Identifier of the Filter. Integer value and greater than zero. :return: None :raise InvalidParameterError: Filter identifier is null and invalid. :raise FilterNotFoundError: Filter not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def write(self, data: bytes) -> None:
    """Write the data through the delegate.

    :raises WriteAfterFinishedError: when the writer already finished
        without a prior error.
    :raises BaseWriteException: the first write failure is re-raised;
        subsequent writes re-raise the stored exception.
    """
    if self.finished():
        # A previously-stored failure takes precedence over the generic
        # "finished" error.
        if self._exc:
            raise self._exc
        raise WriteAfterFinishedError
    # Empty writes are a no-op.
    if not data:
        return
    try:
        self._delegate.write_data(data, finished=False)
    except BaseWriteException as e:
        # Mark the stream finished and remember only the FIRST error so
        # later writes fail fast with the same exception.
        self._finished.set()
        if self._exc is None:
            self._exc = e
        raise
Write the data.
def static_singleton(*args, **kwargs):
    """STATIC Singleton Design Pattern decorator.

    The decorated class name is rebound to a single instance constructed
    once with the arguments passed to the decorator; repeated decoration
    of the same class returns the cached instance.
    """
    def _decorate(cls):
        # Construct lazily, exactly once per class, in the shared
        # module-level registry.
        if cls not in __singleton_instances:
            __singleton_instances[cls] = cls(*args, **kwargs)
        return __singleton_instances[cls]
    return _decorate
STATIC Singleton Design Pattern Decorator Class is initialized with arguments passed into the decorator. :Usage: >>> @static_singleton('yop') class Bob(Person): def __init__(arg1): self.info = arg1 def says(self): print self.info b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance b2 = Bob #here b1 is the same object as b2 Bob.says() # it will display 'yop'
def domain(self, default):
    """Get the domain for this pipeline.

    - If an explicit domain was provided at construction time, use it.
    - Otherwise, infer a domain from the registered columns.
    - If no domain can be inferred, return ``default``.

    Raises ValueError when the inferred and constructed domains conflict.
    """
    inferred = infer_domain(self._output_terms)
    explicit = self._domain
    # Nothing inferable: fall back to the explicit domain or the default.
    if inferred is GENERIC:
        return default if explicit is GENERIC else explicit
    # Inferred but nothing explicit: use the inference.
    if explicit is GENERIC:
        return inferred
    # Both present: they must agree (identity, not equality).
    if inferred is not explicit:
        raise ValueError(
            "Conflicting domains in Pipeline. Inferred {}, but {} was "
            "passed at construction.".format(inferred, explicit)
        )
    return inferred
Get the domain for this pipeline. - If an explicit domain was provided at construction time, use it. - Otherwise, infer a domain from the registered columns. - If no domain can be inferred, return ``default``. Parameters ---------- default : zipline.pipeline.Domain Domain to use if no domain can be inferred from this pipeline by itself. Returns ------- domain : zipline.pipeline.Domain The domain for the pipeline. Raises ------ AmbiguousDomain ValueError If the terms in ``self`` conflict with self._domain.
def capture(self):
    """Capture the payment of an existing, uncaptured charge.

    This is the second half of the two-step payment flow, where the
    charge was first created with capture set to False.
    See https://stripe.com/docs/api#capture_charge
    """
    return self.__class__.sync_from_stripe_data(self.api_retrieve().capture())
Capture the payment of an existing, uncaptured, charge. This is the second half of the two-step payment flow, where first you created a charge with the capture option set to False. See https://stripe.com/docs/api#capture_charge
def send_request(self, request):
    """Create the transaction and fill it with the outgoing request.

    Fills in defaults (CON message type, fresh message ID) and registers
    the transaction under both a (host, port, mid) key and a
    (host, port, token) key.

    :type request: Request
    :param request: the request to send
    :rtype : Transaction
    :return: the created transaction, or None when the request has no
        destination
    """
    logger.debug("send_request - " + str(request))
    assert isinstance(request, Request)
    try:
        host, port = request.destination
    except AttributeError:
        # Destination missing/unset: nothing to register or send.
        return
    request.timestamp = time.time()
    transaction = Transaction(request=request, timestamp=request.timestamp)
    # Default to a confirmable message when no type was chosen.
    if transaction.request.type is None:
        transaction.request.type = defines.Types["CON"]
    if transaction.request.mid is None:
        transaction.request.mid = self.fetch_mid()
    # Index by message ID for ACK/RST matching...
    key_mid = str_append_hash(host, port, request.mid)
    self._transactions[key_mid] = transaction
    # ...and by token for response matching.
    key_token = str_append_hash(host, port, request.token)
    self._transactions_token[key_token] = transaction
    return self._transactions[key_mid]
Create the transaction and fill it with the outgoing request. :type request: Request :param request: the request to send :rtype : Transaction :return: the created transaction
def fetch(
        self,
        url,
        filename=None,
        decompress=False,
        force=False,
        timeout=None,
        use_wget_if_available=True):
    """Return the local path to the downloaded copy of a given URL.

    The file is not downloaded again if it is already present, unless
    `force` is True. Cached paths whose files have disappeared are
    evicted and re-downloaded.
    """
    cache_key = (url, decompress)
    if not force and cache_key in self._local_paths:
        cached = self._local_paths[cache_key]
        if exists(cached):
            return cached
        # Stale cache entry: the file vanished from disk.
        del self._local_paths[cache_key]
    local_path = download.fetch_file(
        url,
        filename=filename,
        decompress=decompress,
        subdir=self.subdir,
        force=force,
        timeout=timeout,
        use_wget_if_available=use_wget_if_available)
    self._local_paths[cache_key] = local_path
    return local_path
Return the local path to the downloaded copy of a given URL. Don't download the file again if it's already present, unless `force` is True.
def _get_revision(self): assert self._revisions, "no migration revision exist" revision = self._rev or self._revisions[-1] assert revision in self._revisions, "invalid revision specified" return revision
Validate and return the revision to use for current command
def fromovl(args):
    """
    %prog graph nucmer2ovl.ovl fastafile

    Build overlap graph from ovl file which is converted using NUCMER2OVL.
    """
    # NOTE: the docstring above doubles as the CLI help text (consumed by
    # OptionParser below), so its wording is user-facing.
    p = OptionParser(fromovl.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    ovlfile, fastafile = args
    ovl = OVL(ovlfile)
    g = ovl.graph
    # Record reads fully contained in others; they are excluded from the
    # AGP built below.
    fw = open("contained.ids", "w")
    print("\n".join(sorted(ovl.contained)), file=fw)
    # NOTE(review): fw is never explicitly closed/flushed here.
    graph_to_agp(g, ovlfile, fastafile, exclude=ovl.contained, verbose=False)
%prog graph nucmer2ovl.ovl fastafile Build overlap graph from ovl file which is converted using NUCMER2OVL.
def getcolors(spec, n, cmap=None, value=None):
    """Turn a list of color specs into a list of color arrays.

    When `cmap` is given (and spec is not None), colors come from the
    colormap evaluated at `value`; otherwise `spec` is interpreted as a
    named color, a list of named colors, a single RGB triple (repeated n
    times), or passed through unchanged.
    """
    if cmap is not None and spec is not None:
        from matplotlib.colors import LinearSegmentedColormap
        from matplotlib.cm import get_cmap
        if isinstance(cmap, LinearSegmentedColormap):
            return cmap(value)[:, 0:3]
        if isinstance(cmap, str):
            return get_cmap(cmap, n)(value)[:, 0:3]
    if isinstance(spec, str):
        return [getcolor(spec) for _ in range(n)]
    if isinstance(spec, list) and isinstance(spec[0], str):
        return [getcolor(s) for s in spec]
    if isinstance(spec, (list, ndarray)) and asarray(spec).shape == (3,):
        # Repeat the same triple n times (shared reference, as before).
        return [spec] * n
    return spec
Turn list of color specs into list of arrays.
def kill_pid(pid, signal=15):
    """Kill a process by PID.

    pid
        PID of the process to signal.
    signal
        Signal number to send (default 15, SIGTERM).

    Returns True on success, False when no such process exists.
    """
    try:
        psutil.Process(pid).send_signal(signal)
    except psutil.NoSuchProcess:
        return False
    return True
Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9
def all_points_mutual_reachability(X, labels, cluster_id, metric='euclidean', d=None, **kwd_args):
    """Compute the all-points-mutual-reachability distances for all the
    points of a cluster.

    If ``metric`` is 'precomputed' then ``X`` is assumed to be a distance
    matrix for the full dataset, and ``d`` (the data dimension) must be
    supplied since it cannot be inferred from the matrix.

    Returns a tuple ``(mutual_reachability, core_distances)`` restricted
    to the points whose label equals ``cluster_id``.
    """
    if metric == 'precomputed':
        if d is None:
            raise ValueError('If metric is precomputed a '
                             'd value must be provided!')
        # Slice the full precomputed matrix down to this cluster's
        # rows and columns.
        distance_matrix = X[labels == cluster_id, :][:, labels == cluster_id]
    else:
        subset_X = X[labels == cluster_id, :]
        distance_matrix = pairwise_distances(subset_X, metric=metric, **kwd_args)
        d = X.shape[1]
    core_distances = all_points_core_distance(distance_matrix.copy(), d=d)
    # Tile core distances into a square matrix so both points' core
    # distances can be compared element-wise against the pairwise
    # distance.
    core_dist_matrix = np.tile(core_distances, (core_distances.shape[0], 1))
    # mutual reachability(a, b) = max(d(a, b), core(a), core(b))
    result = np.dstack(
        [distance_matrix, core_dist_matrix, core_dist_matrix.T]).max(axis=-1)
    return result, core_distances
Compute the all-points-mutual-reachability distances for all the points of a cluster. If metric is 'precomputed' then assume X is a distance matrix for the full dataset. Note that in this case you must pass in 'd' the dimension of the dataset. Parameters ---------- X : array (n_samples, n_features) or (n_samples, n_samples) The input data of the clustering. This can be the data, or, if metric is set to `precomputed` the pairwise distance matrix used for the clustering. labels : array (n_samples) The label array output by the clustering, providing an integral cluster label to each data point, with -1 for noise points. cluster_id : integer The cluster label for which to compute the all-points mutual-reachability (which should be done on a cluster by cluster basis). metric : string The metric used to compute distances for the clustering (and to be re-used in computing distances for mr distance). If set to `precomputed` then X is assumed to be the precomputed distance matrix between samples. d : integer (or None) The number of features (dimension) of the dataset. This need only be set in the case of metric being set to `precomputed`, where the ambient dimension of the data is unknown to the function. **kwd_args : Extra arguments to pass to the distance computation for other metrics, such as minkowski, Mahanalobis etc. Returns ------- mutual_reachaibility : array (n_samples, n_samples) The pairwise mutual reachability distances between all points in `X` with `label` equal to `cluster_id`. core_distances : array (n_samples,) The all-points-core_distance of all points in `X` with `label` equal to `cluster_id`. References ---------- Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J., 2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
def get_capacity_grav(self, min_voltage=None, max_voltage=None,
                      use_overall_normalization=True):
    """Get the gravimetric capacity of the electrode in mAh/g across the
    insertion path, optionally restricted to a voltage window.

    Args:
        min_voltage (float): minimum allowable voltage for a step.
        max_voltage (float): maximum allowable voltage for a step.
        use_overall_normalization (bool): if False, normalize by the
            discharged state of only the matching voltage pairs; if True,
            use the full electrode path's normalization mass.
    """
    pairs = self._select_in_voltage_range(min_voltage, max_voltage)
    if use_overall_normalization or not pairs:
        norm_mass = self.normalization_mass
    else:
        norm_mass = pairs[-1].mass_discharge
    total_mAh = sum(pair.mAh for pair in pairs)
    return total_mAh / norm_mass
Get the gravimetric capacity of the electrode. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage allowable for a given step. use_overall_normalization (booL): If False, normalize by the discharged state of only the voltage pairs matching the voltage criteria. if True, use default normalization of the full electrode path. Returns: Gravimetric capacity in mAh/g across the insertion path (a subset of the path can be chosen by the optional arguments).
def connect(self):
    """Initialize a connection to the SMTP server.

    Optionally upgrades to STARTTLS and logs in when credentials are
    configured; sets ``self._connected`` on success.
    """
    # Implicit SSL uses smtplib.SMTP_SSL; otherwise plain SMTP.
    connection_method = 'SMTP_SSL' if self.ssl else 'SMTP'
    self._logger.debug('Trying to connect via {}'.format(connection_method))
    smtp = getattr(smtplib, connection_method)
    if self.port:
        self._smtp = smtp(self.address, self.port)
    else:
        # No port configured: let smtplib use its default.
        self._smtp = smtp(self.address)
    self._smtp.ehlo()
    if self.tls:
        # STARTTLS requires a fresh EHLO after the upgrade.
        self._smtp.starttls()
        self._smtp.ehlo()
    self._logger.info('Got smtp connection')
    if self.username and self.password:
        self._logger.info('Logging in')
        self._smtp.login(self.username, self.password)
    self._connected = True
Initializes a connection to the smtp server :return: True on success, False otherwise
def attach_log_stream(self):
    """Attach a streaming stdout/stderr log feed to the container.

    Per the API, a log stream can only be attached if the container uses
    a json-file log driver (guarded by ``self.has_api_logs``).
    """
    if self.has_api_logs:
        self.log_stream = self.attach(stdout=True, stderr=True, stream=True)
A log stream can only be attached if the container uses a json-file log driver.