code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def decode(self, encoded):
    """Decode an object (identity for this base codec).

    When ``enforce_reversible`` is set, verify that re-encoding the
    decoded value reproduces the input and raise ``ValueError`` if not.

    Args:
        encoded (object): Encoded object.

    Returns:
        object: Object decoded.

    Raises:
        ValueError: If decoding is not reversible for ``encoded``.
    """
    if self.enforce_reversible:
        # Temporarily clear the flag so the nested decode/encode calls
        # do not recurse into this reversibility check.
        self.enforce_reversible = False
        try:
            if self.encode(self.decode(encoded)) != encoded:
                raise ValueError('Decoding is not reversible for "%s"' % encoded)
        finally:
            # BUG FIX: restore the flag even when encode/decode raises;
            # previously a raised error left enforce_reversible False.
            self.enforce_reversible = True
    return encoded
Decodes an object. Args: encoded (object): Encoded object. Returns: object: Object decoded.
def clear_unattached_processes(self):
    """Drop every Process object in the snapshot whose process is not
    currently being debugged."""
    for pid in self.get_process_ids():
        process = self.get_process(pid)
        if process.is_being_debugged():
            continue
        self._del_process(process)
Removes Process objects from the snapshot referring to processes not being debugged.
def get_filename(self, index):
    """Return the normalized filename for the given model *index*.

    Returns None implicitly when *index* is falsy (e.g. an invalid
    QModelIndex).
    """
    if index:
        # Map the proxy index back to the filesystem model before
        # asking for the file path.
        path = self.fsmodel.filePath(self.proxymodel.mapToSource(index))
        return osp.normpath(to_text_string(path))
Return filename from index
def surround_parse(self, node, pre_char, post_char):
    """Parse the subnodes of *node*, wrapping the generated output with
    *pre_char* before and *post_char* after in ``self.pieces``.

    Subnodes with tags in the ``ignore`` list are skipped by
    ``subnode_parse``.
    """
    self.add_text(pre_char)
    self.subnode_parse(node)
    self.add_text(post_char)
Parse the subnodes of a given node. Subnodes with tags in the `ignore` list are ignored. Prepend `pre_char` and append `post_char` to the output in self.pieces.
def end_output(self, **kwargs):
    """Write the collected edges and end-of-graph markup, emit the
    optional "outro" part if configured, and close the output file."""
    self.write_edges()
    self.end_graph()
    if self.has_part("outro"):
        self.write_outro()
    self.close_fileoutput()
Write edges and end of checking info as gml comment.
def load_data(self, idx):
    """Call ``load_data(idx)`` on every handled |InputSequences| object."""
    for subseqs in self:
        # Only input sequence groups support loading external data.
        if isinstance(subseqs, abctools.InputSequencesABC):
            subseqs.load_data(idx)
Call method |InputSequences.load_data| of all handled |InputSequences| objects.
def convert_to_sqlite(self, destination=None, method="shell", progress=False):
    """Convert this Access database to a SQLite database.

    destination: target path; defaults to this file with a .sqlite
        extension (any existing default target is removed first).
    method: one of 'shell', 'object' or 'dataframe' — selects the
        conversion backend. Returns None for an unknown method.
    progress: when truthy, wrap iterations with a tqdm progress bar.
    """
    if progress:
        progress = tqdm.tqdm
    else:
        # Identity wrapper when no progress reporting is wanted.
        progress = lambda x:x
    if destination is None:
        destination = self.replace_extension('sqlite')
        # NOTE(review): removal nested under the default-destination
        # branch — the original one-line source is ambiguous here; confirm.
        destination.remove()
    if method == 'shell':
        return self.sqlite_by_shell(destination)
    if method == 'object':
        return self.sqlite_by_object(destination, progress)
    if method == 'dataframe':
        return self.sqlite_by_df(destination, progress)
Who wants to use Access when you can deal with SQLite databases instead?
def has_permission(self, request, view):
    """Allow all non-POST requests; for POST, permit only the owner of
    the profile named in the URL kwargs."""
    if request.method == 'POST':
        user = Profile.objects.only('id', 'username').get(username=view.kwargs['username'])
        return request.user.id == user.id
    return True
applies to social-link-list
def serialise(self, element: Element, **kwargs) -> str:
    """Serialise *element* into a JSON string.

    Extra keyword arguments are forwarded to :func:`json.dumps`.

    >>> JSONSerialiser().serialise(String(content='Hello'))
    '{"element": "string", "content": "Hello"}'
    """
    as_dict = self.serialise_dict(element)
    return json.dumps(as_dict, **kwargs)
Serialises the given element into JSON. >>> JSONSerialiser().serialise(String(content='Hello')) '{"element": "string", "content": "Hello"}'
def validate_replicas(self, data):
    """Validate the replica configuration of a distributed experiment.

    Delegates to the module-level ``validate_replicas`` helper when the
    environment defines replicas; otherwise a no-op.
    """
    environment = data.get('environment')
    if environment and environment.replicas:
        validate_replicas(data.get('framework'), environment.replicas)
Validate distributed experiment
def drop_post(self):
    """Strip any trailing ``.postXXXX`` segment from ``self.version``."""
    head, separator, _ = self.version.partition('.post')
    if separator:
        self.version = head
Remove .postXXXX postfix from version
def Format(self, format_string, rdf):
    """Apply string formatting templates to rdf data.

    Uses objectfilter expansion to acquire the target attribute in one
    pass rather than recursing down the attribute tree; repeated items
    are condensed into a single comma-separated list.

    Args:
        format_string: A format string specification.
        rdf: The rdf value to be formatted.

    Returns:
        A string of formatted data.
    """
    result = []
    for literal_text, field_name, _, _ in self.parse(format_string):
        if literal_text:
            result.append(literal_text)
        if field_name is not None:
            rslts = []
            objs = self.expander(rdf, field_name)
            for o in objs:
                # FanOut coerces each rdf value into string-formatter
                # compatible pieces.
                rslts.extend(self.FanOut(o))
            # Condense repeated items into a comma separated list.
            result.append(",".join(rslts))
    return "".join(result)
Apply string formatting templates to rdf data. Uses some heuristics to coerce rdf values into a form compatible with string formatter rules. Repeated items are condensed into a single comma separated list. Unlike regular string.Formatter operations, we use objectfilter expansion to fully acquire the target attribute in one pass, rather than recursing down each element of the attribute tree. Args: format_string: A format string specification. rdf: The rdf value to be formatted. Returns: A string of formatted data.
def max_id_length(self):
    """Return the maximum length of a todo ID, for formatting purposes.

    Text identifiers delegate to the module-level ``max_id_length``
    helper; numeric identifiers use the number of decimal digits.
    """
    if config().identifiers() == "text":
        return max_id_length(len(self._todos))
    else:
        try:
            return math.ceil(math.log(len(self._todos), 10))
        except ValueError:
            # math.log(0) raises ValueError for an empty todo list.
            return 0
Returns the maximum length of a todo ID, used for formatting purposes.
def store_magic_envelope_doc(self, payload):
    """Parse *payload* into the Magic Envelope document ``self.doc``,
    trying JSON first and falling back to XML."""
    try:
        json_payload = json.loads(decode_if_bytes(payload))
    except ValueError:
        # Not JSON - treat the payload as (possibly URL-quoted) XML.
        xml = unquote(decode_if_bytes(payload))
        xml = xml.lstrip().encode("utf-8")
        logger.debug("diaspora.protocol.store_magic_envelope_doc: xml payload: %s", xml)
        self.doc = etree.fromstring(xml)
    else:
        logger.debug("diaspora.protocol.store_magic_envelope_doc: json payload: %s", json_payload)
        self.doc = self.get_json_payload_magic_envelope(json_payload)
Get the Magic Envelope, trying JSON first.
def field_adaptors(self):
    """Return a tuple of Fields for captured fields which need additional
    treatment — currently a single SourcesField when sources exist,
    otherwise an empty tuple."""
    with exception_logging(logger, 'Exception in `field_adaptors` property'):
        conjunction_globs = self.get_sources()
        if conjunction_globs is None:
            return tuple()
        sources = conjunction_globs.non_path_globs
        conjunction = conjunction_globs.conjunction
        if not sources:
            return tuple()
        base_globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
        path_globs = base_globs.to_path_globs(self.address.spec_path, conjunction)
        return (SourcesField(
            self.address,
            'sources',
            base_globs.filespecs,
            base_globs,
            path_globs,
            self.validate_sources,
        ),)
Returns a tuple of Fields for captured fields which need additional treatment.
def encode(self, transmission):
    """Encode *transmission* into a CWR string structure.

    Walks header, then each group (group header, every record of every
    transaction, group trailer), then the transmission trailer.

    :param transmission: the domain-model instance to encode
    :return: a CWR string structure created from the received data
    """
    parts = [self._record_encode(transmission.header)]
    for group in transmission.groups:
        parts.append(self._record_encode(group.group_header))
        for transaction in group.transactions:
            parts.extend(self._record_encode(record) for record in transaction)
        parts.append(self._record_encode(group.group_trailer))
    parts.append(self._record_encode(transmission.trailer))
    return ''.join(parts)
Encodes the data, creating a CWR structure from an instance of the domain model. :param transmission: the instance to encode :return: a CWR string structure created from the received data
def _ParseLayerConfigJSON(self, parser_mediator, file_object):
    """Extracts events from a Docker filesystem layer configuration file.

    The path of each filesystem layer config file is:
    DOCKER_DIR/graph/<layer_id>/json

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.

    Raises:
        UnableToParseFile: when the file is not a valid layer config file.
    """
    file_content = file_object.read()
    file_content = codecs.decode(file_content, self._ENCODING)
    json_dict = json.loads(file_content)
    if 'docker_version' not in json_dict:
        raise errors.UnableToParseFile(
            'not a valid Docker layer configuration file, missing '
            '\'docker_version\' key.')
    # Only layers with a 'created' timestamp produce an event.
    if 'created' in json_dict:
        layer_creation_command_array = [
            x.strip() for x in json_dict['container_config']['Cmd']]
        layer_creation_command = ' '.join(layer_creation_command_array).replace(
            '\t', '')
        event_data = DockerJSONLayerEventData()
        event_data.command = layer_creation_command
        event_data.layer_id = self._GetIdentifierFromPath(parser_mediator)
        timestamp = timelib.Timestamp.FromTimeString(json_dict['created'])
        event = time_events.TimestampEvent(
            timestamp, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Docker filesystem layer configuration file. The path of each filesystem layer config file is: DOCKER_DIR/graph/<layer_id>/json Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file is not a valid layer config file.
def cli_help_message(self, description):
    """Get a user friendly help message for a ``click.Command`` epilog.

    Parameters
    ----------
    description : str
        Description of the configuration file to include in the message.

    Returns
    -------
    str
        A help message using click's help formatting constructs.

    NOTE(review): ``dedent()`` is called here without its template-string
    argument — the multi-line template appears to have been lost from
    this source; confirm against the original before relying on it.
    """
    config_files_listing = '\n'.join(' {}. {!s}'.format(i, path)
                                     for i, path in enumerate(self._paths, 1))
    text = dedent(
    ).format(
        config_file='{}.conf'.format(self._configuration_name),
        description=description,
        config_files_listing=config_files_listing
    )
    return text
Get a user friendly help message that can be dropped in a `click.Command`\ 's epilog. Parameters ---------- description : str Description of the configuration file to include in the message. Returns ------- str A help message that uses :py:mod:`click`\ 's help formatting constructs (e.g. ``\b``).
def slice(self, tf_tensor, tensor_shape):
    """Slice out this processor's part of *tf_tensor*, selected by the
    runtime ``pnum`` variable.

    Fully replicated layouts are returned whole, without slicing.
    """
    tensor_layout = self.tensor_layout(tensor_shape)
    if tensor_layout.is_fully_replicated:
        return self.LaidOutTensor([tf_tensor])
    else:
        slice_shape = self.slice_shape(tensor_shape)
        # Precompute the slice start for every processor, then gather the
        # one matching the runtime pnum tensor.
        slice_begins = [
            self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)
        ]
        slice_begins_tensor = tf.stack(slice_begins)
        selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
        return self.LaidOutTensor(
            [tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
Slice out the corresponding part of tensor given the pnum variable.
def percentage_of_reoccurring_values_to_all_values(x):
    """Return the ratio of data points whose value occurs more than once
    to all data points in the time series.

        # of data points occurring more than once / # of all data points

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature (NaN for an empty series)
    :return type: float
    """
    series = x if isinstance(x, pd.Series) else pd.Series(x)
    if series.size == 0:
        return np.nan
    counts = series.value_counts()
    repeated_total = counts[counts > 1].sum()
    if np.isnan(repeated_total):
        return 0
    return repeated_total / series.size
Returns the ratio of unique values, that are present in the time series more than once. # of data points occurring more than once / # of all data points This means the ratio is normalized to the number of data points in the time series, in contrast to the percentage_of_reoccurring_datapoints_to_all_datapoints. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
def _post(self, url, data=None, json=None, params=None, headers=None):
    """Wrap ``requests.post`` with a URL check, applying this client's
    timeout and TLS verification settings.

    Returns the raw ``requests.Response``.
    """
    url = self.clean_url(url)
    response = requests.post(url, data=data, json=json, params=params,
                             headers=headers, timeout=self.timeout,
                             verify=self.verify)
    return response
Wraps a POST request with a url check
def enable_cache():
    """Enable the requests library cache via requests_cache.

    Best-effort: prints a warning to stderr and returns when
    requests_cache is not installed. Creates CACHE_DIR if needed.
    """
    try:
        import requests_cache
    except ImportError as err:
        sys.stderr.write('Failed to enable cache: {0}\n'.format(str(err)))
        return
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    requests_cache.install_cache(CACHE_FILE)
Enable requests library cache.
def is_cached(self):
    """Check whether this stage has already been run and its results
    stored, i.e. re-running it would be a no-op."""
    from dvc.remote.local import RemoteLOCAL
    from dvc.remote.s3 import RemoteS3

    old = Stage.load(self.repo, self.path)
    if old._changed_outs():
        return False

    # Save deps so their checksums appear in the dumped stage data below.
    for dep in self.deps:
        dep.save()

    old_d = old.dumpd()
    new_d = self.dumpd()

    # Compare everything except checksum fields, which may legitimately
    # differ between the stored stage file and the current one.
    old_d.pop(self.PARAM_MD5, None)
    new_d.pop(self.PARAM_MD5, None)
    outs = old_d.get(self.PARAM_OUTS, [])
    for out in outs:
        out.pop(RemoteLOCAL.PARAM_CHECKSUM, None)
        out.pop(RemoteS3.PARAM_CHECKSUM, None)

    if old_d != new_d:
        return False

    old.commit()
    return True
Checks if this stage has been already ran and stored
def upcaseTokens(s, l, t):
    """Helper parse action to convert matched tokens to upper case.

    s, l, t follow the pyparsing parse-action signature
    (original string, match location, token list).
    """
    return [tt.upper() for tt in map(_ustr, t)]
Helper parse action to convert tokens to upper case.
def more_data(pipe_out):
    """Check if there is more data left on the pipe (non-blocking poll).

    :param pipe_out: the os pipe read end
    :rtype: bool
    """
    readable, _, _ = select.select([pipe_out], [], [], 0)
    return len(readable) > 0
Check if there is more data left on the pipe :param pipe_out: The os pipe_out :rtype: bool
def reload_texts(self, texts, ids, vocabulary=None):
    """Compute and store the term vectors of *texts*, discarding any
    previously stored information and restarting counts from scratch.

    Used to redefine the vocabulary the vectors are built on.

    Args:
        texts (list): N texts to incorporate.
        ids (list): N alphanumeric ids for the texts.
        vocabulary: optional vocabulary to fix on the vectorizer.
    """
    self._check_id_length(ids)
    self.ids = np.array(sorted(ids))
    if vocabulary:
        self.vectorizer.vocabulary = vocabulary
    # Keep the texts aligned with the sorted ids.
    sorted_texts = [x for (y, x) in sorted(zip(ids, texts))]
    self.term_mat = self.vectorizer.fit_transform(sorted_texts)
    self._update_tfidf()
Calcula los vectores de terminos de textos y los almacena. A diferencia de :func:`~TextClassifier.TextClassifier.store_text` esta funcion borra cualquier informacion almacenada y comienza el conteo desde cero. Se usa para redefinir el vocabulario sobre el que se construyen los vectores. Args: texts (list): Una lista de N textos a incorporar. ids (list): Una lista de N ids alfanumericos para los textos.
def calc_qout_v1(self):
    """Sum the outputs of the different response functions into |QOut|.

    Required derived parameter: |Nmb|
    Required flux sequences: |QPOut|
    Calculated flux sequence: |QOut|
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    flu.qout = 0.
    for idx in range(der.nmb):
        flu.qout += flu.qpout[idx]
Sum up the results of the different response functions. Required derived parameter: |Nmb| Required flux sequences: |QPOut| Calculated flux sequence: |QOut| Examples: Initialize an arma model with three different response functions: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(3) >>> fluxes.qpout.shape = 3 Define the output values of the three response functions and apply method |calc_qout_v1|: >>> fluxes.qpout = 1.0, 2.0, 3.0 >>> model.calc_qout_v1() >>> fluxes.qout qout(6.0)
def isRealmUser(self, realmname, username, environ):
    """Return True if *username* is valid for realm *realmname*
    (i.e. has admin rights on the corresponding course), else False.
    """
    try:
        course = self.course_factory.get_course(realmname)
        return self.user_manager.has_admin_rights_on_course(course, username=username)
    except Exception:
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Any lookup/rights error simply means the
        # user is not valid for this realm.
        return False
Returns True if this username is valid for the realm, False otherwise.
def backends_to_mutate(self, namespace, stream):
    """Return all the backends enabled for writing for *stream*.

    Raises:
        NamespaceMissing: when *namespace* is not configured.
    """
    if namespace not in self.namespaces:
        raise NamespaceMissing('`{}` namespace is not configured'
                               .format(namespace))
    return self.prefix_confs[namespace][self.get_matching_prefix(namespace, stream)]
Return all the backends enabled for writing for `stream`.
def check_plugins(self):
    """Collect plugins from the ``frosted.plugins`` entry point and run
    their ``check()`` method against the current filename, reporting any
    messages they produce."""
    checkers = {}
    for ep in pkg_resources.iter_entry_points(group='frosted.plugins'):
        checkers.update({ep.name: ep.load()})
    for plugin_name, plugin in checkers.items():
        # '(none)' presumably means input came from stdin, so there is
        # no real file for plugins to check — TODO confirm.
        if self.filename != '(none)':
            messages = plugin.check(self.filename)
            for message, loc, args, kwargs in messages:
                self.report(message, loc, *args, **kwargs)
collect plugins from entry point 'frosted.plugins' and run their check() method, passing the filename
def _validate_group(self, group):
    """Validate a Group instance against the allowed group IDs, or
    against the children of an allowed parent group.

    Raises:
        ValidationError: when the group is not a valid selection for
            this field.
    """
    if self._show_all_groups:
        return
    if group.id in self._allowed_group_ids:
        return
    # Accept the group when it is a child of any allowed parent group.
    for parent_group_id in self._allowed_subgroup_ids:
        parent_group = self._swimlane.groups.get(id=parent_group_id)
        parent_group_child_ids = set([g['id'] for g in parent_group._raw['groups']])
        if group.id in parent_group_child_ids:
            return
    raise ValidationError(
        self.record,
        'Group `{}` is not a valid selection for field `{}`'.format(
            group,
            self.name
        )
    )
Validate a Group instance against allowed group IDs or subgroup of a parent group
def show_time_as_short_string(self, seconds):
    """Convert a duration in *seconds* to a short human-readable string
    (seconds up to years), e.g. to convey an algorithm's running time.

    Prints a warning when the duration is a year or more.
    """
    minute, hour, day, year = 60, 3600, 3600 * 24, 3600 * 24 * 365
    if seconds < minute:
        return str(seconds) + ' seconds'
    elif seconds < hour:
        return str(round(seconds / minute, 1)) + ' minutes'
    elif seconds < day:
        # BUG FIX: previously divided by 60*24 (=1440) instead of 3600.
        return str(round(seconds / hour, 1)) + ' hours'
    elif seconds < year:
        return str(round(seconds / day, 1)) + ' days'
    else:
        # BUG FIX: previously divided by 60*24*365 instead of 3600*24*365.
        print('WARNING - this will take ' + str(seconds / year) + ' YEARS to run')
        return str(round(seconds / year, 1)) + ' years'
converts seconds to a string in terms of seconds -> years to show complexity of algorithm
def query_object(self, obj_uuid):
    """Query the IUnknown interface to an object in the extension pack
    main module, allowing plug-ins and others to talk directly to an
    extension pack.

    in obj_uuid of type str: the object ID.
    return return_interface of type Interface: the queried interface.
    """
    # basestring: this is Python 2 style code.
    if not isinstance(obj_uuid, basestring):
        raise TypeError("obj_uuid can only be an instance of type basestring")
    return_interface = self._call("queryObject", in_p=[obj_uuid])
    return_interface = Interface(return_interface)
    return return_interface
Queries the IUnknown interface to an object in the extension pack main module. This allows plug-ins and others to talk directly to an extension pack. in obj_uuid of type str The object ID. What exactly this is return return_interface of type Interface The queried interface.
def resample_returns(
        returns,
        func,
        seed=0,
        num_trials=100
):
    """Resample *returns* with replacement and evaluate *func* on every
    new sample (bootstrap resampling).

    https://en.wikipedia.org/wiki/Resampling_(statistics)

    :param returns (Series, DataFrame): Returns
    :param func: statistic computed on each resampled set of returns
    :param seed: base seed for the random number generator
    :param num_trials: number of times to resample and run the experiment
    :return: Series/DataFrame of resampled statistics
    """
    # isinstance instead of `type(...) is` so Series/DataFrame
    # subclasses are accepted as well.
    if isinstance(returns, pd.Series):
        stats = pd.Series(index=range(num_trials))
    elif isinstance(returns, pd.DataFrame):
        stats = pd.DataFrame(
            index=range(num_trials),
            columns=returns.columns
        )
    else:
        raise TypeError("returns needs to be a Series or DataFrame!")
    n = returns.shape[0]
    for i in range(num_trials):
        # Offset the seed per trial so draws differ but stay reproducible.
        random_indices = resample(returns.index, n_samples=n, random_state=seed + i)
        stats.loc[i] = func(returns.loc[random_indices])
    return stats
Resample the returns and calculate any statistic on every new sample. https://en.wikipedia.org/wiki/Resampling_(statistics) :param returns (Series, DataFrame): Returns :param func: Given the resampled returns calculate a statistic :param seed: Seed for random number generator :param num_trials: Number of times to resample and run the experiment :return: Series of resampled statistics
def load_checkpoint(with_local=False):
    """Load the latest rabit check point.

    Parameters
    ----------
    with_local: bool, optional
        whether the checkpoint contains a local model

    Returns
    -------
    tuple
        (version, global_model, local_model) when *with_local*, else
        (version, global_model). A returned version of 0 means nothing
        has been checkpointed yet and the models are None.
    """
    gptr = ctypes.POINTER(ctypes.c_char)()
    global_len = ctypes.c_ulong()
    if with_local:
        lptr = ctypes.POINTER(ctypes.c_char)()
        local_len = ctypes.c_ulong()
        version = _LIB.RabitLoadCheckPoint(
            ctypes.byref(gptr),
            ctypes.byref(global_len),
            ctypes.byref(lptr),
            ctypes.byref(local_len))
        if version == 0:
            return (version, None, None)
        return (version,
                _load_model(gptr, global_len.value),
                _load_model(lptr, local_len.value))
    else:
        version = _LIB.RabitLoadCheckPoint(
            ctypes.byref(gptr),
            ctypes.byref(global_len),
            None, None)
        if version == 0:
            return (version, None)
        return (version, _load_model(gptr, global_len.value))
Load latest check point. Parameters ---------- with_local: bool, optional whether the checkpoint contains local model Returns ------- tuple : tuple if with_local: return (version, gobal_model, local_model) else return (version, gobal_model) if returned version == 0, this means no model has been CheckPointed and global_model, local_model returned will be None
def extend(self, protocol: Union[Iterable[Dict], 'Pipeline']) -> 'Pipeline':
    """Add another pipeline to the end of the current pipeline.

    :param protocol: An iterable of dictionaries (or another Pipeline)
    :return: This pipeline, for fluid query building

    Example:
    >>> p1 = Pipeline.from_functions(['enrich_protein_and_rna_origins'])
    >>> p2 = Pipeline.from_functions(['remove_pathologies'])
    >>> p1.extend(p2)
    """
    for data in protocol:
        name, args, kwargs = _get_protocol_tuple(data)
        self.append(name, *args, **kwargs)
    return self
Add another pipeline to the end of the current pipeline. :param protocol: An iterable of dictionaries (or another Pipeline) :return: This pipeline for fluid query building Example: >>> p1 = Pipeline.from_functions(['enrich_protein_and_rna_origins']) >>> p2 = Pipeline.from_functions(['remove_pathologies']) >>> p1.extend(p2)
def update(self, data):
    """Merge newly provided version records into ``Handler.ALL_VERS_DATA``.

    *data* is a record dict or a list of record dicts; each record must
    carry a ``label`` key. ``self._updated`` is set whenever anything new
    or different is stored.

    Raises:
        ValueError: when a record has no ``label`` key.
    """
    if not isinstance(data, list):
        data = [data]
    master = Handler.ALL_VERS_DATA
    for record in data:
        # Normalize values: keep them as ints where possible.
        for k, v in iteritems(record):
            try:
                record[k] = int(v)
            except ValueError:
                record[k] = v
        try:
            label = record["label"]
        except KeyError:
            # BUG FIX: the original formatted this message from an
            # undefined name `kwargs`; report the offending record instead.
            raise ValueError("Must provide a valid label argument. Given:%s%s" % (
                os.linesep,
                ("%s " % (os.linesep)).join(
                    ["%15s:%s" % (k, v) for k, v in iteritems(record)])))
        try:
            masterLabel = master[label]
        except KeyError:
            # First record seen for this label - store it wholesale.
            master[label] = record
            self._updated = True
            continue
        for k, v in iteritems(record):
            try:
                if masterLabel[k] == v:
                    continue
            except KeyError:
                pass
            self._updated = True
            try:
                master[label].update(record)
            except KeyError:
                break
update known data with with newly provided data
def send(
        self,
        *args: str,
        text: str=None,
) -> IterationRecord:
    """Post text-only to all active outputs and record the iteration.

    :param args: positional arguments; the first is used as the message
        text when the keyword ``text`` is not given.
    :param text: text to send as message in post (preferred over args).
    :returns: new record of this iteration.

    :raises BotSkeletonException: when no text is provided at all.
    """
    # The keyword `text` takes precedence over the first positional arg.
    if text is not None:
        final_text = text
    else:
        if len(args) == 0:
            raise BotSkeletonException(("Please provide text either as a positional arg or "
                                        "as a keyword arg (text=TEXT)"))
        else:
            final_text = args[0]
    record = IterationRecord(extra_keys=self.extra_keys)
    for key, output in self.outputs.items():
        if output["active"]:
            self.log.info(f"Output {key} is active, calling send on it.")
            entry: Any = output["obj"]
            output_result = entry.send(text=final_text)
            record.output_records[key] = output_result
        else:
            self.log.info(f"Output {key} is inactive. Not sending.")
    self.history.append(record)
    self.update_history()
    return record
Post text-only to all outputs. :param args: positional arguments. expected: text to send as message in post. keyword text argument is preferred over this. :param text: text to send as message in post. :returns: new record of iteration
def process_request(self, request):
    """Set ``request.urlconf`` to the urlconf associated with the
    request's subdomain, if it is listed in
    ``settings.SUBDOMAIN_URLCONFS``."""
    super(SubdomainURLRoutingMiddleware, self).process_request(request)
    subdomain = getattr(request, 'subdomain', UNSET)
    if subdomain is not UNSET:
        urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain)
        if urlconf is not None:
            logger.debug("Using urlconf %s for subdomain: %s",
                         repr(urlconf), repr(subdomain))
            request.urlconf = urlconf
Sets the current request's ``urlconf`` attribute to the urlconf associated with the subdomain, if it is listed in ``settings.SUBDOMAIN_URLCONFS``.
def exist(self, table: str, libref: str = "") -> bool:
    """Return True if the SAS Data Set exists and False if it does not.

    table  - the name of the SAS Data Set
    libref - the libref for the Data Set; defaults to WORK, or USER if
             assigned
    """
    # Build SAS code that checks both TABLE and VIEW existence and puts
    # a parseable marker line into the log.
    code = "data _null_; e = exist('"
    if len(libref):
        code += libref+"."
    code += table+"');\n"
    code += "v = exist('"
    if len(libref):
        code += libref+"."
    code += table+"', 'VIEW');\n if e or v then e = 1;\n"
    code += "te='TABLE_EXISTS='; put te e;run;"
    ll = self.submit(code, "text")
    # Extract the 0/1 value following the marker from the SAS log.
    l2 = ll['LOG'].rpartition("TABLE_EXISTS= ")
    l2 = l2[2].partition("\n")
    exists = int(l2[0])
    return bool(exists)
table - the name of the SAS Data Set libref - the libref for the Data Set, defaults to WORK, or USER if assigned Returns True it the Data Set exists and False if it does not
def cc(self, cc_emails, global_substitutions=None, is_multiple=False, p=0):
    """Add Cc recipients to the Personalization object.

    :param cc_emails: A Cc or list of Cc objects (str and (email, name)
        tuples are wrapped into Cc automatically)
    :param global_substitutions: A dict of substitutions for all recipients
    :param is_multiple: Create a new personalization for each recipient
    :param p: the Personalization object or Personalization object index
    """
    if isinstance(cc_emails, list):
        for email in cc_emails:
            if isinstance(email, str):
                email = Cc(email, None)
            if isinstance(email, tuple):
                email = Cc(email[0], email[1])
            self.add_cc(email, global_substitutions, is_multiple, p)
    else:
        if isinstance(cc_emails, str):
            cc_emails = Cc(cc_emails, None)
        if isinstance(cc_emails, tuple):
            # BUG FIX: a bare (email, name) tuple was previously wrapped
            # in To instead of Cc, putting the recipient on the wrong line.
            cc_emails = Cc(cc_emails[0], cc_emails[1])
        self.add_cc(cc_emails, global_substitutions, is_multiple, p)
Adds Cc objects to the Personalization object :param cc_emails: A Cc or list of Cc objects :type cc_emails: Cc, list(Cc), tuple :param global_substitutions: A dict of substitutions for all recipients :type global_substitutions: dict :param is_multiple: Create a new personalization for each recipient :type is_multiple: bool :param p: p is the Personalization object or Personalization object index :type p: Personalization, integer, optional
def search_variant_sets(self, dataset_id):
    """Return an iterator over the VariantSets in the specified Dataset.

    :param str dataset_id: The ID of the Dataset of interest.
    :return: An iterator over matching VariantSet objects.
    """
    request = protocol.SearchVariantSetsRequest()
    request.dataset_id = dataset_id
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "variantsets", protocol.SearchVariantSetsResponse)
Returns an iterator over the VariantSets fulfilling the specified conditions from the specified Dataset. :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset` of interest. :return: An iterator over the :class:`ga4gh.protocol.VariantSet` objects defined by the query parameters.
def get_tohu_items_name(cls):
    """Return the name of the namedtuple class used for items produced by
    the custom generator *cls*.

    By default this is the class name with a trailing 'Generator'
    stripped (FoobarGenerator -> Foobar); it can be overridden by setting
    ``__tohu_items_name__`` in the class definition.

    Raises:
        ValueError: when no name can be derived.
    """
    assert issubclass(cls, TohuBaseGenerator)
    try:
        # Look only in cls.__dict__ so the attribute is not inherited.
        tohu_items_name = cls.__dict__['__tohu_items_name__']
        logger.debug(f"Using item class name '{tohu_items_name}' (derived from attribute '__tohu_items_name__')")
    except KeyError:
        m = re.match('^(.*)Generator$', cls.__name__)
        if m is not None:
            tohu_items_name = m.group(1)
            logger.debug(f"Using item class name '{tohu_items_name}' (derived from custom generator name)")
        else:
            msg = (
                "Cannot derive class name for items to be produced by custom generator. "
                "Please set '__tohu_items_name__' at the top of the custom generator's "
                "definition or change its name so that it ends in '...Generator'"
            )
            raise ValueError(msg)
    return tohu_items_name
Return a string which defines the name of the namedtuple class which will be used to produce items for the custom generator. By default this will be the first part of the class name (before '...Generator'), for example: FoobarGenerator -> Foobar QuuxGenerator -> Quux However, it can be set explicitly by the user by defining `__tohu_items_name__` in the class definition, for example: class Quux(CustomGenerator): __tohu_items_name__ = 'MyQuuxItem'
def terms_required(view_func):
    """Decorator: redirect logged-in users who have not accepted the
    active site terms to the accept-terms page (with a returnTo query
    parameter pointing back to the current path)."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        # is_authenticated is callable on older Django versions and a
        # property on newer ones.
        if DJANGO_VERSION <= (2, 0, 0):
            user_authenticated = request.user.is_authenticated()
        else:
            user_authenticated = request.user.is_authenticated
        # Anonymous users and users with no outstanding terms pass through.
        if not user_authenticated or not TermsAndConditions.get_active_terms_not_agreed_to(request.user):
            return view_func(request, *args, **kwargs)
        current_path = request.path
        login_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring['returnTo'] = current_path
        login_url_parts[4] = querystring.urlencode(safe='/')
        return HttpResponseRedirect(urlunparse(login_url_parts))
    return _wrapped_view
This decorator checks to see if the user is logged in, and if so, if they have accepted the site terms.
def get_workflow_config(runtime, code_dir, project_dir):
    """Get a workflow config that corresponds to the runtime provided.

    For runtimes with more than one workflow, ``code_dir`` then
    ``project_dir`` are examined for a supported manifest file.

    Parameters
    ----------
    runtime : str
        The runtime of the config.
    code_dir : str
        Directory where the Lambda function code is present.
    project_dir : str
        Root of the Serverless application project.

    Returns
    -------
    namedtuple(Capability)
        namedtuple that represents the Builder Workflow Config.

    Raises
    ------
    UnsupportedRuntimeException
        When the runtime is unknown or no supported workflow is found.
    """
    selectors_by_runtime = {
        "python2.7": BasicWorkflowSelector(PYTHON_PIP_CONFIG),
        "python3.6": BasicWorkflowSelector(PYTHON_PIP_CONFIG),
        "python3.7": BasicWorkflowSelector(PYTHON_PIP_CONFIG),
        "nodejs4.3": BasicWorkflowSelector(NODEJS_NPM_CONFIG),
        "nodejs6.10": BasicWorkflowSelector(NODEJS_NPM_CONFIG),
        "nodejs8.10": BasicWorkflowSelector(NODEJS_NPM_CONFIG),
        "ruby2.5": BasicWorkflowSelector(RUBY_BUNDLER_CONFIG),
        "dotnetcore2.0": BasicWorkflowSelector(DOTNET_CLIPACKAGE_CONFIG),
        "dotnetcore2.1": BasicWorkflowSelector(DOTNET_CLIPACKAGE_CONFIG),
        # Java has multiple build systems; pick by manifest presence.
        "java8": ManifestWorkflowSelector([
            JAVA_GRADLE_CONFIG._replace(executable_search_paths=[code_dir, project_dir]),
            JAVA_KOTLIN_GRADLE_CONFIG._replace(executable_search_paths=[code_dir, project_dir]),
            JAVA_MAVEN_CONFIG
        ]),
    }
    if runtime not in selectors_by_runtime:
        raise UnsupportedRuntimeException("'{}' runtime is not supported".format(runtime))
    selector = selectors_by_runtime[runtime]
    try:
        config = selector.get_config(code_dir, project_dir)
        return config
    except ValueError as ex:
        raise UnsupportedRuntimeException("Unable to find a supported build workflow for runtime '{}'. Reason: {}"
                                          .format(runtime, str(ex)))
Get a workflow config that corresponds to the runtime provided. This method examines contents of the project and code directories to determine the most appropriate workflow for the given runtime. Currently the decision is based on the presence of a supported manifest file. For runtimes that have more than one workflow, we choose a workflow by examining ``code_dir`` followed by ``project_dir`` for presence of a supported manifest. Parameters ---------- runtime str The runtime of the config code_dir str Directory where Lambda function code is present project_dir str Root of the Serverless application project. Returns ------- namedtuple(Capability) namedtuple that represents the Builder Workflow Config
def register_service(cls, primary_key_type):
    """Register an API service endpoint.

    :param cls: The class to register
    :param str primary_key_type: The type (as a string) of the
        primary_key field
    """
    view_func = cls.as_view(cls.__name__.lower())
    methods = set(cls.__model__.__methods__)
    if 'GET' in methods:
        # Collection endpoint (resource_id defaults to None) plus /meta.
        current_app.add_url_rule(
            cls.__model__.__url__ + '/',
            defaults={'resource_id': None},
            view_func=view_func,
            methods=['GET'])
        current_app.add_url_rule(
            '{resource}/meta'.format(resource=cls.__model__.__url__),
            view_func=view_func,
            methods=['GET'])
    if 'POST' in methods:
        current_app.add_url_rule(
            cls.__model__.__url__ + '/',
            view_func=view_func,
            methods=['POST', ])
    # Item endpoint handles every allowed method except POST.
    current_app.add_url_rule(
        '{resource}/<{pk_type}:{pk}>'.format(
            resource=cls.__model__.__url__,
            pk='resource_id',
            pk_type=primary_key_type),
        view_func=view_func,
        methods=methods - {'POST'})
    current_app.classes.append(cls)
Register an API service endpoint. :param cls: The class to register :param str primary_key_type: The type (as a string) of the primary_key field
def _get_final_set(self, sets, pk, sort_options):
    """Add intersects to *sets* and call the parent's ``_get_final_set``.

    If we have to sort by sorted-set score and we have a slice, the whole
    sorted set has to be converted to keys now; the temporary keys
    created for that are queued for later deletion.
    """
    if self._lazy_collection['intersects']:
        # Copy before extending so the lazy collection is not mutated.
        sets = sets[::]
        sets.extend(self._lazy_collection['intersects'])
        if not self._lazy_collection['sets'] and not self.stored_key:
            sets.append(self.cls.get_field('pk').collection_key)
    final_set, keys_to_delete_later = super(ExtendedCollectionManager, self)._get_final_set(
        sets, pk, sort_options)
    if final_set and self._sort_by_sortedset_before:
        base_tmp_key, tmp_keys = self._prepare_sort_by_score(None, sort_options)
        if not keys_to_delete_later:
            keys_to_delete_later = []
        keys_to_delete_later.append(base_tmp_key)
        keys_to_delete_later += tmp_keys
    return final_set, keys_to_delete_later
Add intersects fo sets and call parent's _get_final_set. If we have to sort by sorted score, and we have a slice, we have to convert the whole sorted set to keys now.
def prefix_shared_name_attributes(meta_graph, absolute_import_scope):
    """In-place prefixes the ``shared_name`` attribute of every node in
    *meta_graph* with *absolute_import_scope*."""
    shared_name_attr = "shared_name"
    for node in meta_graph.graph_def.node:
        shared_name_value = node.attr.get(shared_name_attr, None)
        # Only rewrite nodes that actually carry a non-empty string value.
        if shared_name_value and shared_name_value.HasField("s"):
            if shared_name_value.s:
                node.attr[shared_name_attr].s = tf.compat.as_bytes(
                    prepend_name_scope(
                        shared_name_value.s, import_scope=absolute_import_scope))
In-place prefixes shared_name attributes of nodes.
def _set_resultdir(name=None):
    """Set or get the directory used to store experiment results.

    Creates the directory if it doesn't exist (generating a unique
    timestamped name when *name* is None) and points ``SYMLINK_NAME``
    at it.

    Args:
        name (str): file path to an existing directory, absolute or
            relative to the current working directory.

    Returns:
        The file path of the results directory.

    Raises:
        EnosFilePathError: when the path exists and is a regular file.
    """
    resultdir_name = name or "enos_" + datetime.today().isoformat()
    resultdir_path = os.path.abspath(resultdir_name)
    if os.path.isfile(resultdir_path):
        raise EnosFilePathError(resultdir_path,
                                "Result directory cannot be created due "
                                "to existing file %s" % resultdir_path)
    if not os.path.isdir(resultdir_path):
        os.mkdir(resultdir_path)
        logger.info("Generate results directory %s" % resultdir_path)
    link_path = SYMLINK_NAME
    if os.path.lexists(link_path):
        os.remove(link_path)
    try:
        os.symlink(resultdir_path, link_path)
        logger.info("Symlink %s to %s" % (resultdir_path, link_path))
    except OSError:
        # Symlinks may be unsupported on some platforms; not fatal.
        logger.warning("Symlink %s to %s failed" % (resultdir_path, link_path))
    return resultdir_path
Set or get the directory to store experiment results. Looks at the `name` and create the directory if it doesn"t exist or returns it in other cases. If the name is `None`, then the function generates an unique name for the results directory. Finally, it links the directory to `SYMLINK_NAME`. Args: name (str): file path to an existing directory. It could be weather an absolute or a relative to the current working directory. Returns: the file path of the results directory.
def associate_keys(user_dict, client):
    # Re-associate ssh keys with machines from an exported user_dict.
    # Best-effort by design ("black magic"): failures for individual
    # machines are deliberately swallowed because of the way key-machine
    # association is kept.
    # NOTE(review): Python 2 print statements - this module targets py2.
    added_keys = user_dict['keypairs']
    print ">>>Updating Keys-Machines association"
    for key in added_keys:
        machines = added_keys[key]['machines']
        if machines:
            try:
                for machine in machines:
                    # machine layout: [cloud_id, machine_id, ?, ssh_user,
                    # ..., ssh_port] - presumably; confirm against exporter.
                    cloud_id = machine[0]
                    machine_id = machine[1]
                    ssh_user = machine[3]
                    ssh_port = machine[-1]
                    key = client.keys[key]
                    cloud = cloud_from_id(client, cloud_id)
                    cloud.update_machines()
                    mach = machine_from_id(cloud, machine_id)
                    public_ips = mach.info.get('public_ips', None)
                    if public_ips:
                        host = public_ips[0]
                    else:
                        host = ""
                    key.associate_to_machine(cloud_id=cloud_id,
                                             machine_id=machine_id,
                                             host=host, ssh_port=ssh_port,
                                             ssh_user=ssh_user)
                    print "associated machine %s" % machine_id
            except Exception as e:
                pass
    client.update_keys()
    print
This whole function is black magic, had to however cause of the way we keep key-machine association
def __get_tokens(self, row):
    """Yield tokenized lines from *row* (a single string) until the
    tokenizer returns a falsy value."""
    tokenizer = RowTokenizer(row, self.config)
    while True:
        line = tokenizer.next()
        if not line:
            break
        yield line
Row should be a single string
def delete_edge_by_id(self, edge_id):
    """Remove the edge identified by *edge_id* from the graph, detaching
    it from both endpoint nodes and updating the edge count."""
    edge = self.get_edge(edge_id)
    source_id, target_id = edge['vertices'][0], edge['vertices'][1]
    for node_id in (source_id, target_id):
        self.get_node(node_id)['edges'].remove(edge_id)
    del self.edges[edge_id]
    self._num_edges -= 1
Removes the edge identified by "edge_id" from the graph.
def find_by_id(self, submission_id):
    """Find a submission by ID, searching attacks, defenses and targeted
    attacks in that order.

    Args:
        submission_id: ID of the submission

    Returns:
        SubmissionDescriptor for the submission, or None when no pool
        contains *submission_id*.
    """
    for pool in (self._attacks, self._defenses, self._targeted_attacks):
        if submission_id in pool:
            return pool[submission_id]
    return None
Finds submission by ID. Args: submission_id: ID of the submission Returns: SubmissionDescriptor with information about submission or None if submission is not found.
def pad(attrs, inputs, proto_obj):
    """Map ONNX Pad attributes onto MXNet's ``pad`` operator."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'pads'  : 'pad_width',
                                                               'value' : 'constant_value'})
    # _pad_sequence_fix presumably reorders the ONNX pad sequence into
    # the layout MXNet expects - confirm against translation_utils.
    new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
    return 'pad', new_attrs, inputs
Add padding to input tensor
def match_handle(loc, tokens):
    """Process match blocks: compile the parsed tokens of a Coconut
    ``match`` statement into Python source.

    Raises:
        CoconutInternalException: on malformed tokens or match type.
    """
    # Tokens arrive with or without an optional guard condition.
    if len(tokens) == 4:
        matches, match_type, item, stmts = tokens
        cond = None
    elif len(tokens) == 5:
        matches, match_type, item, cond, stmts = tokens
    else:
        raise CoconutInternalException("invalid match statement tokens", tokens)
    # "not in" inverts the sense of the generated check.
    if match_type == "in":
        invert = False
    elif match_type == "not in":
        invert = True
    else:
        raise CoconutInternalException("invalid match type", match_type)
    matching = Matcher(loc, match_check_var)
    matching.match(matches, match_to_var)
    if cond:
        matching.add_guard(cond)
    return (
        match_to_var + " = " + item + "\n" +
        matching.build(stmts, invert=invert)
    )
Process match blocks.
def wait_for_bump(self, buttons, timeout_ms=None):
    """Wait for *buttons* to be pressed down and then released; both
    actions must happen within *timeout_ms* (None waits forever)."""
    started = time.time()
    if not self.wait_for_pressed(buttons, timeout_ms):
        return False
    if timeout_ms is not None:
        # Spend only the remaining budget waiting for the release.
        elapsed_ms = int((time.time() - started) * 1000)
        timeout_ms -= elapsed_ms
    return self.wait_for_released(buttons, timeout_ms)
Wait for the button to be pressed down and then released. Both actions must happen within timeout_ms.
def namer(cls, imageUrl, pageUrl):
    """Use the page URL to construct a meaningful image name of the form
    ``<year>-<month>-<stripname>-<imagename>``."""
    _, year, month, page_file = pageUrl.rsplit('/', 3)
    strip_name = page_file.rsplit('.', 1)[0]
    _, image_name = imageUrl.rsplit('/', 1)
    return '%s-%s-%s-%s' % (year, month, strip_name, image_name)
Use page URL to construct meaningful image name.
def _call(self, x): return sum(fi(xi) for xi, fi in zip(x, self.functionals))
Return the separable sum evaluated in ``x``.
def replace_filehandler(logname, new_file, level=None, frmt=None):
    """Swap the FileHandler on logger ``logname`` for one writing to ``new_file``.

    Parameters:
        logname: name of the logger to reconfigure.
        new_file: file location for the new FileHandler.
        level: optional level name (e.g. 'INFO') for the new handler;
            inherited from the old FileHandler when omitted.
        frmt: optional format string; inherited from the old
            FileHandler when omitted.

    When neither an explicit value nor an old handler provides a
    setting, the level falls back to DEBUG and the format to
    STANDARD_FORMAT.
    """
    log = logging.getLogger(logname)

    # Explicitly supplied settings win; otherwise we may inherit from a
    # pre-existing FileHandler below.
    explicit_level = level is not None
    explicit_frmt = frmt is not None
    level = get_level(level) if explicit_level else logging.DEBUG
    frmt = logging.Formatter(frmt if explicit_frmt else STANDARD_FORMAT)

    # Find (and remember) the previous FileHandler, inheriting its
    # level/format where no explicit value was provided.  The exact
    # type match mirrors the original behaviour: FileHandler
    # subclasses (e.g. RotatingFileHandler) are left untouched.
    old_filehandler = None
    for handler in log.handlers:
        if type(handler) == logging.FileHandler:
            old_filehandler = handler
            if not explicit_level:
                level = handler.level
            if not explicit_frmt:
                frmt = handler.formatter
            break

    replacement = logging.FileHandler(new_file)
    replacement.setLevel(level)
    replacement.setFormatter(frmt)
    log.addHandler(replacement)

    if old_filehandler is not None:
        old_filehandler.close()
        log.removeHandler(old_filehandler)
This utility function will remove a previous Logger FileHandler, if one exists, and add a new filehandler. Parameters: logname The name of the log to reconfigure, 'openaccess_epub' for example new_file The file location for the new FileHandler level Optional. Level of FileHandler logging, if not used then the new FileHandler will have the same level as the old. Pass in name strings, 'INFO' for example frmt Optional string format of Formatter for the FileHandler, if not used then the new FileHandler will inherit the Formatter of the old, pass in format strings, '%(message)s' for example It is best practice to use the optional level and frmt arguments to account for the case where a previous FileHandler does not exist. In the case that they are not used and a previous FileHandler is not found, then the level will be set logging.DEBUG and the frmt will be set to openaccess_epub.utils.logs.STANDARD_FORMAT as a matter of safety.
def Shift(self, term):
    """Return a copy of this object with ``term`` added to every x value.

    term: amount to add to each element of ``xs``.
    """
    shifted = self.Copy()
    # Shift the copy's xs in bulk; the original stays untouched.
    shifted.xs = [x + term for x in shifted.xs]
    return shifted
Adds a term to the xs. term: how much to add
def isNonNegative(matrix):
    """Check that ``matrix`` is non-negative.

    Handles dense arrays, sparse matrices (checking only their stored
    ``.data`` entries) and anything convertible with ``numpy.array``.

    Returns
    -------
    bool
        ``True`` if every entry of ``matrix`` is >= 0, ``False`` otherwise.
    """
    try:
        # Dense case: elementwise comparison on the object itself.
        if (matrix >= 0).all():
            return True
    except (NotImplementedError, AttributeError, TypeError):
        try:
            # Sparse case: only the stored entries need checking.
            if (matrix.data >= 0).all():
                return True
        except AttributeError:
            # Fallback: coerce to an ndarray and compare elementwise.
            # BUGFIX: the previous code compared ``matrix.data`` -- the
            # raw memoryview buffer of the new ndarray -- which raises
            # TypeError on Python 3 instead of testing the values.
            matrix = _np.array(matrix)
            if (matrix >= 0).all():
                return True
    return False
Check that ``matrix`` is non-negative. Returns ======= is_non_negative : bool ``True`` if ``matrix`` is non-negative, ``False`` otherwise.
def build_graph(self):
    """Build the TF graph wrapping InceptionV3 for embedding extraction.

    Decodes a raw JPEG string, resizes it to the model input size,
    rescales pixel values to [-1, 1] and feeds it through InceptionV3,
    exposing the 'PreLogits' activations as the embedding.

    Returns:
        input_jpeg: placeholder tensor taking raw JPEG bytes.
        embedding: the PreLogits embedding tensor.
    """
    import tensorflow as tf
    input_jpeg = tf.placeholder(tf.string, shape=None)
    image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)
    # The model expects a batch dimension.
    image = tf.expand_dims(image, 0)
    # Convert to float in [0, 1] before resizing.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.image.resize_bilinear(
        image, [self.HEIGHT, self.WIDTH], align_corners=False)
    # Rescale from [0, 1] to [-1, 1], the range InceptionV3 expects.
    image = tf.subtract(image, 0.5)
    inception_input = tf.multiply(image, 2.0)
    with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
        _, end_points = _inceptionlib.inception_v3(inception_input, is_training=False)
    embedding = end_points['PreLogits']
    return input_jpeg, embedding
Forms the core by building a wrapper around the inception graph. Here we add the necessary input & output tensors, to decode jpegs, serialize embeddings, restore from checkpoint etc. To use other Inception models modify this file. Note that to use other models beside Inception, you should make sure input_shape matches their input. Resizing or other modifications may be necessary as well. See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for details about InceptionV3. Returns: input_jpeg: A tensor containing raw image bytes as the input layer. embedding: The embeddings tensor, that will be materialized later.
def read_cf1_config(self):
    """Read the CF1 configuration block from flash.

    The configuration lives in the last flash page of bootloader
    target 0xFF; fetch that page via the bootloader connection.
    """
    target = self._cload.targets[0xFF]
    last_page = target.flash_pages - 1
    return self._cload.read_flash(addr=0xFF, page=last_page)
Read the CF1 configuration block, stored in the last flash page of bootloader target 0xFF.
def is_directory(path, use_sudo=False):
    """Check whether the remote ``path`` exists and is a directory.

    :param path: Remote path to check.
    :param use_sudo: Run the check through ``sudo``.
    :return: True for a directory, False for a file, None if the path
        does not exist.
    """
    # The remote shell prints 0 for a file, 1 for a directory and -1
    # when the path does not exist at all.
    probe = ('if [[ -f {0} ]]; then echo 0; '
             'elif [[ -d {0} ]]; then echo 1; '
             'else echo -1; fi').format(path)
    result = single_line_stdout(probe, sudo=use_sudo, quiet=True)
    if result == '1':
        return True
    if result == '0':
        return False
    return None
Check if the remote path exists and is a directory. :param path: Remote path to check. :type path: unicode :param use_sudo: Use the `sudo` command. :type use_sudo: bool :return: `True` if the path exists and is a directory; `False` if it exists, but is a file; `None` if it does not exist. :rtype: bool or ``None``
def initialize_logging(self):
    """Reset logging for the service process.

    Strips every handler from every known logger, then routes all
    records through a callback handler that forwards them to the
    frontend.  Any filtering must happen on the service side.
    """
    # Remove all handlers from the root logger and every named logger.
    for loggername in [None] + list(logging.Logger.manager.loggerDict.keys()):
        logger = logging.getLogger(loggername)
        while logger.handlers:
            logger.removeHandler(logger.handlers[0])
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.WARN)
    # Forward everything to the frontend via the transport callback.
    root_logger.addHandler(workflows.logging.CallbackHandler(self._log_send))
    self.log = logging.getLogger(self._logger_name)
    # Verbose mode lowers this service's own log threshold.
    if self.start_kwargs.get("verbose_log"):
        self.log_verbosity = logging.DEBUG
    self.log.setLevel(self.log_verbosity)
    # Keep a console handler, but only for CRITICAL records.
    console = logging.StreamHandler()
    console.setLevel(logging.CRITICAL)
    root_logger.addHandler(console)
Reset the logging for the service process. All logged messages are forwarded to the frontend. If any filtering is desired, then this must take place on the service side.
def safe_lshift(a, b):
    """Left-shift ``a`` by ``b`` bits, refusing oversized shifts.

    :raises RuntimeError: when ``b`` exceeds MAX_SHIFT.
    """
    # Guard against runaway shifts that would allocate huge integers.
    if not b <= MAX_SHIFT:
        raise RuntimeError(
            "Invalid left shift, max left shift is {}".format(MAX_SHIFT))
    return a << b
safe version of lshift
def get_index_by_id(self, id):
    """Return the index of the vertex whose ``id`` matches.

    :raises ValueError: when no vertex carries the given id.
    """
    for index, vertex in enumerate(self.vertices):
        if vertex.id == id:
            return index
    raise ValueError('Reverse look up of id failed.')
Give the index associated with a given vertex id.
def unproxy(possible_proxy):
    """Return the real object behind ``possible_proxy``.

    Repeatedly unwraps ThreadLocalProxy layers until a plain object is
    reached; non-proxy inputs are returned unchanged.
    """
    obj = possible_proxy
    # Proxies may wrap other proxies, so keep unwrapping.
    while isinstance(obj, ThreadLocalProxy):
        obj = ThreadLocalProxy.get_reference(obj)
    return obj
Unwrap and return the object referenced by a proxy. This function is very similar to :func:`get_reference`, but works for both proxies and regular objects. If the specified object is a proxy, its reference is extracted with ``get_reference`` and returned. If it is not a proxy, it is returned as is. If the object references by the proxy is itself a proxy, the unwrapping is repeated until a regular (non-proxy) object is found. possible_proxy: object that might or might not be a proxy.
def show_content(self, state_model):
    """Check the state-machine-tree-specific "show content" flag.

    Returns True if the uppermost library state above ``state_model``
    has show-content enabled, or if there is no library root state
    above it at all.

    :param rafcon.gui.models.abstract_state.AbstractStateModel state_model:
        The state model to check.
    """
    upper_most_lib_state_m = None
    if isinstance(state_model, LibraryStateModel):
        uppermost_library_root_state = state_model.state.get_uppermost_library_root_state()
        if uppermost_library_root_state is None:
            # No library root above: this model is itself the uppermost.
            upper_most_lib_state_m = state_model
        else:
            # Resolve the model of the library state owning that root.
            upper_lib_state = uppermost_library_root_state.parent
            upper_most_lib_state_m = self._selected_sm_model.get_state_model_by_path(upper_lib_state.get_path())
    if upper_most_lib_state_m:
        return upper_most_lib_state_m.show_content()
    else:
        # Not inside a library: content is always shown.
        return True
Check the state-machine-tree-specific show-content flag. Returns true if the uppermost library state of a state model has an enabled show-content flag, or if there is no library root state above this state. :param rafcon.gui.models.abstract_state.AbstractStateModel state_model: The state model to check
def minimize_source(source):
    """Strip comments and docstrings from Python ``source``.

    Line numbers and the syntax of empty blocks are preserved.

    :param str source: the source to minimize.
    :returns str: the minimized source.
    """
    text = mitogen.core.to_text(source)
    tokens = tokenize.generate_tokens(StringIO(text).readline)
    # Apply each token-stream transformation in turn.
    for transform in (strip_comments, strip_docstrings, reindent):
        tokens = transform(tokens)
    return tokenize.untokenize(tokens)
Remove comments and docstrings from Python `source`, preserving line numbers and syntax of empty blocks. :param str source: The source to minimize. :returns str: The minimized source.
def optional_install():
    """Generate configuration settings for optional ISAMBARD tools.

    Prompts for the reduce, naccess and ProFit executables; each is
    optional and a skipped answer leaves the corresponding path blank.
    """
    def path_or_blank(path):
        # get_user_path may return None when the tool is skipped;
        # BUGFIX: str(None) would store the literal string 'None' in
        # the settings, so map a missing path to an empty string.
        return str(path) if path else ''

    print('{BOLD}Setting up Reduce (optional){END_C}'.format(**text_colours))
    reduce_path = get_user_path('Please provide a path to your reduce executable.', required=False)
    settings['reduce'] = {
        'path': path_or_blank(reduce_path),
        'folder': str(reduce_path.parent) if reduce_path else '',
    }

    print('{BOLD}Setting up naccess (optional){END_C}'.format(**text_colours))
    naccess_path = get_user_path('Please provide a path to your naccess executable.', required=False)
    settings['naccess'] = {'path': path_or_blank(naccess_path)}

    print('{BOLD}Setting up ProFit (optional){END_C}'.format(**text_colours))
    profit_path = get_user_path('Please provide a path to your ProFit executable.', required=False)
    settings['profit'] = {'path': path_or_blank(profit_path)}
    return
Generates configuration settings for optional functionality of ISAMBARD.
def _GenerateNonImplementedMethod(self, method):
    """Return a stub service method reporting "not implemented".

    Args:
        method: Descriptor of the service method to stub out.

    Returns:
        A callable suitable for attaching to the service class; when
        invoked it delegates to _NonImplementedMethod with the
        method's name.
    """
    def stub(inst, rpc_controller, request, callback):
        # The request payload is ignored; only the method name matters
        # for the "not implemented" report.
        return self._NonImplementedMethod(method.name, rpc_controller, callback)
    return stub
Generates and returns a method that can be set for a service methods. Args: method: Descriptor of the service method for which a method is to be generated. Returns: A method that can be added to the service class.
def _find_adapter(registry, ob):
    """Return an adapter factory for ``ob`` from ``registry``.

    Walks the MRO of ``ob``'s class (or its type, for classic objects)
    and returns the factory registered for the first matching type;
    returns None when nothing matches.
    """
    klass = getattr(ob, '__class__', type(ob))
    for candidate in _always_object(inspect.getmro(klass)):
        try:
            return registry[candidate]
        except KeyError:
            continue
Return an adapter factory for `ob` from `registry`
def make_bin_array(bins) -> np.ndarray:
    """Turn bin data into the 2-column array used by HistogramXX classes.

    Parameters
    ----------
    bins : array_like
        Either a 1-D array of edges or a 2-D array of (left, right)
        edge pairs.

    Returns
    -------
    np.ndarray, shape=(n, 2)

    Examples
    --------
    >>> make_bin_array([0, 1, 2])
    array([[0, 1],
           [1, 2]])
    """
    bins = np.asarray(bins)
    if bins.ndim == 2:
        if bins.shape[1] != 2:
            raise RuntimeError("Binning schema with ndim==2 must have 2 columns")
        return bins
    if bins.ndim == 1:
        # Pair consecutive edges into (left, right) columns.
        return np.hstack((bins[:-1, np.newaxis], bins[1:, np.newaxis]))
    raise RuntimeError("Binning schema must have ndim==1 or ndim==2")
Turn bin data into array understood by HistogramXX classes. Parameters ---------- bins: array_like Array of edges or array of edge tuples Examples -------- >>> make_bin_array([0, 1, 2]) array([[0, 1], [1, 2]]) >>> make_bin_array([[0, 1], [2, 3]]) array([[0, 1], [2, 3]])
def thread(function):
    """Run the decorated callable in a concurrent thread.

    The wrapped function returns a ``concurrent.futures.Future`` that
    will hold the result (or error) of the call.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):
        # The handler thread resolves this future with the outcome.
        result = Future()
        launch_thread(_function_handler, function, args, kwargs, result)
        return result
    return wrapper
Runs the decorated function within a concurrent thread, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called.
def agent_reqs():
    """Verify check versions are in sync with requirements-agent-release.txt.

    Prints a per-category summary and aborts the command when any check
    is pinned to the wrong version.
    """
    echo_info("Validating requirements-agent-release.txt...")
    agent_reqs_content = parse_agent_req_file(read_file(get_agent_release_requirements()))
    ok_checks = 0
    unreleased_checks = 0
    failed_checks = 0
    for check_name in get_valid_checks():
        # Agent-v5-only integrations and non-check folders are not
        # expected in the release requirements file.
        if check_name in AGENT_V5_ONLY | NOT_CHECKS:
            continue
        package_name = get_package_name(check_name)
        check_version = get_version_string(check_name)
        pinned_version = agent_reqs_content.get(package_name)
        if package_name not in agent_reqs_content:
            unreleased_checks += 1
            echo_warning('{} has not yet been released'.format(check_name))
        elif check_version != pinned_version:
            failed_checks += 1
            echo_failure("{} has version {} but is pinned to {}".format(
                check_name, check_version, pinned_version))
        else:
            ok_checks += 1
    if ok_checks:
        echo_success("{} correctly pinned checks".format(ok_checks))
    if unreleased_checks:
        echo_warning("{} unreleased checks".format(unreleased_checks))
    if failed_checks:
        echo_failure("{} checks out of sync".format(failed_checks))
        # Only abort when something is actually out of sync; an
        # unconditional abort would fail the command even when every
        # check validated cleanly.
        abort()
Verify that the checks versions are in sync with the requirements-agent-release.txt file
def match_event_roll_lengths(event_roll_a, event_roll_b, length=None):
    """Fix two event rolls to a common length.

    Parameters
    ----------
    event_roll_a : np.ndarray, shape=(m1, k)
        Event roll A.
    event_roll_b : np.ndarray, shape=(m2, k)
        Event roll B.
    length : int, optional
        Target length; defaults to the longer of the two rolls.

    Returns
    -------
    (event_roll_a, event_roll_b)
        Both rolls truncated or zero-padded to the target length.
    """
    if length is None:
        length = max(event_roll_a.shape[0], event_roll_b.shape[0])
    else:
        length = int(length)

    def fit(roll):
        # Truncate rolls that are too long; pad everything else.
        if roll.shape[0] > length:
            return roll[0:length, :]
        return pad_event_roll(event_roll=roll, length=length)

    return fit(event_roll_a), fit(event_roll_b)
Fix the length of two event rolls Parameters ---------- event_roll_a: np.ndarray, shape=(m1,k) Event roll A event_roll_b: np.ndarray, shape=(m2,k) Event roll B length: int, optional Length of the event roll, if none given, shorter event roll is padded to match longer one. Returns ------- event_roll_a: np.ndarray, shape=(max(m1,m2),k) Padded event roll A event_roll_b: np.ndarray, shape=(max(m1,m2),k) Padded event roll B
def format_help(self):
    """Set up all lazy sub-parsers before delegating to argparse's help.

    Sub-parsers are created lazily; help output must include all of
    them, so force their setup first.
    """
    if self._subparsers:
        for action in self._subparsers._actions:
            if isinstance(action, LazySubParsersAction):
                # NOTE: dict.iteritems() means this code targets Python 2.
                for parser_name, parser in action._name_parser_map.iteritems():
                    action._setup_subparser(parser_name, parser)
    return super(LazyArgumentParser, self).format_help()
Sets up all sub-parsers when help is requested.
def modify_class(original_class, modifier_class, override=True):
    """Copy methods from ``modifier_class`` onto ``original_class``.

    When ``override`` is False, methods already present on
    ``original_class`` are left untouched; otherwise they are replaced.
    (Python 2 code: relies on unbound-method objects and ``im_func``.)
    """
    modifier_methods = inspect.getmembers(modifier_class, inspect.ismethod)
    for name, method in modifier_methods:
        if isinstance(method, types.UnboundMethodType):
            if hasattr(original_class, name) and not override:
                # BUGFIX: skip just this method.  The original code
                # returned here, silently dropping every remaining
                # method as soon as one existing method was found.
                continue
            setattr(original_class, name, method.im_func)
Adds class methods from modifier_class to original_class. If override is True, existing methods in original_class are overridden by those provided by modifier_class.
def get_config(self):
    """Return the configuration of this BoltzmannGumbelQPolicy.

    # Returns
        Dict containing the parent policy's config plus the C constant.
    """
    config = super(BoltzmannGumbelQPolicy, self).get_config()
    # Add this policy's own exploration constant on top.
    config.update(C=self.C)
    return config
Return configurations of BoltzmannGumbelQPolicy # Returns Dict of config
def delete_migration(connection, basename):
    """Remove a migration record from the ``migrations_applied`` table.

    :param connection: open DB-API connection (``format`` paramstyle).
    :param basename: migration name to delete.
    :return: True once the deletion has been committed.
    """
    statement = "DELETE FROM migrations_applied WHERE name = %s"
    # Parameterized query -- basename is never interpolated directly.
    with connection.cursor() as cursor:
        cursor.execute(statement, (basename,))
    connection.commit()
    return True
Delete a migration in `migrations_applied` table
def validate(self, instance, value):
    """Validate (and possibly coerce) ``value`` as an array property.

    Checks the container type, dtype kind and shape against this
    property's configuration; delegates to ``self.error`` on any
    violation.  Returns the (possibly wrapped) array when valid.
    """
    if not isinstance(value, (tuple, list, np.ndarray)):
        self.error(instance, value)
    # Optionally coerce plain sequences into the configured wrapper.
    if self.coerce:
        value = self.wrapper(value)
    valid_class = (
        self.wrapper if isinstance(self.wrapper, type) else np.ndarray
    )
    if not isinstance(value, valid_class):
        self.error(instance, value)
    # dtype kind characters allowed for this property.
    allowed_kinds = ''.join(TYPE_MAPPINGS[typ] for typ in self.dtype)
    if value.dtype.kind not in allowed_kinds:
        self.error(instance, value, extra='Invalid dtype.')
    if self.shape is None:
        return value
    # Accept the first declared shape pattern that matches; '*' acts
    # as a wildcard for any extent along that dimension.
    for shape in self.shape:
        if len(shape) != value.ndim:
            continue
        for i, shp in enumerate(shape):
            if shp not in ('*', value.shape[i]):
                break
        else:
            return value
    self.error(instance, value, extra='Invalid shape.')
Determine if array is valid based on shape and dtype
def install(self, release_id):
    """Download and install an artifact into the remote release directory.

    Creates the per-release directory when it does not yet exist, then
    downloads the artifact (optionally installing it under
    ``self._remote_name`` instead of its URL basename).

    :param str release_id: timestamp-based identifier of the release.
    :return: result of the downloader call.
    """
    release_path = os.path.join(self._releases, release_id)
    # Create the release directory on first use.
    if not self._runner.exists(release_path):
        self._runner.run("mkdir -p '{0}'".format(release_path))
    # Either install under an alias or keep the artifact's own name.
    if self._remote_name is not None:
        filename = self._remote_name
    else:
        filename = self._get_file_from_url(self._artifact_url)
    destination = os.path.join(release_path, filename)
    return self._downloader(
        self._artifact_url,
        destination,
        retries=self._retries,
        retry_delay=self._retry_delay,
    )
Download and install an artifact into the remote release directory, optionally with a different name than the artifact had. If the directory for the given release ID does not exist on the remote system, it will be created. The directory will be created according to the standard Tunic directory structure (see :doc:`design`). :param str release_id: Timestamp-based identifier for this deployment. :return: The results of the download function being run. This return value should be the result of running a command with Fabric. By default this will be the result of running ``wget``.
def _force(self, obj, objtype=None):
    """Force a new dynamic value to be generated, and return it.

    Falls back to the plain descriptor value when the underlying
    generator has never produced a dynamic value before.
    """
    gen = super(Dynamic, self).__get__(obj, objtype)
    # _Dynamic_last marks generators that have produced values before;
    # only those can be forced to produce a fresh one.
    if hasattr(gen, '_Dynamic_last'):
        return self._produce_value(gen, force=True)
    else:
        return gen
Force a new value to be generated, and return it.
def create_media_asset(access_token, name, options="0"):
    """Create a Media Service Asset.

    Args:
        access_token (str): A valid Azure authentication token.
        name (str): Media Service Asset name.
        options (str): Media Service creation options.

    Returns:
        HTTP response with a JSON body.
    """
    import json
    path = '/Assets'
    endpoint = ''.join([ams_rest_endpoint, path])
    # Serialize with json.dumps so quotes/backslashes in the asset
    # name are escaped correctly; the previous string concatenation
    # produced invalid JSON for such names.
    body = json.dumps({"Name": name, "Options": str(options)})
    return do_ams_post(endpoint, path, body, access_token)
Create Media Service Asset. Args: access_token (str): A valid Azure authentication token. name (str): Media Service Asset Name. options (str): Media Service Options. Returns: HTTP response. JSON body.
def _build_join(t):
    """Populate the name/alias fields of a join token's source.

    The parsed alias, when present, is a sequence whose first element
    is the alias text; otherwise the alias is left empty.
    """
    source = t.source
    source.name = source.parsed_name
    source.alias = source.parsed_alias[0] if source.parsed_alias else ''
    return t
Populates join token fields.
def styleInheritedByChild(node, style, nodeIsChild=False):
    """Return whether ``style`` could be inherited by children of ``node``.

    If False is returned, ``style`` can safely be removed from the node
    without influencing the visual output of its children.  Only
    presentation attributes and inline styles are considered; style
    sheets are ignored.
    """
    if node.nodeType != Node.ELEMENT_NODE:
        return False
    if nodeIsChild:
        # A child that sets the property itself (to anything except
        # 'inherit') blocks inheritance further down the tree.
        if node.getAttribute(style) not in ['', 'inherit']:
            return False
        styles = _getStyle(node)
        if (style in styles) and not (styles[style] == 'inherit'):
            return False
    else:
        if not node.childNodes:
            return False
    # Recurse: any descendant that can inherit the style keeps it alive.
    if node.childNodes:
        for child in node.childNodes:
            if styleInheritedByChild(child, style, True):
                return True
    # Pure container elements cannot render the style themselves.
    if node.nodeName in ['a', 'defs', 'glyph', 'g', 'marker', 'mask',
                         'missing-glyph', 'pattern', 'svg', 'switch', 'symbol']:
        return False
    return True
Returns whether 'style' is inherited by any children of the passed-in node. If False is returned, it is guaranteed that 'style' can safely be removed from the passed-in node without influencing the visual output of its children. If True is returned, the passed-in node should not have its text-based attributes removed. Warning: This method only considers presentation attributes and inline styles; any style sheets are ignored!
def execute_step(self, step):
    """Execute ``step`` and collect its output by entity type.

    Feeds the step the inputs it declares, gathers outputs from both
    the run and finalise phases, then removes the step from the
    unresolved set.

    :param step: step to execute
    :return: dict mapping entity type to the list of produced entities
    """
    inputs = self.get_inputs(step.ins)
    outputs = defaultdict(list)

    def collect(produced):
        # Merge one output mapping into the accumulated results.
        for entity_type, entities in produced.items():
            outputs[entity_type].extend(entities)

    for produced in step.run(inputs):
        collect(produced)
    for produced in step.finalise():
        collect(produced)
    self.unresolved_steps.remove(step)
    return outputs
Execute the named step. Also control the multiplicity of input and output entities :param step: step to prepare input for :param kwargs: input to be prepared :return: dict of output by entity type
def _get_file(self):
    """Return an open NamedTemporaryFile pointer.

    The file is created with delete=False and its name is recorded in
    ``self.tmp_files`` so it can be cleaned up later.
    """
    handle = tempfile.NamedTemporaryFile(delete=False)
    # Track the path for explicit cleanup: delete=False leaves the
    # file behind when the handle is closed.
    self.tmp_files.add(handle.name)
    return handle
return an opened tempfile pointer that can be used http://docs.python.org/2/library/tempfile.html
def loadJSON(self, json_string):
    """Set the run parameters from a JSON string.

    Only the fields present (and non-null) in the decoded 'user'
    mapping are applied; everything else is left untouched.
    """
    g = get_root(self).globals
    user = json.loads(json_string)['user']

    def setField(widget, field):
        # Apply a value only when the JSON actually carries it.
        val = user.get(field)
        if val is not None:
            widget.set(val)

    setField(self.prog_ob.obid, 'OB')
    setField(self.target, 'target')
    setField(self.prog_ob.progid, 'ID')
    setField(self.pi, 'PI')
    setField(self.observers, 'Observers')
    setField(self.comment, 'comment')
    setField(self.filter, 'filters')
    setField(g.observe.rtype, 'flags')
Sets the values of the run parameters given an JSON string
def random_game(nums_actions, random_state=None):
    """Return a NormalFormGame with i.i.d. uniform[0, 1) payoffs.

    Parameters
    ----------
    nums_actions : tuple(int)
        Number of actions for each player.
    random_state : int or np.random.RandomState, optional
        Seed or RandomState for reproducibility; a fresh RandomState
        is used when None.

    Returns
    -------
    g : NormalFormGame
    """
    num_players = len(nums_actions)
    if num_players == 0:
        raise ValueError('nums_actions must be non-empty')
    random_state = check_random_state(random_state)
    # Player i's payoff array is indexed by its own action first,
    # hence the cyclic rotation of nums_actions.
    players = []
    for i in range(num_players):
        shape = nums_actions[i:] + nums_actions[:i]
        players.append(Player(random_state.random_sample(shape)))
    return NormalFormGame(players)
Return a random NormalFormGame instance where the payoffs are drawn independently from the uniform distribution on [0, 1). Parameters ---------- nums_actions : tuple(int) Tuple of the numbers of actions, one for each player. random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- g : NormalFormGame
def execute_ls(host_list, remote_user, remote_pass):
    """Run ``ls -1`` on every host via Ansible's command module.

    (Python 2 code: uses a print statement.)
    """
    runner = spam.ansirunner.AnsibleRunner()
    result, failed_hosts = runner.ansible_perform_operation(
        host_list=host_list,
        remote_user=remote_user,
        remote_pass=remote_pass,
        module="command",
        module_args="ls -1")
    print "Result: ", result
Execute an ``ls -1`` command on the hosts via Ansible's command module.
def sample_poly(self, poly, penalty_strength=1.0,
                keep_penalty_variables=False,
                discard_unsatisfied=False, **parameters):
    """Sample from a binary polynomial via quadratic reduction.

    Reduces the higher-order polynomial to a binary quadratic model by
    introducing penalty terms, samples it with the child sampler and
    maps the samples back onto the polynomial's variables.

    Args:
        poly (:obj:`.BinaryPolynomial`): A binary polynomial.
        penalty_strength (float, optional): Strength of the reduction
            constraints; too weak a value can change the minimization.
        keep_penalty_variables (bool, optional): If False, auxiliary
            penalty variables are removed from the samples.
        discard_unsatisfied (bool, optional): If True, samples that
            violate the penalty conditions are discarded.
        **parameters: Passed through to the child sampler.

    Returns:
        :obj:`dimod.SampleSet`
    """
    # Reduce to quadratic while preserving the polynomial's vartype.
    bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
    child_response = self.child.sample(bqm, **parameters)
    # Translate quadratic samples back to the polynomial variables.
    return polymorph_response(
        child_response, poly, bqm,
        penalty_strength=penalty_strength,
        keep_penalty_variables=keep_penalty_variables,
        discard_unsatisfied=discard_unsatisfied,
    )
Sample from the given binary polynomial. Takes the given binary polynomial, introduces penalties, reduces the higher-order problem into a quadratic problem and sends it to its child sampler. Args: poly (:obj:`.BinaryPolynomial`): A binary polynomial. penalty_strength (float, optional): Strength of the reduction constraint. Insufficient strength can result in the binary quadratic model not having the same minimization as the polynomial. keep_penalty_variables (bool, optional): default is False. If False, the variables used for penalty are removed from the samples. discard_unsatisfied (bool, optional): default is False. If True will discard samples that do not satisfy the penalty conditions. **parameters: Parameters for the sampling method, specified by the child sampler. Returns: :obj:`dimod.SampleSet`
def read(self):
    """Read the CSV file and return its contents as a list of rows.

    :raises AssertionError: when ``self.file_path`` is not a file.
    """
    # Raise explicitly rather than via `assert`: assert statements are
    # stripped under python -O, which would let a missing file slip
    # through to a confusing open() failure.  AssertionError is kept
    # so existing callers see the same exception type.
    if not os.path.isfile(self.file_path):
        raise AssertionError('No such file exists: ' + str(self.file_path))
    with open(self.file_path, 'r') as f:
        loaded_data = list(csv_builtin.reader(f))
    return juggle_types(loaded_data)
Reads CSV file and returns list of contents
def __get_bit_values(self, number, size=32):
    """Return the bits of ``number`` as a list, most significant first.

    The bit generator yields least-significant bits first; the result
    is reversed and zero-padded on the left up to ``size`` entries.
    Override the default 32-bit word size to match your application.
    """
    bits = list(self.__gen_bit_values(number))
    bits.reverse()
    # Left-pad with zeros to the requested word size.
    padding = [0] * (size - len(bits))
    return padding + bits
Get bit values as a list for a given number >>> get_bit_values(1) == [0]*31 + [1] True >>> get_bit_values(0xDEADBEEF) [1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L] You may override the default word size of 32-bits to match your actual application. >>> get_bit_values(0x3, 2) [1L, 1L] >>> get_bit_values(0x3, 4) [0L, 0L, 1L, 1L]
def delete(self):
    """Delete the current curve.

    :raises RuntimeError: when the curve is read-only.
    """
    # Guard clause: read-only curves must never be deleted.
    if not self._writeable:
        raise RuntimeError('Can not delete read-only curves.')
    self._write(('CRVDEL', Integer), self.idx)
Deletes the current curve. :raises RuntimeError: Raised when one tries to delete a read-only curve.
def data(self):
    """Return the raw body data sent with the request.

    A missing or malformed CONTENT_LENGTH is treated as zero bytes.
    """
    try:
        body_size = int(self.environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        body_size = 0
    return self.environ['wsgi.input'].read(body_size)
Returns the data sent with the request.
def configure_sentry_errors(self):
    """Make 'sentry.errors' log through the same handlers as root.

    Sentry's internal error logger does not reach the root handlers on
    its own, so mirror the root handlers onto it explicitly.
    """
    sentry_errors_logger = logging.getLogger('sentry.errors')
    for root_handler in logging.getLogger().handlers:
        sentry_errors_logger.addHandler(root_handler)
Configure sentry.errors to use the same loggers as the root handler @rtype: None
def _list_function_infos(jvm):
    """Return a name-sorted list of built-in SQL function infos via the JVM.

    Each JVM ExpressionInfo is converted to the Python ExpressionInfo
    wrapper, with the _FUNC_ placeholder replaced by the real name in
    the usage, arguments and examples text.
    """
    jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos()
    infos = []
    for jinfo in jinfos:
        name = jinfo.getName()
        usage = jinfo.getUsage()
        # Usage may be absent; substitute the name only when present.
        usage = usage.replace("_FUNC_", name) if usage is not None else usage
        infos.append(ExpressionInfo(
            className=jinfo.getClassName(),
            name=name,
            usage=usage,
            arguments=jinfo.getArguments().replace("_FUNC_", name),
            examples=jinfo.getExamples().replace("_FUNC_", name),
            note=jinfo.getNote(),
            since=jinfo.getSince(),
            deprecated=jinfo.getDeprecated()))
    return sorted(infos, key=lambda i: i.name)
Returns a list of function information via JVM. Sorts wrapped expression infos by name and returns them.
def set(self, key, value):
    """Set a value in the `Bison` configuration.

    Args:
        key (str): The configuration key to set a new value for.
        value: The value to set.
    """
    # Invalidate the cached merged config; it is rebuilt lazily.
    self._full_config = None
    self._override[key] = value
Set a value in the `Bison` configuration. Args: key (str): The configuration key to set a new value for. value: The value to set.