docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Sets default attributes when None. Args: default_attr: dict. Key-val of attr, default-value.
def _set_default_attr(self, default_attr): for attr, val in six.iteritems(default_attr): if getattr(self, attr, None) is None: setattr(self, attr, val)
1,141,801
Serialize self as JSON Args: drop_null: bool, default True. Remove 'empty' attributes. See to_dict. camel: bool, default True. Convert keys to camelCase. indent: int, default None. See json built-in. sort_keys: bool, default False. See json built-...
def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False): return json.dumps(self.to_dict(drop_null, camel), indent=indent, sort_keys=sort_keys)
1,141,802
Serialize self as dict. Args: drop_null: bool, default True. Remove 'empty' attributes. camel: bool, default True. Convert keys to camelCase. Return: dict: object params.
def to_dict(self, drop_null=True, camel=False): #return _to_dict(self, drop_null, camel) def to_dict(obj, drop_null, camel): if isinstance(obj, (Body, BodyChild)): obj = obj.__dict__ if isinstance(obj, dict): data = {} ...
1,141,803
Parse JSON request, storing content in object attributes. Args: body: str. HTTP request body. Returns: self
def parse(self, body): if isinstance(body, six.string_types): body = json.loads(body) # version version = body['version'] self.version = version # session session = body['session'] self.session.new = session['new'] self.session.sessi...
1,141,805
Set response output speech as plain text type. Args: text: str. Response speech used when type is 'PlainText'. Cannot exceed 8,000 characters.
def set_speech_text(self, text): self.response.outputSpeech.type = 'PlainText' self.response.outputSpeech.text = text
1,141,815
Set response output speech as SSML type. Args: ssml: str. Response speech used when type is 'SSML', should be formatted with Speech Synthesis Markup Language. Cannot exceed 8,000 characters.
def set_speech_ssml(self, ssml): self.response.outputSpeech.type = 'SSML' self.response.outputSpeech.ssml = ssml
1,141,816
Set response card as simple type. title and content cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. content: str. Content of Simple type card.
def set_card_simple(self, title, content): self.response.card.type = 'Simple' self.response.card.title = title self.response.card.content = content
1,141,817
Set response reprompt output speech as plain text type. Args: text: str. Response speech used when type is 'PlainText'. Cannot exceed 8,000 characters.
def set_reprompt_text(self, text): self.response.reprompt.outputSpeech.type = 'PlainText' self.response.reprompt.outputSpeech.text = text
1,141,819
Set response reprompt output speech as SSML type. Args: ssml: str. Response speech used when type is 'SSML', should be formatted with Speech Synthesis Markup Language. Cannot exceed 8,000 characters.
def set_reprompt_ssml(self, ssml): self.response.reprompt.outputSpeech.type = 'SSML' self.response.reprompt.outputSpeech.ssml = ssml
1,141,820
Constructor. Args: conf_path (str): Path to the ZEO configuration file. Default :attr:`~storage.settings.ZEO_CLIENT_PATH`. project_key (str): Project key, which is used for lookups into ZEO. Default :attr:`~storage.settings.TREE_PROJECT_KEY`.
def __init__(self, conf_path=ZEO_CLIENT_PATH, project_key=PROJECT_KEY): super(self.__class__, self).__init__( conf_path=conf_path, project_key=project_key ) # tree.name -> tree self.name_db_key = "name_db" self.name_db = self._get_key_or_create(s...
1,141,933
Add `item` to `db` under `index`. If `index` is not yet in `db`, create it using `default`. Args: db (dict-obj): Dict-like object used to connect to database. index (str): Index used to look in `db`. item (obj): Persistent object, which may be stored in DB. ...
def _add_to(self, db, index, item, default=OOSet): row = db.get(index, None) if row is None: row = default() db[index] = row row.add(item)
1,141,934
Add `tree` into database. Args: tree (obj): :class:`.Tree` instance. parent (ref, default None): Reference to parent tree. This is used for all sub-trees in recursive call.
def add_tree(self, tree, parent=None): if tree.path in self.path_db: self.remove_tree_by_path(tree.path) # index all indexable attributes for index in tree.indexes: if not getattr(tree, index): continue self._add_to( ...
1,141,935
Remove the tree from database by given `path`. Args: path (str): Path of the tree.
def remove_tree_by_path(self, path): with transaction.manager: trees = self.path_db.get(path, None) if not trees: return for tree in trees: return self._remove_tree(tree)
1,141,936
Remove `item` from `db` at `index`. Note: This function is inverse to :meth:`._add_to`. Args: db (dict-obj): Dict-like object used to connect to database. index (str): Index used to look in `db`. item (obj): Persistent object, which may be stored in DB.
def _remove_from(self, db, index, item): with transaction.manager: row = db.get(index, None) if row is None: return with transaction.manager: if item in row: row.remove(item) with transaction.manager: if not row:...
1,141,937
Really remove the tree identified by `tree` instance from all indexes from database. Args: tree (obj): :class:`.Tree` instance. parent (obj, default None): Reference to parent.
def _remove_tree(self, tree, parent=None): # remove sub-trees for sub_tree in tree.sub_trees: self._remove_tree(sub_tree, parent=tree) # remove itself for index in tree.indexes: if not getattr(tree, index): continue self._rem...
1,141,938
Search trees by `issn`. Args: issn (str): :attr:`.Tree.issn` property of :class:`.Tree`. Returns: set: Set of matching :class:`Tree` instances.
def trees_by_issn(self, issn): return set( self.issn_db.get(issn, OOSet()).keys() )
1,141,939
Search trees by `path`. Args: path (str): :attr:`.Tree.path` property of :class:`.Tree`. Returns: set: Set of matching :class:`Tree` instances.
def trees_by_path(self, path): return set( self.path_db.get(path, OOSet()).keys() )
1,141,940
Search trees by `sub_path` using ``Tree.path.startswith(sub_path)`` comparison. Args: sub_path (str): Part of the :attr:`.Tree.path` property of :class:`.Tree`. Returns: set: Set of matching :class:`Tree` instances.
def trees_by_subpath(self, sub_path): matches = ( self.path_db[tree_path].keys() for tree_path in self.path_db.iterkeys() if tree_path.startswith(sub_path) ) return set(sum(matches, []))
1,141,941
Get parent for given `tree` or `alt` if not found. Args: tree (obj): :class:`.Tree` instance, which is already stored in DB. alt (obj, default None): Alternative value returned when `tree` is not found. Returns: obj: :class:`.Tree` parent to given `t...
def get_parent(self, tree, alt=None): parent = self.parent_db.get(tree.path) if not parent: return alt return list(parent)[0]
1,141,942
Initialize the object. Args: conf_path (str): See :attr:`conf_path`. project_key (str, default None): See :attr:`project_key`. If not set, the root of the database is used (this may cause performace issues). run_asyncore_thread (bool, default ...
def __init__(self, project_key=None, run_asyncore_thread=True): self.project_key = project_key self.default_type = OOBTree self._root = None #: Reference to the root of the database. self._connection = None #: Internal handler for the ZEO connection. if run_asyncore_...
1,142,007
Get and initialize the ZEO root object. Args: attempts (int, default 3): How many times to try, if the connection was lost.
def _init_zeo_root(self, attempts=3): try: db_root = self._connection.root() except ConnectionStateError: if attempts <= 0: raise self._open_connection() return self._init_zeo_root(attempts=attempts-1) # init the root, if...
1,142,009
Converts a list of uris to elasticsearch json objects args: uri_list: list of uris to convert num: the ending count within the batch batch_num: the batch number
def _index_sub(self, uri_list, num, batch_num): bname = '%s-%s' % (batch_num, num) log.debug("batch_num '%s' starting es_json conversion", bname) qry_data = get_all_item_data([item[0] for item in uri_list], self.tstore_conn, ...
1,142,378
updates the triplestore with success of saves and failues of indexing Args: ----- es_result: the elasticsearch result list action_list: list of elasticsearch action items that were indexed
def _update_triplestore(self, es_result, action_list, **kwargs): idx_time = XsdDatetime(datetime.datetime.utcnow()) uri_keys = {} bnode_keys = {} for item in action_list: try: uri_keys[item['_id']] = item['_source']["uri"] except KeyError:...
1,142,381
Removes all of the index status triples from the datastore Args: ----- rdf_class: The class of items to remove the status from
def delete_idx_status(self, rdf_class): sparql_template = rdf_types = [rdf_class.uri] + [item.uri for item in rdf_class.subclasses] sparql = sparql_template.format("\n\t\t".join(rdf_types)) log.warn("Deleting index status for %s", rdf_cla...
1,142,382
Creates and returns a tuple of either: (pymongo.mongo_client.MongoClient, pymongo.database.Database) or (pymongo.mongo_replica_set_client.MongoReplicaSetClient, pymongo.database.Database) utilizing either a passed in Flask 'app' instance, an imported module object, o...
def __new__(cls, app_or_object_or_dict): config = {} app_name = get_app_name() is_flask = False # If the object is a flask.app.Flask instance if flask_app and isinstance(app_or_object_or_dict, flask_app.Flask): config.update(app_or_object_or_dict.config) ...
1,142,441
Scan `path` for viruses using ``clamd`` antivirus daemon. Args: path (str): Relative or absolute path of file/directory you need to scan. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. Raises: ValueError: When the server is not running. ...
def scan_file(path): path = os.path.abspath(path) assert os.path.exists(path), "Unreachable file '%s'." % path try: cd = pyclamd.ClamdUnixSocket() cd.ping() except pyclamd.ConnectionError: cd = pyclamd.ClamdNetworkSocket() try: cd.ping() except p...
1,142,536
Construct a new node. Args: name: Specifying the name of this node. If not given, use strings returned from __str__ method.
def __init__(self, graph, name=None): if not isinstance(graph, BipartiteGraph): raise ValueError( "Given graph is not instance of Bipartite:", graph) self._graph = graph if name: self.name = name else: self.name = super(_Node,...
1,142,602
Set summary. Args: v: A new summary. It could be a single number or lists.
def summary(self, v): if hasattr(v, "__iter__"): self._summary = self._summary_cls(v) else: self._summary = self._summary_cls(float(v))
1,142,610
Construct bipartite graph. Args: summary_type: specify summary type class, default value is AverageSummary. alpha: used to compute weight of anomalous scores, default value is 1. credibility: credibility class to be used in this graph. (Default: WeightedCre...
def __init__( self, summary=AverageSummary, alpha=1, credibility=WeightedCredibility, reviewer=Reviewer, product=Product): self.alpha = alpha self.graph = nx.DiGraph() self.reviewers = [] self.products = [] self._summary_cls = summary sel...
1,142,612
Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance.
def new_reviewer(self, name, anomalous=None): n = self._reviewer_cls( self, name=name, credibility=self.credibility, anomalous=anomalous) self.graph.add_node(n) self.reviewers.append(n) return n
1,142,613
Create a new product. Args: name: name of the new product. Returns: A new product instance.
def new_product(self, name): n = self._product_cls(self, name, summary_cls=self._summary_cls) self.graph.add_node(n) self.products.append(n) return n
1,142,614
Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: T...
def add_review(self, reviewer, product, review, date=None): if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._p...
1,142,615
Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed.
def retrieve_products(self, reviewer): if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) return list(self.graph.successors(reviewer))
1,142,616
Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is con...
def retrieve_reviewers(self, product): if not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) return list(self.graph.predecessors(product))
1,142,617
Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specifie...
def retrieve_review(self, reviewer, product): if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): ...
1,142,618
Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer.
def _weight_generator(self, reviewers): scores = [r.anomalous_score for r in reviewers] mu = np.average(scores) sigma = np.std(scores) if sigma: def w(v): try: exp = math.exp(self.alpha * (v - mu) / sigma) ...
1,142,620
Dump credibilities of all products. Args: output: a writable object.
def dump_credibilities(self, output): for p in self.products: json.dump({ "product_id": p.name, "credibility": self.credibility(p) }, output) output.write("\n")
1,142,621
Writes a line to a log file Arguments: namespace {str} -- namespace of document document {dict} -- document to write to the logs
def write_log_file(namespace, document): log_timestamp = asctime(gmtime(document[TS])) with open("{}{}.{}.log".format(LOG_DIR, namespace, DAY_STRING), "a") as f: log_string = dumps({ "datetime": log_timestamp.upper(), "namespace": namespace, "log": document[LOG_K...
1,142,687
Retrieve token of TonicDNS API. Arguments: usename: TonicDNS API username password: TonicDNS API password server: TonicDNS API server
def get_token(username, password, server): method = 'PUT' uri = 'https://' + server + '/authenticate' token = '' authinfo = { "username": username, "password": password, "local_user": username} token = tonicdns_client(uri, method, token, data=authinfo) return toke...
1,142,795
TonicDNS API client Arguments: uri: TonicDNS API URI method: TonicDNS API request method token: TonicDNS API authentication token data: Post data to TonicDNS API keyword: Processing keyword of response content: data exist flag raw_flag: True ...
def tonicdns_client(uri, method, token='', data='', keyword='', content='', raw_flag=False): res = request(uri, method, data, token) if token: if keyword == 'serial': args = {"token": token, "keyword": keyword, "content": content} cur_soa, new_soa = respo...
1,142,796
Request to TonicDNS API. Arguments: uri: TonicDNS API URI method: TonicDNS API request method data: Post data to TonicDNS API token: TonicDNS API authentication token
def request(uri, method, data, token=''): socket.setdefaulttimeout(__timeout__) obj = urllib.build_opener(urllib.HTTPHandler) # encoding json encoded = json.JSONEncoder(object).encode(data) # encoding utf8 data_utf8 = encoded.encode('utf-8') req = urllib.Request(uri, data=data_utf8) ...
1,142,797
Response of tonicdns_client request Arguments: uri: TonicDNS API URI method: TonicDNS API request method res: Response of against request to TonicDNS API token: TonicDNS API token keyword: Processing keyword content: JSON data raw_flag: True...
def response(uri, method, res, token='', keyword='', content='', raw_flag=False): if method == 'GET' or (method == 'PUT' and not token): # response body data = res.read() data_utf8 = data.decode('utf-8') if token: datas = json.loads(data_utf8) el...
1,142,798
Search target JSON -> dictionary Arguments: datas: dictionary of record datas keyword: search keyword (default is null) Key target is "name" or "content" or "type". default null. Either key and type, or on the other hand. When keyword has include camma ",", Separate keyword to na...
def search_record(datas, keyword): key_name, key_type, key_content = False, False, False if keyword.find(',') > -1: if len(keyword.split(',')) == 3: key_content = keyword.split(',')[2] key_name = keyword.split(',')[0] key_type = keyword.split(',')[1] result = [] ...
1,142,799
Create a response object. Args: message: the message object (it could be any type of object.) errors: the errors to attach (it could be any type of object.) status (int): the status of the response. Errors should use the status that is the most appropriate. S...
def __init__(self, message=None, errors=None, status=None): self.status = status or 200 self.message = message self.errors = errors if self.errors and self.status == 200: self.status = 400
1,142,898
Formulates a log file name that incorporates the provided tags. The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``. Args: tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag will be added in the same order as provided.
def get_logfile_name(tags): if not os.path.exists(sd.LOG_DIR): os.mkdir(sd.LOG_DIR) filename = "log" for tag in tags: filename += "_{}".format(tag) filename += ".txt" filename = os.path.join(sd.LOG_DIR,filename) return filename
1,142,983
Creates and Adds a file handler (`logging.FileHandler` instance) to the specified logger. Args: logger: The `logging.Logger` instance to add the new file handler to. level: `str`. The logging level for which the handler accepts messages, i.e. `logging.INFO`. tags: `list` of tags to append to the l...
def add_file_handler(logger,level,tags): f_formatter = logging.Formatter('%(asctime)s:%(name)s:\t%(message)s') filename = get_logfile_name(tags) handler = logging.FileHandler(filename=filename,mode="a") handler.setLevel(level) handler.setFormatter(f_formatter) logger.addHandler(handler)
1,142,984
Sets the widget style to the class defaults. Parameters: ----------- colors : str, optional (default lightbg) Whether to use the default IPython light background or dark background or B&W style.
def set_default_style(self, colors='lightbg'): colors = colors.lower() if colors=='lightbg': self.style_sheet = styles.default_light_style_sheet self.syntax_style = styles.default_light_syntax_style elif colors=='linux': self.style_sheet = styles.defa...
1,143,088
Opens a Python script for editing. Parameters: ----------- filename : str A path to a local system file. line : int, optional A line of interest in the file.
def _edit(self, filename, line=None): if self.custom_edit: self.custom_edit_requested.emit(filename, line) elif not self.editor: self._append_plain_text('No default editor available.\n' 'Specify a GUI text editor in the `IPythonWidget.editor` ' 'c...
1,143,089
sends a passed in action_list to elasticsearch args: data: that data dictionary to save kwargs: id: es id to use / None = auto
def save(self, data, **kwargs): lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) es = self.es es_index = get2(kwargs, "es_index", self.es_index) reset_index = kwargs.get("reset_index",self.reset_index) doc_...
1,143,408
Reads a list of data and replaces the ids with es id of the item args: data_list: list of items to find in replace prop: full prop name in es format i.e. make.id lookup_src: dictionary with index doc_type ie. {"es_index": "reference", "doc_type": "devic...
def _find_ids(self, data_list, prop, lookup_index, lookup_doc_type, lookup_field): lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) rtn_list...
1,143,409
Get nodes matching the query in the response of the GET request sent to each `url` of `urls` Params: urls: iterable of strs
def query_multiple_endpoints(urls, query: PyQuery): urls = list(urls) return _query_multiple_endpoints(urls, query)
1,143,720
Return a rendered field. Checks to see if the field has a custom widget set. If it does not have a custom widget, the field type is looked up in the lookup dictionary to get the default renderer for this field type. If you wish to not perform any lookup, simply call field() without ...
def __call__(self, field, **kwargs): if not hasattr(field.widget, '__webwidget__'): if field.type in self._lookup: field.widget = self._lookup[field.type] return field(**kwargs)
1,143,901
Retrieve the versions from PyPI by ``project_name``. Args: project_name (str): The name of the project we wish to retrieve the versions of. Returns: list: Of string versions.
def package_releases(self, project_name): try: return self._connection.package_releases(project_name) except Exception as err: raise PyPIClientError(err)
1,143,991
Storage package information in ``self.packages`` Args: project_name (str): This will be used as a the key in the dictionary. versions (list): List of ``str`` representing the available versions of a project.
def set_package_releases(self, project_name, versions): self.packages[project_name] = sorted(versions, reverse=True)
1,143,992
Returns dict containing zmq configuration arguments parsed from xbahn url Arguments: - u (urlparse.urlparse result) Returns: dict: - id (str): connection index key - typ_str (str): string representation of zmq socket type - typ (int): zmq socket type (...
def config_from_url(u, **kwargs): path = u.path.lstrip("/").split("/") if len(path) > 2 or not path: raise AssertionError("zmq url format: zmq://<host>:<port>/<pub|sub>/<topic>") typ = path[0].upper() try: topic = path[1] except IndexError as _: topic = '' param...
1,144,059
records the dictionay in the the 'blank' attribute based on the 'list_blank' path args: ----- current: the current dictionay counts dict_obj: the original dictionary object
def _record_blank(self, current, dict_obj): if not self.list_blank: return if self.list_blank not in current: self.blank.append(dict_obj)
1,144,089
cycles through the object and adds in count values Args: ----- obj: the object to parse path: the current path kwargs: ------- current: a dictionary of counts for current call sub_val: the value to use for subtotal aggregation
def _count_objs(self, obj, path=None, **kwargs): sub_val = None # pdb.set_trace() if isinstance(obj, dict): for key, value in obj.items(): if isinstance(value, (list, dict)): kwargs = self._count_objs(value, ...
1,144,090
increments the property path count args: ----- prop: the key for the prop path: the path to the prop kwargs: ------- current: dictionary count for the current dictionay
def _increment_prop(self, prop, path=None, **kwargs): new_path = self.make_path(prop, path) if self.method == 'simple': counter = kwargs['current'] else: counter = self.counts try: counter[new_path] += 1 except KeyError: co...
1,144,091
updates counts for the class instance based on the current dictionary counts args: ----- current: current dictionary counts
def update_counts(self, current): for item in current: try: self.counts[item] += 1 except KeyError: self.counts[item] = 1
1,144,092
updates sub_total counts for the class instance based on the current dictionary counts args: ----- current: current dictionary counts sub_key: the key/value to use for the subtotals
def update_subtotals(self, current, sub_key): if not self.sub_counts.get(sub_key): self.sub_counts[sub_key] = {} for item in current: try: self.sub_counts[sub_key][item] += 1 except KeyError: self.sub_counts[sub_key][item] = 1
1,144,093
Converts protobuf message to JSON format. Args: message: The protocol buffers message instance to serialize. including_default_value_fields: If True, singular primitive fields, repeated fields, and map fields will always be serialized. If False, only serialize non-empty fields. Singular mes...
def MessageToJson(message, including_default_value_fields=False): js = _MessageToJsonObject(message, including_default_value_fields) return json.dumps(js, indent=2)
1,144,139
Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol beffer message to merge into. Returns: The same message passed as argument. Raises:: ParseError: On JSON parsing problems.
def Parse(text, message): if not isinstance(text, six.text_type): text = text.decode('utf-8') try: if sys.version_info < (2, 7): # object_pair_hook is not supported before python2.7 js = json.loads(text) else: js = json.loads(text, object_pairs_hook=_DuplicateChecker) except ValueErro...
1,144,142
Convert field value pairs into regular message. Args: js: A JSON object to convert the field value pairs. message: A regular protocol message to record the data. Raises: ParseError: In case of problems converting.
def _ConvertFieldValuePair(js, message): names = [] message_descriptor = message.DESCRIPTOR for name in js: try: field = message_descriptor.fields_by_camelcase_name.get(name, None) if not field: raise ParseError( 'Message type "{0}" has no field named "{1}".'.format( ...
1,144,143
Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of convert problems.
def _ConvertMessage(value, message): message_descriptor = message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): _ConvertWrapperMessage(value, message) elif full_name in _WKTJSONMETHODS: _WKTJSONMETHODS[full_name][1](value, message) else: _ConvertF...
1,144,144
Convert communication namedtuple to this class. Args: pub (obj): :class:`.Publication` instance which will be converted. Returns: obj: :class:`DBPublication` instance.
def from_comm(cls, pub): filename = None if pub.b64_data: filename = cls._save_to_unique_filename(pub) return cls( title=pub.title, author=pub.author, pub_year=pub.pub_year, isbn=pub.isbn, urnnbn=pub.urnnbn, ...
1,144,303
Goes through all the options in `data`, and prompts new values. This function calls itself recursively if it finds an inner dictionary. Arguments: data -- The dictionary to loop through. key_string -- The dot-notated key of the dictionary being checked through.
def configure_data(self, data, key_string = ''): # If there's no keys in this dictionary, we have nothing to do. if len(data.keys()) == 0: return # Split the key string by its dots to find out how deep we are. key_parts = key_string.rsplit('.') prefix = ' ...
1,144,344
Parses a single value and sets it in an inner dictionary. Arguments: inner_dict -- The dictionary containing the value to set label -- The label to show for the prompt. key -- The key in the dictionary to set the value for. value -- The value...
def parse_value(self, inner_dict, label, key, value, default): t = type(default) if t is dict: return select = self.data.get_select(key) k = key.split('.')[-1] if select: inner_dict[k] = self.prompt.select(label, select, value, default = de...
1,144,346
Sets a single value in a preconfigured data file. Arguments: key -- The full dot-notated key to set the value for. value -- The value to set.
def set(self, key, value): d = self.data.data keys = key.split('.') latest = keys.pop() for k in keys: d = d.setdefault(k, {}) schema = Schema().load(self.schema_file) self.data.internal = schema.internal self.parse_value(d, '', key, value, ...
1,144,347
Internal load function. Creates the object and returns it. Arguments: data_file -- The filename to load.
def _load(self, data_file): # Load the data from a file. try: data = Schema().load(data_file) except (Exception, IOError, ValueError) as e: raise e return data
1,144,348
Loads a data file and sets it to self.data. Arguments: data_file -- The filename to load.
def load(self, data_file = None): if not data_file: data_file = '' elif data_file[-1] != '/': data_file += '/' if data_file[-6:] != self.lazy_folder: data_file += self.lazy_folder data_file += self.data_filename self.data =...
1,144,349
Add all keyword arguments to self.args args: **defaults: key and value represents dictionary key and value
def set_defaults(self, **defaults): try: defaults_items = defaults.iteritems() except AttributeError: defaults_items = defaults.items() for key, val in defaults_items: if key not in self.args.keys(): self.args[key] = val
1,144,411
Set more arguments to self.args args: **kwargs: key and value represents dictionary key and value
def set_args(self, **kwargs): try: kwargs_items = kwargs.iteritems() except AttributeError: kwargs_items = kwargs.items() for key, val in kwargs_items: self.args[key] = val
1,144,412
Method to get bestfit line using the defined self.bestfit_func method args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 ...
def get_bestfit_line(self, x_min=None, x_max=None, resolution=None): x = self.args["x"] if x_min is None: x_min = min(x) if x_max is None: x_max = max(x) if resolution is None: resolution = self.args.get("resolution", 1000) bestfit_x =...
1,144,414
Get Root Mean Square Error using self.bestfit_func args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps b...
def get_rmse(self, data_x=None, data_y=None): if data_x is None: data_x = np.array(self.args["x"]) if data_y is None: data_y = np.array(self.args["y"]) if len(data_x) != len(data_y): raise ValueError("Lengths of data_x and data_y are different") ...
1,144,415
Get Mean Absolute Error using self.bestfit_func args: data_x: array_like, default=x x value used to determine rmse, used if only a section of x is to be calculated data_y: array_like, default=y y value used to determine rmse, used ...
def get_mae(self, data_x=None, data_y=None): if data_x is None: data_x = np.array(self.args["x"]) if data_y is None: data_y = np.array(self.args["y"]) if len(data_x) != len(data_y): raise ValueError("Lengths of data_x and data_y are different") ...
1,144,416
bind and return a connection instance from url arguments: - url (str): xbahn connection url
def listen(url, prefix=None, **kwargs): return listener(url, prefix=get_prefix(prefix), **kwargs)
1,144,478
connect and return a connection instance from url arguments: - url (str): xbahn connection url
def connect(url, prefix=None, **kwargs): return connection(url, prefix=get_prefix(prefix), **kwargs)
1,144,479
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails fo...
def execute(self, correlation_id, args): # Validate arguments if self._schema != None: self.validate_and_throw_exception(correlation_id, args) # Call the function try: return self._function(correlation_id, args) # Intercept unhan...
1,144,520
Generates a property class from the defintion dictionary args: prop_defs: the dictionary defining the property prop_name: the base name of the property cls_name: the name of the rdf_class with which the property is associated
def make_property(prop_defs, prop_name, cls_names=[], hierarchy=[]): register = False try: cls_names.remove('RdfClassBase') except ValueError: pass if cls_names: new_name = "%s_%s" % (prop_name.pyuri, "_".join(cls_names)) prop_defs['kds_appliesToClass'] = cls_names ...
1,144,558
Generates a property class linked to the rdfclass args: prop: unlinked property class cls_name: the name of the rdf_class with which the property is associated cls_object: the rdf_class
def link_property(prop, cls_object): register = False cls_name = cls_object.__name__ if cls_name and cls_name != 'RdfBaseClass': new_name = "%s_%s" % (prop._prop_name, cls_name) else: new_name = prop._prop_name new_prop = types.new_class(new_name, ...
1,144,559
Reads through the prop_defs and returns a dictionary filtered by the current class args: prop_defs: the definitions from the rdf vocabulary definition cls_object: the class object to tie the property cls_names: the name of the classes
def filter_prop_defs(prop_defs, hierarchy, cls_names): def _is_valid(test_list, valid_list): for test in test_list: if test in valid_list: return True return False new_dict = {} valid_classes = [Uri('kdr_AllClasses')] + cls_names + hierarchy f...
1,144,561
Examines and adds any missing defs to the prop_defs dictionary for use with the RdfPropertyMeta.__prepare__ method Args: ----- prop_defs: the definitions from the rdf vocabulary definition prop_name: the property name cls_names: the name of the associated classes Returns: ---...
def prepare_prop_defs(prop_defs, prop_name, cls_names): def get_def(prop_defs, def_fields, default_val=None): rtn_list = [] for fld in def_fields: if prop_defs.get(fld): rtn_list += prop_defs.get(fld) if not rtn_list and default_val: rtn_...
1,144,562
reads through the prop attributes and filters them for the associated class and returns a dictionary for meta_class __prepare__ args: prop: class object to read cls_name: the name of the class to tie the property to
def tie_prop_to_class(prop, cls_name): attr_list = [attr for attr in dir(prop) if type(attr, Uri)] prop_defs = kwargs.pop('prop_defs') prop_name = kwargs.pop('prop_name') cls_name = kwargs.pop('cls_name') if cls_name == 'RdfClassBase': return {} doc_string = make_doc_string(name, ...
1,144,563
reads the prop defs and adds applicable processors for the property Args: processor_cat(str): The category of processors to retrieve prop_defs: property definitions as defined by the rdf definitions data_attr: the attr to manipulate during processing. Returns: list: a list of pro...
def get_processors(processor_cat, prop_defs, data_attr=None): processor_defs = prop_defs.get(processor_cat,[]) processor_list = [] for processor in processor_defs: proc_class = PropertyProcessor[processor['rdf_type'][0]] processor_list.append(proc_class(processor.get('kds_params', [{}])...
1,144,565
takes an rdf list and merges it into a python list args: rdf_list: the RdfDataset object with the list values returns: list of values
def merge_rdf_list(rdf_list): # pdb.set_trace() if isinstance(rdf_list, list): rdf_list = rdf_list[0] rtn_list = [] # for item in rdf_list: item = rdf_list if item.get('rdf_rest') and item.get('rdf_rest',[1])[0] != 'rdf_nil': rtn_list += merge_rdf_list(item['rdf_rest'][0]) ...
1,144,566
Get a named profile from the CONFIG_FILE. Args: name The name of the profile to load. Returns: A dictionary with the profile's ``repo`` and ``token`` values.
def read_profile(name):
    """Load a named profile from the CONFIG_FILE.

    Args:
        name: the profile (section) name to load.

    Returns:
        dict with the profile's ``repo`` and ``token`` values.
    """
    parser = configparser.ConfigParser()
    parser.read(CONFIG_FILE)
    # Missing section or keys propagate as KeyError, same as before.
    section = parser[name]
    return {"repo": section["repo"], "token": section["token"]}
1,144,748
Inits a Skill class with proxy request and response. Args: app_id: str, default None. Skill application ID, declare to validate against application ID in the request.
def __init__(self, app_id=None): self.valid = Valid(app_id) self.request = RequestBody() self.response = ResponseBody() self.logic = dict() self.launch = self.register('LaunchRequest') self.intent = self.register self.session_ended = self.register('Sessio...
1,144,896
Creates instance of timing object that calculates elapsed time and stores it to specified performance counters component under specified name. Args: counter: a name of the counter to record elapsed time interval. callback: a performance counters component to store calculate...
def __init__(self, counter = None, callback = None):
    """Create a timing object that remembers its start timestamp so an
    elapsed interval can be computed and stored later.

    Args:
        counter: a name of the counter to record the elapsed time interval under.
        callback: a performance counters component that stores the
            calculated interval.
    """
    self._counter = counter
    self._callback = callback
    # NOTE(review): time.clock() was deprecated in Python 3.3 and removed
    # in 3.8 -- this line (and the matching elapsed-time code elsewhere in
    # this class) should migrate to time.perf_counter() together.
    # The start value is kept in milliseconds.
    self._start = time.clock() * 1000
1,145,002
Fetches all messages at @conn from @directory. Params: conn IMAP4_SSL connection directory The IMAP directory to look for readonly readonly mode, true or false Returns: List of subject-body tuples
def fetch_all_messages(self, conn, directory, readonly): conn.select(directory, readonly) message_data = [] typ, data = conn.search(None, 'All') # Loop through each message object for num in data[0].split(): typ, data = conn.fetch(num, '(RFC822)') ...
1,145,027
Create a repr of a property based class quickly Args: obj -- instance of class *attrs -- list of attrs to add to the representation **kwargs -- Extra arguments to add that are not captured as attributes Returns: A string representing the class
def rep(obj, *attrs, **kwargs):
    """Quickly build a repr-style string for a property-based class.

    Args:
        obj: instance whose class name heads the representation.
        *attrs: attribute names to read off *obj* and include.
        **kwargs: extra key/value pairs not captured as attributes.

    Returns:
        str: e.g. ``ClassName(attr=value,extra=value)``.
    """
    pairs = [(name, getattr(obj, name)) for name in attrs]
    pairs.extend(kwargs.items())
    body = ','.join('{}={!r}'.format(key, value) for key, value in pairs)
    return '{}({})'.format(obj.__class__.__name__, body)
1,145,095
Not a decorator, but a helper function to retrieve the cached item for a key created via get_cache_key. Args: - cache_key: if there was a specific cache key used to cache the function, it should be provided here. If not this should be None - func: the function which was cache - *...
def get_cached_item(cache_key, func, *func_args, **func_kwargs):
    """Helper (not a decorator) to retrieve the cached item for a key
    created via get_cache_key.

    Args:
        cache_key: the specific cache key used to cache the function,
            or None if the key should be derived from the function and
            its arguments.
        func: the function which was cached.
        *func_args: positional arguments the function was called with.
        **func_kwargs: keyword arguments the function was called with.

    Returns:
        The cached value, or None when nothing is cached under the key.
    """
    # BUG FIX: the original signature named the second parameter
    # 'alternative_cache_key' (never used) while the body referenced the
    # undefined name 'func', so every call raised NameError. Positional
    # callers are unaffected by the rename.
    key = get_cache_key(cache_key, func, *func_args, **func_kwargs)
    return cache.get(key)
1,145,123
Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s". Raises: ParseError: On parsing problems.
def FromJsonString(self, value): if len(value) < 1 or value[-1] != 's': raise ParseError( 'Duration must end with letter "s": {0}.'.format(value)) try: pos = value.find('.') if pos == -1: self.seconds = int(value[:-1]) self.nanos = 0 else: self.seco...
1,145,202
Replace my :attr:`scopes` for the duration of the with block. My global scope is not replaced. Args: new_scopes (list of dict-likes): The new :attr:`scopes` to use.
def scopes_as(self, new_scopes):
    """Replace my :attr:`scopes` for the duration of the with block.

    My global scope is not replaced.

    Args:
        new_scopes (list of dict-likes): The new :attr:`scopes` to use.
    """
    old_scopes, self.scopes = self.scopes, new_scopes
    try:
        yield
    finally:
        # Restore even when the with-block raises; without this an
        # exception would leave the temporary scopes permanently
        # installed on self.
        self.scopes = old_scopes
1,145,444
Add a new innermost scope for the duration of the with block. Args: new_scope (dict-like): The scope to add.
def new_scope(self, new_scope=None):
    """Add a new innermost scope for the duration of the with block.

    Args:
        new_scope (dict-like): The scope to add. A fresh empty dict is
            used when omitted.
    """
    # BUG FIX: the original used a literal {} default, which is shared
    # across every call (mutable-default pitfall) -- bindings made in one
    # with-block would leak into all later default-scoped blocks.
    if new_scope is None:
        new_scope = {}
    old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope)
    try:
        yield
    finally:
        # Restore even when the with-block raises.
        self.scopes = old_scopes
1,145,445
Add a new value to me. Args: val (LispVal): The value to be added. Returns: LispVal: The added value. Raises: ~parthial.errs.LimitationError: If I already contain the maximum number of elements.
def new(self, val):
    """Add a new value to me.

    Args:
        val (LispVal): The value to be added.

    Returns:
        LispVal: The added value.

    Raises:
        ~parthial.errs.LimitationError: If I already contain the maximum
            number of elements.
    """
    remaining = self.max_things - len(self.things)
    if remaining <= 0:
        raise LimitationError('too many things')
    self.things.add(val)
    return val
1,145,446
Recursively add a new value and its children to me. Args: val (LispVal): The value to be added. Returns: LispVal: The added value.
def rec_new(self, val):
    """Recursively add a new value and all of its children to me.

    Args:
        val (LispVal): The value to be added.

    Returns:
        LispVal: The added value.
    """
    if val in self.things:
        # Already registered; children were handled when it was added.
        return val
    for child in val.children():
        self.rec_new(child)
    self.new(val)
    return val
1,145,447
Recursively add a new value and its children to me, and assign a variable to it. Args: k (str): The name of the variable to assign. val (LispVal): The value to be added and assigned. Returns: LispVal: The added value.
def add_rec_new(self, k, val):
    """Recursively add a new value (and its children) to me, then bind
    the variable *k* to it.

    Args:
        k (str): The name of the variable to assign.
        val (LispVal): The value to be added and assigned.

    Returns:
        LispVal: The added value.
    """
    # Register the whole object graph first so the binding never points
    # at an untracked value.
    self.rec_new(val)
    self[k] = val
    return val
1,145,448
Look up a variable. Args: k (str): The name of the variable to look up. Returns: LispVal: The value assigned to the variable. Raises: KeyError: If the variable has not been assigned to.
def __getitem__(self, k):
    """Look up a variable.

    Args:
        k (str): The name of the variable to look up.

    Returns:
        LispVal: The value assigned to the variable.

    Raises:
        KeyError: If the variable has not been assigned to.
    """
    # Locals shadow globals: scopes is searched before globals.
    return ChainMap(self.scopes, self.globals)[k]
1,145,450
Check whether a variable has been assigned to. This is **not** the same kind of element-of as described in the class documentation. Args: k (str): The name of the variable to check. Returns: bool: Whether or not the variable has been assigned to.
def __contains__(self, k):
    """Check whether a variable has been assigned to.

    This is **not** the same kind of element-of as described in the
    class documentation.

    Args:
        k (str): The name of the variable to check.

    Returns:
        bool: Whether or not the variable has been assigned to.
    """
    lookup = ChainMap(self.scopes, self.globals)
    return k in lookup
1,145,451
:meth:`eval` an expression in a new, temporary :class:`Context`. This should be safe to use directly on user input. Args: expr (LispVal): The expression to evaluate. *args: Args for the :class:`Context` constructor. **kwargs: Kwargs for the :class:`Context` construc...
def eval_in_new(cls, expr, *args, **kwargs):
    """:meth:`eval` an expression in a fresh, temporary :class:`Context`.

    This should be safe to use directly on user input.

    Args:
        expr (LispVal): The expression to evaluate.
        *args: Positional args for the :class:`Context` constructor.
        **kwargs: Keyword args for the :class:`Context` constructor.

    Returns:
        The result of evaluating *expr*.
    """
    scratch = cls(*args, **kwargs)
    # Register the expression graph in the new environment before
    # evaluating it.
    scratch.env.rec_new(expr)
    return scratch.eval(expr)
1,145,454