code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def build_object(self, obj):
    """Build *obj* unless it opts out of static generation.

    Overrides django-bakery so pages flagged with ``exclude_from_static``
    are skipped; every other page is built by the superclass.
    """
    if obj.exclude_from_static:
        return
    super(ShowPage, self).build_object(obj)
Override django-bakery to skip pages marked exclude_from_static
def backup(file_name, jail=None, chroot=None, root=None):
    """Export installed packages into a yaml+mtree file via ``pkg backup``.

    ``jail``, ``chroot`` and ``root`` select where the command runs (jail
    wins); the backup path is then relative to that environment's root.
    Returns the part of the command output after the first ``...``.
    """
    cmd = _pkg(jail, chroot, root) + ['backup', '-d', file_name]
    out = __salt__['cmd.run'](cmd,
                              output_loglevel='trace',
                              python_shell=False)
    return out.split('...')[1]
Export installed packages into yaml+mtree file CLI Example: .. code-block:: bash salt '*' pkg.backup /tmp/pkg jail Backup packages from the specified jail. Note that this will run the command within the jail, and so the path to the backup file will be relative to the root of the jail CLI Example: .. code-block:: bash salt '*' pkg.backup /tmp/pkg jail=<jail name or id> chroot Backup packages from the specified chroot (ignored if ``jail`` is specified). Note that this will run the command within the chroot, and so the path to the backup file will be relative to the root of the chroot. root Backup packages from the specified root (ignored if ``jail`` is specified). Note that this will run the command within the root, and so the path to the backup file will be relative to the root of the root. CLI Example: .. code-block:: bash salt '*' pkg.backup /tmp/pkg chroot=/path/to/chroot
def verify_existence_and_get(id, table, name=None, get_id=False):
    """Fetch a resource row, raising a 404 DCIException when it is absent.

    :param id: id of the resource
    :param table: SQLAlchemy table object
    :param name: optional name to look up instead of the id
    :param get_id: when True, return only the row id
    """
    if name:
        where_clause = table.c.name == name
    else:
        where_clause = table.c.id == id
    # Tables that track a state treat archived rows as nonexistent.
    if 'state' in table.columns:
        where_clause = sql.and_(table.c.state != 'archived', where_clause)
    query = sql.select([table]).where(where_clause)
    result = flask.g.db_conn.execute(query).fetchone()
    if result is None:
        raise dci_exc.DCIException('Resource "%s" not found.' % id,
                                   status_code=404)
    return result.id if get_id else result
Verify the existence of a resource in the database and then return it if it exists, according to the condition, or raise an exception. :param id: id of the resource :param table: the table object :param name: the name of the row to look for :param get_id: if True, return only the ID :return:
def get_info(node_id, info_id):
    """Return a specific info for a node; both ids come from the URL.

    Produces an error response when the node or info does not exist, or
    when the info neither originated at the node nor was received by it.
    """
    exp = experiment(session)
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info, node does not exist")
    info = models.Info.query.get(info_id)
    if info is None:
        return error_response(error_type="/info GET, info does not exist",
                              participant=node.participant)
    elif (info.origin_id != node.id and
          info.id not in [t.info_id for t in node.transmissions(
              direction="incoming", status="received")]):
        return error_response(error_type="/info GET, forbidden info",
                              status=403,
                              participant=node.participant)
    try:
        exp.info_get_request(node=node, infos=info)
        session.commit()
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors.
    except Exception:
        return error_response(error_type="/info GET server error",
                              status=403,
                              participant=node.participant)
    return success_response(field="info",
                            data=info.__json__(),
                            request_type="info get")
Get a specific info. Both the node and info id must be specified in the url.
def save_model(self, request, obj, form, change):
    """Save the config, clearing the menu cache when the structure changed."""
    menu_changed = 'config.menu_structure' in form.changed_data
    if menu_changed:
        # Imported lazily so the menu machinery is only touched when needed.
        from menus.menu_pool import menu_pool
        menu_pool.clear(all=True)
    return super(BlogConfigAdmin, self).save_model(request, obj, form, change)
Clear menu cache when changing menu structure
def validate_file(parser, arg):
    """Return *arg* unchanged if it names an existing file.

    Otherwise report the problem through ``parser.error`` (which normally
    exits the program).
    """
    if not os.path.isfile(arg):
        message = "%s is not a file." % arg
        parser.error(message)
    return arg
Validates that `arg` is a valid file.
def smallest(heap, predicate):
    """Return the index of the smallest heap item satisfying *predicate*.

    Walks the heap top-down breadth-first, visiting the smaller child of a
    node first so the first match is the smallest matching item.

    :param heap: heap being searched (exposes size/peek/lt)
    :param predicate: callable returning True for a matching item
    :raises NoMatchError: if no item matches
    """
    size = heap.size()
    pending = deque([0])
    while pending:
        index = pending.popleft()
        if index >= size:
            continue
        if predicate(heap.peek(index)):
            return index
        left = 2 * index + 1
        right = left + 1
        # Enqueue the smaller child first to keep smallest-first order.
        if left < size and right < size and heap.lt(right, left):
            left, right = right, left
        if left < size:
            pending.append(left)
        if right < size:
            pending.append(right)
    raise NoMatchError()
Finds the index of the smallest item in the heap that matches the given predicate. :param heap: Heap on which this search is being performed. :param predicate: Function that accepts an item from the heap and returns true or false. :returns: Index of the first item for which ``predicate`` returned true. :raises NoMatchError: If no matching items were found.
def view_atype(self, atype):
    """Show the given asset type on the atype page of the GUI.

    :param atype: the asset type to display
    :type atype: :class:`jukeboxcore.djadapter.models.Atype`
    :returns: None
    """
    # Nothing to show if no project is currently selected.
    if not self.cur_prj:
        return
    log.debug('Viewing atype %s', atype.name)
    # Clear cur_atype while the widgets are repopulated; it is restored
    # at the end once the page reflects the new atype.
    self.cur_atype = None
    self.pages_tabw.setCurrentIndex(4)
    self.atype_name_le.setText(atype.name)
    self.atype_desc_pte.setPlainText(atype.description)
    # Rebuild the asset tree model with one row per asset of this type.
    assetrootdata = treemodel.ListItemData(['Name', 'Description'])
    assetrootitem = treemodel.TreeItem(assetrootdata)
    self.atype_asset_model = treemodel.TreeModel(assetrootitem)
    self.atype_asset_treev.setModel(self.atype_asset_model)
    for a in djadapter.assets.filter(project=self.cur_prj, atype=atype):
        assetdata = djitemdata.AssetItemData(a)
        treemodel.TreeItem(assetdata, assetrootitem)
    self.cur_atype = atype
View the given atype on the atype page :param atype: the atype to view :type atype: :class:`jukeboxcore.djadapter.models.Atype` :returns: None :rtype: None :raises: None
def from_json(cls, json_doc):
    """Create a new Session Token from a JSON document.

    :type json_doc: str
    :param json_doc: JSON produced from a previously saved Credentials
        object.
    """
    token = cls()
    token.__dict__.update(json.loads(json_doc))
    return token
Create and return a new Session Token based on the contents of a JSON document. :type json_doc: str :param json_doc: A string containing a JSON document with a previously saved Credentials object.
def rst_filename_rel_autodoc_index(self, index_filename: str) -> str:
    """Return the target RST filename relative to the index file's directory.

    Used so the index file can refer to the generated RST.
    """
    index_dir = dirname(abspath(expanduser(index_filename)))
    relative = relpath(self.target_rst_filename, start=index_dir)
    return relative
Returns the filename of the target RST file, relative to a specified index file. Used to make the index refer to the RST.
def _print_results(filename, data): if filename: with open(filename, 'wb') as f: f.write(data) else: print data
Print data to a file or STDOUT. Args: filename (str or None): If None, print to STDOUT; otherwise, print to the file with this name. data (str): Data to print.
def remove_padding(sequence):
    """Trim a single (unbatched) sequence to its true length.

    Pops the `length` key from the dict and slices every tensor of the
    nested structure down to that many frames.
    """
    length = sequence.pop('length')

    def trim(tensor):
        return tensor[:length]

    return tools.nested.map(trim, sequence)
Selects the used frames of a sequence, up to its length. This function does not expect a batch of sequences, but a single sequence. The sequence must be a dict with `length` key, which will removed from the result. Args: sequence: Nested dict of tensors with time dimension. Returns: Nested dict of tensors with padding elements and `length` key removed.
def list_lb_nat_rules(access_token, subscription_id, resource_group, lb_name):
    """List the inbound NAT rules for a load balancer.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        lb_name (str): Name of the load balancer.

    Returns:
        HTTP response. JSON body of load balancer NAT rules.
    """
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', resource_group,
                        '/providers/Microsoft.Network/loadBalancers/', lb_name,
                        # BUG FIX: the '/' before 'inboundNatRules' was
                        # missing, yielding ".../loadBalancers/<name>inboundNatRules".
                        '/inboundNatRules?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
List the inbound NAT rules for a load balancer. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. lb_name (str): Name of the load balancer. Returns: HTTP response. JSON body of load balancer NAT rules.
def parse_content(self, text):
    """Extract the usage section from *text*.

    Stores the raw matched text in ``raw_content`` and a version without
    the section title and leading empty lines in ``formal_content``.
    """
    match = re.search(
        self.usage_re_str.format(self.usage_name),
        text,
        flags=(re.DOTALL if self.case_sensitive else (re.DOTALL | re.IGNORECASE)))
    if match is None:
        # No usage section present; leave attributes untouched.
        return
    dic = match.groupdict()
    logger.debug(dic)
    self.raw_content = dic['raw']
    if dic['sep'] in ('\n', '\r\n'):
        # Title sits on its own line: the section group is already clean.
        self.formal_content = dic['section']
        return
    # Title and content share a line: blank out the title text so the
    # column layout of the first line is preserved, then strip leading
    # empty lines and trailing whitespace.
    reallen = len(dic['name'])
    replace = ''.ljust(reallen)
    drop_name = match.expand('%s\\g<sep>\\g<section>' % replace)
    self.formal_content = self.drop_started_empty_lines(drop_name).rstrip()
Get the Usage section from the text and store it: `raw_content` holds the raw matched text, while `formal_content` holds the section with the title and leading empty lines removed.
def _get_name(self): if self.name is not None: return self.name if self.scoring_ is None: return 'score' if isinstance(self.scoring_, str): return self.scoring_ if isinstance(self.scoring_, partial): return self.scoring_.func.__name__ if isinstance(self.scoring_, _BaseScorer): return self.scoring_._score_func.__name__ return self.scoring_.__name__
Find name of scoring function.
def write_branch_data(self, file, padding="    "):
    """Write branch data in Graphviz DOT language.

    Emits one ``from -> to [attrs];`` edge per branch of the case, all
    sharing the attribute string built from ``self.branch_attr``.
    """
    # BUG FIX: dict.iteritems() is Python-2 only; use items().
    attrs = ['%s="%s"' % (k, v) for k, v in self.branch_attr.items()]
    attr_str = ", ".join(attrs)
    for br in self.case.branches:
        file.write("%s%s -> %s [%s];\n" %
                   (padding, br.from_bus.name, br.to_bus.name, attr_str))
Writes branch data in Graphviz DOT language.
def register_nscf_task(self, *args, **kwargs):
    """Register a NSCF task, forcing the task class to ``NscfTask``."""
    kwargs.update(task_class=NscfTask)
    return self.register_task(*args, **kwargs)
Register a nscf task.
async def update_chat(self):
    """Refresh this chat's stored data from the bot.

    :return: None

    NOTE(review): iterating ``other`` directly assumes the Chat object
    yields ``(key, value)`` pairs from ``__iter__`` — confirm against the
    Chat implementation; a plain dict here would yield keys only.
    """
    other = await self.bot.get_chat(self.id)
    for key, value in other:
        self[key] = value
Use this method to update Chat data :return: None
def readACTIONRECORD(self):
    """Read a single SWF action record; returns None on the 0 end marker."""
    code = self.readUI8()
    if code == 0:
        # Action code 0 terminates the record list.
        return None
    # Codes >= 0x80 carry an explicit payload length.
    length = self.readUI16() if code >= 0x80 else 0
    action = SWFActionFactory.create(code, length)
    action.parse(self)
    return action
Read a SWFActionRecord
def to_json(self):
    """Return the JSON representation of the content type."""
    result = super(ContentType, self).to_json()
    result['name'] = self.name
    result['description'] = self.description
    result['displayField'] = self.display_field
    result['fields'] = [f.to_json() for f in self.fields]
    return result
Returns the JSON representation of the content type.
def search(self, **kw):
    """Return the active users matching the keyword-argument conditions."""
    q = db.select(self.table).condition('status', 'active')
    # BUG FIX: iterating a dict yields keys only, so `for k, v in kw`
    # raised ValueError; iterate the (key, value) pairs instead.
    for k, v in kw.items():
        q.condition(k, v)
    data = q.execute()
    return [self.load(user, self.model) for user in data]
Find the users match the condition in kw
def Tag(env, target, source, *more_tags, **kw_tags):
    """Tag file(s) with the given arguments.

    Just sets the accordingly named attribute on each file object; tag
    keys are normalised to a ``PACKAGING_`` prefix.  TODO: FIXME
    """
    # With no target, the source is the target and there is no extra tag;
    # otherwise the source argument itself names the first tag.
    if not target:
        target=source
        first_tag=None
    else:
        first_tag=source
    if first_tag:
        kw_tags[first_tag[0]] = ''
    if len(kw_tags) == 0 and len(more_tags) == 0:
        raise UserError("No tags given.")
    # Positional tags become value-less keyword tags.
    for x in more_tags:
        kw_tags[x] = ''
    if not SCons.Util.is_List(target):
        target=[target]
    else:
        # Target lists can be nested; flatten before iterating.
        target=env.Flatten(target)
    for t in target:
        for (k,v) in kw_tags.items():
            # All file tags start with PACKAGING_, distinguishing them
            # from ordinary object attributes.
            if k[:10] != 'PACKAGING_':
                k='PACKAGING_'+k
            t.Tag(k, v)
Tag a file with the given arguments, just sets the accordingly named attribute on the file object. TODO: FIXME
def handle_profile_delete(self, sender, instance, **kwargs):
    """Re-index the owning user when their profile is deleted.

    Missing profiles are ignored rather than treated as errors.
    """
    try:
        user = instance.user
        self.handle_save(user.__class__, user)
    except get_profile_model().DoesNotExist:
        pass
Custom handler for user profile delete
def decode(stream, *args, **kwargs):
    """Return a generator that decodes each element of an AMF datastream.

    @param stream: AMF byte data to be decoded.
    @kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}.
    """
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
    return get_decoder(encoding, stream, *args, **kwargs)
A generator function to decode a datastream. @param stream: AMF data to be decoded. @type stream: byte data. @kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}. @return: A generator that will decode each element in the stream.
def my_glob(pattern):
    """Return the filenames matching *pattern*.

    ``vos:`` URIs are matched by listing the remote directory and
    filtering with fnmatch; anything else uses the local ``glob``.
    """
    if not pattern.startswith('vos:'):
        return glob(pattern)
    result = []
    dirname = os.path.dirname(pattern)
    for fname in listdir(dirname):
        candidate = '/'.join([dirname, fname])
        if fnmatch.fnmatch(candidate, pattern):
            result.append(candidate)
    return result
get a listing matching pattern @param pattern: @return:
def get_oqparam(job_ini, pkg=None, calculators=None, hc_id=None, validate=1, **kw):
    """Parse an INI-style job configuration into an OqParam instance.

    :param job_ini: path to a config file/archive, or a dict of parameters
    :param pkg: Python package where to find the configuration file
    :param calculators: restricts the valid `calculation_mode` choices
    :param hc_id: not None only when called from a post calculation
    :param validate: if true (default), validate the parameters
    :param kw: string-valued keyword arguments overriding job.ini values
    :returns: an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    """
    # Imported here to avoid a circular import with the calculators package.
    from openquake.calculators import base
    OqParam.calculation_mode.validator.choices = tuple(
        calculators or base.calculators)
    if not isinstance(job_ini, dict):
        # Resolve the file relative to the package directory, if given.
        basedir = os.path.dirname(pkg.__file__) if pkg else ''
        job_ini = get_params([os.path.join(basedir, job_ini)])
    if hc_id:
        job_ini.update(hazard_calculation_id=str(hc_id))
    # Explicit keyword overrides win over file values.
    job_ini.update(kw)
    oqparam = OqParam(**job_ini)
    if validate:
        oqparam.validate()
    return oqparam
Parse a dictionary of parameters from an INI-style config file. :param job_ini: Path to configuration file/archive or dictionary of parameters :param pkg: Python package where to find the configuration file (optional) :param calculators: Sequence of calculator names (optional) used to restrict the valid choices for `calculation_mode` :param hc_id: Not None only when called from a post calculation :param validate: Flag. By default it is true and the parameters are validated :param kw: String-valued keyword arguments used to override the job.ini parameters :returns: An :class:`openquake.commonlib.oqvalidation.OqParam` instance containing the validate and casted parameters/values parsed from the job.ini file as well as a subdictionary 'inputs' containing absolute paths to all of the files referenced in the job.ini, keyed by the parameter name.
def add_to_heap(self, heap, descriptors='stale', data='stale'):
    """Update *heap* with new items and item descriptors since the last call.

    ``descriptors`` and ``data`` each select what to send: 'stale' (only
    what has not been sent, or not recently enough), 'all', or 'none'.

    :raises ValueError: if `descriptors` or `data` is not a legal value
    :returns: the updated heap
    """
    if descriptors not in ['stale', 'all', 'none']:
        raise ValueError("descriptors must be one of 'stale', 'all', 'none'")
    if data not in ['stale', 'all', 'none']:
        raise ValueError("data must be one of 'stale', 'all', 'none'")
    for item in self._item_group.values():
        info = self._get_info(item)
        if (descriptors == 'all') or (descriptors == 'stale' and self._descriptor_stale(item, info)):
            heap.add_descriptor(item)
            # Record when (by counter) this descriptor was last sent.
            info.descriptor_cnt = self._descriptor_cnt
        if item.value is not None:
            # Only re-send data whose version changed since the last send.
            if (data == 'all') or (data == 'stale' and info.version != item.version):
                heap.add_item(item)
                info.version = item.version
    # Advance the counter so future staleness checks see this call.
    self._descriptor_cnt += 1
    return heap
Update a heap to contains all the new items and item descriptors since the last call. Parameters ---------- heap : :py:class:`Heap` The heap to update. descriptors : {'stale', 'all', 'none'} Which descriptors to send. The default ('stale') sends only descriptors that have not been sent, or have not been sent recently enough according to the `descriptor_frequency` passed to the constructor. The other options are to send all the descriptors or none of them. Sending all descriptors is useful if a new receiver is added which will be out of date. data : {'stale', 'all', 'none'} Which data items to send. item_group : :py:class:`ItemGroup`, optional If specified, uses the items from this item group instead of the one passed to the constructor (which could be `None`). Raises ------ ValueError if `descriptors` or `data` is not one of the legal values
def request_password_change_mail(self, password):
    """Request a password-change email for the account.

    :param password: current account password
    :type password: :class:`str`
    :return: result
    :rtype: :class:`.EResult`
    """
    message = Msg(EMsg.ClientRequestChangeMail, extended=True)
    message.body.password = password
    resp = self.send_job_and_wait(message, timeout=10)
    if resp is None:
        return EResult.Timeout
    return EResult(resp.eresult)
Request password change mail :param password: current account password :type password: :class:`str` :return: result :rtype: :class:`.EResult`
def _validate_question_area(self): hazard_index = self.hazard_layer_combo.currentIndex() exposure_index = self.exposure_layer_combo.currentIndex() if hazard_index == -1 or exposure_index == -1: if self.conflicting_plugin_detected: message = conflicting_plugin_message() else: message = getting_started_message() return False, message else: return True, None
Helper method to evaluate the current state of the dialog. This function will determine if it is appropriate for the OK button to be enabled or not. .. note:: The enabled state of the OK button on the dialog will NOT be updated (set True or False) depending on the outcome of the UI readiness tests performed - **only** True or False will be returned by the function. :returns: A two-tuple where the first element is a Boolean reflecting the results of the validation tests and the second is a message indicating any reason why the validation may have failed. :rtype: (Boolean, safe.messaging.Message) Example:: flag,message = self._validate_question_area()
def listdir(self, folder_id='0', offset=None, limit=None, fields=None):
    """Get Box object, representing list of objects in a folder.

    *fields* may be a comma-separated string or an iterable of field
    names, which is joined with commas before the request.
    """
    # BUG FIX: types.StringTypes is Python-2 only; check for str instead.
    if fields is not None and not isinstance(fields, str):
        fields = ','.join(fields)
    return self(
        join('folders', folder_id, 'items'),
        dict(offset=offset, limit=limit, fields=fields))
Get Box object, representing list of objects in a folder.
def perform_ops(self):
    """Run the stored operations on the database in one transaction.

    The connection context manager commits on success (rolls back on
    error); the cursor is closed either way.
    """
    with self.db, closing(self.db.cursor()) as cursor:
        cursor.execute('BEGIN TRANSACTION')
        self._perform_ops(cursor)
Performs the stored operations on the database connection.
def weighted_random(sample, embedding):
    """Unembed *sample* by weighted random choice over each chain.

    Args:
        sample (dict): {target_variable: value, ...} from the sampler.
        embedding (dict): {source_variable: {target_variable, ...}, ...}.

    Yields:
        dict: The unembedded sample. On a chain break the value is picked
        uniformly from the chain's values, i.e. weighted by how often each
        value occurs within the chain.
    """
    unembedded = {}
    # BUG FIX: iteritems() is Python-2 only; use items().
    for v, chain in embedding.items():
        vals = [sample[u] for u in chain]
        unembedded[v] = random.choice(vals)
    yield unembedded
Determines the sample values by weighed random choice. Args: sample (dict): A sample of the form {v: val, ...} where v is a variable in the target graph and val is the associated value as determined by a binary quadratic model sampler. embedding (dict): The mapping from the source graph to the target graph. Should be of the form {v: {s, ...}, ...} where v is a node in the source graph and s is a node in the target graph. Yields: dict: The unembedded sample. When there is a chain break, the value is chosen randomly, weighted by the frequency of the values within the chain.
def get_or_create(self, write_concern=None, auto_save=True, *q_objs, **query):
    """Retrieve a unique document, creating it if it does not exist.

    Returns ``(document, created)`` where ``created`` says whether a new
    document was made. A ``defaults`` dict in *query* seeds fields only
    when creating.
    """
    defaults = query.pop('defaults', {})
    try:
        return self.get(*q_objs, **query), False
    except self._document.DoesNotExist:
        query.update(defaults)
        document = self._document(**query)
        if auto_save:
            document.save(write_concern=write_concern)
        return document, True
Retrieve unique object or create, if it doesn't exist. Returns a tuple of ``(object, created)``, where ``object`` is the retrieved or created object and ``created`` is a boolean specifying whether a new object was created. Taken back from: https://github.com/MongoEngine/mongoengine/ pull/1029/files#diff-05c70acbd0634d6d05e4a6e3a9b7d66b
def code_binary(item):
    """Return a binary 'code' suitable for hashing."""
    result = code(item)
    if isinstance(result, six.string_types):
        result = result.encode('utf-8')
    return result
Return a binary 'code' suitable for hashing.
def local_manager_consider_for_user(self):
    """Whether the local-manager ACL applies to the current user.

    The admin user and anyone holding an admin or manager role are
    exempt, as is everyone when local management is disabled.
    """
    if not self.local_management_enabled:
        return False
    request = get_current_request()
    if authenticated_userid(request) == security.ADMIN_USER:
        return False
    roles = security.authenticated_user(request).roles
    return 'admin' not in roles and 'manager' not in roles
Flag whether local manager ACL should be considered for current authenticated user.
def set_default_backend(self, backend_name):
    """Set the default backend of this circuit.

    This setting applies only to this circuit and overrides the global
    default. Pass ``None`` to fall back to the global setting again.

    Args:
        backend_name (str or None): new default backend name; None means
            use the global default.

    Raises:
        ValueError: if `backend_name` is not a registered backend.
    """
    # BUG FIX: the documented backend_name=None (restore global default)
    # was rejected as an unknown backend.
    if backend_name is not None and backend_name not in BACKENDS:
        raise ValueError(f"Unknown backend '{backend_name}'.")
    self._default_backend = backend_name
Set the default backend of this circuit. This setting is only applied for this circuit. If you want to change the default backend of all gates, use `BlueqatGlobalSetting.set_default_backend()`. After set the default backend by this method, global setting is ignored even if `BlueqatGlobalSetting.set_default_backend()` is called. If you want to use global default setting, call this method with backend_name=None. Args: backend_name (str or None): new default backend name. If None is given, global setting is applied. Raises: ValueError: If `backend_name` is not registered backend.
def parse_value(self, values):
    """Cast *values* to the proper collection type.

    Falsy input yields the default value; a non-list passes through
    unchanged; each element of a list is cast individually.
    """
    if not values:
        return self.get_default_value()
    if isinstance(values, list):
        return [self._cast_value(item) for item in values]
    return values
Cast value to proper collection.
def find_defined_levels():
    """Find the defined logging levels.

    :returns: A dictionary mapping level names (the upper-case attributes
        of :mod:`logging` with integer values) to their numeric values.
    """
    return {
        name: getattr(logging, name)
        for name in dir(logging)
        if name.isupper() and isinstance(getattr(logging, name), int)
    }
Find the defined logging levels. :returns: A dictionary with level names as keys and integers as values. Here's what the result looks like by default (when no custom levels or level names have been defined): >>> find_defined_levels() {'NOTSET': 0, 'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'WARNING': 30, 'ERROR': 40, 'FATAL': 50, 'CRITICAL': 50}
def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs):
    """Walk every member and call *function* on the keyword and value.

    Returns a dict of return values. If the function raises, re-raise
    unless ``raise_errors=False``, in which case the entry is set to
    ``False``. With ``call_on_sections`` the function is also called for
    whole subsections (before recursing); its result is discarded. Extra
    keyword arguments are passed through to *function*. The function may
    rename keys but must not add or delete members.
    """
    out = {}
    # Walk the scalar members first.
    for i in range(len(self.scalars)):
        entry = self.scalars[i]
        try:
            val = function(self, entry, **keywargs)
            # Re-read the entry in case the function renamed the key.
            entry = self.scalars[i]
            out[entry] = val
        except Exception:
            if raise_errors:
                raise
            else:
                entry = self.scalars[i]
                out[entry] = False
    # Then the subsections.
    for i in range(len(self.sections)):
        entry = self.sections[i]
        if call_on_sections:
            try:
                function(self, entry, **keywargs)
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.sections[i]
                    out[entry] = False
            # Re-read in case the function renamed the section.
            entry = self.sections[i]
        # The per-section call's result is discarded; the recursion's
        # dictionary is what ends up in the output.
        out[entry] = self[entry].walk(
            function,
            raise_errors=raise_errors,
            call_on_sections=call_on_sections,
            **keywargs)
    return out
Walk every member and call a function on the keyword and value. Return a dictionary of the return values If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
def _create_messages(self, names, data, isDms=False):
    """Build an object mapping each name to its array of messages.

    :param [str] names: names of each group of messages
    :param [object] data: objects describing where the messages live in
        the directory structure
    :param bool isDms: when True, remember which DM directories were
        empty (stored on self._EMPTY_DMS)
    :return: dict of name -> [Message, ...]
    """
    chats = {}
    empty_dms = []
    formatter = SlackFormatter(self.__USER_DATA, data)
    for name in names:
        dir_path = os.path.join(self._PATH, name)
        messages = []
        day_files = glob.glob(os.path.join(dir_path, "*.json"))
        if not day_files:
            if isDms:
                empty_dms.append(name)
            continue
        for day in sorted(day_files):
            # BUG FIX: `day` already includes self._PATH (it came from
            # globbing dir_path), so re-joining self._PATH duplicated the
            # prefix for relative paths. Open the glob result directly.
            with io.open(day, encoding="utf8") as f:
                day_messages = json.load(f)
            messages.extend(Message(formatter, d) for d in day_messages)
        chats[name] = messages
    if isDms:
        self._EMPTY_DMS = empty_dms
    return chats
Creates object of arrays of messages from each json file specified by the names or ids :param [str] names: names of each group of messages :param [object] data: array of objects detailing where to get the messages from in the directory structure :param bool isDms: boolean value used to tell if the data is dm data so the function can collect the empty dm directories and store them in memory only :return: object of arrays of messages :rtype: object
def name(self):
    """Return the lineage name (first clonify id found), or None."""
    for pair in self.heavies:
        if 'clonify' in pair.heavy:
            return pair.heavy['clonify']['id']
    return None
Returns the lineage name, or None if the name cannot be found.
def install(environment, opts):
    """Install or reinstall Python packages within this environment.

    After installing, reload the 'web' container of every running site of
    the environment so the new packages take effect. *opts* is the parsed
    docopt dictionary ('--clean', '--quiet', '--address', 'PACKAGE').
    """
    environment.require_data()
    install_all(environment, opts['--clean'], verbose=not opts['--quiet'], packages=opts['PACKAGE'])
    for site in environment.sites:
        # Each site gets its own Environment instance for the reload.
        environment = Environment.load(environment.name, site)
        if 'web' in environment.containers_running():
            # Reload with defaults; only the bind address comes from opts.
            manage.reload_(environment, { '--address': opts['--address'], '--background': False, '--no-watch': False, '--production': False, 'PORT': None, '--syslog': False, '--site-url': None, '--interactive': False })
Install or reinstall Python packages within this environment Usage: datacats install [-q] [--address=IP] [ENVIRONMENT [PACKAGE ...]] datacats install -c [-q] [--address=IP] [ENVIRONMENT] Options: --address=IP The address to bind to when reloading after install -c --clean Reinstall packages into a clean virtualenv -q --quiet Do not show output from installing packages and requirements. ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.'
def getDetails(self):
    """Fetch fresh check details from the API and cache them locally.

    :return: dictionary of check details
    """
    response = self.pingdom.request('GET', 'checks/%s' % self.id)
    details = response.json()['check']
    self.__addDetails__(details)
    return details
Update check details, returns dictionary of details
def render_to_texture(self, data, texture, offset, size):
    """Render a SDF to *texture* at the given offset and size.

    Parameters
    ----------
    data : array
        Must be 2D with type np.ubyte.
    texture : instance of Texture2D
        The texture to render to.
    offset : tuple of int
        Offset (x, y) inside the texture.
    size : tuple of int
        Size (w, h) to render inside the texture.
    """
    assert isinstance(texture, Texture2D)
    set_state(blend=False, depth_test=False)
    # Render the distance field of the inverted image first...
    orig_tex = Texture2D(255 - data, format='luminance', wrapping='clamp_to_edge', interpolation='nearest')
    edf_neg_tex = self._render_edf(orig_tex)
    # ...then reuse the same texture for the positive distance field.
    orig_tex[:, :, 0] = data
    edf_pos_tex = self._render_edf(orig_tex)
    self.program_insert['u_texture'] = orig_tex
    self.program_insert['u_pos_texture'] = edf_pos_tex
    self.program_insert['u_neg_texture'] = edf_neg_tex
    # Draw into the caller-supplied texture through the last FBO.
    self.fbo_to[-1].color_buffer = texture
    with self.fbo_to[-1]:
        set_viewport(tuple(offset) + tuple(size))
        self.program_insert.draw('triangle_strip')
Render a SDF to a texture at a given offset and size Parameters ---------- data : array Must be 2D with type np.ubyte. texture : instance of Texture2D The texture to render to. offset : tuple of int Offset (x, y) to render to inside the texture. size : tuple of int Size (w, h) to render inside the texture.
def ingest(self, token, endpoint=None, timeout=None, compress=None):
    """Obtain a datapoint and event ingest client.

    Prefers the Protocol Buffers client; falls back to JSON when protobuf
    support is not installed. Unset arguments fall back to the values
    configured on this object.
    """
    from . import ingest
    if ingest.sf_pbuf:
        client = ingest.ProtoBufSignalFxIngestClient
    else:
        # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
        _logger.warning('Protocol Buffers not installed properly; '
                        'falling back to JSON.')
        client = ingest.JsonSignalFxIngestClient
    compress = compress if compress is not None else self._compress
    return client(
        token=token,
        endpoint=endpoint or self._ingest_endpoint,
        timeout=timeout or self._timeout,
        compress=compress)
Obtain a datapoint and event ingest client.
def send_request(self, job_request, message_expiry_in_seconds=None):
    """Send a JobRequest and return the request ID.

    :param job_request: The job request object to send
    :type job_request: JobRequest
    :param message_expiry_in_seconds: How soon the message expires if not
        received by a server (defaults per settings)
    :type message_expiry_in_seconds: int
    :return: The request ID
    :rtype: int
    :raise: ConnectionError, InvalidField, MessageSendError,
        MessageSendTimeout, MessageTooLarge
    """
    # Allocate a fresh, monotonically increasing request ID.
    request_id = self.request_counter
    self.request_counter += 1
    meta = {}
    # Wrap the base sender with each middleware's request hook.
    wrapper = self._make_middleware_stack(
        [m.request for m in self.middleware],
        self._base_send_request,
    )
    try:
        with self.metrics.timer('client.send.including_middleware', resolution=TimerResolution.MICROSECONDS):
            wrapper(request_id, meta, job_request, message_expiry_in_seconds)
        return request_id
    finally:
        # Metrics are committed even when the send raises.
        self.metrics.commit()
Send a JobRequest, and return a request ID. The context and control_extra arguments may be used to include extra values in the context and control headers, respectively. :param job_request: The job request object to send :type job_request: JobRequest :param message_expiry_in_seconds: How soon the message will expire if not received by a server (defaults to sixty seconds unless the settings are otherwise) :type message_expiry_in_seconds: int :return: The request ID :rtype: int :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge
def header_length(bytearray):
    """Return the length of the input once it is base64-encoded.

    Base64 emits 4 output characters per 3-byte group, with any leftover
    bytes padded up to a full 4-character group.
    """
    full_groups, remainder = divmod(len(bytearray), 3)
    encoded = full_groups * 4
    if remainder:
        encoded += 4
    return encoded
Return the length of s when it is encoded with base64.
def get_project(self, projectname):
    """Return the Slurm account details for *projectname*, or None.

    Raises RuntimeError when the query is ambiguous or the returned
    account name does not match (case-insensitively) what was requested.
    """
    cmd = ["list", "accounts", "where", "name=%s" % projectname]
    results = self._read_output(cmd)
    if not results:
        return None
    if len(results) > 1:
        logger.error(
            "Command returned multiple results for '%s'." % projectname)
        raise RuntimeError(
            "Command returned multiple results for '%s'." % projectname)
    record = results[0]
    account = record["Account"]
    if projectname.lower() != account.lower():
        logger.error(
            "We expected projectname '%s' "
            "but got projectname '%s'." % (projectname, account))
        raise RuntimeError(
            "We expected projectname '%s' "
            "but got projectname '%s'." % (projectname, account))
    return record
Get the project details from Slurm.
def get_object(self, obj_class, data=None, subset=None):
    """Return a JSSObject by querying existing objects or posting new XML.

    Args:
        obj_class: The JSSObject subclass to search for or create.
        data: None lists/fetches all; int fetches by ID; str fetches by
            name; an ElementTree.Element creates a new object from XML.
        subset: list of XML subelement tags to request, or an
            '&'-delimited string (not supported for all objects).

    Raises:
        TypeError: if subset is not formatted properly.
        ValueError: if data is of an unsupported type.
    """
    if subset:
        # Normalise an '&'-delimited string into a list of tags.
        if not isinstance(subset, list):
            if isinstance(subset, basestring):
                subset = subset.split("&")
            else:
                raise TypeError
    if data is None:
        return self.get_list(obj_class, data, subset)
    elif isinstance(data, (basestring, int)):
        return self.get_individual_object(obj_class, data, subset)
    elif isinstance(data, ElementTree.Element):
        return self.get_new_object(obj_class, data)
    else:
        raise ValueError
Return a subclassed JSSObject instance by querying for existing objects or posting a new object. Args: obj_class: The JSSObject subclass type to search for or create. data: The data parameter performs different operations depending on the type passed. None: Perform a list operation, or for non-container objects, return all data. int: Retrieve an object with ID of <data>. str: Retrieve an object with name of <str>. For some objects, this may be overridden to include searching by other criteria. See those objects for more info. xml.etree.ElementTree.Element: Create a new object from xml. subset: A list of XML subelement tags to request (e.g. ['general', 'purchasing']), OR an '&' delimited string (e.g. 'general&purchasing'). This is not supported for all JSSObjects. Returns: JSSObjectList: for empty or None arguments to data. JSSObject: Returns an object of type obj_class for searches and new objects. (FUTURE) Will return None if nothing is found that match the search criteria. Raises: TypeError: if subset not formatted properly. JSSMethodNotAllowedError: if you try to perform an operation not supported by that object type. JSSGetError: If object searched for is not found. JSSPostError: If attempted object creation fails.
def certificate(self):
    """Retrieve (and memoize) the certificate that signed the bounce message.

    Returns None when no cert URL is present, the download fails, or the
    certificate cannot be parsed. TODO: cache by cert URL (securely) so
    it is not re-fetched for every bounce message.
    """
    # Memoized on the instance; only fetch once.
    if not hasattr(self, '_certificate'):
        cert_url = self._get_cert_url()
        if not cert_url:
            self._certificate = None
            return self._certificate
        # Both dependencies are optional; fail loudly if missing.
        try:
            import requests
        except ImportError:
            raise ImproperlyConfigured("requests is required for bounce message verification.")
        try:
            import M2Crypto
        except ImportError:
            raise ImproperlyConfigured("M2Crypto is required for bounce message verification.")
        response = requests.get(cert_url)
        if response.status_code != 200:
            logger.warning(u'Could not download certificate from %s: "%s"', cert_url, response.status_code)
            self._certificate = None
            return self._certificate
        try:
            self._certificate = M2Crypto.X509.load_cert_string(response.content)
        except M2Crypto.X509.X509Error as e:
            logger.warning(u'Could not load certificate from %s: "%s"', cert_url, e)
            self._certificate = None
    return self._certificate
Retrieves the certificate used to sign the bounce message. TODO: Cache the certificate based on the cert URL so we don't have to retrieve it for each bounce message. *We would need to do it in a secure way so that the cert couldn't be overwritten in the cache*
def receive_message(self, message, data):
    """Handle a connection message; return True when it was consumed."""
    if self._socket_client.is_stopped:
        return True
    if data[MESSAGE_TYPE] != TYPE_CLOSE:
        return False
    # A CLOSE message tears down the channel and refreshes receiver state.
    self._socket_client.disconnect_channel(message.source_id)
    self._socket_client.receiver_controller.update_status()
    return True
Called when a connection message is received.
def lru_cache(fn):
    """Memoization wrapper handling function attributes and mutable args.

    Results are keyed on the pickled positional arguments, so unhashable
    (mutable) arguments work. Existing attributes of *fn* are copied onto
    the wrapper, and the cache is exposed as ``memoized_fn.cache``.

    :param fn: Function to memoize
    :returns: Memoized function
    """
    @wraps(fn)
    def memoized_fn(*args):
        key = pickle.dumps(args)
        if key not in memoized_fn.cache:
            memoized_fn.cache[key] = fn(*args)
        return memoized_fn.cache[key]

    for attr, value in fn.__dict__.items():
        setattr(memoized_fn, attr, value)
    memoized_fn.cache = {}
    return memoized_fn
Memoization wrapper that can handle function attributes, mutable arguments, and can be applied either as a decorator or at runtime. :param fn: Function :type fn: function :returns: Memoized function :rtype: function
def resolve_job(self, name):
    """Resolve a task name into a job name.

    Asks each registered job resolver in turn and returns the first
    non-None answer; returns None when no resolver can handle *name*.
    """
    candidates = (resolver(name) for resolver in self.job_resolvers())
    return next((job for job in candidates if job is not None), None)
Attempt to resolve the task name in to a job name. If no job resolver can resolve the task, i.e. they all return None, return None. Keyword arguments: name -- Name of the task to be resolved.
def check_publish_block(self, block_header):
    """Check if a candidate block is ready to be claimed.

    Args:
        block_header (BlockHeader): the block_header to be checked if it
            should be claimed.
    Returns:
        Boolean: True if the candidate block_header should be claimed.
    """
    # Bug fix: `_valid_block_publishers` is a whitelist of allowed signers,
    # so the correct test is membership. The previous
    # `any(key != signer for key in publishers)` rejected every block as
    # soon as more than one publisher was configured. An empty/None
    # whitelist still means "anyone may publish".
    if (self._valid_block_publishers
            and block_header.signer_public_key not in
            self._valid_block_publishers):
        return False

    if self._min_wait_time == 0:
        # No wait configured: claim immediately.
        return True
    if self._min_wait_time < 0:
        # Invalid configuration: never claim.
        return False

    assert self._min_wait_time > 0
    if self._max_wait_time <= 0:
        # Only a minimum wait configured: claim once it has elapsed.
        return self._start_time + self._min_wait_time <= time.time()

    assert self._max_wait_time > 0
    if self._max_wait_time <= self._min_wait_time:
        # Inconsistent [min, max] window: never claim.
        return False

    assert 0 < self._min_wait_time < self._max_wait_time
    # The randomized wait (chosen elsewhere within [min, max]) must elapse.
    return self._start_time + self._wait_time <= time.time()
def user_timeline(self, delegate, user=None, params=None, extra_args=None):
    """Get the most recent updates for a user.

    If no user is specified, the statuses for the authenticating user are
    returned. See search for example of how results are returned.

    Bug fix: `params` previously defaulted to a shared mutable dict, so an
    `id` written on one call leaked into every subsequent call (and into
    the caller's own dict when one was passed in). A private copy is now
    taken instead.
    """
    params = dict(params) if params else {}
    if user:
        params['id'] = user
    return self.__get('/statuses/user_timeline.xml', delegate, params,
                      txml.Statuses, extra_args=extra_args)
def update_comment(self, comment_id, body):
    """Update a specific comment.

    This can be used to edit the content of an existing comment.

    :param comment_id: id of the comment to edit (coerced to int)
    :param body: new comment body text
    """
    request = ET.Element('request')
    ET.SubElement(request, 'comment_id').text = str(int(comment_id))
    comment_node = ET.SubElement(request, 'comment')
    ET.SubElement(comment_node, 'body').text = str(body)
    return self._request('/msg/update_comment', request)
def parser(cls, buf, offset):
    """Build an OFPStats object from the wire-protocol representation of
    flow stats.

    Reads the (reserved, length) header, then parses OXS fields until
    `length` bytes have been consumed.
    """
    stats = OFPStats()
    reserved, length = struct.unpack_from('!HH', buf, offset)
    stats.length = length
    # Skip past the 4-byte (reserved, length) header just read.
    offset += 4
    remaining = length - 4
    parsed = []
    while remaining > 0:
        oxs_type, value, _, field_len = ofproto.oxs_parse(buf, offset)
        key, user_value = ofproto.oxs_to_user(oxs_type, value, None)
        parsed.append((key, user_value))
        offset += field_len
        remaining -= field_len
    stats.fields = parsed
    return stats
def OnNodeActivated(self, event):
    """Double-click or enter on a node in some control...

    Marks the node as both activated and selected, points the square map
    at it, and (when an editor is available) jumps the source view to the
    node's line before recording the navigation in history.
    """
    self.activated_node = self.selected_node = event.node
    self.squareMap.SetModel(event.node, self.adapter)
    self.squareMap.SetSelected(event.node)
    # `editor` is a module-level handle/flag; only sync the source view
    # when an editor component is present.
    if editor:
        if self.SourceShowFile(event.node):
            # Not every node type carries a source line number.
            if hasattr(event.node, 'lineno'):
                self.sourceCodeControl.GotoLine(event.node.lineno)
    self.RecordHistory()
def set_naming_params(self, autonaming=None, prefix=None, suffix=None, name=None):
    """Setups processes naming parameters.

    :param bool autonaming: Automatically set process name to something
        meaningful. Generated process names may be 'uWSGI Master',
        'uWSGI Worker #', etc.
    :param str|unicode prefix: Add prefix to process names.
    :param str|unicode suffix: Append string to process names.
    :param str|unicode name: Set process names to given static value.
    """
    self._set('auto-procname', autonaming, cast=bool)
    # The '-spaced' option variant is used when the prefix ends with a
    # space (presumably it preserves the trailing space — see uWSGI docs).
    prefix_key = 'procname-prefix'
    if prefix and prefix.endswith(' '):
        prefix_key += '-spaced'
    self._set(prefix_key, prefix)
    self._set('procname-append', suffix)
    self._set('procname', name)
    return self._section
def get_feature(self, cat, img, feature):
    """Load a feature from disk.

    Reads the .mat file for (cat, img, feature), extracts the single
    non-metadata variable, and resizes it to `self.size` when a size is
    configured.
    """
    data = loadmat(self.path(cat, img, feature))
    # MAT files carry '__header__'-style metadata keys; the payload is the
    # remaining variable.
    names = [key for key in list(data.keys()) if not key.startswith('__')]
    feature_data = data[names.pop()]
    if self.size is not None:
        return imresize(feature_data, self.size)
    return feature_data
def _write_cron_lines(user, lines):
    """Takes a list of lines to be committed to a user's crontab and writes it.

    The lines are written to a mode-0600 temp file which is then handed to
    the crontab command; the temp file is removed afterwards. Returns the
    dict result of ``cmd.run_all``.
    """
    lines = [salt.utils.stringutils.to_str(_l) for _l in lines]
    path = salt.utils.files.mkstemp()
    if _check_instance_uid_match(user) or __grains__.get('os_family') in ('Solaris', 'AIX'):
        # Either we already run as the target user, or we are on
        # Solaris/AIX (presumably their crontab lacks a user argument —
        # TODO confirm): chown the temp file to the user and run the
        # crontab command *as* that user.
        with salt.utils.files.fpopen(path, 'w+', uid=__salt__['file.user_to_uid'](user), mode=0o600) as fp_:
            fp_.writelines(lines)
        ret = __salt__['cmd.run_all'](_get_cron_cmdstr(path), runas=user, python_shell=False)
    else:
        # Otherwise run crontab with an explicit user argument.
        with salt.utils.files.fpopen(path, 'w+', mode=0o600) as fp_:
            fp_.writelines(lines)
        ret = __salt__['cmd.run_all'](_get_cron_cmdstr(path, user), python_shell=False)
    os.remove(path)
    return ret
def jsonify(obj):
    """Return a JSON-encodable representation of an object, recursively
    using any available ``to_json`` methods, converting NumPy arrays and
    datatypes to native lists and types along the way.
    """
    # Order of checks matters: to_json wins, then NumPy conversions,
    # then mapping/object handling, then sequences.
    if hasattr(obj, 'to_json'):
        payload = obj.to_json()
        _push_metadata(payload, obj)
        return jsonify(payload)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, (np.int32, np.int64)):
        return int(obj)
    elif isinstance(obj, np.float64):
        return float(obj)
    elif isinstance(obj, dict):
        return _jsonify_dict(obj)
    elif hasattr(obj, '__dict__'):
        # Generic objects: serialize their attribute dict.
        return _jsonify_dict(obj.__dict__)
    elif isinstance(obj, (list, tuple)):
        return [jsonify(member) for member in obj]
    # Already JSON-friendly (str, int, float, bool, None, ...).
    return obj
def draw_group_labels(self):
    """Renders group labels to the figure, one text element per group."""
    xs = self.group_label_coords["x"]
    ys = self.group_label_coords["y"]
    h_aligns = self.group_label_aligns["has"]
    v_aligns = self.group_label_aligns["vas"]
    for idx, group in enumerate(self.groups):
        self.ax.text(
            s=group,
            x=xs[idx],
            y=ys[idx],
            ha=h_aligns[idx],
            va=v_aligns[idx],
            color=self.group_label_color[idx],
            fontsize=self.fontsize,
            family=self.fontfamily,
        )
def run():
    """Execute the Cauldron container command given as the first CLI argument."""
    command = sys.argv[1].strip().lower()
    print('[COMMAND]:', command)
    if command == 'test':
        return run_test()
    if command == 'build':
        return run_build()
    if command == 'up':
        return run_container()
    if command == 'serve':
        # Imported lazily so the other commands do not require cauldron.
        import cauldron
        cauldron.run_server(port=5010, public=True)
def _x_axis(self, draw_axes=True): axis = self.svg.node(self.nodes['plot'], class_="axis y gauge") x, y = self.view((0, 0)) self.svg.node(axis, 'circle', cx=x, cy=y, r=4)
Override x axis to put a center circle in center
def bots_delete(self, bot):
    """Delete existing bot.

    :param bot: bot to delete
    :type bot: Bot
    """
    # The client exposes bot endpoints as dynamic attributes; look the
    # endpoint up by name and issue a DELETE against it.
    endpoint = self.client.bots.__getattr__(bot.name)
    endpoint(_method="DELETE", _params={"botName": bot.name})
def handle(self, **options):
    """Collect tools into the destination directory.

    Creates the destination if needed, asks for confirmation when running
    interactively against a non-empty destination, optionally clears it,
    then collects.
    """
    self.set_options(**options)
    os.makedirs(self.destination_path, exist_ok=True)
    destination_has_files = any(os.listdir(self.destination_path))
    if self.interactive and destination_has_files:
        self.get_confirmation()
    if self.clear:
        self.clear_dir()
    self.collect()
def plot_shade_mask(ax, ind, mask, facecolor='gray', alpha=0.5):
    """Shade across x values where boolean mask is `True`.

    Args
    ----
    ax: pyplot.ax
        Axes object to plot with a shaded region
    ind: ndarray
        Indices used for the x-axis values of the data
    mask: ndarray
        Boolean mask determining which regions should be shaded
    facecolor: matplotlib color
        Color of the shaded area

    Returns
    -------
    ax: pyplot.ax
        Axes object with the shaded region added
    """
    lower, upper = ax.get_ylim()
    ax.fill_between(ind, lower, upper, where=mask,
                    facecolor=facecolor, alpha=alpha)
    return ax
def get_visualizations():
    """Get the available visualizations from the request context.

    Instantiates and caches them on the request context on first access.

    Returns:
        dict mapping class name to visualization instance.
    """
    if not hasattr(g, 'visualizations'):
        model = get_model()
        g.visualizations = {
            vis_class.__name__: vis_class(model)
            for vis_class in _get_visualization_classes()
        }
    return g.visualizations
def insert_loan_entries(database, entries):
    """Insert a set of records of a loan report in the provided database.

    Inserts new records without checking for conflicting entries;
    insertion continues past individual errors.

    @param database: The MongoDB database to operate on. The loans
        collection will be used from this database.
    @type database: pymongo.database.Database
    @param entries: The entries to insert into the database.
    @type entries: iterable of dict
    """
    cleaned = (clean_entry(entry) for entry in entries)
    database.loans.insert(cleaned, continue_on_error=True)
def set_values(self, x):
    """Update the ``self.theta`` parameter from input ``x``.

    ``x`` is coerced to a 2-D array, its real part is taken, and it is
    multiplied by the stored inverse covariance matrix; the result is
    cached on the instance and also returned.
    """
    real_x = numpy.atleast_2d(x).real
    theta = numpy.dot(real_x, self.__C_inv__)
    self.theta = theta
    return theta
def fit_transform(self, X, y=None):
    """Fit the model with X and apply the dimensionality reduction on X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples and
        n_features is the number of features.
    y : Ignored

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    U, S, _ = self._fit(X)
    n_comp = self.n_components_
    U = U[:, :n_comp]
    if self.whiten:
        # Whitened scores: unit component-wise variance.
        U *= np.sqrt(X.shape[0] - 1)
    else:
        # Scale by the singular values: U * S == X V.
        U *= S[:n_comp]
    return U
def contains_plural_field(model, fields):
    """Returns a boolean indicating if ``fields`` contains a relationship
    to multiple items (many-to-many or one-to-many)."""
    root_model = model
    for orm_path in fields:
        current = root_model
        # Strip ordering prefixes like '-name' / '+name' before splitting.
        parts = orm_path.lstrip('+-').split('__')
        # Only the intermediate parts are relations; the last part is the
        # terminal field itself.
        for part in parts[:-1]:
            field = current._meta.get_field(part)
            if field.many_to_many or field.one_to_many:
                return True
            current = get_model_at_related_field(current, part)
    return False
def binary_arguments_to_tensors(x1, x2):
    """Convert arguments of a binary operation to Tensors.

    Args:
        x1: a Tensor or something convertible to a tf Scalar
        x2: a Tensor or something convertible to a tf Scalar
    Returns:
        new_x1: a Tensor
        new_x2: a Tensor
    Raises:
        ValueError: if neither argument is already a Tensor
    """
    x1_is_tensor = isinstance(x1, Tensor)
    x2_is_tensor = isinstance(x2, Tensor)
    if x1_is_tensor and x2_is_tensor:
        return x1, x2
    if x1_is_tensor:
        # Import the scalar onto x1's mesh with x1's dtype.
        return x1, import_tf_tensor(
            x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([]))
    if x2_is_tensor:
        return import_tf_tensor(
            x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype), Shape([])), x2
    raise ValueError("at least one of x1 and x2 must be an mtf Tensor")
def split(self, pat, side='left'):
    """Split once each element from the left and select a side to return.

    Note this is unlike pandas split in that it essentially combines the
    split with a select.

    Parameters
    ----------
    pat : str
    side : {'left', 'right'}
        Which side of the split to select and return in each element.

    Returns
    -------
    Series
    """
    check_type(pat, str)
    check_type(side, str)
    sides = {'left': 0, 'right': 1}
    if side not in sides:
        raise ValueError('Can only select left or right side of split')
    return _series_str_result(self, weld_str_split, pat=pat, side=sides[side])
def get_rule_table(rules):
    """Formats output from get_all_rules and returns a two-column table."""
    table = formatting.Table(['Id', 'KeyName'], "Rules")
    for entry in rules:
        table.add_row([entry['id'], entry['keyName']])
    return table
def apply_defaults(self, other_config):
    """Applies default values from a different ConfigObject (or a plain
    mapping / ConfigKey object) to this ConfigObject.

    Values already present on this object win: the defaults are loaded
    with ``overwrite=False``.
    """
    is_same_kind = isinstance(other_config, self.__class__)
    defaults = other_config.config if is_same_kind else other_config
    self.config.load_from_dict(defaults, overwrite=False)
def en010(self, value=None):
    """Corresponds to IDD Field `en010`: enthalpy corresponding to 1.0%
    annual cumulative frequency of occurrence (mean coincident dry-bulb
    temperature).

    Args:
        value (float): value for IDD Field `en010`. Unit: kJ/kg.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except (ValueError, TypeError):
            # Bug fix: float() raises TypeError for e.g. lists, which
            # previously escaped uncaught; the documented contract is to
            # raise ValueError for any invalid value.
            raise ValueError('value {} need to be of type float '
                             'for field `en010`'.format(value))
    self._en010 = value
def a_send_line(text, ctx):
    """Send text line to the controller followed by `os.linesep`.

    ``text`` may be a plain string or an iterator/generator of lines; in
    the latter case one line is consumed per call and ``ctx.finished`` is
    set once the iterator is exhausted.

    Always returns True so the dispatcher continues.
    """
    # Bug fix: the old code called `text.next()` (Python 2 only) and
    # treated anything with __iter__ as an iterator, which misrouted
    # plain strings on Python 3 (str has __iter__ there). Detect actual
    # iterators and advance them with the builtin next().
    if hasattr(text, '__next__') or hasattr(text, 'next'):
        try:
            ctx.ctrl.sendline(next(text))
        except StopIteration:
            ctx.finished = True
    else:
        ctx.ctrl.sendline(text)
    return True
def consume(self, amount=1):
    """Consume one or more units from the bucket.

    Returns True and records the consumption when it fits within the
    bucket's capacity after leaking; otherwise returns False and consumes
    nothing.
    """
    if self.leak() + amount <= self.capacity:
        self._incr(amount)
        return True
    return False
def pretty_print_str(self):
    """Create a string to pretty-print this trie to standard output.

    Depth-first traversal, children visited in key order, two spaces of
    indent per tree level.
    """
    pieces = []
    stack = [self.root]
    while stack:
        node = stack.pop()
        # Push children in reverse key order so they pop in sorted order.
        for char in reversed(sorted(node.keys())):
            stack.append(node[char])
        pieces.append(' ' * (node.depth * 2) + node.__unicode__())
    return '\n'.join(pieces)
def get_axis_value(self, axis):
    """Return the axis value of the given axis.

    For the two scrolling axes SCROLL_VERTICAL and SCROLL_HORIZONTAL the
    value is in relative scroll units, positive being down or right; see
    :attr:`axis_source` for interpretation. If :meth:`has_axis` returns
    False for an axis, this method returns 0 for that axis.

    Args:
        axis (~libinput.constant.PointerAxis): The axis whose value to get.
    Returns:
        float: The axis value of this event.
    Raises:
        AttributeError: if this event is not a POINTER_AXIS event.
    """
    if self.type == EventType.POINTER_AXIS:
        return self._libinput.libinput_event_pointer_get_axis_value(
            self._handle, axis)
    raise AttributeError(_wrong_meth.format(self.type))
def _use_widgets(objs):
    """Whether a collection of Bokeh objects contains any Widget.

    Args:
        objs (seq[Model or Document]) :
    Returns:
        bool
    """
    from ..models.widgets import Widget

    def _is_widget(obj):
        return isinstance(obj, Widget)

    return _any(objs, _is_widget)
def regexp_replace(str, pattern, replacement):
    r"""Replace all substrings of the specified string value that match
    regexp with rep.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
    [Row(d=u'-----')]
    """
    # NOTE: the first parameter is named `str` (shadowing the builtin) to
    # match the public PySpark signature.
    ctx = SparkContext._active_spark_context
    java_col = ctx._jvm.functions.regexp_replace(
        _to_java_column(str), pattern, replacement)
    return Column(java_col)
def as_fixture(self, name=None):
    """A decorator to inject this container into a function as a test
    fixture.

    The container is entered (``with self``) around each call and passed
    to the function as keyword argument ``name`` (default: ``self.name``).
    """
    fixture_name = self.name if name is None else name

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                kwargs[fixture_name] = self
                return func(*args, **kwargs)
        return wrapper
    return decorator
def ToManagedObject(self):
    """Method creates and returns an object of ManagedObject class using
    the classId and information from this generic managed object.

    Properties unknown to the target class's meta are reported via
    WriteUcsWarning and skipped; children are converted recursively.
    Returns None when the classId does not resolve to a ManagedObject.
    """
    from Ucs import ClassFactory
    cln = UcsUtils.WordU(self.classId)
    mo = ClassFactory(cln)
    if mo and (isinstance(mo, ManagedObject) == True):
        metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId)
        # Copy over only the properties known to the target class's meta.
        for property in self.properties:
            if UcsUtils.WordU(property) in UcsUtils.GetUcsPropertyMetaAttributeList(metaClassId):
                mo.setattr(UcsUtils.WordU(property), self.properties[property])
            else:
                WriteUcsWarning("Property %s Not Exist in MO %s" % (UcsUtils.WordU(property), metaClassId))
        # Recursively convert any child generic objects.
        if len(self.child):
            for ch in self.child:
                moch = ch.ToManagedObject()
                mo.child.append(moch)
        return mo
    else:
        return None
def i2repr(self, pkt, packed_seconds):
    """Convert the internal representation to a nice one using the RFC
    format (e.g. 'Thu Jan 01 00:00:00 1970', UTC)."""
    seconds = self._convert_seconds(packed_seconds)
    return time.strftime("%a %b %d %H:%M:%S %Y", time.gmtime(seconds))
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
                      wnid_map, shuffle_seed=None):
    """Process the ILSVRC2010 training set by wiring a training-image
    producer to an HDF5-writing consumer.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write; assumes `features`, `targets`
        and `filenames` already exist with first dimension >= `n_train`.
    train_archive : str or file-like object
        Filename or handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or handle for the TAR archive of patch images.
    n_train : int
        The number of items in the training set.
    wnid_map : dict
        A dictionary mapping WordNet IDs to class indices.
    shuffle_seed : int or sequence, optional
        Seed for a NumPy RNG that permutes the training set on disk.
        If `None`, no permutation is performed (the default).
    """
    producer_fn = partial(train_set_producer,
                          train_archive=train_archive,
                          patch_archive=patch_archive,
                          wnid_map=wnid_map)
    consumer_fn = partial(image_consumer,
                          hdf5_file=hdf5_file,
                          num_expected=n_train,
                          shuffle_seed=shuffle_seed)
    producer_consumer(producer_fn, consumer_fn)
def do_layout(self, *args):
    """Layout each of my decks (no-op for the trivial 1x1 size)."""
    if self.size == [1, 1]:
        return
    for deck_index in range(len(self.decks)):
        self.layout_deck(deck_index)
def register_function_compilation(self, func, compilation_cbk, listclass):
    """Register given compilation method for given function.

    :param str func: Function name.
    :param callable compilation_cbk: Compilation callback to be called.
    :param class listclass: List class to use for lists.
    """
    entry = {'callback': compilation_cbk, 'listclass': listclass}
    self.compilations_function[func] = entry
def _determine_redirect(self, url, verb, timeout=15, headers={}): requests_verb = getattr(self.session, verb) r = requests_verb(url, timeout=timeout, headers=headers, allow_redirects=False) redirect = 300 <= r.status_code < 400 url_new = url if redirect: redirect_url = r.headers['Location'] url_new = redirect_url relative_redirect = not redirect_url.startswith('http') if relative_redirect: url_new = url base_redir = base_url(redirect_url) base_supplied = base_url(url) same_base = base_redir == base_supplied if same_base: url_new = url return url_new
Internal redirect function, focuses on HTTP and worries less about application-y stuff. @param url: the url to check @param verb: the verb, e.g. head, or get. @param timeout: the time, in seconds, that requests should wait before throwing an exception. @param headers: a set of headers as expected by requests. @return: the url that needs to be scanned. It may be equal to the url parameter if no redirect is needed.
def get_current_consumer_offsets(
    kafka_client,
    group,
    topics,
    raise_on_error=True,
):
    """Get current consumer offsets.

    NOTE: This method does not refresh client metadata; it is up to the
    caller to avoid using stale metadata. All topic requests are sent in
    one batch (saves performance and Kafka load), so if any partition
    leader is unavailable the request fails for the other topics too.

    :param kafka_client: a connected KafkaToolClient
    :param group: kafka group_id
    :param topics: topic list or dict {<topic>: [partitions]}
    :param raise_on_error: if False, ignore missing topics/partitions
        (the request send itself may still fail)
    :returns: a dict topic: partition: offset
    :raises: UnknownTopic / UnknownPartition when raise_on_error=True and
        topics or partitions are missing; FailedPayloadsError on send error.
    """
    topics = _verify_topics_and_partitions(kafka_client, topics, raise_on_error)
    # One fetch request per (topic, partition) pair.
    group_offset_reqs = [
        OffsetFetchRequestPayload(topic, partition)
        for topic, partitions in six.iteritems(topics)
        for partition in partitions
    ]
    group_offsets = {}
    send_api = kafka_client.send_offset_fetch_request_kafka
    if group_offset_reqs:
        # fail_on_error=False: unknown partitions are mapped to offset 0
        # by the callback instead of raising.
        group_resps = send_api(
            group=group,
            payloads=group_offset_reqs,
            fail_on_error=False,
            callback=pluck_topic_offset_or_zero_on_unknown,
        )
        for resp in group_resps:
            group_offsets.setdefault(
                resp.topic,
                {},
            )[resp.partition] = resp.offset
    return group_offsets
def serialize(self, keep_readonly=False):
    """Return the JSON that would be sent to azure from this model.

    This is an alias to
    `as_dict(full_restapi_key_transformer, keep_readonly=False)`.

    :param bool keep_readonly: If you want to serialize the readonly
        attributes
    :returns: A dict JSON compatible object
    :rtype: dict
    """
    return Serializer(self._infer_class_models())._serialize(
        self, keep_readonly=keep_readonly)
def clear(self):
    """Removes all panels from the CodeEditor (all four zones), detaching
    each removed panel and scheduling it for deletion."""
    for zone in range(4):
        # Drain the zone in key order; `remove` mutates the mapping.
        while self._panels[zone]:
            first_key = min(self._panels[zone])
            panel = self.remove(first_key)
            panel.setParent(None)
            panel.deleteLater()
def _raise_on_incompatible(left, right):
    """Helper function to render a consistent error message when raising
    IncompatibleFrequency.

    Parameters
    ----------
    left : PeriodArray
    right : DateOffset, Period, ndarray, or timedelta-like

    Raises
    ------
    IncompatibleFrequency
    """
    if isinstance(right, np.ndarray):
        other_freq = None
    elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
        other_freq = right.freqstr
    else:
        # Timedelta-like: derive the frequency from the equivalent tick.
        other_freq = _delta_to_tick(Timedelta(right)).freqstr
    message = DIFFERENT_FREQ.format(cls=type(left).__name__,
                                    own_freq=left.freqstr,
                                    other_freq=other_freq)
    raise IncompatibleFrequency(message)
def _create_bv_circuit(self, bit_map: Dict[str, str]) -> Program:
    """Implementation of the Bernstein-Vazirani Algorithm.

    Given the computational qubits and an ancilla, all initially in the
    |0> state, build a program that can find the hidden bit vector with
    one query to the oracle defined by ``bit_map``.

    :param Dict[String, String] bit_map: truth-table of the function,
        keyed by all possible bit-vector strings with the function values
        as values.
    :rtype: Program
    """
    unitary, _ = self._compute_unitary_oracle_matrix(bit_map)
    program = Program()
    program.defgate("BV-ORACLE", unitary)
    # Prepare the ancilla in the |-> state, then Hadamard the inputs.
    program.inst(X(self.ancilla), H(self.ancilla))
    program.inst([H(q) for q in self.computational_qubits])
    oracle_qubits = sorted(self.computational_qubits + [self.ancilla],
                           reverse=True)
    program.inst(tuple(["BV-ORACLE"] + oracle_qubits))
    program.inst([H(q) for q in self.computational_qubits])
    return program
def find_and_reserve_fcp(self, assigner_id):
    """Reserve an FCP device for ``assigner_id``.

    1. If the assigner already owns an FCP, re-reserve and return it.
    2. Otherwise find a free FCP and reserve it.

    Returns the FCP, or None when no FCP is available.
    """
    fcp_list = self.db.get_from_assigner(assigner_id)
    if fcp_list:
        # Re-use (and re-mark as reserved) the FCP already assigned.
        existing_fcp = fcp_list[0][0]
        self.db.reserve(existing_fcp)
        return existing_fcp
    new_fcp = self.db.find_and_reserve()
    if new_fcp is None:
        LOG.info("no more fcp to be allocated")
        return None
    LOG.debug("allocated %s fcp for %s assigner" % (new_fcp, assigner_id))
    return new_fcp
def _coord(self): _coord = [] _node = self while _node.parent: _idx = _node.parent.childs.index(_node) _coord.insert(0, _idx) _node = _node.parent return tuple(_coord)
Attribute indicating the tree coordinates for this node. The tree coordinates of a node are expressed as a tuple of the indices of the node and its ancestors, for example: A grandchild node with node path `/root.name/root.childs[2].name/root.childs[2].childs[0].name` would have coordinates `(2,0)`. The root node _coord is an empty tuple: `()` :returns: the tree coordinates for this node. :rtype: tuple
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp):
    """Generate pages from a list of .7z encoded history dumps.

    Args:
        corpus_files: a list of strings
        tmp_dir: a string
        max_page_size_exp: an integer

    Yields:
        strings
    """
    page_size_limit = 2 ** max_page_size_exp
    for remote_filepath in corpus_files:
        local_path = maybe_copy_file_to_directory(remote_filepath, tmp_dir)
        tf.logging.info("Reading from " + local_path)
        # Decompress to stdout and stream pages straight off the pipe.
        command = ["7z", "x", "-so", local_path]
        tf.logging.info("Running command: %s", command)
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1)
        for page in file_page_generator(proc.stdout, page_size_limit):
            yield page
def receive_reply(self, msg, content):
    """Handle replies with reading results.

    Error replies are logged and do not count toward the reply counter;
    successful replies have their EKB extractions collected. Once all
    expected replies have arrived, exit with status 0.
    """
    if content.head() == 'error':
        logger.error('Got error reply: "%s"' % content.gets('comment'))
        return
    self.extractions.append(content.gets('ekb'))
    self.reply_counter -= 1
    if self.reply_counter == 0:
        self.exit(0)